[ofw] [patch][core][l2w+genutils] Unify general utility functions into one library and move l2w into a separate directory
Irena Gannon
irena at mellanox.co.il
Tue Feb 1 10:30:37 PST 2011
Signed-off-by: Irena Gannon (irena at mellanox.co.il)
This patch:
1. Unifies some general utility functions into one library called genutils (see core/genutils/kernel/readme.txt for details)
2. Moves all l2w functionality into a separate directory under the core dir (a short usage sketch follows below)
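
To give a feel for how a ported driver consumes l2w after this change, here is a minimal
sketch (illustrative only - the DriverEntry/DriverUnload boilerplate and error handling are
not part of this patch; only l2w.h, l2w_init() and l2w_cleanup() come from the library):

    #include "l2w.h"

    NTSTATUS DriverEntry(PDRIVER_OBJECT p_drv_obj, PUNICODE_STRING p_reg_path)
    {
        /* set up the Linux-to-Windows wrappers (bit tables, delayed workqueue) */
        if (l2w_init())
            return STATUS_UNSUCCESSFUL;
        /* ... regular driver initialization ... */
        return STATUS_SUCCESS;
    }

    VOID DriverUnload(PDRIVER_OBJECT p_drv_obj)
    {
        /* tear the wrappers down (flushes and destroys the delayed workqueue) */
        l2w_cleanup();
    }

The driver's SOURCES file links l2w.lib the same way the library itself links complib.lib
(see TARGETLIBS in core/l2w/kernel/SOURCES below).
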
Index: B:/users/irena/proj1/trunk/core/dirs
===================================================================
--- B:/users/irena/proj1/trunk/core/dirs (revision 6771)
+++ B:/users/irena/proj1/trunk/core/dirs (revision 6862)
@@ -5,4 +5,6 @@
iou \
ibat \
winverbs \
- winmad
+ winmad \
+ genutils \
+ l2w
Index: B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_precomp.h
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_precomp.h (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_precomp.h (revision 6862)
@@ -0,0 +1,16 @@
+
+
+#include "l2w.h"
+
+#include "l2w_dbg.h"
+#include "l2w_wpptrace.h"
+
+#include <wdm.h>
+#include <ntstrsafe.h>
+
+#include <stdarg.h>
+#include <string.h>
+#include <strsafe.h>
+#include <stdio.h>
+
+
Index: B:/users/irena/proj1/trunk/core/l2w/kernel/SOURCES
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/kernel/SOURCES (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/kernel/SOURCES (revision 6862)
@@ -0,0 +1,59 @@
+TARGETNAME=l2w
+TARGETPATH=..\..\..\bin\kernel\obj$(BUILD_ALT_DIR)
+TARGETTYPE=DRIVER_LIBRARY
+
+
+
+!if $(FREEBUILD)
+ENABLE_EVENT_TRACING=1
+!else
+#ENABLE_EVENT_TRACING=1
+!endif
+
+
+DLLDEF=l2w.def
+
+SOURCES= \
+ l2w.rc \
+ l2w.c \
+ l2w_debug.c \
+ l2w_radix.c \
+ packer.c \
+ ud_header.c \
+ l2w_memory.c \
+ l2w_workqueue.c \
+ l2w_umem.c \
+ l2w_dbg.c
+
+INCLUDES=..\..\..\inc; \
+ ..\..\..\inc\kernel; \
+ ..\..\..\inc\kernel\l2w; \
+ ..\..\..\hw\mlx4\kernel\bus\inc; \
+ ..\..\..\hw\mlx4\kernel\bus\core; \
+ ..\..\..\hw\mlx4\kernel\bus\net; \
+ ..\..\..\hw\mlx4\kernel\inc; \
+
+C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS
+
+TARGETLIBS= \
+ $(TARGETPATH)\*\complib.lib \
+
+!IFDEF ENABLE_EVENT_TRACING
+
+C_DEFINES = $(C_DEFINES) -DEVENT_TRACING
+
+RUN_WPP= $(SOURCES) -km -dll -ext: .c .cpp .h .C .CPP .H\
+ -preserveext:.c .h\
+ -scan:l2w_wpptrace.h\
+ -func:L2W_PRINT(LEVEL,FLAGS,MSG,...)
+!ENDIF
+
+MC_SOURCEDIR=.
+
+MSC_WARNING_LEVEL= /W4
+
+
+PRECOMPILED_INCLUDE=l2w_precomp.h
+PRECOMPILED_PCH=l2w_precomp.pch
+PRECOMPILED_CXX=1
+
Index: B:/users/irena/proj1/trunk/core/l2w/kernel/l2w.c
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/kernel/l2w.c (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/kernel/l2w.c (revision 6862)
@@ -0,0 +1,282 @@
+#include "l2w_precomp.h"
+
+#include "core.h"
+#include "pa_cash.h"
+#include "mlx4.h"
+
+
+/* Nth element of the table contains the index of the first set bit of N; 8 - for N=0 */
+char g_set_bit_tbl[256];
+
+/* Nth element of the table contains the index of the first 0 bit of N; 8 - for N=255 */
+char g_clr_bit_tbl[256];
+
+/* interval for a cmd go-bit waiting */
+// TODO: it is not clear what this value should be:
+// 1. it has to be large enough that the thread actually goes to sleep while waiting;
+// 2. it has to be small enough that there is no excessive delay after the first command attempt;
+// 3. it has to be large enough not to cause intensive rescheduling;
+#define CMD_WAIT_USECS 2
+#define CMD_WAIT_INTERVAL ((-10) * CMD_WAIT_USECS)
+LARGE_INTEGER g_cmd_interval = { (ULONG)CMD_WAIT_INTERVAL, 0 };
+
+////////////////////////////////////////////////////////
+//
+// PCI POOL
+//
+////////////////////////////////////////////////////////
+
+pci_pool_t *
+pci_pool_create (const char *name, struct pci_dev *pdev,
+ size_t size, size_t align, size_t allocation)
+{
+ pci_pool_t *pool;
+ UNREFERENCED_PARAMETER(align);
+ UNREFERENCED_PARAMETER(allocation);
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ // allocation parameter is not handled yet
+ ASSERT(allocation == 0);
+
+    //TODO: not absolutely correct: Linux's pci_pool_alloc provides contiguous physical memory,
+    // while the default alloc function - ExAllocatePoolWithTag - doesn't.
+    // But for now the pool is used only for elements of size <= PAGE_SIZE.
+    // Anyway - a sanity check:
+ ASSERT(size <= PAGE_SIZE);
+ if (size > PAGE_SIZE)
+ return NULL;
+
+ // allocate object
+ pool = (pci_pool_t *)ExAllocatePoolWithTag( NonPagedPool, sizeof(pci_pool_t), MT_TAG_PCIPOOL );
+ if (pool == NULL)
+ return NULL;
+
+    //TODO: not too effective: one can write its own alloc/free functions
+ ExInitializeNPagedLookasideList( &pool->pool_hdr, NULL, NULL, 0, size, MT_TAG_PCIPOOL, 0 );
+
+ // fill the object
+ pool->mdev = pdev->dev;
+ pool->size = size;
+ strncpy( pool->name, name, sizeof pool->name );
+
+ return pool;
+}
+
+
+////////////////////////////////////////////////////////
+//
+// BIT TECHNIQUES
+//
+////////////////////////////////////////////////////////
+
+void fill_bit_tbls()
+{
+ unsigned long i;
+ for (i=0; i<256; ++i) {
+ g_set_bit_tbl[i] = (char)(_ffs_raw(&i,0) - 1);
+ g_clr_bit_tbl[i] = (char)(_ffz_raw(&i,0) - 1);
+ }
+ g_set_bit_tbl[0] = g_clr_bit_tbl[255] = 8;
+}
+
+
+////////////////////////////////////////////////////////
+//
+// BIT MAPS
+//
+////////////////////////////////////////////////////////
+
+int __bitmap_full(const unsigned long *bitmap, int bits)
+{
+ int k, lim = bits/BITS_PER_LONG;
+ for (k = 0; k < lim; ++k)
+ if (~bitmap[k])
+ return 0;
+
+ if (bits % BITS_PER_LONG)
+ if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
+ return 0;
+
+ return 1;
+}
+
+int __bitmap_empty(const unsigned long *bitmap, int bits)
+{
+ int k, lim = bits/BITS_PER_LONG;
+ for (k = 0; k < lim; ++k)
+ if (bitmap[k])
+ return 0;
+
+ if (bits % BITS_PER_LONG)
+ if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
+ return 0;
+
+ return 1;
+}
+
+
+////////////////////////////////////////////////////////
+//
+// DEBUG PRINT
+//
+////////////////////////////////////////////////////////
+
+VOID
+WriteEventLogEntry(
+ PVOID pi_pIoObject,
+ ULONG pi_ErrorCode,
+ ULONG pi_UniqueErrorCode,
+ ULONG pi_FinalStatus,
+ ULONG pi_nDataItems,
+ ...
+ )
+/*++
+
+Routine Description:
+ Writes an event log entry to the event log.
+
+Arguments:
+
+ pi_pIoObject......... The IO object ( driver object or device object ).
+ pi_ErrorCode......... The error code.
+ pi_UniqueErrorCode... A specific error code.
+ pi_FinalStatus....... The final status.
+ pi_nDataItems........ Number of data items.
+ .
+ . data items values
+ .
+
+Return Value:
+
+ None .
+
+--*/
+{ /* WriteEventLogEntry */
+
+ /* Variable argument list */
+ va_list l_Argptr;
+ /* Pointer to an error log entry */
+ PIO_ERROR_LOG_PACKET l_pErrorLogEntry;
+
+ /* Init the variable argument list */
+ va_start(l_Argptr, pi_nDataItems);
+
+ /* Allocate an error log entry */
+ l_pErrorLogEntry =
+ (PIO_ERROR_LOG_PACKET)IoAllocateErrorLogEntry(
+ pi_pIoObject,
+ (UCHAR)(sizeof(IO_ERROR_LOG_PACKET)+pi_nDataItems*sizeof(ULONG))
+ );
+ /* Check allocation */
+ if ( l_pErrorLogEntry != NULL)
+ { /* OK */
+
+ /* Data item index */
+ USHORT l_nDataItem ;
+
+ /* Set the error log entry header */
+ l_pErrorLogEntry->ErrorCode = pi_ErrorCode;
+ l_pErrorLogEntry->DumpDataSize = (USHORT) (pi_nDataItems*sizeof(ULONG));
+ l_pErrorLogEntry->SequenceNumber = 0;
+ l_pErrorLogEntry->MajorFunctionCode = 0;
+ l_pErrorLogEntry->IoControlCode = 0;
+ l_pErrorLogEntry->RetryCount = 0;
+ l_pErrorLogEntry->UniqueErrorValue = pi_UniqueErrorCode;
+ l_pErrorLogEntry->FinalStatus = pi_FinalStatus;
+
+ /* Insert the data items */
+ for (l_nDataItem = 0; l_nDataItem < pi_nDataItems; l_nDataItem++)
+        { /* Insert a data item */
+
+ /* Current data item */
+ int l_CurDataItem ;
+
+ /* Get next data item */
+ l_CurDataItem = va_arg( l_Argptr, int);
+
+ /* Put it into the data array */
+ l_pErrorLogEntry->DumpData[l_nDataItem] = l_CurDataItem ;
+
+        } /* Insert a data item */
+
+ /* Write the packet */
+ IoWriteErrorLogEntry(l_pErrorLogEntry);
+
+ } /* OK */
+
+ /* Term the variable argument list */
+ va_end(l_Argptr);
+
+} /* WriteEventLogEntry */
+
+
+////////////////////////////////////////////////////////
+//
+// GENERAL
+//
+////////////////////////////////////////////////////////
+
+// from lib/string.c
+/**
+* strlcpy - Copy a %NUL terminated string into a sized buffer
+* @dest: Where to copy the string to
+* @src: Where to copy the string from
+* @size: size of destination buffer
+*
+* Compatible with *BSD: the result is always a valid
+* NUL-terminated string that fits in the buffer (unless,
+* of course, the buffer size is zero). It does not pad
+* out the result like strncpy() does.
+*/
+SIZE_T strlcpy(char *dest, const void *src, SIZE_T size)
+{
+ SIZE_T ret = strlen(src);
+
+ if (size) {
+ SIZE_T len = (ret >= size) ? size-1 : ret;
+ memcpy(dest, src, len);
+ dest[len] = '\0';
+ }
+ return ret;
+}
+
+int parse_dev_location(
+ const char *buffer,
+ const char *format,
+ int *bus, int *dev, int *func
+)
+{
+ return sscanf( buffer, format, bus, dev, func );
+}
+
+int core_init()
+{
+ int err;
+
+ fill_bit_tbls();
+ init_qp_state_tbl();
+ err = ib_core_init();
+ if (err)
+ return err;
+ return pa_cash_init();
+}
+
+void core_cleanup()
+{
+ ib_core_cleanup();
+ pa_cash_release();
+}
+
+int l2w_init()
+{
+ fill_bit_tbls();
+ init_workqueues();
+ return 0;
+}
+
+void l2w_cleanup()
+{
+ shutdown_workqueues();
+}
+
Index: B:/users/irena/proj1/trunk/core/l2w/kernel/packer.c
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/kernel/packer.c (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/kernel/packer.c (revision 6862)
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2004 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: packer.c 1349 2004-12-16 21:09:43Z roland $
+ */
+
+#include "l2w.h"
+#include "ib_pack.h"
+
+#include "l2w_dbg.h"
+#include "l2w_wpptrace.h"
+
+#ifdef offsetof
+#undef offsetof
+#endif
+#if defined(EVENT_TRACING)
+#include "packer.tmh"
+#endif
+
+
+static u64 value_read(int offset, int size, u8 *structure)
+{
+ switch (size) {
+ case 1: return *(u8 *) (structure + offset);
+ case 2: return be16_to_cpup((__be16 *) (structure + offset));
+ case 4: return be32_to_cpup((__be32 *) (structure + offset));
+ case 8: return be64_to_cpup((__be64 *) (structure + offset));
+ default:
+ L2W_PRINT(TRACE_LEVEL_WARNING ,L2W , "Field size %d bits not handled\n", size * 8);
+ return 0;
+ }
+}
+
+/**
+ * ib_pack - Pack a structure into a buffer
+ * @desc:Array of structure field descriptions
+ * @desc_len:Number of entries in @desc
+ * @structure:Structure to pack from
+ * @buf:Buffer to pack into
+ *
+ * ib_pack() packs a list of structure fields into a buffer,
+ * controlled by the array of fields in @desc.
+ */
+void ib_pack(const struct ib_field *desc,
+ int desc_len,
+ void *structure,
+ u8 *buf)
+{
+ int i;
+
+ for (i = 0; i < desc_len; ++i) {
+ if (desc[i].size_bits <= 32) {
+ int shift;
+ u32 val;
+ __be32 mask;
+ __be32 *addr;
+
+ shift = 32 - desc[i].offset_bits - desc[i].size_bits;
+ if (desc[i].struct_size_bytes)
+ val = (u32)(value_read((int)desc[i].struct_offset_bytes,
+ (int)desc[i].struct_size_bytes,
+ structure) << shift);
+ else
+ val = 0;
+
+ mask = cpu_to_be32(((1ull << desc[i].size_bits) - 1) << shift);
+ addr = (__be32 *) buf + desc[i].offset_words;
+ *addr = (*addr & ~mask) | (cpu_to_be32(val) & mask);
+ } else if (desc[i].size_bits <= 64) {
+ int shift;
+ u64 val;
+ __be64 mask;
+ __be64 *addr;
+
+ shift = 64 - desc[i].offset_bits - desc[i].size_bits;
+ if (desc[i].struct_size_bytes)
+ val = value_read((int)desc[i].struct_offset_bytes,
+ (int)desc[i].struct_size_bytes,
+ structure) << shift;
+ else
+ val = 0;
+
+ mask = cpu_to_be64((~0ull >> (64 - desc[i].size_bits)) << shift);
+ addr = (__be64 *) ((__be32 *) buf + desc[i].offset_words);
+ *addr = (*addr & ~mask) | (cpu_to_be64(val) & mask);
+ } else {
+ if (desc[i].offset_bits % 8 ||
+ desc[i].size_bits % 8) {
+ L2W_PRINT(TRACE_LEVEL_WARNING ,L2W ,
+ "Structure field %s of size %d bits is not byte-aligned\n",
+ desc[i].field_name, desc[i].size_bits);
+ }
+
+ if (desc[i].struct_size_bytes)
+ memcpy(buf + desc[i].offset_words * 4 +
+ desc[i].offset_bits / 8,
+ (u8*)structure + desc[i].struct_offset_bytes,
+ desc[i].size_bits / 8);
+ else
+ memset(buf + desc[i].offset_words * 4 +
+ desc[i].offset_bits / 8,
+ 0,
+ desc[i].size_bits / 8);
+ }
+ }
+}
+EXPORT_SYMBOL(ib_pack);
+
+static void value_write(int offset, int size, u64 val, u8 *structure)
+{
+ switch (size * 8) {
+ case 8: *( u8 *) (structure + offset) = (u8)val; break;
+ case 16: *(__be16 *) (structure + offset) = cpu_to_be16(val); break;
+ case 32: *(__be32 *) (structure + offset) = cpu_to_be32(val); break;
+ case 64: *(__be64 *) (structure + offset) = cpu_to_be64(val); break;
+ default:
+ L2W_PRINT(TRACE_LEVEL_WARNING ,L2W , "Field size %d bits not handled\n", size * 8);
+ }
+}
+
+/**
+ * ib_unpack - Unpack a buffer into a structure
+ * @desc:Array of structure field descriptions
+ * @desc_len:Number of entries in @desc
+ * @buf:Buffer to unpack from
+ * @structure:Structure to unpack into
+ *
+ * ib_unpack() unpacks a list of structure fields from a buffer,
+ * controlled by the array of fields in @desc.
+ */
+void ib_unpack(const struct ib_field *desc,
+ int desc_len,
+ void *buf,
+ void *structure)
+{
+ int i;
+
+ for (i = 0; i < desc_len; ++i) {
+ if (!desc[i].struct_size_bytes)
+ continue;
+
+ if (desc[i].size_bits <= 32) {
+ int shift;
+ u32 val;
+ u32 mask;
+ __be32 *addr;
+
+ shift = 32 - desc[i].offset_bits - desc[i].size_bits;
+ mask = ((1ull << desc[i].size_bits) - 1) << shift;
+ addr = (__be32 *) buf + desc[i].offset_words;
+ val = (be32_to_cpup(addr) & mask) >> shift;
+ value_write((int)desc[i].struct_offset_bytes,
+ (int)desc[i].struct_size_bytes,
+ val,
+ structure);
+ } else if (desc[i].size_bits <= 64) {
+ int shift;
+ u64 val;
+ u64 mask;
+ __be64 *addr;
+
+ shift = 64 - desc[i].offset_bits - desc[i].size_bits;
+ mask = (~0ull >> (64 - desc[i].size_bits)) << shift;
+ addr = (__be64 *) buf + desc[i].offset_words;
+ val = (be64_to_cpup(addr) & mask) >> shift;
+ value_write((int)desc[i].struct_offset_bytes,
+ (int)desc[i].struct_size_bytes,
+ val,
+ structure);
+ } else {
+ if (desc[i].offset_bits % 8 ||
+ desc[i].size_bits % 8) {
+ L2W_PRINT(TRACE_LEVEL_WARNING ,L2W ,
+ "Structure field %s of size %d bits is not byte-aligned\n",
+ desc[i].field_name, desc[i].size_bits);
+ }
+
+ memcpy((u8*)structure + desc[i].struct_offset_bytes,
+ (u8*)buf + desc[i].offset_words * 4 +
+ desc[i].offset_bits / 8,
+ desc[i].size_bits / 8);
+ }
+ }
+}
+EXPORT_SYMBOL(ib_unpack);
Index: B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_wpptrace.c
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_wpptrace.c (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_wpptrace.c (revision 6862)
@@ -0,0 +1,24 @@
+#include "l2w_wpptrace.h"
+
+u32 ROUNDUP_LOG2(u32 arg)
+{
+ if (arg <= 1) return 0;
+ if (arg <= 2) return 1;
+ if (arg <= 4) return 2;
+ if (arg <= 8) return 3;
+ if (arg <= 16) return 4;
+ if (arg <= 32) return 5;
+ if (arg <= 64) return 6;
+ if (arg <= 128) return 7;
+ if (arg <= 256) return 8;
+ if (arg <= 512) return 9;
+ if (arg <= 1024) return 10;
+ if (arg <= 2048) return 11;
+ if (arg <= 4096) return 12;
+ if (arg <= 8192) return 13;
+ if (arg <= 16384) return 14;
+ if (arg <= 32768) return 15;
+ if (arg <= 65536) return 16;
+ ASSERT(FALSE);
+ return 32;
+}
Index: B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_debug.c
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_debug.c (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_debug.c (revision 6862)
@@ -0,0 +1,440 @@
+#include "l2w_precomp.h"
+#include "device.h"
+//#include "ev_log.h"
+
+#define MAX_BUFFER_SIZE 256
+
+struct mlx4_dev;
+
+/*
+ * This function sends to Event Log messages with one WCHAR string and several binary parameters.
+ * The string will be inserted instead of %2 parameter of the message.
+ * Binary parameters will be shown in Dump Area of the message.
+ * Binary parameters should be of type LONG.
+ */
+VOID
+WriteEventLogEntryStr(
+ PVOID pi_pIoObject,
+ ULONG pi_ErrorCode,
+ ULONG pi_UniqueErrorCode,
+ ULONG pi_FinalStatus,
+ PWCHAR pi_InsertionStr,
+ ULONG pi_nDataItems,
+ ...
+ )
+/*++
+
+Routine Description:
+ Writes an event log entry to the event log.
+
+Arguments:
+
+ pi_pIoObject......... The IO object ( driver object or device object ).
+ pi_ErrorCode......... The error code.
+ pi_UniqueErrorCode... A specific error code.
+ pi_FinalStatus....... The final status.
+ pi_nDataItems........ Number of data items.
+ .
+ . data items values
+ .
+
+Return Value:
+
+ None .
+
+--*/
+{ /* WriteEventLogEntryStr */
+
+ /* Variable argument list */
+ va_list l_Argptr;
+ /* Pointer to an error log entry */
+ PIO_ERROR_LOG_PACKET l_pErrorLogEntry;
+ /* sizeof insertion string */
+ int l_Size = (int)((pi_InsertionStr) ? ((wcslen(pi_InsertionStr) + 1) * sizeof( WCHAR )) : 0);
+ int l_PktSize =sizeof(IO_ERROR_LOG_PACKET)+pi_nDataItems*sizeof(ULONG);
+ int l_TotalSize =l_PktSize +l_Size;
+
+ if (pi_pIoObject == NULL) {
+ ASSERT(pi_pIoObject != NULL);
+ return;
+ }
+
+ /* Init the variable argument list */
+ va_start(l_Argptr, pi_nDataItems);
+
+ /* Allocate an error log entry */
+ if (l_TotalSize >= ERROR_LOG_MAXIMUM_SIZE - 2)
+ l_TotalSize = ERROR_LOG_MAXIMUM_SIZE - 2;
+ l_pErrorLogEntry = (PIO_ERROR_LOG_PACKET)IoAllocateErrorLogEntry(
+ pi_pIoObject, (UCHAR)l_TotalSize );
+
+ /* Check allocation */
+ if ( l_pErrorLogEntry != NULL)
+ { /* OK */
+
+ /* Data item index */
+ USHORT l_nDataItem ;
+
+ /* Set the error log entry header */
+ l_pErrorLogEntry->ErrorCode = pi_ErrorCode;
+ l_pErrorLogEntry->DumpDataSize = (USHORT) (pi_nDataItems*sizeof(ULONG));
+ l_pErrorLogEntry->SequenceNumber = 0;
+ l_pErrorLogEntry->MajorFunctionCode = 0;
+ l_pErrorLogEntry->IoControlCode = 0;
+ l_pErrorLogEntry->RetryCount = 0;
+ l_pErrorLogEntry->UniqueErrorValue = pi_UniqueErrorCode;
+ l_pErrorLogEntry->FinalStatus = pi_FinalStatus;
+
+ /* Insert the data items */
+ for (l_nDataItem = 0; l_nDataItem < pi_nDataItems; l_nDataItem++)
+        { /* Insert a data item */
+
+ /* Current data item */
+ int l_CurDataItem ;
+
+ /* Get next data item */
+ l_CurDataItem = va_arg( l_Argptr, int);
+
+ /* Put it into the data array */
+ l_pErrorLogEntry->DumpData[l_nDataItem] = l_CurDataItem ;
+
+        } /* Insert a data item */
+
+ /* add insertion string */
+ if (pi_InsertionStr) {
+ char *ptr;
+ int sz = min( l_TotalSize - l_PktSize, l_Size );
+ l_pErrorLogEntry->NumberOfStrings = 1;
+ l_pErrorLogEntry->StringOffset = sizeof(IO_ERROR_LOG_PACKET) + l_pErrorLogEntry->DumpDataSize;
+ ptr = (char*)l_pErrorLogEntry + l_pErrorLogEntry->StringOffset;
+ memcpy( ptr, pi_InsertionStr, sz );
+ *(WCHAR*)&ptr[sz - 2] = (WCHAR)0;
+ }
+
+ /* Write the packet */
+ IoWriteErrorLogEntry(l_pErrorLogEntry);
+
+ } /* OK */
+
+ /* Term the variable argument list */
+ va_end(l_Argptr);
+
+} /* WriteEventLogEntry */
+
+/*
+ * This function sends to Event Log messages with various parameters.
+ * Every parameter should be coded as a pair: a format specifier and the value.
+ * 'pi_nDataItems' presents the number of the pairs.
+ *
+ * Here is an example:
+ *
+ * To print a message (from MC file) like:
+ *
+ * MessageId=0x0006 Facility=MLX4 Severity=Informational SymbolicName=EVENT_MLX4_INFO_TEST
+ * Language=English
+ * some_long %2, some_short %3, some_byte %4, some_wide_char_str %5, some_ansii_str %6
+ *
+ * you have to code:
+ *
+ * WriteEventLogEntryData( pdev->p_self_do, (ULONG)EVENT_MLX4_INFO_TEST, 0, 0, 5,
+ * L"%d", long_int, // LONG
+ * L"%04x", (ULONG)short_int, // SHORT
+ * L"%02x", (ULONG)byte_int, // CHAR
+ * L"%s", wide_char_str, // PWCHAR
+ * L"%S", ansii_str // PCHAR
+ * );
+ */
+VOID
+WriteEventLogEntryData(
+ PVOID pi_pIoObject,
+ ULONG pi_ErrorCode,
+ ULONG pi_UniqueErrorCode,
+ ULONG pi_FinalStatus,
+ ULONG pi_nDataItems,
+ ...
+ )
+/*++
+
+Routine Description:
+ Writes an event log entry to the event log.
+
+Arguments:
+
+ pi_pIoObject......... The IO object ( driver object or device object ).
+ pi_ErrorCode......... The error code.
+ pi_UniqueErrorCode... A specific error code.
+ pi_FinalStatus....... The final status.
+ pi_nDataItems........ Number of data items (i.e. pairs of data parameters).
+ .
+ . data items values
+ .
+
+Return Value:
+
+ None .
+
+--*/
+{ /* WriteEventLogEntryData */
+
+ /* Variable argument list */
+ va_list l_Argptr;
+ /* Pointer to an error log entry */
+ PIO_ERROR_LOG_PACKET l_pErrorLogEntry;
+ /* sizeof insertion string */
+ int l_Size = 0;
+ /* temp buffer */
+ UCHAR l_Buf[ERROR_LOG_MAXIMUM_SIZE - 2];
+ /* position in buffer */
+ UCHAR * l_Ptr = l_Buf;
+ /* Data item index */
+ USHORT l_nDataItem ;
+ /* total packet size */
+ int l_TotalSize;
+
+ if (pi_pIoObject == NULL) {
+ ASSERT(pi_pIoObject != NULL);
+ return;
+ }
+
+ /* Init the variable argument list */
+ va_start(l_Argptr, pi_nDataItems);
+
+ /* Create the insertion strings Insert the data items */
+ memset( l_Buf, 0, sizeof(l_Buf) );
+ for (l_nDataItem = 0; l_nDataItem < pi_nDataItems; l_nDataItem++)
+ {
+ NTSTATUS status;
+ /* Current binary data item */
+ int l_CurDataItem ;
+ /* Current pointer data item */
+ void* l_CurPtrDataItem ;
+ /* format specifier */
+ WCHAR* l_FormatStr;
+ /* the rest of the buffer */
+ int l_BufSize = (int)(l_Buf + sizeof(l_Buf)- l_Ptr);
+ /* size of insertion string */
+ size_t l_StrSize;
+
+ /* print as much as we can */
+ if ( l_BufSize < 4 )
+ break;
+
+ /* Get format specifier */
+ l_FormatStr = va_arg( l_Argptr, PWCHAR);
+
+ /* Get next data item */
+ if ( !wcscmp( l_FormatStr, L"%s" ) || !wcscmp( l_FormatStr, L"%S" ) ) {
+ l_CurPtrDataItem = va_arg( l_Argptr, PWCHAR);
+ /* convert to string */
+ status = RtlStringCchPrintfW( (NTSTRSAFE_PWSTR)l_Ptr, l_BufSize>>1, l_FormatStr , l_CurPtrDataItem );
+ }
+ else {
+ l_CurDataItem = va_arg( l_Argptr, int);
+ /* convert to string */
+ status = RtlStringCchPrintfW( (NTSTRSAFE_PWSTR)l_Ptr, l_BufSize>>1, l_FormatStr , l_CurDataItem );
+ }
+
+ if (!NT_SUCCESS(status))
+ return;
+
+ /* prepare the next loop */
+ status = RtlStringCbLengthW( (NTSTRSAFE_PWSTR)l_Ptr, l_BufSize, &l_StrSize );
+ if (!NT_SUCCESS(status))
+ return;
+ *(WCHAR*)&l_Ptr[l_StrSize] = (WCHAR)0;
+ l_StrSize += 2;
+ l_Size = l_Size + (int)l_StrSize;
+ l_Ptr = l_Buf + l_Size;
+ l_BufSize = (int)(l_Buf + sizeof(l_Buf)- l_Ptr);
+
+    } /* Insert a data item */
+
+ /* Term the variable argument list */
+ va_end(l_Argptr);
+
+ /* Allocate an error log entry */
+ l_TotalSize =sizeof(IO_ERROR_LOG_PACKET) +l_Size;
+ if (l_TotalSize >= ERROR_LOG_MAXIMUM_SIZE - 2) {
+ l_TotalSize = ERROR_LOG_MAXIMUM_SIZE - 2;
+ l_Size = l_TotalSize - sizeof(IO_ERROR_LOG_PACKET);
+ }
+ l_pErrorLogEntry = (PIO_ERROR_LOG_PACKET)IoAllocateErrorLogEntry(
+ pi_pIoObject, (UCHAR)l_TotalSize );
+
+ /* Check allocation */
+ if ( l_pErrorLogEntry != NULL)
+ { /* OK */
+
+ /* Set the error log entry header */
+ l_pErrorLogEntry->ErrorCode = pi_ErrorCode;
+ l_pErrorLogEntry->DumpDataSize = 0;
+ l_pErrorLogEntry->SequenceNumber = 0;
+ l_pErrorLogEntry->MajorFunctionCode = 0;
+ l_pErrorLogEntry->IoControlCode = 0;
+ l_pErrorLogEntry->RetryCount = 0;
+ l_pErrorLogEntry->UniqueErrorValue = pi_UniqueErrorCode;
+ l_pErrorLogEntry->FinalStatus = pi_FinalStatus;
+ l_pErrorLogEntry->NumberOfStrings = l_nDataItem;
+ l_pErrorLogEntry->StringOffset = sizeof(IO_ERROR_LOG_PACKET) + l_pErrorLogEntry->DumpDataSize;
+ l_Ptr = (UCHAR*)l_pErrorLogEntry + l_pErrorLogEntry->StringOffset;
+ if ( l_Size )
+ memcpy( l_Ptr, l_Buf, l_Size );
+
+ /* Write the packet */
+ IoWriteErrorLogEntry(l_pErrorLogEntry);
+
+ } /* OK */
+
+} /* WriteEventLogEntry */
+
+// bsize should be strlen(src)
+// dest must have enough room, i.e. at least (2*strlen(src) + 2) bytes
+void __ansi_to_wchar( USHORT *dest, UCHAR *src, int bsize)
+{
+ int i;
+
+ for (i=0; i<bsize; ++i)
+ *dest++ = *src++;
+ *dest = 0;
+}
+
+VOID
+mlx4_err(
+ IN struct mlx4_dev * mdev,
+ IN char* format,
+ ...
+ )
+{
+ va_list list;
+ UCHAR buf[MAX_BUFFER_SIZE];
+ //WCHAR wbuf[MAX_BUFFER_SIZE];
+
+ // print to Debugger
+ va_start(list, format);
+ buf[MAX_BUFFER_SIZE - 1] = '\0';
+
+ if (mdev == NULL) {
+ ASSERT(mdev != NULL);
+ return;
+ }
+
+
+ if (RtlStringCbVPrintfA( (char*)buf, sizeof(buf), format, list))
+ return;
+ cl_dbg_out( "%s\n", (char*)buf );
+ va_end(list);
+
+ // print to Event Log
+ //__ansi_to_wchar( wbuf, buf, (int)strlen((void*)buf) );
+ //WriteEventLogEntryStr( mdev->pdev->p_self_do, (ULONG)EVENT_MLX4_ANY_ERROR, 0, 0, wbuf, 0, 0 );
+}
+
+VOID
+mlx4_warn(
+ IN struct mlx4_dev * mdev,
+ IN char* format,
+ ...
+ )
+{
+ va_list list;
+ UCHAR buf[MAX_BUFFER_SIZE];
+ //WCHAR wbuf[MAX_BUFFER_SIZE];
+
+ // print to Debugger
+ va_start(list, format);
+ buf[MAX_BUFFER_SIZE - 1] = '\0';
+
+ if (mdev == NULL) {
+ ASSERT(mdev != NULL);
+ return;
+ }
+
+
+ if (RtlStringCbVPrintfA( (char*)buf, sizeof(buf), format, list))
+ return;
+ cl_dbg_out( "%s\n", (char*)buf );
+ va_end(list);
+
+ // print to Event Log
+ //__ansi_to_wchar( wbuf, buf, (int)strlen((void*)buf) );
+ //WriteEventLogEntryStr( mdev->pdev->p_self_do, (ULONG)EVENT_MLX4_ANY_WARN, 0, 0, wbuf, 0, 0 );
+}
+
+VOID
+mlx4_dbg(
+ IN struct mlx4_dev * mdev,
+ IN char* format,
+ ...
+ )
+{
+#if DBG
+ va_list list;
+ UCHAR buf[MAX_BUFFER_SIZE];
+ UNUSED_PARAM(mdev);
+
+ // print to Debugger
+ va_start(list, format);
+ buf[MAX_BUFFER_SIZE - 1] = '\0';
+ RtlStringCbVPrintfA( (char*)buf, sizeof(buf), format, list);
+ cl_dbg_out( "%s\n", (char*)buf );
+ va_end(list);
+#else
+ UNUSED_PARAM(mdev);
+ UNUSED_PARAM(format);
+#endif //DBG
+}
+
+VOID
+dev_err(
+ IN struct mlx4_dev ** mdev,
+ IN char* format,
+ ...
+ )
+{
+ va_list list;
+ UCHAR buf[MAX_BUFFER_SIZE];
+ //WCHAR wbuf[MAX_BUFFER_SIZE];
+
+ if (mdev == NULL) {
+ ASSERT(mdev != NULL);
+ return;
+ }
+
+ // print to Debugger
+ va_start(list, format);
+ buf[MAX_BUFFER_SIZE - 1] = '\0';
+ RtlStringCbVPrintfA( (char*)buf, sizeof(buf), format, list);
+ cl_dbg_out( "%s\n", (char*)buf );
+ va_end(list);
+
+ // print to Event Log
+ //RtlStringCchPrintfW(wbuf, sizeof(wbuf)/sizeof(wbuf[0]), L"%S", buf);
+ //WriteEventLogEntryStr( (*mdev)->pdev->p_self_do, (ULONG)EVENT_MLX4_ANY_ERROR, 0, 0, wbuf, 0, 0 );
+}
+
+VOID
+dev_info(
+ IN struct mlx4_dev ** p_mdev,
+ IN char* format,
+ ...
+ )
+{
+#if DBG
+ va_list list;
+ UCHAR buf[MAX_BUFFER_SIZE];
+ UNUSED_PARAM(p_mdev);
+
+ // print to Debugger
+ va_start(list, format);
+ buf[MAX_BUFFER_SIZE - 1] = '\0';
+ RtlStringCbVPrintfA( (char*)buf, sizeof(buf), format, list);
+ cl_dbg_out( "%s\n", (char*)buf );
+ va_end(list);
+#else
+ UNUSED_PARAM(p_mdev);
+ UNUSED_PARAM(format);
+#endif
+}
+
+
+
Index: B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_workqueue.c
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_workqueue.c (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_workqueue.c (revision 6862)
@@ -0,0 +1,85 @@
+#include "l2w_precomp.h"
+
+static struct workqueue_struct *delayed_wq = NULL;
+
+int init_workqueues()
+{
+ delayed_wq = create_singlethread_workqueue("DELAYED_WQ");
+
+ if(delayed_wq == NULL)
+ {
+ return -1;
+ }
+ return 0;
+}
+
+void shutdown_workqueues()
+{
+ flush_workqueue(delayed_wq);
+ destroy_workqueue(delayed_wq);
+}
+
+static void queue_delayed_work_timer(void* context)
+{
+ struct delayed_work *work = (struct delayed_work *) context;
+
+ queue_work(delayed_wq, &work->work);
+}
+
+
+
+int cancel_work_sync(struct work_struct *work)
+{
+ struct workqueue_struct *wq = NULL;
+ int pending = 0;
+
+ if(work == NULL)
+ {
+ return 0;
+ }
+
+ if(work->func == NULL)
+ {// work was not initialized
+ return 0;
+ }
+
+ wq = work->wq;
+
+ if(wq == NULL)
+ {
+ return 0;
+ }
+
+ spin_lock(&wq->lock);
+ if(wq->current_work == work)
+ {// work is running - wait for completion
+ while(wq->current_work == work)
+ {
+ spin_unlock(&wq->lock);
+ msleep(10);
+ spin_lock(&wq->lock);
+ }
+ spin_unlock(&wq->lock);
+ }
+ else
+ {// work is pending in the queue
+ if(work->wq != NULL)
+ {// work is queued, and not just initialized
+ list_del(&work->list);
+ pending = 1;
+ }
+ spin_unlock(&wq->lock);
+ }
+ return pending;
+}
+
+void cancel_delayed_work_sync(struct delayed_work *work)
+{
+ if(work->timer.pfn_callback != NULL)
+ {// timer was set
+ del_timer_sync(&work->timer);
+ }
+
+ cancel_work_sync(&work->work);
+}
+
Index: B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_wpptrace.h
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_wpptrace.h (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_wpptrace.h (revision 6862)
@@ -0,0 +1,84 @@
+/*++
+
+Copyright (c) 2005-2008 Mellanox Technologies. All rights reserved.
+
+Module Name:
+    l2w_wpptrace.h
+
+Abstract:
+ This module contains all debug-related code.
+
+Revision History:
+
+Notes:
+
+--*/
+
+#pragma once
+
+
+#if defined(EVENT_TRACING)
+
+#define WPP_CONTROL_GUIDS \
+ WPP_DEFINE_CONTROL_GUID(EthrnetGuid,(684E068C, 3FDC, 4bce, 89C3, CDB77A8B75A4), \
+ WPP_DEFINE_BIT(L2W) \
+ WPP_DEFINE_BIT(L2W_INIT)) \
+
+#define WPP_LEVEL_FLAGS_ENABLED(lvl, flags) (WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= lvl)
+#define WPP_LEVEL_FLAGS_LOGGER(lvl,flags) WPP_LEVEL_LOGGER(flags)
+#define WPP_FLAG_ENABLED(flags)(WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= TRACE_LEVEL_VERBOSE)
+#define WPP_FLAG_LOGGER(flags) WPP_LEVEL_LOGGER(flags)
+
+// begin_wpp config
+// L2W_ENTER();
+// L2W_EXIT();
+// USESUFFIX(L2W_PRINT, "%!STDPREFIX! %!FUNC!");
+// L2W_PRINT(LEVEL,FLAGS,MSG,...)
+// USESUFFIX(L2W_ENTER, "====>>> %!FUNC! ");
+// USESUFFIX(L2W_EXIT, "<<<====== %!FUNC!]");
+// end_wpp
+
+
+#else //defined(EVENT_TRACING)
+
+// Debug topics
+#define L2W 0x000001
+#define L2W_INIT 0x000020
+
+#define TRACE_LEVEL_CRITICAL DPFLTR_ERROR_LEVEL
+#define TRACE_LEVEL_FATAL DPFLTR_ERROR_LEVEL
+#define TRACE_LEVEL_ERROR DPFLTR_ERROR_LEVEL
+#define TRACE_LEVEL_WARNING DPFLTR_WARNING_LEVEL
+#define TRACE_LEVEL_INFORMATION DPFLTR_TRACE_LEVEL
+#define TRACE_LEVEL_VERBOSE DPFLTR_INFO_LEVEL
+
+#define DBG_LEVEL_THRESH TRACE_LEVEL_ERROR
+#define DBG_FLAGS 0xffff
+
+void
+TraceL2WMessage(
+ char* func,
+ char* file,
+ unsigned long line,
+ unsigned long level,
+ char* format,
+ ...
+ );
+
+#pragma warning(disable:4296) // expression is always true/false
+#pragma warning(disable:4127) //conditional expression is constant
+#define L2W_PRINT(_level_,_flag_, _format_, ...) \
+ if ((DBG_FLAGS & (_flag_)) && (DBG_LEVEL_THRESH >= (_level_))) \
+ { \
+ TraceL2WMessage(__FUNCTION__, __FILE__, __LINE__, _level_, _format_, __VA_ARGS__); \
+ }
+
+#define L2W_ENTER()\
+ L2W_PRINT(TRACE_LEVEL_VERBOSE, L2W, "===>\n");
+
+#define L2W_EXIT()\
+ L2W_PRINT(TRACE_LEVEL_VERBOSE, L2W, "<===\n");
+
+#endif //defined(EVENT_TRACING)
+
+
Index: B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_dbg.c
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_dbg.c (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_dbg.c (revision 6862)
@@ -0,0 +1,104 @@
+/*++
+
+Copyright (c) 2005-2008 Mellanox Technologies. All rights reserved.
+
+Module Name:
+    l2w_dbg.c
+
+Abstract:
+    This module contains all debug-related code.
+Notes:
+
+--*/
+
+#include "l2w_precomp.h"
+
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "l2w_dbg.tmh"
+#endif
+
+#if DBG
+
+#define TEMP_BUFFER_SIZE 128
+
+#if 0
+VOID cl_dbg_out( IN PCCH format, ...)
+{
+ va_list list;
+ va_start(list, format);
+ vDbgPrintEx(DPFLTR_IHVNETWORK_ID, DPFLTR_ERROR_LEVEL, format, list);
+ va_end(list);
+}
+#endif
+
+#if !defined(EVENT_TRACING)
+void
+TraceL2WMessage(
+ char* func,
+ char* file,
+ unsigned long line,
+ unsigned long level,
+ char* format,
+ ...
+ )
+/*++
+
+Routine Description:
+
+    Debug print helper for the l2w library.
+
+Arguments:
+
+    level - trace level of the message (one of the TRACE_LEVEL_* values)
+
+Return Value:
+
+ None.
+
+ --*/
+ {
+#if DBG
+
+ va_list list;
+ long status;
+
+ char psPrefix[TEMP_BUFFER_SIZE];
+ char* fileName = strrchr(file, '\\');
+
+ va_start(list, format);
+
+    if (fileName)
+        fileName++;
+    else
+        fileName = file;
+
+ if(level == TRACE_LEVEL_ERROR)
+ {
+ status = RtlStringCchPrintfA(psPrefix, TEMP_BUFFER_SIZE, "***ERROR*** %s (%s:%d) ", func, fileName, line);
+ }
+ else
+ {
+ status = RtlStringCchPrintfA(psPrefix, TEMP_BUFFER_SIZE, "%s (%s:%d) ", func, fileName, line);
+ level = TRACE_LEVEL_ERROR;
+ }
+
+ ASSERT(status >= 0);
+ vDbgPrintExWithPrefix(psPrefix , DPFLTR_IHVNETWORK_ID, level, format, list);
+
+ va_end(list);
+
+#else
+
+    UNREFERENCED_PARAMETER(func);
+    UNREFERENCED_PARAMETER(file);
+    UNREFERENCED_PARAMETER(line);
+    UNREFERENCED_PARAMETER(level);
+    UNREFERENCED_PARAMETER(format);
+
+#endif
+}
+#endif
+
+#endif // DBG
+
Index: B:/users/irena/proj1/trunk/core/l2w/kernel/ud_header.c
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/kernel/ud_header.c (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/kernel/ud_header.c (revision 6862)
@@ -0,0 +1,392 @@
+/*
+ * Copyright (c) 2004 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: ud_header.c 1349 2004-12-16 21:09:43Z roland $
+ */
+
+#include "l2w.h"
+#include "ib_pack.h"
+
+#include "l2w_dbg.h"
+#include "l2w_wpptrace.h"
+
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+
+#include "ud_header.tmh"
+#endif
+
+
+#define STRUCT_FIELD(header, field) \
+ .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
+ .struct_size_bytes = sizeof ((struct ib_unpacked_ ## header *) 0)->field, \
+ .field_name = #header ":" #field
+
+#define STRUCT_FIELD_INIT(header, field,ow,ob,sb) \
+ offsetof(struct ib_unpacked_ ## header, field), \
+ sizeof ((struct ib_unpacked_ ## header *) 0)->field, \
+ ow,ob,sb, \
+ #header ":" #field
+
+#define STRUCT_FIELD_INITR(ow,ob,sb) \
+ 0, 0, ow, ob, sb, "reserved"
+
+static const struct ib_field lrh_table[] = {
+ { STRUCT_FIELD_INIT(lrh, virtual_lane, 0, 0, 4) },
+ { STRUCT_FIELD_INIT(lrh, link_version, 0, 4, 4) },
+ { STRUCT_FIELD_INIT(lrh, service_level, 0, 8, 4) },
+ { STRUCT_FIELD_INITR(0,12,2) },
+ { STRUCT_FIELD_INIT(lrh, link_next_header, 0, 14, 2) },
+ { STRUCT_FIELD_INIT(lrh, destination_lid, 0, 16, 16) },
+ { STRUCT_FIELD_INITR(1,0,5) },
+ { STRUCT_FIELD_INIT(lrh, packet_length, 1, 5, 11) },
+ { STRUCT_FIELD_INIT(lrh, source_lid, 1, 16, 16) }
+};
+
+static const struct ib_field eth_table[] = {
+ { STRUCT_FIELD_INIT(eth, dmac_h, 0, 0, 32) },
+ { STRUCT_FIELD_INIT(eth, dmac_l, 1, 0, 16) },
+ { STRUCT_FIELD_INIT(eth, smac_h, 1, 16,16) },
+ { STRUCT_FIELD_INIT(eth, smac_l, 2, 0 ,32) },
+ { STRUCT_FIELD_INIT(eth, type, 3, 0, 16)}
+};
+
+
+static const struct ib_field grh_table[] = {
+ { STRUCT_FIELD_INIT(grh, ip_version, 0, 0, 4) },
+ { STRUCT_FIELD_INIT(grh, traffic_class, 0, 4, 8) },
+ { STRUCT_FIELD_INIT(grh, flow_label, 0, 12, 20) },
+ { STRUCT_FIELD_INIT(grh, payload_length, 1, 0, 16) },
+ { STRUCT_FIELD_INIT(grh, next_header, 1, 16, 8) },
+ { STRUCT_FIELD_INIT(grh, hop_limit, 1, 24, 8) },
+ { STRUCT_FIELD_INIT(grh, source_gid, 2, 0, 128) },
+ { STRUCT_FIELD_INIT(grh, destination_gid, 6, 0, 128) }
+};
+
+static const struct ib_field bth_table[] = {
+ { STRUCT_FIELD_INIT(bth, opcode, 0, 0, 8) },
+ { STRUCT_FIELD_INIT(bth, solicited_event, 0, 8, 1) },
+ { STRUCT_FIELD_INIT(bth, mig_req, 0, 9, 1) },
+ { STRUCT_FIELD_INIT(bth, pad_count, 0, 10, 2) },
+ { STRUCT_FIELD_INIT(bth, transport_header_version, 0, 12, 4) },
+ { STRUCT_FIELD_INIT(bth, pkey, 0, 16, 16) },
+ { STRUCT_FIELD_INITR(1,0,8) },
+ { STRUCT_FIELD_INIT(bth, destination_qpn, 1, 8, 24) },
+ { STRUCT_FIELD_INIT(bth, ack_req, 2, 0, 1) },
+ { STRUCT_FIELD_INITR(2,1,7) },
+ { STRUCT_FIELD_INIT(bth, psn, 2, 8, 24) }
+};
+
+static const struct ib_field deth_table[] = {
+ { STRUCT_FIELD_INIT(deth, qkey, 0, 0, 32) },
+ { STRUCT_FIELD_INITR(1,0,8) },
+ { STRUCT_FIELD_INIT(deth, source_qpn, 1, 8, 24) }
+};
+
+/**
+ * ib_ud_header_init - Initialize UD header structure
+ * @payload_bytes:Length of packet payload
+ * @grh_present:GRH flag (if non-zero, GRH will be included)
+ * @header:Structure to initialize
+ *
+ * ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header,
+ * lrh.packet_length, grh.ip_version, grh.payload_length,
+ * grh.next_header, bth.opcode, bth.pad_count and
+ * bth.transport_header_version fields of a &struct ib_ud_header given
+ * the payload length and whether a GRH will be included.
+ */
+void ib_ud_header_init(int payload_bytes,
+ int grh_present,
+ struct ib_ud_header *header)
+{
+ int header_len;
+ u16 packet_length;
+
+ memset(header, 0, sizeof *header);
+
+ header_len =
+ IB_LRH_BYTES +
+ IB_BTH_BYTES +
+ IB_DETH_BYTES;
+ if (grh_present) {
+ header_len += IB_GRH_BYTES;
+ }
+
+ header->lrh.link_version = 0;
+ header->lrh.link_next_header =
+ grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
+ packet_length = (u16)((IB_LRH_BYTES +
+ IB_BTH_BYTES +
+ IB_DETH_BYTES +
+ payload_bytes +
+ 4 + /* ICRC */
+ 3) / 4); /* round up */
+
+ header->grh_present = grh_present;
+ if (grh_present) {
+ packet_length += IB_GRH_BYTES / 4;
+ header->grh.ip_version = 6;
+ header->grh.payload_length =
+ cpu_to_be16((IB_BTH_BYTES +
+ IB_DETH_BYTES +
+ payload_bytes +
+ 4 + /* ICRC */
+ 3) & ~3); /* round up */
+ header->grh.next_header = 0x1b;
+ }
+
+ header->lrh.packet_length = cpu_to_be16(packet_length);
+
+ if (header->immediate_present)
+ header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
+ else
+ header->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+ header->bth.pad_count = (u8)((4 - payload_bytes) & 3);
+ header->bth.transport_header_version = 0;
+}
+EXPORT_SYMBOL(ib_ud_header_init);
+
+/**
+ * ib_ud_header_pack - Pack UD header struct into wire format
+ * @header:UD header struct
+ * @buf:Buffer to pack into
+ *
+ * ib_ud_header_pack() packs the UD header structure @header into wire
+ * format in the buffer @buf.
+ */
+int ib_ud_header_pack(struct ib_ud_header *header,
+ u8 *buf)
+{
+ int len = 0;
+
+ ib_pack(lrh_table, ARRAY_SIZE(lrh_table),
+ &header->lrh, buf);
+ len += IB_LRH_BYTES;
+
+ if (header->grh_present) {
+ ib_pack(grh_table, ARRAY_SIZE(grh_table),
+ &header->grh, buf + len);
+ len += IB_GRH_BYTES;
+ }
+
+ ib_pack(bth_table, ARRAY_SIZE(bth_table),
+ &header->bth, buf + len);
+ len += IB_BTH_BYTES;
+
+ ib_pack(deth_table, ARRAY_SIZE(deth_table),
+ &header->deth, buf + len);
+ len += IB_DETH_BYTES;
+
+ if (header->immediate_present) {
+ memcpy(buf + len, &header->immediate_data, sizeof header->immediate_data);
+ len += sizeof header->immediate_data;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL(ib_ud_header_pack);
+
+/**
+ * ib_ud_header_unpack - Unpack UD header struct from wire format
+ * @header:UD header struct
+ * @buf:Buffer to unpack from
+ *
+ * ib_ud_header_unpack() unpacks the UD header structure @header from wire
+ * format in the buffer @buf.
+ */
+int ib_ud_header_unpack(u8 *buf,
+ struct ib_ud_header *header)
+{
+ ib_unpack(lrh_table, ARRAY_SIZE(lrh_table),
+ buf, &header->lrh);
+ buf += IB_LRH_BYTES;
+
+ if (header->lrh.link_version != 0) {
+ L2W_PRINT(TRACE_LEVEL_WARNING ,L2W , "Invalid LRH.link_version %d\n",
+ header->lrh.link_version);
+ return -EINVAL;
+ }
+
+ switch (header->lrh.link_next_header) {
+ case IB_LNH_IBA_LOCAL:
+ header->grh_present = 0;
+ break;
+
+ case IB_LNH_IBA_GLOBAL:
+ header->grh_present = 1;
+ ib_unpack(grh_table, ARRAY_SIZE(grh_table),
+ buf, &header->grh);
+ buf += IB_GRH_BYTES;
+
+ if (header->grh.ip_version != 6) {
+ L2W_PRINT(TRACE_LEVEL_WARNING ,L2W , "Invalid GRH.ip_version %d\n",
+ header->grh.ip_version);
+ return -EINVAL;
+ }
+ if (header->grh.next_header != 0x1b) {
+ L2W_PRINT(TRACE_LEVEL_WARNING ,L2W , "Invalid GRH.next_header 0x%02x\n",
+ header->grh.next_header);
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ L2W_PRINT(TRACE_LEVEL_WARNING ,L2W , "Invalid LRH.link_next_header %d\n",
+ header->lrh.link_next_header);
+ return -EINVAL;
+ }
+
+ ib_unpack(bth_table, ARRAY_SIZE(bth_table),
+ buf, &header->bth);
+ buf += IB_BTH_BYTES;
+
+ switch (header->bth.opcode) {
+ case IB_OPCODE_UD_SEND_ONLY:
+ header->immediate_present = 0;
+ break;
+ case IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE:
+ header->immediate_present = 1;
+ break;
+ default:
+ L2W_PRINT(TRACE_LEVEL_WARNING ,L2W , "Invalid BTH.opcode 0x%02x\n",
+ header->bth.opcode);
+ return -EINVAL;
+ }
+
+ if (header->bth.transport_header_version != 0) {
+ L2W_PRINT(TRACE_LEVEL_WARNING ,L2W , "Invalid BTH.transport_header_version %d\n",
+ header->bth.transport_header_version);
+ return -EINVAL;
+ }
+
+ ib_unpack(deth_table, ARRAY_SIZE(deth_table),
+ buf, &header->deth);
+ buf += IB_DETH_BYTES;
+
+ if (header->immediate_present)
+ memcpy(&header->immediate_data, buf, sizeof header->immediate_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(ib_ud_header_unpack);
+
+/**
+ * ib_rdmaoe_ud_header_init - Initialize UD header structure
+ * @payload_bytes:Length of packet payload
+ * @grh_present:GRH flag (if non-zero, GRH will be included)
+ * @header:Structure to initialize
+ *
+ * ib_rdmaoe_ud_header_init() initializes the grh.ip_version, grh.payload_length,
+ * grh.next_header, bth.opcode, bth.pad_count and
+ * bth.transport_header_version fields of a &struct eth_ud_header given
+ * the payload length and whether a GRH will be included.
+ */
+void ib_rdmaoe_ud_header_init(int payload_bytes,
+ int grh_present,
+ struct eth_ud_header *header)
+{
+ int header_len;
+
+ memset(header, 0, sizeof *header);
+
+ header_len =
+ sizeof header->eth +
+ IB_BTH_BYTES +
+ IB_DETH_BYTES;
+ if (grh_present)
+ header_len += IB_GRH_BYTES;
+
+ header->grh_present = grh_present;
+ if (grh_present) {
+ header->grh.ip_version = 6;
+ header->grh.payload_length =
+ cpu_to_be16((IB_BTH_BYTES +
+ IB_DETH_BYTES +
+ payload_bytes +
+ 4 + /* ICRC */
+ 3) & ~3); /* round up */
+ header->grh.next_header = 0x1b;
+ }
+
+ if (header->immediate_present)
+ header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
+ else
+ header->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+ header->bth.pad_count =(u8) ((4 - payload_bytes) & 3);
+ header->bth.transport_header_version = 0;
+}
+
+
+
+/**
+ * rdmaoe_ud_header_pack - Pack UD header struct into eth wire format
+ * @header:UD header struct
+ * @buf:Buffer to pack into
+ *
+ * rdmaoe_ud_header_pack() packs the UD header structure @header into wire
+ * format in the buffer @buf.
+ */
+int rdmaoe_ud_header_pack(struct eth_ud_header *header,
+ void *buf)
+{
+ int len = 0;
+
+ ib_pack(eth_table, ARRAY_SIZE(eth_table),
+ &header->eth, buf);
+ len += IB_ETH_BYTES;
+
+ if (header->grh_present) {
+ ib_pack(grh_table, ARRAY_SIZE(grh_table),
+ &header->grh, (u8*)buf + len);
+ len += IB_GRH_BYTES;
+ }
+
+ ib_pack(bth_table, ARRAY_SIZE(bth_table),
+ &header->bth, (u8*)buf + len);
+ len += IB_BTH_BYTES;
+
+ ib_pack(deth_table, ARRAY_SIZE(deth_table),
+ &header->deth, (u8*)buf + len);
+ len += IB_DETH_BYTES;
+
+ if (header->immediate_present) {
+ memcpy((u8*)buf + len, &header->immediate_data,
+ sizeof header->immediate_data);
+ len += sizeof header->immediate_data;
+ }
+
+ return len;
+}
+
+
Index: B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_dbg.h
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_dbg.h (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_dbg.h (revision 6862)
@@ -0,0 +1,40 @@
+/*++
+
+Copyright (c) 2005-2010 Mellanox Technologies. All rights reserved.
+
+Module Name:
+    l2w_dbg.h
+
+Abstract:
+    This module contains all debug-related code.
+Notes:
+
+--*/
+
+#pragma once
+
+#ifdef _PREFAST_
+#define CONDITION_ASSUMED(X) __analysis_assume((X))
+#else
+#define CONDITION_ASSUMED(X)
+#endif // _PREFAST_
+
+#if DBG
+
+#undef ASSERT
+#define ASSERT(x) if(!(x)) { \
+ DbgPrintEx(DPFLTR_IHVNETWORK_ID, DPFLTR_ERROR_LEVEL, "Assertion failed: %s:%d %s\n", __FILE__, __LINE__, #x);\
+ DbgBreakPoint(); }\
+ CONDITION_ASSUMED(x);
+
+#define ASSERT_ALWAYS(x) ASSERT(x)
+
+#else // !DBG
+
+#undef ASSERT
+#define ASSERT(x)
+
+#define ASSERT_ALWAYS(x) if(!(x)) { \
+ DbgBreakPoint(); }
+
+#endif // DBG
Index: B:/users/irena/proj1/trunk/core/l2w/kernel/l2w.rc
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/kernel/l2w.rc (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/kernel/l2w.rc (revision 6862)
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: ibal.rc 1611 2006-08-20 14:48:55Z sleybo $
+ */
+
+
+#include <oib_ver.h>
+
+#define VER_FILETYPE VFT_DRV
+#define VER_FILESUBTYPE VFT2_UNKNOWN
+
+#ifdef _DEBUG_
+#define VER_FILEDESCRIPTION_STR "L2W Library (Debug)"
+#else
+#define VER_FILEDESCRIPTION_STR "L2W Library"
+#endif
+
+#define VER_INTERNALNAME_STR "l2w.lib"
+#define VER_ORIGINALFILENAME_STR "l2w.lib"
+
Index: B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_umem.c
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_umem.c (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_umem.c (revision 6862)
@@ -0,0 +1,181 @@
+
+#include "l2w_precomp.h"
+#include "ib_verbs.h"
+
+#if defined (EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "l2w_umem.tmh"
+#endif
+
+/**
+ * ib_umem_release - release memory pinned with ib_umem_get
+ * @umem: umem struct to release
+ */
+void ib_umem_release(struct ib_umem *p_ib_umem)
+{
+ if (p_ib_umem->secure_handle) {
+ __try {
+ MmUnsecureVirtualMemory( p_ib_umem->secure_handle );
+ p_ib_umem->secure_handle = NULL;
+ }
+ __except (EXCEPTION_EXECUTE_HANDLER) {
+ NTSTATUS Status = GetExceptionCode();
+ UNUSED_PARAM_WOWPP(Status);
+ L2W_PRINT(TRACE_LEVEL_ERROR ,L2W ,
+ "Exception 0x%x on MmUnsecureVirtualMemory(), addr %I64x, size %I64x, seg_num %d, nr_pages %d\n",
+ Status, p_ib_umem->iobuf.va, (u64)p_ib_umem->iobuf.size,
+ p_ib_umem->iobuf.seg_num, p_ib_umem->iobuf.nr_pages );
+ }
+ }
+ if (p_ib_umem->iobuf_used)
+ iobuf_deregister_with_cash(&p_ib_umem->iobuf);
+ kfree(p_ib_umem);
+}
+
+
+/**
+ * ib_umem_get - Pin and DMA map userspace memory.
+ * @context: userspace context to pin memory for
+ * @addr: userspace virtual address to start at
+ * @size: length of region to pin
+ * @access: IB_ACCESS_xxx flags for memory being pinned
+ */
+struct ib_umem *ib_umem_get(struct ib_ucontext *context, u64 addr,
+ size_t size, enum ib_access_flags access, boolean_t secure)
+{
+ int err;
+ struct ib_umem *p_ib_umem;
+
+ // create the object
+ p_ib_umem = kzalloc(sizeof *p_ib_umem, GFP_KERNEL);
+ if (!p_ib_umem)
+ goto err_nomem;
+
+ p_ib_umem->p_uctx = context;
+ p_ib_umem->page_size = PAGE_SIZE;
+
+ // register the memory
+ iobuf_init( addr, (u64)size, !!context, &p_ib_umem->iobuf);
+ err = iobuf_register_with_cash( addr, (u64)size, !!context,
+ &access, &p_ib_umem->iobuf );
+ if (err)
+ goto err_reg_mem;
+ p_ib_umem->iobuf_used = TRUE;
+
+ // TODO: map the memory for DMA
+
+ // secure memory
+ if (!context || !secure)
+ goto done;
+ __try {
+ p_ib_umem->secure_handle = MmSecureVirtualMemory (
+ (PVOID)(ULONG_PTR)addr, size,
+ (access & IB_ACCESS_LOCAL_WRITE) ? PAGE_READWRITE : PAGE_READONLY );
+ if (p_ib_umem->secure_handle == NULL)
+ goto err_secure;
+ }
+ __except (EXCEPTION_EXECUTE_HANDLER) {
+ NTSTATUS Status = GetExceptionCode();
+ UNUSED_PARAM_WOWPP(Status);
+ L2W_PRINT(TRACE_LEVEL_ERROR ,L2W ,
+ "Exception 0x%x on MmSecureVirtualMemory(), addr %I64x, size %I64x, access %#x\n",
+ Status, addr, (u64)size, access );
+ goto err_secure;
+ }
+ goto done;
+
+err_secure:
+ iobuf_deregister(&p_ib_umem->iobuf);
+
+err_reg_mem:
+ kfree(p_ib_umem);
+
+err_nomem:
+ p_ib_umem = ERR_PTR(-ENOMEM);
+
+done:
+ return p_ib_umem;
+}
+
+int ib_umem_page_count(struct ib_umem *p_ib_umem)
+{
+ return (int)p_ib_umem->iobuf.nr_pages;
+}
+
+dma_addr_t ib_umem_get_dma(struct ib_umem *p_ib_umem)
+{
+ u64 pages[1] = { 0 };
+ iobuf_iter_t iobuf_iter;
+ dma_addr_t dma_addr = { 0, 0 , 0 };
+
+ iobuf_iter_init( &p_ib_umem->iobuf, &iobuf_iter );
+ iobuf_get_tpt_seg( &p_ib_umem->iobuf, &iobuf_iter, 1, pages );
+ // TODO: convert phys address to DMA one
+ dma_addr.da = pages[0];
+
+ return dma_addr;
+}
+
+
+// Returns: 0 on success, -ENOMEM or -EACCESS or -EFAULT on error
+int ib_umem_map(
+ IN u64 va,
+ IN u64 size,
+ IN ib_access_t acc,
+ OUT PMDL *mdl,
+ OUT void **kva)
+{
+ PMDL p_mdl;
+ int rc = 0;
+ LOCK_OPERATION lock_op = (acc & IB_AC_LOCAL_WRITE) ? IoModifyAccess : IoReadAccess;
+
+ p_mdl = IoAllocateMdl( (PVOID)(ULONG_PTR)va, (ULONG)size, FALSE,FALSE,NULL);
+ if (p_mdl == NULL) {
+ rc = -ENOMEM;
+ goto err_alloc_mdl;
+ }
+
+ __try {
+ MmProbeAndLockPages( p_mdl, UserMode, lock_op ); /* lock memory */
+ }
+ __except (EXCEPTION_EXECUTE_HANDLER) {
+ L2W_PRINT(TRACE_LEVEL_ERROR ,L2W ,
+ "MOSAL_iobuf_register: Exception 0x%x on MmProbeAndLockPages(), va %I64d, sz %I64d\n",
+ GetExceptionCode(), va, size);
+ rc = -EACCES;
+ goto err_probe;
+ }
+
+ *kva = MmMapLockedPagesSpecifyCache( p_mdl,
+ KernelMode, MmNonCached, NULL, FALSE, NormalPagePriority );
+ if (*kva == NULL) {
+ L2W_PRINT(TRACE_LEVEL_ERROR ,L2W ,"MmMapLockedPagesSpecifyCache failed\n");
+ rc = -EFAULT;
+ goto err_map;
+ }
+
+ *mdl = p_mdl;
+ return 0;
+
+err_map:
+ MmUnlockPages(p_mdl);
+err_probe:
+ IoFreeMdl(p_mdl);
+err_alloc_mdl:
+ return rc;
+}
+
+void ib_umem_unmap(
+ IN PMDL p_mdl,
+ IN void *kva)
+{
+ if (kva) {
+ MmUnmapLockedPages( kva, p_mdl );
+ MmUnlockPages(p_mdl);
+ IoFreeMdl(p_mdl);
+ }
+}
+
+
Index: B:/users/irena/proj1/trunk/core/l2w/kernel/readme.txt
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/kernel/readme.txt (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/kernel/readme.txt (revision 6862)
@@ -0,0 +1,118 @@
+This library is intended for drivers ported from Linux.
+It contains mostly wrappers for Linux kernel/compiler tools.
+To use it, include the l2w.h file and link with l2w.lib.
+l2w.lib in turn uses the complib.lib library.
+
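+For example, a driver built in this tree would typically add to its SOURCES file
+(paths are illustrative and depend on the driver's location in the tree):
+
+    INCLUDES=$(INCLUDES);..\..\..\inc\kernel\l2w
+    TARGETLIBS=$(TARGETLIBS) $(TARGETPATH)\*\l2w.lib
+
+and then #include "l2w.h" in its sources.
+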
+Here are the services l2w provides:
+
+//
+// l2w_atomic.h - dealing with atomics
+//
+
+// data types
+atomic_t - 32-bit atomic variable
+
+// methods
+atomic_read - read atomic value
+atomic_set - set atomic value
+atomic_inc - increment atomic value
+atomic_dec - decrement atomic value
+atomic_inc_and_test - increment and test atomic value
+atomic_dec_and_test - decrement and test atomic value
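+
+For example (illustrative; 'refcnt' and release_object() are hypothetical names):
+
+    atomic_t refcnt;
+    atomic_set(&refcnt, 1);               /* start with one reference */
+    atomic_inc(&refcnt);                  /* take another reference */
+    if (atomic_dec_and_test(&refcnt))     /* drop it; true when the count reaches 0 */
+        release_object();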
+
+
+//
+// l2w_bit.h - dealing with bit dwords and maps
+//
+
+// methods
+fls - find last set bit in a dword
+ffs - find first set bit in a dword
+ffz - find first zero bit in a dword
+find_first_bit - find first set bit in a map
+find_first_zero_bit - find first zero bit in a map
+find_next_zero_bit - find the next zero bit in a map
+DECLARE_BITMAP - declare a bit map
+atomic_set_bit - set atomically a bit in a bit map
+atomic_clear_bit - clear atomically a bit in a bit map
+set_bit - set atomically a bit in a dword
+clear_bit - clear atomically a bit in a dword
+test_bit - test a bit in a dword
+bitmap_zero - zero a bit map
+bitmap_full - returns TRUE if bit map is full (all bits are set)
+bitmap_empty - returns TRUE if bit map is empty (all bits are clear)
+bitmap_fill - fill a map with ones
+ilog2 - find the log2 of a value stored in a dword
+is_power_of_2 - return TRUE if a value stored in a dword is a power of 2
+roundup_pow_of_two - round a dword value to an upper power of 2 (e.g., 5-->8)
+
+//
+// l2w_list.h - dealing with double-linked lists
+//
+
+// data types
+list_head - a list header/link
+
+// methods
+LIST_HEAD - define and initialize a list header
+INIT_LIST_HEAD - initialize a list header
+list_entry - get to the beginning of the structure for the given list entry
+list_for_each_entry - iterate over a list of 'list_els' of given 'type'
+list_for_each_entry_reverse - iterate backwards over a list of 'list_els' of given 'type'
+list_for_each_entry_safe - iterate over a list of given type safe against removal of list entry
+list_add - insert a new entry after the specified head
+list_add_tail - insert a new entry before the specified head
+list_del - deletes an entry from a list
+list_empty - tests whether a list is empty
+list_splice_init - insert src_list into dst_list and reinitialise the emptied src_list
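+
+For example (illustrative; struct my_item, my_list and p_item are hypothetical names;
+the exact macro signatures are the ones defined in l2w_list.h):
+
+    struct my_item { int val; struct list_head link; };
+    LIST_HEAD(my_list);                       /* define and initialize a list head */
+    list_add_tail(&p_item->link, &my_list);   /* queue an item at the tail */
+    if (!list_empty(&my_list))
+        list_del(&p_item->link);              /* and take it off again */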
+
+//
+// l2w_memory.h - dealing with memory allocations
+//
+
+// data types
+dma_addr_t - implementatin of Linux dma_addr_t type, describing DMA address
+struct scatterlist - implementatin of Linux struct scatterlist
+
+// methods
+get_order - returns log of number of pages (i.e for size <= 4096 ==> 0, for size <= 8192 ==> 1)
+kmalloc - allocate kernel memory
+kzalloc - allocate kernel memory and zero it
+kcalloc - allocate an array of elements in kernel memory and zero it
+kfree - free kernel memory
+ioremap - map bus memory into CPU space
+iounmap - unmap bus memory
+lowmem_page_address - get virtual address of dma_addr_t
+__get_free_page - allocate a page and zero it
+dma_sync_single - flush DMA buffers (not implemented)
+sg_dma_addr - returns the dma_addr_t of an SG list entry
+sg_page - returns the dma_addr_t of an SG list entry
+sg_dma_address - returns the physical address of an SG list entry
+sg_dma_address_inc - increments the physical address in an SG list entry
+sg_dma_len - returns the size of an SG list entry
+sg_init_table - zeroes an array of SG list entries
+sg_set_buf - sets the offset in an SG list entry
+sg_set_page - sets the offset and buffer address in an SG list entry
+
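+// A minimal allocation sketch (GFP_KERNEL semantics as in Linux are assumed;
+// 'struct ctx' is illustrative only):
+//
+//   struct ctx *p = kzalloc( sizeof(struct ctx), GFP_KERNEL );   // zeroed
+//   if ( !p )
+//       return -ENOMEM;
+//   ...
+//   kfree( p );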
+
+//
+// l2w_pci.h - work with PCI bus
+//
+
+// methods
+pci_resource_start - get BAR physical address
+pci_resource_len - get BAR size
+readq - read a quadword from IO space, mapped to system memory
+readl - read a dword from IO space, mapped to system memory
+reads - read a word from IO space, mapped to system memory
+readb - read a byte from IO space, mapped to system memory
+writeq - write a quadword to IO space, mapped to system memory
+writel - write a dword to IO space, mapped to system memory
+writes - write a word to IO space, mapped to system memory
+writeb - write a byte to IO space, mapped to system memory
+
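+// A hedged sketch of the MMIO helpers above (the exact l2w_pci.h signatures may
+// differ; 'pdev', REG_OFFSET and the caching type are illustrative only):
+//
+//   u8 *p_bar = (u8*)ioremap( pci_resource_start(pdev, 0),
+//                             pci_resource_len(pdev, 0), MmNonCached );
+//   u32 val = readl( p_bar + REG_OFFSET );   // 32-bit read
+//   writel( val | 0x1, p_bar + REG_OFFSET ); // 32-bit write back
+//   iounmap( p_bar, pci_resource_len(pdev, 0) );
+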
+//
+//
+//
+
+
Index: B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_radix.c
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_radix.c (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_radix.c (revision 6862)
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: radix.c 1611 2006-08-20 14:48:55Z sleybo $
+ */
+
+#include "l2w_precomp.h"
+#include "errno.h"
+
+int radix_tree_insert(struct radix_tree_root *root,
+ unsigned long index, void *item)
+{
+ if ( NULL == cl_map_insert( &root->map, (const uint64_t)index, item ) )
+ return -EFAULT;
+ return 0;
+}
+
+void *radix_tree_lookup(struct radix_tree_root *root,
+ unsigned long index)
+{
+ void* item = cl_map_get( &root->map, (const uint64_t)index );
+ return item;
+}
+
+void *radix_tree_delete(struct radix_tree_root *root,
+ unsigned long index)
+{
+ void* item = cl_map_remove( &root->map, (const uint64_t)index );
+ return item;
+}
+
+cl_status_t radix_tree_create(struct radix_tree_root *root,
+ gfp_t gfp_mask)
+{
+#define MIN_ITEMS 32
+ cl_status_t cl_status;
+ UNUSED_PARAM(gfp_mask);
+
+ cl_map_construct( &root->map );
+ cl_status = cl_map_init( &root->map, MIN_ITEMS );
+ return cl_status;
+}
+
+void radix_tree_destroy(struct radix_tree_root *root )
+{
+ cl_map_destroy( &root->map );
+}
+
Index: B:/users/irena/proj1/trunk/core/l2w/kernel/makefile
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/kernel/makefile (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/kernel/makefile (revision 6862)
@@ -0,0 +1,7 @@
+#
+# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source
+# file to this component. This file merely indirects to the real make file
+# that is shared by all the driver components of the Windows NT DDK
+#
+
+!INCLUDE $(NTMAKEENV)\makefile.def
Index: B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_memory.c
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_memory.c (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/kernel/l2w_memory.c (revision 6862)
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2004 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mt_memory.c 2020 2007-05-01 09:29:10Z leonid $
+ */
+#include "l2w_precomp.h"
+
+#ifdef offsetof
+#undef offsetof
+#endif
+#if defined(EVENT_TRACING)
+#include "l2w_memory.tmh"
+#endif
+
+
+
+void st_dev_add_cont_mem_stat( PMLX4_ST_DEVICE p_stat, ULONG size );
+void st_dev_rmv_cont_mem_stat( PMLX4_ST_DEVICE p_stat, ULONG size );
+
+void *alloc_cont_mem(
+ IN struct pci_dev *pdev,
+ IN unsigned long size,
+ OUT dma_addr_t*p_dma_addr)
+{
+ void *va = NULL;
+ PHYSICAL_ADDRESS pa = {0};
+
+ ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
+
+ memset( p_dma_addr, 0, sizeof(dma_addr_t) );
+ if (!size)
+ goto end;
+
+//
+// DmaOperations->AllocateCommonBuffer can get stuck for a long time
+// when there is not enough contiguous memory
+//
+
+#ifdef SUPPORT_DMA_MEMORY
+
+ {
+ DMA_ADAPTER *p_adapter = pdev->p_dma_adapter;
+
+ va = p_adapter->DmaOperations->AllocateCommonBuffer(
+ p_adapter, size, &pa, FALSE );
+ if (va) {
+ p_dma_addr->da = pa.QuadPart;
+ p_dma_addr->va = va;
+ p_dma_addr->sz = (ULONG)size;
+ st_dev_add_cont_mem_stat( pdev->p_stat, size );
+ }
+ }
+
+#else
+
+ {
+ PHYSICAL_ADDRESS la = {0}, ha = {(u64)(-1I64)};
+
+ va = MmAllocateContiguousMemorySpecifyCache( (SIZE_T)size, la, ha, pa, MmCached );
+ if (va) {
+ pa = MmGetPhysicalAddress( va );
+		// TODO: convert physical address to dma one
+ p_dma_addr->da = pa.QuadPart;
+ p_dma_addr->va = va;
+ p_dma_addr->sz = (ULONG)size;
+ st_dev_add_cont_mem_stat( pdev->p_stat, size );
+ }
+ }
+
+#endif
+
+end:
+ if (!va)
+ L2W_PRINT(TRACE_LEVEL_WARNING ,L2W ,
+ "%s: AllocateCommonBuffer: failed to allocate contiguous %#x bytes\n",
+ pdev->name, size );
+ return va;
+}
+
+void free_cont_mem(
+ IN struct pci_dev *pdev,
+ IN dma_addr_t*p_dma_addr)
+{
+#ifdef SUPPORT_DMA_MEMORY
+
+ {
+ DMA_ADAPTER *p_adapter = pdev->p_dma_adapter;
+ PHYSICAL_ADDRESS pa;
+
+ ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
+ pa.QuadPart = p_dma_addr->da;
+ p_adapter->DmaOperations->FreeCommonBuffer(
+ p_adapter, p_dma_addr->sz, pa, p_dma_addr->va, FALSE );
+ st_dev_rmv_cont_mem_stat( pdev->p_stat, p_dma_addr->sz );
+ }
+
+#else
+
+ {
+ KIRQL old_irql = 0, cur_irql = KeGetCurrentIrql();
+
+ ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+ if (cur_irql < APC_LEVEL)
+ KeRaiseIrql( APC_LEVEL, &old_irql );
+ MmFreeContiguousMemory( p_dma_addr->va );
+ st_dev_rmv_cont_mem_stat( pdev->p_stat, (ULONG)(p_dma_addr->sz) );
+ if (cur_irql < APC_LEVEL)
+ KeLowerIrql( old_irql );
+ }
+
+#endif
+}
+
+void *
+dma_alloc_coherent( struct mlx4_dev **dev, size_t size,
+ dma_addr_t *p_dma_addr, gfp_t gfp )
+{
+ UNUSED_PARAM(gfp);
+
+ if (!size)
+ return NULL;
+ return alloc_cont_mem( (*dev)->pdev, (unsigned long)size, p_dma_addr );
+}
+
+void
+dma_free_coherent( struct mlx4_dev **dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr)
+{
+ UNUSED_PARAM(size);
+ UNUSED_PARAM(vaddr);
+ ASSERT(size == dma_addr.sz);
+ ASSERT(vaddr == dma_addr.va);
+ free_cont_mem( (*dev)->pdev, &dma_addr );
+}
+
+void
+pci_free_consistent( struct pci_dev *pdev, size_t size,
+ void *vaddr, dma_addr_t dma_addr)
+{
+ dma_free_coherent( &pdev->dev, size, vaddr, dma_addr );
+}
+
+
+
Index: B:/users/irena/proj1/trunk/core/l2w/dirs
===================================================================
--- B:/users/irena/proj1/trunk/core/l2w/dirs (revision 0)
+++ B:/users/irena/proj1/trunk/core/l2w/dirs (revision 6862)
@@ -0,0 +1,2 @@
+DIRS=\
+ kernel
Index: B:/users/irena/proj1/trunk/core/genutils/kernel/SOURCES
===================================================================
--- B:/users/irena/proj1/trunk/core/genutils/kernel/SOURCES (revision 0)
+++ B:/users/irena/proj1/trunk/core/genutils/kernel/SOURCES (revision 6862)
@@ -0,0 +1,36 @@
+TARGETNAME=genutils
+TARGETPATH=..\..\..\bin\kernel\obj$(BUILD_ALT_DIR)
+TARGETTYPE=DRIVER_LIBRARY
+
+!if $(FREEBUILD)
+ENABLE_EVENT_TRACING=1
+!endif
+
+SOURCES= \
+ gu_timer.cpp \
+ gu_utils.cpp \
+ gu_dbg.cpp
+
+INCLUDES=..\..\..\inc;..\..\..\inc\kernel;..\..\..\inc\kernel\genutils;
+
+C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS
+
+C_DEFINES = $(C_DEFINES) -DIOCTL_INTERFACE=1
+
+!IFDEF ENABLE_EVENT_TRACING
+
+C_DEFINES = $(C_DEFINES) -DEVENT_TRACING
+
+RUN_WPP= $(SOURCES) -km -dll -ext: .c .cpp .h .C .CPP .H\
+ -preserveext:.cpp .h\
+ -scan:..\..\..\inc\kernel\genutils\gu_wpptrace.h\
+ -func:GU_PRINT(LEVEL,FLAGS,MSG,...)
+!ENDIF
+
+MC_SOURCEDIR=.
+
+MSC_WARNING_LEVEL= /W4
+
+PRECOMPILED_INCLUDE=gu_precomp.h
+PRECOMPILED_PCH=gu_precomp.pch
+PRECOMPILED_CXX=1
\ No newline at end of file
Index: B:/users/irena/proj1/trunk/core/genutils/kernel/gu_precomp.h
===================================================================
--- B:/users/irena/proj1/trunk/core/genutils/kernel/gu_precomp.h (revision 0)
+++ B:/users/irena/proj1/trunk/core/genutils/kernel/gu_precomp.h (revision 6862)
@@ -0,0 +1,38 @@
+#pragma warning(disable:4214) // bit field types other than int
+#pragma warning(disable:4201) // nameless struct/union
+#pragma warning(disable:4115) // named type definition in parentheses
+#pragma warning(disable:4127) // conditional expression is constant
+#pragma warning(disable:4054) // cast of function pointer to PVOID
+#pragma warning(disable:4206) // translation unit is empty
+#pragma warning(disable:4100) // unreferenced formal parameter
+
+
+//extern "C" {
+#include <ntddk.h>
+#include <wdm.h>
+//#include <wdf.h>
+#include <wdmsec.h>
+#include <ndis.h>
+//}
+
+
+#include <ntintsafe.h>
+
+
+#include <ntstatus.h>
+#include <initguid.h>
+#include <stdio.h>
+#include <WinDef.h>
+#include <ntstrsafe.h>
+#include <strsafe.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+#include "gu_defs.h"
+#include "gu_dbg.h"
+#include "gu_wpptrace.h"
+#include "gu_utils.h"
+
+#include "shutter.h"
+
+
Index: B:/users/irena/proj1/trunk/core/genutils/kernel/gu_dbg.cpp
===================================================================
--- B:/users/irena/proj1/trunk/core/genutils/kernel/gu_dbg.cpp (revision 0)
+++ B:/users/irena/proj1/trunk/core/genutils/kernel/gu_dbg.cpp (revision 6862)
@@ -0,0 +1,128 @@
+/*++
+
+Copyright (c) 2005-2008 Mellanox Technologies. All rights reserved.
+
+Module Name:
+ gu_dbg.cpp
+
+Abstract:
+    This module contains all debug-related code.
+Notes:
+
+--*/
+#include "gu_precomp.h"
+#include <stdarg.h>
+
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "gu_dbg.tmh"
+#endif
+
+#if DBG
+
+#define TEMP_BUFFER_SIZE 128
+
+CGUDebugFlags g_GUDbgFlagsDef[] = {
+ { L"GU", TRACE_LEVEL_ERROR},
+ { L"GU_INIT", TRACE_LEVEL_ERROR}
+ };
+
+
+VOID cl_dbg_out( IN PCCH format, ...)
+{
+ va_list list;
+ va_start(list, format);
+ vDbgPrintEx(DPFLTR_IHVNETWORK_ID, DPFLTR_ERROR_LEVEL, format, list);
+ va_end(list);
+}
+
+void DebugGUPrintInit(IN LPCWSTR pszRegistryPath)
+{
+ LONG resultFromRegistry = TRACE_LEVEL_ERROR;
+
+ for (int i = 0; i < SIZE_OF(g_GUDbgFlagsDef); ++i)
+ {
+ DWORD defaultVal = g_GUDbgFlagsDef[i].dbgLevel;
+ NTSTATUS Status = ReadRegistryDword(
+ pszRegistryPath,
+ L"\\Parameters\\Debug",
+ g_GUDbgFlagsDef[i].pszName,
+ defaultVal,
+ &resultFromRegistry);
+ if (NT_SUCCESS(Status))
+ {
+ g_GUDbgFlagsDef[i].dbgLevel = resultFromRegistry;
+ }
+ }
+}
+
+
+#if !defined(EVENT_TRACING)
+VOID
+TraceGUMessage(
+ IN PCCHAR func,
+ IN PCCHAR file,
+ IN ULONG line,
+ IN ULONG level,
+ IN PCCHAR format,
+ ...
+ )
+/*++
+
+Routine Description:
+
+ Debug print for the sample driver.
+
+Arguments:
+
+ TraceEventsLevel - print level between 0 and 3, with 3 the most verbose
+
+Return Value:
+
+ None.
+
+ --*/
+ {
+#if DBG
+
+ va_list list;
+ NTSTATUS status;
+
+ va_start(list, format);
+
+ char psPrefix[TEMP_BUFFER_SIZE];
+ PCCHAR fileName = strrchr(file, '\\');
+ if (fileName != NULL)
+ {
+ fileName++;
+ }
+
+ if(level == TRACE_LEVEL_ERROR)
+ {
+ status = RtlStringCchPrintfA(psPrefix, TEMP_BUFFER_SIZE, "***ERROR*** %s (%s:%d) ", func, fileName, line);
+ }
+ else
+ {
+ status = RtlStringCchPrintfA(psPrefix, TEMP_BUFFER_SIZE, "%s (%s:%d) ", func, fileName, line);
+ level = TRACE_LEVEL_ERROR;
+ }
+
+ ASSERT(NT_SUCCESS(status));
+ vDbgPrintExWithPrefix(psPrefix , DPFLTR_IHVNETWORK_ID, level, format, list);
+
+ va_end(list);
+
+#else
+
+ UNREFERENCED_PARAMETER(TraceEventsLevel);
+ UNREFERENCED_PARAMETER(TraceEventsFlag);
+ UNREFERENCED_PARAMETER(DebugMessage);
+
+#endif
+}
+#endif
+
+#endif // DBG
+
Index: B:/users/irena/proj1/trunk/core/genutils/kernel/readme.txt
===================================================================
--- B:/users/irena/proj1/trunk/core/genutils/kernel/readme.txt (revision 0)
+++ B:/users/irena/proj1/trunk/core/genutils/kernel/readme.txt (revision 6862)
@@ -0,0 +1,141 @@
+This library unifies general-purpose utilities in one place.
+To use it one needs to link with genutils.lib.
+
+genutils provides:
+
+//
+// gu_utils.h - general utils file
+//
+
+Flags manipulation:
+ (assuming M has a field named Flags)
+ - GU_SET_FLAG(_M, _F)
+ - GU_CLEAR_FLAG(_M, _F)
+ - GU_CLEAR_FLAGS(_M)
+ - GU_TEST_FLAG(_M, _F)
+ - GU_TEST_FLAGS(_M, _F)
+
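+ A minimal sketch (MyContext and MY_FLAG_READY are illustrative; whether _M is
+ passed as a pointer or an lvalue depends on the macro definition, a pointer is
+ assumed here):
+   struct MyContext *pCtx = ...;
+   GU_CLEAR_FLAGS(pCtx);
+   GU_SET_FLAG(pCtx, MY_FLAG_READY);
+   if (GU_TEST_FLAG(pCtx, MY_FLAG_READY)) { /* the flag is set */ }
+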
+Time utilities:
+ - GenUtilsInit() - init the QueryTimeIncrement factor
+ - GetTickCountInMsec() - returns tick count in milliseconds
+ - GetTickCountInNsec() - returns tick count in nanoseconds
+ - GetTimeStamp() - returns tick count divided by frequency in units of nanoseconds
+ - TimeFromLong(ULONG HandredNanos) - converts time from ULONG representation to LARGE_INTEGER representation
+ - Sleep(ULONG HandredNanos) - returns STATUS_SUCCESS after the specified time has passed.
+ Sleep function must be running at IRQL <= APC_LEVEL.
+ NOTE: The input parameter is in 100 Nano Second units. Multiply by 10000 to specify Milliseconds.
+ - MyKeWaitForSingleObject - A wrapper for the KeWaitForSingleObject that adds assertions to the values returned by it
+
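+ A small usage sketch of the time helpers above (values are illustrative):
+   GenUtilsInit();                       // once, to latch the KeQueryTimeIncrement factor
+   uint64_t start = GetTickCountInMsec();
+   Sleep(10 * 10000);                    // 10 ms, expressed in 100-nanosecond units
+   uint64_t elapsed = GetTickCountInMsec() - start;
+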
+General utils:
+ - ROUNDUP_LOG2(u32 arg) - return the log2 of the given number rounded up
+ - guid_to_str(u64 guid, WCHAR* pstr, DWORD BufLen)
+ - H_TO_BE(const u32 src)
+ - Floor_4(UINT value)
+ - ntohs(USHORT in)
+ - DbgPrintIpAddress(LPCSTR str_description, u8 ipAddress[], unsigned int traceLevel)
+ - DbgPrintMacAddress(LPCSTR str_description, u8 macAddress[], unsigned int traceLevel)
+ - UpdateRc(NTSTATUS *rc, NTSTATUS rc1) - set rc to rc1 only if rc currently holds a success value, so the first error is kept
+
+Memory utils:
+ - AllocateSharedMemory - allocate ndis shared memory according to Ndis.h _NDIS_SHARED_MEMORY_PARAMETERS
+ - FreeSharedMemory
+ - CopyFromUser - copy from source buffer to destination buffer a given number of bytes. Checks that the source can be read
+ - CopyToUser - copy from source buffer to destination buffer a given number of bytes. Checks that the destination can be written to
+ - MapUserMemory - lock and map specified memory pages
+ - UnMapMemory - unmap and unlock specified memory pages
+
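+ A minimal sketch of CopyFromUser, matching the signature in gu_utils.cpp
+ (p_user_src is an illustrative user-mode pointer):
+   char local[64];
+   NTSTATUS st = CopyFromUser(local, p_user_src, sizeof(local));
+   if (!NT_SUCCESS(st))
+       return st;
+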
+Registry values:
+ - ReadRegistryDword
+ - ReadRegStrRegistryValueInNonPagedMemory
+ - ReadRegistryValue
+
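+ A minimal sketch of ReadRegistryDword (pszRegistryPath is the driver's registry
+ path; the "DebugLevel" value name and the default are illustrative):
+   LONG DebugLevel;
+   NTSTATUS Status = ReadRegistryDword(
+       pszRegistryPath, L"\\Parameters", L"DebugLevel",
+       TRACE_LEVEL_ERROR, &DebugLevel);
+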
+VERIFY_DISPATCH_LEVEL:
+ At the beginning of the function one should call
+ VERIFY_DISPATCH_LEVEL(KIRQL irql); this call verifies that the current IRQL is the given IRQL.
+ At the end of the function the destructor of the class will ASSERT that the level stayed the same
+ throughout the function.
+
+CSpinLockWrapper:
+ - CSpinLockWrapper (KSPIN_LOCK &SpinLock) - Spinlock must already be initialized
+ - Lock() - Uses KeAcquireSpinLock and saves the IRQL by itself
+ - Unlock() - Uses KeReleaseSpinLock
+
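+ A minimal sketch (m_Lock is an already-initialized KSPIN_LOCK):
+   CSpinLockWrapper Lock(m_Lock);
+   Lock.Lock();                          // acquires and saves the old IRQL internally
+   // ... touch the shared state ...
+   Lock.Unlock();
+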
+LinkedList:
+ - Init()
+ - Size()
+ - RemoveHeadList() - returns LIST_ENTRY*
+ - RemoveTailList() - returns LIST_ENTRY*
+ - InsertTailList(LIST_ENTRY *Item)
+ - InsertHeadList(LIST_ENTRY *Item)
+ - Head() - returns LIST_ENTRY*. ASSERTS that the list is not empty!
+ - Tail() - returns LIST_ENTRY*. ASSERTS that the list is not empty!
+ - RawHead() - returns LIST_ENTRY*. Return the head of the list without any checks, to be used as an iterator
+ - IsAfterTheLast(LIST_ENTRY *pEntry) - true if the list is empty or the entry is the raw head.
+ - RemoveEntryList(LIST_ENTRY *Item) - ASSERTS that the list is not empty!
+
+Queue:
+ - InitializeQueueHeader(QueueHeader)
+ - IsQueueEmpty(QueueHeader)
+ - RemoveHeadQueue(QueueHeader)
+ - InsertHeadQueue(QueueHeader, QueueEntry)
+ - InsertTailQueue(QueueHeader, QueueEntry)
+
+Array (A simple static array):
+ - Init(int MaxNumberofPackets)
+ - Shutdown()
+ - Array()
+ - Add(void *ptr) - add member to the current count (as indicated by GetCount())
+ - GetCount()
+ - GetPtr(int Place) - get member from a given index
+ - Reset() - after a call to this function the next add will be into the first index
+
+ProcessorArray:
+ This class is used for freeing the sent packets.
+ It is based on the assumption that this happens at raised IRQL; therefore,
+ if we allocate a data structure for each processor we should be fine.
+ - Init(int MaxNumberofPackets)
+ - Shutdown()
+ - GetArray() - returns the reset array of the current processor
+
+FIFO:
+ - Init(int MaxSize)
+ - Shutdown()
+ - Push(T pNewItem)
+ - Pop()
+ - Count()
+ - IsFull()
+ - IsEmpty()
+
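+ A minimal sketch, mirroring the FIFO<VOID*> instantiation used in gu_utils.cpp
+ (pItem is illustrative):
+   FIFO<VOID*> Fifo;
+   Fifo.Init(16);
+   Fifo.Push(pItem);
+   if (!Fifo.IsEmpty())
+       { VOID *p = Fifo.Pop(); }
+   Fifo.Shutdown();
+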
+Bitmap:
+ - Set(ULONG* pData, ULONG BitIndex) - returns true if the bit was set and false if it was already set or is out of range
+ - Clear(ULONG* pData, ULONG BitIndex) - returns true if the bit was cleared and false if it was already clear or is out of range
+ - Test(ULONG* pData, ULONG BitIndex) - returns true if the bit is set and false if it is clear or out of range
+
+//
+// gu_timer.h
+//
+
+IGUWorkItem:
+ The element that is queued for execution. It must have a void Execute() function.
+
+CGUWorkerThread:
+ - Start() - start the thread. The thread will run its Run() function.
+ - Stop() - signals the Run() function to stop its execution.
+ - Run() - while Stop() was not called, wait for an item to be enqueued and execute all currently enqueued items
+ - EnqueueWorkItem(IGUWorkItem *pWorkItem)
+ - DequeueWorkItem(IGUWorkItem *pWorkItem)
+
+CGUTimer:
+ - Initialize(CGUWorkerThread *pThread, IGUWorkItem *pWorkItem, ULONG TimerIntervalMillis = 0, bool IsPeriodic = true)
+ - initialize a timer with an execution thread and a specific work item.
+ The thread will enqueue the item after a given delay
+ and will requeue it each interval if the timer is periodic.
+ - Run() - enqueue the item for execution. Called when the delay expires.
+ - Cancel() - stops the wait for the delay to end, meaning the enqueue at the end of the delay will not take place.
+ returns true if timer was canceled and will not run or was idle
+ returns false if the event has just finished running and cannot be canceled anymore
+ - Start() - internally used to re-start the timer on the delay time, if the timer is periodic
+ - Start(ULONG dwInterval) - may be used to re-start the timer on a new delay time
+ - Stop() - Cancel and release timer
+ - PassiveRun() - internally used to run the work item in PASSIVE_LEVEL
+
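+ A possible wiring sketch of the classes above (MyWorkItem is an illustrative
+ IGUWorkItem implementation; starting via Start(interval) is assumed):
+   CGUWorkerThread Thread;
+   Thread.Start();
+   MyWorkItem WorkItem;                      // implements void Execute()
+   CGUTimer Timer;
+   Timer.Initialize(&Thread, &WorkItem, 1000, true);
+   Timer.Start(1000);                        // fire roughly every second
+   ...
+   Timer.Stop();
+   Thread.Stop();
+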
Index: B:/users/irena/proj1/trunk/core/genutils/kernel/gu_timer.cpp
===================================================================
--- B:/users/irena/proj1/trunk/core/genutils/kernel/gu_timer.cpp (revision 0)
+++ B:/users/irena/proj1/trunk/core/genutils/kernel/gu_timer.cpp (revision 6862)
@@ -0,0 +1,471 @@
+#include "gu_precomp.h"
+#include "gu_timer.h"
+
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "gu_timer.tmh"
+#endif
+
+LONG IGUWorkItem::AddRef(LPCSTR str)
+{
+ ASSERT(RefCount >= 0);
+
+ LONG rc = InterlockedIncrement(&RefCount);
+ GU_PRINT(TRACE_LEVEL_VERBOSE, GU, "AddRef (%s): WI %p: new count %d\n", str, this, rc);
+
+ return rc;
+}
+
+LONG IGUWorkItem::Release(LPCSTR str)
+{
+ ASSERT(RefCount > 0);
+
+ ULONG rc = InterlockedDecrement(&RefCount);
+ GU_PRINT(TRACE_LEVEL_VERBOSE, GU, "Release (%s): WI %p: new count %d\n", str, this, rc);
+
+ if (rc == 0)
+ {
+ //
+ // Free memory if there is no outstanding reference.
+ //
+ GU_PRINT(TRACE_LEVEL_ERROR, GU, "Free WI %p\n", this);
+ delete this;
+ }
+
+ return rc;
+}
+
+IGUWorkItem::~IGUWorkItem()
+{
+ //
+ // The work item must be executed
+ //
+ ASSERT(m_pWorkerThread == NULL);
+
+ if(m_pWorkerThread != NULL)
+ {
+ m_pWorkerThread->DequeueWorkItem(this);
+ }
+}
+
+VOID GUThreadFunc(void *pContext)
+{
+ class CGUWorkerThread *pWorkerThread = (CGUWorkerThread*) pContext;
+ pWorkerThread->Run();
+}
+
+CGUWorkerThread::CGUWorkerThread() :
+ m_bIsStarted(false),
+ m_bExit(false)
+{
+ InitializeListHead(&m_WorkItems);
+ KeInitializeSpinLock(&m_Lock);
+ KeInitializeEvent(&m_Event, SynchronizationEvent, FALSE);
+}
+
+CGUWorkerThread::~CGUWorkerThread()
+{
+ if(m_bIsStarted && ! m_bExit)
+ {
+ ASSERT(FALSE);
+ Stop();
+ }
+}
+
+NDIS_STATUS CGUWorkerThread::Start()
+{
+ NTSTATUS Status = STATUS_SUCCESS;
+ OBJECT_ATTRIBUTES attr;
+ HANDLE ThreadHandle;
+
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "====>CGUWorkerThread::Start: thread %p\n", this);
+
+ InitializeObjectAttributes( &attr, NULL, OBJ_KERNEL_HANDLE, NULL, NULL );
+
+ Status = PsCreateSystemThread(
+ &ThreadHandle,
+ THREAD_ALL_ACCESS,
+ &attr,
+ NULL,
+ NULL,
+ ::GUThreadFunc,
+ this
+ );
+ if (!NT_SUCCESS(Status))
+ {
+ GU_PRINT(TRACE_LEVEL_VERBOSE, GU, "PsCreateSystemThread failed\n");
+ goto Cleanup;
+ }
+
+ // Convert the thread into a handle
+ Status = ObReferenceObjectByHandle(
+ ThreadHandle,
+ THREAD_ALL_ACCESS,
+ NULL,
+ KernelMode,
+ &m_ThreadObject,
+ NULL
+ );
+
+ ASSERT(Status == STATUS_SUCCESS); // According to MSDN, must succeed if I set the params
+
+ Status = ZwClose(ThreadHandle);
+ ASSERT(NT_SUCCESS(Status)); // Should always succeed
+
+ m_bIsStarted = true;
+
+Cleanup:
+
+	GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "<====CGUWorkerThread::Start: thread %p\n", this);
+ return Status;
+
+}
+
+void CGUWorkerThread::Stop()
+{
+ ASSERT(m_bIsStarted == true);
+ ASSERT(m_bExit == false);
+
+ NTSTATUS Status = STATUS_SUCCESS;
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "====>CGUWorkerThread::Stop: thread %p\n", this);
+
+ if(! m_bExit)
+ {
+ m_bExit = true;
+ KeSetEvent(&m_Event, IO_NO_INCREMENT, FALSE);
+ Status = KeWaitForSingleObject(
+ m_ThreadObject,
+ Executive,
+ KernelMode,
+ FALSE,
+ NULL
+ );
+ }
+
+ ASSERT(Status == STATUS_SUCCESS);
+
+ ASSERT(IsListEmpty(&m_WorkItems) == TRUE);
+
+	GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "<====CGUWorkerThread::Stop: thread %p\n", this);
+}
+
+void CGUWorkerThread::Run()
+{
+ NTSTATUS Status;
+ KIRQL irql;
+
+ while(! m_bExit)
+ {
+ ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
+ Status = KeWaitForSingleObject(
+ &m_Event,
+ Executive,
+ KernelMode,
+ FALSE,
+ NULL
+ );
+ ASSERT(Status == STATUS_SUCCESS);
+ GU_PRINT(TRACE_LEVEL_VERBOSE, GU, "Thread wake up\n");
+
+ KeAcquireSpinLock(&m_Lock, &irql);
+
+ while (!IsListEmpty(&m_WorkItems))
+ {
+ PLIST_ENTRY p = m_WorkItems.Flink;
+ IGUWorkItem* pWorkItem = CONTAINING_RECORD(p, IGUWorkItem, m_Link);
+ RemoveHeadList(&m_WorkItems);
+ pWorkItem->Release("CGUWorkerThread::Run");
+ pWorkItem->m_pWorkerThread = NULL;
+ KeReleaseSpinLock(&m_Lock, irql);
+
+ pWorkItem->Execute();
+
+ KeAcquireSpinLock(&m_Lock, &irql);
+ }
+
+ KeReleaseSpinLock(&m_Lock, irql);
+ }
+
+ PsTerminateSystemThread(STATUS_SUCCESS);
+}
+
+NDIS_STATUS CGUWorkerThread::EnqueueWorkItem(IGUWorkItem *pWorkItem)
+{
+ BOOLEAN IsEmpty = false;
+ KIRQL irql;
+
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "====>CGUWorkerThread::EnqueueWorkItem: thread %p\n", this);
+ if(! m_bExit)
+ {
+ KeAcquireSpinLock(&m_Lock, &irql);
+ IsEmpty = IsListEmpty(&m_WorkItems);
+ InsertTailList(&m_WorkItems, &pWorkItem->m_Link);
+ pWorkItem->AddRef("CGUWorkerThread::EnqueueWorkItem");
+ pWorkItem->m_pWorkerThread = this;
+ KeReleaseSpinLock(&m_Lock, irql);
+
+ if(IsEmpty)
+ KeSetEvent(&m_Event, IO_NO_INCREMENT, FALSE);
+
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "<====CGUWorkerThread::EnqueueWorkItem: thread %p SUCCESS\n", this);
+ return NDIS_STATUS_SUCCESS;
+ }
+
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "<====CGUWorkerThread::EnqueueWorkItem: thread %p - NOT ACCEPTED\n", this);
+ return NDIS_STATUS_NOT_ACCEPTED;
+}
+
+NDIS_STATUS CGUWorkerThread::DequeueWorkItem(IGUWorkItem *pWorkItem)
+{
+ KIRQL irql;
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "====>CGUWorkerThread::DequeueWorkItem: thread %p\n", this);
+ KeAcquireSpinLock(&m_Lock, &irql);
+ RemoveEntryList(&pWorkItem->m_Link);
+ pWorkItem->Release("CGUWorkerThread::DequeueWorkItem");
+ pWorkItem->m_pWorkerThread = NULL;
+ KeReleaseSpinLock(&m_Lock, irql);
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "<====CGUWorkerThread::DequeueWorkItem: thread %p\n", this);
+
+ return NDIS_STATUS_SUCCESS;
+}
+
+VOID
+ GUTimerFunc(
+ IN struct _KDPC *Dpc,
+ IN PVOID DeferredContext,
+ IN PVOID SystemArgument1,
+ IN PVOID SystemArgument2
+ )
+{
+ CGUTimer* pTimer = (CGUTimer *) DeferredContext;
+ pTimer->Run();
+}
+
+CGUTimer::CGUTimer()
+{
+ GU_PRINT(TRACE_LEVEL_ERROR, GU, "====>CGUTimer::CGUTimer: Timer %p, RefCount %d\n",
+ this, m_RefCount);
+
+ GU_PRINT(TRACE_LEVEL_ERROR, GU, "<====CGUTimer::CGUTimer: Timer %p\n", this);
+}
+
+CGUTimer::~CGUTimer()
+{
+ GU_PRINT(TRACE_LEVEL_ERROR, GU, "====>CGUTimer::~CGUTimer: Timer %p, RefCount %d\n",
+ this, m_RefCount);
+
+ GU_PRINT(TRACE_LEVEL_ERROR, GU, "<====CGUTimer::~CGUTimer: Timer %p\n", this);
+}
+
+void CGUTimer::Initialize(
+ CGUWorkerThread *pThread,
+ IGUWorkItem *pWorkItem,
+ ULONG TimerIntervalMillis,
+ bool IsPeriodic)
+{
+ m_pThread = pThread;
+ m_pWorkItem = pWorkItem;
+ m_bExit = false;
+ m_IsPeriodic = IsPeriodic;
+
+ KeInitializeTimer(&m_Timer);
+ KeInitializeDpc(&m_Dpc, GUTimerFunc, this);
+
+ m_TimerIntervalMillis = TimerIntervalMillis;
+
+ shutter_init(&m_cancel);
+
+ KeInitializeEvent(&m_Event, SynchronizationEvent, TRUE);
+
+ m_TimerWorkItem.Init(this);
+
+ AddRef("CGUTimer::CGUTimer");
+}
+
+LONG CGUTimer::AddRef(LPCSTR str)
+{
+ ASSERT(m_RefCount >= 0);
+
+ LONG rc = InterlockedIncrement(&m_RefCount);
+ GU_PRINT(TRACE_LEVEL_VERBOSE, GU, "AddRef (%s): Timer %p: new count %d\n", str, this, rc);
+
+ return rc;
+}
+
+LONG CGUTimer::Release(LPCSTR str)
+{
+ ASSERT(m_RefCount > 0);
+
+ UINT rc = InterlockedDecrement(&m_RefCount);
+ GU_PRINT(TRACE_LEVEL_VERBOSE, GU, "Release (%s): Timer %p: new count %d\n", str, this, rc);
+
+ if (rc == 0)
+ {
+ //
+ // Free memory if there is no outstanding reference.
+ //
+ GU_PRINT(TRACE_LEVEL_ERROR, GU, "Free Timer %p\n", this);
+ delete this;
+ }
+
+ return rc;
+}
+
+bool CGUTimer::Start()
+{
+ ASSERT(m_IsPeriodic);
+ return Start(m_TimerIntervalMillis);
+}
+
+bool CGUTimer::Start(DWORD dwTimerIntervalMillis)
+{
+ VERIFY_DISPATCH_LEVEL(PASSIVE_LEVEL);
+
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "===>CGUTimer::Start\n");
+
+
+ KeWaitForSingleObject(&m_Event, Executive, KernelMode , FALSE, NULL);
+
+ bool bret = true;
+ if (shutter_use(&m_cancel) <= 0)
+ {
+ GU_PRINT(TRACE_LEVEL_ERROR, GU, "Cancelling is in progress\n");
+ bret = false;
+ goto Exit;
+ }
+
+ BOOLEAN bPrevTimerWasCancelled = FALSE;
+
+ AddRef("CGUTimer::Start");
+
+ LARGE_INTEGER TimerInterval;
+ if(dwTimerIntervalMillis == 0)
+ {
+ m_pThread->EnqueueWorkItem(&m_TimerWorkItem);
+ goto Exit;
+ }
+ else
+ {
+ TimerInterval.QuadPart = ((LONGLONG)-10000) * dwTimerIntervalMillis;
+ }
+
+ bPrevTimerWasCancelled = KeSetTimer(&m_Timer, TimerInterval, &m_Dpc);
+ if(bPrevTimerWasCancelled)
+ {
+ Release("CGUTimer::Start");
+ }
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "<===CGUTimer::Start\n");
+
+Exit:
+ KeSetEvent(&m_Event, IO_NO_INCREMENT, FALSE);
+ return bret;
+}
+
+
+void CGUTimer::Stop()
+{
+ VERIFY_DISPATCH_LEVEL(PASSIVE_LEVEL);
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "====>CMuxTimer::Stop: Timer %p, RefCount %d\n",
+ this, m_RefCount);
+
+ Cancel();
+ Release("CMuxTimer::Stop");
+
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "<====CMuxTimer::Stop: Timer %p\n",
+ this);
+}
+
+// true = timer was canceled and will not run
+// false = event has just finished running
+bool CGUTimer::Cancel()
+{
+ VERIFY_DISPATCH_LEVEL(PASSIVE_LEVEL);
+
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "====>CGUTimer::cancel: Timer %p, RefCount %d\n",
+ this, m_RefCount);
+
+ KeWaitForSingleObject(&m_Event, Executive, KernelMode , FALSE, NULL);
+
+ bool bret = false;
+
+ if(m_cancel.cnt == 0)
+ {
+ //
+ // Do not fail cancel call.
+ //
+ GU_PRINT(TRACE_LEVEL_ERROR, GU, "Cancel called while timer is idle\n");
+ KeSetEvent(&m_Event, IO_NO_INCREMENT, FALSE);
+ return true;
+ }
+
+ m_bExit = true;
+ BOOLEAN bTimerCancelled = KeCancelTimer(&m_Timer);
+ if(bTimerCancelled)
+ {
+ shutter_loose(&m_cancel);
+ Release("CMuxTimer::Stop_cancel");
+ bret = true;
+ }
+
+ // the call is still running
+
+ KeSetEvent(&m_Event, IO_NO_INCREMENT, FALSE);
+
+ shutter_shut(&m_cancel);
+ shutter_alive(&m_cancel);
+
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "<====CGUTimer::cancel: Timer %p\n", this);
+ return bret;
+}
+
+
+void CGUTimer::Run()
+{
+ NDIS_STATUS Status = NDIS_STATUS_SUCCESS;
+ LARGE_INTEGER CurrentTime;
+
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "====>CMuxTimer::Run: Timer %p\n", this);
+
+ if(m_pWorkItem && !m_bExit)
+ {
+ CurrentTime.QuadPart = GetTickCountInMsec();
+
+ if (! m_IsPeriodic || (CurrentTime.QuadPart - m_LastRunTime.QuadPart) >= m_TimerIntervalMillis)
+ {
+ Status = m_pThread->EnqueueWorkItem(&m_TimerWorkItem);
+ m_LastRunTime.QuadPart = CurrentTime.QuadPart;
+ }
+ else
+ {
+ ASSERT(FALSE);
+ }
+ }
+
+ Release("CMuxTimer::Run");
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "<====CMuxTimer::Run: Timer %p\n", this);
+}
+
+void CGUTimer::PassiveRun()
+{
+ VERIFY_DISPATCH_LEVEL(PASSIVE_LEVEL);
+
+ m_pWorkItem->Execute();
+
+ shutter_loose(&m_cancel);
+
+ if(! m_bExit && m_IsPeriodic)
+ {
+ Start();
+ }
+}
+
+
+void CTimerWorkItem::Execute()
+{
+ m_pTimer->PassiveRun();
+}
+
+
+
Index: B:/users/irena/proj1/trunk/core/genutils/kernel/gu_utils.cpp
===================================================================
--- B:/users/irena/proj1/trunk/core/genutils/kernel/gu_utils.cpp (revision 0)
+++ B:/users/irena/proj1/trunk/core/genutils/kernel/gu_utils.cpp (revision 6862)
@@ -0,0 +1,794 @@
+/*++
+
+Copyright (c) 2005-2008 Mellanox Technologies. All rights reserved.
+
+Module Name:
+ GenUtils.cpp
+
+Abstract:
+    This module contains general utility code.
+
+Revision History:
+
+Notes:
+
+--*/
+
+#include "gu_precomp.h"
+
+#ifdef offsetof
+#undef offsetof
+#endif
+#if defined(EVENT_TRACING)
+#include "gu_utils.tmh"
+#endif
+
+ULONG g_QueryTimeIncrement;
+
+LARGE_INTEGER TimeFromLong(ULONG HandredNanos)
+{
+ LARGE_INTEGER Timeout;
+ Timeout.HighPart = 0xffffffff;
+ Timeout.LowPart = 0xffffffff ^ HandredNanos;
+ return Timeout;
+}
+
+//
+// Sleep function must be running at IRQL <= APC_LEVEL
+//
+// NOTE: The input parameter is in 100 Nano Second units. Multiply by 10000 to specify Milliseconds.
+//
+NTSTATUS Sleep(ULONG HandredNanos)
+{
+ ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
+
+ NTSTATUS rc = STATUS_SUCCESS;
+ LARGE_INTEGER Timeout = TimeFromLong(HandredNanos);
+
+ rc = KeDelayExecutionThread( KernelMode, FALSE, &Timeout );
+ ASSERT(rc == STATUS_SUCCESS);
+
+ return rc;
+
+}
+
+// In units of ms
+uint64_t GetTickCountInMsec()
+{
+ LARGE_INTEGER Ticks;
+ KeQueryTickCount(&Ticks);
+ return Ticks.QuadPart * g_QueryTimeIncrement / 10000; // 10,000 moves from 100ns to ms
+}
+
+// In units of nano-seconds
+uint64_t GetTickCountInNsec()
+{
+ LARGE_INTEGER Ticks;
+ KeQueryTickCount(&Ticks);
+ return Ticks.QuadPart * g_QueryTimeIncrement * 100;
+}
+
+// In units of ns
+uint64_t GetTimeStamp( void )
+{
+ LARGE_INTEGER tick_count, frequency;
+
+ tick_count = KeQueryPerformanceCounter( &frequency );
+ double dtick_count = (double)tick_count.QuadPart;
+ double dfrequency = (double)frequency.QuadPart;
+ return (uint64_t)( 1000000000 * dtick_count / dfrequency );
+}
+
+u32 ROUNDUP_LOG2(u32 arg)
+{
+ if (arg <= 1) return 0;
+ if (arg <= 2) return 1;
+ if (arg <= 4) return 2;
+ if (arg <= 8) return 3;
+ if (arg <= 16) return 4;
+ if (arg <= 32) return 5;
+ if (arg <= 64) return 6;
+ if (arg <= 128) return 7;
+ if (arg <= 256) return 8;
+ if (arg <= 512) return 9;
+ if (arg <= 1024) return 10;
+ if (arg <= 2048) return 11;
+ if (arg <= 4096) return 12;
+ if (arg <= 8192) return 13;
+ if (arg <= 16384) return 14;
+ if (arg <= 32768) return 15;
+ if (arg <= 65536) return 16;
+ ASSERT(FALSE);
+ return 32;
+}
+
+NTSTATUS GenUtilsInit()
+{
+ //fill_bit_tbls(); // Do we need this ?
+ g_QueryTimeIncrement = KeQueryTimeIncrement();
+
+ return STATUS_SUCCESS;
+}
+
+class ZeroMemoryClass {
+} zmClass;
+
+void* __cdecl operator new(size_t n ) throw() {
+
+ //From WinDDK: "Avoid calling ExAllocatePoolWithTag with memory size == 0. Doing so will result in pool header wastage"
+ // Verifier with low mem simulation will crash with memory size == 0
+ //TODO throw exception
+ if (n ==0) {
+ return &zmClass;
+ }
+
+ void * p = ExAllocatePoolWithTag(NonPagedPool , n, GLOBAL_ALLOCATION_TAG);
+ if (p) {
+ RtlZeroMemory(p , n);
+ }
+ return p;
+}
+
+void __cdecl operator delete(void* p) {
+ if (p != &zmClass)
+ {
+ ExFreePoolWithTag(p, GLOBAL_ALLOCATION_TAG);
+ }
+
+}
+
+void* __cdecl operator new(size_t n, void *addr ) throw() {
+ return addr;
+}
+
+
+NTSTATUS
+Array::Init(int MaxNumberofPackets) {
+ NTSTATUS Status = STATUS_SUCCESS;
+ m_Size = MaxNumberofPackets;
+ m_pData = new void*[MaxNumberofPackets];
+ if (m_pData == NULL) {
+ GU_PRINT(TRACE_LEVEL_ERROR, GU,"new failed \n");
+ return STATUS_NO_MEMORY;
+ }
+ return Status;
+}
+
+#if 0
+NTSTATUS
+
+ProcessorArray::Init(int MaxNumberofPackets) {
+ NTSTATUS Status = STATUS_SUCCESS;
+ u32 i = 0,j=0;
+ m_NumberOfProcessors = NdisSystemProcessorCount();
+ m_Arrays = new Array[m_NumberOfProcessors];
+ if (m_Arrays == NULL) {
+ GU_PRINT(TRACE_LEVEL_ERROR, GU,"new failed \n");
+ Status = STATUS_NO_MEMORY;
+ goto Cleanup;
+ }
+ for (i=0; i < m_NumberOfProcessors; i++) {
+ Status = m_Arrays[i].Init(MaxNumberofPackets);
+ if (!NT_SUCCESS(Status)) {
+ GU_PRINT(TRACE_LEVEL_ERROR, GU,"Array[i].Init failed \n");
+ goto Cleanup;
+ }
+ }
+
+Cleanup:
+ if (!NT_SUCCESS(Status)) {
+ if (m_Arrays) {
+ for (j=0; j< i; j++) {
+ m_Arrays[i].Shutdown();
+ }
+ delete []m_Arrays;
+ m_Arrays = NULL;
+ }
+ m_NumberOfProcessors = 0;
+
+ }
+ return Status;
+}
+#endif
+
+NTSTATUS ReadRegistryDword(
+ IN LPCWSTR pszRegistryPath,
+ IN LPCWSTR pszSuffix,
+ IN LPCWSTR pszValueName,
+ IN ULONG DefaultVal,
+ OUT LONG *pVal
+ )
+{
+ NTSTATUS status;
+ /* Remember the terminating entry in the table below. */
+ RTL_QUERY_REGISTRY_TABLE table[2];
+ UNICODE_STRING ParamPath;
+
+ ASSERT(NULL != pszRegistryPath);
+ ASSERT(NULL != pszValueName);
+ ASSERT(NULL != pVal);
+
+ USHORT suffixLength = 0;
+
+ if (NULL != pszSuffix)
+ {
+ suffixLength = (USHORT)wcslen(pszSuffix) ;
+ }
+
+ RtlInitUnicodeString( &ParamPath, NULL );
+ USHORT length = (USHORT)wcslen(pszRegistryPath) + suffixLength + 1;
+ ParamPath.Length = (length -1) * sizeof(WCHAR); // length in bytes, of the Buffer, not including the terminating NULL character
+ ParamPath.MaximumLength = length * sizeof(WCHAR); // total size in bytes, of memory allocated for Buffer
+ ParamPath.Buffer = new WCHAR[length];
+ if( !ParamPath.Buffer )
+ {
+ GU_PRINT(TRACE_LEVEL_ERROR, GU,"Failed to allocate parameters path buffer\n");
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ RtlStringCchCopyW(ParamPath.Buffer, length, pszRegistryPath);
+
+ if (NULL != pszSuffix)
+ {
+ #pragma prefast(suppress:6053, "The ParamPath.Buffer is preallocated to the required length, and the assert checks for this assumption")
+ RtlStringCchCatW(ParamPath.Buffer, length, pszSuffix);
+ }
+
+ //
+ //Clear the table. This clears all the query callback pointers,
+ // and sets up the terminating table entry.
+ //
+ memset(table, 0, sizeof(table));
+
+ //
+ // Setup the table entries.
+ //
+ table[0].Flags = RTL_QUERY_REGISTRY_DIRECT;
+ table[0].Name = const_cast <LPWSTR>(pszValueName);
+ table[0].EntryContext = pVal;
+ table[0].DefaultType = REG_DWORD;
+ table[0].DefaultData = &DefaultVal;
+ table[0].DefaultLength = sizeof(ULONG);
+
+ status = RtlQueryRegistryValues(RTL_REGISTRY_ABSOLUTE, ParamPath.Buffer, table, NULL, NULL );
+ if (!NT_SUCCESS(status))
+ {
+ GU_PRINT(TRACE_LEVEL_ERROR, GU,"RtlQueryRegistryValues failed status =0x%x\n", status);
+ *pVal = DefaultVal;
+ status = STATUS_SUCCESS;
+ }
+
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, " status %#x, path %S, name %S \n",
+ status, ParamPath.Buffer, table[0].Name );
+
+ delete [] ParamPath.Buffer;
+ return status;
+}
+
+NTSTATUS
+ReadRegStrRegistryValueInNonPagedMemory(
+ IN LPCWSTR pszRegistryPath,
+ IN LPCWSTR pszSuffix,
+ IN LPCWSTR pszValueName,
+ IN UINT flags,
+ OUT LPWSTR * pWstr
+ )
+{
+ //
+ // NDIS Query Unicode in ReadRegistryValue allocates PAGED memory
+	// At DISPATCH_LEVEL this causes an ASSERT.
+	// Hence this function uses our customized operator new for non-paged allocation.
+ //
+
+ UCHAR* pWcharTemp = NULL;
+ *pWstr = NULL;
+
+ VERIFY_DISPATCH_LEVEL(PASSIVE_LEVEL);
+
+ UNICODE_STRING tempString = { 0, 0, NULL};
+
+ NTSTATUS ntStatus = ReadRegistryValue(
+ pszRegistryPath,
+ pszSuffix,
+ pszValueName,
+ REG_NONE,
+ NULL,
+ 0,
+ flags,
+ &tempString
+ );
+
+ if(tempString.Buffer == NULL)
+ {
+ ntStatus = STATUS_OBJECT_NAME_NOT_FOUND;
+ }
+
+ if(ntStatus != STATUS_SUCCESS)
+ {
+ switch (ntStatus)
+ {
+ case STATUS_OBJECT_NAME_NOT_FOUND:
+			GU_PRINT(TRACE_LEVEL_ERROR, GU, "Registry string failed to read: Suffix = %S, Name = %S not found\n",pszSuffix, pszValueName);
+ break;
+ default:
+			GU_PRINT(TRACE_LEVEL_ERROR, GU, "Registry string failed to read with NTSTATUS 0x%X\n",ntStatus);
+ }
+ return ntStatus;
+ }
+
+
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, "Read string value from registry: %S\n", tempString.Buffer);
+ const ULONG c_NdisStringMaxLength = tempString.MaximumLength;
+ pWcharTemp = new UCHAR[c_NdisStringMaxLength];
+ if (NULL == pWcharTemp)
+ {
+ //
+ // Allocaton failed
+ //
+ RtlFreeUnicodeString(&tempString);
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ RtlCopyMemory(pWcharTemp,tempString.Buffer, c_NdisStringMaxLength);
+ *pWstr = (LPWSTR)pWcharTemp;
+
+ RtlFreeUnicodeString(&tempString);
+
+ return STATUS_SUCCESS;
+}
+
+NTSTATUS ReadRegistryValue(
+ IN LPCWSTR pszRegistryPath,
+ IN LPCWSTR pszSuffix,
+ IN LPCWSTR pszValueName,
+ IN ULONG DefaultValueType,
+ IN PVOID DefaultVal,
+ IN ULONG DefaultValLength,
+ IN ULONG Flags,
+ OUT PVOID pVal
+ )
+{
+ NTSTATUS status;
+ /* Remember the terminating entry in the table below. */
+ RTL_QUERY_REGISTRY_TABLE table[2];
+ UNICODE_STRING ParamPath;
+
+ ASSERT(NULL != pszRegistryPath);
+ ASSERT(NULL != pszValueName);
+ ASSERT(NULL != pVal);
+
+ USHORT suffixLength = 0;
+
+ if (NULL != pszSuffix)
+ {
+ suffixLength = (USHORT)wcslen(pszSuffix) ;
+ }
+
+ RtlInitUnicodeString( &ParamPath, NULL );
+ USHORT length = (USHORT)wcslen(pszRegistryPath) + suffixLength + 1;
+ ParamPath.Length = (length -1) * sizeof(WCHAR); // length in bytes, of the Buffer, not including the terminating NULL character
+ ParamPath.MaximumLength = length * sizeof(WCHAR); // total size in bytes, of memory allocated for Buffer
+ ParamPath.Buffer = new WCHAR[length];
+ if( !ParamPath.Buffer )
+ {
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ RtlStringCchCopyW(ParamPath.Buffer, length, pszRegistryPath);
+
+ if (NULL != pszSuffix)
+ {
+ #pragma prefast(suppress:6053, "The ParamPath.Buffer is preallocated to the required length, and the assert checks for this assumption")
+ RtlStringCchCatW(ParamPath.Buffer, length, pszSuffix);
+ }
+
+ //
+ //Clear the table. This clears all the query callback pointers,
+ // and sets up the terminating table entry.
+ //
+ memset(table, 0, sizeof(table));
+
+ //
+ // Setup the table entries.
+ //
+ table[0].Flags = RTL_QUERY_REGISTRY_DIRECT | Flags;
+ table[0].Name = const_cast <LPWSTR>(pszValueName);
+ table[0].EntryContext = pVal;
+ table[0].DefaultType = DefaultValueType;
+ table[0].DefaultData = DefaultVal;
+ table[0].DefaultLength = DefaultValLength;
+
+ status = RtlQueryRegistryValues(RTL_REGISTRY_ABSOLUTE, ParamPath.Buffer, table, NULL, NULL );
+ if (!NT_SUCCESS(status) && DefaultVal != NULL)
+ {
+		GU_PRINT(TRACE_LEVEL_WARNING, GU, "RtlQueryRegistryValues failed to read %S\\%S. status =0x%x. Use default value.\n", ParamPath.Buffer, table[0].Name, status);
+ RtlCopyMemory(pVal, DefaultVal, DefaultValLength);
+ status = STATUS_SUCCESS;
+ }
+
+ GU_PRINT(TRACE_LEVEL_INFORMATION, GU, " status 0x%x, path %S, name %S \n", status, ParamPath.Buffer, table[0].Name);
+
+ delete [] ParamPath.Buffer;
+ return status;
+}
+
+
+
+
+void
+DbgPrintMacAddress(
+ LPCSTR str_description,
+ u8 macAddress[],
+ unsigned int traceLevel
+ )
+{
+ ASSERT(NULL != macAddress);
+ ASSERT(NULL != str_description);
+
+ GU_PRINT(traceLevel, GU,
+ "%s%.2X-%.2X-%.2X-%.2X-%.2X-%.2X\n",
+ str_description,
+ macAddress[0], macAddress[1],macAddress[2],
+ macAddress[3],macAddress[4],macAddress[5]
+ );
+}
+
+void
+DbgPrintIpAddress(
+ LPCSTR str_description,
+ u8 ipAddress[],
+ unsigned int traceLevel
+ )
+{
+ ASSERT(NULL != ipAddress);
+ ASSERT(NULL != str_description);
+
+ GU_PRINT(traceLevel, GU,
+ "%s%d.%d.%d.%d\n",
+ str_description,
+ ipAddress[0], ipAddress[1],ipAddress[2],ipAddress[3]
+ );
+}
+
+bool
+Validate16bitValue(
+ __be16 be16_currentValue,
+ u16 expectedValue,
+ LPCSTR valueName)
+{
+ ASSERT(NULL != valueName);
+ u16 valueByHardwareBytesOrder = be16_to_cpu(be16_currentValue);
+ if (valueByHardwareBytesOrder != expectedValue)
+ {
+ GU_PRINT(TRACE_LEVEL_VERBOSE, GU,
+			"ARP detection: %s field; Expected Value = 0x%X, current Value = 0x%X\n",
+ valueName,expectedValue,valueByHardwareBytesOrder
+ );
+ return false;
+ }
+ return true;
+}
+
+bool
+Validate8BitValue(
+ u8 value,
+ u8 expectedValue,
+ LPCSTR valueName)
+{
+ ASSERT(NULL != valueName);
+ if (value != expectedValue)
+ {
+ GU_PRINT(TRACE_LEVEL_VERBOSE, GU,
+			"ARP detection: %s field; Expected Value = 0x%X, current Value = 0x%X\n",
+ valueName,expectedValue,value
+ );
+ return false;
+ }
+ return true;
+}
+
+void guid_to_str(u64 guid, WCHAR * pWstr, DWORD BufLen)
+{
+ PUCHAR pGuid = (UCHAR*)&guid;
+
+ char temp[BUFFER_SIZE] = {0};
+
+ HRESULT hr = StringCbPrintf(temp, BUFFER_SIZE , "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x",
+ pGuid[7],
+ pGuid[6],
+ pGuid[5],
+ pGuid[4],
+ pGuid[3],
+ pGuid[2],
+ pGuid[1],
+ pGuid[0]
+ );
+ if (!SUCCEEDED(hr))
+ {
+ ASSERT(false);
+ }
+
+ mbstowcs(pWstr, temp, BUFFER_SIZE);
+}
+
+
+template <class T>
+NTSTATUS FIFO<T>::Init(int MaxSize) {
+ ASSERT(m_pData == NULL);
+ m_pData = new VOID *[MaxSize];
+ if (m_pData == NULL) {
+ GU_PRINT(TRACE_LEVEL_ERROR, GU,"new failed\n");
+ return STATUS_NO_MEMORY;
+ }
+ m_Head = m_Tail = 0;
+ m_Size = MaxSize;
+ m_Count = 0;
+ return STATUS_SUCCESS;
+}
+
+
+// This is needed in order to force the compiler to create the function
+void FIFO_DUMMY_FUNCTION(){
+ FIFO <VOID *> m_DataToSend;
+
+ m_DataToSend.Init(5);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Tracer Class //
+///////////////////////////////////////////////////////////////////////////////
+
+VOID Tracer::Init() {
+ m_CurrentLocation = 0;
+}
+
+VOID Tracer::AddEvent(EventType Event, int ExtraData) {
+ if (m_CurrentLocation >= MAX_EVENTS) {
+ return;
+ }
+ int Location = m_CurrentLocation++;
+ if ((Location > 0) && ( Event == PROCESS_RX_INTERNAL_START)) {
+ if (m_data[Location-1].Event == PROCESS_RX_INTERNAL_START) {
+ ExtraData = 0;
+ Event = PROCESS_RX_INTERNAL_START_SKIPING;
+ } else if (m_data[Location-1].Event == PROCESS_RX_INTERNAL_START_SKIPING) {
+ Location--;
+ ExtraData = m_data[Location].ExtraData+1;
+ Event = PROCESS_RX_INTERNAL_START_SKIPING;
+ }
+
+
+ }
+ m_data[Location].TimeStamp = GetTimeStamp();
+ m_data[Location].Event = Event;
+ m_data[Location].ExtraData = ExtraData;
+
+}
+
+void Tracer::Printxx() {
+ int i;
+ for(i=0; i < m_CurrentLocation; i++) {
+ GU_PRINT(TRACE_LEVEL_ERROR, GU, "Time = %I64d: ", m_data[i].TimeStamp / 1000);
+ switch(m_data[i].Event) {
+ case PROCESS_RX_START:
+ GU_PRINT(TRACE_LEVEL_ERROR, GU, "PROCESS_RX_START\n");
+ break;
+ case PROCESS_RX_END:
+			GU_PRINT(TRACE_LEVEL_ERROR, GU, "PROCESS_RX_END handled %d packets\n", m_data[i].ExtraData);
+ break;
+ case PROCESS_RX_INTERNAL_START:
+ GU_PRINT(TRACE_LEVEL_ERROR, GU, "PROCESS_RX_INTERNAL_START\n");
+ break;
+ case PROCESS_RX_INTERNAL_START_SKIPING:
+ GU_PRINT(TRACE_LEVEL_ERROR, GU, "PROCESS_RX_INTERNAL_START_SKIPING pooled %d times\n", m_data[i].ExtraData);
+ break;
+
+ case MP_PORT_SEND_PACKETS:
+ GU_PRINT(TRACE_LEVEL_ERROR, GU, "MP_PORT_SEND_PACKETS \n");
+ break;
+
+ case COMPLEATD_INDICATING:
+			GU_PRINT(TRACE_LEVEL_ERROR, GU, "COMPLEATD_INDICATING handled %d packets\n", m_data[i].ExtraData);
+ break;
+ default:
+			GU_PRINT(TRACE_LEVEL_ERROR, GU, "illegal event %d\n", m_data[i].Event);
+ }
+ }
+ m_CurrentLocation = 0;
+}
+
+USHORT ntohs(USHORT in)
+{
+ return ((in & 0xff) << 8) | ((in & 0xff00) >> 8);
+}
+
+// BUGBUG: Understand how to remove the 20 from the code.
+// This function is a wrapper for KeWaitForSingleObject that adds
+// assertions to the values returned by it
+NTSTATUS
+ MyKeWaitForSingleObject(
+ IN PVOID Object,
+ IN KWAIT_REASON WaitReason,
+ IN KPROCESSOR_MODE WaitMode,
+ IN BOOLEAN Alertable,
+ IN PLARGE_INTEGER Timeout OPTIONAL,
+ IN BOOLEAN ExceptApc
+ )
+{
+ NTSTATUS rc = STATUS_SUCCESS;
+ int i;
+ for (i=0; i < 20; i++) {
+ rc = KeWaitForSingleObject(
+ Object,
+ WaitReason,
+ WaitMode,
+ Alertable,
+ Timeout
+ );
+ if (!NT_SUCCESS(rc)) {
+ ASSERT(FALSE);
+ GU_PRINT(TRACE_LEVEL_ERROR ,GU ,"KeWaitForSingleObject failed rc = 0x%x\n", rc );
+			// No matter what we do the program can't continue, let's crash it
+ int *i = NULL;
+ *i = 5;
+ }
+ ASSERT((rc == STATUS_SUCCESS ) ||
+ (rc == STATUS_ALERTED ) ||
+ (rc == STATUS_USER_APC ) ||
+			(rc == STATUS_TIMEOUT )); // These are simply all the return codes from the DDK
+
+ ASSERT( (Timeout != NULL ) || rc != STATUS_TIMEOUT);
+ if (rc != STATUS_USER_APC) {
+ break;
+ } else {
+ // Currently we only expect to have an APC from the user threads call back
+ if (ExceptApc == FALSE) {
+				GU_PRINT(TRACE_LEVEL_WARNING ,GU ,("KeWaitForSingleObject was stopped because of STATUS_USER_APC\n" ));
+ ASSERT(FALSE);
+ } else {
+ break;
+ }
+ }
+ }
+ if (i == 20) {
+ GU_PRINT(TRACE_LEVEL_ERROR ,GU ,("!!!! KeWaitForSingleObject was Exhausted STATUS_USER_APC\n" ));
+		// This is probably fine if we are running from a user thread
+ ASSERT((WaitReason == UserRequest) && (WaitMode == UserMode));
+ }
+ return rc;
+}
+
+int ExceptionFilter(unsigned int code, struct _EXCEPTION_POINTERS *ep) {
+	// This filter currently only allows us to check the error before we continue
+ ASSERT(FALSE);
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+NTSTATUS
+CopyFromUser(
+ IN void* const p_dest,
+ IN const void* const p_src,
+ IN const size_t count )
+{
+ /*
+ * The memory copy must be done within a try/except block as the
+ * memory could be changing while the buffer is copied.
+ */
+ __try
+ {
+ ProbeForRead( (void*)p_src, count, 1 );
+#ifdef DONT_COPY_DATA
+ if (count < 1000){
+ RtlCopyMemory( p_dest, p_src, count );
+ }
+#else
+ RtlCopyMemory( p_dest, p_src, count );
+#endif
+ return STATUS_SUCCESS;
+ }
+ __except(ExceptionFilter(GetExceptionCode(), GetExceptionInformation())) {
+ GU_PRINT(TRACE_LEVEL_ERROR ,GU ,("copying memory from user failed\n"));
+ return STATUS_ACCESS_DENIED;
+ }
+}
+
+
+NTSTATUS
+CopyToUser(
+ IN void* const p_dest,
+ IN const void* const p_src,
+ IN const size_t count
+ )
+{
+ /*
+ * The memory copy must be done within a try/except block as the
+ * memory could be changing while the buffer is copied.
+ */
+ __try
+ {
+ ProbeForWrite( p_dest, count, 1 );
+#ifdef DONT_COPY_DATA
+ if (count < 1000){
+ RtlCopyMemory( p_dest, p_src, count );
+ }
+#else
+ RtlCopyMemory( p_dest, p_src, count );
+#endif
+ return STATUS_SUCCESS;
+ }
+ __except(ExceptionFilter(GetExceptionCode(), GetExceptionInformation())) {
+ GU_PRINT(TRACE_LEVEL_ERROR ,GU ,("copying memory to user failed\n"));
+ return STATUS_ACCESS_DENIED;
+ }
+}
+
+VOID * MapUserMemory(
+ IN PVOID Address,
+ IN ULONG size,
+ OUT PMDL *ppMdl
+ ) {
+ // Create the MDL:
+ PMDL pMdl = NULL;
+ PVOID pKernelAddress;
+
+ // Probe here for write
+ __try
+ {
+ ProbeForWrite( Address, size, 1 );
+
+ pMdl = IoAllocateMdl(Address, size, FALSE, FALSE, NULL);
+ ASSERT(pMdl != NULL);
+ }
+ __except(EXCEPTION_EXECUTE_HANDLER)
+ {
+ GU_PRINT(TRACE_LEVEL_ERROR ,GU ,("copying memory to user failed\n"));
+ ASSERT(FALSE);
+ return NULL;
+ }
+ if (pMdl == NULL) {
+ ASSERT(FALSE);
+ return NULL;
+ }
+
+ __try {
+ MmProbeAndLockPages(pMdl, KernelMode , IoModifyAccess );
+ } __except(EXCEPTION_EXECUTE_HANDLER) {
+ ASSERT(FALSE);
+ IoFreeMdl(pMdl);
+ return NULL;
+ }
+
+ pKernelAddress = MmMapLockedPagesSpecifyCache(
+ pMdl,
+ KernelMode,
+ MmCached , //??????????????
+ NULL,
+ FALSE,
+ LowPagePriority
+ );
+
+ // Copy the output data
+ *ppMdl = pMdl;
+ return pKernelAddress;
+}
+
+VOID UnMapMemory(
+ IN VOID *pKernelAddress,
+ IN PMDL pMdl)
+{
+ MmUnmapLockedPages(pKernelAddress, pMdl);
+
+ MmUnlockPages(pMdl);
+
+ IoFreeMdl(pMdl);
+
+}
+
+VOID UpdateRc(NTSTATUS *rc, NTSTATUS rc1)
+{
+	// We want to keep the first error
+ if (NT_SUCCESS(*rc)) {
+ *rc = rc1;
+ }
+}
+
Index: B:/users/irena/proj1/trunk/core/genutils/kernel/makefile
===================================================================
--- B:/users/irena/proj1/trunk/core/genutils/kernel/makefile (revision 0)
+++ B:/users/irena/proj1/trunk/core/genutils/kernel/makefile (revision 6862)
@@ -0,0 +1,7 @@
+#
+# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source
+# file to this component. This file merely indirects to the real make file
+# that is shared by all the driver components of the Windows NT DDK
+#
+
+!INCLUDE $(NTMAKEENV)\makefile.def
Index: B:/users/irena/proj1/trunk/core/genutils/dirs
===================================================================
--- B:/users/irena/proj1/trunk/core/genutils/dirs (revision 0)
+++ B:/users/irena/proj1/trunk/core/genutils/dirs (revision 6862)
@@ -0,0 +1,3 @@
+DIRS=\
+ kernel \
+
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_pcipool.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_pcipool.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_pcipool.h (revision 6862)
@@ -0,0 +1,102 @@
+#pragma once
+
+typedef struct pci_pool {
+ size_t size;
+ struct mlx4_dev * mdev;
+ char name [32];
+ NPAGED_LOOKASIDE_LIST pool_hdr;
+} pci_pool_t;
+
+// taken from dmapool.c
+
+/**
+* pci_pool_create - Creates a pool of consistent memory blocks, for dma.
+* @name: name of pool, for diagnostics
+* @mdev: device that will be doing the DMA
+* @size: size of the blocks in this pool.
+* @align: alignment requirement for blocks; must be a power of two
+* @allocation: returned blocks won't cross this boundary (or zero)
+* Context: !in_interrupt()
+*
+* Returns a dma allocation pool with the requested characteristics, or
+* null if one can't be created. Given one of these pools, dma_pool_alloc()
+* may be used to allocate memory. Such memory will all have "consistent"
+* DMA mappings, accessible by the device and its driver without using
+* cache flushing primitives. The actual size of blocks allocated may be
+* larger than requested because of alignment.
+*
+* If allocation is nonzero, objects returned from dma_pool_alloc() won't
+ * cross that size boundary. This is useful for devices which have
+ * addressing restrictions on individual DMA transfers, such as not crossing
+ * boundaries of 4KBytes.
+ */
+
+pci_pool_t *
+pci_pool_create (const char *name, struct pci_dev *pdev,
+ size_t size, size_t align, size_t allocation);
+
+/**
+ * dma_pool_alloc - get a block of consistent memory
+ * @pool: dma pool that will produce the block
+ * @mem_flags: GFP_* bitmask
+ * @handle: pointer to dma address of block
+ *
+ * This returns the kernel virtual address of a currently unused block,
+ * and reports its dma address through the handle.
+ * If such a memory block can't be allocated, null is returned.
+ */
+static inline void *
+pci_pool_alloc (pci_pool_t *pool, int mem_flags, dma_addr_t *handle)
+{
+ PHYSICAL_ADDRESS pa;
+ void * ptr;
+ UNREFERENCED_PARAMETER(mem_flags);
+
+ ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL );
+
+ ptr = ExAllocateFromNPagedLookasideList( &pool->pool_hdr );
+ if (ptr != NULL) {
+ pa = MmGetPhysicalAddress( ptr );
+		// TODO: convert physical address to dma one
+ handle->da = pa.QuadPart;
+ handle->va = ptr;
+ handle->sz = 0; /* not known here */
+ }
+ return ptr;
+}
+
+
+/**
+* dma_pool_free - put block back into dma pool
+* @pool: the dma pool holding the block
+* @vaddr: virtual address of block
+* @dma: dma address of block
+*
+* Caller promises neither device nor driver will again touch this block
+* unless it is first re-allocated.
+*/
+static inline void
+pci_pool_free (pci_pool_t *pool, void *vaddr, dma_addr_t dma)
+{
+ UNREFERENCED_PARAMETER(dma);
+ ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL );
+ ExFreeToNPagedLookasideList( &pool->pool_hdr, vaddr );
+}
+
+
+
+/**
+ * pci_pool_destroy - destroys a pool of dma memory blocks.
+ * @pool: dma pool that will be destroyed
+ * Context: !in_interrupt()
+ *
+ * Caller guarantees that no more memory from the pool is in use,
+ * and that nothing will try to use the pool after this call.
+ */
+static inline void
+pci_pool_destroy (pci_pool_t *pool)
+{
+ ExDeleteNPagedLookasideList( &pool->pool_hdr );
+ ExFreePool( pool);
+}
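+
+/*
+ * A minimal usage sketch (pool name, sizes and GFP_KERNEL are illustrative;
+ * error checks omitted):
+ *
+ *   pci_pool_t *pool = pci_pool_create( "cq_pool", pdev, 64, 64, 0 );
+ *   dma_addr_t dma;
+ *   void *p = pci_pool_alloc( pool, GFP_KERNEL, &dma );
+ *   ...
+ *   pci_pool_free( pool, p, dma );
+ *   pci_pool_destroy( pool );
+ */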
+
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_radix.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_radix.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_radix.h (revision 6862)
@@ -0,0 +1,26 @@
+#pragma once
+
+#include <complib/cl_map.h>
+
+struct radix_tree_root {
+ cl_map_t map;
+};
+
+int radix_tree_insert(struct radix_tree_root *root,
+ unsigned long index, void *item);
+
+void *radix_tree_lookup(struct radix_tree_root *root,
+ unsigned long index);
+
+void *radix_tree_delete(struct radix_tree_root *root,
+ unsigned long index);
+
+
+cl_status_t radix_tree_create(struct radix_tree_root *root,
+ gfp_t gfp_mask);
+
+void radix_tree_destroy(struct radix_tree_root *root );
+
+#define INIT_RADIX_TREE(root, mask) radix_tree_create(root, mask)
+#define RMV_RADIX_TREE(root) radix_tree_destroy(root)
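+
+/*
+ * A minimal usage sketch of this cl_map-backed radix tree (my_root and my_item
+ * are illustrative; the gfp mask is ignored by radix_tree_create):
+ *
+ *   struct radix_tree_root my_root;
+ *   INIT_RADIX_TREE( &my_root, GFP_KERNEL );
+ *   radix_tree_insert( &my_root, 0x10, my_item );
+ *   void *p = radix_tree_lookup( &my_root, 0x10 );
+ *   radix_tree_delete( &my_root, 0x10 );
+ *   RMV_RADIX_TREE( &my_root );
+ */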
+
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_memory.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_memory.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_memory.h (revision 6862)
@@ -0,0 +1,406 @@
+#pragma once
+
+#include "iobuf.h"
+#include "complib\cl_debug.h"
+#include "complib\cl_memory.h"
+
+////////////////////////////////////////////////////////
+//
+// CONSTANTS
+//
+////////////////////////////////////////////////////////
+
+#define MT_TAG_ATOMIC 'MOTA'
+#define MT_TAG_KERNEL 'LNRK'
+#define MT_TAG_HIGH 'HGIH'
+#define MT_TAG_PCIPOOL 'PICP'
+#define MT_TAG_IOMAP 'PAMI'
+
+////////////////////////////////////////////////////////
+//
+// SUBSTITUTIONS
+//
+////////////////////////////////////////////////////////
+
+
+////////////////////////////////////////////////////////
+//
+// MACROS
+//
+////////////////////////////////////////////////////////
+
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+
+ ////////////////////////////////////////////////////////
+ //
+ // Helper functions
+ //
+ ////////////////////////////////////////////////////////
+
+// returns log2 of the number of pages, i.e.
+// for size <= 4096 ==> 0
+// for size <= 8192 ==> 1
+static inline int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+
+////////////////////////////////////////////////////////
+//
+// SYSTEM MEMORY
+//
+////////////////////////////////////////////////////////
+
+typedef enum _gfp {
+ __GFP_NOWARN = 0, /* Suppress page allocation failure warning */
+ __GFP_HIGHMEM = 0, /* high memory */
+ GFP_ATOMIC = 1, /* can't wait (i.e. DPC or higher) */
+ GFP_KERNEL = 2, /* can wait (npaged) */
+ GFP_HIGHUSER = 4 /* GFP_KERNEL, that can be in HIGH memory */
+}
+gfp_t;
+
+struct vm_area_struct {
+ void * ptr;
+};
+
+static inline void * kmalloc( SIZE_T bsize, gfp_t gfp_mask)
+{
+ void *ptr;
+ ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL);
+ ASSERT(bsize);
+ switch (gfp_mask) {
+ case GFP_ATOMIC:
+ ptr = ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_ATOMIC );
+ break;
+ case GFP_KERNEL:
+ ptr = ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_KERNEL );
+ break;
+ case GFP_HIGHUSER:
+ ptr = ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_HIGH );
+ break;
+ default:
+ cl_dbg_out("kmalloc: unsupported flag %d\n", gfp_mask);
+ ptr = NULL;
+ break;
+ }
+ return ptr;
+}
+
+static inline void * kzalloc( SIZE_T bsize, gfp_t gfp_mask)
+{
+ void* va = kmalloc(bsize, gfp_mask);
+ if (va)
+ RtlZeroMemory(va, bsize);
+ return va;
+}
+
+static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+{
+ if (n != 0 && size > ULONG_MAX / n)
+ return NULL;
+ return kzalloc(n * size, flags);
+}
+
+static inline void kfree (const void *pobj)
+{
+ ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL );
+ if (pobj)
+ ExFreePool((void *)pobj);
+}
+
+#define vmalloc(size) kmalloc(size, GFP_KERNEL)
+#define vfree kfree
+
+#define get_zeroed_page(mask) kzalloc(PAGE_SIZE, mask)
+#define free_page(ptr) kfree(ptr)
+
+
+////////////////////////////////////////////////////////
+//
+// IO SPACE <==> SYSTEM MEMORY
+//
+////////////////////////////////////////////////////////
+
+/**
+* ioremap - map bus memory into CPU space
+* @addr: bus address of the memory
+* @size: size of the resource to map
+*
+* ioremap performs a platform specific sequence of operations to
+* make bus memory CPU accessible via the readb/readw/readl/writeb/
+* writew/writel functions and the other mmio helpers. The returned
+* address is not guaranteed to be usable directly as a virtual
+* address.
+*/
+static inline void *ioremap(io_addr_t addr, SIZE_T size, MEMORY_CACHING_TYPE cache_type)
+{
+ PHYSICAL_ADDRESS pa;
+ void *va;
+
+ ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL );
+ pa.QuadPart = addr;
+ va = MmMapIoSpace( pa, size, cache_type );
+ return va;
+}
+
+static inline void iounmap(void *va, SIZE_T size)
+{
+ MmUnmapIoSpace( va, size );
+}
+
+
+////////////////////////////////////////////////////////
+//
+// DMA SUPPORT
+//
+////////////////////////////////////////////////////////
+
+enum dma_data_direction {
+ PCI_DMA_BIDIRECTIONAL = 0,
+ PCI_DMA_TODEVICE = 1,
+ PCI_DMA_FROMDEVICE = 2,
+ PCI_DMA_NONE = 3,
+ DMA_TO_DEVICE = PCI_DMA_TODEVICE,
+ DMA_FROM_DEVICE = PCI_DMA_FROMDEVICE,
+ DMA_BIDIRECTIONAL = PCI_DMA_BIDIRECTIONAL,
+ DMA_NONE = PCI_DMA_NONE,
+};
+
+#define dma_get_cache_alignment (int)KeGetRecommendedSharedDataAlignment
+
+// wrapper to DMA address
+typedef struct _dma_addr
+{
+ // TODO: in some cases it is still physical address today
+ io_addr_t da; /* logical (device) address */
+ void * va; /* kernel virtual address */
+ size_t sz; /* buffer size */
+} dma_addr_t;
+
+#define lowmem_page_address(dma_addr) ((dma_addr).va)
+
+struct mlx4_dev;
+
+void *alloc_cont_mem(
+ IN struct pci_dev *pdev,
+ IN unsigned long size,
+ OUT dma_addr_t*p_dma_addr);
+
+void free_cont_mem(
+ IN struct pci_dev *pdev,
+ IN dma_addr_t*p_dma_addr);
+
+// TODO: translate to DMA space - nothing is done for now
+static inline dma_addr_t pci_map_page(struct pci_dev *pdev,
+ dma_addr_t dma_addr, unsigned long offset, SIZE_T size, int direction)
+{
+ UNUSED_PARAM(pdev);
+ UNUSED_PARAM(offset);
+ UNUSED_PARAM(size);
+ UNUSED_PARAM(direction);
+
+ return dma_addr;
+}
+
+static inline dma_addr_t
+alloc_pages( struct pci_dev *pdev, gfp_t gfp, int order )
+{
+ dma_addr_t dma_addr;
+ UNUSED_PARAM(gfp);
+ alloc_cont_mem( pdev, PAGE_SIZE << order, &dma_addr );
+ return dma_addr;
+}
+
+#define alloc_page(pdev, mask) alloc_pages(pdev, (mask), 0)
+#define __get_free_page(mask) kzalloc(PAGE_SIZE, mask)
+
+static inline void
+__free_pages( struct pci_dev *pdev, dma_addr_t dma_addr, int order )
+{
+ UNUSED_PARAM(order);
+ ASSERT((PAGE_SIZE << order) == (int)dma_addr.sz);
+ free_cont_mem( pdev, &dma_addr );
+}
+
+#define __free_page(pdev, dma_addr) __free_pages(pdev, (dma_addr), 0)
+
+
+
+static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+ return !dma_addr.sz;
+}
+
+static inline void pci_unmap_page(struct pci_dev *pdev,
+ dma_addr_t dma_addr, SIZE_T size, int direction)
+{
+ UNUSED_PARAM(pdev);
+ UNUSED_PARAM(dma_addr);
+ UNUSED_PARAM(size);
+ UNUSED_PARAM(direction);
+}
+
+static inline void
+dma_sync_single( void *vdev, dma_addr_t dma_addr,
+ size_t size, int direction)
+{
+ UNUSED_PARAM(vdev);
+ UNUSED_PARAM(dma_addr);
+ UNUSED_PARAM(size);
+ UNUSED_PARAM(direction);
+ // TODO: FlushAdapterBuffers() should be used here
+}
+
+struct pci_dev;
+
+void *
+dma_alloc_coherent( struct mlx4_dev **dev, size_t size,
+ dma_addr_t *p_dma, gfp_t gfp );
+
+void dma_free_coherent( struct mlx4_dev **dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+ void pci_free_consistent( struct pci_dev *pdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *buf, size_t buf_size, int direction)
+{
+ dma_addr_t dma;
+ PHYSICAL_ADDRESS pa;
+
+ UNUSED_PARAM(pdev);
+ UNUSED_PARAM(direction);
+
+ pa = MmGetPhysicalAddress(buf);
+ dma.da = pa.QuadPart;
+ dma.va = buf;
+ dma.sz = buf_size;
+ return dma;
+}
+
+static inline void pci_unmap_single(struct pci_dev *hwdev, u64 ba, size_t size,
+ int direction)
+{
+ UNUSED_PARAM(hwdev);
+ UNUSED_PARAM(ba);
+ UNUSED_PARAM(size);
+ UNUSED_PARAM(direction);
+}
+
+////////////////////////////////////////////////////////
+//
+// SG lists
+//
+////////////////////////////////////////////////////////
+
+#define sg_dma_addr(sg) ((sg)->dma_addr)
+#define sg_dma_address(sg) ((sg)->dma_addr.da)
+#define sg_dma_len(sg) ((sg)->dma_addr.sz)
+#define sg_dma_address_inc(p_dma,val) (p_dma)->da += val
+#define sg_page(sg) ((sg)->dma_addr)
+
+struct scatterlist {
+ dma_addr_t dma_addr; /* logical (device) address */
+ unsigned int offset; /* offset in the first page */
+ unsigned int length;
+ PMDL p_mdl; /* MDL, if any (used for user space buffers) */
+};
+
+struct sg_table {
+ struct scatterlist *sgl; /* the list */
+ unsigned int nents; /* number of mapped entries */
+};
+
+#define offset_in_page(va) ((ULONG)((ULONG_PTR)(va) & ~PAGE_MASK))
+
+#define sg_next(sg) (sg)++
+
+/*
+* Loop over each sg element, following the pointer to a new list if necessary
+*/
+#define for_each_sg(sglist, sg, nr, __i) \
+ for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
+
+#define scsi_for_each_sg(cmd, sg, nseg, __i) \
+ for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
+
+static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
+{
+ memset(sgl, 0, sizeof(*sgl) * nents);
+}
+
+static inline void sg_set_buf(struct scatterlist *sg, void *buf,
+ unsigned int buflen)
+{
+ PHYSICAL_ADDRESS pa = {0};
+
+ sg->offset = offset_in_page(buf);
+ sg->length = buflen;
+
+ pa = MmGetPhysicalAddress(buf);
+ sg->dma_addr.da = pa.QuadPart;
+ sg->dma_addr.va = buf;
+ sg->dma_addr.sz = buflen;
+}
+
+static inline void sg_set_page(struct scatterlist *sg,
+ dma_addr_t dma_addr, unsigned int len, unsigned int offset)
+{
+ sg->offset = offset;
+ sg->dma_addr = dma_addr;
+ sg->length = len;
+}
+
+static inline void sg_init_one(struct scatterlist *sg, void *buf, unsigned int buflen)
+{
+ sg_init_table(sg, 1);
+ sg_set_buf(sg, buf, buflen);
+}
+
+/* Returns: the number of mapped sg elements */
+static inline int pci_map_sg(struct pci_dev *pdev,
+ struct scatterlist *sg, int nents, int direction)
+{
+ UNUSED_PARAM(pdev);
+ UNUSED_PARAM(sg);
+ UNUSED_PARAM(direction);
+ return nents;
+}
+
+/* Returns: the number of unmapped sg elements */
+static inline int pci_unmap_sg(struct pci_dev *pdev,
+ struct scatterlist *sg, int nents, int direction)
+{
+ UNUSED_PARAM(pdev);
+ UNUSED_PARAM(sg);
+ UNUSED_PARAM(direction);
+ return nents;
+}
+
+/* highmem mapping */
+enum km_type {
+ KM_BOUNCE_READ,
+ KM_SKB_SUNRPC_DATA,
+ KM_SKB_DATA_SOFTIRQ,
+ KM_USER0,
+ KM_USER1,
+ KM_BIO_SRC_IRQ,
+ KM_BIO_DST_IRQ,
+ KM_IRQ0,
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
+ KM_TYPE_NR
+};
+
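A minimal sketch of the allocation and scatterlist helpers in use (illustrative only, not part of the patch):

static void mem_example( void )
{
    struct scatterlist sg;
    void *buf = kzalloc( 512, GFP_KERNEL );

    if (!buf)
        return;
    sg_init_one( &sg, buf, 512 );
    /* sg_dma_address(&sg) and sg_dma_len(&sg) now describe the buffer */
    kfree( buf );
}
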
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_pci.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_pci.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_pci.h (revision 6862)
@@ -0,0 +1,86 @@
+#pragma once
+
+// ===========================================
+// LITERALS
+// ===========================================
+
+
+// ===========================================
+// TYPES
+// ===========================================
+
+
+// ===========================================
+// MACROS/FUNCTIONS
+// ===========================================
+
+#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
+#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
+#define PCI_FUNC(devfn) ((devfn) & 0x07)
+
+NTSTATUS pci_hca_reset( struct pci_dev *pdev);
+
+/* use shim to implement that */
+#define mlx4_reset(dev) pci_hca_reset(dev->pdev)
+
+// get bar boundaries
+#define pci_resource_start(dev,bar_num) ((dev)->bar[bar_num >> 1].phys)
+#define pci_resource_len(dev,bar_num) ((dev)->bar[bar_num >> 1].size)
+
+// i/o to registers
+
+static inline u64 readq(const volatile void __iomem *addr)
+{
+ //TODO: write atomic implementation of _IO_READ_QWORD and change mthca_doorbell.h
+ u64 val;
+ READ_REGISTER_BUFFER_ULONG((PULONG)(addr), (PULONG)&val, 2 );
+ return val;
+}
+
+static inline u32 readl(const volatile void __iomem *addr)
+{
+ return READ_REGISTER_ULONG((PULONG)(addr));
+}
+
+static inline u16 reads(const volatile void __iomem *addr)
+{
+ return READ_REGISTER_USHORT((PUSHORT)(addr));
+}
+
+static inline u8 readb(const volatile void __iomem *addr)
+{
+ return READ_REGISTER_UCHAR((PUCHAR)(addr));
+}
+
+#define __raw_readq readq
+#define __raw_readl readl
+#define __raw_reads reads
+#define __raw_readb readb
+
+static inline void writeq(unsigned __int64 val, volatile void __iomem *addr)
+{
+ //TODO: write atomic implementation of _IO_WRITE_QWORD and change mthca_doorbell.h
+ WRITE_REGISTER_BUFFER_ULONG( (PULONG)(addr), (PULONG)&val, 2 );
+}
+
+static inline void writel(unsigned int val, volatile void __iomem *addr)
+{
+ WRITE_REGISTER_ULONG((PULONG)(addr),val);
+}
+
+static inline void writes(unsigned short val, volatile void __iomem *addr)
+{
+ WRITE_REGISTER_USHORT((PUSHORT)(addr),val);
+}
+
+static inline void writeb(unsigned char val, volatile void __iomem *addr)
+{
+ WRITE_REGISTER_UCHAR((PUCHAR)(addr),val);
+}
+
+#define __raw_writeq writeq
+#define __raw_writel writel
+#define __raw_writes writes
+#define __raw_writeb writeb
+
+
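To illustrate the register-access helpers above, a hedged sketch (not part of the patch; BAR 0 and register offset 0x10 are arbitrary, 'pdev' is assumed valid):

static void regs_example( struct pci_dev *pdev )
{
    void *regs;
    u32 val;

    regs = ioremap( pci_resource_start( pdev, 0 ),
                    pci_resource_len( pdev, 0 ), MmNonCached );
    if (!regs)
        return;
    val = readl( (u8*)regs + 0x10 );        /* read a 32-bit register */
    writel( val | 1, (u8*)regs + 0x10 );    /* set its lowest bit */
    iounmap( regs, pci_resource_len( pdev, 0 ) );
}
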
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_list.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_list.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_list.h (revision 6862)
@@ -0,0 +1,99 @@
+#pragma once
+
+////////////////////////////////////////////////////////
+//
+// TYPES
+//
+////////////////////////////////////////////////////////
+
+// Use the type, defined in wdm.h
+#define list_head _LIST_ENTRY
+
+
+////////////////////////////////////////////////////////
+//
+// MACROS
+//
+////////////////////////////////////////////////////////
+
+
+// Define and initialize a list header
+#define LIST_HEAD(name) \
+ struct list_head name = { &(name), &(name) }
+
+// Initialize a list header
+#define INIT_LIST_HEAD(ptr) InitializeListHead(ptr)
+
+// Get to the beginning of the struct for this list entry
+#define list_entry(ptr, type, member) CONTAINING_RECORD(ptr, type, member)
+
+// Iterate over list of 'list_els' of given 'type'
+#define list_for_each_entry(list_el, head, member, type) \
+ for ( list_el = list_entry((head)->Flink, type, member); \
+ &list_el->member != (head); \
+ list_el = list_entry(list_el->member.Flink, type, member))
+
+// Iterate backwards over list of 'list_els' of given 'type'
+#define list_for_each_entry_reverse(list_el, head, member, type) \
+ for (list_el = list_entry((head)->Blink, type, member); \
+ &list_el->member != (head); \
+ list_el = list_entry(list_el->member.Blink, type, member))
+
+// Iterate over list of given type safe against removal of list entry
+#define list_for_each_entry_safe(list_el, tmp_list_el, head, member,type, tmp_type) \
+ for (list_el = list_entry((head)->Flink, type, member), \
+ tmp_list_el = list_entry(list_el->member.Flink, type, member); \
+ &list_el->member != (head); \
+ list_el = tmp_list_el, \
+ tmp_list_el = list_entry(tmp_list_el->member.Flink, tmp_type, member))
+
+
+////////////////////////////////////////////////////////
+//
+// FUNCTIONS
+//
+////////////////////////////////////////////////////////
+
+// Insert a new entry after the specified head.
+static inline void list_add(struct list_head *new_entry, struct list_head *head)
+{
+ InsertHeadList( head, new_entry );
+}
+
+// Insert a new entry before the specified head.
+static inline void list_add_tail(struct list_head *new_entry, struct list_head *head)
+{
+ InsertTailList( head, new_entry );
+}
+
+// Deletes entry from list.
+static inline void list_del(struct list_head *entry)
+{
+ RemoveEntryList( entry );
+}
+
+// Tests whether a list is empty
+static inline int list_empty(const struct list_head *head)
+{
+ return IsListEmpty( head );
+}
+
+// Insert src_list into dst_list and reinitialise the emptied src_list.
+static inline void list_splice_init(struct list_head *src_list,
+ struct list_head *dst_list)
+{
+ if (!list_empty(src_list)) {
+ struct list_head *first = src_list->Flink;
+ struct list_head *last = src_list->Blink;
+ struct list_head *at = dst_list->Flink;
+
+ first->Blink = dst_list;
+ dst_list->Flink = first;
+
+ last->Flink = at;
+ at->Blink = last;
+
+ INIT_LIST_HEAD(src_list);
+ }
+}
+
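A minimal sketch of the list wrappers in use (illustrative only; 'struct my_item' is a hypothetical element type):

struct my_item {
    int val;
    struct list_head link;
};

static void list_example( struct my_item *a, struct my_item *b )
{
    struct my_item *it;
    LIST_HEAD( items );

    list_add_tail( &a->link, &items );
    list_add_tail( &b->link, &items );
    list_for_each_entry( it, &items, link, struct my_item ) {
        /* ... use it->val ... */
    }
    list_del( &a->link );
    list_del( &b->link );
}
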
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_sync.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_sync.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_sync.h (revision 6862)
@@ -0,0 +1,165 @@
+#pragma once
+
+// literals
+#ifndef LONG_MAX
+#define LONG_MAX 2147483647L /* maximum (signed) long value */
+#endif
+
+#ifndef ULONG_MAX
+#define ULONG_MAX 4294967295UL
+#endif
+
+//
+// mutex wrapper
+//
+
+struct mutex
+{
+ KMUTEX m;
+};
+
+#define DEFINE_MUTEX(a) struct mutex a
+
+static inline void mutex_init( struct mutex * mutex )
+{
+ KeInitializeMutex( &mutex->m, 0 );
+}
+
+static inline void mutex_lock( struct mutex * mutex )
+{
+ NTSTATUS status;
+ int need_to_wait = 1;
+
+ ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+ while (need_to_wait) {
+ status = KeWaitForSingleObject( &mutex->m, Executive, KernelMode, FALSE, NULL );
+ if (status == STATUS_SUCCESS)
+ break;
+ }
+}
+
+static inline void mutex_unlock( struct mutex * mutex )
+{
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+ KeReleaseMutex( &mutex->m, FALSE );
+}
+
+
+//
+// semaphore wrapper
+//
+
+struct semaphore
+{
+ KSEMAPHORE s;
+};
+
+static inline void sema_init(
+ IN struct semaphore *sem,
+ IN LONG cnt)
+{
+ ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
+ KeInitializeSemaphore( &sem->s, cnt, cnt );
+}
+
+static inline void up( struct semaphore *sem )
+{
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+ KeReleaseSemaphore( &sem->s, 0, 1, FALSE );
+}
+static inline void down( struct semaphore *sem )
+{
+ NTSTATUS status;
+ int need_to_wait = 1;
+
+ ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+ while (need_to_wait) {
+ status = KeWaitForSingleObject( &sem->s, Executive, KernelMode, FALSE, NULL );
+ if (status == STATUS_SUCCESS)
+ break;
+ }
+}
+
+
+//
+// completion wrapper
+//
+
+struct completion
+{
+ KEVENT event;
+ int done;
+};
+
+static inline void init_completion( struct completion * compl )
+{
+ //TODO: ASSERT is temporarily commented out, because the use of fast mutexes in CompLib
+ // causes execution at APC_LEVEL
+ //ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
+ KeInitializeEvent( &compl->event, NotificationEvent , FALSE );
+ compl->done = 0;
+}
+
+static inline int wait_for_completion_timeout( struct completion * compl, unsigned long timeout )
+{
+ LARGE_INTEGER interval;
+ ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+ interval.QuadPart = (-10)* (__int64)timeout;
+ return (int)KeWaitForSingleObject( &compl->event, Executive, KernelMode, FALSE, &interval );
+}
+
+static inline void wait_for_completion( struct completion * compl )
+{
+ NTSTATUS status;
+ int need_to_wait = 1;
+
+ ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+
+ while (need_to_wait) {
+ status = KeWaitForSingleObject( &compl->event, Executive, KernelMode, FALSE, NULL );
+ if (status == STATUS_SUCCESS)
+ break;
+ }
+}
+
+
+
+static inline void complete( struct completion * compl )
+{
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+ compl->done++;
+ KeSetEvent( &compl->event, 0, FALSE );
+}
+
+#ifdef USE_WDM_INTERRUPTS
+
+//
+// IRQ wrapper
+//
+
+void free_irq(struct mlx4_dev *dev);
+
+int request_irq(
+ IN struct mlx4_dev * dev,
+ IN PKSERVICE_ROUTINE isr, /* ISR */
+ IN PVOID isr_ctx, /* ISR context */
+ IN PKMESSAGE_SERVICE_ROUTINE misr, /* Message ISR */
+ OUT PKINTERRUPT * int_obj /* interrupt object */
+ );
+
+#endif
+
+//
+// various
+//
+
+// TODO: Is it enough to wait at DPC level?
+// Maybe we need to use KeSynchronizeExecution here?
+static inline void synchronize_irq(unsigned int irq)
+{
+ UNUSED_PARAM(irq);
+ KeFlushQueuedDpcs();
+}
+
+
+
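A minimal sketch of the completion wrapper in use (illustrative only; the asynchronous producer is only indicated by a comment):

static void completion_example( struct completion *done )
{
    init_completion( done );
    /* ... start asynchronous work that calls complete(done) when it finishes ... */
    wait_for_completion( done );
}
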
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_bitmap.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_bitmap.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_bitmap.h (revision 6862)
@@ -0,0 +1,79 @@
+#pragma once
+
+#define DECLARE_BITMAP(name,bits) \
+ unsigned long name[BITS_TO_LONGS(bits)]
+
+static inline unsigned long atomic_set_bit(int nr, volatile long * addr)
+{
+ return InterlockedOr( addr, (1 << nr) );
+}
+
+static inline unsigned long atomic_clear_bit(int nr, volatile long * addr)
+{
+ return InterlockedAnd( addr, ~(1 << nr) );
+}
+
+static inline int set_bit(int nr,unsigned long * addr)
+{
+ addr += nr >> 5;
+ return atomic_set_bit( nr & 0x1f, (volatile long *)addr );
+}
+
+static inline int clear_bit(int nr, unsigned long * addr)
+{
+ addr += nr >> 5;
+ return atomic_clear_bit( nr & 0x1f, (volatile long *)addr );
+}
+
+static inline int test_bit(int nr, const unsigned long * addr)
+{
+ int mask;
+
+ addr += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ return ((mask & *addr) != 0);
+}
+
+static inline void bitmap_zero(unsigned long *dst, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ *dst = 0UL;
+ else {
+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ RtlZeroMemory(dst, len);
+ }
+}
+
+#define BITMAP_LAST_WORD_MASK(nbits) \
+ ( ((nbits) % BITS_PER_LONG) ? (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL )
+
+int __bitmap_full(const unsigned long *bitmap, int bits);
+
+static inline int bitmap_full(const unsigned long *src, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
+ else
+ return __bitmap_full(src, nbits);
+}
+
+int __bitmap_empty(const unsigned long *bitmap, int bits);
+
+static inline int bitmap_empty(const unsigned long *src, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
+ else
+ return __bitmap_empty(src, nbits);
+}
+
+static inline void bitmap_fill(unsigned long *dst, int nbits)
+{
+ size_t nlongs = BITS_TO_LONGS(nbits);
+ if (nlongs > 1) {
+ int len = (int)((nlongs - 1) * sizeof(unsigned long));
+ memset(dst, 0xff, len);
+ }
+ dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
+}
+
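A minimal sketch of the bitmap helpers in use (illustrative only; the bit index 37 is arbitrary):

static void bitmap_example( void )
{
    DECLARE_BITMAP( map, 64 );

    bitmap_zero( map, 64 );
    set_bit( 37, map );
    if (test_bit( 37, map ) && !bitmap_empty( map, 64 ))
        clear_bit( 37, map );
}
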
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_atomic.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_atomic.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_atomic.h (revision 6862)
@@ -0,0 +1,60 @@
+#pragma once
+
+#include "complib/cl_atomic.h"
+
+typedef volatile __int32 atomic_t; /* as atomic32_t */
+
+#define atomic_inc cl_atomic_inc
+#define atomic_dec cl_atomic_dec
+
+static inline atomic_t atomic_read(atomic_t *pval)
+{
+ return *pval;
+}
+
+static inline void atomic_set(atomic_t *pval, long val)
+{
+ *pval = (__int32)val;
+}
+
+/**
+* atomic_inc_and_test - increment and test
+* pval: pointer of type atomic_t
+*
+* Atomically increments pval by 1 and
+* returns true if the result is 0, or false for all other
+* cases.
+*/
+static inline int
+atomic_inc_and_test(atomic_t *pval)
+{
+ return cl_atomic_inc(pval) == 0;
+}
+
+/**
+* atomic_dec_and_test - decrement and test
+* pval: pointer of type atomic_t
+*
+* Atomically decrements pval by 1 and
+* returns true if the result is 0, or false for all other
+* cases.
+*/
+static inline int
+atomic_dec_and_test(atomic_t *pval)
+{
+ return cl_atomic_dec(pval) == 0;
+}
+
+
+/**
+* atomic_dec_return - decrement and return the value
+* pval: pointer of type atomic_t
+*
+* Atomically decrements pval by 1 and returns the new value
+*/
+static inline int
+atomic_dec_return(atomic_t *pval)
+{
+ return cl_atomic_dec(pval);
+}
+
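A minimal reference-counting sketch using the atomic wrappers above (illustrative only, not part of the patch):

static void refcount_example( void )
{
    atomic_t refcnt;

    atomic_set( &refcnt, 1 );       /* initial reference */
    atomic_inc( &refcnt );          /* a second user takes a reference */
    atomic_dec( &refcnt );          /* ... and later drops it */
    if (atomic_dec_and_test( &refcnt )) {
        /* last reference dropped - release the object here */
    }
}
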
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_bit.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_bit.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_bit.h (revision 6862)
@@ -0,0 +1,203 @@
+#pragma once
+
+// Nth element of the table contains the index of the first set bit of N; 8 - for N=0
+extern char g_set_bit_tbl[256];
+// Nth element of the table contains the index of the first cleared bit of N; 8 - for N=255
+extern char g_clr_bit_tbl[256];
+
+static inline int fls(int x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+
+/**
+* _ffs_raw - find the first one bit in a word
+* @addr: The address to start the search at
+* @offset: The bitnumber to start searching at
+*
+* returns: 0 if not found, or N+1 if the Nth bit is found
+*/
+static __inline int _ffs_raw(const unsigned long *addr, int offset)
+{
+ //TODO: not efficient code - would be better in assembler
+ int mask;
+ int rbc;
+ int ix;
+ if (!*addr) return 0;
+ mask = 1 << offset;
+ rbc = BITS_PER_LONG - offset;
+ for (ix=0; ix<rbc; ix++, mask<<=1) {
+ if (*addr & mask)
+ return offset + ix + 1;
+ }
+ return 0;
+}
+
+// as previous with offset = 0
+static __inline int _ffs(const unsigned long *addr)
+{
+ unsigned char *ptr = (unsigned char *)addr;
+ if (!*addr) return 0; // skip zero dword
+ if (!*(short*)ptr) ptr += 2; // get to the non-zero word
+ if (!*(char*)ptr) ptr++; // get to the non-zero byte
+ return (int)(((ptr - (unsigned char *)addr ) << 3) + g_set_bit_tbl[*ptr] + 1);
+}
+
+
+#define ffs(val) _ffs((const unsigned long *)&(val))
+
+/**
+* _ffz_raw - find the first zero bit in a word
+* @addr: The address to start the search at
+* @offset: The bitnumber to start searching at
+*
+* returns: 0 if not found, or N+1 if the Nth bit is found
+*/
+static __inline int _ffz_raw(const unsigned long *addr, int offset)
+{
+ //TODO: not efficient code - would be better in assembler
+ int mask;
+ int rbc;
+ int ix;
+ if (!~*addr) return 0;
+ mask = 1 << offset;
+ rbc = BITS_PER_LONG - offset;
+ for (ix=0; ix<rbc; ix++, mask<<=1) {
+ if (!(*addr & mask))
+ return offset + ix + 1;
+ }
+ return 0;
+}
+
+// as previous with offset = 0
+static __inline int _ffz(const unsigned long *addr)
+{
+ unsigned char *ptr = (unsigned char *)addr;
+ if (!~*addr) return 0; // all bits are set - no zero bit
+ if (!~*(short*)ptr) ptr += 2; // get to the word with a zero bit
+ if (!~*(char*)ptr) ptr++; // get to the byte with a zero bit
+ return (int)(((ptr - (unsigned char *)addr ) << 3) + g_clr_bit_tbl[*ptr] + 1);
+}
+
+#define ffz(val) _ffz((const unsigned long *)&val)
+
+// Function:
+// finds the first bit, set in the bitmap
+// Parameters:
+// ptr - address of the bitmap
+// bits_size - the size in bits
+// Returns:
+// the index of the first set bit; 'bits_size' - when there is none
+// Notes:
+// presumes, that ptr is aligned on dword
+// presumes, that the map contains an integer number of dwords
+// on bits_size=0 it will return 0, but that is an illegal case
+//
+static __inline int find_first_bit(const unsigned long *addr, unsigned bits_size)
+{
+ unsigned char *ptr = (unsigned char *)addr; // bitmap start
+ unsigned char *end_ptr = (unsigned char *)(addr + BITS_TO_LONGS(bits_size)); // bitmap end
+
+ while (ptr<end_ptr) {
+ if (!*(int*)ptr) { ptr += 4; continue; } // skip zero dword
+ if (!*(short*)ptr) ptr += 2; // get to the non-zero word
+ if (!*(char*)ptr) ptr++; // get to the non-zero byte
+ return (int)(((ptr - (unsigned char *)addr ) << 3) + g_set_bit_tbl[*ptr]);
+ }
+ return bits_size;
+}
+
+static __inline int find_first_zero_bit(const unsigned long *addr, unsigned bits_size)
+{
+ unsigned char *ptr = (unsigned char *)addr; // bitmap start
+ unsigned char *end_ptr = (unsigned char *)(addr + BITS_TO_LONGS(bits_size)); // bitmap end
+
+ while (ptr<end_ptr) {
+ if (!~*(int*)ptr) { ptr += 4; continue; } // skip dword w/o zero bits
+ if (!~*(short*)ptr) ptr += 2; // get to the word with zero bits
+ if (!~*(char*)ptr) ptr++; // get to the byte with zero bits
+ return (int)(((ptr - (unsigned char *)addr ) << 3) + g_clr_bit_tbl[*ptr]);
+ }
+ return bits_size;
+}
+
+
+/**
+* find_next_zero_bit - find the first zero bit in a memory region
+* @addr: The address to base the search on
+* @offset: The bitnumber to start searching at
+* @bits_size: The maximum size to search
+*
+* Returns the bit-number of the first zero bit, not the number of the byte
+* containing a bit. If not found - returns 'bits_size'
+*/
+static __inline int find_next_zero_bit(const unsigned long *addr, int bits_size, int offset)
+{
+ int res;
+ int ix = offset & 31;
+ int set = offset & ~31;
+ const unsigned long *p = addr + (set >> 5);
+
+ // search within the first word if the offset is not word-aligned
+ if (ix) {
+ res = _ffz_raw(p, ix);
+ if (res)
+ return set + res - 1;
+ ++p;
+ set += BITS_PER_LONG;
+ }
+
+ // search the rest of the bitmap
+ res = find_first_zero_bit(p, bits_size - (unsigned)(32 * (p - addr)));
+ return res + set;
+}
+
+/* The function works only for 32-bit values (unlike in Linux) */
+/* on val=0 will return '-1' */
+static inline int ilog2(u32 val)
+{
+ ASSERT(val);
+ return fls(val) - 1;
+}
+
+static inline BOOLEAN is_power_of_2(unsigned long n)
+{
+ return (!!n & !(n & (n-1))) ? TRUE : FALSE;
+}
+
+static inline unsigned long roundup_pow_of_two(unsigned long x)
+{
+ return (1UL << fls(x - 1));
+}
+
+
+#define DECLARE_BITMAP(name,bits) \
+ unsigned long name[BITS_TO_LONGS(bits)]
+
+#define BITMAP_LAST_WORD_MASK(nbits) \
+ ( ((nbits) % BITS_PER_LONG) ? (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL )
+
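A minimal sketch of the bit-scanning helpers in use (illustrative only; the bitmap contents are arbitrary):

static void bitscan_example( void )
{
    unsigned long map[2] = { 0xffffffff, 0x0000ffff };
    int bit;

    bit = find_first_zero_bit( map, 64 );    /* -> 48 */
    bit = find_next_zero_bit( map, 64, 50 ); /* -> 50 */
    bit = ilog2( 64 );                       /* -> 6 */
    bit = (int)roundup_pow_of_two( 100 );    /* -> 128 */
}
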
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_sk_buff.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_sk_buff.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_sk_buff.h (revision 6862)
@@ -0,0 +1,107 @@
+#pragma once
+
+#include "l2w_network_headers.h"
+
+struct sk_buff
+{
+ unsigned char *head;
+ unsigned char *data;
+ unsigned char *tail;
+ unsigned char *end;
+ struct ethhdr *mac;
+ u32 len;
+};
+
+#define eth_hdr(skb) (skb)->mac
+
+static inline struct sk_buff *dev_alloc_skb(u32 length)
+{
+ struct sk_buff *skb = NULL;
+
+ skb = (struct sk_buff *) kmalloc(sizeof(struct sk_buff), GFP_KERNEL);
+
+ if(skb != NULL)
+ {
+ skb->head = skb->data = skb->tail = (unsigned char *) kmalloc(length, GFP_KERNEL);
+ skb->end = skb->head + length;
+ }
+ return skb;
+}
+
+static inline void skb_reserve(struct sk_buff *skb, u32 length)
+{
+ skb->data += length;
+ skb->tail += length;
+}
+
+static inline void kfree_skb(struct sk_buff *skb)
+{
+ kfree(skb->head);
+ kfree(skb);
+}
+
+/*
+* Function: skb_put
+* Description: This function extends the used data area of the buffer.
+* If this would exceed the total buffer size, NULL is returned. Otherwise a pointer to the first byte of the extra data is returned.
+*/
+static inline unsigned char* skb_put(struct sk_buff *skb, u32 length)
+{
+ unsigned char *prev_tail = NULL;
+
+ if(skb->tail + length > skb->end)
+ {
+ return NULL;
+ }
+ prev_tail = skb->tail;
+ skb->tail += length;
+ skb->len += length;
+ return prev_tail;
+}
+
+static inline void skb_set_mac_header(struct sk_buff *skb, u32 offset)
+{
+ skb->mac = (struct ethhdr *) (skb->data + offset);
+}
+
+/*
+* Function: skb_pull
+* Description: This function removes data from the start of a buffer, returning the memory to the headroom.
+* A pointer to the next data in the buffer is returned, or NULL if there is not enough data. Once the data has been pulled, future pushes will overwrite the old data.
+*/
+static inline unsigned char * skb_pull (struct sk_buff * skb, u32 length)
+{
+ if(skb->data + length >= skb->tail)
+ {
+ return NULL;
+ }
+ skb->data += length;
+ skb->len -= length;
+ return skb->data;
+}
+
+/*
+* Function: skb_push
+* Description: This function extends the used data area of the buffer at the buffer start.
+* If this would exceed the total buffer headroom, NULL is returned. Otherwise a pointer to the first byte of the extra data is returned.
+*/
+static inline unsigned char * skb_push (struct sk_buff * skb, unsigned int length)
+{
+ if(skb->data - length < skb->head)
+ {
+ return NULL;
+ }
+ skb->data -= length;
+ skb->len += length;
+ return skb->data;
+}
+
+/*
+* Function: skb_reset_mac_header
+* Description: Set the MAC header to the beginning of the data.
+*/
+static inline void skb_reset_mac_header(struct sk_buff * skb)
+{
+ skb->mac = (struct ethhdr *) skb->data;
+}
+
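A minimal sketch of the sk_buff shim in use (illustrative only; the 256-byte buffer and 100-byte payload are arbitrary):

static void skb_example( void )
{
    struct sk_buff *skb = dev_alloc_skb( 256 );

    if (!skb)
        return;
    skb_reserve( skb, ETH_HLEN );      /* leave headroom for the MAC header */
    skb_put( skb, 100 );               /* claim 100 bytes of payload */
    skb_push( skb, ETH_HLEN );         /* prepend the MAC header */
    skb_reset_mac_header( skb );
    /* ... fill eth_hdr(skb) and the payload, hand skb->data to the send path ... */
    kfree_skb( skb );
}
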
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w.h (revision 6862)
@@ -0,0 +1,422 @@
+#pragma once
+
+#ifndef L2W_H
+#define L2W_H
+
+////////////////////////////////////////////////////////
+//
+// GENERAL INCLUDES
+//
+////////////////////////////////////////////////////////
+
+// OS
+#include <ntddk.h>
+//#include <iointex.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <errno.h>
+#define NTSTRSAFE_LIB
+#include <ntstrsafe.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+// complib
+#include <complib/cl_timer.h>
+#include <complib/cl_qlist.h>
+
+// mlx4
+#include "vc.h"
+
+////////////////////////////////////////////////////////
+//
+// LITERALS
+//
+////////////////////////////////////////////////////////
+
+#define BITS_PER_LONG 32
+#define N_BARS 3
+#define HZ 1000000 /* 1 sec in usecs */
+#define EOPNOTSUPP 95
+#define ETH_LENGTH_OF_ADDRESS 6
+
+
+////////////////////////////////////////////////////////
+//
+// SUBSTITUTIONS
+//
+////////////////////////////////////////////////////////
+
+#define BUG_ON(exp) ASSERT(!(exp)) /* in Linux this is followed by panic()! */
+
+#pragma warning(disable : 4995) // warning C4995: name was marked as #pragma deprecated (_snprintf)
+#define snprintf _snprintf
+#define printk cl_dbg_out
+#define KERN_ERR "err:"
+#define KERN_WARNING "warn:"
+#define KERN_DEBUG "dbg:"
+#define BUG()
+
+// memory barriers
+#define wmb KeMemoryBarrier
+#define rmb KeMemoryBarrier
+#define mb KeMemoryBarrier
+// TODO: can we make it empty? In Linux it is an empty macro for x86 & x64
+#define mmiowb KeMemoryBarrier
+
+
+// gcc compiler attributes
+#define __devinit
+#define __devinitdata
+#define __init
+#define __exit
+#define __force
+#define __iomem
+#define __attribute_const__
+#define likely(x) (x)
+#define unlikely(x) (x)
+#define __attribute__(a)
+#define __bitwise
+
+// container_of
+#define container_of CONTAINING_RECORD
+
+// inline
+#define inline __inline
+
+// new Linux event mechanism
+#define complete(a) wake_up(a)
+
+// convert
+#define __constant_htons CL_HTON16
+#define __constant_cpu_to_be32 CL_HTON32
+
+// various
+#define __always_inline inline
+
+#if (WINVER < _WIN32_WINNT_WIN6)
+#define num_possible_cpus() KeNumberProcessors
+#else
+#define num_possible_cpus() KeQueryMaximumProcessorCount()
+#endif
+
+////////////////////////////////////////////////////////
+//
+// TYPES
+//
+////////////////////////////////////////////////////////
+
+#define true (u8)1
+#define false (u8)0
+
+// basic types
+typedef unsigned char u8, __u8;
+typedef unsigned short int u16, __u16;
+typedef unsigned int u32, __u32;
+typedef unsigned __int64 u64, __u64;
+typedef char s8, __s8;
+typedef short int s16, __s16;
+typedef int s32, __s32;
+typedef __int64 s64, __s64;
+
+#ifndef __cplusplus
+typedef u8 bool;
+#endif
+
+// inherited
+typedef u16 __le16;
+typedef u16 __be16;
+typedef u32 __le32;
+typedef u32 __be32;
+typedef u64 __le64;
+typedef u64 __be64;
+typedef u16 be16;
+typedef u32 le32;
+typedef u32 be32;
+typedef u64 le64;
+typedef u64 be64;
+typedef u64 io_addr_t;
+
+// dummy function
+typedef void (*MT_EMPTY_FUNC)();
+
+// PCI BAR descriptor
+typedef enum _hca_bar_type
+{
+ HCA_BAR_TYPE_HCR,
+ HCA_BAR_TYPE_UAR,
+ HCA_BAR_TYPE_DDR,
+ HCA_BAR_TYPE_MAX
+
+} hca_bar_type_t;
+
+
+typedef struct _hca_bar
+{
+ uint64_t phys;
+ void *virt;
+ SIZE_T size;
+
+} hca_bar_t;
+
+struct msix_saved_info {
+ PVOID vca; /* MSI-X Vector Table card address */
+ PVOID mca; /* MSI-X Mask Table card address */
+ PVOID vsa; /* MSI-X Vector Table saved address */
+ PVOID msa; /* MSI-X Mask Table saved address */
+ ULONG vsz; /* MSI-X Vector Table size */
+ ULONG msz; /* MSI-X Mask Table size */
+ int num; /* number of supported MSI-X vectors */
+ int valid; /* the structure is valid */
+};
+
+struct msix_map {
+ KAFFINITY cpu; /* affinity of this MSI-X vector */
+ int eq_ix; /* EQ index in the array of EQs */
+ int ref_cnt; /* number of users */
+};
+
+typedef struct _MLX4_ST_DEVICE *PMLX4_ST_DEVICE;
+
+// interface structure between Upper and Low Layers of the driver
+struct pci_dev
+{
+ // driver: OS/platform resources
+ BUS_INTERFACE_STANDARD bus_pci_ifc;
+ PCI_COMMON_CONFIG pci_cfg_space;
+ struct msix_saved_info msix_info;
+ struct msix_map* p_msix_map;
+ uplink_info_t uplink_info;
+ // driver: card resources
+ hca_bar_t bar[N_BARS];
+ CM_PARTIAL_RESOURCE_DESCRIPTOR int_info; /* HCA interrupt resources */
+ // driver: various objects and info
+ USHORT ven_id;
+ USHORT dev_id;
+ USHORT sub_vendor_id;
+ USHORT sub_system_id;
+ UCHAR revision_id;
+ UCHAR partition_status;
+ DMA_ADAPTER * p_dma_adapter; /* HCA adapter object */
+ DEVICE_OBJECT * p_self_do; /* mlx4_bus's FDO */
+ DEVICE_OBJECT * pdo; /* mlx4_bus's PDO */
+ PVOID p_wdf_device; /* wdf_device */
+ LONG ib_hca_created;
+ // mlx4_ib: various objects and info
+ struct ib_device * ib_dev;
+ // mlx4_net: various objects and info
+ struct mlx4_dev * dev;
+ volatile long dpc_lock;
+ PUCHAR vpd;
+ int vpd_size;
+ WCHAR location[36]; /* bus+func+dev */
+ int pci_bus;
+ int pci_device;
+ int pci_func;
+ USHORT devfn;
+ char name[24]; /* mlx4_role_bus_func_dev */
+ // statistics
+ PMLX4_ST_DEVICE p_stat;
+ struct mlx4_priv *priv;
+//
+// WDM interrupts
+//
+ // legacy
+ PKINTERRUPT int_obj; /* HCA interrupt object */
+ KSPIN_LOCK isr_lock; /* lock for the ISR */
+ // MSI-X interrupts
+ u8 n_msi_vectors_alloc;/* number of allocated MSI vectors */
+ u8 n_msi_vectors; /* number of MSI vectors; 0 - no MSI */
+ ULONG version;
+ int legacy_connect;
+ // others
+ int is_reset_prohibited;
+ boolean_t start_event_taken;
+
+ USHORT clp_ver;
+ KEVENT remove_dev_lock; /* lock remove_one process */
+};
+
+/* DPC */
+typedef void (*dpc_t)( struct _KDPC *, PVOID, PVOID, PVOID );
+
+#ifdef SUPPORTED_ONLY_IN_LINUX
+struct attribute {
+ const char *name;
+ void *owner;
+ u32 mode;
+};
+
+struct device_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct device *dev, struct device_attribute *attr, char *buf);
+ ssize_t (*store)(struct device *dev, struct device_attribute *attr, const char *buf, size_t count);
+};
+#endif
+
+////////////////////////////////////////////////////////
+//
+// MACROS
+//
+////////////////////////////////////////////////////////
+
+// conversions
+#define swab32(a) _byteswap_ulong((ULONG)(a))
+#define cpu_to_be16(a) _byteswap_ushort((USHORT)(a))
+#define be16_to_cpu(a) _byteswap_ushort((USHORT)(a))
+#define cpu_to_be32(a) _byteswap_ulong((ULONG)(a))
+#define be32_to_cpu(a) _byteswap_ulong((ULONG)(a))
+#define cpu_to_be64(a) _byteswap_uint64((UINT64)(a))
+#define cpu_to_be24(dst, src) {(dst)[0] = (u8) (((src) >> 16) & 0xff); (dst)[1] = (u8) (((src) >> 8) & 0xff); (dst)[2] = (u8) ((src) & 0xff);}
+#define be24_to_cpu(a) (u32)((a)[0] << 16 | (a)[1] << 8 | (a)[2])
+#define be64_to_cpu(a) _byteswap_uint64((UINT64)(a))
+#define be64_to_cpup(p) _byteswap_uint64(*(PUINT64)(p))
+#define be32_to_cpup(p) _byteswap_ulong(*(PULONG)(p))
+#define be16_to_cpup(p) _byteswap_ushort(*(PUSHORT)(p))
+
+// ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+// ALIGN
+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
+#define PTR_ALIGN(size) (((size) + sizeof(void*) - 1) & ~(sizeof(void*) - 1))
+
+// There is a bug in the Microsoft compiler: when _byteswap_uint64() gets an expression,
+// it evaluates the expression but doesn't swap the dwords.
+// So, there's a workaround
+#ifdef BYTESWAP_UINT64_BUG_FIXED
+#define CPU_2_BE64_PREP
+#define CPU_2_BE64(x) cl_hton64(x)
+#else
+#define CPU_2_BE64_PREP unsigned __int64 __tmp__
+#define CPU_2_BE64(x) ( __tmp__ = x, cl_hton64(__tmp__) )
+#endif
+
+#define ERR_PTR(error) ((void*)(LONG_PTR)(error))
+#define PTR_ERR(ptr) ((long)(LONG_PTR)(void*)(ptr))
+#define ETH_ALEN 6
+
+//TODO: there are 2 assumptions here:
+// - pointer can't be too big (around -1)
+// - error can't be bigger than 1000
+#define IS_ERR(ptr) ((ULONG_PTR)ptr > (ULONG_PTR)-1000L)
+
+#define BITS_TO_LONGS(bits) \
+ (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+
+#ifndef ETIMEDOUT
+#define ETIMEDOUT (110)
+#endif
+
+#ifdef PAGE_ALIGN
+#undef PAGE_ALIGN
+#define PAGE_ALIGN(Va) ((u64)((ULONG_PTR)(Va) & ~(PAGE_SIZE - 1)))
+#endif
+
+#define NEXT_PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/* typed minimum */
+#define min_t(type,x,y) ((type)(x) < (type)(y) ? (type)(x) : (type)(y))
+#define max_t(type,x,y) ((type)(x) > (type)(y) ? (type)(x) : (type)(y))
+
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+#define EXPORT_SYMBOL(name)
+#ifndef USE_WDM_INTERRUPTS
+#define free_irq(pdev)
+#endif
+
+static inline NTSTATUS errno_to_ntstatus(int err)
+{
+#define MAP_ERR(err,ntstatus) case err: status = ntstatus; break
+ NTSTATUS status;
+
+ if (!err)
+ return STATUS_SUCCESS;
+
+ if (err < 0)
+ err = -err;
+ switch (err) {
+ MAP_ERR( ENOENT, STATUS_NOT_FOUND );
+ MAP_ERR( EAGAIN, STATUS_DEVICE_BUSY );
+ MAP_ERR( ENOMEM, STATUS_NO_MEMORY );
+ MAP_ERR( EACCES, STATUS_ACCESS_DENIED );
+ MAP_ERR( EFAULT, STATUS_DRIVER_INTERNAL_ERROR );
+ MAP_ERR( EBUSY, STATUS_INSUFFICIENT_RESOURCES );
+ MAP_ERR( ENODEV, STATUS_NOT_SUPPORTED );
+ MAP_ERR( EINVAL, STATUS_INVALID_PARAMETER );
+ MAP_ERR( ENOSYS, STATUS_NOT_SUPPORTED );
+ default:
+ status = STATUS_UNSUCCESSFUL;
+ break;
+ }
+ return status;
+}
+
+
+////////////////////////////////////////////////////////
+//
+// PROTOTYPES
+//
+////////////////////////////////////////////////////////
+
+SIZE_T strlcpy(char *dest, const void *src, SIZE_T size);
+int core_init();
+void core_cleanup();
+int l2w_init();
+void l2w_cleanup();
+
+
+////////////////////////////////////////////////////////
+//
+// SPECIFIC INCLUDES
+//
+////////////////////////////////////////////////////////
+
+struct mlx4_dev;
+struct mlx4_priv;
+
+#include <l2w_atomic.h>
+#include <l2w_bit.h>
+#include <l2w_bitmap.h>
+#include "l2w_debug.h"
+#include <l2w_memory.h>
+#include <l2w_umem.h>
+#include <l2w_list.h>
+#include <l2w_pci.h>
+#include <l2w_pcipool.h>
+#include "l2w_radix.h"
+#include <l2w_spinlock.h>
+#include <l2w_sync.h>
+#include <l2w_time.h>
+#include <l2w_network_headers.h>
+#include <l2w_workqueue.h>
+#include <l2w_sk_buff.h>
+#include <l2w_debug.h>
+#include <l2w_scsi.h>
+
+#include "device.h"
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+static inline int mlx4_is_barred(struct mlx4_dev *dev)
+{
+ return dev->flags & MLX4_FLAG_RESET_DRIVER;
+}
+
+static inline int mlx4_is_in_reset(struct mlx4_dev *dev)
+{
+ return dev->flags & MLX4_FLAG_RESET_STARTED;
+}
+
+int parse_dev_location(
+ const char *buffer,
+ const char *format,
+ int *bus, int *dev, int *func
+);
+
+#endif
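
A minimal sketch of the conversion helpers defined above (illustrative only, not part of the patch):

static NTSTATUS conv_example( u32 *out )
{
    *out = cpu_to_be32( 0x12345678 );    /* byte-swapped: 0x78563412 */
    return errno_to_ntstatus( -ENOMEM ); /* maps to STATUS_NO_MEMORY */
}
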
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_spinlock.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_spinlock.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_spinlock.h (revision 6862)
@@ -0,0 +1,148 @@
+#pragma once
+
+#include <complib/cl_spinlock.h>
+
+#if 1
+
+typedef cl_spinlock_t spinlock_t;
+
+static inline void spin_lock_init(
+ IN spinlock_t* const p_spinlock )
+{
+ cl_spinlock_init( p_spinlock );
+}
+
+#define spin_lock cl_spinlock_acquire
+#define spin_unlock cl_spinlock_release
+
+CL_INLINE void
+spin_lock_dpc(
+ IN cl_spinlock_t* const p_spinlock )
+{
+ ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+ KeAcquireSpinLockAtDpcLevel( &p_spinlock->lock );
+}
+
+CL_INLINE void
+spin_unlock_dpc(
+ IN cl_spinlock_t* const p_spinlock )
+{
+ ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+ KeReleaseSpinLockFromDpcLevel( &p_spinlock->lock );
+}
+
+#else
+typedef struct spinlock {
+ KSPIN_LOCK lock;
+ KLOCK_QUEUE_HANDLE lockh;
+ KIRQL irql;
+} spinlock_t;
+
+
+static inline void spin_lock_init(
+ IN spinlock_t* const p_spinlock )
+{
+ KeInitializeSpinLock( &p_spinlock->lock );
+}
+
+static inline void
+spin_lock(
+ IN spinlock_t* const l)
+{
+ KIRQL irql = KeGetCurrentIrql();
+
+ ASSERT( l && irql <= DISPATCH_LEVEL );
+
+ if (irql == DISPATCH_LEVEL)
+ KeAcquireInStackQueuedSpinLockAtDpcLevel( &l->lock, &l->lockh );
+ else
+ KeAcquireInStackQueuedSpinLock( &l->lock, &l->lockh );
+ l->irql = irql;
+}
+
+static inline void
+spin_unlock(
+ IN spinlock_t* const l)
+{
+ ASSERT( l && KeGetCurrentIrql() == DISPATCH_LEVEL );
+ if (l->irql == DISPATCH_LEVEL)
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &l->lockh );
+ else
+ KeReleaseInStackQueuedSpinLock( &l->lockh );
+}
+
+/* to be used only at DPC level */
+static inline void
+spin_lock_dpc(
+ IN spinlock_t* const l)
+{
+ ASSERT( l && KeGetCurrentIrql() == DISPATCH_LEVEL );
+ KeAcquireInStackQueuedSpinLockAtDpcLevel( &l->lock, &l->lockh );
+}
+
+/* to be used only at DPC level */
+static inline void
+spin_unlock_dpc(
+ IN spinlock_t* const l)
+{
+ ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &l->lockh );
+}
+
+static inline void
+spin_lock_sync(
+ IN spinlock_t* const l )
+{
+ KLOCK_QUEUE_HANDLE lockh;
+ ASSERT( l && KeGetCurrentIrql() <= DISPATCH_LEVEL );
+ KeAcquireInStackQueuedSpinLock ( &l->lock, &lockh );
+ KeReleaseInStackQueuedSpinLock( &lockh );
+}
+
+#endif
+
+#define DEFINE_SPINLOCK(lock) spinlock_t lock
+
+static inline void
+spin_lock_irqsave(
+ IN spinlock_t* const l,
+ IN unsigned long * flags)
+{
+ UNUSED_PARAM(flags);
+ spin_lock(l);
+}
+
+static inline void
+spin_unlock_irqrestore(
+ IN spinlock_t* const l,
+ IN unsigned long flags)
+{
+ UNUSED_PARAM(flags);
+ spin_unlock(l);
+}
+
+static inline void
+spin_lock_sync(
+ IN spinlock_t* const l )
+{
+ KLOCK_QUEUE_HANDLE lockh;
+ ASSERT( l && KeGetCurrentIrql() <= DISPATCH_LEVEL );
+ KeAcquireInStackQueuedSpinLock ( &l->lock, &lockh );
+ KeReleaseInStackQueuedSpinLock( &lockh );
+}
+
+/* we are working from DPC level, so we can use usual spinlocks */
+#define spin_lock_irq spin_lock
+#define spin_unlock_irq spin_unlock
+#define spin_lock_nested(a,b) spin_lock(a)
+
+/* Windows doesn't support this kind of spinlock so far, but maybe tomorrow ... */
+#define rwlock_init spin_lock_init
+#define read_lock_irqsave spin_lock_irqsave
+#define read_unlock_irqrestore spin_unlock_irqrestore
+#define write_lock_irq spin_lock_irq
+#define write_unlock_irq spin_unlock_irq
+
+// rw_lock
+typedef spinlock_t rwlock_t;
+
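A minimal sketch of the spinlock wrappers in use (illustrative only, not part of the patch):

static void spinlock_example( void )
{
    spinlock_t lock;
    unsigned long flags;

    spin_lock_init( &lock );
    spin_lock_irqsave( &lock, &flags );
    /* ... touch data shared with a DPC ... */
    spin_unlock_irqrestore( &lock, flags );
}
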
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_network_headers.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_network_headers.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_network_headers.h (revision 6862)
@@ -0,0 +1,16 @@
+#pragma once
+
+#define ETH_ALEN 6 /* MAC address length in bytes */
+#define ETH_HLEN 14 /* MAC header length in bytes */
+
+#pragma pack(push, 1)
+
+struct ethhdr
+{
+ unsigned char h_dest[ETH_ALEN]; /* destination MAC */
+ unsigned char h_source[ETH_ALEN]; /* source MAC */
+ unsigned short h_proto; /* next protocol type */
+};
+
+#pragma pack(pop)
+
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_debug.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_debug.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_debug.h (revision 6862)
@@ -0,0 +1,63 @@
+#pragma once
+
+VOID
+WriteEventLogEntryStr(
+ PVOID pi_pIoObject,
+ ULONG pi_ErrorCode,
+ ULONG pi_UniqueErrorCode,
+ ULONG pi_FinalStatus,
+ PWCHAR pi_InsertionStr,
+ ULONG pi_nDataItems,
+ ...
+ );
+
+VOID
+WriteEventLogEntryData(
+ PVOID pi_pIoObject,
+ ULONG pi_ErrorCode,
+ ULONG pi_UniqueErrorCode,
+ ULONG pi_FinalStatus,
+ ULONG pi_nDataItems,
+ ...
+ );
+
+struct mlx4_dev;
+
+void
+mlx4_err(
+ IN struct mlx4_dev * mdev,
+ IN char* format,
+ ...
+ );
+void
+
+mlx4_warn(
+ IN struct mlx4_dev * mdev,
+ IN char* format,
+ ...
+ );
+
+void
+mlx4_dbg(
+ IN struct mlx4_dev * mdev,
+ IN char* format,
+ ...
+ );
+
+VOID
+dev_err(
+ IN struct mlx4_dev ** mdev,
+ IN char* format,
+ ...
+ );
+
+VOID
+dev_info(
+ IN struct mlx4_dev ** p_mdev,
+ IN char* format,
+ ...
+ );
+
+#define mlx4_info mlx4_dbg
+#define dev_warn dev_err
+
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_workqueue.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_workqueue.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_workqueue.h (revision 6862)
@@ -0,0 +1,153 @@
+#pragma once
+
+#include <complib/cl_thread.h>
+#include <complib/cl_event.h>
+#include "l2w.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define NAME_LENGTH 255
+
+struct workqueue_struct
+{
+ char name[NAME_LENGTH];
+ cl_thread_t thread;
+ struct list_head works;
+ spinlock_t lock;
+ cl_event_t work_event;
+ int terminate_flag;
+ struct work_struct *current_work;
+};
+
+struct work_struct;
+
+typedef void (*work_func_t)(struct work_struct *work);
+
+struct work_struct
+{
+ struct list_head list;
+ work_func_t func;
+
+ struct workqueue_struct *wq;
+};
+
+struct delayed_work
+{
+ struct work_struct work;
+ struct timer_list timer;
+};
+
+/* init_workqueues - init provider (required for delayed work) */
+int init_workqueues();
+void shutdown_workqueues();
+
+static void workqueue_do_work(struct workqueue_struct *queue)
+{
+ struct work_struct *work = NULL;
+
+ spin_lock(&queue->lock);
+ while(! list_empty(&queue->works))
+ {
+ work = container_of(queue->works.Flink, struct work_struct, list);
+ list_del(&work->list);
+ queue->current_work = work;
+ work->wq = NULL;
+ spin_unlock(&queue->lock);
+ work->func(work);
+ spin_lock(&queue->lock);
+ queue->current_work = NULL;
+ }
+ spin_unlock(&queue->lock);
+}
+
+static void workqueue_func(void *context)
+{
+ struct workqueue_struct *queue = (struct workqueue_struct *) context;
+
+ while(! queue->terminate_flag)
+ {
+ cl_event_wait_on(&queue->work_event, EVENT_NO_TIMEOUT, FALSE);
+ workqueue_do_work(queue);
+ }
+}
+
+static inline struct workqueue_struct *create_singlethread_workqueue(const char *name)
+{
+ struct workqueue_struct *queue = NULL;
+ cl_status_t status;
+
+ queue = (struct workqueue_struct *) kmalloc(sizeof(struct workqueue_struct), GFP_KERNEL);
+ if(queue == NULL)
+ {
+ return NULL;
+ }
+ memset(queue, 0, sizeof(struct workqueue_struct));
+ strncpy(queue->name, name, NAME_LENGTH);
+
+ INIT_LIST_HEAD(&queue->works);
+ spin_lock_init(&queue->lock);
+ cl_event_init(&queue->work_event, FALSE);
+
+ status = cl_thread_init(&queue->thread, workqueue_func, queue, name);
+
+ if(status != CL_SUCCESS)
+ {
+ kfree(queue);
+ return NULL;
+ }
+ return queue;
+}
+
+static inline void flush_workqueue(struct workqueue_struct *queue)
+{
+ workqueue_do_work(queue);
+}
+
+static inline void destroy_workqueue(struct workqueue_struct *queue)
+{
+ // set the exit flag
+ queue->terminate_flag = TRUE;
+ cl_event_signal(&queue->work_event);
+
+ // wait for thread to exit
+ cl_thread_destroy(&queue->thread);
+
+ cl_event_destroy(&queue->work_event);
+
+ kfree(queue);
+}
+
+#define INIT_WORK(_work, _func) { (_work)->func = (_func); INIT_LIST_HEAD(&(_work)->list); }
+
+static inline int queue_work(struct workqueue_struct *queue,
+ struct work_struct *work)
+{
+ if(queue == NULL || work == NULL)
+ {
+ return -1;
+ }
+
+ spin_lock(&queue->lock);
+ list_add_tail(&work->list, &queue->works);
+ work->wq = queue;
+ spin_unlock(&queue->lock);
+ cl_event_signal(&queue->work_event);
+ return 0;
+}
+
+int cancel_work_sync(struct work_struct *work);
+
+#define INIT_DELAYED_WORK(_delayed_work, func) { INIT_WORK(&(_delayed_work)->work, func); }
+
+int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
+
+/* Reliably kill delayed work */
+void cancel_delayed_work_sync(struct delayed_work *work);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
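A minimal sketch of the workqueue shim in use (illustrative only; 'example_work_func' is a hypothetical handler):

static void example_work_func( struct work_struct *work )
{
    UNUSED_PARAM(work);
    /* ... runs in the workqueue thread ... */
}

static void workqueue_example( void )
{
    struct workqueue_struct *wq = create_singlethread_workqueue( "example_wq" );
    struct work_struct work;

    if (!wq)
        return;
    INIT_WORK( &work, example_work_func );
    queue_work( wq, &work );
    flush_workqueue( wq );      /* drains any queued work synchronously */
    destroy_workqueue( wq );
}
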
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_time.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_time.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_time.h (revision 6862)
@@ -0,0 +1,58 @@
+#pragma once
+
+#include <complib/cl_timer.h>
+
+// returns current time in msecs (u64)
+#define jiffies get_tickcount_in_ms()
+
+// jiffies is measured in msecs
+#define jiffies_to_usecs(msecs) ((msecs)*1000)
+
+
+#define time_after(a,b) ((__int64)(b) - (__int64)(a) < 0)
+#define time_before(a,b) time_after(b,a)
+
+#define time_after_eq(a,b) ((__int64)(a) - (__int64)(b) >= 0)
+#define time_before_eq(a,b) time_after_eq(b,a)
+
+extern u32 g_time_increment;
+extern LARGE_INTEGER g_cmd_interval;
+#define cond_resched() KeDelayExecutionThread( KernelMode, FALSE, &g_cmd_interval )
+
+uint64_t get_tickcount_in_ms(void);
+
+/*
+* Timer
+*/
+
+#define timer_list _cl_timer
+
+static inline void setup_timer(struct timer_list *timer, void (*function)(void*), void* context)
+{
+ cl_timer_init(timer, function, context);
+}
+
+
+static inline void del_timer_sync(struct timer_list *timer)
+{
+ if(timer->pfn_callback)
+ {
+ cl_timer_destroy(timer);
+ }
+}
+
+static inline void del_timer(struct timer_list * timer)
+{
+ if(timer->pfn_callback)
+ {
+ cl_timer_stop(timer);
+ }
+}
+
+static inline void msleep(unsigned int msecs)
+{
+ LARGE_INTEGER interval = {0};
+
+ interval.QuadPart = -10000 * (__int64)msecs; /* relative time, in 100-ns units */
+ KeDelayExecutionThread(KernelMode, FALSE, &interval);
+}
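
A minimal sketch of the time helpers in use (illustrative only; the 100 ms deadline is arbitrary):

static void wait_example( void )
{
    u64 deadline = jiffies + 100;       /* jiffies are in milliseconds here */

    while (!time_after( jiffies, deadline ))
        msleep( 10 );                   /* poll every 10 ms */
}
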
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_scsi.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_scsi.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/l2w_scsi.h (revision 6862)
@@ -0,0 +1,98 @@
+#pragma once
+
+struct scsi_data_buffer {
+ struct sg_table table;
+ unsigned length;
+};
+
+struct scsi_device {
+ unsigned int lun;
+ void *host; /* local port */
+ void *target; /* remote port */
+};
+
+/* SCSI command scratchpad */
+struct scsi_pointer {
+ char *ptr; /* data pointer */
+};
+
+/*
+ * ScsiLun: 8 byte LUN.
+ */
+struct scsi_lun {
+ __u8 scsilun[8];
+};
+
+struct scsi_cmnd
+{
+ struct scsi_device *device;
+
+ enum dma_data_direction sc_data_direction;
+ unsigned short cmd_len;
+
+ void (*scsi_done) (struct scsi_cmnd *); /* Completion function used by low-level driver */
+ int result; /* Status code from lower level driver */
+ unsigned char *cmnd;
+ struct scsi_data_buffer sdb;
+
+#define SCSI_SENSE_BUFFERSIZE 96
+ unsigned char *sense_buffer;
+ /* obtained by REQUEST SENSE when
+ * CHECK CONDITION is received on original
+ * command (auto-sense) */
+
+ struct scsi_pointer SCp; /* Scratchpad used by some host adapters */
+ void *srb; /* windows SRB */
+ void *win_dev; /* windows device extension */
+};
+
+
+static inline unsigned scsi_bufflen(struct scsi_cmnd *cmd)
+{
+ return cmd->sdb.length;
+}
+
+static inline unsigned scsi_sg_count(struct scsi_cmnd *cmd)
+{
+ return cmd->sdb.table.nents;
+}
+
+static inline struct scatterlist *scsi_sglist(struct scsi_cmnd *cmd)
+{
+ return cmd->sdb.table.sgl;
+}
+
+/*
+ * Midlevel queue return values.
+ */
+#define SCSI_MLQUEUE_HOST_BUSY 0x1055
+#define SCSI_MLQUEUE_DEVICE_BUSY 0x1056
+#define SCSI_MLQUEUE_EH_RETRY 0x1057
+#define SCSI_MLQUEUE_TARGET_BUSY 0x1058
+
+/*
+ * Host byte codes
+ */
+
+#define DID_OK 0x00 /* NO error */
+#define DID_NO_CONNECT 0x01 /* Couldn't connect before timeout period */
+#define DID_BUS_BUSY 0x02 /* BUS stayed busy through time out period */
+#define DID_TIME_OUT 0x03 /* TIMED OUT for other reason */
+#define DID_BAD_TARGET 0x04 /* BAD target. */
+#define DID_ABORT 0x05 /* Told to abort for some other reason */
+#define DID_PARITY 0x06 /* Parity error */
+#define DID_ERROR 0x07 /* Internal error */
+#define DID_RESET 0x08 /* Reset by somebody. */
+#define DID_BAD_INTR 0x09 /* Got an interrupt we weren't expecting. */
+#define DID_PASSTHROUGH 0x0a /* Force command past mid-layer */
+#define DID_SOFT_ERROR 0x0b /* The low level driver just wish a retry */
+#define DID_IMM_RETRY 0x0c /* Retry without decrementing retry count */
+#define DID_REQUEUE 0x0d /* Requeue command (no immediate retry) also
+ * without decrementing the retry count */
+#define DID_TRANSPORT_DISRUPTED 0x0e /* Transport error disrupted execution
+ * and the driver blocked the port to
+ * recover the link. Transport class will
+ * retry or fail IO */
+#define DID_TRANSPORT_FAILFAST 0x0f /* Transport class fastfailed the io */
+#define DRIVER_OK 0x00 /* Driver status */
+
Index: B:/users/irena/proj1/trunk/inc/kernel/l2w/ib_pack.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/l2w/ib_pack.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/l2w/ib_pack.h (revision 6862)
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2004 Topspin Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: ib_pack.h 1349 2004-12-16 21:09:43Z roland $
+ */
+
+#ifndef IB_PACK_H
+#define IB_PACK_H
+
+//#include "ib_verbs.h"
+
+enum {
+ IB_LRH_BYTES = 8,
+ IB_ETH_BYTES = 14,
+ IB_GRH_BYTES = 40,
+ IB_BTH_BYTES = 12,
+ IB_DETH_BYTES = 8
+};
+
+struct ib_field {
+ size_t struct_offset_bytes;
+ size_t struct_size_bytes;
+ int offset_words;
+ int offset_bits;
+ int size_bits;
+ char *field_name;
+};
+
+#define RESERVED \
+ .field_name = "reserved"
+
+/*
+ * This macro cleans up the definitions of constants for BTH opcodes.
+ * It is used to define constants such as IB_OPCODE_UD_SEND_ONLY,
+ * which becomes IB_OPCODE_UD + IB_OPCODE_SEND_ONLY, and this gives
+ * the correct value.
+ *
+ * In short, user code should use the constants defined using the
+ * macro rather than worrying about adding together other constants.
+*/
+#define IB_OPCODE(transport, op) \
+ IB_OPCODE_ ## transport ## _ ## op = \
+ IB_OPCODE_ ## transport + IB_OPCODE_ ## op
+
+enum {
+ /* transport types -- just used to define real constants */
+ IB_OPCODE_RC = 0x00,
+ IB_OPCODE_UC = 0x20,
+ IB_OPCODE_RD = 0x40,
+ IB_OPCODE_UD = 0x60,
+
+ /* operations -- just used to define real constants */
+ IB_OPCODE_SEND_FIRST = 0x00,
+ IB_OPCODE_SEND_MIDDLE = 0x01,
+ IB_OPCODE_SEND_LAST = 0x02,
+ IB_OPCODE_SEND_LAST_WITH_IMMEDIATE = 0x03,
+ IB_OPCODE_SEND_ONLY = 0x04,
+ IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE = 0x05,
+ IB_OPCODE_RDMA_WRITE_FIRST = 0x06,
+ IB_OPCODE_RDMA_WRITE_MIDDLE = 0x07,
+ IB_OPCODE_RDMA_WRITE_LAST = 0x08,
+ IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE = 0x09,
+ IB_OPCODE_RDMA_WRITE_ONLY = 0x0a,
+ IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE = 0x0b,
+ IB_OPCODE_RDMA_READ_REQUEST = 0x0c,
+ IB_OPCODE_RDMA_READ_RESPONSE_FIRST = 0x0d,
+ IB_OPCODE_RDMA_READ_RESPONSE_MIDDLE = 0x0e,
+ IB_OPCODE_RDMA_READ_RESPONSE_LAST = 0x0f,
+ IB_OPCODE_RDMA_READ_RESPONSE_ONLY = 0x10,
+ IB_OPCODE_ACKNOWLEDGE = 0x11,
+ IB_OPCODE_ATOMIC_ACKNOWLEDGE = 0x12,
+ IB_OPCODE_COMPARE_SWAP = 0x13,
+ IB_OPCODE_FETCH_ADD = 0x14,
+
+ /* real constants follow -- see comment about above IB_OPCODE()
+ macro for more details */
+
+ /* RC */
+ IB_OPCODE(RC, SEND_FIRST),
+ IB_OPCODE(RC, SEND_MIDDLE),
+ IB_OPCODE(RC, SEND_LAST),
+ IB_OPCODE(RC, SEND_LAST_WITH_IMMEDIATE),
+ IB_OPCODE(RC, SEND_ONLY),
+ IB_OPCODE(RC, SEND_ONLY_WITH_IMMEDIATE),
+ IB_OPCODE(RC, RDMA_WRITE_FIRST),
+ IB_OPCODE(RC, RDMA_WRITE_MIDDLE),
+ IB_OPCODE(RC, RDMA_WRITE_LAST),
+ IB_OPCODE(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE),
+ IB_OPCODE(RC, RDMA_WRITE_ONLY),
+ IB_OPCODE(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
+ IB_OPCODE(RC, RDMA_READ_REQUEST),
+ IB_OPCODE(RC, RDMA_READ_RESPONSE_FIRST),
+ IB_OPCODE(RC, RDMA_READ_RESPONSE_MIDDLE),
+ IB_OPCODE(RC, RDMA_READ_RESPONSE_LAST),
+ IB_OPCODE(RC, RDMA_READ_RESPONSE_ONLY),
+ IB_OPCODE(RC, ACKNOWLEDGE),
+ IB_OPCODE(RC, ATOMIC_ACKNOWLEDGE),
+ IB_OPCODE(RC, COMPARE_SWAP),
+ IB_OPCODE(RC, FETCH_ADD),
+
+ /* UC */
+ IB_OPCODE(UC, SEND_FIRST),
+ IB_OPCODE(UC, SEND_MIDDLE),
+ IB_OPCODE(UC, SEND_LAST),
+ IB_OPCODE(UC, SEND_LAST_WITH_IMMEDIATE),
+ IB_OPCODE(UC, SEND_ONLY),
+ IB_OPCODE(UC, SEND_ONLY_WITH_IMMEDIATE),
+ IB_OPCODE(UC, RDMA_WRITE_FIRST),
+ IB_OPCODE(UC, RDMA_WRITE_MIDDLE),
+ IB_OPCODE(UC, RDMA_WRITE_LAST),
+ IB_OPCODE(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE),
+ IB_OPCODE(UC, RDMA_WRITE_ONLY),
+ IB_OPCODE(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
+
+ /* RD */
+ IB_OPCODE(RD, SEND_FIRST),
+ IB_OPCODE(RD, SEND_MIDDLE),
+ IB_OPCODE(RD, SEND_LAST),
+ IB_OPCODE(RD, SEND_LAST_WITH_IMMEDIATE),
+ IB_OPCODE(RD, SEND_ONLY),
+ IB_OPCODE(RD, SEND_ONLY_WITH_IMMEDIATE),
+ IB_OPCODE(RD, RDMA_WRITE_FIRST),
+ IB_OPCODE(RD, RDMA_WRITE_MIDDLE),
+ IB_OPCODE(RD, RDMA_WRITE_LAST),
+ IB_OPCODE(RD, RDMA_WRITE_LAST_WITH_IMMEDIATE),
+ IB_OPCODE(RD, RDMA_WRITE_ONLY),
+ IB_OPCODE(RD, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
+ IB_OPCODE(RD, RDMA_READ_REQUEST),
+ IB_OPCODE(RD, RDMA_READ_RESPONSE_FIRST),
+ IB_OPCODE(RD, RDMA_READ_RESPONSE_MIDDLE),
+ IB_OPCODE(RD, RDMA_READ_RESPONSE_LAST),
+ IB_OPCODE(RD, RDMA_READ_RESPONSE_ONLY),
+ IB_OPCODE(RD, ACKNOWLEDGE),
+ IB_OPCODE(RD, ATOMIC_ACKNOWLEDGE),
+ IB_OPCODE(RD, COMPARE_SWAP),
+ IB_OPCODE(RD, FETCH_ADD),
+
+ /* UD */
+ IB_OPCODE(UD, SEND_ONLY),
+ IB_OPCODE(UD, SEND_ONLY_WITH_IMMEDIATE)
+};
+
+enum {
+ IB_LNH_RAW = 0,
+ IB_LNH_IP = 1,
+ IB_LNH_IBA_LOCAL = 2,
+ IB_LNH_IBA_GLOBAL = 3
+};
+
+union ib_gid {
+ u8 raw[16];
+ struct {
+ __be64 subnet_prefix;
+ __be64 interface_id;
+ } global;
+};
+
+struct ib_unpacked_lrh {
+ u8 virtual_lane;
+ u8 link_version;
+ u8 service_level;
+ u8 link_next_header;
+ __be16 destination_lid;
+ __be16 packet_length;
+ __be16 source_lid;
+};
+
+struct ib_unpacked_grh {
+ u8 ip_version;
+ u8 traffic_class;
+ __be32 flow_label;
+ __be16 payload_length;
+ u8 next_header;
+ u8 hop_limit;
+ union ib_gid source_gid;
+ union ib_gid destination_gid;
+};
+
+struct ib_unpacked_bth {
+ u8 opcode;
+ u8 solicited_event;
+ u8 mig_req;
+ u8 pad_count;
+ u8 transport_header_version;
+ __be16 pkey;
+ __be32 destination_qpn;
+ u8 ack_req;
+ __be32 psn;
+};
+
+struct ib_unpacked_deth {
+ __be32 qkey;
+ __be32 source_qpn;
+};
+
+struct ib_unpacked_eth {
+ u8 dmac_h[4];
+ u8 dmac_l[2];
+ u8 smac_h[2];
+ u8 smac_l[4];
+ __be16 type;
+};
+
+
+struct ib_ud_header {
+ struct ib_unpacked_lrh lrh;
+ int grh_present;
+ struct ib_unpacked_grh grh;
+ struct ib_unpacked_bth bth;
+ struct ib_unpacked_deth deth;
+ int immediate_present;
+ __be32 immediate_data;
+};
+
+
+
+struct eth_ud_header {
+ struct ib_unpacked_eth eth;
+ int grh_present;
+ struct ib_unpacked_grh grh;
+ struct ib_unpacked_bth bth;
+ struct ib_unpacked_deth deth;
+ int immediate_present;
+ __be32 immediate_data;
+};
+
+
+void ib_pack(const struct ib_field *desc,
+ int desc_len,
+ void *structure,
+ u8 *buf);
+
+void ib_unpack(const struct ib_field *desc,
+ int desc_len,
+ void *buf,
+ void *structure);
+
+void ib_ud_header_init(int payload_bytes,
+ int grh_present,
+ struct ib_ud_header *header);
+
+void ib_rdmaoe_ud_header_init(int payload_bytes,
+ int grh_present,
+ struct eth_ud_header *header);
+
+int ib_ud_header_pack(struct ib_ud_header *header,
+ void *buf);
+
+int ib_ud_header_unpack(void *buf,
+ struct ib_ud_header *header);
+int ib_lrh_header_pack(struct ib_unpacked_lrh *lrh, void *buf);
+int ib_lrh_header_unpack(void *buf, struct ib_unpacked_lrh *lrh);
+
+int rdmaoe_ud_header_pack(struct eth_ud_header *header,
+ void *buf);
+
+
+#endif /* IB_PACK_H */
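[Editorial usage note, not part of the patch: the IB_OPCODE() macro expands each transport/operation pair into a single constant, e.g. IB_OPCODE(UD, SEND_ONLY) becomes IB_OPCODE_UD_SEND_ONLY = IB_OPCODE_UD + IB_OPCODE_SEND_ONLY = 0x60 + 0x04 = 0x64, the BTH opcode sent on the wire. A minimal sketch of building a UD header with these helpers; payload_len, the buffer size and the function name are illustrative only:]

    static void example_build_ud_send_only_header(void)
    {
        struct ib_ud_header hdr;
        u8  buf[IB_LRH_BYTES + IB_GRH_BYTES + IB_BTH_BYTES + IB_DETH_BYTES];
        int payload_len = 256;                      /* illustrative payload size */

        ib_ud_header_init(payload_len, 1 /* grh_present */, &hdr);
        hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;    /* 0x60 + 0x04 = 0x64 */
        (void)ib_ud_header_pack(&hdr, buf);         /* serializes lrh/grh/bth/deth into buf */
    }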
Index: B:/users/irena/proj1/trunk/inc/kernel/genutils/gu_timer.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/genutils/gu_timer.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/genutils/gu_timer.h (revision 6862)
@@ -0,0 +1,111 @@
+#pragma once
+
+#include "gu_precomp.h"
+
+class CGUWorkerThread;
+
+struct IGUWorkItem
+{
+ virtual void Execute() = 0;
+ virtual ~IGUWorkItem();
+
+ LONG AddRef(LPCSTR str);
+ LONG Release(LPCSTR str);
+
+ LIST_ENTRY m_Link;
+ CGUWorkerThread* m_pWorkerThread;
+ LONG RefCount;
+};
+
+class CGUWorkerThread
+{
+ public:
+ CGUWorkerThread();
+ ~CGUWorkerThread();
+ NDIS_STATUS Start();
+ void Stop();
+ void Run();
+ NDIS_STATUS EnqueueWorkItem(IGUWorkItem *pWorkItem);
+ NDIS_STATUS DequeueWorkItem(IGUWorkItem *pWorkItem);
+
+ private:
+ LIST_ENTRY m_WorkItems;
+ KSPIN_LOCK m_Lock;
+ KEVENT m_Event;
+ bool m_bExit;
+ PVOID m_ThreadObject;
+ bool m_bIsStarted;
+};
+
+class CGUTimer;
+
+VOID
+ GUTimerFunc(
+ IN struct _KDPC *Dpc,
+ IN PVOID DeferredContext,
+ IN PVOID SystemArgument1,
+ IN PVOID SystemArgument2
+ );
+
+class CTimerWorkItem : public IGUWorkItem
+{
+public:
+ void Init(CGUTimer* pTimer)
+ {
+ m_pTimer = pTimer;
+ }
+
+ ~CTimerWorkItem()
+ {
+ }
+
+ void Execute();
+
+public:
+ CGUTimer* m_pTimer;
+};
+
+class CGUTimer
+{
+public:
+
+ CGUTimer();
+
+ ~CGUTimer();
+
+ void Initialize(
+ CGUWorkerThread *pThread,
+ IGUWorkItem *pWorkItem,
+ ULONG TimerIntervalMillis = 0,
+ bool IsPeriodic = true);
+ void Run();
+ bool Cancel();
+
+ bool Start();
+ bool Start(ULONG dwInterval);
+ void Stop();
+ void PassiveRun();
+
+private:
+ LONG AddRef(LPCSTR str);
+ LONG Release(LPCSTR str);
+
+private:
+ KTIMER m_Timer;
+ KDPC m_Dpc;
+ KEVENT m_Event;
+ LONG m_RefCount;
+ ULONG m_TimerIntervalMillis;
+ CGUWorkerThread* m_pThread;
+ IGUWorkItem* m_pWorkItem;
+ bool m_bExit;
+ bool m_IsPeriodic;
+ LARGE_INTEGER m_LastRunTime;
+
+ shutter_t m_cancel;
+ CTimerWorkItem m_TimerWorkItem;
+};
+
+
+
+
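[Editorial usage sketch, not part of the patch; MyWorkItem, the global names and the 1000 ms period are illustrative. The intended pattern appears to be: derive a work item from IGUWorkItem, start a CGUWorkerThread, and let a periodic CGUTimer queue the item onto that thread:]

    struct MyWorkItem : public IGUWorkItem
    {
        void Execute() { /* deferred work runs here, on the worker thread */ }
    };

    static CGUWorkerThread g_Thread;
    static MyWorkItem      g_Item;
    static CGUTimer        g_Timer;

    NDIS_STATUS ExampleStart()
    {
        NDIS_STATUS status = g_Thread.Start();
        if (status != NDIS_STATUS_SUCCESS)
            return status;

        g_Timer.Initialize(&g_Thread, &g_Item, 1000 /* ms */, true /* periodic */);
        g_Timer.Start();
        return NDIS_STATUS_SUCCESS;
    }

    void ExampleStop()
    {
        g_Timer.Stop();
        g_Thread.Stop();
    }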
Index: B:/users/irena/proj1/trunk/inc/kernel/genutils/gu_utils.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/genutils/gu_utils.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/genutils/gu_utils.h (revision 6862)
@@ -0,0 +1,629 @@
+/*++
+
+Copyright (c) 2005-2008 Mellanox Technologies. All rights reserved.
+
+Module Name:
+ gu_utils.h
+
+
+Notes:
+
+--*/
+
+#pragma once
+#include "gu_precomp.h"
+
+#define GU_SET_FLAG(_M, _F) ((_M)->Flags |= (_F))
+#define GU_CLEAR_FLAG(_M, _F) ((_M)->Flags &= ~(_F))
+#define GU_CLEAR_FLAGS(_M) ((_M)->Flags = 0)
+#define GU_TEST_FLAG(_M, _F) (((_M)->Flags & (_F)) != 0)
+#define GU_TEST_FLAGS(_M, _F) (((_M)->Flags & (_F)) == (_F))
+
+
+// max. length of full pathname
+#define MAX_PATH 260
+#define MAX_LONG_VALUE 0x7FFFFFFF
+
+#define BITS_PER_LONG 32
+
+#define GLOBAL_ALLOCATION_TAG 'XtoC'
+#define SIZE_OF(A) (sizeof(A)/sizeof(A[0]))
+#define FLOOR_4_MASK 0xFFFFFFFC
+
+
+#define BUFFER_SIZE 100
+
+
+// In units of ms
+uint64_t GetTickCountInMsec();
+unsigned __int64 GetTickCountInNsec();
+uint64_t GetTimeStamp(void);
+LARGE_INTEGER TimeFromLong(ULONG HandredNanos);
+NTSTATUS Sleep(ULONG HandredNanos);
+NTSTATUS GenUtilsInit();
+u32 ROUNDUP_LOG2(u32 arg);
+void guid_to_str(u64 guid, WCHAR* pstr, DWORD BufLen);
+
+FORCEINLINE const u32 H_TO_BE(const u32 src)
+{
+ return src << 24 |
+ ((src << 8 ) & 0xff0000) |
+ ((src >> 8 ) & 0xff00) |
+ (src >> 24);
+}
+
+inline UINT Floor_4(UINT value)
+{
+ return value&FLOOR_4_MASK;
+}
+
+NTSTATUS GenUtilsInit();
+
+struct AllocateSharedMemoryDeleteInfo {
+ ULONG Length;
+ BOOLEAN Cached;
+ PVOID VirtualAddress;
+ NDIS_PHYSICAL_ADDRESS PhysicalAddress;
+
+#ifdef NDIS620_MINIPORT
+ BOOLEAN fVMQ;
+ NDIS_HANDLE AllocationHandle;
+ NDIS_HANDLE SharedMemoryHandle;
+ ULONG SharedMemoryOffset;
+#endif
+};
+
+void
+DbgPrintIpAddress(
+ LPCSTR str_description,
+ u8 ipAddress[],
+ unsigned int traceLevel
+ );
+
+void
+DbgPrintMacAddress(
+ LPCSTR str_description,
+ u8 macAddress[],
+ unsigned int traceLevel
+ );
+
+NTSTATUS
+ReadRegistryDword(
+ LPCWSTR pszRegistryPath,
+ LPCWSTR pszSuffix,
+ LPCWSTR pszValueName,
+ ULONG DefaultVal,
+ LONG *pVal
+ );
+
+NTSTATUS
+ReadRegStrRegistryValueInNonPagedMemory(
+ IN LPCWSTR pszRegistryPath,
+ IN LPCWSTR pszSuffix,
+ IN LPCWSTR pszValueName,
+ IN UINT flags,
+ OUT LPWSTR* pWstr
+ );
+
+NTSTATUS ReadRegistryValue(
+ IN LPCWSTR pszRegistryPath,
+ IN LPCWSTR pszSuffix,
+ IN LPCWSTR pszValueName,
+ IN ULONG DefaultValueType,
+ IN PVOID DefaultVal,
+ IN ULONG DefaultValLength,
+ IN ULONG Flags,
+ OUT PVOID pVal
+ );
+
+
+
+
+// This is simply a wrapper around the LIST_ENTRY structure that makes
+// working with the list easier
+class LinkedList {
+
+public:
+ LinkedList() {
+ size = 0;
+ InitializeListHead(&m_Data);
+ }
+
+ // Only used when the constructor can not be used.
+ VOID Init() {
+ size = 0;
+ InitializeListHead(&m_Data);
+ }
+
+ DWORD Size() {return size;}
+
+ LIST_ENTRY *RemoveHeadList() {
+ LIST_ENTRY *pTemp;
+ ASSERT(size > 0);
+ ASSERT(!IsListEmpty(&m_Data));
+ pTemp = ::RemoveHeadList(&m_Data);
+ size--;
+ return pTemp;
+ }
+
+ LIST_ENTRY *RemoveTailList() {
+ LIST_ENTRY *pTemp;
+ ASSERT(size > 0);
+ ASSERT(!IsListEmpty(&m_Data));
+ pTemp = ::RemoveTailList(&m_Data);
+ size--;
+ return pTemp;
+ }
+
+
+ VOID InsertTailList (LIST_ENTRY *Item) {
+#if DBG
+ // Before we insert, we have to verify that the object is not in the list
+ LIST_ENTRY *current = m_Data.Flink;
+ while (current != & m_Data) {
+ ASSERT(current != Item);
+ current = current->Flink;
+ }
+#endif
+ ::InsertTailList(&m_Data, Item);
+ size++;
+ }
+
+ VOID InsertHeadList (LIST_ENTRY *Item) {
+#if DBG
+ // Before we insert, we have to verify that the object is not in the list
+ LIST_ENTRY *current = m_Data.Flink;
+ while (current != & m_Data) {
+ ASSERT(current != Item);
+ current = current->Flink;
+ }
+#endif
+ ::InsertHeadList(&m_Data, Item);
+ size++;
+ }
+
+ LIST_ENTRY *Head() {
+ ASSERT(size > 0);
+ ASSERT(!IsListEmpty(&m_Data));
+ return m_Data.Flink;
+ }
+
+ LIST_ENTRY *Tail() {
+ ASSERT(size > 0);
+ ASSERT(!IsListEmpty(&m_Data));
+ return m_Data.Blink;
+ }
+
+
+ LIST_ENTRY *RawHead() {
+ // Return the head of the list without any checks,
+ // needed in order to use it as an iterator
+ return m_Data.Flink;
+ }
+
+
+ bool IsAfterTheLast(LIST_ENTRY *pEntry) {
+ if (size == 0) {
+ return true;
+ }
+ return &m_Data == pEntry;
+ }
+
+ VOID RemoveEntryList(LIST_ENTRY *Item) {
+ ASSERT(size > 0);
+ ASSERT(!IsListEmpty(&m_Data));
+#if DBG
+ // Verify that this item is indeed in the list
+ LIST_ENTRY *current = m_Data.Flink;
+ while (current != Item) {
+ if (current == & m_Data) {
+ ASSERT(FALSE);
+ //SDP_PRINT(TRACE_LEVEL_ERROR ,SDP_BUFFER_POOL ,("Object is not in the list\n"));
+ }
+ current = current->Flink;
+ }
+
+#endif
+ ::RemoveEntryList(Item);
+ size--;
+ }
+
+private:
+ DWORD size;
+ LIST_ENTRY m_Data;
+};
+
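[Editorial usage sketch, not part of the patch; MY_ENTRY is a hypothetical caller structure. The wrapper stores bare LIST_ENTRY links, so callers embed one in their own structure and recover the container with CONTAINING_RECORD:]

    typedef struct _MY_ENTRY {
        LIST_ENTRY Link;
        ULONG      Data;
    } MY_ENTRY;

    void ExampleList(MY_ENTRY *pEntry)
    {
        LinkedList list;

        list.InsertTailList(&pEntry->Link);
        ASSERT(list.Size() == 1);

        LIST_ENTRY *pLink   = list.RemoveHeadList();
        MY_ENTRY   *removed = CONTAINING_RECORD(pLink, MY_ENTRY, Link);
        ASSERT(removed == pEntry);
    }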
+//--------------------------------------
+// Queue structure and macros
+//--------------------------------------
+typedef struct _QUEUE_ENTRY
+{
+ struct _QUEUE_ENTRY *Next;
+} QUEUE_ENTRY, *PQUEUE_ENTRY;
+
+typedef struct _QUEUE_HEADER
+{
+ PQUEUE_ENTRY Head;
+ PQUEUE_ENTRY Tail;
+} QUEUE_HEADER, *PQUEUE_HEADER;
+
+#define InitializeQueueHeader(QueueHeader) \
+ { \
+ (QueueHeader)->Head = (QueueHeader)->Tail = NULL; \
+ }
+
+#define IsQueueEmpty(QueueHeader) ((QueueHeader)->Head == NULL)
+
+#define RemoveHeadQueue(QueueHeader) \
+ (QueueHeader)->Head; \
+ { \
+ PQUEUE_ENTRY pNext; \
+ ASSERT((QueueHeader)->Head); \
+ pNext = (QueueHeader)->Head->Next; \
+ (QueueHeader)->Head = pNext; \
+ if (pNext == NULL) \
+ (QueueHeader)->Tail = NULL; \
+ }
+
+#define InsertHeadQueue(QueueHeader, QueueEntry) \
+ { \
+ ((PQUEUE_ENTRY)QueueEntry)->Next = (QueueHeader)->Head; \
+ (QueueHeader)->Head = (PQUEUE_ENTRY)(QueueEntry); \
+ if ((QueueHeader)->Tail == NULL) \
+ (QueueHeader)->Tail = (PQUEUE_ENTRY)(QueueEntry); \
+ }
+
+#define InsertTailQueue(QueueHeader, QueueEntry) \
+ { \
+ ((PQUEUE_ENTRY)QueueEntry)->Next = NULL; \
+ if ((QueueHeader)->Tail) \
+ (QueueHeader)->Tail->Next = (PQUEUE_ENTRY)(QueueEntry); \
+ else \
+ (QueueHeader)->Head = (PQUEUE_ENTRY)(QueueEntry); \
+ (QueueHeader)->Tail = (PQUEUE_ENTRY)(QueueEntry); \
+ }
+
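[Editorial note with an illustrative sketch, not part of the patch: RemoveHeadQueue expands to an expression followed by a block, so it is meant to appear on the right-hand side of an assignment; the Insert macros cast whatever pointer they are given, so the QUEUE_ENTRY should be the first member of the caller's structure (or its address passed explicitly). MY_ITEM is hypothetical:]

    typedef struct _MY_ITEM {
        QUEUE_ENTRY Link;       /* first member, so (PQUEUE_ENTRY)pItem is valid */
        ULONG       Payload;
    } MY_ITEM;

    void ExampleQueue(MY_ITEM *pItem)
    {
        QUEUE_HEADER q;

        InitializeQueueHeader(&q);
        InsertTailQueue(&q, pItem);

        if (!IsQueueEmpty(&q)) {
            PQUEUE_ENTRY head = RemoveHeadQueue(&q);
            MY_ITEM *removed = (MY_ITEM *)head;     /* valid because Link is first */
            ASSERT(removed == pItem);
        }
    }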
+
+
+#define ETH_IS_LOCALLY_ADMINISTERED(Address) \
+ (BOOLEAN)(((PUCHAR)(Address))[0] & ((UCHAR)0x02))
+
+
+
+
+// A simple static array (for now)
+class Array {
+public:
+ NTSTATUS Init(int MaxNumberofPackets);
+
+ VOID Shutdown() {
+ delete[]m_pData;
+ }
+
+ Array() {
+ m_Count = 0;
+ }
+ void Add(void *ptr) {
+ ASSERT(m_Count < (int)m_Size);
+ m_pData[m_Count++] = ptr;
+ }
+
+ int GetCount() {return m_Count;}
+
+ void *GetPtr(int Place) {
+ ASSERT(Place < m_Count);
+ return m_pData[Place];
+ }
+ void Reset() {
+ m_Count = 0;
+ }
+
+private:
+ int m_Count;
+ void **m_pData;
+ UINT m_Size; // For Debug only
+
+};
+
+
+
+#if 0
+
+/*
+ This class is used for freeing the sent packets.
+ It is based on the assumption that this happens at raised IRQL and therefore,
+ if we allocate a data structure for each processor we should be fine.
+
+*/
+class ProcessorArray {
+
+public:
+
+ ProcessorArray() {
+ m_Arrays = NULL;
+ m_NumberOfProcessors = 0;
+ }
+
+ NTSTATUS Init(int MaxNumberofPackets);
+
+ VOID Shutdown() {
+ if (m_Arrays) {
+ u32 j;
+ for (j=0; j< m_NumberOfProcessors; j++) {
+ m_Arrays[j].Shutdown();
+ }
+ delete []m_Arrays;
+ m_Arrays = NULL;
+ }
+ m_NumberOfProcessors = 0;
+
+ }
+
+ Array *GetArray() {
+ ASSERT( KeGetCurrentIrql()== DISPATCH_LEVEL);
+ ULONG pn = KeGetCurrentProcessorNumber();
+ ASSERT(pn < m_NumberOfProcessors);
+ m_Arrays[pn].Reset();
+ return &m_Arrays[pn];
+
+
+ }
+private:
+ Array *m_Arrays;
+ ULONG m_NumberOfProcessors;
+
+};
+
+#endif
+
+#if DBG
+class VERIFY_DISPATCH_LEVEL {
+public:
+ VERIFY_DISPATCH_LEVEL(KIRQL irql = -1) {
+ if (irql != (KIRQL)-1) {
+ ASSERT(KeGetCurrentIrql() == irql);
+ }
+
+ StartLevel = KeGetCurrentIrql();
+
+ }
+
+ ~VERIFY_DISPATCH_LEVEL() {
+ ASSERT(KeGetCurrentIrql() == StartLevel);
+ }
+private:
+ KIRQL StartLevel;
+};
+#else
+class VERIFY_DISPATCH_LEVEL {
+public:
+ VERIFY_DISPATCH_LEVEL(KIRQL irql = -1) {}
+};
+#endif // DBG
+
+template <class T>
+class FIFO {
+public:
+
+ NTSTATUS Init(int MaxSize);
+
+ FIFO() {
+ m_pData = NULL;
+ m_Head = 0;
+ m_Tail = 0;
+ m_Size = 0;
+ m_Count = 0;
+ }
+
+ ~FIFO() {
+ Shutdown();
+ }
+
+ VOID Shutdown() {
+ if(m_pData != NULL) {
+ delete []m_pData;
+ m_pData = NULL;
+ }
+ }
+
+ VOID Push(T pNewItem) {
+ ASSERT(m_Count < m_Size);
+ m_pData[m_Head++] = pNewItem;
+ if(m_Head == m_Size) {
+ m_Head = 0;
+ }
+ m_Count++;
+ ASSERT(m_Count <= m_Size);
+ }
+
+ T Pop() {
+ VOID *pRet = m_pData[m_Tail++];
+ ASSERT(m_Count > 0);
+ if(m_Tail == m_Size) {
+ m_Tail = 0;
+ }
+ m_Count--;
+ ASSERT(m_Count >= 0);
+ return pRet;
+ }
+ int Count() {
+ return m_Count;
+ }
+
+ bool IsFull() {
+ return m_Size == m_Count;
+ }
+
+ bool IsEmpty() {
+ return m_Count == 0;
+ }
+
+private:
+ T *m_pData;
+ int m_Head;
+ int m_Tail;
+ int m_Size;
+ int m_Count;
+};
+
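[Editorial usage sketch, not part of the patch; the element type, depth and buffer size are illustrative, and FIFO<T>::Init is implemented in the library source, not in this header:]

    void ExampleFifo()
    {
        FIFO<void *> fifo;

        if (!NT_SUCCESS(fifo.Init(64)))
            return;

        void *pWork = ExAllocatePoolWithTag(NonPagedPool, 128, GLOBAL_ALLOCATION_TAG);
        if (pWork != NULL && !fifo.IsFull())
            fifo.Push(pWork);

        while (!fifo.IsEmpty()) {
            void *p = fifo.Pop();
            ExFreePoolWithTag(p, GLOBAL_ALLOCATION_TAG);
        }
    }   /* the destructor calls Shutdown() and frees the ring storage */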
+#define SIZEOF_IN_BITS(_type) (8 * sizeof(_type))
+
+class Bitmap
+{
+public:
+
+ static bool Set(ULONG* pData, ULONG BitIndex)
+ {
+ if(pData == NULL)
+ {
+ return false;
+ }
+ ULONG Offset = BitIndex / SIZEOF_IN_BITS(ULONG);
+ ULONG Bit = (BitIndex % SIZEOF_IN_BITS(ULONG));
+
+ if(pData[Offset] & (1 << Bit))
+ {// already set
+ return false;
+ }
+ pData[Offset] |= 1 << Bit;
+ return true;
+ }
+ static bool Clear(ULONG* pData, ULONG BitIndex)
+ {
+ if(pData == NULL)
+ {
+ return false;
+ }
+ ULONG Offset = BitIndex / SIZEOF_IN_BITS(ULONG);
+ ULONG Bit = (BitIndex % SIZEOF_IN_BITS(ULONG));
+
+ if((pData[Offset] & (1 << Bit)) == 0)
+ {// already clear
+ return false;
+ }
+ pData[Offset] &= ~(1 << Bit);
+ return true;
+ }
+ static bool Test(ULONG* pData, ULONG BitIndex)
+ {
+ if(pData == NULL)
+ {// out of range
+ return false;
+ }
+ ULONG Offset = BitIndex / SIZEOF_IN_BITS(ULONG);
+ ULONG Bit = (BitIndex % SIZEOF_IN_BITS(ULONG));
+
+ return (pData[Offset] & (1 << Bit)) != 0;
+ }
+};
+
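[Editorial usage sketch, not part of the patch: the Bitmap helpers operate on a caller-supplied ULONG array and do no bounds checking beyond a NULL test; the 128-bit size below is illustrative:]

    void ExampleBitmap()
    {
        /* enough ULONGs to hold 128 bits */
        ULONG bits[128 / SIZEOF_IN_BITS(ULONG)] = { 0 };

        Bitmap::Set(bits, 5);                   /* returns true: bit 5 was clear */
        ASSERT(Bitmap::Test(bits, 5));
        Bitmap::Clear(bits, 5);                 /* returns true: bit 5 was set */
        ASSERT(!Bitmap::Test(bits, 5));
    }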
+///////////////////////////////////////////////////////////////////////////////
+// Tracer Class //
+///////////////////////////////////////////////////////////////////////////////
+
+
+enum EventType {
+ PROCESS_RX_START,
+ PROCESS_RX_END,
+ PROCESS_RX_INTERNAL_START,
+ PROCESS_RX_INTERNAL_START_SKIPING,
+ COMPLEATD_INDICATING,
+ MP_PORT_SEND_PACKETS
+
+};
+
+const int MAX_EVENTS = 10000;
+
+class Tracer {
+public:
+
+ VOID Init();
+ VOID AddEvent(EventType Event, int ExtraData);
+ void Printxx() ;
+
+private:
+
+struct data {
+ uint64_t TimeStamp;
+ EventType Event;
+ int ExtraData;
+};
+
+ data m_data[MAX_EVENTS];
+ int m_CurrentLocation;
+
+
+};
+
+class CSpinLockWrapper {
+ KSPIN_LOCK &m_SpinLock; // reference, so every wrapper serializes on the caller's lock rather than on a private copy
+ KIRQL m_OldIrql;
+
+public:
+
+ CSpinLockWrapper (KSPIN_LOCK &SpinLock) : m_SpinLock(SpinLock){
+ // Spinlock must already be initialized
+#if DBG
+ m_OldIrql = 0xff;
+#endif
+ }
+
+ void Lock() {
+ ASSERT(m_OldIrql == 0xff);
+ KeAcquireSpinLock(&m_SpinLock, &m_OldIrql);
+ }
+
+ void Unlock() {
+ ASSERT(m_OldIrql != 0xff);
+ KeReleaseSpinLock(&m_SpinLock, m_OldIrql);
+#if DBG
+ m_OldIrql = 0xff;
+#endif
+ }
+
+ ~CSpinLockWrapper() {
+ ASSERT(m_OldIrql == 0xff);
+ }
+
+};
+
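[Editorial usage sketch, not part of the patch: CSpinLockWrapper assumes the KSPIN_LOCK it is given was initialized elsewhere (KeInitializeSpinLock); the lock and function names below are illustrative:]

    static KSPIN_LOCK g_StateLock;   /* KeInitializeSpinLock(&g_StateLock) at load time */

    void ExampleTouchSharedState()
    {
        CSpinLockWrapper lock(g_StateLock);

        lock.Lock();                 /* KeAcquireSpinLock: raises to DISPATCH_LEVEL */
        /* ... access state guarded by g_StateLock ... */
        lock.Unlock();
    }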
+USHORT nthos(USHORT in);
+
+NTSTATUS
+ MyKeWaitForSingleObject(
+ IN PVOID Object,
+ IN KWAIT_REASON WaitReason,
+ IN KPROCESSOR_MODE WaitMode,
+ IN BOOLEAN Alertable,
+ IN PLARGE_INTEGER Timeout OPTIONAL,
+ IN BOOLEAN ExceptApc = FALSE
+ );
+
+
+NTSTATUS
+CopyFromUser(
+ IN void* const p_dest,
+ IN const void* const p_src,
+ IN const size_t count
+ );
+
+NTSTATUS
+CopyToUser(
+ IN void* const p_dest,
+ IN const void* const p_src,
+ IN const size_t count
+ );
+
+VOID * MapUserMemory(
+ IN PVOID Address,
+ IN ULONG size,
+ OUT PMDL *ppMdl
+ );
+
+VOID UnMapMemory(
+ IN VOID *pKernelAddress,
+ IN PMDL pMdl
+ );
+
+VOID UpdateRc(NTSTATUS *rc, NTSTATUS rc1);
+
Index: B:/users/irena/proj1/trunk/inc/kernel/genutils/gu_defs.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/genutils/gu_defs.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/genutils/gu_defs.h (revision 6862)
@@ -0,0 +1,107 @@
+/*++
+
+Copyright (c) 2005-2008 Mellanox Technologies. All rights reserved.
+
+Module Name:
+ gu_defs.h
+
+Abstract:
+
+Notes:
+
+--*/
+
+#pragma once
+
+// basic types
+typedef unsigned char u8, __u8;
+typedef unsigned short int u16, __u16;
+typedef unsigned int u32, __u32;
+typedef unsigned __int64 u64, __u64;
+typedef char s8, __s8;
+typedef short int s16, __s16;
+typedef int s32, __s32;
+typedef __int64 s64, __s64;
+typedef u16 __be16 ;
+typedef u32 __be32 ;
+typedef u64 __be64 ;
+
+#ifdef _WIN64
+typedef unsigned __int64 uintn_t;
+#else
+typedef unsigned int uintn_t;
+#endif
+
+typedef unsigned __int64 uint64_t;
+
+#define be16_to_cpu(a) _byteswap_ushort((USHORT)(a))
+#define be32_to_cpu(a) _byteswap_ulong((ULONG)(a))
+
+#define __be16_to_cpu be16_to_cpu
+#define __be32_to_cpu be32_to_cpu
+
+
+u32 inline CL_NTOH32( u32 x ) {
+ return (u32)(
+ (((u32)(x) & 0x000000FF) << 24) |
+ (((u32)(x) & 0x0000FF00) << 8) |
+ (((u32)(x) & 0x00FF0000) >> 8) |
+ (((u32)(x) & 0xFF000000) >> 24) );
+}
+#define CL_HTON32 CL_NTOH32
+
+#ifdef _WIN64
+#define __cpu_to_be32(x) ((((x) >> 24)&0x000000ff) | (((x) >> 8)&0x0000ff00) | (((x) << 8)&0x00ff0000) | (((x) << 24)&0xff000000))
+#elif defined(_WIN32)
+__inline __int32 __cpu_to_be32( __int32 dwX )
+{
+ _asm mov eax, dwX
+ _asm bswap eax
+ _asm mov dwX, eax
+
+ return dwX;
+}
+#else
+#error unsupported platform
+#endif
+
+//#define __cpu_to_be32(x) cpu_to_be32(x)
+#define __cpu_to_be16(x) cpu_to_be16(x)
+u16 inline cpu_to_be16(u16 in) {
+ return in >> 8 | in << 8;
+}
+
+inline u64 ALIGN64(u64 pAddr, u64 a) {return ((pAddr)+(a)-1)&~((a)-1);}
+
+#define XOR(x,y) (!(x) != !(y))
+#define XNOR(x,y) (!(x) == !(y))
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+
+// Convert the MAC from the byte layout Windows provides to the big-endian layout we need.
+inline void mac_to_be64(u64 *dst, u64 *src)
+{
+ char *csrc = (char *)src;
+ char *cdst = (char *)dst;
+ cdst[0] = csrc[5];
+ cdst[1] = csrc[4];
+ cdst[2] = csrc[3];
+ cdst[3] = csrc[2];
+ cdst[4] = csrc[1];
+ cdst[5] = csrc[0];
+ cdst[6] = 0;
+ cdst[7] = 0;
+}
+
+inline u64 be64_to_mac( UCHAR *src)
+{
+ u64 dst;
+ mac_to_be64(&dst,(u64 *)src);
+ return dst;
+}
+
+#define IS_BIT_SET(val, mask) \
+ (((val) & (mask)) ? 1 : 0)
+
+
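[Editorial illustration, not part of the patch, to make the byte-order intent of mac_to_be64()/be64_to_mac() concrete; the address 00-11-22-33-44-55 is made up:]

    u64 ExampleMacSwap()
    {
        UCHAR mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        /* The six bytes are reversed into the low bytes of the result:
         * 55 44 33 22 11 00 00 00, i.e. 0x0000001122334455 when the
         * u64 is read back on a little-endian CPU. */
        return be64_to_mac(mac);
    }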
Index: B:/users/irena/proj1/trunk/inc/kernel/genutils/gu_wpptrace.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/genutils/gu_wpptrace.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/genutils/gu_wpptrace.h (revision 6862)
@@ -0,0 +1,83 @@
+/*++
+
+Copyright (c) 2005-2008 Mellanox Technologies. All rights reserved.
+
+Module Name:
+ gu_wpptrace.h
+
+Abstract:
+ This module contains all debug-related code.
+
+Revision History:
+
+Notes:
+
+--*/
+
+#pragma once
+
+
+#if defined(EVENT_TRACING)
+
+#define WPP_CONTROL_GUIDS \
+ WPP_DEFINE_CONTROL_GUID(EthrnetGuid,(684E068C, 3FDC, 4bce, 89C3, CDB77A8B75A4), \
+ WPP_DEFINE_BIT(GU) \
+ WPP_DEFINE_BIT(GU_INIT)) \
+
+#define WPP_LEVEL_FLAGS_ENABLED(lvl, flags) (WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= lvl)
+#define WPP_LEVEL_FLAGS_LOGGER(lvl,flags) WPP_LEVEL_LOGGER(flags)
+#define WPP_FLAG_ENABLED(flags)(WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= TRACE_LEVEL_VERBOSE)
+#define WPP_FLAG_LOGGER(flags) WPP_LEVEL_LOGGER(flags)
+
+// begin_wpp config
+// GU_ENTER();
+// GU_EXIT();
+// USESUFFIX(GU_PRINT, "%!STDPREFIX! %!FUNC!");
+// GU_PRINT(LEVEL,FLAGS,MSG,...)
+// USESUFFIX(GU_ENTER, "====>>> %!FUNC! ");
+// USESUFFIX(GU_EXIT, "<<<====== %!FUNC!]");
+// end_wpp
+
+
+#else //defined(EVENT_TRACING)
+
+// Debug topics
+#define GU 0x000001
+#define GU_INIT 0x000020
+// Each change to these flags requires a matching change to the g_DbgFlags[] variable in Mp_dbg.cpp
+
+
+#define TRACE_LEVEL_CRITICAL DPFLTR_ERROR_LEVEL
+#define TRACE_LEVEL_FATAL DPFLTR_ERROR_LEVEL
+#define TRACE_LEVEL_ERROR DPFLTR_ERROR_LEVEL
+#define TRACE_LEVEL_WARNING DPFLTR_WARNING_LEVEL
+#define TRACE_LEVEL_INFORMATION DPFLTR_TRACE_LEVEL
+#define TRACE_LEVEL_VERBOSE DPFLTR_INFO_LEVEL
+
+VOID
+TraceGUMessage(
+ IN PCCHAR func,
+ IN PCCHAR file,
+ IN ULONG line,
+ IN ULONG level,
+ IN PCCHAR format,
+ ...
+ );
+
+#pragma warning(disable:4296) // expression is always true/false
+#define GU_PRINT(_level_,_flag_, _format_, ...) \
+ if ((g_GUDbgFlags & (_flag_)) && \
+ (g_GUDbgFlagsDef[ROUNDUP_LOG2(_flag_)].dbgLevel >= (_level_))) \
+ { \
+ TraceGUMessage(__FUNCTION__, __FILE__, __LINE__, _level_, _format_, __VA_ARGS__); \
+ }
+
+#define GU_ENTER()\
+ GU_PRINT(TRACE_LEVEL_VERBOSE, GU, "===>\n");
+
+#define GU_EXIT()\
+ GU_PRINT(TRACE_LEVEL_VERBOSE, GU, "<===\n");
+
+#endif //defined(EVENT_TRACING)
+
+
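[Editorial usage sketch, not part of the patch: in the non-WPP (checked) build the macros above filter on g_GUDbgFlags/g_GUDbgFlagsDef and forward to TraceGUMessage(); the message text, flag choice and status value are illustrative, and the same call sites are presumably picked up by the WPP scanner in traced builds:]

    void ExampleTrace()
    {
        NTSTATUS status = STATUS_UNSUCCESSFUL;

        GU_ENTER();
        GU_PRINT(TRACE_LEVEL_ERROR, GU_INIT, "registry read failed, status %#x\n", status);
        GU_EXIT();
    }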
Index: B:/users/irena/proj1/trunk/inc/kernel/genutils/gu_dbg.h
===================================================================
--- B:/users/irena/proj1/trunk/inc/kernel/genutils/gu_dbg.h (revision 0)
+++ B:/users/irena/proj1/trunk/inc/kernel/genutils/gu_dbg.h (revision 6862)
@@ -0,0 +1,51 @@
+/*++
+
+Copyright (c) 2005-2010 Mellanox Technologies. All rights reserved.
+
+Module Name:
+ gu_dbg.h
+
+Abstract:
+ This module contains all debug-related code.
+Notes:
+
+--*/
+
+#pragma once
+
+#ifdef _PREFAST_
+#define CONDITION_ASSUMED(X) __analysis_assume((X))
+#else
+#define CONDITION_ASSUMED(X)
+#endif // _PREFAST_
+
+#if DBG
+
+struct CGUDebugFlags{
+ LPCWSTR pszName;
+ DWORD dbgLevel;
+ };
+
+extern CGUDebugFlags g_GUDbgFlagsDef[];
+const unsigned int g_GUDbgFlags= 0xffff;
+
+
+#undef ASSERT
+#define ASSERT(x) if(!(x)) { \
+ DbgPrintEx(DPFLTR_IHVNETWORK_ID, DPFLTR_ERROR_LEVEL, "Assertion failed: %s:%d %s\n", __FILE__, __LINE__, #x);\
+ DbgBreakPoint(); }\
+ CONDITION_ASSUMED(x);
+
+#define ASSERT_ALWAYS(x) ASSERT(x)
+
+void DebugGUPrintInit(IN LPCWSTR pszRegistryPath);
+
+#else // !DBG
+
+#undef ASSERT
+#define ASSERT(x)
+
+#define ASSERT_ALWAYS(x) if(!(x)) { \
+ DbgBreakPoint(); }
+
+#endif // DBG
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/eth/eth5x/sources
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/eth/eth5x/sources (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/eth/eth5x/sources (revision 6862)
@@ -36,6 +36,7 @@
..\..\bus\core\$O; \
..\..\..\..\..\inc; \
..\..\..\..\..\inc\kernel; \
+ ..\..\..\..\..\inc\kernel\l2w; \
..\..\bus\net; \
..\..\bus\ib; \
$(O); \
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/eth/eth6x/sources
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/eth/eth6x/sources (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/eth/eth6x/sources (revision 6862)
@@ -37,6 +37,7 @@
..\..\bus\net; \
..\..\bus\ib; \
$(O); \
+ ..\..\..\..\..\inc\kernel\l2w; \
C_DEFINES=$(C_DEFINES) -DNDIS_MINIPORT_DRIVER -DBINARY_COMPATIBLE=0 -DNDIS_WDM=1 -DBUS_DRIVER -DMTNIC_PERF_STAT -DMTNIC -DNTDDI_VERSION=NTDDI_VISTA
@@ -54,7 +55,7 @@
#
# The driver is for windows7 Build with NDIS 6.2 and KMDF 1.9
#
-C_DEFINES=$(C_DEFINES) -DNDIS61_MINIPORT=1
+C_DEFINES=$(C_DEFINES) -DNDIS620_MINIPORT=1
KMDF_VERSION_MAJOR=1
KMDF_VERSION_MINOR=9
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/net/SOURCES
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/net/SOURCES (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/net/SOURCES (revision 6862)
@@ -33,7 +33,7 @@
sense.c \
srq.c \
-INCLUDES=..;..\inc;..\..\inc;..\..\..\inc;..\core\$O;..\..\..\..\..\inc;..\..\..\..\..\inc\kernel;..\..\eth\inc;..\drv
+INCLUDES=..;..\inc;..\..\inc;..\..\..\inc;..\core\$O;..\..\..\..\..\inc;..\..\..\..\..\inc\kernel;..\..\eth\inc;..\drv;..\..\..\..\..\inc\kernel\l2w;
#PRECOMPILED_INCLUDE=precomp.h
@@ -47,6 +47,7 @@
$(DDK_LIB_PATH)\ntstrsafe.lib \
$(TARGETPATH)\*\complib.lib \
$(TARGETPATH)\*\mlx4_core.lib \
+ $(TARGETPATH)\*\l2w.lib \
!IFDEF ENABLE_EVENT_TRACING
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/l2w.c
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/l2w.c (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/l2w.c (revision 6862)
@@ -1,512 +0,0 @@
-#include "l2w.h"
-#include "core.h"
-#include "pa_cash.h"
-#include "mlx4.h"
-#include "mlx4_debug.h"
-
-#if defined (EVENT_TRACING)
-#ifdef offsetof
-#undef offsetof
-#endif
-#include "l2w.tmh"
-#endif
-
-/* Nth element of the table contains the index of the first set bit of N; 8 - for N=0 */
-char g_set_bit_tbl[256];
-
-/* Nth element of the table contains the index of the first 0 bit of N; 8 - for N=255 */
-char g_clr_bit_tbl[256];
-
-/* interval for a cmd go-bit waiting */
-// TODO: not clear what is to be this value:
-// 1. it has to be enough great, so as the tread will go waiting;
-// 2. it has to be enough small, so as there is no too large waiting after first command try;
-// 3. it has to be enough great, so as not to cause to intensive rescheduling;
-#define CMD_WAIT_USECS 2
-#define CMD_WAIT_INTERVAL ((-10) * CMD_WAIT_USECS)
-LARGE_INTEGER g_cmd_interval = { (ULONG)CMD_WAIT_INTERVAL, 0 };
-
-////////////////////////////////////////////////////////
-//
-// PCI POOL
-//
-////////////////////////////////////////////////////////
-
-pci_pool_t *
-pci_pool_create (const char *name, struct pci_dev *pdev,
- size_t size, size_t align, size_t allocation)
-{
- pci_pool_t *pool;
- UNREFERENCED_PARAMETER(align);
- UNREFERENCED_PARAMETER(allocation);
-
- ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
-
- // allocation parameter is not handled yet
- ASSERT(allocation == 0);
-
- //TODO: not absolutely correct: Linux's pci_pool_alloc provides contiguous physical memory,
- // while default alloc function - ExAllocatePoolWithTag -doesn't.
- // But for now it is used for elements of size <= PAGE_SIZE
- // Anyway - a sanity check:
- ASSERT(size <= PAGE_SIZE);
- if (size > PAGE_SIZE)
- return NULL;
-
- // allocate object
- pool = (pci_pool_t *)ExAllocatePoolWithTag( NonPagedPool, sizeof(pci_pool_t), MT_TAG_PCIPOOL );
- if (pool == NULL)
- return NULL;
-
- //TODO: not too effective: one can read its own alloc/free functions
- ExInitializeNPagedLookasideList( &pool->pool_hdr, NULL, NULL, 0, size, MT_TAG_PCIPOOL, 0 );
-
- // fill the object
- pool->mdev = pdev->dev;
- pool->size = size;
- strncpy( pool->name, name, sizeof pool->name );
-
- return pool;
-}
-
-
-////////////////////////////////////////////////////////
-//
-// BIT TECHNIQUES
-//
-////////////////////////////////////////////////////////
-
-void fill_bit_tbls()
-{
- unsigned long i;
- for (i=0; i<256; ++i) {
- g_set_bit_tbl[i] = (char)(_ffs_raw(&i,0) - 1);
- g_clr_bit_tbl[i] = (char)(_ffz_raw(&i,0) - 1);
- }
- g_set_bit_tbl[0] = g_clr_bit_tbl[255] = 8;
-}
-
-
-////////////////////////////////////////////////////////
-//
-// BIT MAPS
-//
-////////////////////////////////////////////////////////
-
-int __bitmap_full(const unsigned long *bitmap, int bits)
-{
- int k, lim = bits/BITS_PER_LONG;
- for (k = 0; k < lim; ++k)
- if (~bitmap[k])
- return 0;
-
- if (bits % BITS_PER_LONG)
- if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
- return 0;
-
- return 1;
-}
-
-int __bitmap_empty(const unsigned long *bitmap, int bits)
-{
- int k, lim = bits/BITS_PER_LONG;
- for (k = 0; k < lim; ++k)
- if (bitmap[k])
- return 0;
-
- if (bits % BITS_PER_LONG)
- if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
- return 0;
-
- return 1;
-}
-
-
-////////////////////////////////////////////////////////
-//
-// DEBUG PRINT
-//
-////////////////////////////////////////////////////////
-
-VOID
-WriteEventLogEntry(
- PVOID pi_pIoObject,
- ULONG pi_ErrorCode,
- ULONG pi_UniqueErrorCode,
- ULONG pi_FinalStatus,
- ULONG pi_nDataItems,
- ...
- )
-/*++
-
-Routine Description:
- Writes an event log entry to the event log.
-
-Arguments:
-
- pi_pIoObject......... The IO object ( driver object or device object ).
- pi_ErrorCode......... The error code.
- pi_UniqueErrorCode... A specific error code.
- pi_FinalStatus....... The final status.
- pi_nDataItems........ Number of data items.
- .
- . data items values
- .
-
-Return Value:
-
- None .
-
---*/
-{ /* WriteEventLogEntry */
-
- /* Variable argument list */
- va_list l_Argptr;
- /* Pointer to an error log entry */
- PIO_ERROR_LOG_PACKET l_pErrorLogEntry;
-
- /* Init the variable argument list */
- va_start(l_Argptr, pi_nDataItems);
-
- /* Allocate an error log entry */
- l_pErrorLogEntry =
- (PIO_ERROR_LOG_PACKET)IoAllocateErrorLogEntry(
- pi_pIoObject,
- (UCHAR)(sizeof(IO_ERROR_LOG_PACKET)+pi_nDataItems*sizeof(ULONG))
- );
- /* Check allocation */
- if ( l_pErrorLogEntry != NULL)
- { /* OK */
-
- /* Data item index */
- USHORT l_nDataItem ;
-
- /* Set the error log entry header */
- l_pErrorLogEntry->ErrorCode = pi_ErrorCode;
- l_pErrorLogEntry->DumpDataSize = (USHORT) (pi_nDataItems*sizeof(ULONG));
- l_pErrorLogEntry->SequenceNumber = 0;
- l_pErrorLogEntry->MajorFunctionCode = 0;
- l_pErrorLogEntry->IoControlCode = 0;
- l_pErrorLogEntry->RetryCount = 0;
- l_pErrorLogEntry->UniqueErrorValue = pi_UniqueErrorCode;
- l_pErrorLogEntry->FinalStatus = pi_FinalStatus;
-
- /* Insert the data items */
- for (l_nDataItem = 0; l_nDataItem < pi_nDataItems; l_nDataItem++)
- { /* Inset a data item */
-
- /* Current data item */
- int l_CurDataItem ;
-
- /* Get next data item */
- l_CurDataItem = va_arg( l_Argptr, int);
-
- /* Put it into the data array */
- l_pErrorLogEntry->DumpData[l_nDataItem] = l_CurDataItem ;
-
- } /* Inset a data item */
-
- /* Write the packet */
- IoWriteErrorLogEntry(l_pErrorLogEntry);
-
- } /* OK */
-
- /* Term the variable argument list */
- va_end(l_Argptr);
-
-} /* WriteEventLogEntry */
-
-
-////////////////////////////////////////////////////////
-//
-// GENERAL
-//
-////////////////////////////////////////////////////////
-
-// from lib/string.c
-/**
-* strlcpy - Copy a %NUL terminated string into a sized buffer
-* @dest: Where to copy the string to
-* @src: Where to copy the string from
-* @size: size of destination buffer
-*
-* Compatible with *BSD: the result is always a valid
-* NUL-terminated string that fits in the buffer (unless,
-* of course, the buffer size is zero). It does not pad
-* out the result like strncpy() does.
-*/
-SIZE_T strlcpy(char *dest, const void *src, SIZE_T size)
-{
- SIZE_T ret = strlen(src);
-
- if (size) {
- SIZE_T len = (ret >= size) ? size-1 : ret;
- memcpy(dest, src, len);
- dest[len] = '\0';
- }
- return ret;
-}
-
-int parse_dev_location(
- const char *buffer,
- const char *format,
- int *bus, int *dev, int *func
-)
-{
- return sscanf( buffer, format, bus, dev, func );
-}
-
-int core_init()
-{
- int err;
-
- fill_bit_tbls();
- init_qp_state_tbl();
- err = ib_core_init();
- if (err)
- return err;
- return pa_cash_init();
-}
-
-void core_cleanup()
-{
- ib_core_cleanup();
- pa_cash_release();
-}
-
-#ifdef USE_WDM_INTERRUPTS
-
-void free_irq(struct mlx4_dev *dev)
-{
- if (!dev->pdev->int_obj)
- return;
-
-#if (NTDDI_VERSION >= NTDDI_LONGHORN)
- // Vista build environment
- if (dev->pdev->legacy_connect)
- IoDisconnectInterrupt( dev->pdev->int_obj );
- else {
- IO_DISCONNECT_INTERRUPT_PARAMETERS ctx;
- MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,
- ("%s: IoDisconnectInterrupt: Version %d\n", dev->pdev->name, dev->pdev->version));
- ctx.Version = dev->pdev->version;
- ctx.ConnectionContext.InterruptObject = dev->pdev->int_obj;
- IoDisconnectInterruptEx( &ctx );
- }
-
-#else
- // legacy build environment
- IoDisconnectInterrupt( dev->pdev->int_obj );
-#endif
- dev->pdev->int_obj = NULL;
-}
-
-
-int request_irq(
- IN struct mlx4_dev * dev,
- IN PKSERVICE_ROUTINE isr, /* Line ISR */
- IN PVOID isr_ctx, /* ISR context */
- IN PKMESSAGE_SERVICE_ROUTINE misr, /* Message ISR */
- OUT PKINTERRUPT * int_obj
- )
-{
- NTSTATUS status;
- struct pci_dev *pdev = dev->pdev; /* interrupt resources */
-
-#if (NTDDI_VERSION >= NTDDI_LONGHORN)
-
- IO_CONNECT_INTERRUPT_PARAMETERS params;
- PIO_INTERRUPT_MESSAGE_INFO p_msi_info;
-
- KeInitializeSpinLock( &pdev->isr_lock );
- pdev->n_msi_vectors = 0; // not using MSI/MSI-X
-
- //
- // Vista and later platforms build environment
- //
-
- RtlZeroMemory( &params, sizeof(IO_CONNECT_INTERRUPT_PARAMETERS) );
- if ( !mlx4_is_msi(dev) ) {
- params.Version = CONNECT_FULLY_SPECIFIED;
- goto get_legacy_int;
- }
-
- //
- // try to connect our Interrupt Message Service Rotuine to
- // all Message-Signaled Interrupts our device has been granted,
- // with automatic fallback to a single line-based interrupt.
- //
-
- params.Version = CONNECT_MESSAGE_BASED;
- params.MessageBased.PhysicalDeviceObject = pdev->pdo;
- params.MessageBased.ConnectionContext.Generic = &p_msi_info;
- params.MessageBased.MessageServiceRoutine = misr;
- params.MessageBased.ServiceContext = isr_ctx;
- params.MessageBased.SpinLock = NULL;
- params.MessageBased.SynchronizeIrql = 0;
- params.MessageBased.FloatingSave = FALSE;
- // fallback to line-based ISR if there is no MSI support
- params.MessageBased.FallBackServiceRoutine = isr;
-
- status = IoConnectInterruptEx(&params);
-
- pdev->version = params.Version;
- *int_obj = (PVOID)p_msi_info;
-
- if ( NT_SUCCESS(status) ) {
-
- //
- // It worked, so we're running on Vista or later.
- //
-
- if(params.Version == CONNECT_MESSAGE_BASED) {
- ULONG i;
-
- //
- // Because we succeeded in connecting to one or more Message-Signaled
- // Interrupts, the connection context that was returned was
- // a pointer to an IO_INTERRUPT_MESSAGE_INFO structure.
- //
- pdev->n_msi_vectors = (u8)p_msi_info->MessageCount; // not using MSI/MSI-X
- // print it
- MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,
- ("%s: request_irq: Granted %d MSI vectors ( UnifiedIrql %#x)\n",
- dev->pdev->name, p_msi_info->MessageCount, p_msi_info->UnifiedIrql ));
- for (i=0; i < p_msi_info->MessageCount; ++i) {
- MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,
- ("%s: *** Vector %#x, Affinity %#x, Irql %#x, MsgAddr %I64x, MsgData %#x, Mode %d\n",
- dev->pdev->name,
- p_msi_info->MessageInfo[i].Vector,
- (ULONG)p_msi_info->MessageInfo[i].TargetProcessorSet,
- p_msi_info->MessageInfo[i].Irql,
- p_msi_info->MessageInfo[i].MessageAddress.QuadPart,
- p_msi_info->MessageInfo[i].MessageData,
- p_msi_info->MessageInfo[i].Mode ));
- }
-
- // sanity check
- if (pdev->n_msi_vectors_alloc != pdev->n_msi_vectors) {
- MLX4_PRINT(TRACE_LEVEL_ERROR ,MLX4_DBG_INIT ,
- ("%s: Connected to %d interrupts from %d allocated to us !!!\n",
- dev->pdev->name, pdev->n_msi_vectors, pdev->n_msi_vectors_alloc ));
- }
-
- // fill MSI-X map table
- for (i=0; i < p_msi_info->MessageCount; ++i) {
- pdev->p_msix_map[i].cpu = p_msi_info->MessageInfo[i].TargetProcessorSet;
- }
-
- } else {
- //
- // We are on Vista, but there is no HW MSI support
- // So we are connected to line interrupt
- ASSERT(params.Version == CONNECT_LINE_BASED);
- }
-
-
- } else {
-
- //
- // We are on a legacy system and maybe can proceed
- //
-
- if (params.Version == CONNECT_FULLY_SPECIFIED) {
-
- //
- // use IoConnectInterruptEx to connect our ISR to a
- // line-based interrupt.
- //
-get_legacy_int:
- params.FullySpecified.PhysicalDeviceObject = pdev->pdo;
- params.FullySpecified.InterruptObject = int_obj;
- params.FullySpecified.ServiceRoutine = isr;
- params.FullySpecified.ServiceContext = isr_ctx;
- params.FullySpecified.FloatingSave = FALSE;
- params.FullySpecified.SpinLock = NULL;
-
- if (pdev->int_info.Flags & CM_RESOURCE_INTERRUPT_MESSAGE) {
- // The resource is for a message-based interrupt. Use the u.MessageInterrupt.Translated member of IntResource.
-
- params.FullySpecified.Vector = pdev->int_info.u.MessageInterrupt.Translated.Vector;
- params.FullySpecified.Irql = (KIRQL)pdev->int_info.u.MessageInterrupt.Translated.Level;
- params.FullySpecified.SynchronizeIrql = (KIRQL)pdev->int_info.u.MessageInterrupt.Translated.Level;
- params.FullySpecified.ProcessorEnableMask = g.affinity ?
- g.affinity : pdev->int_info.u.MessageInterrupt.Translated.Affinity;
- } else {
- // The resource is for a line-based interrupt. Use the u.Interrupt member of IntResource.
-
- params.FullySpecified.Vector = pdev->int_info.u.Interrupt.Vector;
- params.FullySpecified.Irql = (KIRQL)pdev->int_info.u.Interrupt.Level;
- params.FullySpecified.SynchronizeIrql = (KIRQL)pdev->int_info.u.Interrupt.Level;
- params.FullySpecified.ProcessorEnableMask = g.affinity ?
- g.affinity : pdev->int_info.u.Interrupt.Affinity;
- }
-
- params.FullySpecified.InterruptMode = (pdev->int_info.Flags & CM_RESOURCE_INTERRUPT_LATCHED ? Latched : LevelSensitive);
- params.FullySpecified.ShareVector = (BOOLEAN)(pdev->int_info.ShareDisposition == CmResourceShareShared);
-
- status = IoConnectInterruptEx(&params);
- pdev->version = params.Version;
- }
- else {
-
- // Something wrong with IoConnectInterruptEx.
- // Lets try the usual way
- status = IoConnectInterrupt(
- int_obj, /* InterruptObject */
- isr, /* ISR */
- isr_ctx, /* ISR context */
- &pdev->isr_lock, /* spinlock */
- pdev->int_info.u.Interrupt.Vector, /* interrupt vector */
- (KIRQL)pdev->int_info.u.Interrupt.Level, /* IRQL */
- (KIRQL)pdev->int_info.u.Interrupt.Level, /* Synchronize IRQL */
- (BOOLEAN)((pdev->int_info.Flags == CM_RESOURCE_INTERRUPT_LATCHED) ?
- Latched : LevelSensitive), /* interrupt type: LATCHED or LEVEL */
- (BOOLEAN)(pdev->int_info.ShareDisposition == CmResourceShareShared), /* vector shared or not */
- g.affinity ? g.affinity : (KAFFINITY)pdev->int_info.u.Interrupt.Affinity, /* interrupt affinity */
- FALSE /* whether to save Float registers */
- );
- pdev->legacy_connect = TRUE;
- }
-
- }
-
-#else
-
- //
- // Legacy (before Vista) platform build environment
- //
-
- UNUSED_PARAM(misr);
-
- KeInitializeSpinLock( &pdev->isr_lock );
- pdev->n_msi_vectors = 0; // not using MSI/MSI-X
-
- status = IoConnectInterrupt(
- int_obj, /* InterruptObject */
- isr, /* ISR */
- isr_ctx, /* ISR context */
- &pdev->isr_lock, /* spinlock */
- pdev->int_info.u.Interrupt.Vector, /* interrupt vector */
- (KIRQL)pdev->int_info.u.Interrupt.Level, /* IRQL */
- (KIRQL)pdev->int_info.u.Interrupt.Level, /* Synchronize IRQL */
- (BOOLEAN)((pdev->int_info.Flags == CM_RESOURCE_INTERRUPT_LATCHED) ?
- Latched : LevelSensitive), /* interrupt type: LATCHED or LEVEL */
- (BOOLEAN)(pdev->int_info.ShareDisposition == CmResourceShareShared), /* vector shared or not */
- g.affinity ? g.affinity : (KAFFINITY)pdev->int_info.u.Interrupt.Affinity, /* interrupt affinity */
- FALSE /* whether to save Float registers */
- );
-
-#endif
-
- if (!NT_SUCCESS(status)) {
- MLX4_PRINT(TRACE_LEVEL_ERROR ,MLX4_DBG_INIT ,
- ("%s: Connect interrupt failed with status %#x, affinity %#x )\n",
- dev->pdev->name, status, g.affinity ? g.affinity : (unsigned int)pdev->int_info.u.Interrupt.Affinity));
- *int_obj = NULL;
- return -EFAULT; /* failed to connect interrupt */
- }
-
- return 0;
-}
-#endif
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/packer.c
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/packer.c (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/packer.c (revision 6862)
@@ -1,212 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Corporation. All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: packer.c 1349 2004-12-16 21:09:43Z roland $
- */
-
-#include "l2w.h"
-#include "ib_pack.h"
-#include "mlx4_debug.h"
-
-#if defined(EVENT_TRACING)
-#ifdef offsetof
-#undef offsetof
-#endif
-#include "packer.tmh"
-#endif
-
-
-static u64 value_read(int offset, int size, u8 *structure)
-{
- switch (size) {
- case 1: return *(u8 *) (structure + offset);
- case 2: return be16_to_cpup((__be16 *) (structure + offset));
- case 4: return be32_to_cpup((__be32 *) (structure + offset));
- case 8: return be64_to_cpup((__be64 *) (structure + offset));
- default:
- MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,( "Field size %d bits not handled\n", size * 8));
- return 0;
- }
-}
-
-/**
- * ib_pack - Pack a structure into a buffer
- * @desc:Array of structure field descriptions
- * @desc_len:Number of entries in @desc
- * @structure:Structure to pack from
- * @buf:Buffer to pack into
- *
- * ib_pack() packs a list of structure fields into a buffer,
- * controlled by the array of fields in @desc.
- */
-void ib_pack(const struct ib_field *desc,
- int desc_len,
- void *structure,
- u8 *buf)
-{
- int i;
-
- for (i = 0; i < desc_len; ++i) {
- if (desc[i].size_bits <= 32) {
- int shift;
- u32 val;
- __be32 mask;
- __be32 *addr;
-
- shift = 32 - desc[i].offset_bits - desc[i].size_bits;
- if (desc[i].struct_size_bytes)
- val = (u32)(value_read((int)desc[i].struct_offset_bytes,
- (int)desc[i].struct_size_bytes,
- structure) << shift);
- else
- val = 0;
-
- mask = cpu_to_be32(((1ull << desc[i].size_bits) - 1) << shift);
- addr = (__be32 *) buf + desc[i].offset_words;
- *addr = (*addr & ~mask) | (cpu_to_be32(val) & mask);
- } else if (desc[i].size_bits <= 64) {
- int shift;
- u64 val;
- __be64 mask;
- __be64 *addr;
-
- shift = 64 - desc[i].offset_bits - desc[i].size_bits;
- if (desc[i].struct_size_bytes)
- val = value_read((int)desc[i].struct_offset_bytes,
- (int)desc[i].struct_size_bytes,
- structure) << shift;
- else
- val = 0;
-
- mask = cpu_to_be64((~0ull >> (64 - desc[i].size_bits)) << shift);
- addr = (__be64 *) ((__be32 *) buf + desc[i].offset_words);
- *addr = (*addr & ~mask) | (cpu_to_be64(val) & mask);
- } else {
- if (desc[i].offset_bits % 8 ||
- desc[i].size_bits % 8) {
- MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,( "Structure field %s of size %d "
- "bits is not byte-aligned\n",
- desc[i].field_name, desc[i].size_bits));
- }
-
- if (desc[i].struct_size_bytes)
- memcpy(buf + desc[i].offset_words * 4 +
- desc[i].offset_bits / 8,
- (u8*)structure + desc[i].struct_offset_bytes,
- desc[i].size_bits / 8);
- else
- memset(buf + desc[i].offset_words * 4 +
- desc[i].offset_bits / 8,
- 0,
- desc[i].size_bits / 8);
- }
- }
-}
-EXPORT_SYMBOL(ib_pack);
-
-static void value_write(int offset, int size, u64 val, u8 *structure)
-{
- switch (size * 8) {
- case 8: *( u8 *) (structure + offset) = (u8)val; break;
- case 16: *(__be16 *) (structure + offset) = cpu_to_be16(val); break;
- case 32: *(__be32 *) (structure + offset) = cpu_to_be32(val); break;
- case 64: *(__be64 *) (structure + offset) = cpu_to_be64(val); break;
- default:
- MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,( "Field size %d bits not handled\n", size * 8));
- }
-}
-
-/**
- * ib_unpack - Unpack a buffer into a structure
- * @desc:Array of structure field descriptions
- * @desc_len:Number of entries in @desc
- * @buf:Buffer to unpack from
- * @structure:Structure to unpack into
- *
- * ib_pack() unpacks a list of structure fields from a buffer,
- * controlled by the array of fields in @desc.
- */
-void ib_unpack(const struct ib_field *desc,
- int desc_len,
- void *buf,
- void *structure)
-{
- int i;
-
- for (i = 0; i < desc_len; ++i) {
- if (!desc[i].struct_size_bytes)
- continue;
-
- if (desc[i].size_bits <= 32) {
- int shift;
- u32 val;
- u32 mask;
- __be32 *addr;
-
- shift = 32 - desc[i].offset_bits - desc[i].size_bits;
- mask = ((1ull << desc[i].size_bits) - 1) << shift;
- addr = (__be32 *) buf + desc[i].offset_words;
- val = (be32_to_cpup(addr) & mask) >> shift;
- value_write((int)desc[i].struct_offset_bytes,
- (int)desc[i].struct_size_bytes,
- val,
- structure);
- } else if (desc[i].size_bits <= 64) {
- int shift;
- u64 val;
- u64 mask;
- __be64 *addr;
-
- shift = 64 - desc[i].offset_bits - desc[i].size_bits;
- mask = (~0ull >> (64 - desc[i].size_bits)) << shift;
- addr = (__be64 *) buf + desc[i].offset_words;
- val = (be64_to_cpup(addr) & mask) >> shift;
- value_write((int)desc[i].struct_offset_bytes,
- (int)desc[i].struct_size_bytes,
- val,
- structure);
- } else {
- if (desc[i].offset_bits % 8 ||
- desc[i].size_bits % 8) {
- MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,( "Structure field %s of size %d "
- "bits is not byte-aligned\n",
- desc[i].field_name, desc[i].size_bits));
- }
-
- memcpy((u8*)structure + desc[i].struct_offset_bytes,
- (u8*)buf + desc[i].offset_words * 4 +
- desc[i].offset_bits / 8,
- desc[i].size_bits / 8);
- }
- }
-}
-EXPORT_SYMBOL(ib_unpack);
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/l2w_debug.c
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/l2w_debug.c (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/l2w_debug.c (revision 6862)
@@ -1,298 +0,0 @@
-#include "l2w.h"
-#include "ev_log.h"
-
-#define MAX_BUFFER_SIZE 256
-
-/*
- * This function sends to Event Log messages with one WCHAR string and several binary parameters.
- * The string will be inserted instead of %2 parameter of the message.
- * Binary parameters will be shown in Dump Area of the message.
- * Binary parameters should be of type LONG.
- */
-VOID
-WriteEventLogEntryStr(
- PVOID pi_pIoObject,
- ULONG pi_ErrorCode,
- ULONG pi_UniqueErrorCode,
- ULONG pi_FinalStatus,
- PWCHAR pi_InsertionStr,
- ULONG pi_nDataItems,
- ...
- )
-/*++
-
-Routine Description:
- Writes an event log entry to the event log.
-
-Arguments:
-
- pi_pIoObject......... The IO object ( driver object or device object ).
- pi_ErrorCode......... The error code.
- pi_UniqueErrorCode... A specific error code.
- pi_FinalStatus....... The final status.
- pi_nDataItems........ Number of data items.
- .
- . data items values
- .
-
-Return Value:
-
- None .
-
---*/
-{ /* WriteEventLogEntryStr */
-
- /* Variable argument list */
- va_list l_Argptr;
- /* Pointer to an error log entry */
- PIO_ERROR_LOG_PACKET l_pErrorLogEntry;
- /* sizeof insertion string */
- int l_Size = (int)((pi_InsertionStr) ? ((wcslen(pi_InsertionStr) + 1) * sizeof( WCHAR )) : 0);
- int l_PktSize =sizeof(IO_ERROR_LOG_PACKET)+pi_nDataItems*sizeof(ULONG);
- int l_TotalSize =l_PktSize +l_Size;
-
- if (pi_pIoObject == NULL) {
- ASSERT(pi_pIoObject != NULL);
- return;
- }
-
- /* Init the variable argument list */
- va_start(l_Argptr, pi_nDataItems);
-
- /* Allocate an error log entry */
- if (l_TotalSize >= ERROR_LOG_MAXIMUM_SIZE - 2)
- l_TotalSize = ERROR_LOG_MAXIMUM_SIZE - 2;
- l_pErrorLogEntry = (PIO_ERROR_LOG_PACKET)IoAllocateErrorLogEntry(
- pi_pIoObject, (UCHAR)l_TotalSize );
-
- /* Check allocation */
- if ( l_pErrorLogEntry != NULL)
- { /* OK */
-
- /* Data item index */
- USHORT l_nDataItem ;
-
- /* Set the error log entry header */
- l_pErrorLogEntry->ErrorCode = pi_ErrorCode;
- l_pErrorLogEntry->DumpDataSize = (USHORT) (pi_nDataItems*sizeof(ULONG));
- l_pErrorLogEntry->SequenceNumber = 0;
- l_pErrorLogEntry->MajorFunctionCode = 0;
- l_pErrorLogEntry->IoControlCode = 0;
- l_pErrorLogEntry->RetryCount = 0;
- l_pErrorLogEntry->UniqueErrorValue = pi_UniqueErrorCode;
- l_pErrorLogEntry->FinalStatus = pi_FinalStatus;
-
- /* Insert the data items */
- for (l_nDataItem = 0; l_nDataItem < pi_nDataItems; l_nDataItem++)
- { /* Inset a data item */
-
- /* Current data item */
- int l_CurDataItem ;
-
- /* Get next data item */
- l_CurDataItem = va_arg( l_Argptr, int);
-
- /* Put it into the data array */
- l_pErrorLogEntry->DumpData[l_nDataItem] = l_CurDataItem ;
-
- } /* Inset a data item */
-
- /* add insertion string */
- if (pi_InsertionStr) {
- char *ptr;
- int sz = min( l_TotalSize - l_PktSize, l_Size );
- l_pErrorLogEntry->NumberOfStrings = 1;
- l_pErrorLogEntry->StringOffset = sizeof(IO_ERROR_LOG_PACKET) + l_pErrorLogEntry->DumpDataSize;
- ptr = (char*)l_pErrorLogEntry + l_pErrorLogEntry->StringOffset;
- memcpy( ptr, pi_InsertionStr, sz );
- *(WCHAR*)&ptr[sz - 2] = (WCHAR)0;
- }
-
- /* Write the packet */
- IoWriteErrorLogEntry(l_pErrorLogEntry);
-
- } /* OK */
-
- /* Term the variable argument list */
- va_end(l_Argptr);
-
-} /* WriteEventLogEntry */
-
-/*
- * This function sends to Event Log messages with various parameters.
- * Every parameter should be coded as a pair: a format specifier and the value.
- * 'pi_nDataItems' presents the number of the pairs.
- *
- * Here is an example:
- *
- * To print a message (from MC file) like:
- *
- * MessageId=0x0006 Facility=MLX4 Severity=Informational SymbolicName=EVENT_MLX4_INFO_TEST
- * Language=English
- * some_long %2, some_short %3, some_byte %4, some_wide_char_str %5, some_ansii_str %6
- *
- * you have to code:
- *
- * WriteEventLogEntryData( pdev->p_self_do, (ULONG)EVENT_MLX4_INFO_TEST, 0, 0, 5,
- * L"%d", long_int, // LONG
- * L"%04x", (ULONG)short_int, // SHORT
- * L"%02x", (ULONG)byte_int, // CHAR
- * L"%s", wide_char_str, // PWCHAR
- * L"%S", ansii_str // PCHAR
- * );
- */
-VOID
-WriteEventLogEntryData(
- PVOID pi_pIoObject,
- ULONG pi_ErrorCode,
- ULONG pi_UniqueErrorCode,
- ULONG pi_FinalStatus,
- ULONG pi_nDataItems,
- ...
- )
-/*++
-
-Routine Description:
- Writes an event log entry to the event log.
-
-Arguments:
-
- pi_pIoObject......... The IO object ( driver object or device object ).
- pi_ErrorCode......... The error code.
- pi_UniqueErrorCode... A specific error code.
- pi_FinalStatus....... The final status.
- pi_nDataItems........ Number of data items (i.e. pairs of data parameters).
- .
- . data items values
- .
-
-Return Value:
-
- None .
-
---*/
-{ /* WriteEventLogEntryData */
-
- /* Variable argument list */
- va_list l_Argptr;
- /* Pointer to an error log entry */
- PIO_ERROR_LOG_PACKET l_pErrorLogEntry;
- /* sizeof insertion string */
- int l_Size = 0;
- /* temp buffer */
- UCHAR l_Buf[ERROR_LOG_MAXIMUM_SIZE - 2];
- /* position in buffer */
- UCHAR * l_Ptr = l_Buf;
- /* Data item index */
- USHORT l_nDataItem ;
- /* total packet size */
- int l_TotalSize;
-
- if (pi_pIoObject == NULL) {
- ASSERT(pi_pIoObject != NULL);
- return;
- }
-
- /* Init the variable argument list */
- va_start(l_Argptr, pi_nDataItems);
-
- /* Create the insertion strings Insert the data items */
- memset( l_Buf, 0, sizeof(l_Buf) );
- for (l_nDataItem = 0; l_nDataItem < pi_nDataItems; l_nDataItem++)
- {
- NTSTATUS status;
- /* Current binary data item */
- int l_CurDataItem ;
- /* Current pointer data item */
- void* l_CurPtrDataItem ;
- /* format specifier */
- WCHAR* l_FormatStr;
- /* the rest of the buffer */
- int l_BufSize = (int)(l_Buf + sizeof(l_Buf)- l_Ptr);
- /* size of insertion string */
- size_t l_StrSize;
-
- /* print as much as we can */
- if ( l_BufSize < 4 )
- break;
-
- /* Get format specifier */
- l_FormatStr = va_arg( l_Argptr, PWCHAR);
-
- /* Get next data item */
- if ( !wcscmp( l_FormatStr, L"%s" ) || !wcscmp( l_FormatStr, L"%S" ) ) {
- l_CurPtrDataItem = va_arg( l_Argptr, PWCHAR);
- /* convert to string */
- status = RtlStringCchPrintfW( (NTSTRSAFE_PWSTR)l_Ptr, l_BufSize>>1, l_FormatStr , l_CurPtrDataItem );
- }
- else {
- l_CurDataItem = va_arg( l_Argptr, int);
- /* convert to string */
- status = RtlStringCchPrintfW( (NTSTRSAFE_PWSTR)l_Ptr, l_BufSize>>1, l_FormatStr , l_CurDataItem );
- }
-
- if (!NT_SUCCESS(status))
- return;
-
- /* prepare the next loop */
- status = RtlStringCbLengthW( (NTSTRSAFE_PWSTR)l_Ptr, l_BufSize, &l_StrSize );
- if (!NT_SUCCESS(status))
- return;
- *(WCHAR*)&l_Ptr[l_StrSize] = (WCHAR)0;
- l_StrSize += 2;
- l_Size = l_Size + (int)l_StrSize;
- l_Ptr = l_Buf + l_Size;
- l_BufSize = (int)(l_Buf + sizeof(l_Buf)- l_Ptr);
-
- } /* Insert a data item */
-
- /* Term the variable argument list */
- va_end(l_Argptr);
-
- /* Allocate an error log entry */
- l_TotalSize =sizeof(IO_ERROR_LOG_PACKET) +l_Size;
- if (l_TotalSize >= ERROR_LOG_MAXIMUM_SIZE - 2) {
- l_TotalSize = ERROR_LOG_MAXIMUM_SIZE - 2;
- l_Size = l_TotalSize - sizeof(IO_ERROR_LOG_PACKET);
- }
- l_pErrorLogEntry = (PIO_ERROR_LOG_PACKET)IoAllocateErrorLogEntry(
- pi_pIoObject, (UCHAR)l_TotalSize );
-
- /* Check allocation */
- if ( l_pErrorLogEntry != NULL)
- { /* OK */
-
- /* Set the error log entry header */
- l_pErrorLogEntry->ErrorCode = pi_ErrorCode;
- l_pErrorLogEntry->DumpDataSize = 0;
- l_pErrorLogEntry->SequenceNumber = 0;
- l_pErrorLogEntry->MajorFunctionCode = 0;
- l_pErrorLogEntry->IoControlCode = 0;
- l_pErrorLogEntry->RetryCount = 0;
- l_pErrorLogEntry->UniqueErrorValue = pi_UniqueErrorCode;
- l_pErrorLogEntry->FinalStatus = pi_FinalStatus;
- l_pErrorLogEntry->NumberOfStrings = l_nDataItem;
- l_pErrorLogEntry->StringOffset = sizeof(IO_ERROR_LOG_PACKET) + l_pErrorLogEntry->DumpDataSize;
- l_Ptr = (UCHAR*)l_pErrorLogEntry + l_pErrorLogEntry->StringOffset;
- if ( l_Size )
- memcpy( l_Ptr, l_Buf, l_Size );
-
- /* Write the packet */
- IoWriteErrorLogEntry(l_pErrorLogEntry);
-
- } /* OK */
-
-} /* WriteEventLogEntryData */
-
-// bsize must be strlen(src)
-// dest must have enough room, i.e. at least (2*strlen(src) + 2) bytes
-void __ansi_to_wchar( USHORT *dest, UCHAR *src, int bsize)
-{
- int i;
-
- for (i=0; i<bsize; ++i)
- *dest++ = *src++;
- *dest = 0;
-}
-
-
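For reference, a minimal usage sketch of the __ansi_to_wchar() helper shown above; the buffer name, contents and sizes are illustrative assumptions, not part of the patch:

    UCHAR  ansi_name[] = "mlx4_bus";
    USHORT wide_name[sizeof(ansi_name)];   /* 2*strlen(src) + 2 bytes, as the comment above requires */

    __ansi_to_wchar( wide_name, ansi_name, (int)strlen( (const char*)ansi_name ) );
    /* wide_name now holds L"mlx4_bus", zero-terminated */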
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/ud_header.c
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/ud_header.c (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/ud_header.c (revision 6862)
@@ -1,388 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Corporation. All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: ud_header.c 1349 2004-12-16 21:09:43Z roland $
- */
-
-#include "l2w.h"
-#include "ib_pack.h"
-#include "mlx4_debug.h"
-
-#if defined(EVENT_TRACING)
-#ifdef offsetof
-#undef offsetof
-#endif
-#include "ud_header.tmh"
-#endif
-
-#define STRUCT_FIELD(header, field) \
- .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
- .struct_size_bytes = sizeof ((struct ib_unpacked_ ## header *) 0)->field, \
- .field_name = #header ":" #field
-
-#define STRUCT_FIELD_INIT(header, field,ow,ob,sb) \
- offsetof(struct ib_unpacked_ ## header, field), \
- sizeof ((struct ib_unpacked_ ## header *) 0)->field, \
- ow,ob,sb, \
- #header ":" #field
-
-#define STRUCT_FIELD_INITR(ow,ob,sb) \
- 0, 0, ow, ob, sb, "reserved"
-
-static const struct ib_field lrh_table[] = {
- { STRUCT_FIELD_INIT(lrh, virtual_lane, 0, 0, 4) },
- { STRUCT_FIELD_INIT(lrh, link_version, 0, 4, 4) },
- { STRUCT_FIELD_INIT(lrh, service_level, 0, 8, 4) },
- { STRUCT_FIELD_INITR(0,12,2) },
- { STRUCT_FIELD_INIT(lrh, link_next_header, 0, 14, 2) },
- { STRUCT_FIELD_INIT(lrh, destination_lid, 0, 16, 16) },
- { STRUCT_FIELD_INITR(1,0,5) },
- { STRUCT_FIELD_INIT(lrh, packet_length, 1, 5, 11) },
- { STRUCT_FIELD_INIT(lrh, source_lid, 1, 16, 16) }
-};
-
-static const struct ib_field eth_table[] = {
- { STRUCT_FIELD_INIT(eth, dmac_h, 0, 0, 32) },
- { STRUCT_FIELD_INIT(eth, dmac_l, 1, 0, 16) },
- { STRUCT_FIELD_INIT(eth, smac_h, 1, 16,16) },
- { STRUCT_FIELD_INIT(eth, smac_l, 2, 0 ,32) },
- { STRUCT_FIELD_INIT(eth, type, 3, 0, 16)}
-};
-
-
-static const struct ib_field grh_table[] = {
- { STRUCT_FIELD_INIT(grh, ip_version, 0, 0, 4) },
- { STRUCT_FIELD_INIT(grh, traffic_class, 0, 4, 8) },
- { STRUCT_FIELD_INIT(grh, flow_label, 0, 12, 20) },
- { STRUCT_FIELD_INIT(grh, payload_length, 1, 0, 16) },
- { STRUCT_FIELD_INIT(grh, next_header, 1, 16, 8) },
- { STRUCT_FIELD_INIT(grh, hop_limit, 1, 24, 8) },
- { STRUCT_FIELD_INIT(grh, source_gid, 2, 0, 128) },
- { STRUCT_FIELD_INIT(grh, destination_gid, 6, 0, 128) }
-};
-
-static const struct ib_field bth_table[] = {
- { STRUCT_FIELD_INIT(bth, opcode, 0, 0, 8) },
- { STRUCT_FIELD_INIT(bth, solicited_event, 0, 8, 1) },
- { STRUCT_FIELD_INIT(bth, mig_req, 0, 9, 1) },
- { STRUCT_FIELD_INIT(bth, pad_count, 0, 10, 2) },
- { STRUCT_FIELD_INIT(bth, transport_header_version, 0, 12, 4) },
- { STRUCT_FIELD_INIT(bth, pkey, 0, 16, 16) },
- { STRUCT_FIELD_INITR(1,0,8) },
- { STRUCT_FIELD_INIT(bth, destination_qpn, 1, 8, 24) },
- { STRUCT_FIELD_INIT(bth, ack_req, 2, 0, 1) },
- { STRUCT_FIELD_INITR(2,1,7) },
- { STRUCT_FIELD_INIT(bth, psn, 2, 8, 24) }
-};
-
-static const struct ib_field deth_table[] = {
- { STRUCT_FIELD_INIT(deth, qkey, 0, 0, 32) },
- { STRUCT_FIELD_INITR(1,0,8) },
- { STRUCT_FIELD_INIT(deth, source_qpn, 1, 8, 24) }
-};
-
-/**
- * ib_ud_header_init - Initialize UD header structure
- * @payload_bytes:Length of packet payload
- * @grh_present:GRH flag (if non-zero, GRH will be included)
- * @header:Structure to initialize
- *
- * ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header,
- * lrh.packet_length, grh.ip_version, grh.payload_length,
- * grh.next_header, bth.opcode, bth.pad_count and
- * bth.transport_header_version fields of a &struct ib_ud_header given
- * the payload length and whether a GRH will be included.
- */
-void ib_ud_header_init(int payload_bytes,
- int grh_present,
- struct ib_ud_header *header)
-{
- int header_len;
- u16 packet_length;
-
- memset(header, 0, sizeof *header);
-
- header_len =
- IB_LRH_BYTES +
- IB_BTH_BYTES +
- IB_DETH_BYTES;
- if (grh_present) {
- header_len += IB_GRH_BYTES;
- }
-
- header->lrh.link_version = 0;
- header->lrh.link_next_header =
- grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
- packet_length = (u16)((IB_LRH_BYTES +
- IB_BTH_BYTES +
- IB_DETH_BYTES +
- payload_bytes +
- 4 + /* ICRC */
- 3) / 4); /* round up */
-
- header->grh_present = grh_present;
- if (grh_present) {
- packet_length += IB_GRH_BYTES / 4;
- header->grh.ip_version = 6;
- header->grh.payload_length =
- cpu_to_be16((IB_BTH_BYTES +
- IB_DETH_BYTES +
- payload_bytes +
- 4 + /* ICRC */
- 3) & ~3); /* round up */
- header->grh.next_header = 0x1b;
- }
-
- header->lrh.packet_length = cpu_to_be16(packet_length);
-
- if (header->immediate_present)
- header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
- else
- header->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
- header->bth.pad_count = (u8)((4 - payload_bytes) & 3);
- header->bth.transport_header_version = 0;
-}
-EXPORT_SYMBOL(ib_ud_header_init);
-
-/**
- * ib_ud_header_pack - Pack UD header struct into wire format
- * @header:UD header struct
- * @buf:Buffer to pack into
- *
- * ib_ud_header_pack() packs the UD header structure @header into wire
- * format in the buffer @buf.
- */
-int ib_ud_header_pack(struct ib_ud_header *header,
- u8 *buf)
-{
- int len = 0;
-
- ib_pack(lrh_table, ARRAY_SIZE(lrh_table),
- &header->lrh, buf);
- len += IB_LRH_BYTES;
-
- if (header->grh_present) {
- ib_pack(grh_table, ARRAY_SIZE(grh_table),
- &header->grh, buf + len);
- len += IB_GRH_BYTES;
- }
-
- ib_pack(bth_table, ARRAY_SIZE(bth_table),
- &header->bth, buf + len);
- len += IB_BTH_BYTES;
-
- ib_pack(deth_table, ARRAY_SIZE(deth_table),
- &header->deth, buf + len);
- len += IB_DETH_BYTES;
-
- if (header->immediate_present) {
- memcpy(buf + len, &header->immediate_data, sizeof header->immediate_data);
- len += sizeof header->immediate_data;
- }
-
- return len;
-}
-EXPORT_SYMBOL(ib_ud_header_pack);
-
-/**
- * ib_ud_header_unpack - Unpack UD header struct from wire format
- * @header:UD header struct
- * @buf:Buffer to unpack from
- *
- * ib_ud_header_unpack() unpacks the UD header structure @header from wire
- * format in the buffer @buf.
- */
-int ib_ud_header_unpack(u8 *buf,
- struct ib_ud_header *header)
-{
- ib_unpack(lrh_table, ARRAY_SIZE(lrh_table),
- buf, &header->lrh);
- buf += IB_LRH_BYTES;
-
- if (header->lrh.link_version != 0) {
- MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,( "Invalid LRH.link_version %d\n",
- header->lrh.link_version));
- return -EINVAL;
- }
-
- switch (header->lrh.link_next_header) {
- case IB_LNH_IBA_LOCAL:
- header->grh_present = 0;
- break;
-
- case IB_LNH_IBA_GLOBAL:
- header->grh_present = 1;
- ib_unpack(grh_table, ARRAY_SIZE(grh_table),
- buf, &header->grh);
- buf += IB_GRH_BYTES;
-
- if (header->grh.ip_version != 6) {
- MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,( "Invalid GRH.ip_version %d\n",
- header->grh.ip_version));
- return -EINVAL;
- }
- if (header->grh.next_header != 0x1b) {
- MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,( "Invalid GRH.next_header 0x%02x\n",
- header->grh.next_header));
- return -EINVAL;
- }
- break;
-
- default:
- MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,( "Invalid LRH.link_next_header %d\n",
- header->lrh.link_next_header));
- return -EINVAL;
- }
-
- ib_unpack(bth_table, ARRAY_SIZE(bth_table),
- buf, &header->bth);
- buf += IB_BTH_BYTES;
-
- switch (header->bth.opcode) {
- case IB_OPCODE_UD_SEND_ONLY:
- header->immediate_present = 0;
- break;
- case IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE:
- header->immediate_present = 1;
- break;
- default:
- MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,( "Invalid BTH.opcode 0x%02x\n",
- header->bth.opcode));
- return -EINVAL;
- }
-
- if (header->bth.transport_header_version != 0) {
- MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,( "Invalid BTH.transport_header_version %d\n",
- header->bth.transport_header_version));
- return -EINVAL;
- }
-
- ib_unpack(deth_table, ARRAY_SIZE(deth_table),
- buf, &header->deth);
- buf += IB_DETH_BYTES;
-
- if (header->immediate_present)
- memcpy(&header->immediate_data, buf, sizeof header->immediate_data);
-
- return 0;
-}
-EXPORT_SYMBOL(ib_ud_header_unpack);
-
-/**
- * ib_rdmaoe_ud_header_init - Initialize UD header structure
- * @payload_bytes:Length of packet payload
- * @grh_present:GRH flag (if non-zero, GRH will be included)
- * @header:Structure to initialize
- *
- * ib_rdmaoe_ud_header_init() initializes the grh.ip_version, grh.payload_length,
- * grh.next_header, bth.opcode, bth.pad_count and
- * bth.transport_header_version fields of a &struct eth_ud_header given
- * the payload length and whether a GRH will be included.
- */
-void ib_rdmaoe_ud_header_init(int payload_bytes,
- int grh_present,
- struct eth_ud_header *header)
-{
- int header_len;
-
- memset(header, 0, sizeof *header);
-
- header_len =
- sizeof header->eth +
- IB_BTH_BYTES +
- IB_DETH_BYTES;
- if (grh_present)
- header_len += IB_GRH_BYTES;
-
- header->grh_present = grh_present;
- if (grh_present) {
- header->grh.ip_version = 6;
- header->grh.payload_length =
- cpu_to_be16((IB_BTH_BYTES +
- IB_DETH_BYTES +
- payload_bytes +
- 4 + /* ICRC */
- 3) & ~3); /* round up */
- header->grh.next_header = 0x1b;
- }
-
- if (header->immediate_present)
- header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
- else
- header->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
- header->bth.pad_count =(u8) ((4 - payload_bytes) & 3);
- header->bth.transport_header_version = 0;
-}
-
-
-
-/**
- * rdmaoe_ud_header_pack - Pack UD header struct into eth wire format
- * @header:UD header struct
- * @buf:Buffer to pack into
- *
- * rdmaoe_ud_header_pack() packs the UD header structure @header into wire
- * format in the buffer @buf.
- */
-int rdmaoe_ud_header_pack(struct eth_ud_header *header,
- void *buf)
-{
- int len = 0;
-
- ib_pack(eth_table, ARRAY_SIZE(eth_table),
- &header->eth, buf);
- len += IB_ETH_BYTES;
-
- if (header->grh_present) {
- ib_pack(grh_table, ARRAY_SIZE(grh_table),
- &header->grh, (u8*)buf + len);
- len += IB_GRH_BYTES;
- }
-
- ib_pack(bth_table, ARRAY_SIZE(bth_table),
- &header->bth, (u8*)buf + len);
- len += IB_BTH_BYTES;
-
- ib_pack(deth_table, ARRAY_SIZE(deth_table),
- &header->deth, (u8*)buf + len);
- len += IB_DETH_BYTES;
-
- if (header->immediate_present) {
- memcpy((u8*)buf + len, &header->immediate_data,
- sizeof header->immediate_data);
- len += sizeof header->immediate_data;
- }
-
- return len;
-}
-
-
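For reference, a short sketch of how the moved UD-header helpers are typically combined; the payload length and the address-vector fields filled by the caller are illustrative assumptions:

    struct ib_ud_header hdr;
    u8  wire_buf[IB_LRH_BYTES + IB_GRH_BYTES + IB_BTH_BYTES + IB_DETH_BYTES];
    int wire_len;

    ib_ud_header_init( 256, 1 /* GRH present */, &hdr );   /* fills LRH/GRH/BTH defaults */
    /* caller fills hdr.lrh.destination_lid, hdr.bth.destination_qpn, hdr.deth.qkey, ... */
    wire_len = ib_ud_header_pack( &hdr, wire_buf );        /* returns the packed length */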
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/l2w_umem.c
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/l2w_umem.c (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/l2w_umem.c (revision 6862)
@@ -1,186 +0,0 @@
-#include <mlx4_debug.h>
-#include "l2w.h"
-#include "ib_verbs.h"
-
-#if defined (EVENT_TRACING)
-#ifdef offsetof
-#undef offsetof
-#endif
-#include "l2w_umem.tmh"
-#endif
-
-/**
- * ib_umem_release - release memory pinned with ib_umem_get
- * @umem: umem struct to release
- */
-void ib_umem_release(struct ib_umem *p_ib_umem)
-{
- MLX4_ENTER(MLX4_DBG_MEMORY);
- if (p_ib_umem->secure_handle) {
- __try {
- MmUnsecureVirtualMemory( p_ib_umem->secure_handle );
- p_ib_umem->secure_handle = NULL;
- }
- __except (EXCEPTION_EXECUTE_HANDLER) {
- NTSTATUS Status = GetExceptionCode();
- UNUSED_PARAM_WOWPP(Status);
- MLX4_PRINT(TRACE_LEVEL_ERROR ,MLX4_DBG_MEMORY ,
- ("Exception 0x%x on MmUnsecureVirtualMemory(), addr %I64x, size %I64x, seg_num %d, nr_pages %d\n",
- Status, p_ib_umem->iobuf.va, (u64)p_ib_umem->iobuf.size,
- p_ib_umem->iobuf.seg_num, p_ib_umem->iobuf.nr_pages ));
- }
- }
- if (p_ib_umem->iobuf_used)
- iobuf_deregister_with_cash(&p_ib_umem->iobuf);
- kfree(p_ib_umem);
- MLX4_EXIT(MLX4_DBG_MEMORY);
-}
-
-
-/**
- * ib_umem_get - Pin and DMA map userspace memory.
- * @context: userspace context to pin memory for
- * @addr: userspace virtual address to start at
- * @size: length of region to pin
- * @access: IB_ACCESS_xxx flags for memory being pinned
- */
-struct ib_umem *ib_umem_get(struct ib_ucontext *context, u64 addr,
- size_t size, enum ib_access_flags access, boolean_t secure)
-{
- int err;
- struct ib_umem *p_ib_umem;
-
- MLX4_ENTER(MLX4_DBG_MEMORY);
-
- // create the object
- p_ib_umem = kzalloc(sizeof *p_ib_umem, GFP_KERNEL);
- if (!p_ib_umem)
- goto err_nomem;
-
- p_ib_umem->p_uctx = context;
- p_ib_umem->page_size = PAGE_SIZE;
-
- // register the memory
- iobuf_init( addr, (u64)size, !!context, &p_ib_umem->iobuf);
- err = iobuf_register_with_cash( addr, (u64)size, !!context,
- &access, &p_ib_umem->iobuf );
- if (err)
- goto err_reg_mem;
- p_ib_umem->iobuf_used = TRUE;
-
- // TODO: map the memory for DMA
-
- // secure memory
- if (!context || !secure)
- goto done;
- __try {
- p_ib_umem->secure_handle = MmSecureVirtualMemory (
- (PVOID)(ULONG_PTR)addr, size,
- (access & IB_ACCESS_LOCAL_WRITE) ? PAGE_READWRITE : PAGE_READONLY );
- if (p_ib_umem->secure_handle == NULL)
- goto err_secure;
- }
- __except (EXCEPTION_EXECUTE_HANDLER) {
- NTSTATUS Status = GetExceptionCode();
- UNUSED_PARAM_WOWPP(Status);
- MLX4_PRINT(TRACE_LEVEL_ERROR ,MLX4_DBG_MEMORY ,
- ("Exception 0x%x on MmSecureVirtualMemory(), addr %I64x, size %I64x, access %#x\n",
- Status, addr, (u64)size, access ));
- goto err_secure;
- }
- goto done;
-
-err_secure:
- iobuf_deregister(&p_ib_umem->iobuf);
-
-err_reg_mem:
- kfree(p_ib_umem);
-
-err_nomem:
- p_ib_umem = ERR_PTR(-ENOMEM);
-
-done:
- MLX4_EXIT(MLX4_DBG_MEMORY);
- return p_ib_umem;
-}
-
-int ib_umem_page_count(struct ib_umem *p_ib_umem)
-{
- return (int)p_ib_umem->iobuf.nr_pages;
-}
-
-dma_addr_t ib_umem_get_dma(struct ib_umem *p_ib_umem)
-{
- u64 pages[1] = { 0 };
- iobuf_iter_t iobuf_iter;
- dma_addr_t dma_addr = { 0, 0 , 0 };
-
- iobuf_iter_init( &p_ib_umem->iobuf, &iobuf_iter );
- iobuf_get_tpt_seg( &p_ib_umem->iobuf, &iobuf_iter, 1, pages );
- // TODO: convert phys address to DMA one
- dma_addr.da = pages[0];
-
- return dma_addr;
-}
-
-
-// Returns: 0 on success, -ENOMEM or -EACCES or -EFAULT on error
-int ib_umem_map(
- IN u64 va,
- IN u64 size,
- IN ib_access_t acc,
- OUT PMDL *mdl,
- OUT void **kva)
-{
- PMDL p_mdl;
- int rc = 0;
- LOCK_OPERATION lock_op = (acc & IB_AC_LOCAL_WRITE) ? IoModifyAccess : IoReadAccess;
-
- p_mdl = IoAllocateMdl( (PVOID)(ULONG_PTR)va, (ULONG)size, FALSE,FALSE,NULL);
- if (p_mdl == NULL) {
- rc = -ENOMEM;
- goto err_alloc_mdl;
- }
-
- __try {
- MmProbeAndLockPages( p_mdl, UserMode, lock_op ); /* lock memory */
- }
- __except (EXCEPTION_EXECUTE_HANDLER) {
- MLX4_PRINT(TRACE_LEVEL_ERROR, MLX4_DBG_MEMORY,
- ("MOSAL_iobuf_register: Exception 0x%x on MmProbeAndLockPages(), va %I64d, sz %I64d\n",
- GetExceptionCode(), va, size));
- rc = -EACCES;
- goto err_probe;
- }
-
- *kva = MmMapLockedPagesSpecifyCache( p_mdl,
- KernelMode, MmNonCached, NULL, FALSE, NormalPagePriority );
- if (*kva == NULL) {
- MLX4_PRINT(TRACE_LEVEL_ERROR ,MLX4_DBG_MEMORY ,("MmMapLockedPagesSpecifyCache failed\n"));
- rc = -EFAULT;
- goto err_map;
- }
-
- *mdl = p_mdl;
- return 0;
-
-err_map:
- MmUnlockPages(p_mdl);
-err_probe:
- IoFreeMdl(p_mdl);
-err_alloc_mdl:
- return rc;
-}
-
-void ib_umem_unmap(
- IN PMDL p_mdl,
- IN void *kva)
-{
- if (kva) {
- MmUnmapLockedPages( kva, p_mdl );
- MmUnlockPages(p_mdl);
- IoFreeMdl(p_mdl);
- }
-}
-
-
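For reference, a hedged sketch of the ib_umem_get()/ib_umem_release() pairing shown above; p_uctx, user_va and length are illustrative, and the error handling assumes the usual ERR_PTR()/IS_ERR() helpers from the l2w layer:

    struct ib_umem *umem;

    umem = ib_umem_get( p_uctx, user_va, length, IB_ACCESS_LOCAL_WRITE, TRUE /* secure */ );
    if (IS_ERR(umem))
        return PTR_ERR(umem);                    /* this implementation only returns -ENOMEM */
    /* ... build the MTT from ib_umem_page_count(umem) pages and register with the HCA ... */
    ib_umem_release( umem );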
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/l2w_radix.c
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/l2w_radix.c (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/l2w_radix.c (revision 6862)
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
- *
- * This software is available to you under the OpenIB.org BSD license
- * below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: radix.c 1611 2006-08-20 14:48:55Z sleybo $
- */
-
-#include "l2w.h"
-#include "errno.h"
-
-int radix_tree_insert(struct radix_tree_root *root,
- unsigned long index, void *item)
-{
- if ( NULL == cl_map_insert( &root->map, (const uint64_t)index, item ) )
- return -EFAULT;
- return 0;
-}
-
-void *radix_tree_lookup(struct radix_tree_root *root,
- unsigned long index)
-{
- void* item = cl_map_get( &root->map, (const uint64_t)index );
- return item;
-}
-
-void *radix_tree_delete(struct radix_tree_root *root,
- unsigned long index)
-{
- void* item = cl_map_remove( &root->map, (const uint64_t)index );
- return item;
-}
-
-cl_status_t radix_tree_create(struct radix_tree_root *root,
- gfp_t gfp_mask)
-{
-#define MIN_ITEMS 32
- cl_status_t cl_status;
- UNUSED_PARAM(gfp_mask);
-
- cl_map_construct( &root->map );
- cl_status = cl_map_init( &root->map, MIN_ITEMS );
- return cl_status;
-}
-
-void radix_tree_destroy(struct radix_tree_root *root )
-{
- cl_map_destroy( &root->map );
-}
-
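For reference, a minimal sketch of the complib-backed radix-tree wrappers above; qpn and p_qp are illustrative, and CL_SUCCESS is the usual complib success status:

    struct radix_tree_root qp_tree;

    if (radix_tree_create( &qp_tree, GFP_KERNEL ) != CL_SUCCESS)
        return -ENOMEM;
    if (radix_tree_insert( &qp_tree, qpn, p_qp ))          /* returns -EFAULT on failure */
        goto err;
    p_qp = radix_tree_lookup( &qp_tree, qpn );
    radix_tree_delete( &qp_tree, qpn );
    radix_tree_destroy( &qp_tree );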
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/l2w_memory.c
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/l2w_memory.c (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/l2w_memory.c (revision 6862)
@@ -1,173 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Corporation. All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: mt_memory.c 2020 2007-05-01 09:29:10Z leonid $
- */
-#include "l2w.h"
-#include <mlx4_debug.h>
-
-#if defined (EVENT_TRACING)
-#ifdef offsetof
-#undef offsetof
-#endif
-#include "l2w_memory.tmh"
-#endif
-
-
-void st_dev_add_cont_mem_stat( PMLX4_ST_DEVICE p_stat, ULONG size );
-void st_dev_rmv_cont_mem_stat( PMLX4_ST_DEVICE p_stat, ULONG size );
-
-void *alloc_cont_mem(
- IN struct pci_dev *pdev,
- IN unsigned long size,
- OUT dma_addr_t*p_dma_addr)
-{
- void *va = NULL;
- PHYSICAL_ADDRESS pa = {0};
-
- ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
-
- memset( p_dma_addr, 0, sizeof(dma_addr_t) );
- if (!size)
- goto end;
-
-//
-// DmaOperations->AllocateCommonBuffer can get stuck for a long time
-// when there is not enough contiguous memory
-//
-
-#ifdef SUPPORT_DMA_MEMORY
-
- {
- DMA_ADAPTER *p_adapter = pdev->p_dma_adapter;
-
- va = p_adapter->DmaOperations->AllocateCommonBuffer(
- p_adapter, size, &pa, FALSE );
- if (va) {
- p_dma_addr->da = pa.QuadPart;
- p_dma_addr->va = va;
- p_dma_addr->sz = (ULONG)size;
- st_dev_add_cont_mem_stat( pdev->p_stat, size );
- }
- }
-
-#else
-
- {
- PHYSICAL_ADDRESS la = {0}, ha = {(u64)(-1I64)};
-
- va = MmAllocateContiguousMemorySpecifyCache( (SIZE_T)size, la, ha, pa, MmCached );
- if (va) {
- pa = MmGetPhysicalAddress( va );
- // TODO: convert physical address to a DMA one
- p_dma_addr->da = pa.QuadPart;
- p_dma_addr->va = va;
- p_dma_addr->sz = (ULONG)size;
- st_dev_add_cont_mem_stat( pdev->p_stat, size );
- }
- }
-
-#endif
-
-end:
- if (!va)
- MLX4_PRINT(TRACE_LEVEL_ERROR, MLX4_DBG_MEMORY,
- ("%s: AllocateCommonBuffer: failed to allocate contiguous %#x bytes\n",pdev->name, size));
- return va;
-}
-
-void free_cont_mem(
- IN struct pci_dev *pdev,
- IN dma_addr_t*p_dma_addr)
-{
-#ifdef SUPPORT_DMA_MEMORY
-
- {
- DMA_ADAPTER *p_adapter = pdev->p_dma_adapter;
- PHYSICAL_ADDRESS pa;
-
- ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
- pa.QuadPart = p_dma_addr->da;
- p_adapter->DmaOperations->FreeCommonBuffer(
- p_adapter, p_dma_addr->sz, pa, p_dma_addr->va, FALSE );
- st_dev_rmv_cont_mem_stat( pdev->p_stat, p_dma_addr->sz );
- }
-
-#else
-
- {
- KIRQL old_irql = 0, cur_irql = KeGetCurrentIrql();
-
- ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
- if (cur_irql < APC_LEVEL)
- KeRaiseIrql( APC_LEVEL, &old_irql );
- MmFreeContiguousMemory( p_dma_addr->va );
- st_dev_rmv_cont_mem_stat( pdev->p_stat, p_dma_addr->sz );
- if (cur_irql < APC_LEVEL)
- KeLowerIrql( old_irql );
- }
-
-#endif
-}
-
-void *
-dma_alloc_coherent( struct mlx4_dev **dev, size_t size,
- dma_addr_t *p_dma_addr, gfp_t gfp )
-{
- UNUSED_PARAM(gfp);
-
- if (!size)
- return NULL;
- return alloc_cont_mem( (*dev)->pdev, (unsigned long)size, p_dma_addr );
-}
-
-void
-dma_free_coherent( struct mlx4_dev **dev, size_t size,
- void *vaddr, dma_addr_t dma_addr)
-{
- UNUSED_PARAM(size);
- UNUSED_PARAM(vaddr);
- ASSERT(size == dma_addr.sz);
- ASSERT(vaddr == dma_addr.va);
- free_cont_mem( (*dev)->pdev, &dma_addr );
-}
-
-void
-pci_free_consistent( struct pci_dev *pdev, size_t size,
- void *vaddr, dma_addr_t dma_addr)
-{
- dma_free_coherent( &pdev->dev, size, vaddr, dma_addr );
-}
-
-
-
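For reference, a short sketch of the coherent-allocation wrappers above; mdev (a struct mlx4_dev pointer) and buf_size are illustrative assumptions:

    dma_addr_t dma;
    void      *va;

    va = dma_alloc_coherent( &mdev, buf_size, &dma, GFP_KERNEL );
    if (!va)
        return -ENOMEM;
    /* the hardware sees dma.da, the driver uses dma.va (== va) */
    dma_free_coherent( &mdev, buf_size, va, dma );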
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/SOURCES
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/SOURCES (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/core/SOURCES (revision 6862)
@@ -19,17 +19,10 @@
cache.c \
device.c \
iobuf.c \
- l2w.c \
- l2w_radix.c \
- l2w_debug.c \
- l2w_memory.c \
- l2w_umem.c \
pa_cash.c \
- packer.c \
- ud_header.c \
verbs.c \
-INCLUDES=..;..\inc;..\..\inc;..\net;..\..\..\..\..\inc;..\..\..\..\..\inc\kernel;
+INCLUDES=..;..\inc;..\..\inc;..\net;..\..\..\..\..\inc;..\..\..\..\..\inc\kernel;..\..\..\..\..\inc\kernel\l2w;
C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS -D__LITTLE_ENDIAN -DUSE_WDM_INTERRUPTS
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/ib/SOURCES
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/ib/SOURCES (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/ib/SOURCES (revision 6862)
@@ -23,7 +23,7 @@
qp.c \
srq.c
-INCLUDES=..;..\inc;..\..\inc;..\core\$O;..\..\..\inc;..\..\..\..\..\inc;..\..\..\..\..\inc\kernel;
+INCLUDES=..;..\inc;..\..\inc;..\core\$O;..\..\..\inc;..\..\..\..\..\inc;..\..\..\..\..\inc\kernel;..\..\..\..\..\inc\kernel\l2w;
C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS -D__LITTLE_ENDIAN
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/drv/sources
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/drv/sources (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/drv/sources (revision 6862)
@@ -43,7 +43,7 @@
C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS -D__LITTLE_ENDIAN -DUSE_WDM_INTERRUPTS
-INCLUDES=..;..\inc;..\..\inc;..\core;..\..\..\inc;..\..\..\..\..\inc;..\..\..\..\..\inc\kernel;..\core;..\core\$O
+INCLUDES=..;..\inc;..\..\inc;..\core;..\..\..\inc;..\..\..\..\..\inc;..\..\..\..\..\inc\kernel;..\core;..\core\$O;..\..\..\..\..\inc\kernel\l2w;
TARGETLIBS= $(TARGETLIBS) \
@@ -51,7 +51,8 @@
$(LIBPATH)\*\complib.lib \
$(LIBPATH)\*\mlx4_core.lib \
$(LIBPATH)\*\mlx4_ib.lib \
- $(LIBPATH)\*\mlx4_net.lib
+ $(LIBPATH)\*\mlx4_net.lib \
+ $(LIBPATH)\*\l2w.lib
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/drv/drv.c
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/drv/drv.c (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/bus/drv/drv.c (revision 6862)
@@ -1341,6 +1341,9 @@
KeInitializeEvent(&pVipBusIfc->NicData.ConfigChangeEvent, SynchronizationEvent, TRUE);
}
+
+//#define DONT_START_ON_BOOT
+
NTSTATUS
EvtDriverDeviceAdd(
IN WDFDRIVER Driver,
@@ -1380,11 +1383,24 @@
int bus = 0, dev = 0, function= 0;
UCHAR *ptr;
+
UNREFERENCED_PARAMETER(Driver);
PAGED_CODE ();
MLX4_ENTER(MLX4_DBG_DRV);
+#ifdef DONT_START_ON_BOOT
+ {
+ int QueryTimeIncrement = KeQueryTimeIncrement();
+ LARGE_INTEGER Ticks;
+
+ KeQueryTickCount(&Ticks);
+ if (Ticks.QuadPart * QueryTimeIncrement / 10000 < 30000) // dividing by 10,000 converts 100ns units to ms
+ {
+ return STATUS_NO_MEMORY;
+ }
+ }
+#endif
//
// register PnP & Power stuff
//
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_pcipool.h
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_pcipool.h (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_pcipool.h (revision 6862)
@@ -1,102 +0,0 @@
-#pragma once
-
-typedef struct pci_pool {
- size_t size;
- struct mlx4_dev * mdev;
- char name [32];
- NPAGED_LOOKASIDE_LIST pool_hdr;
-} pci_pool_t;
-
-// taken from dmapool.c
-
-/**
-* pci_pool_create - Creates a pool of consistent memory blocks, for dma.
-* @name: name of pool, for diagnostics
-* @mdev: device that will be doing the DMA
-* @size: size of the blocks in this pool.
-* @align: alignment requirement for blocks; must be a power of two
-* @allocation: returned blocks won't cross this boundary (or zero)
-* Context: !in_interrupt()
-*
-* Returns a dma allocation pool with the requested characteristics, or
-* null if one can't be created. Given one of these pools, dma_pool_alloc()
-* may be used to allocate memory. Such memory will all have "consistent"
-* DMA mappings, accessible by the device and its driver without using
-* cache flushing primitives. The actual size of blocks allocated may be
-* larger than requested because of alignment.
-*
-* If allocation is nonzero, objects returned from dma_pool_alloc() won't
-* cross that size boundary. This is useful for devices which have
-* addressing restrictions on individual DMA transfers, such as not crossing
-* boundaries of 4KBytes.
-*/
-
-pci_pool_t *
-pci_pool_create (const char *name, struct pci_dev *pdev,
- size_t size, size_t align, size_t allocation);
-
-/**
- * dma_pool_alloc - get a block of consistent memory
- * @pool: dma pool that will produce the block
- * @mem_flags: GFP_* bitmask
- * @handle: pointer to dma address of block
- *
- * This returns the kernel virtual address of a currently unused block,
- * and reports its dma address through the handle.
- * If such a memory block can't be allocated, null is returned.
- */
-static inline void *
-pci_pool_alloc (pci_pool_t *pool, int mem_flags, dma_addr_t *handle)
-{
- PHYSICAL_ADDRESS pa;
- void * ptr;
- UNREFERENCED_PARAMETER(mem_flags);
-
- ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL );
-
- ptr = ExAllocateFromNPagedLookasideList( &pool->pool_hdr );
- if (ptr != NULL) {
- pa = MmGetPhysicalAddress( ptr );
- // TODO: convert physical address to a DMA one
- handle->da = pa.QuadPart;
- handle->va = ptr;
- handle->sz = 0; /* not known here */
- }
- return ptr;
-}
-
-
-/**
-* dma_pool_free - put block back into dma pool
-* @pool: the dma pool holding the block
-* @vaddr: virtual address of block
-* @dma: dma address of block
-*
-* Caller promises neither device nor driver will again touch this block
-* unless it is first re-allocated.
-*/
-static inline void
-pci_pool_free (pci_pool_t *pool, void *vaddr, dma_addr_t dma)
-{
- UNREFERENCED_PARAMETER(dma);
- ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL );
- ExFreeToNPagedLookasideList( &pool->pool_hdr, vaddr );
-}
-
-
-
-/**
- * pci_pool_destroy - destroys a pool of dma memory blocks.
- * @pool: dma pool that will be destroyed
- * Context: !in_interrupt()
- *
- * Caller guarantees that no more memory from the pool is in use,
- * and that nothing will try to use the pool after this call.
- */
-static inline void
-pci_pool_destroy (pci_pool_t *pool)
-{
- ExDeleteNPagedLookasideList( &pool->pool_hdr );
- ExFreePool( pool);
-}
-
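For reference, a hedged usage sketch of the lookaside-list based pci_pool above; the pool name, pdev, block size and alignment are illustrative assumptions:

    pci_pool_t *pool;
    dma_addr_t  dma;
    void       *block;

    pool = pci_pool_create( "mlx4_mailbox", pdev, block_size, block_size, 0 );
    if (!pool)
        return -ENOMEM;
    block = pci_pool_alloc( pool, GFP_KERNEL, &dma );   /* dma.da holds the bus address */
    /* ... */
    pci_pool_free( pool, block, dma );
    pci_pool_destroy( pool );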
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_radix.h
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_radix.h (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_radix.h (revision 6862)
@@ -1,26 +0,0 @@
-#pragma once
-
-#include <complib/cl_map.h>
-
-struct radix_tree_root {
- cl_map_t map;
-};
-
-int radix_tree_insert(struct radix_tree_root *root,
- unsigned long index, void *item);
-
-void *radix_tree_lookup(struct radix_tree_root *root,
- unsigned long index);
-
-void *radix_tree_delete(struct radix_tree_root *root,
- unsigned long index);
-
-
-cl_status_t radix_tree_create(struct radix_tree_root *root,
- gfp_t gfp_mask);
-
-void radix_tree_destroy(struct radix_tree_root *root );
-
-#define INIT_RADIX_TREE(root, mask) radix_tree_create(root, mask)
-#define RMV_RADIX_TREE(root) radix_tree_destroy(root)
-
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_memory.h
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_memory.h (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_memory.h (revision 6862)
@@ -1,334 +0,0 @@
-#pragma once
-
-#include "iobuf.h"
-#include "complib\cl_debug.h"
-
-////////////////////////////////////////////////////////
-//
-// CONSTANTS
-//
-////////////////////////////////////////////////////////
-
-#define MT_TAG_ATOMIC 'MOTA'
-#define MT_TAG_KERNEL 'LNRK'
-#define MT_TAG_HIGH 'HGIH'
-#define MT_TAG_PCIPOOL 'PICP'
-#define MT_TAG_IOMAP 'PAMI'
-
-////////////////////////////////////////////////////////
-//
-// SUBSTITUTIONS
-//
-////////////////////////////////////////////////////////
-
-
-////////////////////////////////////////////////////////
-//
-// MACROS
-//
-////////////////////////////////////////////////////////
-
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
-
- ////////////////////////////////////////////////////////
- //
- // Helper functions
- //
- ////////////////////////////////////////////////////////
-
-// returns log2 of the number of pages, i.e.
-// for size <= 4096 ==> 0
-// for size <= 8192 ==> 1
-static inline int get_order(unsigned long size)
-{
- int order;
-
- size = (size-1) >> (PAGE_SHIFT-1);
- order = -1;
- do {
- size >>= 1;
- order++;
- } while (size);
- return order;
-}
-
-static inline unsigned long roundup_pow_of_two(unsigned long x)
-{
- return (1UL << fls(x - 1));
-}
-
-
-
-////////////////////////////////////////////////////////
-//
-// SYSTEM MEMORY
-//
-////////////////////////////////////////////////////////
-
-typedef enum _gfp {
- __GFP_NOWARN = 0, /* Suppress page allocation failure warning */
- __GFP_HIGHMEM = 0, /* high memory */
- GFP_ATOMIC = 1, /* can't wait (i.e. DPC or higher) */
- GFP_KERNEL = 2, /* can wait (npaged) */
- GFP_HIGHUSER = 4 /* GFP_KERNEL, that can be in HIGH memory */
-}
-gfp_t;
-
-struct vm_area_struct {
- void * ptr;
-};
-
-static inline void * kmalloc( SIZE_T bsize, gfp_t gfp_mask)
-{
- void *ptr;
- ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL);
- ASSERT(bsize);
- switch (gfp_mask) {
- case GFP_ATOMIC:
- ptr = ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_ATOMIC );
- break;
- case GFP_KERNEL:
- ptr = ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_KERNEL );
- break;
- case GFP_HIGHUSER:
- ptr = ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_HIGH );
- break;
- default:
- cl_dbg_out("kmalloc: unsupported flag %d\n", gfp_mask);
- ptr = NULL;
- break;
- }
- return ptr;
-}
-
-static inline void * kzalloc( SIZE_T bsize, gfp_t gfp_mask)
-{
- void* va = kmalloc(bsize, gfp_mask);
- if (va)
- RtlZeroMemory(va, bsize);
- return va;
-}
-
-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
-{
- if (n != 0 && size > ULONG_MAX / n)
- return NULL;
- return kzalloc(n * size, flags);
-}
-
-static inline void kfree (const void *pobj)
-{
- ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL );
- if (pobj)
- ExFreePool((void *)pobj);
-}
-
-#define get_zeroed_page(mask) kzalloc(PAGE_SIZE, mask)
-#define free_page(ptr) kfree(ptr)
-
-
-////////////////////////////////////////////////////////
-//
-// IO SPACE <==> SYSTEM MEMORY
-//
-////////////////////////////////////////////////////////
-
-/**
-* ioremap - map bus memory into CPU space
-* @addr: bus address of the memory
-* @size: size of the resource to map
-*
-* ioremap performs a platform specific sequence of operations to
-* make bus memory CPU accessible via the readb/readw/readl/writeb/
-* writew/writel functions and the other mmio helpers. The returned
-* address is not guaranteed to be usable directly as a virtual
-* address.
-*/
-static inline void *ioremap(io_addr_t addr, SIZE_T size, MEMORY_CACHING_TYPE cache_type)
-{
- PHYSICAL_ADDRESS pa;
- void *va;
-
- ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL );
- pa.QuadPart = addr;
- va = MmMapIoSpace( pa, size, cache_type );
- return va;
-}
-
-static inline void iounmap(void *va, SIZE_T size)
-{
- MmUnmapIoSpace( va, size );
-}
-
-
-////////////////////////////////////////////////////////
-//
-// DMA SUPPORT
-//
-////////////////////////////////////////////////////////
-
-enum dma_data_direction {
- PCI_DMA_BIDIRECTIONAL,
- PCI_DMA_TODEVICE,
- DMA_TO_DEVICE = PCI_DMA_TODEVICE
-};
-
-#define dma_get_cache_alignment (int)KeGetRecommendedSharedDataAlignment
-
-// wrapper to DMA address
-typedef struct _dma_addr
-{
- // TODO: in some cases it is still physical address today
- io_addr_t da; /* logical (device) address */
- void * va; /* kernel virtual address */
- unsigned long sz; /* buffer size */
-} dma_addr_t;
-
-#define lowmem_page_address(dma_addr) ((dma_addr).va)
-
-struct mlx4_dev;
-
-void *alloc_cont_mem(
- IN struct pci_dev *pdev,
- IN unsigned long size,
- OUT dma_addr_t*p_dma_addr);
-
-void free_cont_mem(
- IN struct pci_dev *pdev,
- IN dma_addr_t*p_dma_addr);
-
-// TODO: translate to DMA space - for now nothing is done
-static inline dma_addr_t pci_map_page(struct pci_dev *pdev,
- dma_addr_t dma_addr, unsigned long offset, SIZE_T size, int direction)
-{
- UNUSED_PARAM(pdev);
- UNUSED_PARAM(offset);
- UNUSED_PARAM(size);
- UNUSED_PARAM(direction);
-
- return dma_addr;
-}
-
-static inline dma_addr_t
-alloc_pages( struct pci_dev *pdev, gfp_t gfp, int order )
-{
- dma_addr_t dma_addr;
- UNUSED_PARAM(gfp);
- alloc_cont_mem( pdev, PAGE_SIZE << order, &dma_addr );
- return dma_addr;
-}
-
-#define alloc_page(pdev, mask) alloc_pages(pdev, (mask), 0)
-#define __get_free_page(mask) kzalloc(PAGE_SIZE, mask)
-
-static inline void
-__free_pages( struct pci_dev *pdev, dma_addr_t dma_addr, int order )
-{
- UNUSED_PARAM(order);
- ASSERT((PAGE_SIZE << order) == (int)dma_addr.sz);
- free_cont_mem( pdev, &dma_addr );
-}
-
-#define __free_page(pdev, dma_addr) __free_pages(pdev, (dma_addr), 0)
-
-
-
-static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
-{
- return !dma_addr.sz;
-}
-
-static inline void pci_unmap_page(struct pci_dev *pdev,
- dma_addr_t dma_addr, SIZE_T size, int direction)
-{
- UNUSED_PARAM(pdev);
- UNUSED_PARAM(dma_addr);
- UNUSED_PARAM(size);
- UNUSED_PARAM(direction);
-}
-
-static inline void
-dma_sync_single( struct mlx4_dev **dev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- UNUSED_PARAM(dev);
- UNUSED_PARAM(dma_addr);
- UNUSED_PARAM(size);
- UNUSED_PARAM(direction);
- // TODO: FlushAdapterBuffers() should be used here
-}
-
-void *
-dma_alloc_coherent( struct mlx4_dev **dev, size_t size,
- dma_addr_t *p_dma, gfp_t gfp );
-
-void dma_free_coherent( struct mlx4_dev **dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
- void pci_free_consistent( struct pci_dev *pdev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
-
-
-////////////////////////////////////////////////////////
-//
-// SG lists
-//
-////////////////////////////////////////////////////////
-
-#define sg_dma_addr(sg) ((sg)->dma_addr)
-#define sg_dma_address(sg) ((sg)->dma_addr.da)
-#define sg_dma_len(sg) ((sg)->dma_addr.sz)
-#define sg_dma_address_inc(p_dma,val) (p_dma)->da += val
-#define sg_page(sg) ((sg)->dma_addr)
-
-struct scatterlist {
- dma_addr_t dma_addr; /* logical (device) address */
- unsigned int offset; /* offset in the first page */
- PMDL p_mdl; /* MDL, if any (used for user space buffers) */
-};
-
-#define offset_in_page(va) ((ULONG)((ULONG_PTR)(va) & ~PAGE_MASK))
-
-static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
-{
- memset(sgl, 0, sizeof(*sgl) * nents);
-}
-
-static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
- unsigned int buflen)
-{
- UNUSED_PARAM(buflen);
- ASSERT(sg->dma_addr.sz == buflen);
- sg->offset = offset_in_page(buf);
-}
-
-static inline void sg_set_page(struct scatterlist *sg,
- dma_addr_t dma_addr, unsigned int len, unsigned int offset)
-{
- UNUSED_PARAM(len);
- sg->offset = offset;
- sg->dma_addr = dma_addr;
-}
-
-/* Returns: the number of mapped sg elements */
-static inline int pci_map_sg(struct pci_dev *pdev,
- struct scatterlist *sg, int nents, int direction)
-{
- UNUSED_PARAM(pdev);
- UNUSED_PARAM(sg);
- UNUSED_PARAM(direction);
- return nents;
-}
-
-/* Returns: the number of unmapped sg elements */
-static inline int pci_unmap_sg(struct pci_dev *pdev,
- struct scatterlist *sg, int nents, int direction)
-{
- UNUSED_PARAM(pdev);
- UNUSED_PARAM(sg);
- UNUSED_PARAM(direction);
- return nents;
-}
-
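For reference, a minimal sketch of the ioremap()/iounmap() pair declared above; bar_pa and bar_size are illustrative placeholders:

    void *regs;

    regs = ioremap( bar_pa, bar_size, MmNonCached );
    if (!regs)
        return -ENOMEM;
    /* ... access the BAR through readl()/writel() from l2w_pci.h ... */
    iounmap( regs, bar_size );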
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_pci.h
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_pci.h (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_pci.h (revision 6862)
@@ -1,86 +0,0 @@
-#pragma once
-
-// ===========================================
-// LITERALS
-// ===========================================
-
-
-// ===========================================
-// TYPES
-// ===========================================
-
-
-// ===========================================
-// MACROS/FUNCTIONS
-// ===========================================
-
-#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
-#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
-#define PCI_FUNC(devfn) ((devfn) & 0x07)
-
-NTSTATUS pci_hca_reset( struct pci_dev *pdev);
-
-/* use shim to implement that */
-#define mlx4_reset(dev) pci_hca_reset(dev->pdev)
-
-// get bar boundaries
-#define pci_resource_start(dev,bar_num) ((dev)->bar[bar_num >> 1].phys)
-#define pci_resource_len(dev,bar_num) ((dev)->bar[bar_num >> 1].size)
-
-// i/o to registers
-
-static inline u64 readq(const volatile void __iomem *addr)
-{
- //TODO: write atomic implementation of _IO_READ_QWORD and change mthca_doorbell.h
- u64 val;
- READ_REGISTER_BUFFER_ULONG((PULONG)(addr), (PULONG)&val, 2 );
- return val;
-}
-
-static inline u32 readl(const volatile void __iomem *addr)
-{
- return READ_REGISTER_ULONG((PULONG)(addr));
-}
-
-static inline u16 reads(const volatile void __iomem *addr)
-{
- return READ_REGISTER_USHORT((PUSHORT)(addr));
-}
-
-static inline u8 readb(const volatile void __iomem *addr)
-{
- return READ_REGISTER_UCHAR((PUCHAR)(addr));
-}
-
-#define __raw_readq readq
-#define __raw_readl readl
-#define __raw_reads reads
-#define __raw_readb readb
-
-static inline void writeq(unsigned __int64 val, volatile void __iomem *addr)
-{
- //TODO: write atomic implementation of _IO_WRITE_QWORD and change mthca_doorbell.h
- WRITE_REGISTER_BUFFER_ULONG( (PULONG)(addr), (PULONG)&val, 2 );
-}
-
-static inline void writel(unsigned int val, volatile void __iomem *addr)
-{
- WRITE_REGISTER_ULONG((PULONG)(addr),val);
-}
-
-static inline void writes(unsigned short val, volatile void __iomem *addr)
-{
- WRITE_REGISTER_USHORT((PUSHORT)(addr),val);
-}
-
-static inline void writeb(unsigned char val, volatile void __iomem *addr)
-{
- WRITE_REGISTER_UCHAR((PUCHAR)(addr),val);
-}
-
-#define __raw_writeq writeq
-#define __raw_writel writel
-#define __raw_writes writes
-#define __raw_writeb writeb
-
-
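For reference, a short sketch of the register-access wrappers above; regs and DOORBELL_OFFSET are illustrative placeholders:

    u32 val;

    val = readl( (u8*)regs + DOORBELL_OFFSET );          /* READ_REGISTER_ULONG underneath */
    writel( val | 0x1, (u8*)regs + DOORBELL_OFFSET );    /* WRITE_REGISTER_ULONG underneath */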
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_list.h
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_list.h (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_list.h (revision 6862)
@@ -1,99 +0,0 @@
-#pragma once
-
-////////////////////////////////////////////////////////
-//
-// TYPES
-//
-////////////////////////////////////////////////////////
-
-// Use the type, defined in wdm.h
-#define list_head _LIST_ENTRY
-
-
-////////////////////////////////////////////////////////
-//
-// MACROS
-//
-////////////////////////////////////////////////////////
-
-
-// Define and initialize a list header
-#define LIST_HEAD(name) \
- struct list_head name = { &(name), &(name) }
-
-// Initialize a list header
-#define INIT_LIST_HEAD(ptr) InitializeListHead(ptr)
-
-// Get to the beginning of the struct for this list entry
-#define list_entry(ptr, type, member) CONTAINING_RECORD(ptr, type, member)
-
-// Iterate over list of 'list_els' of given 'type'
-#define list_for_each_entry(list_el, head, member, type) \
- for ( list_el = list_entry((head)->Flink, type, member); \
- &list_el->member != (head); \
- list_el = list_entry(list_el->member.Flink, type, member))
-
-// Iterate backwards over list of 'list_els' of given 'type'
-#define list_for_each_entry_reverse(list_el, head, member, type) \
- for (list_el = list_entry((head)->Blink, type, member); \
- &list_el->member != (head); \
- list_el = list_entry(list_el->member.Blink, type, member))
-
-// Iterate over list of given type safe against removal of list entry
-#define list_for_each_entry_safe(list_el, tmp_list_el, head, member,type, tmp_type) \
- for (list_el = list_entry((head)->Flink, type, member), \
- tmp_list_el = list_entry(list_el->member.Flink, type, member); \
- &list_el->member != (head); \
- list_el = tmp_list_el, \
- tmp_list_el = list_entry(tmp_list_el->member.Flink, tmp_type, member))
-
-
-////////////////////////////////////////////////////////
-//
-// FUNCTIONS
-//
-////////////////////////////////////////////////////////
-
-// Insert a new entry after the specified head.
-static inline void list_add(struct list_head *new_entry, struct list_head *head)
-{
- InsertHeadList( head, new_entry );
-}
-
-// Insert a new entry before the specified head.
-static inline void list_add_tail(struct list_head *new_entry, struct list_head *head)
-{
- InsertTailList( head, new_entry );
-}
-
-// Deletes entry from list.
-static inline void list_del(struct list_head *entry)
-{
- RemoveEntryList( entry );
-}
-
-// Tests whether a list is empty
-static inline int list_empty(const struct list_head *head)
-{
- return IsListEmpty( head );
-}
-
-// Insert src_list into dst_list and reinitialise the emptied src_list.
-static inline void list_splice_init(struct list_head *src_list,
- struct list_head *dst_list)
-{
- if (!list_empty(src_list)) {
- struct list_head *first = src_list->Flink;
- struct list_head *last = src_list->Blink;
- struct list_head *at = dst_list->Flink;
-
- first->Blink = dst_list;
- dst_list->Flink = first;
-
- last->Flink = at;
- at->Blink = last;
-
- INIT_LIST_HEAD(src_list);
- }
-}
-
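For reference, a hedged sketch of the list wrappers above; note the extra 'type' argument that the l2w iteration macros take compared with their Linux counterparts. struct eq_entry, p_item and p_new are illustrative:

    struct eq_entry {
        struct list_head list;
        int              eqn;
    };

    LIST_HEAD( eq_list );
    struct eq_entry *p_item;

    list_add_tail( &p_new->list, &eq_list );
    list_for_each_entry( p_item, &eq_list, list, struct eq_entry ) {
        /* visit each entry; p_item->eqn is valid here */
    }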
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_sync.h
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_sync.h (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_sync.h (revision 6862)
@@ -1,165 +0,0 @@
-#pragma once
-
-// literals
-#ifndef LONG_MAX
-#define LONG_MAX 2147483647L /* maximum (signed) long value */
-#endif
-
-#ifndef ULONG_MAX
-#define ULONG_MAX 4294967295UL
-#endif
-
-//
-// mutex wrapper
-//
-
-struct mutex
-{
- KMUTEX m;
-};
-
-#define DEFINE_MUTEX(a) struct mutex a
-
-static inline void mutex_init( struct mutex * mutex )
-{
- KeInitializeMutex( &mutex->m, 0 );
-}
-
-static inline void mutex_lock( struct mutex * mutex )
-{
- NTSTATUS status;
- int need_to_wait = 1;
-
- ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
- while (need_to_wait) {
- status = KeWaitForSingleObject( &mutex->m, Executive, KernelMode, FALSE, NULL );
- if (status == STATUS_SUCCESS)
- break;
- }
-}
-
-static inline void mutex_unlock( struct mutex * mutex )
-{
- ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
- KeReleaseMutex( &mutex->m, FALSE );
-}
-
-
-//
-// semaphore wrapper
-//
-
-struct semaphore
-{
- KSEMAPHORE s;
-};
-
-static inline void sema_init(
- IN struct semaphore *sem,
- IN LONG cnt)
-{
- ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
- KeInitializeSemaphore( &sem->s, cnt, cnt );
-}
-
-static inline void up( struct semaphore *sem )
-{
- ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
- KeReleaseSemaphore( &sem->s, 0, 1, FALSE );
-}
-static inline void down( struct semaphore *sem )
-{
- NTSTATUS status;
- int need_to_wait = 1;
-
- ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
- while (need_to_wait) {
- status = KeWaitForSingleObject( &sem->s, Executive, KernelMode, FALSE, NULL );
- if (status == STATUS_SUCCESS)
- break;
- }
-}
-
-
-//
-// completion wrapper
-//
-
-struct completion
-{
- KEVENT event;
- int done;
-};
-
-static inline void init_completion( struct completion * compl )
-{
- //TODO: the ASSERT is temporarily commented out, because the use of fast mutexes in CompLib
- // causes execution at APC_LEVEL
- //ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
- KeInitializeEvent( &compl->event, NotificationEvent , FALSE );
- compl->done = 0;
-}
-
-static inline int wait_for_completion_timeout( struct completion * compl, unsigned long timeout )
-{
- LARGE_INTEGER interval;
- ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
- interval.QuadPart = (-10)* (__int64)timeout;
- return (int)KeWaitForSingleObject( &compl->event, Executive, KernelMode, FALSE, &interval );
-}
-
-static inline void wait_for_completion( struct completion * compl )
-{
- NTSTATUS status;
- int need_to_wait = 1;
-
- ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
-
- while (need_to_wait) {
- status = KeWaitForSingleObject( &compl->event, Executive, KernelMode, FALSE, NULL );
- if (status == STATUS_SUCCESS)
- break;
- }
-}
-
-
-
-static inline void complete( struct completion * compl )
-{
- ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
- compl->done++;
- KeSetEvent( &compl->event, 0, FALSE );
-}
-
-#ifdef USE_WDM_INTERRUPTS
-
-//
-// IRQ wrapper
-//
-
-void free_irq(struct mlx4_dev *dev);
-
-int request_irq(
- IN struct mlx4_dev * dev,
- IN PKSERVICE_ROUTINE isr, /* ISR */
- IN PVOID isr_ctx, /* ISR context */
- IN PKMESSAGE_SERVICE_ROUTINE misr, /* Message ISR */
- OUT PKINTERRUPT * int_obj /* interrupt object */
- );
-
-#endif
-
-//
-// various
-//
-
-// TODO: Is it enough to wait at DPC level?
-// Maybe we should use KeSynchronizeExecution here?
-static inline void synchronize_irq(unsigned int irq)
-{
- UNUSED_PARAM(irq);
- KeFlushQueuedDpcs();
-}
-
-
-
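For reference, a minimal sketch of the completion wrapper above; cmd_done, the timeout and the error code are illustrative. Note that the timeout argument is interpreted in microseconds here, since the wrapper multiplies it by -10 to build a relative 100-ns interval:

    struct completion cmd_done;

    init_completion( &cmd_done );
    /* ... post the command; the EQ/DPC path calls complete( &cmd_done ) ... */
    if (wait_for_completion_timeout( &cmd_done, 5000000 ) != STATUS_SUCCESS)   /* ~5 sec */
        return -EBUSY;                           /* illustrative error code */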
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_bitmap.h
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_bitmap.h (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_bitmap.h (revision 6862)
@@ -1,79 +0,0 @@
-#pragma once
-
-#define DECLARE_BITMAP(name,bits) \
- unsigned long name[BITS_TO_LONGS(bits)]
-
-static inline unsigned long atomic_set_bit(int nr, volatile long * addr)
-{
- return InterlockedOr( addr, (1 << nr) );
-}
-
-static inline unsigned long atomic_clear_bit(int nr, volatile long * addr)
-{
- return InterlockedAnd( addr, ~(1 << nr) );
-}
-
-static inline int set_bit(int nr,unsigned long * addr)
-{
- addr += nr >> 5;
- return atomic_set_bit( nr & 0x1f, (volatile long *)addr );
-}
-
-static inline int clear_bit(int nr, unsigned long * addr)
-{
- addr += nr >> 5;
- return atomic_clear_bit( nr & 0x1f, (volatile long *)addr );
-}
-
-static inline int test_bit(int nr, const unsigned long * addr)
-{
- int mask;
-
- addr += nr >> 5;
- mask = 1 << (nr & 0x1f);
- return ((mask & *addr) != 0);
-}
-
-static inline void bitmap_zero(unsigned long *dst, int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- *dst = 0UL;
- else {
- int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- RtlZeroMemory(dst, len);
- }
-}
-
-#define BITMAP_LAST_WORD_MASK(nbits) \
- ( ((nbits) % BITS_PER_LONG) ? (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL )
-
-int __bitmap_full(const unsigned long *bitmap, int bits);
-
-static inline int bitmap_full(const unsigned long *src, int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
- else
- return __bitmap_full(src, nbits);
-}
-
-int __bitmap_empty(const unsigned long *bitmap, int bits);
-
-static inline int bitmap_empty(const unsigned long *src, int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
- else
- return __bitmap_empty(src, nbits);
-}
-
-static inline void bitmap_fill(unsigned long *dst, int nbits)
-{
- size_t nlongs = BITS_TO_LONGS(nbits);
- if (nlongs > 1) {
- int len = (int)((nlongs - 1) * sizeof(unsigned long));
- memset(dst, 0xff, len);
- }
- dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
-}
-
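For reference, a short sketch of the bitmap helpers above; qp_map, its size and the bit index are illustrative:

    DECLARE_BITMAP( qp_map, 128 );

    bitmap_zero( qp_map, 128 );
    set_bit( 5, qp_map );                        /* interlocked set of bit 5 */
    if (test_bit( 5, qp_map ) && !bitmap_full( qp_map, 128 ))
        clear_bit( 5, qp_map );                  /* interlocked clear of bit 5 */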
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_atomic.h
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_atomic.h (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_atomic.h (revision 6862)
@@ -1,60 +0,0 @@
-#pragma once
-
-#include "complib/cl_atomic.h"
-
-typedef volatile __int32 atomic_t; /* as atomic32_t */
-
-#define atomic_inc cl_atomic_inc
-#define atomic_dec cl_atomic_dec
-
-static inline atomic_t atomic_read(atomic_t *pval)
-{
- return *pval;
-}
-
-static inline void atomic_set(atomic_t *pval, long val)
-{
- *pval = (__int32)val;
-}
-
-/**
-* atomic_inc_and_test - increment and test
-* pval: pointer of type atomic_t
-*
-* Atomically increments pval by 1 and
-* returns true if the result is 0, or false for all other
-* cases.
-*/
-static inline int
-atomic_inc_and_test(atomic_t *pval)
-{
- return cl_atomic_inc(pval) == 0;
-}
-
-/**
-* atomic_dec_and_test - decrement and test
-* pval: pointer of type atomic_t
-*
-* Atomically decrements pval by 1 and
-* returns true if the result is 0, or false for all other
-* cases.
-*/
-static inline int
-atomic_dec_and_test(atomic_t *pval)
-{
- return cl_atomic_dec(pval) == 0;
-}
-
-
-/**
-* atomic_dec_return - decrement and return the value
-* pval: pointer of type atomic_t
-*
-* Atomically decrements pval by 1 and returns the new value
-*/
-static inline int
-atomic_dec_return(atomic_t *pval)
-{
- return cl_atomic_dec(pval);
-}
-
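For reference, a minimal sketch of the atomic wrappers above; refcount is illustrative:

    atomic_t refcount;

    atomic_set( &refcount, 1 );
    atomic_inc( &refcount );                     /* refcount == 2 */
    if (atomic_dec_and_test( &refcount ))        /* FALSE here: counter drops to 1, not 0 */
        ;                                        /* would free the object on the last reference */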
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_bit.h
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_bit.h (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_bit.h (revision 6862)
@@ -1,192 +0,0 @@
-#pragma once
-
-// Nth element of the table contains the index of the first set bit of N; 8 - for N=0
-extern char g_set_bit_tbl[256];
-// Nth element of the table contains the index of the first cleared bit of N; 8 - for N=255
-extern char g_clr_bit_tbl[256];
-
-static inline int fls(int x)
-{
- int r = 32;
-
- if (!x)
- return 0;
- if (!(x & 0xffff0000u)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xff000000u)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xf0000000u)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xc0000000u)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000u)) {
- x <<= 1;
- r -= 1;
- }
- return r;
-}
-
-/**
-* _ffs_raw - find the first one bit in a word
-* @addr: The address to start the search at
-* @offset: The bitnumber to start searching at
-*
-* returns: 0 - if not found or N+1, if found Nth bit
-*/
-static __inline int _ffs_raw(const unsigned long *addr, int offset)
-{
- //TODO: not efficient code - would be better in assembler
- int mask;
- int rbc;
- int ix;
- if (!*addr) return 0;
- mask = 1 << offset;
- rbc = BITS_PER_LONG - offset;
- for (ix=0; ix<rbc; ix++, mask<<=1) {
- if (*addr & mask)
- return offset + ix + 1;
- }
- return 0;
-}
-
-// as previous with offset = 0
-static __inline int _ffs(const unsigned long *addr)
-{
- unsigned char *ptr = (unsigned char *)addr;
- if (!*addr) return 0; // zero dword - no set bit
- if (!*(short*)ptr) ptr += 2; // get to the non-zero word
- if (!*(char*)ptr) ptr++; // get to the non-zero byte
- return (int)(((ptr - (unsigned char *)addr ) << 3) + g_set_bit_tbl[*ptr] + 1);
-}
-
-
-#define ffs(val) _ffs((const unsigned long *)&(val))
-
-/**
-* _ffz_raw - find the first zero bit in a word
-* @addr: The address to start the search at
-* @offset: The bitnumber to start searching at
-*
-* returns: 0 if not found, or N+1 if bit N is the first zero bit
-*/
-static __inline int _ffz_raw(const unsigned long *addr, int offset)
-{
- //TODO: not efficient code - would be better in assembler
- int mask;
- int rbc;
- int ix;
- if (!~*addr) return 0;
- mask = 1 << offset;
- rbc = BITS_PER_LONG - offset;
- for (ix=0; ix<rbc; ix++, mask<<=1) {
- if (!(*addr & mask))
- return offset + ix + 1;
- }
- return 0;
-}
-
-// as previous with offset = 0
-static __inline int _ffz(const unsigned long *addr)
-{
- unsigned char *ptr = (unsigned char *)addr;
- if (!~*addr) return 0; // all bits set - no zero bit
- if (!~*(short*)ptr) ptr += 2; // get to the word with a zero bit
- if (!~*(char*)ptr) ptr++; // get to the byte with a zero bit
- return (int)(((ptr - (unsigned char *)addr ) << 3) + g_clr_bit_tbl[*ptr] + 1);
-}
-
-#define ffz(val) _ffz((const unsigned long *)&val)
-
-// Function:
-// finds the first set bit in the bitmap
-// Parameters:
-// ptr - address of the bitmap
-// bits_size - the size in bits
-// Returns:
-// the index of the first set bit; 'bits_size' - when there is none
-// Notes:
-// presumes that ptr is dword-aligned
-// presumes that the map contains an integral number of dwords
-// for bits_size=0 it will return 0, but that is an illegal case
-//
-static __inline int find_first_bit(const unsigned long *addr, unsigned bits_size)
-{
- unsigned char *ptr = (unsigned char *)addr; // bitmap start
- unsigned char *end_ptr = (unsigned char *)(addr + BITS_TO_LONGS(bits_size)); // bitmap end
-
- while (ptr<end_ptr) {
- if (!*(int*)ptr) { ptr += 4; continue; } // skip zero dword
- if (!*(short*)ptr) ptr += 2; // get to the non-zero word
- if (!*(char*)ptr) ptr++; // get to the non-zero byte
- return (int)(((ptr - (unsigned char *)addr ) << 3) + g_set_bit_tbl[*ptr]);
- }
- return bits_size;
-}
-
-static __inline int find_first_zero_bit(const unsigned long *addr, unsigned bits_size)
-{
- unsigned char *ptr = (unsigned char *)addr; // bitmap start
- unsigned char *end_ptr = (unsigned char *)(addr + BITS_TO_LONGS(bits_size)); // bitmap end
-
- while (ptr<end_ptr) {
- if (!~*(int*)ptr) { ptr += 4; continue; } // skip dword w/o zero bits
- if (!~*(short*)ptr) ptr += 2; // get to the word with zero bits
- if (!~*(char*)ptr) ptr++; // get to the byte with zero bits
- return (int)(((ptr - (unsigned char *)addr ) << 3) + g_clr_bit_tbl[*ptr]);
- }
- return bits_size;
-}
-
-
-/**
-* find_next_zero_bit - find the first zero bit in a memory region
-* @addr: The address to base the search on
-* @offset: The bitnumber to start searching at
-* @bits_size: The maximum size to search
-*
-* Returns the bit-number of the first zero bit, not the number of the byte
-* containing a bit. If not found, it returns 'bits_size'
-*/
-static __inline int find_next_zero_bit(const unsigned long *addr, int bits_size, int offset)
-{
- int res;
- int ix = offset & 31;
- int set = offset & ~31;
- const unsigned long *p = addr + (set >> 5);
-
- // search within the first word if the offset is not word-aligned
- if (ix) {
- res = _ffz_raw(p, ix);
- if (res)
- return set + res - 1;
- ++p;
- set += BITS_PER_LONG;
- }
-
- // search the rest of the bitmap
- res = find_first_zero_bit(p, bits_size - (unsigned)(32 * (p - addr)));
- return res + set;
-}
-
-/* The function works only for 32-bit values (unlike Linux) */
-/* for val=0 it will return '-1' */
-static inline int ilog2(u32 val)
-{
- ASSERT(val);
- return fls(val) - 1;
-}
-
-static inline BOOLEAN is_power_of_2(unsigned long n)
-{
- return (!!n & !(n & (n-1))) ? TRUE : FALSE;
-}
-
-
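The helpers above do their byte-level scans through the g_set_bit_tbl/g_clr_bit_tbl lookup tables, which are declared extern here and filled in elsewhere in the library. A standalone sketch of how such 256-entry tables can be generated and used, for illustration only (the init routine below is not the one in the patch):

    #include <stdio.h>

    static char set_bit_tbl[256];   /* index of the lowest set bit of N; 8 for N == 0    */
    static char clr_bit_tbl[256];   /* index of the lowest clear bit of N; 8 for N == 255 */

    static void init_bit_tables(void)
    {
        int n, i;
        for (n = 0; n < 256; n++) {
            set_bit_tbl[n] = 8;
            clr_bit_tbl[n] = 8;
            for (i = 7; i >= 0; i--) {      /* the lowest matching bit wins */
                if (n & (1 << i))
                    set_bit_tbl[n] = (char)i;
                else
                    clr_bit_tbl[n] = (char)i;
            }
        }
    }

    int main(void)
    {
        init_bit_tables();
        /* 0x28 == 00101000b: lowest set bit is 3, lowest clear bit is 0 */
        printf("set[0x28]=%d clr[0x28]=%d\n", set_bit_tbl[0x28], clr_bit_tbl[0x28]);
        return 0;
    }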
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w.h
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w.h (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w.h (revision 6862)
@@ -1,396 +0,0 @@
-#pragma once
-
-#ifndef L2W_H
-#define L2W_H
-
-////////////////////////////////////////////////////////
-//
-// GENERAL INCLUDES
-//
-////////////////////////////////////////////////////////
-
-// OS
-#include <ntddk.h>
-//#include <iointex.h>
-#include <stdio.h>
-#include <stdarg.h>
-#include <stdlib.h>
-#include <errno.h>
-#define NTSTRSAFE_LIB
-#include <ntstrsafe.h>
-
-// complib
-#include <complib/cl_timer.h>
-#include <complib/cl_qlist.h>
-
-// mlx4
-#include "vc.h"
-
-
-////////////////////////////////////////////////////////
-//
-// LITERALS
-//
-////////////////////////////////////////////////////////
-
-#define BITS_PER_LONG 32
-#define N_BARS 3
-#define HZ 1000000 /* 1 sec in usecs */
-#define EOPNOTSUPP 95
-
-
-////////////////////////////////////////////////////////
-//
-// SUBSTITUTIONS
-//
-////////////////////////////////////////////////////////
-
-#define BUG_ON(exp) ASSERT(!(exp)) /* in Linux this is followed by panic()! */
-#define snprintf _snprintf
-#define KERN_ERR "err:"
-#define KERN_WARNING "warn:"
-#define KERN_DEBUG "dbg:"
-
-// memory barriers
-#define wmb KeMemoryBarrier
-#define rmb KeMemoryBarrier
-#define mb KeMemoryBarrier
-// TODO: can this be made empty? In Linux it is an empty macro for x86 & x64
-#define mmiowb KeMemoryBarrier
-
-
-// gcc compiler attributes
-#define __devinit
-#define __devinitdata
-#define __init
-#define __exit
-#define __force
-#define __iomem
-#define __attribute_const__
-#define likely(x) (x)
-#define unlikely(x) (x)
-#define __attribute__(a)
-#define __bitwise
-
-// container_of
-#define container_of CONTAINING_RECORD
-
-// inline
-#define inline __inline
-
-// new Linux event mechanism
-#define complete(a) wake_up(a)
-
-// convert
-#define __constant_htons CL_HTON16
-#define __constant_cpu_to_be32 CL_HTON32
-
-// various
-#define __always_inline inline
-
-#if (WINVER < _WIN32_WINNT_WIN6)
-#define num_possible_cpus() KeNumberProcessors
-#else
-#define num_possible_cpus() KeQueryMaximumProcessorCount()
-#endif
-
-
-////////////////////////////////////////////////////////
-//
-// TYPES
-//
-////////////////////////////////////////////////////////
-
-#define true (u8)1
-#define false (u8)0
-
-// basic types
-typedef unsigned char u8, __u8;
-typedef unsigned short int u16, __u16;
-typedef unsigned int u32, __u32;
-typedef unsigned __int64 u64, __u64;
-typedef char s8, __s8;
-typedef short int s16, __s16;
-typedef int s32, __s32;
-typedef __int64 s64, __s64;
-
-#ifndef __cplusplus
-typedef u8 bool;
-#endif
-
-// inherited
-typedef u16 __le16;
-typedef u16 __be16;
-typedef u32 __le32;
-typedef u32 __be32;
-typedef u64 __le64;
-typedef u64 __be64;
-typedef u64 io_addr_t;
-
-// dummy function
-typedef void (*MT_EMPTY_FUNC)();
-
-// PCI BAR descriptor
-typedef enum _hca_bar_type
-{
- HCA_BAR_TYPE_HCR,
- HCA_BAR_TYPE_UAR,
- HCA_BAR_TYPE_DDR,
- HCA_BAR_TYPE_MAX
-
-} hca_bar_type_t;
-
-
-typedef struct _hca_bar
-{
- uint64_t phys;
- void *virt;
- SIZE_T size;
-
-} hca_bar_t;
-
-struct msix_saved_info {
- PVOID vca; /* MSI-X Vector Table card address */
- PVOID mca; /* MSI-X Mask Table card address */
- PVOID vsa; /* MSI-X Vector Table saved address */
- PVOID msa; /* MSI-X Mask Table saved address */
- ULONG vsz; /* MSI-X Vector Table size */
- ULONG msz; /* MSI-X Mask Table size */
- int num; /* number of supported MSI-X vectors */
- int valid; /* the structure is valid */
-};
-
-struct msix_map {
- KAFFINITY cpu; /* affinity of this MSI-X vector */
- int eq_ix; /* EQ index in the array of EQs */
- int ref_cnt; /* number of users */
-};
-
-typedef struct _MLX4_ST_DEVICE *PMLX4_ST_DEVICE;
-
-// interface structure between Upper and Low Layers of the driver
-struct pci_dev
-{
- // driver: OS/platform resources
- BUS_INTERFACE_STANDARD bus_pci_ifc;
- PCI_COMMON_CONFIG pci_cfg_space;
- struct msix_saved_info msix_info;
- struct msix_map* p_msix_map;
- uplink_info_t uplink_info;
- // driver: card resources
- hca_bar_t bar[N_BARS];
- CM_PARTIAL_RESOURCE_DESCRIPTOR int_info; /* HCA interrupt resources */
- // driver: various objects and info
- USHORT ven_id;
- USHORT dev_id;
- USHORT sub_vendor_id;
- USHORT sub_system_id;
- UCHAR revision_id;
- UCHAR partition_status;
- DMA_ADAPTER * p_dma_adapter; /* HCA adapter object */
- DEVICE_OBJECT * p_self_do; /* mlx4_bus's FDO */
- DEVICE_OBJECT * pdo; /* mlx4_bus's PDO */
- PVOID p_wdf_device; /* wdf_device */
- LONG ib_hca_created;
- // mlx4_ib: various objects and info
- struct ib_device * ib_dev;
- // mlx4_net: various objects and info
- struct mlx4_dev * dev;
- volatile long dpc_lock;
- PUCHAR vpd;
- int vpd_size;
- WCHAR location[36]; /* bus+func+dev */
- int pci_bus;
- int pci_device;
- int pci_func;
- USHORT devfn;
- char name[24]; /* mlx4_role_bus_func_dev */
- // statistics
- PMLX4_ST_DEVICE p_stat;
-//
-// WDM interrupts
-//
- // legacy
- PKINTERRUPT int_obj; /* HCA interrupt object */
- KSPIN_LOCK isr_lock; /* lock for the ISR */
- // MSI-X interrupts
- u8 n_msi_vectors_alloc;/* number of allocated MSI vectors */
- u8 n_msi_vectors; /* number of MSI vectors; 0 - no MSI */
- ULONG version;
- int legacy_connect;
- // others
- int is_reset_prohibited;
- boolean_t start_event_taken;
-
- USHORT clp_ver;
- KEVENT remove_dev_lock; /* lock remove_one process */
-};
-
-/* DPC */
-typedef void (*dpc_t)( struct _KDPC *, PVOID, PVOID, PVOID );
-
-#ifdef SUPPORTED_ONLY_IN_LINUX
-struct attribute {
- const char *name;
- void *owner;
- u32 mode;
-};
-
-struct device_attribute {
- struct attribute attr;
- ssize_t (*show)(struct device *dev, struct device_attribute *attr, char *buf);
- ssize_t (*store)(struct device *dev, struct device_attribute *attr, const char *buf, size_t count);
-};
-#endif
-
-////////////////////////////////////////////////////////
-//
-// MACROS
-//
-////////////////////////////////////////////////////////
-
-// conversions
-#define swab32(a) _byteswap_ulong((ULONG)(a))
-#define cpu_to_be16(a) _byteswap_ushort((USHORT)(a))
-#define be16_to_cpu(a) _byteswap_ushort((USHORT)(a))
-#define cpu_to_be32(a) _byteswap_ulong((ULONG)(a))
-#define be32_to_cpu(a) _byteswap_ulong((ULONG)(a))
-#define cpu_to_be64(a) _byteswap_uint64((UINT64)(a))
-#define be64_to_cpu(a) _byteswap_uint64((UINT64)(a))
-#define be64_to_cpup(p) _byteswap_uint64(*(PUINT64)(p))
-#define be32_to_cpup(p) _byteswap_ulong(*(PULONG)(p))
-#define be16_to_cpup(p) _byteswap_ushort(*(PUSHORT)(p))
-
-// ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-
-// ALIGN
-#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
-#define PTR_ALIGN(size) (((size) + sizeof(void*) - 1) & ~(sizeof(void*) - 1))
-
-// There is a bug in the Microsoft compiler: when _byteswap_uint64() gets an expression,
-// it evaluates the expression but doesn't swap the dwords.
-// So here is a workaround.
-#ifdef BYTESWAP_UINT64_BUG_FIXED
-#define CPU_2_BE64_PREP
-#define CPU_2_BE64(x) cl_hton64(x)
-#else
-#define CPU_2_BE64_PREP unsigned __int64 __tmp__
-#define CPU_2_BE64(x) ( __tmp__ = x, cl_hton64(__tmp__) )
-#endif
-
-#define ERR_PTR(error) ((void*)(LONG_PTR)(error))
-#define PTR_ERR(ptr) ((long)(LONG_PTR)(void*)(ptr))
-#define ETH_ALEN 6
-
-//TODO: there are 2 assumptions here:
-// - pointer can't be too big (around -1)
-// - error can't be bigger than 1000
-#define IS_ERR(ptr) ((ULONG_PTR)ptr > (ULONG_PTR)-1000L)
-
-#define BITS_TO_LONGS(bits) \
- (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
-
-#ifndef ETIMEDOUT
-#define ETIMEDOUT (110)
-#endif
-
-#ifdef PAGE_ALIGN
-#undef PAGE_ALIGN
-#define PAGE_ALIGN(Va) ((u64)((ULONG_PTR)(Va) & ~(PAGE_SIZE - 1)))
-#endif
-
-#define NEXT_PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
-/* typed minimum */
-#define min_t(type,x,y) ((type)(x) < (type)(y) ? (type)(x) : (type)(y))
-#define max_t(type,x,y) ((type)(x) > (type)(y) ? (type)(x) : (type)(y))
-
-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-
-#define EXPORT_SYMBOL(name)
-#ifndef USE_WDM_INTERRUPTS
-#define free_irq(pdev)
-#endif
-
-static inline NTSTATUS errno_to_ntstatus(int err)
-{
-#define MAP_ERR(err,ntstatus) case err: status = ntstatus; break
- NTSTATUS status;
-
- if (!err)
- return STATUS_SUCCESS;
-
- if (err < 0)
- err = -err;
- switch (err) {
- MAP_ERR( ENOENT, STATUS_NOT_FOUND );
- MAP_ERR( EAGAIN, STATUS_DEVICE_BUSY );
- MAP_ERR( ENOMEM, STATUS_NO_MEMORY );
- MAP_ERR( EACCES, STATUS_ACCESS_DENIED );
- MAP_ERR( EFAULT, STATUS_DRIVER_INTERNAL_ERROR );
- MAP_ERR( EBUSY, STATUS_INSUFFICIENT_RESOURCES );
- MAP_ERR( ENODEV, STATUS_NOT_SUPPORTED );
- MAP_ERR( EINVAL, STATUS_INVALID_PARAMETER );
- MAP_ERR( ENOSYS, STATUS_NOT_SUPPORTED );
- default:
- status = STATUS_UNSUCCESSFUL;
- break;
- }
- return status;
-}
-
-
-////////////////////////////////////////////////////////
-//
-// PROTOTYPES
-//
-////////////////////////////////////////////////////////
-
-SIZE_T strlcpy(char *dest, const void *src, SIZE_T size);
-int core_init();
-void core_cleanup();
-
-
-////////////////////////////////////////////////////////
-//
-// SPECIFIC INCLUDES
-//
-////////////////////////////////////////////////////////
-
-struct mlx4_dev;
-struct mlx4_priv;
-
-#include <l2w_atomic.h>
-#include <l2w_bit.h>
-#include <l2w_bitmap.h>
-#include "l2w_debug.h"
-#include <l2w_memory.h>
-#include <l2w_umem.h>
-#include <l2w_list.h>
-#include <l2w_pci.h>
-#include <l2w_pcipool.h>
-#include "l2w_radix.h"
-#include <l2w_spinlock.h>
-#include <l2w_sync.h>
-#include <l2w_time.h>
-
-#include "device.h"
-
-static inline int mlx4_is_barred(struct mlx4_dev *dev)
-{
- return dev->flags & MLX4_FLAG_RESET_DRIVER;
-}
-
-static inline int mlx4_is_in_reset(struct mlx4_dev *dev)
-{
- return dev->flags & MLX4_FLAG_RESET_STARTED;
-}
-
-int parse_dev_location(
- const char *buffer,
- const char *format,
- int *bus, int *dev, int *func
-);
-
-#endif
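One easy-to-miss detail in the header above is the CPU_2_BE64 workaround: the macro stores its argument into a local temporary before byte-swapping, so every function that uses it must also put CPU_2_BE64_PREP among its local declarations. A hedged usage sketch (the function and variable names are illustrative, not taken from the patch; assumes l2w.h as removed above):

    static void write_be64_doorbell(__be64 *db_rec, u64 db_value)
    {
        CPU_2_BE64_PREP;                 /* declares the __tmp__ temporary */

        /* safe even when the argument is an expression, despite the
         * _byteswap_uint64() problem described in the header comment */
        *db_rec = CPU_2_BE64(db_value + 1);
    }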
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_spinlock.h
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_spinlock.h (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_spinlock.h (revision 6862)
@@ -1,148 +0,0 @@
-#pragma once
-
-#include <complib/cl_spinlock.h>
-
-#if 1
-
-typedef cl_spinlock_t spinlock_t;
-
-static inline void spin_lock_init(
- IN spinlock_t* const p_spinlock )
-{
- cl_spinlock_init( p_spinlock );
-}
-
-#define spin_lock cl_spinlock_acquire
-#define spin_unlock cl_spinlock_release
-
-CL_INLINE void
-spin_lock_dpc(
- IN cl_spinlock_t* const p_spinlock )
-{
- ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
- KeAcquireSpinLockAtDpcLevel( &p_spinlock->lock );
-}
-
-CL_INLINE void
-spin_unlock_dpc(
- IN cl_spinlock_t* const p_spinlock )
-{
- ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
- KeReleaseSpinLockFromDpcLevel( &p_spinlock->lock );
-}
-
-#else
-typedef struct spinlock {
- KSPIN_LOCK lock;
- KLOCK_QUEUE_HANDLE lockh;
- KIRQL irql;
-} spinlock_t;
-
-
-static inline void spin_lock_init(
- IN spinlock_t* const p_spinlock )
-{
- KeInitializeSpinLock( &p_spinlock->lock );
-}
-
-static inline void
-spin_lock(
- IN spinlock_t* const l)
-{
- KIRQL irql = KeGetCurrentIrql();
-
- ASSERT( l && irql <= DISPATCH_LEVEL );
-
- if (irql == DISPATCH_LEVEL)
- KeAcquireInStackQueuedSpinLockAtDpcLevel( &l->lock, &l->lockh );
- else
- KeAcquireInStackQueuedSpinLock( &l->lock, &l->lockh );
- l->irql = irql;
-}
-
-static inline void
-spin_unlock(
- IN spinlock_t* const l)
-{
- ASSERT( l && KeGetCurrentIrql() == DISPATCH_LEVEL );
- if (l->irql == DISPATCH_LEVEL)
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &l->lockh );
- else
- KeReleaseInStackQueuedSpinLock( &l->lockh );
-}
-
-/* to be used only at DPC level */
-static inline void
-spin_lock_dpc(
- IN spinlock_t* const l)
-{
- ASSERT( l && KeGetCurrentIrql() == DISPATCH_LEVEL );
- KeAcquireInStackQueuedSpinLockAtDpcLevel( &l->lock, &l->lockh );
-}
-
-/* to be used only at DPC level */
-static inline void
-spin_unlock_dpc(
- IN spinlock_t* const l)
-{
- ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &l->lockh );
-}
-
-static inline void
-spin_lock_sync(
- IN spinlock_t* const l )
-{
- KLOCK_QUEUE_HANDLE lockh;
- ASSERT( l && KeGetCurrentIrql() <= DISPATCH_LEVEL );
- KeAcquireInStackQueuedSpinLock ( &l->lock, &lockh );
- KeReleaseInStackQueuedSpinLock( &lockh );
-}
-
-#endif
-
-#define DEFINE_SPINLOCK(lock) spinlock_t lock
-
-static inline void
-spin_lock_irqsave(
- IN spinlock_t* const l,
- IN unsigned long * flags)
-{
- UNUSED_PARAM(flags);
- spin_lock(l);
-}
-
-static inline void
-spin_unlock_irqrestore(
- IN spinlock_t* const l,
- IN unsigned long flags)
-{
- UNUSED_PARAM(flags);
- spin_unlock(l);
-}
-
-static inline void
-spin_lock_sync(
- IN spinlock_t* const l )
-{
- KLOCK_QUEUE_HANDLE lockh;
- ASSERT( l && KeGetCurrentIrql() <= DISPATCH_LEVEL );
- KeAcquireInStackQueuedSpinLock ( &l->lock, &lockh );
- KeReleaseInStackQueuedSpinLock( &lockh );
-}
-
-/* we are working from DPC level, so we can use usual spinlocks */
-#define spin_lock_irq spin_lock
-#define spin_unlock_irq spin_unlock
-#define spin_lock_nested(a,b) spin_lock(a)
-
-/* Windows doesn't support this kind of spinlock so far, but maybe it will tomorrow ... */
-#define rwlock_init spin_lock_init
-#define read_lock_irqsave spin_lock_irqsave
-#define read_unlock_irqrestore spin_unlock_irqrestore
-#define write_lock_irq spin_lock_irq
-#define write_unlock_irq spin_unlock_irq
-
-// rw_lock
-typedef spinlock_t rwlock_t;
-
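As the header above shows, spin_lock_irqsave() ignores its flags argument and simply maps to the complib spinlock; the Linux-style calling convention is kept only so ported code compiles unchanged (note that, unlike Linux, the wrapper takes a pointer to flags). A hedged sketch of the ported call pattern (my_ctx is an illustrative name; assumes the l2w spinlock wrappers shown above):

    struct my_ctx {
        spinlock_t lock;                 /* must be set up with spin_lock_init() first */
        int        counter;
    };

    static void my_ctx_bump(struct my_ctx *ctx)
    {
        unsigned long flags;             /* unused on Windows, kept for source compatibility */

        spin_lock_irqsave(&ctx->lock, &flags);
        ctx->counter++;
        spin_unlock_irqrestore(&ctx->lock, flags);
    }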
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_debug.h
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_debug.h (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_debug.h (revision 6862)
@@ -1,23 +0,0 @@
-#pragma once
-
-VOID
-WriteEventLogEntryStr(
- PVOID pi_pIoObject,
- ULONG pi_ErrorCode,
- ULONG pi_UniqueErrorCode,
- ULONG pi_FinalStatus,
- PWCHAR pi_InsertionStr,
- ULONG pi_nDataItems,
- ...
- );
-
-VOID
-WriteEventLogEntryData(
- PVOID pi_pIoObject,
- ULONG pi_ErrorCode,
- ULONG pi_UniqueErrorCode,
- ULONG pi_FinalStatus,
- ULONG pi_nDataItems,
- ...
- );
-
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_time.h
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_time.h (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/inc/l2w_time.h (revision 6862)
@@ -1,20 +0,0 @@
-#pragma once
-
-// returns current time in msecs (u64)
-#define jiffies get_tickcount_in_ms()
-
-// jiffies is measured in msecs
-#define jiffies_to_usecs(msecs) ((msecs)*1000)
-
-#define time_after(a,b) ((__int64)(b) - (__int64)(a) < 0)
-#define time_before(a,b) time_after(b,a)
-
-#define time_after_eq(a,b) ((__int64)(a) - (__int64)(b) >= 0)
-#define time_before_eq(a,b) time_after_eq(b,a)
-
-extern u32 g_time_increment;
-extern LARGE_INTEGER g_cmd_interval;
-#define cond_resched() KeDelayExecutionThread( KernelMode, FALSE, &g_cmd_interval )
-
-uint64_t get_tickcount_in_ms(void);
-
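The time_after()/time_before() macros above use the standard signed-difference trick, so comparisons stay correct even if the millisecond tick counter ever wraps. A hedged sketch of the typical ported polling loop (fw_is_ready() is a placeholder predicate; assumes l2w_time.h as removed above, and must run at PASSIVE_LEVEL because cond_resched() sleeps):

    static int fw_is_ready(struct mlx4_dev *dev);        /* placeholder, not in the patch */

    static int wait_for_fw_ready(struct mlx4_dev *dev, int timeout_ms)
    {
        u64 end = jiffies + timeout_ms;                   /* jiffies is in msecs here */

        while (!fw_is_ready(dev)) {
            if (time_after(jiffies, end))
                return -ETIMEDOUT;
            cond_resched();                               /* delays for g_cmd_interval */
        }
        return 0;
    }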
Index: B:/users/irena/proj1/trunk/hw/mlx4/kernel/hca/SOURCES
===================================================================
--- B:/users/irena/proj1/trunk/hw/mlx4/kernel/hca/SOURCES (revision 6771)
+++ B:/users/irena/proj1/trunk/hw/mlx4/kernel/hca/SOURCES (revision 6862)
@@ -32,8 +32,11 @@
vp.c \
wmi.c \
-INCLUDES=..;..\inc;..\..\inc;..\bus\inc;..\bus\ib;..\bus\core\$O;..\..\..\..\inc;..\..\..\..\inc\kernel;
+INCLUDES=..;..\inc;..\..\inc;..\bus\inc;..\bus\ib;..\bus\core\$O;..\..\..\..\inc;..\..\..\..\inc\kernel;..\..\..\..\inc\kernel\l2w;
+TARGETLIBS= \
+ $(TARGETPATH)\*\l2w.lib \
+
PRECOMPILED_INCLUDE=precomp.h
NTTARGETFILE0=mofcomp
-------------- next part --------------
A non-text attachment was scrubbed...
Name: l2w_genutils.patch
Type: application/octet-stream
Size: 335826 bytes
Desc: l2w_genutils.patch
URL: <http://lists.openfabrics.org/pipermail/ofw/attachments/20110201/d80eeb2e/attachment.obj>