[ofa-general] Re: mthca use of dma_sync_single is bogus

Lukas Hejtmanek xhejtman at ics.muni.cz
Tue Jul 10 07:14:09 PDT 2007


On Mon, Jul 09, 2007 at 11:48:06PM -0700, Roland Dreier wrote:
> Yes, for example on ppc 4xx the amount of coherent memory is quite
> small by default (address space for non-cached mappings is actually
> what is limited, but it amounts to the same thing).
> 
> Maybe the least bad solution is to change to using dma_map_single()
> instead of pci_map_sg() in mthca_memfree.c.

And what about the attached patch to mthca_memfree? It replaces alloc_pages()
with pci_alloc_consistent(). With that change applied, I can enable FMR and the
driver runs fine.
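
In other words, the allocation the patch switches to boils down to something
like this (just an illustrative sketch, the helper names below are made up and
not part of the patch itself):

#include <linux/pci.h>
#include <linux/gfp.h>

/* Sketch: allocate one ICM chunk from coherent DMA memory.  The buffer
 * is visible to both the CPU and the HCA without any dma_sync_*() calls. */
static void *icm_chunk_alloc_coherent(struct pci_dev *pdev, int order,
				      dma_addr_t *dma_handle)
{
	return pci_alloc_consistent(pdev, PAGE_SIZE << order, dma_handle);
}

/* Sketch: free it again with the matching coherent API. */
static void icm_chunk_free_coherent(struct pci_dev *pdev, int order,
				    void *buf, dma_addr_t dma_handle)
{
	pci_free_consistent(pdev, PAGE_SIZE << order, buf, dma_handle);
}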

Granted, it does not solve the dma_sync_single() problem per se; on the other
hand, with pci_alloc_consistent() swiotlb is not needed, so dma_sync_single()
becomes a no-op. But I agree it is not a conceptual fix.
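
For contrast, the streaming path the driver uses today looks roughly like this
(again only a sketch with made-up helper names, mapping-error handling omitted);
it is this kind of mapping that may be bounced through swiotlb and therefore
needs the explicit syncs:

#include <linux/pci.h>
#include <linux/gfp.h>

/* Sketch: streaming DMA.  If the HCA cannot reach the page directly,
 * pci_map_page() may hand back a swiotlb bounce buffer, and the CPU
 * and device copies then have to be synced explicitly. */
static struct page *icm_chunk_alloc_streaming(struct pci_dev *pdev, int order,
					      dma_addr_t *dma_handle)
{
	struct page *page = alloc_pages(GFP_KERNEL, order);

	if (!page)
		return NULL;

	*dma_handle = pci_map_page(pdev, page, 0, PAGE_SIZE << order,
				   PCI_DMA_BIDIRECTIONAL);
	return page;
}

/* Sketch: before the CPU looks at data the HCA wrote, the mapping has
 * to be synced; with coherent memory this call is unnecessary. */
static void icm_chunk_sync_for_cpu(struct pci_dev *pdev,
				   dma_addr_t dma_handle, int order)
{
	pci_dma_sync_single_for_cpu(pdev, dma_handle, PAGE_SIZE << order,
				    PCI_DMA_BIDIRECTIONAL);
}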

-- 
Lukáš Hejtmánek
-------------- next part --------------
--- mthca_memfree.c.orig	2007-07-07 01:19:35.988558442 +0200
+++ mthca_memfree.c	2007-07-10 16:00:10.200488265 +0200
@@ -70,36 +70,27 @@
 		return;
 
 	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
-		if (coherent)
-			for (i = 0; i < chunk->npages; ++i) {
-				buf = lowmem_page_address(chunk->mem[i].page);
+		for (i = 0; i < chunk->npages; ++i) {
+			buf = lowmem_page_address(chunk->mem[i].page);
+			if (coherent)
 				dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
 						  buf, sg_dma_address(&chunk->mem[i]));
-			}
-		else {
-			if (chunk->nsg > 0)
-				pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
-					     PCI_DMA_BIDIRECTIONAL);
-
-			for (i = 0; i < chunk->npages; ++i)
-				__free_pages(chunk->mem[i].page,
-					     get_order(chunk->mem[i].length));
+			else
+				pci_free_consistent(dev->pdev, chunk->mem[i].length, buf, sg_dma_address(&chunk->mem[i]));
 		}
-
 		kfree(chunk);
 	}
 
 	kfree(icm);
 }
 
-static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
+static int mthca_alloc_icm_pages(struct pci_dev *pdev, struct scatterlist *mem, int order, gfp_t gfp_mask)
 {
-	mem->page = alloc_pages(gfp_mask, order);
-	if (!mem->page)
+	void *buf = pci_alloc_consistent(pdev, PAGE_SIZE << order, &sg_dma_address(mem));
+	if (!buf)
 		return -ENOMEM;
-
-	mem->length = PAGE_SIZE << order;
-	mem->offset = 0;
+	sg_set_buf(mem, buf, PAGE_SIZE << order);
+	sg_dma_len(mem) = PAGE_SIZE << order;
 	return 0;
 }
 
@@ -157,21 +148,13 @@
 						       &chunk->mem[chunk->npages],
 						       cur_order, gfp_mask);
 		else
-		       	ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
+		       	ret = mthca_alloc_icm_pages(dev->pdev, 
+						    &chunk->mem[chunk->npages],
 						    cur_order, gfp_mask);
 
 		if (!ret) {
 			++chunk->npages;
-
-			if (!coherent && chunk->npages == MTHCA_ICM_CHUNK_LEN) {
-				chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
-							chunk->npages,
-							PCI_DMA_BIDIRECTIONAL);
-
-				if (chunk->nsg <= 0)
-					goto fail;
-			}
-
+			++chunk->nsg;
 			if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
 				chunk = NULL;
 
@@ -183,15 +166,6 @@
 		}
 	}
 
-	if (!coherent && chunk) {
-		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
-					chunk->npages,
-					PCI_DMA_BIDIRECTIONAL);
-
-		if (chunk->nsg <= 0)
-			goto fail;
-	}
-
 	return icm;
 
 fail:

