On Sat, Dec 27, 2008 at 3:20 AM, Nitin Gupta <[email protected]> wrote:
>
> This seems to be the _only_ way for now. Real thing would be to let

I was thinking that extending xvmalloc to support allocations up to
page size might be cleaner. OTOH, on a 4K page machine 512 bytes is
only 12.5%, which isn't that much more than the fragmentation of
xvmalloc, so we would not gain much.
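
(Spelling out the arithmetic: only pages that compress to more than
PAGE_SIZE - 512 bytes would be stored whole, so the worst case waste
per such page is just under 512 bytes, i.e. 512 / 4096 = 12.5% of a
4K page.)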

> such pages go to real swap disk (if present). But till the time that
> is implemented we should allocate full page to keep it or just return
> I/O error as we currently do. It seems that if we return too many swap
> I/O errors, we get into some problems (need to check block layer
> stuff?).
>
> In your code:
>  + compcache.table[page_no].pageNum=(u32)(cmem);
>
> you are casting virtual address to u32. This will surely segfault on
> 64bit. Maybe this is the reason for crash.

No. I am running this in a 32bit VM. The patch does not presently
attempt to support 64bit machines. The crash was caused by the lines
in the initialisation:

        compcache.table[0].pageNum = page_to_pfn(page);
        compcache.table[0].len = PAGE_SIZE;

I have worked around this by special-casing page_no == 0; however, it
would probably be better to replace the first of the two lines above
with something like:

           compcache.table[0].pageNum = pointer_to_page(page);
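
(pointer_to_page() above is just a placeholder name. Assuming entry 0
is then treated like any other page that is stored uncompressed, I
think the real helper would be page_address(), i.e. something like:

           compcache.table[0].pageNum = (u32)page_address(page);

still 32bit only, of course.)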

I can think of four ways of supporting 64bit:
1) Expand the compcache table entries (wasteful of space)
2) Steal bits from 64bit pointers (rather unclean)
3) Always allocate 64 bits with xvmalloc to hold the pointer
4) Allocate 64 bits from xvmalloc iff the pointer > MAX_U32.

4) seems the nicest of these options; a rough sketch is below.
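
Purely as a sketch (the helper name is invented, this is untested, and
the free and read paths would still need some flag to tell the two
cases apart), 4) might look something like:

        /*
         * Option 4 sketch: keep the kmalloc'ed pointer for an
         * incompressible page.  If it fits in 32 bits, store it in
         * pageNum directly; otherwise stash the full 64-bit value in
         * an 8-byte xvmalloc block and record that block's location
         * in (pageNum, offset) instead.
         */
        static int store_uncompressed_ptr(size_t page_no, void *ptr)
        {
                u64 val = (u64)(unsigned long)ptr;
                u32 pnum, offset;
                u64 *slot;

                if (val <= 0xffffffffULL) {
                        compcache.table[page_no].pageNum = (u32)val;
                        compcache.table[page_no].offset = 0;
                        return 0;
                }

                if (xvMalloc(compcache.mem_pool, sizeof(u64),
                                &pnum, &offset))
                        return -ENOMEM;

                slot = (u64 *)((char *)xvMapPage(pnum) + offset);
                *slot = val;
                xvUnmapPage(pnum);

                compcache.table[page_no].pageNum = pnum;
                compcache.table[page_no].offset = offset;
                return 0;
        }

Retrieval would be the mirror image: if the entry is marked as
indirect, map (pageNum, offset), read the u64 back out, and kfree()
that pointer when the slot is freed.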

> Nit: you are using GFP_KERNEL alloc flag with kmalloc - this can cause
> deadlock: alloc memory to store compressed page -> not enough memory,
> swap out something -> alloc memory to store compressed page -> ...and
> this goes on.
> So GFP_NOIO should be used.

OK.
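
To spell it out: only the allocations made while actually servicing
swap I/O need GFP_NOIO, since those are the ones that can recurse back
into the swap device; the init-time kmallocs can stay GFP_KERNEL, e.g.:

        /* write path, under swap I/O: must not trigger further swap-out */
        cmem = kmalloc(PAGE_SIZE, GFP_NOIO);

        /* at init time, before any swap I/O: GFP_KERNEL is fine */
        compcache.compress_workmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);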

FYI, I have attached another patch. It fixes the above error, but
there is still another bug: AFAICT all page reads now report that
decompression failed. I do not know what causes this yet, and the
patch is still filled with extra pr_info("Debug ...") lines to help me
track it down. I'll look into this as time permits.
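
One debugging idea I have not tried yet (this is not in the attached
patch, just a thought): right after the write path's xvUnmapPage(),
re-read the freshly stored page the same way the read side of
compcache_make_request() will, and try to decompress it, so that a
corrupted store shows up at write time rather than at the next read.
Roughly:

        /*
         * Debug only: re-read the page the way the read path will and
         * check that it still decompresses.  compress_buffer (2 *
         * PAGE_SIZE) is only used as scratch space here.
         */
        if (clen != PAGE_SIZE) {
                size_t dlen = PAGE_SIZE;
                void *check;
                int rc;

                check = (char *)xvMapPage(compcache.table[page_no].pageNum) +
                                compcache.table[page_no].offset;
                rc = lzo1x_decompress_safe(check, clen,
                                compcache.compress_buffer, &dlen);
                xvUnmapPage(compcache.table[page_no].pageNum);
                if (rc != LZO_E_OK || dlen != PAGE_SIZE)
                        pr_info(C "Debug: write-side verify failed "
                                "(ret=%d)\n", rc);
        }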

-- 
John C. McCabe-Dansted
PhD Student
University of Western Australia
Index: compcache.c
===================================================================
--- compcache.c	(revision 243)
+++ compcache.c	(working copy)
@@ -45,6 +45,18 @@
 #if defined(STATS)
 static struct proc_dir_entry *proc;
 
+static void compcache_free_page(int page_no)
+{
+	if (compcache.table[page_no].len == PAGE_SIZE) {
+		kfree((char *)compcache.table[page_no].pageNum);
+	} else {
+		xvFree(compcache.mem_pool,
+			compcache.table[page_no].pageNum,
+			compcache.table[page_no].offset);
+	}
+}
+
+
 static int proc_compcache_read(char *page, char **start, off_t off,
 				int count, int *eof, void *data)
 {
@@ -115,6 +127,7 @@
 	size_t clen, page_no;
 	void *user_mem, *cmem;
 	struct page *page;
+	int alloc_error;
 
 	if (!valid_swap_request(bio)) {
 		stat_inc(stats.invalid_io);
@@ -134,6 +147,7 @@
 		 * swap device is read from user-space (e.g. during swapon)
 		 */
 		if (unlikely(compcache.table[page_no].pageNum == 0)) {
+	pr_info(C "Debug Rn\n");
 			pr_debug("Read before write on swap device: "
 				"sector=%lu, size=%u, offset=%u\n",
 				(ulong)(bio->bi_sector),
@@ -148,25 +162,39 @@
 
 
 		clen = PAGE_SIZE;
-		cmem = (char *)xvMapPage(compcache.table[page_no].pageNum) +
-				compcache.table[page_no].offset;
 
 		/* Page is stored uncompressed since its incompressible */
 		if (unlikely(compcache.table[page_no].len == PAGE_SIZE)) {
+	pr_info(C "Debug R0\n");
+	pr_info(C "Debug R0 %zu %u\n", page_no, compcache.table[page_no].pageNum);
+			if (page_no > 0) {
+				cmem = (char *)compcache.table[page_no].pageNum;
+			} else {
+				cmem = (char *)xvMapPage(compcache.table[page_no].pageNum) + compcache.table[page_no].offset;
+			}
+
 			memcpy(user_mem, cmem, PAGE_SIZE);
 			kunmap(page);
-			xvUnmapPage(compcache.table[page_no].pageNum);
+			/*xvUnmapPage(compcache.table[page_no].pageNum);*/
 			set_bit(BIO_UPTODATE, &bio->bi_flags);
 			BIO_ENDIO(bio, 0);
 			return 0;
+		} else {
+	pr_info(C "Debug R1\n");
+			cmem = (char *)xvMapPage(compcache.table[page_no].pageNum) + compcache.table[page_no].offset;
 		}
 
+	pr_info(C "Debug R2 %p\n", cmem);
 		ret = lzo1x_decompress_safe(
 			cmem, compcache.table[page_no].len,
 			user_mem, &clen);
 
-		xvUnmapPage(compcache.table[page_no].pageNum);
+	pr_info(C "Debug R3\n");
+		if (compcache.table[page_no].len != PAGE_SIZE) {
+			xvUnmapPage(compcache.table[page_no].pageNum);
+		}
 
+	pr_info(C "Debug R4\n");
 		/* should NEVER happen */
 		if (unlikely(ret != LZO_E_OK)) {
 			pr_err(C "Decompression failed! "
@@ -176,6 +204,7 @@
 			goto out;
 		}
 
+	pr_info(C "Debug R5\n");
 		kunmap(page);
 		set_bit(BIO_UPTODATE, &bio->bi_flags);
 		BIO_ENDIO(bio, 0);
@@ -189,9 +218,8 @@
 		 * to free the memory that was allocated for this page.
 		 */
 		if (compcache.table[page_no].pageNum) {
-			xvFree(compcache.mem_pool,
-				compcache.table[page_no].pageNum,
-				compcache.table[page_no].offset);
+	pr_info(C "Debug W1\n");
+			compcache_free_page(page_no);
 			stat_dec(stats.curr_pages);
 			stat_set(stats.curr_mem, stats.curr_mem	-
 					compcache.table[page_no].len);
@@ -199,6 +227,7 @@
 			compcache.table[page_no].offset = 0;
 			compcache.table[page_no].len = 0;
 		}
+	pr_info(C "Debug W2\n");
 
 		mutex_lock(&compcache.lock);
 		ret = lzo1x_1_compress(user_mem, PAGE_SIZE,
@@ -212,15 +241,33 @@
 			goto out;
 		}
 
+		alloc_error = 0;
+
+	pr_info(C "Debug W3\n");
 		/* Page is incompressible - store it as is */
-		if (clen >= PAGE_SIZE) {
+		/*if (clen >= PAGE_SIZE) {*/
+		if (unlikely(clen > (PAGE_SIZE - 512))) {
+	pr_info(C "Debug W4\n");
 			clen = PAGE_SIZE;
 			src = user_mem;
+			cmem = kmalloc(PAGE_SIZE, GFP_NOIO);
+			if (!cmem)
+				alloc_error = 1;
+			compcache.table[page_no].pageNum = (u32)cmem; /* 32bit only */
+		} else {
+	pr_info(C "Debug W5\n");
+			if (unlikely(xvMalloc(compcache.mem_pool, clen,
+				&compcache.table[page_no].pageNum,
+				&offset))) {
+	pr_info(C "Debug W6\n");
+				alloc_error = 2;
+			} else {
+				cmem = (char *)xvMapPage(compcache.table[page_no].pageNum) + compcache.table[page_no].offset;
+			}
 		}
 
-		if (xvMalloc(compcache.mem_pool, clen,
-				&compcache.table[page_no].pageNum,
-				&offset)) {
+	pr_info(C "Debug W7\n");
+		if (alloc_error) {
 			mutex_unlock(&compcache.lock);
 #if defined(VERBOSE)
 			pr_info(C "Error allocating memory for compressed "
@@ -235,13 +282,14 @@
 
 		compcache.table[page_no].offset = offset;
 		
-		cmem = (char *)xvMapPage(compcache.table[page_no].pageNum) +
-				compcache.table[page_no].offset;
-
+	pr_info(C "Debug W8 %p\n", cmem);
 		memcpy(cmem, src, clen);
 
-		xvUnmapPage(compcache.table[page_no].pageNum);
+	pr_info(C "Debug W9\n");
+		if (clen != PAGE_SIZE)
+			xvUnmapPage(compcache.table[page_no].pageNum);
 
+	pr_info(C "Debug Wa\n");
 		/* Update stats */
 		stat_inc(stats.curr_pages);
 		stat_set(stats.curr_mem, stats.curr_mem + clen);
@@ -253,9 +301,11 @@
 		
 		compcache.table[page_no].len = clen;
 
+	pr_info(C "Debug Wb\n");
 		kunmap(page);
 		set_bit(BIO_UPTODATE, &bio->bi_flags);
 		BIO_ENDIO(bio, 0);
+	pr_info(C "Debug Wc\n");
 		return 0;
 	}
 out:
@@ -295,16 +345,20 @@
 	
 	compcache.size = compcache_size_kbytes << 10;
 	compcache.size = (compcache.size + PAGE_SIZE - 1) & PAGE_MASK;
+	pr_info(C "Debug 0\n");
 	pr_info(C "Compressed swap size set to: %zu KB\n", compcache.size >> 10);
 	compcache.size >>= SECTOR_SHIFT;
 
+	pr_info(C "Debug 00\n");
 	compcache.compress_workmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+	pr_info(C "Debug 000\n");
 	if (compcache.compress_workmem == NULL) {
 		pr_err(C "Error allocating compressor working memory\n");
 		ret = -ENOMEM;
 		goto fail;
 	}
 
+	pr_info(C "Debug 1\n");
 	compcache.compress_buffer = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
 	if (compcache.compress_buffer == NULL) {
 		pr_err(C "Error allocating compressor buffer space\n");
@@ -312,6 +366,7 @@
 		goto fail;
 	}
 
+	pr_info(C "Debug 2\n");
 	num_pages = compcache.size >> SECTORS_PER_PAGE_SHIFT;
         compcache.table = vmalloc(num_pages * sizeof(*compcache.table));
         if (compcache.table == NULL) {
@@ -321,6 +376,7 @@
         }
         memset(compcache.table, 0, num_pages * sizeof(*compcache.table));
 
+	pr_info(C "Debug 3\n");
 	page = alloc_page(__GFP_ZERO);
 	if (page == NULL) {
 		pr_err(C "Error allocating swap header page\n");
@@ -330,10 +386,12 @@
 	compcache.table[0].pageNum = page_to_pfn(page);
 	compcache.table[0].len = PAGE_SIZE;
 
+	pr_info(C "Debug 4\n");
 	swap_header = kmap(page);
 	setup_swap_header((union swap_header *)(swap_header));
 	kunmap(page);
 
+	pr_info(C "Debug 5\n");
 	compcache.disk = alloc_disk(1);
 	if (compcache.disk == NULL) {
 		pr_err(C "Error allocating disk structure\n");
@@ -341,6 +399,7 @@
 		goto fail;
 	}
 
+	pr_info(C "Debug 6\n");
 	compcache.disk->first_minor = 0;
 	compcache.disk->fops = &compcache_devops;
 	/*
@@ -350,6 +409,7 @@
 	 */
 	strcpy(compcache.disk->disk_name, "ramzswap0");
 
+	pr_info(C "Debug 7\n");
 	compcache.disk->major = register_blkdev(0, compcache.disk->disk_name);
 	if (compcache.disk->major < 0) {
 		pr_err(C "Cannot register block device\n");
@@ -357,6 +417,7 @@
 		goto fail;
 	}
 
+	pr_info(C "Debug 8\n");
 	compcache.disk->queue = blk_alloc_queue(GFP_KERNEL);
 	if (compcache.disk->queue == NULL) {
 		pr_err(C "Cannot register disk queue\n");
@@ -364,11 +425,13 @@
 		goto fail;
 	}
 
+	pr_info(C "Debug 9\n");
 	set_capacity(compcache.disk, compcache.size);
 	blk_queue_make_request(compcache.disk->queue, compcache_make_request);
 	blk_queue_hardsect_size(compcache.disk->queue, PAGE_SIZE);
 	add_disk(compcache.disk);
 
+	pr_info(C "Debug a\n");
 	compcache.mem_pool = xvCreateMemPool();
 	if (compcache.mem_pool == NULL) {
 		pr_err(C "Error creating memory pool\n");
@@ -376,6 +439,7 @@
 		goto fail;
 	}
 
+	pr_info(C "Debug b\n");
 #if defined(STATS)
 	proc = create_proc_entry("compcache", S_IRUGO, NULL);
 	if (proc)
@@ -387,6 +451,7 @@
 	}
 #endif
 
+	pr_info(C "Debug c\n");
 	pr_debug(C "Initialization done!\n");
 	return 0;
 
@@ -425,9 +490,7 @@
 	/* Free all pages that are still in compcache */
 	for (i = 1; i < num_pages; i++) {
 		if (compcache.table[i].pageNum) {
-			xvFree(compcache.mem_pool,
-				compcache.table[i].pageNum,
-				compcache.table[i].offset);
+			compcache_free_page(i);
 		}
 	}
 