/*
 * Copy up to @n bytes of sector data from the ramdisk into @dst, starting
 * at @sector.  The span may straddle at most one page boundary, so at most
 * two page lookups are needed.  Sectors with no backing page read as zeros.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t chunk = min_t(size_t, n, PAGE_SIZE - offset);
	struct page *page;
	void *src;

	/* First (possibly partial) page. */
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page, KM_USER1);
		memcpy(dst, src + offset, chunk);
		kunmap_atomic(src, KM_USER1);
	} else {
		/* Hole in the radix tree: unwritten data reads as zeros. */
		memset(dst, 0, chunk);
	}

	if (chunk < n) {
		/* Remainder spills into the start of the following page. */
		size_t rest = n - chunk;

		dst += chunk;
		sector += chunk >> SECTOR_SHIFT;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page, KM_USER1);
			memcpy(dst, src, rest);
			kunmap_atomic(src, KM_USER1);
		} else {
			memset(dst, 0, rest);
		}
	}
}




+       local_irq_save(flags);
+       map = kmap_atomic(mappage, KM_USER0);
+       if (map[index].memcgrp_id == memcg->memcgrp_id) {
+               if (set && map[index].count == 0) {
+                       map[index].count = 1;
+                       ret = 1;
+               } else if (!set && map[index].count == 1) {
+                       map[index].count = 0;
+                       ret = 1;
+               }
+       }
+       kunmap_atomic(mappage, KM_USER0);
+       local_irq_restore(flags);
+       return ret;
+}


                /* FIXME: use a bounce buffer */
                local_irq_save(flags);
                buf = kmap_atomic(page, KM_IRQ0);

                /* do the actual data transfer */
                ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
                                       do_write);

                kunmap_atomic(buf, KM_IRQ0);
                local_irq_restore(flags);




        if (qc->curbytes == qc->nbytes - qc->sect_size)
                ap->hsm_task_state = HSM_ST_LAST;

        page = sg_page(qc->cursg);
        offset = qc->cursg->offset + qc->cursg_ofs;

        /* get the current page and offset */
        page = nth_page(page, (offset >> PAGE_SHIFT));
        offset %= PAGE_SIZE;

        DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" :
"read");

        if (PageHighMem(page)) {
                unsigned long flags;

                /* FIXME: use a bounce buffer */
                local_irq_save(flags);
                buf = kmap_atomic(page, KM_IRQ0);

                /* do the actual data transfer */
                ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
                                       do_write);

                kunmap_atomic(buf, KM_IRQ0);
                local_irq_restore(flags);
        } else {
                buf = page_address(page);
                ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
                                       do_write);
        }

        qc->curbytes += qc->sect_size;
        qc->cursg_ofs += qc->sect_size;

        if (qc->cursg_ofs == qc->cursg->length) {
                qc->cursg = sg_next(qc->cursg);
                qc->cursg_ofs = 0;
        }
}


Reply via email to