#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#ifdef BLK_MQ_MODE
#include <linux/blk-mq.h>
#endif

#define SBDD_SECTOR_SHIFT      9
#define SBDD_SECTOR_SIZE       (1 << SBDD_SECTOR_SHIFT)
/* 1 MiB = 2^20 bytes = 2^(20 - 9) = 2048 sectors of 512 bytes */
#define SBDD_MIB_SECTORS       (1 << (20 - SBDD_SECTOR_SHIFT))
#define SBDD_NAME              "sbdd"

struct sbdd {
	wait_queue_head_t       exitwait;
	spinlock_t              datalock;
	atomic_t                deleting;
	atomic_t                refs_cnt;
	sector_t                capacity;
	u8                      *data;
	struct gendisk          *gd;
	struct request_queue    *q;
#ifdef BLK_MQ_MODE
	struct blk_mq_tag_set   *tag_set;
#endif
};

static struct sbdd      __sbdd;
static int              __sbdd_major = 0;
static unsigned long    __sbdd_capacity_mib = 100;

static sector_t sbdd_xfer(struct bio_vec *bvec, sector_t pos, int dir)
{
	void *buff = page_address(bvec->bv_page) + bvec->bv_offset;
	sector_t len = bvec->bv_len >> SBDD_SECTOR_SHIFT;
	size_t offset;
	size_t nbytes;

	/* Clamp the transfer to the device capacity */
	if (pos >= __sbdd.capacity)
		return 0;
	if (pos + len > __sbdd.capacity)
		len = __sbdd.capacity - pos;

	offset = pos << SBDD_SECTOR_SHIFT;
	nbytes = len << SBDD_SECTOR_SHIFT;

	spin_lock(&__sbdd.datalock);

	if (dir)
		memcpy(__sbdd.data + offset, buff, nbytes);
	else
		memcpy(buff, __sbdd.data + offset, nbytes);

	spin_unlock(&__sbdd.datalock);

	pr_debug("pos=%6llu len=%4llu %s\n",
	         (unsigned long long)pos, (unsigned long long)len,
	         dir ? "written" : "read");

	return len;
}

#ifdef BLK_MQ_MODE

static void sbdd_xfer_rq(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	int dir = rq_data_dir(rq);
	sector_t pos = blk_rq_pos(rq);

	rq_for_each_segment(bvec, rq, iter)
		pos += sbdd_xfer(&bvec, pos, dir);
}

static blk_status_t sbdd_queue_rq(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_queue_data const *bd)
{
	if (atomic_read(&__sbdd.deleting))
		return BLK_STS_IOERR;

	atomic_inc(&__sbdd.refs_cnt);

	blk_mq_start_request(bd->rq);
	sbdd_xfer_rq(bd->rq);
	blk_mq_end_request(bd->rq, BLK_STS_OK);

	if (atomic_dec_and_test(&__sbdd.refs_cnt))
		wake_up(&__sbdd.exitwait);

	return BLK_STS_OK;
}

/*
The handler receives requests for the device as arguments and can use
various functions to process them:

blk_mq_start_request()   - must be called before processing a request
blk_mq_requeue_request() - to re-send the request to the queue
blk_mq_end_request()     - to end request processing and notify upper layers
*/
static struct blk_mq_ops const __sbdd_blk_mq_ops = {
	.queue_rq = sbdd_queue_rq,
};

#else

static void sbdd_xfer_bio(struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bvec;
	int dir = bio_data_dir(bio);
	sector_t pos = bio->bi_iter.bi_sector;

	bio_for_each_segment(bvec, bio, iter)
		pos += sbdd_xfer(&bvec, pos, dir);
}

static blk_qc_t sbdd_make_request(struct request_queue *q, struct bio *bio)
{
	if (atomic_read(&__sbdd.deleting)) {
		pr_err("unable to process bio while deleting\n");
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	atomic_inc(&__sbdd.refs_cnt);

	sbdd_xfer_bio(bio);
	bio_endio(bio);

	if (atomic_dec_and_test(&__sbdd.refs_cnt))
		wake_up(&__sbdd.exitwait);

	return BLK_QC_T_NONE;
}

#endif /* BLK_MQ_MODE */

/*
There are no read or write operations. These operations are performed by
the request() function associated with the request queue of the disk.
*/
static struct block_device_operations const __sbdd_bdev_ops = {
	.owner = THIS_MODULE,
};
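/*
Illustration (hypothetical usage, not part of the driver): userspace never
calls into this module directly. Ordinary reads and writes on the device
node are converted by the block layer into bios (or blk-mq requests) and
delivered to the handlers above, which is why no .read/.write methods are
needed here. For example, once the disk is registered:

  $ sudo dd if=/dev/urandom of=/dev/sbdd bs=4096 count=16 oflag=direct
  $ sudo dd if=/dev/sbdd of=/dev/null bs=4096 count=16 iflag=direct

The device node name follows from SBDD_NAME.
*/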
static int sbdd_create(void)
{
	int ret = 0;

	/*
	This call is somewhat redundant, but used anyways by tradition.
	The number is to be displayed in /proc/devices (0 for auto).
	*/
	pr_info("registering blkdev\n");
	__sbdd_major = register_blkdev(0, SBDD_NAME);
	if (__sbdd_major < 0) {
		pr_err("call register_blkdev() failed with %d\n", __sbdd_major);
		return -EBUSY;
	}

	memset(&__sbdd, 0, sizeof(struct sbdd));
	__sbdd.capacity = (sector_t)__sbdd_capacity_mib * SBDD_MIB_SECTORS;

	pr_info("allocating data\n");
	__sbdd.data = vzalloc(__sbdd.capacity << SBDD_SECTOR_SHIFT);
	if (!__sbdd.data) {
		pr_err("unable to alloc data\n");
		return -ENOMEM;
	}

	spin_lock_init(&__sbdd.datalock);
	init_waitqueue_head(&__sbdd.exitwait);

#ifdef BLK_MQ_MODE
	pr_info("allocating tag_set\n");
	__sbdd.tag_set = kzalloc(sizeof(struct blk_mq_tag_set), GFP_KERNEL);
	if (!__sbdd.tag_set) {
		pr_err("unable to alloc tag_set\n");
		return -ENOMEM;
	}

	/* Number of hardware dispatch queues */
	__sbdd.tag_set->nr_hw_queues = 1;
	/* Depth of hardware dispatch queues */
	__sbdd.tag_set->queue_depth = 128;
	__sbdd.tag_set->numa_node = NUMA_NO_NODE;
	__sbdd.tag_set->ops = &__sbdd_blk_mq_ops;

	ret = blk_mq_alloc_tag_set(__sbdd.tag_set);
	if (ret) {
		pr_err("call blk_mq_alloc_tag_set() failed with %d\n", ret);
		return ret;
	}

	/* Creates both the hardware and the software queues and initializes structs */
	pr_info("initing queue\n");
	__sbdd.q = blk_mq_init_queue(__sbdd.tag_set);
	if (IS_ERR(__sbdd.q)) {
		ret = (int)PTR_ERR(__sbdd.q);
		pr_err("call blk_mq_init_queue() failed with %d\n", ret);
		__sbdd.q = NULL;
		return ret;
	}
#else
	pr_info("allocating queue\n");
	__sbdd.q = blk_alloc_queue(GFP_KERNEL);
	if (!__sbdd.q) {
		pr_err("call blk_alloc_queue() failed\n");
		return -EINVAL;
	}
	blk_queue_make_request(__sbdd.q, sbdd_make_request);
#endif /* BLK_MQ_MODE */

	/* Configure queue */
	blk_queue_logical_block_size(__sbdd.q, SBDD_SECTOR_SIZE);

	/* A disk must have at least one minor */
	pr_info("allocating disk\n");
	__sbdd.gd = alloc_disk(1);
	if (!__sbdd.gd) {
		pr_err("call alloc_disk() failed\n");
		return -ENOMEM;
	}

	/* Configure gendisk */
	__sbdd.gd->queue = __sbdd.q;
	__sbdd.gd->major = __sbdd_major;
	__sbdd.gd->first_minor = 0;
	__sbdd.gd->fops = &__sbdd_bdev_ops;
	/* Represents name in /proc/partitions and /sys/block */
	scnprintf(__sbdd.gd->disk_name, DISK_NAME_LEN, SBDD_NAME);
	set_capacity(__sbdd.gd, __sbdd.capacity);

	/*
	Allocating gd does not make it available, add_disk() is required.
	After this call, gd methods can be called at any time, so it should
	not be called before the driver is fully initialized and ready to
	process requests.
	*/
	pr_info("adding disk\n");
	add_disk(__sbdd.gd);

	return ret;
}

static void sbdd_delete(void)
{
	atomic_set(&__sbdd.deleting, 1);

	wait_event(__sbdd.exitwait, !atomic_read(&__sbdd.refs_cnt));

	/* gd will be removed only after the last reference put */
	if (__sbdd.gd) {
		pr_info("deleting disk\n");
		del_gendisk(__sbdd.gd);
	}

	if (__sbdd.q) {
		pr_info("cleaning up queue\n");
		blk_cleanup_queue(__sbdd.q);
	}

	if (__sbdd.gd)
		put_disk(__sbdd.gd);

#ifdef BLK_MQ_MODE
	if (__sbdd.tag_set && __sbdd.tag_set->tags) {
		pr_info("freeing tag_set\n");
		blk_mq_free_tag_set(__sbdd.tag_set);
	}

	if (__sbdd.tag_set)
		kfree(__sbdd.tag_set);
#endif

	if (__sbdd.data) {
		pr_info("freeing data\n");
		vfree(__sbdd.data);
	}

	memset(&__sbdd, 0, sizeof(struct sbdd));

	if (__sbdd_major > 0) {
		pr_info("unregistering blkdev\n");
		unregister_blkdev(__sbdd_major, SBDD_NAME);
		__sbdd_major = 0;
	}
}
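/*
Illustration (hypothetical output): once sbdd_create() has returned 0,
the disk is visible to userspace. With the default capacity of 100 MiB
(204800 sectors of 512 bytes, i.e. 102400 1-KiB blocks) one would expect
something like:

  $ cat /proc/partitions
  major minor  #blocks  name
  ...
  252        0     102400 sbdd

The major number is assigned dynamically and varies between boots.
*/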
/*
Note __init is for the kernel to drop this function after initialization
is complete, making its memory available for other uses. There is also an
__initdata note, same but used for variables.
*/
static int __init sbdd_init(void)
{
	int ret = 0;

	pr_info("starting initialization...\n");
	ret = sbdd_create();

	if (ret) {
		pr_warn("initialization failed\n");
		sbdd_delete();
	} else {
		pr_info("initialization complete\n");
	}

	return ret;
}

/*
Note __exit is for the compiler to place this code in a special ELF section.
Sometimes such functions are simply discarded (e.g. when the module is built
directly into the kernel). There is also an __exitdata note.
*/
static void __exit sbdd_exit(void)
{
	pr_info("exiting...\n");
	sbdd_delete();
	pr_info("exiting complete\n");
}

/* Called on module loading. Is mandatory. */
module_init(sbdd_init);

/* Called on module unloading. Unloading the module is not allowed without it. */
module_exit(sbdd_exit);

/* Set desired capacity with insmod */
module_param_named(capacity_mib, __sbdd_capacity_mib, ulong, S_IRUGO);

/* Note for the kernel: a free license module. A warning will be printed without it. */
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Simple Block Device Driver");
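/*
Example usage (hypothetical commands, assuming a standard out-of-tree
kbuild setup that produces sbdd.ko):

  $ make -C /lib/modules/$(uname -r)/build M=$PWD modules
  $ sudo insmod sbdd.ko capacity_mib=250
  $ lsblk /dev/sbdd
  $ sudo rmmod sbdd

capacity_mib is the module parameter declared above; S_IRUGO makes it
world-readable via sysfs, and it can only be set at load time.
*/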