Besides the most basic I/O via ppa-address, liblightnvm also wraps a few other I/O styles, such as vblk and lba.
(The vblk and lba implementations both still seem somewhat incomplete or not fully tested.)
Intro
vblk: wraps a number of underlying nvm_addr entries into one virtual block. Against this virtual block the user can:
- erase (erases every addr belonging to the block)
- read/write a number of bytes at a given offset inside the block (must be aligned)
- ...
From nvm_vblk_alloc_line it can be seen that the unit of vblk->nblks is geo->nplanes blocks (2 on this Qemu setup); a usage sketch of the vblk API follows below.
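For orientation, here is a minimal usage sketch of the vblk API discussed in this post. It assumes the liblightnvm public API of this version (nvm_dev_open, nvm_vblk_alloc_line, nvm_vblk_erase, nvm_vblk_write, nvm_vblk_read, nvm_vblk_free), a hypothetical device path /dev/nvme0n1 and an arbitrary block index 10; error handling is kept to the bare minimum.

#include <stdio.h>
#include <stdlib.h>
#include <liblightnvm.h>

int main(void)
{
    struct nvm_dev *dev = nvm_dev_open("/dev/nvme0n1"); /* hypothetical device path */
    if (!dev) {
        perror("nvm_dev_open");
        return 1;
    }
    const struct nvm_geo *geo = nvm_dev_get_geo(dev);

    /* One "line": block 10 on every (channel, lun) pair -- 10 is an arbitrary example */
    struct nvm_vblk *vblk = nvm_vblk_alloc_line(dev, 0, geo->nchannels - 1,
                                                0, geo->nluns - 1, 10);
    if (!vblk) {
        perror("nvm_vblk_alloc_line");
        nvm_dev_close(dev);
        return 1;
    }

    if (nvm_vblk_erase(vblk) < 0)               /* erase all addrs of the virtual block */
        perror("nvm_vblk_erase");

    const size_t nbytes = geo->nplanes * geo->nsectors * geo->sector_nbytes; /* one aligned unit */
    char *buf = nvm_buf_alloc(geo, nbytes);
    nvm_buf_fill(buf, nbytes);

    if (nvm_vblk_write(vblk, buf, nbytes) < 0)  /* sequential write, advances pos_write */
        perror("nvm_vblk_write");
    if (nvm_vblk_read(vblk, buf, nbytes) < 0)   /* sequential read, advances pos_read */
        perror("nvm_vblk_read");

    free(buf);
    nvm_vblk_free(vblk);
    nvm_dev_close(dev);
    return 0;
}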
- Some of the settings used by vblk I/O are pre-set when a device is opened (note that the number of planes per LUN can only be 1, 2 or 4):
nvm_dev.c, line 219:

static int dev_attr_fill(struct nvm_dev *dev)
{
    ...
    /* Derive a default plane mode */
    switch (geo->nplanes) {
    case 4:
        dev->pmode = NVM_FLAG_PMODE_QUAD;
        break;
    case 2:
        dev->pmode = NVM_FLAG_PMODE_DUAL;
        break;
    case 1:
        dev->pmode = NVM_FLAG_PMODE_SNGL;
        break;
    default:
        errno = EINVAL;
        return -1;
    }

    dev->erase_naddrs_max = NVM_NADDR_MAX; // Macro at liblightnvm.h, line 42: #define NVM_NADDR_MAX 64
    dev->write_naddrs_max = NVM_NADDR_MAX;
    dev->read_naddrs_max = NVM_NADDR_MAX;

    dev->meta_mode = NVM_META_MODE_NONE;

    return 0;
}
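As a quick check of these defaults, the sketch below opens a device and prints the plane count alongside the derived plane-mode flag. It only relies on getters that already appear in this post (nvm_dev_get_geo, nvm_dev_get_pmode); the device path is a placeholder.

#include <stdio.h>
#include <liblightnvm.h>

int main(void)
{
    struct nvm_dev *dev = nvm_dev_open("/dev/nvme0n1"); /* placeholder path */
    if (!dev)
        return 1;

    const struct nvm_geo *geo = nvm_dev_get_geo(dev);
    /* With nplanes == 2 this should report the DUAL plane-mode flag */
    printf("nplanes: %d, pmode: 0x%x\n", (int)geo->nplanes, nvm_dev_get_pmode(dev));

    nvm_dev_close(dev);
    return 0;
}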
vblk erase
nvm_vblk.c, line 116:
static inline int _cmd_nblks(int nblks, int cmd_nblks_max) // find the largest integer <= cmd_nblks_max that evenly divides nblks; used as the parallelization factor
{
    int cmd_nblks = cmd_nblks_max;

    while (nblks % cmd_nblks && cmd_nblks > 1) --cmd_nblks;

    return cmd_nblks;
}
nvm_vblk.c, line 125:
ssize_t nvm_vblk_erase(struct nvm_vblk *vblk)
{
    size_t nerr = 0;
    const struct nvm_geo *geo = nvm_dev_get_geo(vblk->dev);
    const int PMODE = vblk->dev->pmode;

    const int BLK_NADDRS = geo->nplanes; // erase granularity: 1 unit = geo->nplanes block addresses
    const int CMD_NBLKS = _cmd_nblks(vblk->nblks,
                                     vblk->dev->erase_naddrs_max / BLK_NADDRS); // units erased per command/thread
    // blocks erased per thread = units * blocks per unit = CMD_NBLKS * geo->nplanes
    const int NTHREADS = vblk->nblks < CMD_NBLKS ? 1 : vblk->nblks / CMD_NBLKS; // how many threads can be spawned in total
    // So multi-threading only happens when nblks is large enough (greater than vblk->dev->erase_naddrs_max / BLK_NADDRS).

#pragma omp parallel for num_threads(NTHREADS) schedule(static,1) reduction(+:nerr) ordered if (NTHREADS>1)
    for (int off = 0; off < vblk->nblks; off += CMD_NBLKS) {
        ssize_t err;
        struct nvm_ret ret = {};

        const int nblks = NVM_MIN(CMD_NBLKS, vblk->nblks - off);
        const int naddrs = nblks * BLK_NADDRS;

        struct nvm_addr addrs[naddrs];

        for (int i = 0; i < naddrs; ++i) {
            const int idx = off + (i / BLK_NADDRS);

            addrs[i].ppa = vblk->blks[idx].ppa;
            addrs[i].g.pl = i % geo->nplanes;
        }

        err = nvm_addr_erase(vblk->dev, addrs, naddrs, PMODE, &ret); // the actual erase happens here (the Basic I/O path analyzed earlier)
        if (err)
            ++nerr;

#pragma omp ordered
        {}
    }

    if (nerr) {
        errno = EIO;
        return -1;
    }

    vblk->pos_write = 0;
    vblk->pos_read = 0;

    return vblk->nbytes;
}
The parallelization here uses OpenMP. pthreads vs OpenMP:
- In most cases OpenMP can be bolted onto an existing serial program with very little change, and it falls back to single-threaded execution when no multi-threading capability is available.
- With pthreads you have to manage the threads yourself, and on a machine without real parallel capacity you still pay for time-sliced threads and the overhead they bring; the upside is that pthreads are more flexible than OpenMP.
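The pragma pattern used above can be seen in isolation in the minimal sketch below (compile with -fopenmp): a static schedule with chunk size 1, an error counter combined across threads via reduction(+:nerr), and an empty ordered region. The loop body here is a stand-in, not the erase logic.

#include <stdio.h>

int main(void)
{
    size_t nerr = 0;
    const int NTHREADS = 4;

#pragma omp parallel for num_threads(NTHREADS) schedule(static,1) reduction(+:nerr) ordered if (NTHREADS>1)
    for (int off = 0; off < 16; off += 4) {
        const int err = (off == 8); // pretend the third chunk fails
        if (err)
            ++nerr;

#pragma omp ordered
        {}
    }

    printf("failed chunks: %zu\n", nerr); // prints 1; without -fopenmp the pragmas are ignored and the loop runs serially
    return nerr ? 1 : 0;
}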
vblk write
Because of how NAND works, each sector in the address space covered by a vblk can be written only once; the vblk tracks how far it has written via pos_write (and therefore how many sectors are still writable).
Since the I/O unit is the sector, the number of bytes written here must be a multiple of geo->sector_nbytes (in fact, as the alignment check below shows, a multiple of geo->nplanes * geo->nsectors * geo->sector_nbytes).
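For example, writing at a nonzero offset with nvm_vblk_pwrite works as long as both count and offset are multiples of the alignment derived from the geometry. The helper below is a sketch under the assumption that dev and vblk were set up as in the earlier usage example.

#include <stdlib.h>
#include <liblightnvm.h>

/* Write one aligned unit at the second unit's offset. */
static ssize_t write_second_unit(struct nvm_dev *dev, struct nvm_vblk *vblk)
{
    const struct nvm_geo *geo = nvm_dev_get_geo(dev);
    const size_t align = geo->nplanes * geo->nsectors * geo->sector_nbytes;

    char *buf = nvm_buf_alloc(geo, align);
    if (!buf)
        return -1;
    nvm_buf_fill(buf, align);

    /* count == align and offset == align, so both alignment checks pass */
    const ssize_t res = nvm_vblk_pwrite(vblk, buf, align, align);

    free(buf);
    return res;
}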
nvm_vblk.c, line 172:
static inline int _cmd_nspages(int nblks, int cmd_nspages_max) // same idea as _cmd_nblks: largest value <= cmd_nspages_max that evenly divides nblks
{
    int cmd_nspages = cmd_nspages_max;

    while (nblks % cmd_nspages && cmd_nspages > 1) --cmd_nspages;

    return cmd_nspages;
}
...
nvm_vblk.c, line 181:
ssize_t nvm_vblk_pwrite(struct nvm_vblk *vblk, const void *buf, size_t count,
                        size_t offset) // count and offset must both be multiples of geo->nplanes * geo->nsectors * geo->sector_nbytes, i.e. "aligned"
{
    size_t nerr = 0;
    const int PMODE = nvm_dev_get_pmode(vblk->dev);
    const struct nvm_geo *geo = nvm_dev_get_geo(vblk->dev);

    const int SPAGE_NADDRS = geo->nplanes * geo->nsectors; // write granularity: 1 unit = geo->nplanes * geo->nsectors sector addresses = geo->nplanes pages worth of data
    const int CMD_NSPAGES = _cmd_nspages(vblk->nblks,
                                         vblk->dev->write_naddrs_max / SPAGE_NADDRS); // units written per command/thread???? This looks suspicious (it factors vblk->nblks rather than a page count); multi-threading may not work here. Worth filing an issue.
    // sectors per command = units * sectors per unit = CMD_NSPAGES * SPAGE_NADDRS

    const int ALIGN = SPAGE_NADDRS * geo->sector_nbytes; // bytes per write-granularity unit
    const int NTHREADS = vblk->nblks < CMD_NSPAGES ? 1 : vblk->nblks / CMD_NSPAGES;

    const size_t bgn = offset / ALIGN; // first unit to write
    const size_t end = bgn + (count / ALIGN); // unit at which the write stops

    char *padding_buf = NULL;

    const size_t meta_tbytes = CMD_NSPAGES * SPAGE_NADDRS * geo->meta_nbytes; // max metadata bytes a single command may carry
    char *meta = NULL;

    if (offset + count > vblk->nbytes) { // Check bounds
        errno = EINVAL;
        return -1;
    }

    if ((count % ALIGN) || (offset % ALIGN)) { // Check align
        errno = EINVAL;
        return -1;
    }

    if (!buf) { // Allocate and use a padding buffer
        const size_t nbytes = CMD_NSPAGES * SPAGE_NADDRS * geo->sector_nbytes;

        padding_buf = nvm_buf_alloc(geo, nbytes);
        if (!padding_buf) {
            errno = ENOMEM;
            return -1;
        }
        nvm_buf_fill(padding_buf, nbytes);
    }

    if (vblk->dev->meta_mode != NVM_META_MODE_NONE) { // Meta
        meta = nvm_buf_alloc(geo, meta_tbytes); // Alloc buf
        if (!meta) {
            errno = ENOMEM;
            return -1;
        }

        switch (vblk->dev->meta_mode) { // Fill it
        case NVM_META_MODE_ALPHA:
            nvm_buf_fill(meta, meta_tbytes);
            break;
        case NVM_META_MODE_CONST:
            for (size_t i = 0; i < meta_tbytes; ++i)
                meta[i] = 65 + (meta_tbytes % 20);
            break;
        case NVM_META_MODE_NONE:
            break;
        }
    } // the default mode in the current version is NVM_META_MODE_NONE (master - 3b399c9c4bc315)

#pragma omp parallel for num_threads(NTHREADS) schedule(static,1) reduction(+:nerr) ordered if(NTHREADS>1)
    for (size_t off = bgn; off < end; off += CMD_NSPAGES) { // each iteration/thread handles CMD_NSPAGES units
        struct nvm_ret ret = {};

        const int nspages = NVM_MIN(CMD_NSPAGES, (int)(end - off));
        const int naddrs = nspages * SPAGE_NADDRS;

        struct nvm_addr addrs[naddrs];
        const char *buf_off;

        if (padding_buf)
            buf_off = padding_buf;
        else
            buf_off = buf + (off - bgn) * geo->sector_nbytes * SPAGE_NADDRS;

        for (int i = 0; i < naddrs; ++i) {
            const int spg = off + (i / SPAGE_NADDRS); // index of the unit currently being written
            const int idx = spg % vblk->nblks; // !!! the key line: consecutive units go to different blocks, so I/O is spread evenly across LUNs/channels instead of piling up on one of them
            const int pg = (spg / vblk->nblks) % geo->npages;

            addrs[i].ppa = vblk->blks[idx].ppa;

            addrs[i].g.pg = pg;
            addrs[i].g.pl = (i / geo->nsectors) % geo->nplanes;
            addrs[i].g.sec = i % geo->nsectors;
        }

        const ssize_t err = nvm_addr_write(vblk->dev, addrs, naddrs,
                                           buf_off, meta, PMODE, &ret);
        if (err)
            ++nerr;

#pragma omp ordered
        {}
    }

    free(padding_buf);

    if (nerr) {
        errno = EIO;
        return -1;
    }

    return count;
}
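The addressing loop above is easier to see with a small standalone sketch that reproduces just the index math (idx, pg, pl, sec) for a hypothetical vblk with 4 blocks, 2 planes and 4 sectors per page, and prints where the first few write units land: block indices rotate before the page number ever advances, which is exactly the round-robin across LUNs/channels noted in the code.

#include <stdio.h>

int main(void)
{
    // Hypothetical geometry, not taken from a real device
    const int nblks = 4, nplanes = 2, nsectors = 4, npages = 512;
    const int SPAGE_NADDRS = nplanes * nsectors;

    for (int spg = 0; spg < 8; ++spg) { // first 8 write units
        const int idx = spg % nblks;           // which vblk block entry (i.e. which lun/ch)
        const int pg = (spg / nblks) % npages; // page inside that block

        printf("unit %d -> blk %d, pg %d, sectors:", spg, idx, pg);
        for (int i = 0; i < SPAGE_NADDRS; ++i)
            printf(" (pl %d, sec %d)", (i / nsectors) % nplanes, i % nsectors);
        printf("\n");
    }
    return 0;
}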
(Figure: vblk addressing)
Summary
- vblk:
  - the unit of vblk->nblks = geo->nplanes blocks = geo->nplanes * geo->npages * geo->nsectors sectors = geo->npages write-granularity units (pages)
  - erase granularity of a vblk: geo->nplanes block addresses
  - write granularity of a vblk: geo->nplanes * geo->nsectors sector addresses
  - erase and read/write are both capped at NVM_NADDR_MAX addresses per command, but the meaning differs: for erase those are block addresses, for read/write they are sector addresses (see the worked numbers below)
  - the parallelization is done with OpenMP
- On an OCSSD the number of planes per LUN can only be 1, 2 or 4 (see dev_attr_fill() in nvm_dev.c)
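To put numbers on the NVM_NADDR_MAX limits, here is a small calculation sketch with a hypothetical geometry (nplanes = 2, npages = 512, nsectors = 4, sector_nbytes = 4096; these values are illustrative, not measured from the Qemu device):

#include <stdio.h>

#define NVM_NADDR_MAX 64 // per-command address limit from liblightnvm.h

int main(void)
{
    // Hypothetical geometry for illustration only
    const int nplanes = 2, npages = 512, nsectors = 4, sector_nbytes = 4096;

    const int blk_naddrs = nplanes;              // block addresses per erase unit
    const int spage_naddrs = nplanes * nsectors; // sector addresses per write unit

    printf("erase units per command : %d\n", NVM_NADDR_MAX / blk_naddrs);   // 32
    printf("write units per command : %d\n", NVM_NADDR_MAX / spage_naddrs); // 8
    printf("bytes per write command : %d\n",
           (NVM_NADDR_MAX / spage_naddrs) * spage_naddrs * sector_nbytes);  // 8 * 8 * 4096 = 256 KiB
    printf("bytes per vblk blk entry: %d\n",
           nplanes * npages * nsectors * sector_nbytes);                    // 2 * 512 * 4 * 4096 = 16 MiB
    return 0;
}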