Merge branch 'for-4.3/sg' of git://git.kernel.dk/linux-block

Pull SG updates from Jens Axboe:
 "This contains a set of scatter-gather related changes/fixes for 4.3:

   - Add support for limited chaining of sg tables even for
     architectures that do not set ARCH_HAS_SG_CHAIN.  From Christoph.

   - Add sg chain support to target_rd.  From Christoph.

   - Fixup open coded sg->page_link in crypto/omap-sham.  From
     Christoph.

   - Fixup open coded crypto ->page_link manipulation.  From Dan.

   - Also from Dan, automated fixup of manual sg_unmark_end()
     manipulations.

   - Also from Dan, automated fixup of open coded sg_phys()
     implementations.

   - From Robert Jarzmik, addition of an sg table splitting helper that
     drivers can use"
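
For the last item above: the splitting helper lands in lib/ as sg_split(). A rough usage sketch follows; it assumes the prototype added by the "lib: scatterlist: add sg splitting function" patch in the shortlog below (input list, mapped nents, byte offset to skip, number of splits, per-split sizes, output list array, output mapped-nents array, gfp mask), and the wrapper function and sizes are purely illustrative, not part of this merge:

#include <linux/scatterlist.h>
#include <linux/slab.h>

/*
 * Illustrative wrapper: carve one mapped S/G list into two new lists.
 * sg_split() allocates the output lists itself and returns 0 on success;
 * the caller kfree()s them once their consumers are done.
 */
static int split_in_two(struct scatterlist *in, int in_mapped_nents,
			size_t first_bytes, size_t second_bytes)
{
	size_t split_sizes[2] = { first_bytes, second_bytes };
	struct scatterlist *out[2];
	int out_mapped_nents[2];
	int ret;

	ret = sg_split(in, in_mapped_nents, 0 /* skip */, 2 /* nb_splits */,
		       split_sizes, out, out_mapped_nents, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... hand out[0] and out[1] to their consumers ... */

	kfree(out[0]);
	kfree(out[1]);
	return 0;
}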

* 'for-4.3/sg' of git://git.kernel.dk/linux-block:
  lib: scatterlist: add sg splitting function
  scatterlist: use sg_phys()
  crypto/omap-sham: remove an open coded access to ->page_link
  scatterlist: remove open coded sg_unmark_end instances
  crypto: replace scatterwalk_sg_chain with sg_chain
  target/rd: always chain S/G list
  scatterlist: allow limited chaining without ARCH_HAS_SG_CHAIN
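
For reference, the "scatterlist: use sg_phys()" and "scatterlist: remove open coded sg_unmark_end instances" entries are mechanical substitutions. A minimal before/after sketch of the pattern; the two functions are illustrative only, while sg_phys(), sg_unmark_end() and the 0x02 end-marker bit in page_link are the existing scatterlist conventions these fixups target:

#include <linux/scatterlist.h>

/* Before: the open coded forms the automated fixups hunt down. */
static phys_addr_t sg_addr_open_coded(struct scatterlist *sg)
{
	sg->page_link &= ~0x02;		/* clear the end marker by hand */
	return page_to_phys(sg_page(sg)) + sg->offset;
}

/* After: the equivalent scatterlist helpers. */
static phys_addr_t sg_addr_with_helpers(struct scatterlist *sg)
{
	sg_unmark_end(sg);		/* same bit, self-documenting */
	return sg_phys(sg);
}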
Linus Torvalds
2015-09-02 13:22:38 -07:00
Commit d975f309a8
21 changed files with 234 additions and 81 deletions

drivers/target/target_core_rd.c

@@ -138,16 +138,12 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
 		sg_per_table = (total_sg_needed > max_sg_per_table) ?
 					max_sg_per_table : total_sg_needed;
 
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-
 		/*
 		 * Reserve extra element for chain entry
 		 */
 		if (sg_per_table < total_sg_needed)
 			chain_entry = 1;
 
-#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
-
 		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
 				GFP_KERNEL);
 		if (!sg) {
@@ -158,15 +154,11 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
 
 		sg_init_table(sg, sg_per_table + chain_entry);
 
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-
 		if (i > 0) {
 			sg_chain(sg_table[i - 1].sg_table,
 				 max_sg_per_table + 1, sg);
 		}
 
-#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
-
 		sg_table[i].sg_table = sg;
 		sg_table[i].rd_sg_count = sg_per_table;
 		sg_table[i].page_start_offset = page_offset;
@@ -430,42 +422,6 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
 	prot_sg = &prot_table->sg_table[prot_page -
 					prot_table->page_start_offset];
 
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-
-	prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
-				   PAGE_SIZE);
-
-	/*
-	 * Allocate temporaly contiguous scatterlist entries if prot pages
-	 * straddles multiple scatterlist tables.
-	 */
-	if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
-		int i;
-
-		prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
-		if (!prot_sg)
-			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
-		need_to_release = true;
-		sg_init_table(prot_sg, prot_npages);
-
-		for (i = 0; i < prot_npages; i++) {
-			if (prot_page + i > prot_table->page_end_offset) {
-				prot_table = rd_get_prot_table(dev,
-								prot_page + i);
-				if (!prot_table) {
-					kfree(prot_sg);
-					return rc;
-				}
-				sg_unmark_end(&prot_sg[i - 1]);
-			}
-			prot_sg[i] = prot_table->sg_table[prot_page + i -
-						prot_table->page_start_offset];
-		}
-	}
-
-#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */
-
 	if (is_read)
 		rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
 				    prot_sg, prot_offset);
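
For context on the first two hunks: rd_allocate_sgl_table() now always reserves one extra slot per table and chains each new table to the previous one, rather than only when CONFIG_ARCH_HAS_SG_CHAIN is set (the "scatterlist: allow limited chaining without ARCH_HAS_SG_CHAIN" change at the end of the shortlog is what makes that safe), which is why the third hunk can drop the temporary-copy fallback. A standalone sketch of the reserve-and-chain pattern; the helper function below is hypothetical, only kcalloc(), sg_init_table() and sg_chain() are real kernel API:

#include <linux/scatterlist.h>
#include <linux/slab.h>

/*
 * Hypothetical helper: allocate two scatterlist tables of 'nents' data
 * entries each and chain them, reserving one extra slot in the first
 * table for the chain link -- the same pattern as the hunks above.
 */
static struct scatterlist *alloc_chained_pair(unsigned int nents)
{
	struct scatterlist *first, *second;

	/* +1 reserves the slot that sg_chain() turns into the link */
	first = kcalloc(nents + 1, sizeof(*first), GFP_KERNEL);
	if (!first)
		return NULL;
	second = kcalloc(nents, sizeof(*second), GFP_KERNEL);
	if (!second) {
		kfree(first);
		return NULL;
	}

	sg_init_table(first, nents + 1);
	sg_init_table(second, nents);

	/*
	 * The last of the (nents + 1) entries in 'first' becomes a chain
	 * entry pointing at 'second'; sg iteration then walks both tables.
	 */
	sg_chain(first, nents + 1, second);

	return first;
}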