Diffstat (limited to 'fs/bcachefs/io.c')
-rw-r--r--  fs/bcachefs/io.c  96
1 file changed, 23 insertions, 73 deletions
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index 8a090b0d9b03..8c43791bfbb1 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -141,14 +141,13 @@ void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
 	bio->bi_vcnt = 0;
 }
 
-static void bch2_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio,
-				     bool *using_mempool)
+static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
 {
-	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt++];
+	struct page *page;
 
 	if (likely(!*using_mempool)) {
-		bv->bv_page = alloc_page(GFP_NOIO);
-		if (unlikely(!bv->bv_page)) {
+		page = alloc_page(GFP_NOIO);
+		if (unlikely(!page)) {
 			mutex_lock(&c->bio_bounce_pages_lock);
 			*using_mempool = true;
 			goto pool_alloc;
@@ -156,57 +155,29 @@ static void bch2_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio,
 		}
 	} else {
 pool_alloc:
-		bv->bv_page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
+		page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
 	}
 
-	bv->bv_len = PAGE_SIZE;
-	bv->bv_offset = 0;
+	return page;
 }
 
 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
-			       size_t bytes)
+			       size_t size)
 {
 	bool using_mempool = false;
 
-	BUG_ON(DIV_ROUND_UP(bytes, PAGE_SIZE) > bio->bi_max_vecs);
+	while (size) {
+		struct page *page = __bio_alloc_page_pool(c, &using_mempool);
+		unsigned len = min(PAGE_SIZE, size);
 
-	bio->bi_iter.bi_size = bytes;
-
-	while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE))
-		bch2_bio_alloc_page_pool(c, bio, &using_mempool);
+		BUG_ON(!bio_add_page(bio, page, len, 0));
+		size -= len;
+	}
 
 	if (using_mempool)
 		mutex_unlock(&c->bio_bounce_pages_lock);
 }
 
-void bch2_bio_alloc_more_pages_pool(struct bch_fs *c, struct bio *bio,
-				    size_t bytes)
-{
-	while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE)) {
-		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
-
-		BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
-
-		bv->bv_page = alloc_page(GFP_NOIO);
-		if (!bv->bv_page) {
-			/*
-			 * We already allocated from mempool, we can't allocate from it again
-			 * without freeing the pages we already allocated or else we could
-			 * deadlock:
-			 */
-			bch2_bio_free_pages_pool(c, bio);
-			bch2_bio_alloc_pages_pool(c, bio, bytes);
-			return;
-		}
-
-		bv->bv_len = PAGE_SIZE;
-		bv->bv_offset = 0;
-		bio->bi_vcnt++;
-	}
-
-	bio->bi_iter.bi_size = bytes;
-}
-
 /* Writes */
 
 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
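
As context for the hunk above, a minimal caller sketch (not part of the patch; the helper name and the choice of c->bio_read as the bio_set are illustrative, and bio_alloc_bioset() is shown with the three-argument signature used by kernels of this era). The point it illustrates: bch2_bio_alloc_pages_pool() now fills the bio via bio_add_page(), so the bio must be allocated with enough vecs up front or the BUG_ON() in the loop fires.

/* Hypothetical example, not patch code: allocate and fill a bounce bio. */
static struct bio *bounce_bio_alloc_example(struct bch_fs *c, size_t bytes)
{
	unsigned nr_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_pages, &c->bio_read);

	/* One bio_add_page() per page: nr_pages vecs are exactly enough. */
	bch2_bio_alloc_pages_pool(c, bio, bytes);
	return bio;
}

Pages filled this way are later released with bch2_bio_free_pages_pool(), whose tail is visible at the top of this diff, so callers don't need to track whether a given page came from alloc_page() or the mempool fallback.
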
@@ -491,8 +462,7 @@ static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
 	wbio->bio.bi_opf = src->bi_opf;
 
 	if (buf) {
-		bio->bi_iter.bi_size = output_available;
-		bch2_bio_map(bio, buf);
+		bch2_bio_map(bio, buf, output_available);
 		return bio;
 	}
 
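
A tiny usage sketch of the revised bch2_bio_map() call (illustrative, not from the patch): the mapping length is now an explicit argument, where callers previously had to stage it in bi_iter.bi_size first.

/* Hypothetical helper, for illustration: point a bio at a linear buffer. */
static void bio_map_example(struct bio *bio, void *buf, size_t len)
{
	/* old pattern: bio->bi_iter.bi_size = len; bch2_bio_map(bio, buf); */
	bch2_bio_map(bio, buf, len);
}
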
@@ -502,31 +472,17 @@ static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
 	 * We can't use mempool for more than c->sb.encoded_extent_max
 	 * worth of pages, but we'd like to allocate more if we can:
 	 */
-	while (bio->bi_iter.bi_size < output_available) {
-		unsigned len = min_t(unsigned, PAGE_SIZE,
-				     output_available - bio->bi_iter.bi_size);
-		struct page *p;
-
-		p = alloc_page(GFP_NOIO);
-		if (!p) {
-			unsigned pool_max =
-				min_t(unsigned, output_available,
-				      c->sb.encoded_extent_max << 9);
-
-			if (bio_sectors(bio) < pool_max)
-				bch2_bio_alloc_pages_pool(c, bio, pool_max);
-			break;
-		}
+	bch2_bio_alloc_pages_pool(c, bio,
+				  min_t(unsigned, output_available,
+					c->sb.encoded_extent_max << 9));
 
-		bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
-			.bv_page	= p,
-			.bv_len		= len,
-			.bv_offset	= 0,
-		};
-		bio->bi_iter.bi_size += len;
-	}
+	if (bio->bi_iter.bi_size < output_available)
+		*page_alloc_failed =
+			bch2_bio_alloc_pages(bio,
+					     output_available -
+					     bio->bi_iter.bi_size,
+					     GFP_NOFS) != 0;
 
-	*page_alloc_failed = bio->bi_vcnt < pages;
 	return bio;
 }
 
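
The hunk above reads as a two-stage fill; consolidated here as a sketch for clarity (the wrapper name is hypothetical, every call in it comes from the diff): guaranteed pages come from the bounce mempool, capped at c->sb.encoded_extent_max worth, and anything beyond that is a best-effort plain allocation whose failure is reported back through page_alloc_failed.

/* Illustrative consolidation of the logic above, not patch code. */
static bool fill_bounce_bio_example(struct bch_fs *c, struct bio *bio,
				    unsigned output_available)
{
	/* Stage 1: mempool-backed pages, limited to what the pool may cover */
	bch2_bio_alloc_pages_pool(c, bio,
				  min_t(unsigned, output_available,
					c->sb.encoded_extent_max << 9));

	/* Stage 2: best-effort plain pages for whatever is still missing */
	if (bio->bi_iter.bi_size < output_available &&
	    bch2_bio_alloc_pages(bio,
				 output_available - bio->bi_iter.bi_size,
				 GFP_NOFS))
		return true;	/* the caller's *page_alloc_failed case */

	return false;
}
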
@@ -830,12 +786,6 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp)
 	}
 
 	dst->bi_iter.bi_size = total_output;
-
-	/* Free unneeded pages after compressing: */
-	if (to_wbio(dst)->bounce)
-		while (dst->bi_vcnt > DIV_ROUND_UP(dst->bi_iter.bi_size, PAGE_SIZE))
-			mempool_free(dst->bi_io_vec[--dst->bi_vcnt].bv_page,
-				     &c->bio_bounce_pages);
 do_write:
 	/* might have done a realloc... */
 