for-5.20-tag

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEE8rQSAMVO+zA4DBdWxWXV+ddtWDsFAmLnyNUACgkQxWXV+ddt
 WDt9vA/9HcF+v5EkknyW07tatTap/Hm/ZB86Z5OZi6ikwIEcHsWhp3rUICejm88e
 GecDPIluDtCtyD6x4stuqkwOm22aDP5q2T9H6+gyw92ozyb436OV1Z8IrmftzXKY
 EpZO70PHZT+E6E/WYvyoTmmoCrjib7YlqCWZZhSLUFpsqqlOInmHEH49PW6KvM4r
 acUZ/RxHurKdmI3kNY6ECbAQl6CASvtTdYcVCx8fT2zN0azoLIQxpYa7n/9ca1R6
 8WnYilCbLbNGtcUXvO2M3tMZ4/5kvxrwQsUn93ccCJYuiN0ASiDXbLZ2g4LZ+n56
 JGu+y5v5oBwjpVf+46cuvnENP5BQ61594WPseiVjrqODWnPjN28XkcVC0XmPsiiZ
 lszeHO2cuIrIFoCah8ELMl8usu8+qxfXmPxIXtPu9rEyKsDtOjxVYc8SMXqLp0qQ
 qYtBoFm0JcZHqtZRpB+dhQ37/xXtH4ljUi/mI6x8iALVujeR273URs7yO9zgIdeW
 uZoFtbwpHFLUk+TL7Ku82/zOXp3fCwtDpNmlYbxeMbea/be3ShjncM4+mYzvHYri
 dYON2LFrq+mnRDqtIXTCaAYwX7zU8Y18Ev9QwlNll8dKlKwS89+jpqLoa+eVYy3c
 /HitHFza70KxmOj4dvDVZlzDpPvl7kW1UBkmskg4u3jnNWzedkM=
 =sS1q
 -----END PGP SIGNATURE-----

Merge tag 'for-5.20-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs updates from David Sterba:
 "This brings some long awaited changes, the send protocol bump,
  otherwise lots of small improvements and fixes. The main core part is
  reworking bio handling, cleaning up the submission and endio and
  improving error handling.

  There are some changes outside of btrfs adding helpers or updating
  API, listed at the end of the changelog.

  Features:

   - sysfs:
      - export chunk size; in debug mode add a tunable for setting its size
      - show zoned among features (was only in debug mode)
      - show commit stats (number, last/max/total duration), see the
        first example right after the changelog

   - send protocol updated to 2
      - new commands:
         - ability to write larger data chunks than 64K
         - send raw compressed extents (uses the encoded data ioctls),
           i.e. no decompression on the send side, no compression
           needed on the receive side if supported (see the sketch
           after the shortlog)
         - send 'otime' (inode creation time) among other timestamps
         - send file attributes (a.k.a. file flags and xflags)
      - this is the first version bump, backward compatibility on the
        send and receive side is provided
      - there are still some known and wanted commands that will be
        implemented in the near future and will need another version
        bump, however we want to minimize that to avoid causing
        usability issues

   - print checksum type and implementation at mount time

   - don't print some messages at mount (mentioned because people asked
     about it), we want to print messages mainly for new features so
     let's make some space for that
      - big metadata - this has been supported for a long time and is
        not a feature that's worth mentioning
      - skinny metadata - same reason, set by default by mkfs

  Performance improvements:

   - reduced amount of reserved metadata for delayed items
      - when inserted items can be batched into one leaf
      - when deleting batched directory index items
      - when deleting delayed items used for deletion
      - overall improved count of files/sec, decreased subvolume lock
        contention

   - metadata item access bounds checker micro-optimized, with a few
     percent improvement in runtime for metadata-heavy operations

   - increase direct IO limit for reads to 256 sectors, which improved
     throughput by 3x on a sample workload

  Notable fixes:

   - raid56
      - reduce parity writes, skip sectors of stripe when there are no
        data updates
      - restore reading from on-disk data instead of using the stripe
        cache, this reduces the chance of damaging correct data due to
        an RMW cycle

   - refuse to replay log with unknown incompat read-only feature bit
     set

   - zoned
      - fix page locking when COW fails in the middle of allocation
      - improved tracking of active zones, ZNS drives may limit the
        number and ENOSPC errors can be caused by that limit rather
        than an actual lack of space
      - adjust maximum extent size for zone append so it does not cause
        late ENOSPC due to underreservation

   - mirror reading error messages show the mirror number

   - don't fall back to buffered IO for NOWAIT direct IO writes, we
     don't have NOWAIT semantics for buffered IO yet

   - send: fix sending link commands for existing file paths when there
     are deleted and created hardlinks for the same files

   - repair all mirrors for profiles with more than 1 copy (raid1c34)

   - fix repair of compressed extents, unify where error detection and
     repair happen

  Core changes:

   - bio completion cleanups
      - don't double defer compression bios
      - simplify endio workqueues
      - add more data to btrfs_bio to avoid allocation for read requests
      - rework bio error handling so it's the same as what the block
        layer does: the submission works and errors are consumed in
        endio
      - when asynchronous bio offload fails fall back to synchronous
        checksum calculation to avoid errors under writeback or memory
        pressure

   - new trace points
      - raid56 events
      - ordered extent operations

   - super block log_root_transid deprecated (never used)

   - mixed_backref and big_metadata sysfs feature files removed, they've
     been the default for a sufficiently long time, there are no known
     users and mixed_backref could be confused with mixed_groups

  Non-btrfs changes, API updates:

   - minor highmem API update to cover const arguments

   - switch all kmap/kmap_atomic to kmap_local

   - remove redundant flush_dcache_page()

   - address_space_operations::writepage callback removed

   - add bdev_max_segments() helper"
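
A minimal sketch of reading the new commit stats export mentioned in the
feature list above. The sysfs path (/sys/fs/btrfs/<UUID>/commit_stats) and
the exact set of fields are assumptions based on the changelog description,
not something this merge spells out:

  /*
   * Dump the btrfs per-filesystem commit stats from sysfs.
   * Assumed path: /sys/fs/btrfs/<UUID>/commit_stats
   */
  #include <stdio.h>

  int main(int argc, char **argv)
  {
          char path[256];
          char line[256];
          FILE *f;

          if (argc != 2) {
                  fprintf(stderr, "usage: %s <filesystem UUID>\n", argv[0]);
                  return 1;
          }
          snprintf(path, sizeof(path), "/sys/fs/btrfs/%s/commit_stats", argv[1]);
          f = fopen(path, "r");
          if (!f) {
                  perror(path);
                  return 1;
          }
          /* Expected: commit count plus last/max/total commit durations. */
          while (fgets(line, sizeof(line), f))
                  fputs(line, stdout);
          fclose(f);
          return 0;
  }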

* tag 'for-5.20-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (163 commits)
  btrfs: don't call btrfs_page_set_checked in finish_compressed_bio_read
  btrfs: fix repair of compressed extents
  btrfs: remove the start argument to check_data_csum and export
  btrfs: pass a btrfs_bio to btrfs_repair_one_sector
  btrfs: simplify the pending I/O counting in struct compressed_bio
  btrfs: repair all known bad mirrors
  btrfs: merge btrfs_dev_stat_print_on_error with its only caller
  btrfs: join running log transaction when logging new name
  btrfs: simplify error handling in btrfs_lookup_dentry
  btrfs: send: always use the rbtree based inode ref management infrastructure
  btrfs: send: fix sending link commands for existing file paths
  btrfs: send: introduce recorded_ref_alloc and recorded_ref_free
  btrfs: zoned: wait until zone is finished when allocation didn't progress
  btrfs: zoned: write out partially allocated region
  btrfs: zoned: activate necessary block group
  btrfs: zoned: activate metadata block group on flush_space
  btrfs: zoned: disable metadata overcommit for zoned
  btrfs: zoned: introduce space_info->active_total_bytes
  btrfs: zoned: finish least available block group on data bg allocation
  btrfs: let can_allocate_chunk return error
  ...
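
And a sketch of the read side of the encoded data ioctls that the v2 send
protocol builds on (see the compressed send item in the changelog). The
struct and ioctl names come from the uapi header <linux/btrfs.h>; the
return-value semantics and the buffer sizing noted in the comments are
assumptions, so treat this as an illustration rather than reference usage:

  /*
   * Read one file extent in its encoded (possibly compressed) form with
   * BTRFS_IOC_ENCODED_READ. Assumptions: the caller needs CAP_SYS_ADMIN,
   * the header provides the encoded I/O uapi, and the ioctl returns the
   * number of encoded bytes copied into the iovec.
   */
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <sys/uio.h>
  #include <linux/btrfs.h>

  int main(int argc, char **argv)
  {
          struct btrfs_ioctl_encoded_io_args args;
          /* 128K matches the largest compressed extent; plain extents may
           * need a bigger buffer. */
          static char buf[128 * 1024];
          struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
          int fd, ret;

          if (argc != 2) {
                  fprintf(stderr, "usage: %s <file on btrfs>\n", argv[0]);
                  return 1;
          }
          fd = open(argv[1], O_RDONLY);
          if (fd < 0) {
                  perror("open");
                  return 1;
          }
          memset(&args, 0, sizeof(args));
          args.iov = &iov;
          args.iovcnt = 1;
          args.offset = 0;        /* read the extent at file offset 0 */

          ret = ioctl(fd, BTRFS_IOC_ENCODED_READ, &args);
          if (ret < 0) {
                  perror("BTRFS_IOC_ENCODED_READ");
                  close(fd);
                  return 1;
          }
          printf("encoded bytes %d, logical len %llu, unencoded len %llu, compression %u\n",
                 ret, (unsigned long long)args.len,
                 (unsigned long long)args.unencoded_len, args.compression);
          close(fd);
          return 0;
  }
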
Linus Torvalds 2022-08-03 14:54:52 -07:00
commit 353767e4aa
57 changed files with 3847 additions and 2834 deletions

View File

@ -22,7 +22,7 @@ void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_dcache_page_asm(const void *addr);
void flush_kernel_icache_page(void *);
/* Cache flush operations */
@ -31,7 +31,7 @@ void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
void flush_kernel_dcache_page_addr(void *addr);
void flush_kernel_dcache_page_addr(const void *addr);
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
@ -75,7 +75,7 @@ void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
#define ARCH_HAS_FLUSH_ON_KUNMAP
static inline void kunmap_flush_on_unmap(void *addr)
static inline void kunmap_flush_on_unmap(const void *addr)
{
flush_kernel_dcache_page_addr(addr);
}

View File

@ -549,7 +549,7 @@ extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);
void flush_kernel_dcache_page_addr(void *addr)
void flush_kernel_dcache_page_addr(const void *addr)
{
unsigned long flags;

View File

@ -13,7 +13,6 @@ struct btrfs_fs_info;
struct btrfs_workqueue;
struct btrfs_work;
typedef void (*btrfs_func_t)(struct btrfs_work *arg);
typedef void (*btrfs_work_func_t)(struct work_struct *arg);
struct btrfs_work {
btrfs_func_t func;

View File

@ -2028,10 +2028,29 @@ out:
return ret;
}
static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
{
struct btrfs_data_container *inodes = ctx;
const size_t c = 3 * sizeof(u64);
if (inodes->bytes_left >= c) {
inodes->bytes_left -= c;
inodes->val[inodes->elem_cnt] = inum;
inodes->val[inodes->elem_cnt + 1] = offset;
inodes->val[inodes->elem_cnt + 2] = root;
inodes->elem_cnt += 3;
} else {
inodes->bytes_missing += c - inodes->bytes_left;
inodes->bytes_left = 0;
inodes->elem_missed += 3;
}
return 0;
}
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
iterate_extent_inodes_t *iterate, void *ctx,
bool ignore_offset)
void *ctx, bool ignore_offset)
{
int ret;
u64 extent_item_pos;
@ -2049,17 +2068,15 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
extent_item_pos = logical - found_key.objectid;
ret = iterate_extent_inodes(fs_info, found_key.objectid,
extent_item_pos, search_commit_root,
iterate, ctx, ignore_offset);
build_ino_list, ctx, ignore_offset);
return ret;
}
typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
struct extent_buffer *eb, void *ctx);
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
struct extent_buffer *eb, struct inode_fs_paths *ipath);
static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
struct btrfs_path *path,
iterate_irefs_t *iterate, void *ctx)
static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
{
int ret = 0;
int slot;
@ -2068,6 +2085,8 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
u32 name_len;
u64 parent = 0;
int found = 0;
struct btrfs_root *fs_root = ipath->fs_root;
struct btrfs_path *path = ipath->btrfs_path;
struct extent_buffer *eb;
struct btrfs_inode_ref *iref;
struct btrfs_key found_key;
@ -2103,8 +2122,8 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
"following ref at offset %u for inode %llu in tree %llu",
cur, found_key.objectid,
fs_root->root_key.objectid);
ret = iterate(parent, name_len,
(unsigned long)(iref + 1), eb, ctx);
ret = inode_to_path(parent, name_len,
(unsigned long)(iref + 1), eb, ipath);
if (ret)
break;
len = sizeof(*iref) + name_len;
@ -2118,15 +2137,15 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
return ret;
}
static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
struct btrfs_path *path,
iterate_irefs_t *iterate, void *ctx)
static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
{
int ret;
int slot;
u64 offset = 0;
u64 parent;
int found = 0;
struct btrfs_root *fs_root = ipath->fs_root;
struct btrfs_path *path = ipath->btrfs_path;
struct extent_buffer *eb;
struct btrfs_inode_extref *extref;
u32 item_size;
@ -2162,8 +2181,8 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
parent = btrfs_inode_extref_parent(eb, extref);
name_len = btrfs_inode_extref_name_len(eb, extref);
ret = iterate(parent, name_len,
(unsigned long)&extref->name, eb, ctx);
ret = inode_to_path(parent, name_len,
(unsigned long)&extref->name, eb, ipath);
if (ret)
break;
@ -2180,34 +2199,13 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
return ret;
}
static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
struct btrfs_path *path, iterate_irefs_t *iterate,
void *ctx)
{
int ret;
int found_refs = 0;
ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
if (!ret)
++found_refs;
else if (ret != -ENOENT)
return ret;
ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
if (ret == -ENOENT && found_refs)
return 0;
return ret;
}
/*
* returns 0 if the path could be dumped (probably truncated)
* returns <0 in case of an error
*/
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
struct extent_buffer *eb, void *ctx)
struct extent_buffer *eb, struct inode_fs_paths *ipath)
{
struct inode_fs_paths *ipath = ctx;
char *fspath;
char *fspath_min;
int i = ipath->fspath->elem_cnt;
@ -2248,8 +2246,20 @@ static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
*/
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
inode_to_path, ipath);
int ret;
int found_refs = 0;
ret = iterate_inode_refs(inum, ipath);
if (!ret)
++found_refs;
else if (ret != -ENOENT)
return ret;
ret = iterate_inode_extrefs(inum, ipath);
if (ret == -ENOENT && found_refs)
return 0;
return ret;
}
struct btrfs_data_container *init_data_container(u32 total_bytes)

View File

@ -35,8 +35,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
bool ignore_offset);
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
iterate_extent_inodes_t *iterate, void *ctx,
struct btrfs_path *path, void *ctx,
bool ignore_offset);
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);

View File

@ -1051,8 +1051,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
< block_group->zone_unusable);
WARN_ON(block_group->space_info->disk_total
< block_group->length * factor);
WARN_ON(block_group->zone_is_active &&
block_group->space_info->active_total_bytes
< block_group->length);
}
block_group->space_info->total_bytes -= block_group->length;
if (block_group->zone_is_active)
block_group->space_info->active_total_bytes -= block_group->length;
block_group->space_info->bytes_readonly -=
(block_group->length - block_group->zone_unusable);
block_group->space_info->bytes_zone_unusable -=
@ -1816,11 +1821,10 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
stripe_nr = physical - map->stripes[i].physical;
stripe_nr = div64_u64_rem(stripe_nr, map->stripe_len, &offset);
if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID10)) {
stripe_nr = stripe_nr * map->num_stripes + i;
stripe_nr = div_u64(stripe_nr, map->sub_stripes);
} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
stripe_nr = stripe_nr * map->num_stripes + i;
}
/*
* The remaining case would be for RAID56, multiply by
@ -2108,7 +2112,8 @@ static int read_one_block_group(struct btrfs_fs_info *info,
trace_btrfs_add_block_group(info, cache, 0);
btrfs_update_space_info(info, cache->flags, cache->length,
cache->used, cache->bytes_super,
cache->zone_unusable, &space_info);
cache->zone_unusable, cache->zone_is_active,
&space_info);
cache->space_info = space_info;
@ -2178,7 +2183,7 @@ static int fill_dummy_bgs(struct btrfs_fs_info *fs_info)
}
btrfs_update_space_info(fs_info, bg->flags, em->len, em->len,
0, 0, &space_info);
0, 0, false, &space_info);
bg->space_info = space_info;
link_block_group(bg);
@ -2559,7 +2564,7 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
trace_btrfs_add_block_group(fs_info, cache, 1);
btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
cache->bytes_super, cache->zone_unusable,
&cache->space_info);
cache->zone_is_active, &cache->space_info);
btrfs_update_global_block_rsv(fs_info);
link_block_group(cache);
@ -2659,6 +2664,14 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
if (ret < 0)
goto out;
/*
* We have allocated a new chunk. We also need to activate that chunk to
* grant metadata tickets for zoned filesystem.
*/
ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true);
if (ret < 0)
goto out;
ret = inc_block_group_ro(cache, 0);
if (ret == -ETXTBSY)
goto unlock_out;
@ -3761,6 +3774,7 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
* attempt.
*/
wait_for_alloc = true;
force = CHUNK_ALLOC_NO_FORCE;
spin_unlock(&space_info->lock);
mutex_lock(&fs_info->chunk_mutex);
mutex_unlock(&fs_info->chunk_mutex);
@ -3883,6 +3897,14 @@ static void reserve_chunk_space(struct btrfs_trans_handle *trans,
if (IS_ERR(bg)) {
ret = PTR_ERR(bg);
} else {
/*
* We have a new chunk. We also need to activate it for
* zoned filesystem.
*/
ret = btrfs_zoned_activate_one_bg(fs_info, info, true);
if (ret < 0)
return;
/*
* If we fail to add the chunk item here, we end up
* trying again at phase 2 of chunk allocation, at

View File

@ -118,7 +118,7 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
if (block_rsv->reserved >= block_rsv->size) {
num_bytes = block_rsv->reserved - block_rsv->size;
block_rsv->reserved = block_rsv->size;
block_rsv->full = 1;
block_rsv->full = true;
} else {
num_bytes = 0;
}
@ -142,7 +142,7 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
bytes_to_add = min(num_bytes, bytes_to_add);
dest->reserved += bytes_to_add;
if (dest->reserved >= dest->size)
dest->full = 1;
dest->full = true;
num_bytes -= bytes_to_add;
}
spin_unlock(&dest->lock);
@ -171,7 +171,7 @@ int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
return 0;
}
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type)
{
memset(rsv, 0, sizeof(*rsv));
spin_lock_init(&rsv->lock);
@ -180,7 +180,7 @@ void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv,
unsigned short type)
enum btrfs_rsv_type type)
{
btrfs_init_block_rsv(rsv, type);
rsv->space_info = btrfs_find_space_info(fs_info,
@ -188,7 +188,7 @@ void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
}
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
unsigned short type)
enum btrfs_rsv_type type)
{
struct btrfs_block_rsv *block_rsv;
@ -304,7 +304,7 @@ int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes)
if (block_rsv->reserved >= num_bytes) {
block_rsv->reserved -= num_bytes;
if (block_rsv->reserved < block_rsv->size)
block_rsv->full = 0;
block_rsv->full = false;
ret = 0;
}
spin_unlock(&block_rsv->lock);
@ -319,7 +319,7 @@ void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
if (update_size)
block_rsv->size += num_bytes;
else if (block_rsv->reserved >= block_rsv->size)
block_rsv->full = 1;
block_rsv->full = true;
spin_unlock(&block_rsv->lock);
}
@ -341,7 +341,7 @@ int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
}
global_rsv->reserved -= num_bytes;
if (global_rsv->reserved < global_rsv->size)
global_rsv->full = 0;
global_rsv->full = false;
spin_unlock(&global_rsv->lock);
btrfs_block_rsv_add_bytes(dest, num_bytes, true);
@ -408,10 +408,7 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
btrfs_try_granting_tickets(fs_info, sinfo);
}
if (block_rsv->reserved == block_rsv->size)
block_rsv->full = 1;
else
block_rsv->full = 0;
block_rsv->full = (block_rsv->reserved == block_rsv->size);
if (block_rsv->size >= sinfo->total_bytes)
sinfo->force_alloc = CHUNK_ALLOC_FORCE;

View File

@ -9,7 +9,7 @@ enum btrfs_reserve_flush_enum;
/*
* Types of block reserves
*/
enum {
enum btrfs_rsv_type {
BTRFS_BLOCK_RSV_GLOBAL,
BTRFS_BLOCK_RSV_DELALLOC,
BTRFS_BLOCK_RSV_TRANS,
@ -25,9 +25,10 @@ struct btrfs_block_rsv {
u64 reserved;
struct btrfs_space_info *space_info;
spinlock_t lock;
unsigned short full;
unsigned short type;
unsigned short failfast;
bool full;
bool failfast;
/* Block reserve type, one of BTRFS_BLOCK_RSV_* */
enum btrfs_rsv_type type:8;
/*
* Qgroup equivalent for @size @reserved
@ -49,13 +50,13 @@ struct btrfs_block_rsv {
u64 qgroup_rsv_reserved;
};
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type);
void btrfs_init_root_block_rsv(struct btrfs_root *root);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
unsigned short type);
enum btrfs_rsv_type type);
void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv,
unsigned short type);
enum btrfs_rsv_type type);
void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,

View File

@ -279,19 +279,31 @@ static inline void btrfs_insert_inode_hash(struct inode *inode)
__insert_inode_hash(inode, h);
}
#if BITS_PER_LONG == 32
/*
* On 32 bit systems the i_ino of struct inode is 32 bits (unsigned long), so
* we use the inode's location objectid which is a u64 to avoid truncation.
*/
static inline u64 btrfs_ino(const struct btrfs_inode *inode)
{
u64 ino = inode->location.objectid;
/*
* !ino: btree_inode
* type == BTRFS_ROOT_ITEM_KEY: subvol dir
*/
if (!ino || inode->location.type == BTRFS_ROOT_ITEM_KEY)
/* type == BTRFS_ROOT_ITEM_KEY: subvol dir */
if (inode->location.type == BTRFS_ROOT_ITEM_KEY)
ino = inode->vfs_inode.i_ino;
return ino;
}
#else
static inline u64 btrfs_ino(const struct btrfs_inode *inode)
{
return inode->vfs_inode.i_ino;
}
#endif
static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size)
{
i_size_write(&inode->vfs_inode, size);
@ -305,8 +317,7 @@ static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode)
if (root == root->fs_info->tree_root &&
btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID)
return true;
if (inode->location.objectid == BTRFS_FREE_INO_OBJECTID)
return true;
return false;
}

View File

@ -136,109 +136,14 @@ static int compression_decompress(int type, struct list_head *ws,
static int btrfs_decompress_bio(struct compressed_bio *cb);
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
unsigned long disk_size)
{
return sizeof(struct compressed_bio) +
(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
}
static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
u64 disk_start)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
const u32 csum_size = fs_info->csum_size;
const u32 sectorsize = fs_info->sectorsize;
struct page *page;
unsigned int i;
char *kaddr;
u8 csum[BTRFS_CSUM_SIZE];
struct compressed_bio *cb = bio->bi_private;
u8 *cb_sum = cb->sums;
if ((inode->flags & BTRFS_INODE_NODATASUM) ||
test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state))
return 0;
shash->tfm = fs_info->csum_shash;
for (i = 0; i < cb->nr_pages; i++) {
u32 pg_offset;
u32 bytes_left = PAGE_SIZE;
page = cb->compressed_pages[i];
/* Determine the remaining bytes inside the page first */
if (i == cb->nr_pages - 1)
bytes_left = cb->compressed_len - i * PAGE_SIZE;
/* Hash through the page sector by sector */
for (pg_offset = 0; pg_offset < bytes_left;
pg_offset += sectorsize) {
kaddr = kmap_atomic(page);
crypto_shash_digest(shash, kaddr + pg_offset,
sectorsize, csum);
kunmap_atomic(kaddr);
if (memcmp(&csum, cb_sum, csum_size) != 0) {
btrfs_print_data_csum_error(inode, disk_start,
csum, cb_sum, cb->mirror_num);
if (btrfs_bio(bio)->device)
btrfs_dev_stat_inc_and_print(
btrfs_bio(bio)->device,
BTRFS_DEV_STAT_CORRUPTION_ERRS);
return -EIO;
}
cb_sum += csum_size;
disk_start += sectorsize;
}
}
return 0;
}
/*
* Reduce bio and io accounting for a compressed_bio with its corresponding bio.
*
* Return true if there is no pending bio nor io.
* Return false otherwise.
*/
static bool dec_and_test_compressed_bio(struct compressed_bio *cb, struct bio *bio)
{
struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
unsigned int bi_size = 0;
bool last_io = false;
struct bio_vec *bvec;
struct bvec_iter_all iter_all;
/*
* At endio time, bi_iter.bi_size doesn't represent the real bio size.
* Thus here we have to iterate through all segments to grab correct
* bio size.
*/
bio_for_each_segment_all(bvec, bio, iter_all)
bi_size += bvec->bv_len;
if (bio->bi_status)
cb->status = bio->bi_status;
ASSERT(bi_size && bi_size <= cb->compressed_len);
last_io = refcount_sub_and_test(bi_size >> fs_info->sectorsize_bits,
&cb->pending_sectors);
/*
* Here we must wake up the possible error handler after all other
* operations on @cb finished, or we can race with
* finish_compressed_bio_*() which may free @cb.
*/
wake_up_var(cb);
return last_io;
}
static void finish_compressed_bio_read(struct compressed_bio *cb)
{
unsigned int index;
struct page *page;
if (cb->status == BLK_STS_OK)
cb->status = errno_to_blk_status(btrfs_decompress_bio(cb));
/* Release the compressed pages */
for (index = 0; index < cb->nr_pages; index++) {
page = cb->compressed_pages[index];
@ -247,85 +152,63 @@ static void finish_compressed_bio_read(struct compressed_bio *cb)
}
/* Do io completion on the original bio */
if (cb->status != BLK_STS_OK) {
if (cb->status != BLK_STS_OK)
cb->orig_bio->bi_status = cb->status;
bio_endio(cb->orig_bio);
} else {
struct bio_vec *bvec;
struct bvec_iter_all iter_all;
/*
* We have verified the checksum already, set page checked so
* the end_io handlers know about it
*/
ASSERT(!bio_flagged(cb->orig_bio, BIO_CLONED));
bio_for_each_segment_all(bvec, cb->orig_bio, iter_all) {
u64 bvec_start = page_offset(bvec->bv_page) +
bvec->bv_offset;
btrfs_page_set_checked(btrfs_sb(cb->inode->i_sb),
bvec->bv_page, bvec_start,
bvec->bv_len);
}
bio_endio(cb->orig_bio);
}
bio_endio(cb->orig_bio);
/* Finally free the cb struct */
kfree(cb->compressed_pages);
kfree(cb);
}
/* when we finish reading compressed pages from the disk, we
* decompress them and then run the bio end_io routines on the
* decompressed pages (in the inode address space).
*
* This allows the checksumming and other IO error handling routines
* to work normally
*
* The compressed pages are freed here, and it must be run
* in process context
/*
* Verify the checksums and kick off repair if needed on the uncompressed data
* before decompressing it into the original bio and freeing the uncompressed
* pages.
*/
static void end_compressed_bio_read(struct bio *bio)
{
struct compressed_bio *cb = bio->bi_private;
struct inode *inode;
unsigned int mirror = btrfs_bio(bio)->mirror_num;
int ret = 0;
struct inode *inode = cb->inode;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_inode *bi = BTRFS_I(inode);
bool csum = !(bi->flags & BTRFS_INODE_NODATASUM) &&
!test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
blk_status_t status = bio->bi_status;
struct btrfs_bio *bbio = btrfs_bio(bio);
struct bvec_iter iter;
struct bio_vec bv;
u32 offset;
if (!dec_and_test_compressed_bio(cb, bio))
goto out;
btrfs_bio_for_each_sector(fs_info, bv, bbio, iter, offset) {
u64 start = bbio->file_offset + offset;
/*
* Record the correct mirror_num in cb->orig_bio so that
* read-repair can work properly.
*/
btrfs_bio(cb->orig_bio)->mirror_num = mirror;
cb->mirror_num = mirror;
if (!status &&
(!csum || !btrfs_check_data_csum(inode, bbio, offset,
bv.bv_page, bv.bv_offset))) {
clean_io_failure(fs_info, &bi->io_failure_tree,
&bi->io_tree, start, bv.bv_page,
btrfs_ino(bi), bv.bv_offset);
} else {
int ret;
/*
* Some IO in this cb have failed, just skip checksum as there
* is no way it could be correct.
*/
if (cb->status != BLK_STS_OK)
goto csum_failed;
refcount_inc(&cb->pending_ios);
ret = btrfs_repair_one_sector(inode, bbio, offset,
bv.bv_page, bv.bv_offset,
btrfs_submit_data_read_bio);
if (ret) {
refcount_dec(&cb->pending_ios);
status = errno_to_blk_status(ret);
}
}
}
inode = cb->inode;
ret = check_compressed_csum(BTRFS_I(inode), bio,
bio->bi_iter.bi_sector << 9);
if (ret)
goto csum_failed;
if (status)
cb->status = status;
/* ok, we're the last bio for this extent, lets start
* the decompression.
*/
ret = btrfs_decompress_bio(cb);
csum_failed:
if (ret)
cb->status = errno_to_blk_status(ret);
finish_compressed_bio_read(cb);
out:
if (refcount_dec_and_test(&cb->pending_ios))
finish_compressed_bio_read(cb);
btrfs_bio_free_csum(bbio);
bio_put(bio);
}
@ -403,6 +286,14 @@ static void finish_compressed_bio_write(struct compressed_bio *cb)
kfree(cb);
}
static void btrfs_finish_compressed_write_work(struct work_struct *work)
{
struct compressed_bio *cb =
container_of(work, struct compressed_bio, write_end_work);
finish_compressed_bio_write(cb);
}
/*
* Do the cleanup once all the compressed pages hit the disk. This will clear
* writeback on the file pages and free the compressed pages.
@ -414,29 +305,18 @@ static void end_compressed_bio_write(struct bio *bio)
{
struct compressed_bio *cb = bio->bi_private;
if (!dec_and_test_compressed_bio(cb, bio))
goto out;
if (bio->bi_status)
cb->status = bio->bi_status;
btrfs_record_physical_zoned(cb->inode, cb->start, bio);
if (refcount_dec_and_test(&cb->pending_ios)) {
struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
finish_compressed_bio_write(cb);
out:
btrfs_record_physical_zoned(cb->inode, cb->start, bio);
queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
}
bio_put(bio);
}
static blk_status_t submit_compressed_bio(struct btrfs_fs_info *fs_info,
struct bio *bio, int mirror_num)
{
blk_status_t ret;
ASSERT(bio->bi_iter.bi_size);
ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
if (ret)
return ret;
ret = btrfs_map_bio(fs_info, bio, mirror_num);
return ret;
}
/*
* Allocate a compressed_bio, which will be used to read/write on-disk
* (aka, compressed) * data.
@ -487,7 +367,7 @@ static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_byte
return ERR_PTR(ret);
}
*next_stripe_start = disk_bytenr + geom.len;
refcount_inc(&cb->pending_ios);
return bio;
}
@ -514,26 +394,25 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
struct compressed_bio *cb;
u64 cur_disk_bytenr = disk_start;
u64 next_stripe_start;
blk_status_t ret;
blk_status_t ret = BLK_STS_OK;
int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
const bool use_append = btrfs_use_zone_append(inode, disk_start);
const enum req_op bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
IS_ALIGNED(len, fs_info->sectorsize));
cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
cb = kmalloc(sizeof(struct compressed_bio), GFP_NOFS);
if (!cb)
return BLK_STS_RESOURCE;
refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
refcount_set(&cb->pending_ios, 1);
cb->status = BLK_STS_OK;
cb->inode = &inode->vfs_inode;
cb->start = start;
cb->len = len;
cb->mirror_num = 0;
cb->compressed_pages = compressed_pages;
cb->compressed_len = compressed_len;
cb->writeback = writeback;
cb->orig_bio = NULL;
INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
cb->nr_pages = nr_pages;
if (blkcg_css)
@ -554,8 +433,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
&next_stripe_start);
if (IS_ERR(bio)) {
ret = errno_to_blk_status(PTR_ERR(bio));
bio = NULL;
goto finish_cb;
break;
}
if (blkcg_css)
bio->bi_opf |= REQ_CGROUP_PUNT;
@ -599,44 +477,25 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
if (submit) {
if (!skip_sum) {
ret = btrfs_csum_one_bio(inode, bio, start, true);
if (ret)
goto finish_cb;
if (ret) {
bio->bi_status = ret;
bio_endio(bio);
break;
}
}
ret = submit_compressed_bio(fs_info, bio, 0);
if (ret)
goto finish_cb;
ASSERT(bio->bi_iter.bi_size);
btrfs_submit_bio(fs_info, bio, 0);
bio = NULL;
}
cond_resched();
}
if (blkcg_css)
kthread_associate_blkcg(NULL);
return 0;
finish_cb:
if (blkcg_css)
kthread_associate_blkcg(NULL);
if (bio) {
bio->bi_status = ret;
bio_endio(bio);
}
/* Last byte of @cb is submitted, endio will free @cb */
if (cur_disk_bytenr == disk_start + compressed_len)
return ret;
wait_var_event(cb, refcount_read(&cb->pending_sectors) ==
(disk_start + compressed_len - cur_disk_bytenr) >>
fs_info->sectorsize_bits);
/*
* Even with previous bio ended, we should still have io not yet
* submitted, thus need to finish manually.
*/
ASSERT(refcount_read(&cb->pending_sectors));
/* Now we are the only one referring @cb, can finish it safely. */
finish_compressed_bio_write(cb);
if (refcount_dec_and_test(&cb->pending_ios))
finish_compressed_bio_write(cb);
return ret;
}
@ -765,7 +624,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
int zeros;
zeros = PAGE_SIZE - zero_offset;
memzero_page(page, zero_offset, zeros);
flush_dcache_page(page);
}
}
@ -819,7 +677,6 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
blk_status_t ret;
int ret2;
int i;
u8 *sums;
em_tree = &BTRFS_I(inode)->extent_tree;
@ -837,17 +694,15 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
ASSERT(em->compress_type != BTRFS_COMPRESS_NONE);
compressed_len = em->block_len;
cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
cb = kmalloc(sizeof(struct compressed_bio), GFP_NOFS);
if (!cb) {
ret = BLK_STS_RESOURCE;
goto out;
}
refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
refcount_set(&cb->pending_ios, 1);
cb->status = BLK_STS_OK;
cb->inode = inode;
cb->mirror_num = mirror_num;
sums = cb->sums;
cb->start = em->orig_start;
em_len = em->len;
@ -893,9 +748,8 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
REQ_OP_READ, end_compressed_bio_read,
&next_stripe_start);
if (IS_ERR(comp_bio)) {
ret = errno_to_blk_status(PTR_ERR(comp_bio));
comp_bio = NULL;
goto finish_cb;
cb->status = errno_to_blk_status(PTR_ERR(comp_bio));
break;
}
}
/*
@ -931,22 +785,33 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
submit = true;
if (submit) {
unsigned int nr_sectors;
/* Save the original iter for read repair */
if (bio_op(comp_bio) == REQ_OP_READ)
btrfs_bio(comp_bio)->iter = comp_bio->bi_iter;
ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
if (ret)
goto finish_cb;
/*
* Save the initial offset of this chunk, as there
* is no direct correlation between compressed pages and
* the original file offset. The field is only used for
* printing error messages.
*/
btrfs_bio(comp_bio)->file_offset = file_offset;
nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
fs_info->sectorsize);
sums += fs_info->csum_size * nr_sectors;
ret = btrfs_lookup_bio_sums(inode, comp_bio, NULL);
if (ret) {
comp_bio->bi_status = ret;
bio_endio(comp_bio);
break;
}
ret = submit_compressed_bio(fs_info, comp_bio, mirror_num);
if (ret)
goto finish_cb;
ASSERT(comp_bio->bi_iter.bi_size);
btrfs_submit_bio(fs_info, comp_bio, mirror_num);
comp_bio = NULL;
}
}
if (refcount_dec_and_test(&cb->pending_ios))
finish_compressed_bio_read(cb);
return;
fail:
@ -964,25 +829,6 @@ out:
bio->bi_status = ret;
bio_endio(bio);
return;
finish_cb:
if (comp_bio) {
comp_bio->bi_status = ret;
bio_endio(comp_bio);
}
/* All bytes of @cb is submitted, endio will free @cb */
if (cur_disk_byte == disk_bytenr + compressed_len)
return;
wait_var_event(cb, refcount_read(&cb->pending_sectors) ==
(disk_bytenr + compressed_len - cur_disk_byte) >>
fs_info->sectorsize_bits);
/*
* Even with previous bio ended, we should still have io not yet
* submitted, thus need to finish @cb manually.
*/
ASSERT(refcount_read(&cb->pending_sectors));
/* Now we are the only one referring @cb, can finish it safely. */
finish_compressed_bio_read(cb);
}
/*
@ -1481,7 +1327,6 @@ int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
ASSERT(copy_start - decompressed < buf_len);
memcpy_to_page(bvec.bv_page, bvec.bv_offset,
buf + copy_start - decompressed, copy_len);
flush_dcache_page(bvec.bv_page);
cur_offset += copy_len;
bio_advance(orig_bio, copy_len);

View File

@ -30,8 +30,8 @@ static_assert((BTRFS_MAX_COMPRESSED % PAGE_SIZE) == 0);
#define BTRFS_ZLIB_DEFAULT_LEVEL 3
struct compressed_bio {
/* Number of sectors with unfinished IO (unsubmitted or unfinished) */
refcount_t pending_sectors;
/* Number of outstanding bios */
refcount_t pending_ios;
/* Number of compressed pages in the array */
unsigned int nr_pages;
@ -59,16 +59,12 @@ struct compressed_bio {
/* IO errors */
blk_status_t status;
int mirror_num;
/* for reads, this is the bio we are copying the data into */
struct bio *orig_bio;
/*
* the start of a variable length array of checksums only
* used by reads
*/
u8 sums[];
union {
/* For reads, this is the bio we are copying the data into */
struct bio *orig_bio;
struct work_struct write_end_work;
};
};
static inline unsigned int btrfs_compress_type(unsigned int type_level)

View File

@ -107,14 +107,6 @@ struct btrfs_ioctl_encoded_io_args;
#define BTRFS_STAT_CURR 0
#define BTRFS_STAT_PREV 1
/*
* Count how many BTRFS_MAX_EXTENT_SIZE cover the @size
*/
static inline u32 count_max_extents(u64 size)
{
return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
}
static inline unsigned long btrfs_chunk_item_size(int num_stripes)
{
BUG_ON(num_stripes == 0);
@ -229,6 +221,13 @@ struct btrfs_root_backup {
#define BTRFS_SUPER_INFO_OFFSET SZ_64K
#define BTRFS_SUPER_INFO_SIZE 4096
/*
* The reserved space at the beginning of each device.
* It covers the primary super block and leaves space for potential use by other
* tools like bootloaders or to lower potential damage of accidental overwrite.
*/
#define BTRFS_DEVICE_RANGE_RESERVED (SZ_1M)
/*
* the super block basically lists the main trees of the FS
* it currently lacks any block count etc etc
@ -248,8 +247,12 @@ struct btrfs_super_block {
__le64 chunk_root;
__le64 log_root;
/* this will help find the new super based on the log root */
__le64 log_root_transid;
/*
* This member has never been utilized since the very beginning, thus
* it's always 0 regardless of kernel version. We always use
* generation + 1 to read log tree root. So here we mark it deprecated.
*/
__le64 __unused_log_root_transid;
__le64 total_bytes;
__le64 bytes_used;
__le64 root_dir_objectid;
@ -635,6 +638,9 @@ enum {
/* Indicate we have half completed snapshot deletions pending. */
BTRFS_FS_UNFINISHED_DROPS,
/* Indicate we have to finish a zone to do next allocation. */
BTRFS_FS_NEED_ZONE_FINISH,
#if BITS_PER_LONG == 32
/* Indicate if we have error/warn message printed on 32bit systems */
BTRFS_FS_32BIT_ERROR,
@ -656,6 +662,18 @@ enum btrfs_exclusive_operation {
BTRFS_EXCLOP_SWAP_ACTIVATE,
};
/* Store data about transaction commits, exported via sysfs. */
struct btrfs_commit_stats {
/* Total number of commits */
u64 commit_count;
/* The maximum commit duration so far in ns */
u64 max_commit_dur;
/* The last commit duration in ns */
u64 last_commit_dur;
/* The total commit duration in ns */
u64 total_commit_dur;
};
struct btrfs_fs_info {
u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
unsigned long flags;
@ -850,11 +868,11 @@ struct btrfs_fs_info {
struct btrfs_workqueue *hipri_workers;
struct btrfs_workqueue *delalloc_workers;
struct btrfs_workqueue *flush_workers;
struct btrfs_workqueue *endio_workers;
struct btrfs_workqueue *endio_meta_workers;
struct btrfs_workqueue *endio_raid56_workers;
struct workqueue_struct *endio_workers;
struct workqueue_struct *endio_meta_workers;
struct workqueue_struct *endio_raid56_workers;
struct workqueue_struct *rmw_workers;
struct btrfs_workqueue *endio_meta_write_workers;
struct workqueue_struct *compressed_write_workers;
struct btrfs_workqueue *endio_write_workers;
struct btrfs_workqueue *endio_freespace_worker;
struct btrfs_workqueue *caching_workers;
@ -1032,6 +1050,12 @@ struct btrfs_fs_info {
u32 csums_per_leaf;
u32 stripesize;
/*
* Maximum size of an extent. BTRFS_MAX_EXTENT_SIZE on regular
* filesystem, on zoned it depends on the device constraints.
*/
u64 max_extent_size;
/* Block groups and devices containing active swapfiles. */
spinlock_t swapfile_pins_lock;
struct rb_root swapfile_pins;
@ -1047,6 +1071,8 @@ struct btrfs_fs_info {
*/
u64 zone_size;
/* Max size to emit ZONE_APPEND write command */
u64 max_zone_append_size;
struct mutex zoned_meta_io_lock;
spinlock_t treelog_bg_lock;
u64 treelog_bg;
@ -1063,6 +1089,11 @@ struct btrfs_fs_info {
spinlock_t zone_active_bgs_lock;
struct list_head zone_active_bgs;
/* Waiters when BTRFS_FS_NEED_ZONE_FINISH is set */
wait_queue_head_t zone_finish_wait;
/* Updates are not protected by any lock */
struct btrfs_commit_stats commit_stats;
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
spinlock_t ref_verify_lock;
@ -2475,8 +2506,6 @@ BTRFS_SETGET_STACK_FUNCS(super_chunk_root_level, struct btrfs_super_block,
chunk_root_level, 8);
BTRFS_SETGET_STACK_FUNCS(super_log_root, struct btrfs_super_block,
log_root, 64);
BTRFS_SETGET_STACK_FUNCS(super_log_root_transid, struct btrfs_super_block,
log_root_transid, 64);
BTRFS_SETGET_STACK_FUNCS(super_log_root_level, struct btrfs_super_block,
log_root_level, 8);
BTRFS_SETGET_STACK_FUNCS(super_total_bytes, struct btrfs_super_block,
@ -2733,8 +2762,16 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
enum btrfs_inline_ref_type is_data);
u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset);
static inline u8 *btrfs_csum_ptr(const struct btrfs_fs_info *fs_info, u8 *csums,
u64 offset)
{
u64 offset_in_sectors = offset >> fs_info->sectorsize_bits;
return csums + offset_in_sectors * fs_info->csum_size;
}
/*
* Take the number of bytes to be checksummmed and figure out how many leaves
* Take the number of bytes to be checksummed and figure out how many leaves
* it would require to store the csums for that many bytes.
*/
static inline u64 btrfs_csum_bytes_to_leaves(
@ -3251,11 +3288,18 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_siz
u64 btrfs_file_extent_end(const struct btrfs_path *path);
/* inode.c */
void btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
int mirror_num, enum btrfs_compression_type compress_type);
void btrfs_submit_data_write_bio(struct inode *inode, struct bio *bio, int mirror_num);
void btrfs_submit_data_read_bio(struct inode *inode, struct bio *bio,
int mirror_num, enum btrfs_compression_type compress_type);
int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
u32 pgoff, u8 *csum, const u8 * const csum_expected);
int btrfs_check_data_csum(struct inode *inode, struct btrfs_bio *bbio,
u32 bio_offset, struct page *page, u32 pgoff);
unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio,
u32 bio_offset, struct page *page,
u64 start, u64 end);
int btrfs_check_data_csum(struct inode *inode, struct btrfs_bio *bbio,
u32 bio_offset, struct page *page, u32 pgoff);
struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
u64 start, u64 len);
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
@ -3305,9 +3349,9 @@ void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args);
struct inode *btrfs_new_subvol_inode(struct user_namespace *mnt_userns,
struct inode *dir);
void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
unsigned *bits);
u32 bits);
void btrfs_clear_delalloc_extent(struct inode *inode,
struct extent_state *state, unsigned *bits);
struct extent_state *state, u32 bits);
void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
struct extent_state *other);
void btrfs_split_delalloc_extent(struct inode *inode,
@ -3353,6 +3397,12 @@ int btrfs_writepage_cow_fixup(struct page *page);
void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
struct page *page, u64 start,
u64 end, bool uptodate);
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
int compress_type);
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
u64 file_offset, u64 disk_bytenr,
u64 disk_io_size,
struct page **pages);
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
struct btrfs_ioctl_encoded_io_args *encoded);
ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
@ -4009,6 +4059,19 @@ static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
return fs_info->zone_size > 0;
}
/*
* Count how many fs_info->max_extent_size cover the @size
*/
static inline u32 count_max_extents(struct btrfs_fs_info *fs_info, u64 size)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
if (!fs_info)
return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
#endif
return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
}
static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
{
return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;

View File

@ -273,7 +273,7 @@ static void calc_inode_reservations(struct btrfs_fs_info *fs_info,
u64 num_bytes, u64 disk_num_bytes,
u64 *meta_reserve, u64 *qgroup_reserve)
{
u64 nr_extents = count_max_extents(num_bytes);
u64 nr_extents = count_max_extents(fs_info, num_bytes);
u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, disk_num_bytes);
u64 inode_update = btrfs_calc_metadata_size(fs_info, 1);
@ -350,7 +350,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
* needs to free the reservation we just made.
*/
spin_lock(&inode->lock);
nr_extents = count_max_extents(num_bytes);
nr_extents = count_max_extents(fs_info, num_bytes);
btrfs_mod_outstanding_extents(inode, nr_extents);
inode->csum_bytes += disk_num_bytes;
btrfs_calculate_inode_block_rsv_size(fs_info, inode);
@ -413,7 +413,7 @@ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes)
unsigned num_extents;
spin_lock(&inode->lock);
num_extents = count_max_extents(num_bytes);
num_extents = count_max_extents(fs_info, num_bytes);
btrfs_mod_outstanding_extents(inode, -num_extents);
btrfs_calculate_inode_block_rsv_size(fs_info, inode);
spin_unlock(&inode->lock);

View File

@ -52,18 +52,6 @@ static inline void btrfs_init_delayed_node(
INIT_LIST_HEAD(&delayed_node->p_list);
}
static inline int btrfs_is_continuous_delayed_item(
struct btrfs_delayed_item *item1,
struct btrfs_delayed_item *item2)
{
if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
item1->key.objectid == item2->key.objectid &&
item1->key.type == item2->key.type &&
item1->key.offset + 1 == item2->key.offset)
return 1;
return 0;
}
static struct btrfs_delayed_node *btrfs_get_delayed_node(
struct btrfs_inode *btrfs_inode)
{
@ -398,8 +386,7 @@ static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
}
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
struct btrfs_delayed_item *ins,
int action)
struct btrfs_delayed_item *ins)
{
struct rb_node **p, *node;
struct rb_node *parent_node = NULL;
@ -408,9 +395,9 @@ static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
int cmp;
bool leftmost = true;
if (action == BTRFS_DELAYED_INSERTION_ITEM)
if (ins->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
root = &delayed_node->ins_root;
else if (action == BTRFS_DELAYED_DELETION_ITEM)
else if (ins->ins_or_del == BTRFS_DELAYED_DELETION_ITEM)
root = &delayed_node->del_root;
else
BUG();
@ -436,32 +423,19 @@ static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
rb_link_node(node, parent_node, p);
rb_insert_color_cached(node, root, leftmost);
ins->delayed_node = delayed_node;
ins->ins_or_del = action;
if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
action == BTRFS_DELAYED_INSERTION_ITEM &&
/* Delayed items are always for dir index items. */
ASSERT(ins->key.type == BTRFS_DIR_INDEX_KEY);
if (ins->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM &&
ins->key.offset >= delayed_node->index_cnt)
delayed_node->index_cnt = ins->key.offset + 1;
delayed_node->index_cnt = ins->key.offset + 1;
delayed_node->count++;
atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
return 0;
}
static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
struct btrfs_delayed_item *item)
{
return __btrfs_add_delayed_item(node, item,
BTRFS_DELAYED_INSERTION_ITEM);
}
static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
struct btrfs_delayed_item *item)
{
return __btrfs_add_delayed_item(node, item,
BTRFS_DELAYED_DELETION_ITEM);
}
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
int seq = atomic_inc_return(&delayed_root->items_seq);
@ -573,7 +547,13 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
trace_btrfs_space_reservation(fs_info, "delayed_item",
item->key.objectid,
num_bytes, 1);
item->bytes_reserved = num_bytes;
/*
* For insertions we track reserved metadata space by accounting
* for the number of leaves that will be used, based on the delayed
* node's index_items_size field.
*/
if (item->ins_or_del == BTRFS_DELAYED_DELETION_ITEM)
item->bytes_reserved = num_bytes;
}
return ret;
@ -599,6 +579,21 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
}
static void btrfs_delayed_item_release_leaves(struct btrfs_delayed_node *node,
unsigned int num_leaves)
{
struct btrfs_fs_info *fs_info = node->root->fs_info;
const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, num_leaves);
/* There are no space reservations during log replay, bail out. */
if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
return;
trace_btrfs_space_reservation(fs_info, "delayed_item", node->inode_id,
bytes, 0);
btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv, bytes, NULL);
}
static int btrfs_delayed_inode_reserve_metadata(
struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@ -672,22 +667,53 @@ static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
}
/*
* Insert a single delayed item or a batch of delayed items that have consecutive
* keys if they exist.
* Insert a single delayed item or a batch of delayed items, as many as possible
* that fit in a leaf. The delayed items (dir index keys) are sorted by their key
* in the rbtree, and if there's a gap between two consecutive dir index items,
* then it means at some point we had delayed dir indexes to add but they got
* removed (by btrfs_delete_delayed_dir_index()) before we attempted to flush them
* into the subvolume tree. Dir index keys also have their offsets coming from a
* monotonically increasing counter, so we can't get new keys with an offset that
* fits within a gap between delayed dir index items.
*/
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_delayed_item *first_item)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_node *node = first_item->delayed_node;
LIST_HEAD(item_list);
struct btrfs_delayed_item *curr;
struct btrfs_delayed_item *next;
const int max_size = BTRFS_LEAF_DATA_SIZE(root->fs_info);
const int max_size = BTRFS_LEAF_DATA_SIZE(fs_info);
struct btrfs_item_batch batch;
int total_size;
char *ins_data = NULL;
int ret;
bool continuous_keys_only = false;
lockdep_assert_held(&node->mutex);
/*
* During normal operation the delayed index offset is continuously
* increasing, so we can batch insert all items as there will not be any
* overlapping keys in the tree.
*
* The exception to this is log replay, where we may have interleaved
* offsets in the tree, so our batch needs to be continuous keys only in
* order to ensure we do not end up with out of order items in our leaf.
*/
if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
continuous_keys_only = true;
/*
* For delayed items to insert, we track reserved metadata bytes based
* on the number of leaves that we will use.
* See btrfs_insert_delayed_dir_index() and
* btrfs_delayed_item_reserve_metadata()).
*/
ASSERT(first_item->bytes_reserved == 0);
list_add_tail(&first_item->tree_list, &item_list);
batch.total_data_size = first_item->data_len;
@ -699,9 +725,19 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
int next_size;
next = __btrfs_next_delayed_item(curr);
if (!next || !btrfs_is_continuous_delayed_item(curr, next))
if (!next)
break;
/*
* We cannot allow gaps in the key space if we're doing log
* replay.
*/
if (continuous_keys_only &&
(next->key.offset != curr->key.offset + 1))
break;
ASSERT(next->bytes_reserved == 0);
next_size = next->data_len + sizeof(struct btrfs_item);
if (total_size + next_size > max_size)
break;
@ -758,9 +794,41 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
*/
btrfs_release_path(path);
ASSERT(node->index_item_leaves > 0);
/*
* For normal operations we will batch an entire leaf's worth of delayed
* items, so if there are more items to process we can decrement
* index_item_leaves by 1 as we inserted 1 leaf's worth of items.
*
* However for log replay we may not have inserted an entire leaf's
* worth of items, we may have not had continuous items, so decrementing
* here would mess up the index_item_leaves accounting. For this case
* only clean up the accounting when there are no items left.
*/
if (next && !continuous_keys_only) {
/*
* We inserted one batch of items into a leaf and there are more
* items to flush in a future batch, now release one unit of
* metadata space from the delayed block reserve, corresponding to
* the leaf we just flushed to.
*/
btrfs_delayed_item_release_leaves(node, 1);
node->index_item_leaves--;
} else if (!next) {
/*
* There are no more items to insert. We can have a number of
* reserved leaves > 1 here - this happens when many dir index
* items are added and then removed before they are flushed (file
* names with a very short life, never span a transaction). So
* release all remaining leaves.
*/
btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
node->index_item_leaves = 0;
}
list_for_each_entry_safe(curr, next, &item_list, tree_list) {
list_del(&curr->tree_list);
btrfs_delayed_item_release_metadata(root, curr);
btrfs_release_delayed_item(curr);
}
out:
@ -796,62 +864,75 @@ static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_delayed_item *item)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_item *curr, *next;
struct extent_buffer *leaf;
struct btrfs_key key;
struct list_head head;
int nitems, i, last_item;
int ret = 0;
struct extent_buffer *leaf = path->nodes[0];
LIST_HEAD(batch_list);
int nitems, slot, last_slot;
int ret;
u64 total_reserved_size = item->bytes_reserved;
BUG_ON(!path->nodes[0]);
ASSERT(leaf != NULL);
leaf = path->nodes[0];
i = path->slots[0];
last_item = btrfs_header_nritems(leaf) - 1;
if (i > last_item)
return -ENOENT; /* FIXME: Is errno suitable? */
next = item;
INIT_LIST_HEAD(&head);
btrfs_item_key_to_cpu(leaf, &key, i);
nitems = 0;
slot = path->slots[0];
last_slot = btrfs_header_nritems(leaf) - 1;
/*
* count the number of the dir index items that we can delete in batch
* Our caller always gives us a path pointing to an existing item, so
* this can not happen.
*/
while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
list_add_tail(&next->tree_list, &head);
nitems++;
ASSERT(slot <= last_slot);
if (WARN_ON(slot > last_slot))
return -ENOENT;
nitems = 1;
curr = item;
list_add_tail(&curr->tree_list, &batch_list);
/*
* Keep checking if the next delayed item matches the next item in the
* leaf - if so, we can add it to the batch of items to delete from the
* leaf.
*/
while (slot < last_slot) {
struct btrfs_key key;
curr = next;
next = __btrfs_next_delayed_item(curr);
if (!next)
break;
if (!btrfs_is_continuous_delayed_item(curr, next))
slot++;
btrfs_item_key_to_cpu(leaf, &key, slot);
if (btrfs_comp_cpu_keys(&next->key, &key) != 0)
break;
i++;
if (i > last_item)
break;
btrfs_item_key_to_cpu(leaf, &key, i);
nitems++;
curr = next;
list_add_tail(&curr->tree_list, &batch_list);
total_reserved_size += curr->bytes_reserved;
}
if (!nitems)
return 0;
ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
if (ret)
goto out;
return ret;
list_for_each_entry_safe(curr, next, &head, tree_list) {
btrfs_delayed_item_release_metadata(root, curr);
/* In case of BTRFS_FS_LOG_RECOVERING items won't have reserved space */
if (total_reserved_size > 0) {
/*
* Check btrfs_delayed_item_reserve_metadata() to see why we
* don't need to release/reserve qgroup space.
*/
trace_btrfs_space_reservation(fs_info, "delayed_item",
item->key.objectid, total_reserved_size,
0);
btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv,
total_reserved_size, NULL);
}
list_for_each_entry_safe(curr, next, &batch_list, tree_list) {
list_del(&curr->tree_list);
btrfs_release_delayed_item(curr);
}
out:
return ret;
return 0;
}
static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
@ -859,43 +940,52 @@ static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_delayed_node *node)
{
struct btrfs_delayed_item *curr, *prev;
int ret = 0;
do_again:
mutex_lock(&node->mutex);
curr = __btrfs_first_delayed_deletion_item(node);
if (!curr)
goto delete_fail;
while (ret == 0) {
struct btrfs_delayed_item *item;
ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
if (ret < 0)
goto delete_fail;
else if (ret > 0) {
/*
* can't find the item which the node points to, so this node
* is invalid, just drop it.
*/
prev = curr;
curr = __btrfs_next_delayed_item(prev);
btrfs_release_delayed_item(prev);
ret = 0;
btrfs_release_path(path);
if (curr) {
mutex_lock(&node->mutex);
item = __btrfs_first_delayed_deletion_item(node);
if (!item) {
mutex_unlock(&node->mutex);
goto do_again;
} else
goto delete_fail;
break;
}
ret = btrfs_search_slot(trans, root, &item->key, path, -1, 1);
if (ret > 0) {
/*
* There's no matching item in the leaf. This means we
* have already deleted this item in a past run of the
* delayed items. We ignore errors when running delayed
* items from an async context, through a work queue job
* running btrfs_async_run_delayed_root(), and don't
* release delayed items that failed to complete. This
* is because we will retry later, and at transaction
* commit time we always run delayed items and will
* then deal with errors if they fail to run again.
*
* So just release delayed items for which we can't find
* an item in the tree, and move to the next item.
*/
btrfs_release_path(path);
btrfs_release_delayed_item(item);
ret = 0;
} else if (ret == 0) {
ret = btrfs_batch_delete_items(trans, root, path, item);
btrfs_release_path(path);
}
/*
* We unlock and relock on each iteration, this is to prevent
* blocking other tasks for too long while we are being run from
* the async context (work queue job). Those tasks are typically
* running system calls like creat/mkdir/rename/unlink/etc which
* need to add delayed items to this delayed node.
*/
mutex_unlock(&node->mutex);
}
btrfs_batch_delete_items(trans, root, path, curr);
btrfs_release_path(path);
mutex_unlock(&node->mutex);
goto do_again;
delete_fail:
btrfs_release_path(path);
mutex_unlock(&node->mutex);
return ret;
}
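The comment above spells out why the loop drops and re-takes node->mutex on every iteration: syscalls such as creat/mkdir/rename/unlink need the same lock to queue new delayed items. As a rough user-space sketch of that unlock-between-iterations pattern (hypothetical names, pthreads instead of the kernel mutex, no error handling), draining one item per lock hold could look like this:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_item {
	struct toy_item *next;
	int val;
};

static pthread_mutex_t node_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct toy_item *pending;	/* stands in for the delayed node's item tree */

static void toy_queue_item(int val)
{
	struct toy_item *it = malloc(sizeof(*it));

	it->val = val;
	pthread_mutex_lock(&node_mutex);
	it->next = pending;
	pending = it;
	pthread_mutex_unlock(&node_mutex);
}

static void toy_run_delayed_items(void)
{
	for (;;) {
		struct toy_item *it;

		pthread_mutex_lock(&node_mutex);
		it = pending;
		if (!it) {
			pthread_mutex_unlock(&node_mutex);
			break;
		}
		pending = it->next;
		/* Do the per-item work while holding the lock... */
		printf("ran delayed item %d\n", it->val);
		/* ...then drop it so writers can queue more items in between. */
		pthread_mutex_unlock(&node_mutex);
		free(it);
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		toy_queue_item(i);
	toy_run_delayed_items();
	return 0;
}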
@ -1354,9 +1444,13 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
struct btrfs_disk_key *disk_key, u8 type,
u64 index)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
const unsigned int leaf_data_size = BTRFS_LEAF_DATA_SIZE(fs_info);
struct btrfs_delayed_node *delayed_node;
struct btrfs_delayed_item *delayed_item;
struct btrfs_dir_item *dir_item;
bool reserve_leaf_space;
u32 data_len;
int ret;
delayed_node = btrfs_get_or_create_delayed_node(dir);
@ -1372,6 +1466,7 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
delayed_item->key.objectid = btrfs_ino(dir);
delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
delayed_item->key.offset = index;
delayed_item->ins_or_del = BTRFS_DELAYED_INSERTION_ITEM;
dir_item = (struct btrfs_dir_item *)delayed_item->data;
dir_item->location = *disk_key;
@ -1381,15 +1476,52 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
btrfs_set_stack_dir_type(dir_item, type);
memcpy((char *)(dir_item + 1), name, name_len);
ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
/*
* we have reserved enough space when we start a new transaction,
* so reserving metadata failure is impossible
*/
BUG_ON(ret);
data_len = delayed_item->data_len + sizeof(struct btrfs_item);
mutex_lock(&delayed_node->mutex);
ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
if (delayed_node->index_item_leaves == 0 ||
delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
delayed_node->curr_index_batch_size = data_len;
reserve_leaf_space = true;
} else {
delayed_node->curr_index_batch_size += data_len;
reserve_leaf_space = false;
}
if (reserve_leaf_space) {
ret = btrfs_delayed_item_reserve_metadata(trans, dir->root,
delayed_item);
/*
* Space was reserved for a dir index item insertion when we
* started the transaction, so getting a failure here should be
* impossible.
*/
if (WARN_ON(ret)) {
mutex_unlock(&delayed_node->mutex);
btrfs_release_delayed_item(delayed_item);
goto release_node;
}
delayed_node->index_item_leaves++;
} else if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
/*
* Adding the new dir index item does not require touching another
* leaf, so we can release 1 unit of metadata that was previously
* reserved when starting the transaction. This applies only to
* the case where we had a transaction start and excludes the
* transaction join case (when replaying log trees).
*/
trace_btrfs_space_reservation(fs_info, "transaction",
trans->transid, bytes, 0);
btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
ASSERT(trans->bytes_reserved >= bytes);
trans->bytes_reserved -= bytes;
}
ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
if (unlikely(ret)) {
btrfs_err(trans->fs_info,
"err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
@ -1417,8 +1549,37 @@ static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
return 1;
}
btrfs_delayed_item_release_metadata(node->root, item);
/*
* For delayed items to insert, we track reserved metadata bytes based
* on the number of leaves that we will use.
* See btrfs_insert_delayed_dir_index() and
* btrfs_delayed_item_reserve_metadata()).
*/
ASSERT(item->bytes_reserved == 0);
ASSERT(node->index_item_leaves > 0);
/*
* If there's only one leaf reserved, we can decrement this item from the
* current batch, otherwise we can not because we don't know which leaf
* it belongs to. With the current limit on delayed items, we rarely
* accumulate enough dir index items to fill more than one leaf (even
* when using a leaf size of 4K).
*/
if (node->index_item_leaves == 1) {
const u32 data_len = item->data_len + sizeof(struct btrfs_item);
ASSERT(node->curr_index_batch_size >= data_len);
node->curr_index_batch_size -= data_len;
}
btrfs_release_delayed_item(item);
/* If we now have no more dir index items, we can release all leaves. */
if (RB_EMPTY_ROOT(&node->ins_root.rb_root)) {
btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
node->index_item_leaves = 0;
}
mutex_unlock(&node->mutex);
return 0;
}
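The two hunks above account reserved metadata for dir index insertions per leaf rather than per item: a new unit is reserved only when the running batch would overflow the current leaf, and the single-leaf batch is shrunk again when an item is deleted before the batch is flushed. A minimal stand-alone sketch of the insertion-side accounting, using made-up constants rather than the real on-disk sizes, might look like the following:

/*
 * Toy model (not btrfs code) of the per-leaf accounting for delayed dir
 * index items. LEAF_DATA_SIZE and ITEM_HEADER_SIZE are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define LEAF_DATA_SIZE   16256U	/* roughly: nodesize minus leaf header */
#define ITEM_HEADER_SIZE 25U	/* per-item header inside a leaf */

struct toy_delayed_node {
	unsigned int curr_index_batch_size;	/* bytes queued for the current leaf */
	unsigned int index_item_leaves;		/* leaves reserved so far */
};

/* Returns true when space for one more leaf had to be reserved. */
static bool toy_queue_dir_index(struct toy_delayed_node *node,
				unsigned int payload_size)
{
	unsigned int data_len = payload_size + ITEM_HEADER_SIZE;

	if (node->index_item_leaves == 0 ||
	    node->curr_index_batch_size + data_len > LEAF_DATA_SIZE) {
		node->curr_index_batch_size = data_len;
		node->index_item_leaves++;
		return true;
	}
	node->curr_index_batch_size += data_len;
	return false;
}

int main(void)
{
	struct toy_delayed_node node = { 0, 0 };
	unsigned int new_leaves = 0;

	for (int i = 0; i < 1000; i++)
		new_leaves += toy_queue_dir_index(&node, 80);

	printf("1000 items -> %u leaves reserved (%u bytes in current batch)\n",
	       new_leaves, node.curr_index_batch_size);
	return 0;
}

The kernel code additionally hands back the transaction-start reservation when no new leaf is needed, which the sketch leaves out.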
@ -1451,6 +1612,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
}
item->key = item_key;
item->ins_or_del = BTRFS_DELAYED_DELETION_ITEM;
ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
/*
@ -1465,7 +1627,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
}
mutex_lock(&node->mutex);
ret = __btrfs_add_delayed_deletion_item(node, item);
ret = __btrfs_add_delayed_item(node, item);
if (unlikely(ret)) {
btrfs_err(trans->fs_info,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
@ -1833,12 +1995,17 @@ static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
mutex_lock(&delayed_node->mutex);
curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
while (curr_item) {
btrfs_delayed_item_release_metadata(root, curr_item);
prev_item = curr_item;
curr_item = __btrfs_next_delayed_item(prev_item);
btrfs_release_delayed_item(prev_item);
}
if (delayed_node->index_item_leaves > 0) {
btrfs_delayed_item_release_leaves(delayed_node,
delayed_node->index_item_leaves);
delayed_node->index_item_leaves = 0;
}
curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
while (curr_item) {
btrfs_delayed_item_release_metadata(root, curr_item);


@ -58,6 +58,17 @@ struct btrfs_delayed_node {
u64 index_cnt;
unsigned long flags;
int count;
/*
* The size of the next batch of dir index items to insert (if this
* node is from a directory inode). Protected by @mutex.
*/
u32 curr_index_batch_size;
/*
* Number of leaves reserved for inserting dir index items (if this
* node belongs to a directory inode). This may be larger than the
* actual number of leaves we end up using. Protected by @mutex.
*/
u32 index_item_leaves;
};
struct btrfs_delayed_item {


@ -132,7 +132,7 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
spin_lock(&delayed_rsv->lock);
delayed_rsv->size += num_bytes;
delayed_rsv->full = 0;
delayed_rsv->full = false;
spin_unlock(&delayed_rsv->lock);
trans->delayed_ref_updates = 0;
}
@ -175,7 +175,7 @@ void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
if (num_bytes)
delayed_refs_rsv->reserved += num_bytes;
if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
delayed_refs_rsv->full = 1;
delayed_refs_rsv->full = true;
spin_unlock(&delayed_refs_rsv->lock);
if (num_bytes)


@ -587,7 +587,8 @@ bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
ASSERT(!IS_ERR(em));
map = em->map_lookup;
num_extents = cur_extent = 0;
num_extents = 0;
cur_extent = 0;
for (i = 0; i < map->num_stripes; i++) {
/* We have more device extent to copy */
if (srcdev != map->stripes[i].dev)


@ -51,7 +51,6 @@
BTRFS_SUPER_FLAG_METADUMP |\
BTRFS_SUPER_FLAG_METADUMP_V2)
static void end_workqueue_fn(struct btrfs_work *work);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
struct btrfs_fs_info *fs_info);
@ -64,40 +63,6 @@ static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
/*
* btrfs_end_io_wq structs are used to do processing in task context when an IO
* is complete. This is used during reads to verify checksums, and it is used
* by writes to insert metadata for new file extents after IO is complete.
*/
struct btrfs_end_io_wq {
struct bio *bio;
bio_end_io_t *end_io;
void *private;
struct btrfs_fs_info *info;
blk_status_t status;
enum btrfs_wq_endio_type metadata;
struct btrfs_work work;
};
static struct kmem_cache *btrfs_end_io_wq_cache;
int __init btrfs_end_io_wq_init(void)
{
btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
sizeof(struct btrfs_end_io_wq),
0,
SLAB_MEM_SPREAD,
NULL);
if (!btrfs_end_io_wq_cache)
return -ENOMEM;
return 0;
}
void __cold btrfs_end_io_wq_exit(void)
{
kmem_cache_destroy(btrfs_end_io_wq_cache);
}
static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
{
if (fs_info->csum_shash)
@ -256,8 +221,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
goto out;
}
btrfs_err_rl(eb->fs_info,
"parent transid verify failed on %llu wanted %llu found %llu",
eb->start,
"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
eb->start, eb->read_mirror,
parent_transid, btrfs_header_generation(eb));
ret = 1;
clear_extent_buffer_uptodate(eb);
@ -587,21 +552,23 @@ static int validate_extent_buffer(struct extent_buffer *eb)
found_start = btrfs_header_bytenr(eb);
if (found_start != eb->start) {
btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
eb->start, found_start);
btrfs_err_rl(fs_info,
"bad tree block start, mirror %u want %llu have %llu",
eb->read_mirror, eb->start, found_start);
ret = -EIO;
goto out;
}
if (check_tree_block_fsid(eb)) {
btrfs_err_rl(fs_info, "bad fsid on block %llu",
eb->start);
btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u",
eb->start, eb->read_mirror);
ret = -EIO;
goto out;
}
found_level = btrfs_header_level(eb);
if (found_level >= BTRFS_MAX_LEVEL) {
btrfs_err(fs_info, "bad tree block level %d on %llu",
(int)btrfs_header_level(eb), eb->start);
btrfs_err(fs_info,
"bad tree block level, mirror %u level %d on logical %llu",
eb->read_mirror, btrfs_header_level(eb), eb->start);
ret = -EIO;
goto out;
}
@ -612,8 +579,8 @@ static int validate_extent_buffer(struct extent_buffer *eb)
if (memcmp(result, header_csum, csum_size) != 0) {
btrfs_warn_rl(fs_info,
"checksum verify failed on %llu wanted " CSUM_FMT " found " CSUM_FMT " level %d",
eb->start,
"checksum verify failed on logical %llu mirror %u wanted " CSUM_FMT " found " CSUM_FMT " level %d",
eb->start, eb->read_mirror,
CSUM_FMT_VALUE(csum_size, header_csum),
CSUM_FMT_VALUE(csum_size, result),
btrfs_header_level(eb));
@ -638,8 +605,8 @@ static int validate_extent_buffer(struct extent_buffer *eb)
set_extent_buffer_uptodate(eb);
else
btrfs_err(fs_info,
"block=%llu read time tree block corruption detected",
eb->start);
"read time tree block corruption detected on logical %llu mirror %u",
eb->start, eb->read_mirror);
out:
return ret;
}
@ -740,58 +707,6 @@ err:
return ret;
}
static void end_workqueue_bio(struct bio *bio)
{
struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
struct btrfs_fs_info *fs_info;
struct btrfs_workqueue *wq;
fs_info = end_io_wq->info;
end_io_wq->status = bio->bi_status;
if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
wq = fs_info->endio_meta_write_workers;
else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
wq = fs_info->endio_freespace_worker;
else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
wq = fs_info->endio_raid56_workers;
else
wq = fs_info->endio_write_workers;
} else {
if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
wq = fs_info->endio_raid56_workers;
else if (end_io_wq->metadata)
wq = fs_info->endio_meta_workers;
else
wq = fs_info->endio_workers;
}
btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
btrfs_queue_work(wq, &end_io_wq->work);
}
blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
enum btrfs_wq_endio_type metadata)
{
struct btrfs_end_io_wq *end_io_wq;
end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
if (!end_io_wq)
return BLK_STS_RESOURCE;
end_io_wq->private = bio->bi_private;
end_io_wq->end_io = bio->bi_end_io;
end_io_wq->info = info;
end_io_wq->status = 0;
end_io_wq->bio = bio;
end_io_wq->metadata = metadata;
bio->bi_private = end_io_wq;
bio->bi_end_io = end_workqueue_bio;
return 0;
}
static void run_one_async_start(struct btrfs_work *work)
{
struct async_submit_bio *async;
@ -816,7 +731,6 @@ static void run_one_async_done(struct btrfs_work *work)
{
struct async_submit_bio *async;
struct inode *inode;
blk_status_t ret;
async = container_of(work, struct async_submit_bio, work);
inode = async->inode;
@ -834,11 +748,7 @@ static void run_one_async_done(struct btrfs_work *work)
* This changes nothing when cgroups aren't in use.
*/
async->bio->bi_opf |= REQ_CGROUP_PUNT;
ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num);
if (ret) {
async->bio->bi_status = ret;
bio_endio(async->bio);
}
btrfs_submit_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num);
}
static void run_one_async_free(struct btrfs_work *work)
@ -849,16 +759,23 @@ static void run_one_async_free(struct btrfs_work *work)
kfree(async);
}
blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
int mirror_num, u64 dio_file_offset,
extent_submit_bio_start_t *submit_bio_start)
/*
* Submit bio to an async queue.
*
* Return:
* - true if the work has been successfully submitted
* - false in case of error
*/
bool btrfs_wq_submit_bio(struct inode *inode, struct bio *bio, int mirror_num,
u64 dio_file_offset,
extent_submit_bio_start_t *submit_bio_start)
{
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
struct async_submit_bio *async;
async = kmalloc(sizeof(*async), GFP_NOFS);
if (!async)
return BLK_STS_RESOURCE;
return false;
async->inode = inode;
async->bio = bio;
@ -876,7 +793,7 @@ blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
btrfs_queue_work(fs_info->hipri_workers, &async->work);
else
btrfs_queue_work(fs_info->workers, &async->work);
return 0;
return true;
}
static blk_status_t btree_csum_one_bio(struct bio *bio)
@ -902,7 +819,7 @@ static blk_status_t btree_submit_bio_start(struct inode *inode, struct bio *bio,
{
/*
* when we're called for a write, we're already in the async
* submission context. Just jump into btrfs_map_bio
* submission context. Just jump into btrfs_submit_bio.
*/
return btree_csum_one_bio(bio);
}
@ -924,32 +841,29 @@ void btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio, int mirror_
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
blk_status_t ret;
bio->bi_opf |= REQ_META;
if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
/*
* called for a read, do the setup so that checksum validation
* can happen in the async kernel threads
*/
ret = btrfs_bio_wq_end_io(fs_info, bio,
BTRFS_WQ_ENDIO_METADATA);
if (!ret)
ret = btrfs_map_bio(fs_info, bio, mirror_num);
} else if (!should_async_write(fs_info, BTRFS_I(inode))) {
ret = btree_csum_one_bio(bio);
if (!ret)
ret = btrfs_map_bio(fs_info, bio, mirror_num);
} else {
/*
* kthread helpers are used to submit writes so that
* checksumming can happen in parallel across all CPUs
*/
ret = btrfs_wq_submit_bio(inode, bio, mirror_num, 0,
btree_submit_bio_start);
btrfs_submit_bio(fs_info, bio, mirror_num);
return;
}
/*
* Kthread helpers are used to submit writes so that checksumming can
* happen in parallel across all CPUs.
*/
if (should_async_write(fs_info, BTRFS_I(inode)) &&
btrfs_wq_submit_bio(inode, bio, mirror_num, 0, btree_submit_bio_start))
return;
ret = btree_csum_one_bio(bio);
if (ret) {
bio->bi_status = ret;
bio_endio(bio);
return;
}
btrfs_submit_bio(fs_info, bio, mirror_num);
}
#ifdef CONFIG_MIGRATION
@ -1870,7 +1784,7 @@ again:
fail:
/*
* If our caller provided us an anonymous device, then it's his
* responsability to free it in case we fail. So we have to set our
* responsibility to free it in case we fail. So we have to set our
* root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
* and once again by our caller.
*/
@ -1953,25 +1867,6 @@ struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
return root;
}
/*
* called by the kthread helper functions to finally call the bio end_io
* functions. This is where read checksum verification actually happens
*/
static void end_workqueue_fn(struct btrfs_work *work)
{
struct bio *bio;
struct btrfs_end_io_wq *end_io_wq;
end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
bio = end_io_wq->bio;
bio->bi_status = end_io_wq->status;
bio->bi_private = end_io_wq->private;
bio->bi_end_io = end_io_wq->end_io;
bio_endio(bio);
kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
}
static int cleaner_kthread(void *arg)
{
struct btrfs_fs_info *fs_info = arg;
@ -2278,10 +2173,14 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
btrfs_destroy_workqueue(fs_info->delalloc_workers);
btrfs_destroy_workqueue(fs_info->hipri_workers);
btrfs_destroy_workqueue(fs_info->workers);
btrfs_destroy_workqueue(fs_info->endio_workers);
btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
if (fs_info->endio_workers)
destroy_workqueue(fs_info->endio_workers);
if (fs_info->endio_raid56_workers)
destroy_workqueue(fs_info->endio_raid56_workers);
if (fs_info->rmw_workers)
destroy_workqueue(fs_info->rmw_workers);
if (fs_info->compressed_write_workers)
destroy_workqueue(fs_info->compressed_write_workers);
btrfs_destroy_workqueue(fs_info->endio_write_workers);
btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
btrfs_destroy_workqueue(fs_info->delayed_workers);
@ -2295,8 +2194,8 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
* the queues used for metadata I/O, since tasks from those other work
* queues can do metadata I/O operations.
*/
btrfs_destroy_workqueue(fs_info->endio_meta_workers);
btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
if (fs_info->endio_meta_workers)
destroy_workqueue(fs_info->endio_meta_workers);
}
static void free_root_extent_buffers(struct btrfs_root *root)
@ -2426,7 +2325,9 @@ static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
BTRFS_I(inode)->location.objectid = BTRFS_BTREE_INODE_OBJECTID;
BTRFS_I(inode)->location.type = 0;
BTRFS_I(inode)->location.offset = 0;
set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
btrfs_insert_inode_hash(inode);
}
@ -2475,25 +2376,18 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
fs_info->fixup_workers =
btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
/*
* endios are largely parallel and should have a very
* low idle thresh
*/
fs_info->endio_workers =
btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
alloc_workqueue("btrfs-endio", flags, max_active);
fs_info->endio_meta_workers =
btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
max_active, 4);
fs_info->endio_meta_write_workers =
btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
max_active, 2);
alloc_workqueue("btrfs-endio-meta", flags, max_active);
fs_info->endio_raid56_workers =
btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
max_active, 4);
alloc_workqueue("btrfs-endio-raid56", flags, max_active);
fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active);
fs_info->endio_write_workers =
btrfs_alloc_workqueue(fs_info, "endio-write", flags,
max_active, 2);
fs_info->compressed_write_workers =
alloc_workqueue("btrfs-compressed-write", flags, max_active);
fs_info->endio_freespace_worker =
btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
max_active, 0);
@ -2508,7 +2402,7 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
if (!(fs_info->workers && fs_info->hipri_workers &&
fs_info->delalloc_workers && fs_info->flush_workers &&
fs_info->endio_workers && fs_info->endio_meta_workers &&
fs_info->endio_meta_write_workers &&
fs_info->compressed_write_workers &&
fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
fs_info->endio_freespace_worker && fs_info->rmw_workers &&
fs_info->caching_workers && fs_info->fixup_workers &&
@ -2535,6 +2429,9 @@ static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
fs_info->csum_shash = csum_shash;
btrfs_info(fs_info, "using %s (%s) checksum algorithm",
btrfs_super_csum_name(csum_type),
crypto_shash_driver_name(csum_shash));
return 0;
}
@ -3253,6 +3150,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
init_waitqueue_head(&fs_info->transaction_blocked_wait);
init_waitqueue_head(&fs_info->async_submit_wait);
init_waitqueue_head(&fs_info->delayed_iputs_wait);
init_waitqueue_head(&fs_info->zone_finish_wait);
/* Usable values until the real ones are cached from the superblock */
fs_info->nodesize = 4096;
@ -3260,6 +3158,8 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
fs_info->sectorsize_bits = ilog2(4096);
fs_info->stripesize = 4096;
fs_info->max_extent_size = BTRFS_MAX_EXTENT_SIZE;
spin_lock_init(&fs_info->swapfile_pins_lock);
fs_info->swapfile_pins = RB_ROOT;
@ -3591,16 +3491,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
*/
fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
/*
* Flag our filesystem as having big metadata blocks if they are bigger
* than the page size.
*/
if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
btrfs_info(fs_info,
"flagging fs with big metadata feature");
features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
}
/* Set up fs_info before parsing mount options */
nodesize = btrfs_super_nodesize(disk_super);
@ -3638,8 +3528,12 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
btrfs_info(fs_info, "has skinny extents");
/*
* Flag our filesystem as having big metadata blocks if they are bigger
* than the page size.
*/
if (btrfs_super_nodesize(disk_super) > PAGE_SIZE)
features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
/*
* mixed block groups end up with duplicate but slightly offset
@ -3668,6 +3562,20 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
err = -EINVAL;
goto fail_alloc;
}
/*
* We have unsupported RO compat features, although RO mounted, we
* should not cause any metadata write, including log replay.
* Or we could screw up whatever the new feature requires.
*/
if (unlikely(features && btrfs_super_log_root(disk_super) &&
!btrfs_test_opt(fs_info, NOLOGREPLAY))) {
btrfs_err(fs_info,
"cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
features);
err = -EINVAL;
goto fail_alloc;
}
if (sectorsize < PAGE_SIZE) {
struct btrfs_subpage_info *subpage_info;


@ -17,13 +17,6 @@
*/
#define BTRFS_BDEV_BLOCKSIZE (4096)
enum btrfs_wq_endio_type {
BTRFS_WQ_ENDIO_DATA,
BTRFS_WQ_ENDIO_METADATA,
BTRFS_WQ_ENDIO_FREE_SPACE,
BTRFS_WQ_ENDIO_RAID56,
};
static inline u64 btrfs_sb_offset(int mirror)
{
u64 start = SZ_16K;
@ -121,11 +114,9 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
int atomic);
int btrfs_read_extent_buffer(struct extent_buffer *buf, u64 parent_transid,
int level, struct btrfs_key *first_key);
blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
enum btrfs_wq_endio_type metadata);
blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
int mirror_num, u64 dio_file_offset,
extent_submit_bio_start_t *submit_bio_start);
bool btrfs_wq_submit_bio(struct inode *inode, struct bio *bio, int mirror_num,
u64 dio_file_offset,
extent_submit_bio_start_t *submit_bio_start);
blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
int mirror_num);
int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
@ -145,8 +136,6 @@ int btree_lock_page_hook(struct page *page, void *data,
int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags);
int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid);
int btrfs_init_root_free_objectid(struct btrfs_root *root);
int __init btrfs_end_io_wq_init(void);
void __cold btrfs_end_io_wq_exit(void);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(u64 objectid,


@ -1269,7 +1269,7 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
return ret;
}
static int do_discard_extent(struct btrfs_io_stripe *stripe, u64 *bytes)
static int do_discard_extent(struct btrfs_discard_stripe *stripe, u64 *bytes)
{
struct btrfs_device *dev = stripe->dev;
struct btrfs_fs_info *fs_info = dev->fs_info;
@ -1316,76 +1316,60 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 discarded_bytes = 0;
u64 end = bytenr + num_bytes;
u64 cur = bytenr;
struct btrfs_io_context *bioc = NULL;
/*
* Avoid races with device replace and make sure our bioc has devices
* associated to its stripes that don't go away while we are discarding.
* Avoid races with device replace and make sure the devices in the
* stripes don't go away while we are discarding.
*/
btrfs_bio_counter_inc_blocked(fs_info);
while (cur < end) {
struct btrfs_io_stripe *stripe;
struct btrfs_discard_stripe *stripes;
unsigned int num_stripes;
int i;
num_bytes = end - cur;
/* Tell the block device(s) that the sectors can be discarded */
ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, cur,
&num_bytes, &bioc, 0);
/*
* Error can be -ENOMEM, -ENOENT (no such chunk mapping) or
* -EOPNOTSUPP. For any such error, @num_bytes is not updated,
* thus we can't continue anyway.
*/
if (ret < 0)
goto out;
stripes = btrfs_map_discard(fs_info, cur, &num_bytes, &num_stripes);
if (IS_ERR(stripes)) {
ret = PTR_ERR(stripes);
if (ret == -EOPNOTSUPP)
ret = 0;
break;
}
stripe = bioc->stripes;
for (i = 0; i < bioc->num_stripes; i++, stripe++) {
for (i = 0; i < num_stripes; i++) {
struct btrfs_discard_stripe *stripe = stripes + i;
u64 bytes;
struct btrfs_device *device = stripe->dev;
if (!device->bdev) {
if (!stripe->dev->bdev) {
ASSERT(btrfs_test_opt(fs_info, DEGRADED));
continue;
}
if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
&stripe->dev->dev_state))
continue;
ret = do_discard_extent(stripe, &bytes);
if (!ret) {
discarded_bytes += bytes;
} else if (ret != -EOPNOTSUPP) {
if (ret) {
/*
* Logic errors or -ENOMEM, or -EIO, but
* unlikely to happen.
*
* And since there are two loops, explicitly
* go to out to avoid confusion.
* Keep going if discard is not supported by the
* device.
*/
btrfs_put_bioc(bioc);
goto out;
if (ret != -EOPNOTSUPP)
break;
ret = 0;
} else {
discarded_bytes += bytes;
}
/*
* Just in case we get back EOPNOTSUPP for some reason,
* just ignore the return value so we don't screw up
* people calling discard_extent.
*/
ret = 0;
}
btrfs_put_bioc(bioc);
kfree(stripes);
if (ret)
break;
cur += num_bytes;
}
out:
btrfs_bio_counter_dec(fs_info);
if (actual_bytes)
*actual_bytes = discarded_bytes;
if (ret == -EOPNOTSUPP)
ret = 0;
return ret;
}
@ -3981,23 +3965,63 @@ static void found_extent(struct find_free_extent_ctl *ffe_ctl,
}
}
static bool can_allocate_chunk(struct btrfs_fs_info *fs_info,
struct find_free_extent_ctl *ffe_ctl)
static int can_allocate_chunk_zoned(struct btrfs_fs_info *fs_info,
struct find_free_extent_ctl *ffe_ctl)
{
/* If we can activate new zone, just allocate a chunk and use it */
if (btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags))
return 0;
/*
* We already reached the max active zones. Try to finish one block
* group to make a room for a new block group. This is only possible
* for a data block group because btrfs_zone_finish() may need to wait
* for a running transaction which can cause a deadlock for metadata
* allocation.
*/
if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
int ret = btrfs_zone_finish_one_bg(fs_info);
if (ret == 1)
return 0;
else if (ret < 0)
return ret;
}
/*
* If we have enough free space left in an already active block group
* and we can't activate any other zone now, do not allow allocating a
* new chunk and let find_free_extent() retry with a smaller size.
*/
if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size)
return -ENOSPC;
/*
* Even min_alloc_size is not left in any block groups. Since we cannot
* activate a new block group, allocating it may not help. Let's tell the
* caller to try again and hope it makes progress by writing some
* parts of the region. That is only possible for data block groups,
* where a part of the region can be written.
*/
if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA)
return -EAGAIN;
/*
* We cannot activate a new block group and there is not enough space left
* in any block group. So, allocating a new block group may not help. But,
* there is nothing to do anyway, so let's go with it.
*/
return 0;
}
static int can_allocate_chunk(struct btrfs_fs_info *fs_info,
struct find_free_extent_ctl *ffe_ctl)
{
switch (ffe_ctl->policy) {
case BTRFS_EXTENT_ALLOC_CLUSTERED:
return true;
return 0;
case BTRFS_EXTENT_ALLOC_ZONED:
/*
* If we have enough free space left in an already
* active block group and we can't activate any other
* zone now, do not allow allocating a new chunk and
* let find_free_extent() retry with a smaller size.
*/
if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size &&
!btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags))
return false;
return true;
return can_allocate_chunk_zoned(fs_info, ffe_ctl);
default:
BUG();
}
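can_allocate_chunk_zoned() above encodes a small decision ladder: activate a new zone if possible, otherwise try to finish a data block group, otherwise fall back to a smaller allocation (-ENOSPC) or ask the caller to retry (-EAGAIN). A reduced model of that ordering, with the zone state stubbed out as booleans, is sketched below; all names here are made up, and the error path of finishing a block group is skipped.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_alloc_state {
	bool can_activate_zone;		/* a new zone can still be activated */
	bool finished_one_data_bg;	/* a data block group could be finished */
	bool is_data;			/* allocating data, not metadata */
	bool have_min_alloc_size;	/* max_extent_size >= min_alloc_size */
};

static int toy_can_allocate_chunk_zoned(const struct toy_alloc_state *s)
{
	if (s->can_activate_zone)
		return 0;			/* just activate a new zone */
	if (s->is_data && s->finished_one_data_bg)
		return 0;			/* room was made by finishing a bg */
	if (s->have_min_alloc_size)
		return -ENOSPC;			/* retry with a smaller size */
	if (s->is_data)
		return -EAGAIN;			/* caller may still write parts */
	return 0;				/* metadata: nothing better to do */
}

int main(void)
{
	struct toy_alloc_state s = { false, false, true, false };

	printf("data, no zone to activate, nothing to finish -> %d (expect %d)\n",
	       toy_can_allocate_chunk_zoned(&s), -EAGAIN);
	return 0;
}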
@ -4079,8 +4103,9 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
int exist = 0;
/* Check if the allocation policy allows creating a new chunk */
if (!can_allocate_chunk(fs_info, ffe_ctl))
return -ENOSPC;
ret = can_allocate_chunk(fs_info, ffe_ctl);
if (ret)
return ret;
trans = current->journal_info;
if (trans)
@ -5992,7 +6017,7 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
*/
static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
{
u64 start = SZ_1M, len = 0, end = 0;
u64 start = BTRFS_DEVICE_RANGE_RESERVED, len = 0, end = 0;
int ret;
*trimmed = 0;
@ -6036,8 +6061,8 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
break;
}
/* Ensure we skip the reserved area in the first 1M */
start = max_t(u64, start, SZ_1M);
/* Ensure we skip the reserved space on each device. */
start = max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);
/*
* If find_first_clear_extent_bit find a range that spans the

File diff suppressed because it is too large


@ -57,6 +57,7 @@ enum {
#define BITMAP_LAST_BYTE_MASK(nbits) \
(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
struct btrfs_bio;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
@ -142,15 +143,10 @@ static inline void extent_changeset_free(struct extent_changeset *changeset)
struct extent_map_tree;
typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
struct page *page, size_t pg_offset,
u64 start, u64 len);
int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int btrfs_read_folio(struct file *file, struct folio *folio);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end);
int extent_writepages(struct address_space *mapping,
struct writeback_control *wbc);
@ -247,7 +243,6 @@ void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array);
struct bio *btrfs_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct block_device *bdev, struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
@ -266,15 +261,13 @@ struct io_failure_record {
u64 start;
u64 len;
u64 logical;
enum btrfs_compression_type compress_type;
int this_mirror;
int failed_mirror;
int num_copies;
};
int btrfs_repair_one_sector(struct inode *inode,
struct bio *failed_bio, u32 bio_offset,
struct page *page, unsigned int pgoff,
u64 start, int failed_mirror,
int btrfs_repair_one_sector(struct inode *inode, struct btrfs_bio *failed_bbio,
u32 bio_offset, struct page *page, unsigned int pgoff,
submit_bio_hook_t *submit_bio_hook);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS


@ -1954,11 +1954,25 @@ again:
btrfs_inode_unlock(inode, ilock_flags);
/* If 'err' is -ENOTBLK then it means we must fallback to buffered IO. */
/*
* If 'err' is -ENOTBLK or we have not written all data, then it means
* we must fallback to buffered IO.
*/
if ((err < 0 && err != -ENOTBLK) || !iov_iter_count(from))
goto out;
buffered:
/*
* If we are in a NOWAIT context, then return -EAGAIN to signal the caller
* it must retry the operation in a context where blocking is acceptable,
* since we currently don't have NOWAIT semantics support for buffered IO
* and may block there for many reasons (reserving space for example).
*/
if (iocb->ki_flags & IOCB_NOWAIT) {
err = -EAGAIN;
goto out;
}
pos = iocb->ki_pos;
written_buffered = btrfs_buffered_write(iocb, from);
if (written_buffered < 0) {
@ -2041,9 +2055,11 @@ ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
num_written = btrfs_encoded_write(iocb, from, encoded);
num_sync = encoded->len;
} else if (iocb->ki_flags & IOCB_DIRECT) {
num_written = num_sync = btrfs_direct_write(iocb, from);
num_written = btrfs_direct_write(iocb, from);
num_sync = num_written;
} else {
num_written = num_sync = btrfs_buffered_write(iocb, from);
num_written = btrfs_buffered_write(iocb, from);
num_sync = num_written;
}
btrfs_set_inode_last_sub_trans(inode);
@ -2291,7 +2307,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
btrfs_release_log_ctx_extents(&ctx);
if (ret < 0) {
/* Fallthrough and commit/free transaction. */
ret = 1;
ret = BTRFS_LOG_FORCE_COMMIT;
}
/* we've logged all the items and now have a consistent
@ -2717,7 +2733,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
goto out;
}
rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
rsv->failfast = 1;
rsv->failfast = true;
/*
* 1 - update the inode
@ -3083,7 +3099,8 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
ASSERT(trans != NULL);
inode_inc_iversion(inode);
inode->i_mtime = inode->i_ctime = current_time(inode);
inode->i_mtime = current_time(inode);
inode->i_ctime = inode->i_mtime;
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
updated_inode = true;
btrfs_end_transaction(trans);


@ -3536,7 +3536,8 @@ int btrfs_find_space_cluster(struct btrfs_block_group *block_group,
* data, keep it dense.
*/
if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
cont1_bytes = min_bytes = bytes + empty_size;
cont1_bytes = bytes + empty_size;
min_bytes = cont1_bytes;
} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
cont1_bytes = bytes;
min_bytes = fs_info->sectorsize;

File diff suppressed because it is too large


@ -1230,16 +1230,18 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
return em;
}
static u32 get_extent_max_capacity(const struct extent_map *em)
static u32 get_extent_max_capacity(const struct btrfs_fs_info *fs_info,
const struct extent_map *em)
{
if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
return BTRFS_MAX_COMPRESSED;
return BTRFS_MAX_EXTENT_SIZE;
return fs_info->max_extent_size;
}
static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
u32 extent_thresh, u64 newer_than, bool locked)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map *next;
bool ret = false;
@ -1263,7 +1265,7 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
* If the next extent is at its max capacity, defragging current extent
* makes no sense, as the total number of extents won't change.
*/
if (next->len >= get_extent_max_capacity(em))
if (next->len >= get_extent_max_capacity(fs_info, em))
goto out;
/* Skip older extent */
if (next->generation < newer_than)
@ -1400,6 +1402,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
bool locked, struct list_head *target_list,
u64 *last_scanned_ret)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
bool last_is_target = false;
u64 cur = start;
int ret = 0;
@ -1484,7 +1487,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
* Skip extents already at its max capacity, this is mostly for
* compressed extents, which max cap is only 128K.
*/
if (em->len >= get_extent_max_capacity(em))
if (em->len >= get_extent_max_capacity(fs_info, em))
goto next;
/*
@ -4243,26 +4246,6 @@ out:
return ret;
}
static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
{
struct btrfs_data_container *inodes = ctx;
const size_t c = 3 * sizeof(u64);
if (inodes->bytes_left >= c) {
inodes->bytes_left -= c;
inodes->val[inodes->elem_cnt] = inum;
inodes->val[inodes->elem_cnt + 1] = offset;
inodes->val[inodes->elem_cnt + 2] = root;
inodes->elem_cnt += 3;
} else {
inodes->bytes_missing += c - inodes->bytes_left;
inodes->bytes_left = 0;
inodes->elem_missed += 3;
}
return 0;
}
static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
void __user *arg, int version)
{
@ -4312,7 +4295,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
}
ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
build_ino_list, inodes, ignore_offset);
inodes, ignore_offset);
if (ret == -EINVAL)
ret = -ENOENT;
if (ret < 0)
@ -4355,13 +4338,79 @@ void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
spin_unlock(&fs_info->balance_lock);
}
/**
* Try to acquire fs_info::balance_mutex as well as set BTRFS_EXCLOP_BALANCE as
* required.
*
* @fs_info: the filesystem
* @excl_acquired: ptr to boolean value which is set to false in case balance
* is being resumed
*
* Return 0 on success, in which case fs_info::balance_mutex is acquired and
* exclusive ops are blocked. In case of failure return an error code.
*/
static int btrfs_try_lock_balance(struct btrfs_fs_info *fs_info, bool *excl_acquired)
{
int ret;
/*
* Exclusive operation is locked. Three possibilities:
* (1) some other op is running
* (2) balance is running
* (3) balance is paused -- special case (think resume)
*/
while (1) {
if (btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
*excl_acquired = true;
mutex_lock(&fs_info->balance_mutex);
return 0;
}
mutex_lock(&fs_info->balance_mutex);
if (fs_info->balance_ctl) {
/* This is either (2) or (3) */
if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
/* This is (2) */
ret = -EINPROGRESS;
goto out_failure;
} else {
mutex_unlock(&fs_info->balance_mutex);
/*
* Lock released to allow other waiters to
* continue, we'll reexamine the status again.
*/
mutex_lock(&fs_info->balance_mutex);
if (fs_info->balance_ctl &&
!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
/* This is (3) */
*excl_acquired = false;
return 0;
}
}
} else {
/* This is (1) */
ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
goto out_failure;
}
mutex_unlock(&fs_info->balance_mutex);
}
out_failure:
mutex_unlock(&fs_info->balance_mutex);
*excl_acquired = false;
return ret;
}
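The helper above distinguishes the three cases its comment lists. A toy model of just that decision, with the retry loop, mutexes and re-checks omitted and the filesystem state reduced to plain fields, could be written as follows; all names and values here are made up (TOY_EXCL_OP_IN_PROGRESS stands in for the real device-exclusive error code).

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_EXCL_OP_IN_PROGRESS 1	/* placeholder for the real error code */

struct toy_fs_state {
	bool excl_op_running;	/* some exclusive op already holds the slot */
	bool balance_ctl;	/* a balance control structure exists */
	bool balance_running;	/* that balance is currently running */
};

static int toy_try_lock_balance(struct toy_fs_state *fs, bool *excl_acquired)
{
	*excl_acquired = false;

	if (!fs->excl_op_running) {
		/* Nothing else is running: become the exclusive operation. */
		fs->excl_op_running = true;
		*excl_acquired = true;
		return 0;
	}
	if (!fs->balance_ctl)
		return TOY_EXCL_OP_IN_PROGRESS;	/* case (1): other op running */
	if (fs->balance_running)
		return -EINPROGRESS;		/* case (2): balance running */
	return 0;				/* case (3): paused, resume it */
}

int main(void)
{
	struct toy_fs_state fs = { true, true, false };
	bool excl;
	int ret = toy_try_lock_balance(&fs, &excl);

	printf("paused balance -> ret %d, excl_acquired %d\n", ret, excl);
	return 0;
}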
static long btrfs_ioctl_balance(struct file *file, void __user *arg)
{
struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_balance_args *bargs;
struct btrfs_balance_control *bctl;
bool need_unlock; /* for mut. excl. ops lock */
bool need_unlock = true;
int ret;
if (!capable(CAP_SYS_ADMIN))
@ -4378,53 +4427,12 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
goto out;
}
again:
if (btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
mutex_lock(&fs_info->balance_mutex);
need_unlock = true;
goto locked;
}
/*
* mut. excl. ops lock is locked. Three possibilities:
* (1) some other op is running
* (2) balance is running
* (3) balance is paused -- special case (think resume)
*/
mutex_lock(&fs_info->balance_mutex);
if (fs_info->balance_ctl) {
/* this is either (2) or (3) */
if (!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
mutex_unlock(&fs_info->balance_mutex);
/*
* Lock released to allow other waiters to continue,
* we'll reexamine the status again.
*/
mutex_lock(&fs_info->balance_mutex);
if (fs_info->balance_ctl &&
!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
/* this is (3) */
need_unlock = false;
goto locked;
}
mutex_unlock(&fs_info->balance_mutex);
goto again;
} else {
/* this is (2) */
mutex_unlock(&fs_info->balance_mutex);
ret = -EINPROGRESS;
goto out;
}
} else {
/* this is (1) */
mutex_unlock(&fs_info->balance_mutex);
ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
ret = btrfs_try_lock_balance(fs_info, &need_unlock);
if (ret)
goto out;
}
locked:
lockdep_assert_held(&fs_info->balance_mutex);
if (bargs->flags & BTRFS_BALANCE_RESUME) {
if (!fs_info->balance_ctl) {
ret = -ENOTCONN;


@ -155,7 +155,7 @@ static int copy_compressed_data_to_page(char *compressed_data,
out_pages[*cur_out / PAGE_SIZE] = cur_page;
}
kaddr = kmap(cur_page);
kaddr = kmap_local_page(cur_page);
write_compress_length(kaddr + offset_in_page(*cur_out),
compressed_size);
*cur_out += LZO_LEN;
@ -167,7 +167,7 @@ static int copy_compressed_data_to_page(char *compressed_data,
u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
orig_out + compressed_size - *cur_out);
kunmap(cur_page);
kunmap_local(kaddr);
if ((*cur_out / PAGE_SIZE) >= max_nr_page)
return -E2BIG;
@ -180,7 +180,7 @@ static int copy_compressed_data_to_page(char *compressed_data,
return -ENOMEM;
out_pages[*cur_out / PAGE_SIZE] = cur_page;
}
kaddr = kmap(cur_page);
kaddr = kmap_local_page(cur_page);
memcpy(kaddr + offset_in_page(*cur_out),
compressed_data + *cur_out - orig_out, copy_len);
@ -202,7 +202,7 @@ static int copy_compressed_data_to_page(char *compressed_data,
*cur_out += sector_bytes_left;
out:
kunmap(cur_page);
kunmap_local(kaddr);
return 0;
}
@ -248,12 +248,12 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
/* Compress at most one sector of data each time */
in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
ASSERT(in_len);
data_in = kmap(page_in);
data_in = kmap_local_page(page_in);
ret = lzo1x_1_compress(data_in +
offset_in_page(cur_in), in_len,
workspace->cbuf, &out_len,
workspace->mem);
kunmap(page_in);
kunmap_local(data_in);
if (ret < 0) {
pr_debug("BTRFS: lzo in loop returned %d\n", ret);
ret = -EIO;
@ -310,7 +310,6 @@ static void copy_compressed_segment(struct compressed_bio *cb,
u32 orig_in = *cur_in;
while (*cur_in < orig_in + len) {
char *kaddr;
struct page *cur_page;
u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
orig_in + len - *cur_in);
@ -318,11 +317,8 @@ static void copy_compressed_segment(struct compressed_bio *cb,
ASSERT(copy_len);
cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];
kaddr = kmap(cur_page);
memcpy(dest + *cur_in - orig_in,
kaddr + offset_in_page(*cur_in),
copy_len);
kunmap(cur_page);
memcpy_from_page(dest + *cur_in - orig_in, cur_page,
offset_in_page(*cur_in), copy_len);
*cur_in += copy_len;
}
@ -342,9 +338,9 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
/* Bytes decompressed so far */
u32 cur_out = 0;
kaddr = kmap(cb->compressed_pages[0]);
kaddr = kmap_local_page(cb->compressed_pages[0]);
len_in = read_compress_length(kaddr);
kunmap(cb->compressed_pages[0]);
kunmap_local(kaddr);
cur_in += LZO_LEN;
/*
@ -378,9 +374,9 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
(cur_in + LZO_LEN - 1) / sectorsize);
cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
ASSERT(cur_page);
kaddr = kmap(cur_page);
kaddr = kmap_local_page(cur_page);
seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
kunmap(cur_page);
kunmap_local(kaddr);
cur_in += LZO_LEN;
if (seg_len > WORKSPACE_CBUF_LENGTH) {


@ -272,25 +272,30 @@ void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
spin_unlock_irq(&tree->lock);
}
static void finish_ordered_fn(struct btrfs_work *work)
{
struct btrfs_ordered_extent *ordered_extent;
ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
btrfs_finish_ordered_io(ordered_extent);
}
/*
* Mark all ordered extents io inside the specified range finished.
*
* @page: The invovled page for the opeartion.
* @page: The involved page for the operation.
* For uncompressed buffered IO, the page status also needs to be
* updated to indicate whether the pending ordered io is finished.
* Can be NULL for direct IO and compressed write.
* For these cases, callers are ensured they won't execute the
* endio function twice.
* @finish_func: The function to be executed when all the IO of an ordered
* extent are finished.
*
* This function is called for endio, thus the range must have ordered
* extent(s) coveri it.
* extent(s) covering it.
*/
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
struct page *page, u64 file_offset,
u64 num_bytes, btrfs_func_t finish_func,
bool uptodate)
struct page *page, u64 file_offset,
u64 num_bytes, bool uptodate)
{
struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
@ -401,8 +406,9 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
cond_wake_up(&entry->wait);
refcount_inc(&entry->refs);
trace_btrfs_ordered_extent_mark_finished(inode, entry);
spin_unlock_irqrestore(&tree->lock, flags);
btrfs_init_work(&entry->work, finish_func, NULL, NULL);
btrfs_init_work(&entry->work, finish_ordered_fn, NULL, NULL);
btrfs_queue_work(wq, &entry->work);
spin_lock_irqsave(&tree->lock, flags);
}
@ -473,6 +479,7 @@ out:
if (finished && cached && entry) {
*cached = entry;
refcount_inc(&entry->refs);
trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
}
spin_unlock_irqrestore(&tree->lock, flags);
return finished;
@ -807,8 +814,10 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *ino
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
entry = NULL;
if (entry)
if (entry) {
refcount_inc(&entry->refs);
trace_btrfs_ordered_extent_lookup(inode, entry);
}
out:
spin_unlock_irqrestore(&tree->lock, flags);
return entry;
@ -848,8 +857,10 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
break;
}
out:
if (entry)
if (entry) {
refcount_inc(&entry->refs);
trace_btrfs_ordered_extent_lookup_range(inode, entry);
}
spin_unlock_irq(&tree->lock);
return entry;
}
@ -878,6 +889,7 @@ void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
ASSERT(list_empty(&ordered->log_list));
list_add_tail(&ordered->log_list, list);
refcount_inc(&ordered->refs);
trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
}
spin_unlock_irq(&tree->lock);
}
@ -901,6 +913,7 @@ btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
refcount_inc(&entry->refs);
trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
spin_unlock_irq(&tree->lock);
return entry;
@ -975,8 +988,11 @@ struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
/* No ordered extent in the range */
entry = NULL;
out:
if (entry)
if (entry) {
refcount_inc(&entry->refs);
trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
}
spin_unlock_irq(&tree->lock);
return entry;
}
@ -1055,6 +1071,8 @@ int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret = 0;
trace_btrfs_ordered_extent_split(BTRFS_I(inode), ordered);
spin_lock_irq(&tree->lock);
/* Remove from tree once */
node = &ordered->rb_node;


@ -180,13 +180,14 @@ btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
t->last = NULL;
}
int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
struct btrfs_ordered_extent *entry);
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
struct page *page, u64 file_offset,
u64 num_bytes, btrfs_func_t finish_func,
bool uptodate);
u64 num_bytes, bool uptodate);
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
struct btrfs_ordered_extent **cached,
u64 file_offset, u64 io_size);

File diff suppressed because it is too large


@ -7,45 +7,179 @@
#ifndef BTRFS_RAID56_H
#define BTRFS_RAID56_H
static inline int nr_parity_stripes(const struct map_lookup *map)
{
if (map->type & BTRFS_BLOCK_GROUP_RAID5)
return 1;
else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
return 2;
else
return 0;
}
#include <linux/workqueue.h>
#include "volumes.h"
enum btrfs_rbio_ops {
BTRFS_RBIO_WRITE,
BTRFS_RBIO_READ_REBUILD,
BTRFS_RBIO_PARITY_SCRUB,
BTRFS_RBIO_REBUILD_MISSING,
};
struct btrfs_raid_bio {
struct btrfs_io_context *bioc;
/*
* While we're doing RMW on a stripe we put it into a hash table so we
* can lock the stripe and merge more rbios into it.
*/
struct list_head hash_list;
/* LRU list for the stripe cache */
struct list_head stripe_cache;
/* For scheduling work in the helper threads */
struct work_struct work;
/*
* bio_list and bio_list_lock are used to add more bios into the stripe
* in hopes of avoiding the full RMW
*/
struct bio_list bio_list;
spinlock_t bio_list_lock;
/*
* Also protected by the bio_list_lock, the plug list is used by the
* plugging code to collect partial bios while plugged. The stripe
* locking code also uses it to hand off the stripe lock to the next
* pending IO.
*/
struct list_head plug_list;
/* Flags that tell us if it is safe to merge with this bio. */
unsigned long flags;
/*
* Set if we're doing a parity rebuild for a read from higher up, which
* is handled differently from a parity rebuild as part of RMW.
*/
enum btrfs_rbio_ops operation;
/* How many pages there are for the full stripe including P/Q */
u16 nr_pages;
/* How many sectors there are for the full stripe including P/Q */
u16 nr_sectors;
/* Number of data stripes (no p/q) */
u8 nr_data;
/* Number of all stripes (including P/Q) */
u8 real_stripes;
/* How many pages there are for each stripe */
u8 stripe_npages;
/* How many sectors there are for each stripe */
u8 stripe_nsectors;
/* First bad stripe, -1 means no corruption */
s8 faila;
/* Second bad stripe (for RAID6 use) */
s8 failb;
/* Stripe number that we're scrubbing */
u8 scrubp;
/*
* Size of all the bios in the bio_list. This helps us decide if the
* rbio maps to a full stripe or not.
*/
int bio_list_bytes;
int generic_bio_cnt;
refcount_t refs;
atomic_t stripes_pending;
atomic_t error;
struct work_struct end_io_work;
/* Bitmap to record which horizontal stripe has data */
unsigned long dbitmap;
/* Allocated with stripe_nsectors-many bits for finish_*() calls */
unsigned long finish_pbitmap;
/*
* These are two arrays of pointers. We allocate the rbio big enough
* to hold them both and setup their locations when the rbio is
* allocated.
*/
/*
* Pointers to pages that we allocated for reading/writing stripes
* directly from the disk (including P/Q).
*/
struct page **stripe_pages;
/* Pointers to the sectors in the bio_list, for faster lookup */
struct sector_ptr *bio_sectors;
/*
* For subpage support, we need to map each sector to above
* stripe_pages.
*/
struct sector_ptr *stripe_sectors;
/* Allocated with real_stripes-many pointers for finish_*() calls */
void **finish_pointers;
};
/*
* For trace event usage only. Records useful debug info for each bio submitted
* by RAID56 to each physical device.
*
* Signed or not, (-1) always indicates that we could not grab
* the proper stripe number.
*/
struct raid56_bio_trace_info {
u64 devid;
/* The offset inside the stripe. (<= STRIPE_LEN) */
u32 offset;
/*
* Stripe number.
* 0 is the first data stripe, and nr_data for P stripe,
* nr_data + 1 for Q stripe.
* >= real_stripes for
*/
u8 stripe_nr;
};
static inline int nr_data_stripes(const struct map_lookup *map)
{
return map->num_stripes - nr_parity_stripes(map);
return map->num_stripes - btrfs_nr_parity_stripes(map->type);
}
#define RAID5_P_STRIPE ((u64)-2)
#define RAID6_Q_STRIPE ((u64)-1)
#define is_parity_stripe(x) (((x) == RAID5_P_STRIPE) || \
((x) == RAID6_Q_STRIPE))
struct btrfs_raid_bio;
struct btrfs_device;
int raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
u32 stripe_len, int mirror_num, int generic_io);
int raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc, u32 stripe_len);
void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
int mirror_num, bool generic_io);
void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc);
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
unsigned int pgoff, u64 logical);
struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
struct btrfs_io_context *bioc, u32 stripe_len,
struct btrfs_io_context *bioc,
struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors);
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc,
u64 length);
raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc);
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio);
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);


@ -5,6 +5,7 @@
#include "compression.h"
#include "ctree.h"
#include "delalloc-space.h"
#include "disk-io.h"
#include "reflink.h"
#include "transaction.h"
#include "subpage.h"
@ -22,8 +23,10 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
int ret;
inode_inc_iversion(inode);
if (!no_time_update)
inode->i_mtime = inode->i_ctime = current_time(inode);
if (!no_time_update) {
inode->i_mtime = current_time(inode);
inode->i_ctime = inode->i_mtime;
}
/*
* We round up to the block size at eof when determining which
* extents to clone above, but shouldn't round up the file size.
@ -110,7 +113,6 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
if (comp_type == BTRFS_COMPRESS_NONE) {
memcpy_to_page(page, offset_in_page(file_offset), data_start,
datal);
flush_dcache_page(page);
} else {
ret = btrfs_decompress(comp_type, data_start, page,
offset_in_page(file_offset),
@ -132,10 +134,8 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
*
* So what's in the range [500, 4095] corresponds to zeroes.
*/
if (datal < block_size) {
if (datal < block_size)
memzero_page(page, datal, block_size - datal);
flush_dcache_page(page);
}
btrfs_page_set_uptodate(fs_info, page, file_offset, block_size);
btrfs_page_clear_checked(fs_info, page, file_offset, block_size);
@ -658,7 +658,8 @@ static void btrfs_double_mmap_unlock(struct inode *inode1, struct inode *inode2)
static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
struct inode *dst, u64 dst_loff)
{
const u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
struct btrfs_fs_info *fs_info = BTRFS_I(src)->root->fs_info;
const u64 bs = fs_info->sb->s_blocksize;
int ret;
/*
@ -669,6 +670,8 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
btrfs_btree_balance_dirty(fs_info);
return ret;
}
@ -778,6 +781,8 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
round_down(destoff, PAGE_SIZE),
round_up(destoff + len, PAGE_SIZE) - 1);
btrfs_btree_balance_dirty(fs_info);
return ret;
}

View File

@ -135,15 +135,13 @@ struct scrub_parity {
struct work_struct work;
/* Mark the parity blocks which have data */
unsigned long *dbitmap;
unsigned long dbitmap;
/*
* Mark the parity blocks which have data, but errors happen when
* read data or check data
*/
unsigned long *ebitmap;
unsigned long bitmap[];
unsigned long ebitmap;
};
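A quick sanity check of why plain unsigned long members are enough here (my reading of the hunks, not wording from the patch): the bitmaps track one bit per sector of a single BTRFS_STRIPE_LEN (64 KiB) stripe, and with the smallest supported 4 KiB sector size that is only 16 bits, comfortably within BITS_PER_LONG. The ASSERT(nsectors <= BITS_PER_LONG) added in scrub_raid56_parity() below encodes the same assumption, which is what allows the separately allocated dbitmap/ebitmap arrays to be dropped.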
struct scrub_ctx {
@ -1218,7 +1216,6 @@ static inline int scrub_nr_raid_mirrors(struct btrfs_io_context *bioc)
static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
u64 *raid_map,
u64 mapped_length,
int nstripes, int mirror,
int *stripe_index,
u64 *stripe_offset)
@ -1233,7 +1230,7 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
continue;
if (logical >= raid_map[i] &&
logical < raid_map[i] + mapped_length)
logical < raid_map[i] + BTRFS_STRIPE_LEN)
break;
}
@ -1337,7 +1334,6 @@ leave_nomem:
scrub_stripe_index_and_offset(logical,
bioc->map_type,
bioc->raid_map,
mapped_length,
bioc->num_stripes -
bioc->num_tgtdevs,
mirror_index,
@ -1380,19 +1376,12 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
struct scrub_sector *sector)
{
DECLARE_COMPLETION_ONSTACK(done);
int ret;
int mirror_num;
bio->bi_iter.bi_sector = sector->logical >> 9;
bio->bi_private = &done;
bio->bi_end_io = scrub_bio_wait_endio;
mirror_num = sector->sblock->sectors[0]->mirror_num;
ret = raid56_parity_recover(bio, sector->recover->bioc,
sector->recover->map_length,
mirror_num, 0);
if (ret)
return ret;
raid56_parity_recover(bio, sector->recover->bioc,
sector->sblock->sectors[0]->mirror_num, false);
wait_for_completion_io(&done);
return blk_status_to_errno(bio->bi_status);
@ -2197,7 +2186,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
bio->bi_private = sblock;
bio->bi_end_io = scrub_missing_raid56_end_io;
rbio = raid56_alloc_missing_rbio(bio, bioc, length);
rbio = raid56_alloc_missing_rbio(bio, bioc);
if (!rbio)
goto rbio_out;
@ -2406,13 +2395,13 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
u64 start, u32 len)
{
__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
__scrub_mark_bitmap(sparity, &sparity->ebitmap, start, len);
}
static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
u64 start, u32 len)
{
__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
__scrub_mark_bitmap(sparity, &sparity->dbitmap, start, len);
}
static void scrub_block_complete(struct scrub_block *sblock)
@ -2763,7 +2752,7 @@ static void scrub_free_parity(struct scrub_parity *sparity)
struct scrub_sector *curr, *next;
int nbits;
nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
nbits = bitmap_weight(&sparity->ebitmap, sparity->nsectors);
if (nbits) {
spin_lock(&sctx->stat_lock);
sctx->stat.read_errors += nbits;
@ -2795,8 +2784,8 @@ static void scrub_parity_bio_endio(struct bio *bio)
struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
if (bio->bi_status)
bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
sparity->nsectors);
bitmap_or(&sparity->ebitmap, &sparity->ebitmap,
&sparity->dbitmap, sparity->nsectors);
bio_put(bio);
@ -2814,8 +2803,8 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
u64 length;
int ret;
if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
sparity->nsectors))
if (!bitmap_andnot(&sparity->dbitmap, &sparity->dbitmap,
&sparity->ebitmap, sparity->nsectors))
goto out;
length = sparity->logic_end - sparity->logic_start;
@ -2831,9 +2820,9 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
bio->bi_private = sparity;
bio->bi_end_io = scrub_parity_bio_endio;
rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, length,
rbio = raid56_parity_alloc_scrub_rbio(bio, bioc,
sparity->scrub_dev,
sparity->dbitmap,
&sparity->dbitmap,
sparity->nsectors);
if (!rbio)
goto rbio_out;
@ -2847,7 +2836,7 @@ rbio_out:
bioc_out:
btrfs_bio_counter_dec(fs_info);
btrfs_put_bioc(bioc);
bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
bitmap_or(&sparity->ebitmap, &sparity->ebitmap, &sparity->dbitmap,
sparity->nsectors);
spin_lock(&sctx->stat_lock);
sctx->stat.malloc_errors++;
@ -2856,11 +2845,6 @@ out:
scrub_free_parity(sparity);
}
static inline int scrub_calc_parity_bitmap_len(int nsectors)
{
return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
}
static void scrub_parity_get(struct scrub_parity *sparity)
{
refcount_inc(&sparity->refs);
@ -3131,7 +3115,6 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
int ret;
struct scrub_parity *sparity;
int nsectors;
int bitmap_len;
path = btrfs_alloc_path();
if (!path) {
@ -3145,9 +3128,8 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
ASSERT(map->stripe_len <= U32_MAX);
nsectors = map->stripe_len >> fs_info->sectorsize_bits;
bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
GFP_NOFS);
ASSERT(nsectors <= BITS_PER_LONG);
sparity = kzalloc(sizeof(struct scrub_parity), GFP_NOFS);
if (!sparity) {
spin_lock(&sctx->stat_lock);
sctx->stat.malloc_errors++;
@ -3165,8 +3147,6 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
sparity->logic_end = logic_end;
refcount_set(&sparity->refs, 1);
INIT_LIST_HEAD(&sparity->sectors_list);
sparity->dbitmap = sparity->bitmap;
sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
ret = 0;
for (cur_logical = logic_start; cur_logical < logic_end;
@ -3429,20 +3409,22 @@ static int scrub_simple_stripe(struct scrub_ctx *sctx,
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
struct btrfs_block_group *bg,
struct map_lookup *map,
struct extent_map *em,
struct btrfs_device *scrub_dev,
int stripe_index, u64 dev_extent_len)
int stripe_index)
{
struct btrfs_path *path;
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_root *root;
struct btrfs_root *csum_root;
struct blk_plug plug;
struct map_lookup *map = em->map_lookup;
const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
const u64 chunk_logical = bg->start;
int ret;
u64 physical = map->stripes[stripe_index].physical;
const u64 physical_end = physical + dev_extent_len;
const u64 dev_stripe_len = btrfs_calc_stripe_length(em);
const u64 physical_end = physical + dev_stripe_len;
u64 logical;
u64 logic_end;
/* The logical increment after finishing one stripe */
@ -3569,8 +3551,8 @@ next:
physical += map->stripe_len;
spin_lock(&sctx->stat_lock);
if (stop_loop)
sctx->stat.last_physical = map->stripes[stripe_index].physical +
dev_extent_len;
sctx->stat.last_physical =
map->stripes[stripe_index].physical + dev_stripe_len;
else
sctx->stat.last_physical = physical;
spin_unlock(&sctx->stat_lock);
@ -3639,8 +3621,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
for (i = 0; i < map->num_stripes; ++i) {
if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
map->stripes[i].physical == dev_offset) {
ret = scrub_stripe(sctx, bg, map, scrub_dev, i,
dev_extent_len);
ret = scrub_stripe(sctx, bg, em, scrub_dev, i);
if (ret)
goto out;
}

File diff suppressed because it is too large

View File

@ -7,12 +7,19 @@
#ifndef BTRFS_SEND_H
#define BTRFS_SEND_H
#include "ctree.h"
#include <linux/types.h>
#define BTRFS_SEND_STREAM_MAGIC "btrfs-stream"
#define BTRFS_SEND_STREAM_VERSION 1
#define BTRFS_SEND_STREAM_VERSION 2
#define BTRFS_SEND_BUF_SIZE SZ_64K
/*
* In send stream v1, no command is larger than 64K. In send stream v2, no limit
* should be assumed.
*/
#define BTRFS_SEND_BUF_SIZE_V1 SZ_64K
struct inode;
struct btrfs_ioctl_send_args;
enum btrfs_tlv_type {
BTRFS_TLV_U8,
@ -46,87 +53,117 @@ struct btrfs_tlv_header {
/* commands */
enum btrfs_send_cmd {
BTRFS_SEND_C_UNSPEC,
BTRFS_SEND_C_UNSPEC = 0,
/* Version 1 */
BTRFS_SEND_C_SUBVOL,
BTRFS_SEND_C_SNAPSHOT,
BTRFS_SEND_C_SUBVOL = 1,
BTRFS_SEND_C_SNAPSHOT = 2,
BTRFS_SEND_C_MKFILE,
BTRFS_SEND_C_MKDIR,
BTRFS_SEND_C_MKNOD,
BTRFS_SEND_C_MKFIFO,
BTRFS_SEND_C_MKSOCK,
BTRFS_SEND_C_SYMLINK,
BTRFS_SEND_C_MKFILE = 3,
BTRFS_SEND_C_MKDIR = 4,
BTRFS_SEND_C_MKNOD = 5,
BTRFS_SEND_C_MKFIFO = 6,
BTRFS_SEND_C_MKSOCK = 7,
BTRFS_SEND_C_SYMLINK = 8,
BTRFS_SEND_C_RENAME,
BTRFS_SEND_C_LINK,
BTRFS_SEND_C_UNLINK,
BTRFS_SEND_C_RMDIR,
BTRFS_SEND_C_RENAME = 9,
BTRFS_SEND_C_LINK = 10,
BTRFS_SEND_C_UNLINK = 11,
BTRFS_SEND_C_RMDIR = 12,
BTRFS_SEND_C_SET_XATTR,
BTRFS_SEND_C_REMOVE_XATTR,
BTRFS_SEND_C_SET_XATTR = 13,
BTRFS_SEND_C_REMOVE_XATTR = 14,
BTRFS_SEND_C_WRITE,
BTRFS_SEND_C_CLONE,
BTRFS_SEND_C_WRITE = 15,
BTRFS_SEND_C_CLONE = 16,
BTRFS_SEND_C_TRUNCATE,
BTRFS_SEND_C_CHMOD,
BTRFS_SEND_C_CHOWN,
BTRFS_SEND_C_UTIMES,
BTRFS_SEND_C_TRUNCATE = 17,
BTRFS_SEND_C_CHMOD = 18,
BTRFS_SEND_C_CHOWN = 19,
BTRFS_SEND_C_UTIMES = 20,
BTRFS_SEND_C_END,
BTRFS_SEND_C_UPDATE_EXTENT,
__BTRFS_SEND_C_MAX_V1,
BTRFS_SEND_C_END = 21,
BTRFS_SEND_C_UPDATE_EXTENT = 22,
BTRFS_SEND_C_MAX_V1 = 22,
/* Version 2 */
__BTRFS_SEND_C_MAX_V2,
BTRFS_SEND_C_FALLOCATE = 23,
BTRFS_SEND_C_FILEATTR = 24,
BTRFS_SEND_C_ENCODED_WRITE = 25,
BTRFS_SEND_C_MAX_V2 = 25,
/* End */
__BTRFS_SEND_C_MAX,
BTRFS_SEND_C_MAX = 25,
};
#define BTRFS_SEND_C_MAX (__BTRFS_SEND_C_MAX - 1)
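The per-version maxima above make it easy for a stream consumer to reject commands that are newer than the negotiated protocol version. A minimal sketch, using a hypothetical helper name (this is not code from the kernel):

static bool send_cmd_valid_for_version(u16 cmd, u32 version)
{
	if (cmd == BTRFS_SEND_C_UNSPEC)
		return false;
	switch (version) {
	case 1:
		return cmd <= BTRFS_SEND_C_MAX_V1;	/* commands 1..22 */
	case 2:
		return cmd <= BTRFS_SEND_C_MAX_V2;	/* adds fallocate, fileattr, encoded write */
	default:
		return false;
	}
}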
/* attributes in send stream */
enum {
BTRFS_SEND_A_UNSPEC,
BTRFS_SEND_A_UNSPEC = 0,
BTRFS_SEND_A_UUID,
BTRFS_SEND_A_CTRANSID,
/* Version 1 */
BTRFS_SEND_A_UUID = 1,
BTRFS_SEND_A_CTRANSID = 2,
BTRFS_SEND_A_INO,
BTRFS_SEND_A_SIZE,
BTRFS_SEND_A_MODE,
BTRFS_SEND_A_UID,
BTRFS_SEND_A_GID,
BTRFS_SEND_A_RDEV,
BTRFS_SEND_A_CTIME,
BTRFS_SEND_A_MTIME,
BTRFS_SEND_A_ATIME,
BTRFS_SEND_A_OTIME,
BTRFS_SEND_A_INO = 3,
BTRFS_SEND_A_SIZE = 4,
BTRFS_SEND_A_MODE = 5,
BTRFS_SEND_A_UID = 6,
BTRFS_SEND_A_GID = 7,
BTRFS_SEND_A_RDEV = 8,
BTRFS_SEND_A_CTIME = 9,
BTRFS_SEND_A_MTIME = 10,
BTRFS_SEND_A_ATIME = 11,
BTRFS_SEND_A_OTIME = 12,
BTRFS_SEND_A_XATTR_NAME,
BTRFS_SEND_A_XATTR_DATA,
BTRFS_SEND_A_XATTR_NAME = 13,
BTRFS_SEND_A_XATTR_DATA = 14,
BTRFS_SEND_A_PATH,
BTRFS_SEND_A_PATH_TO,
BTRFS_SEND_A_PATH_LINK,
BTRFS_SEND_A_PATH = 15,
BTRFS_SEND_A_PATH_TO = 16,
BTRFS_SEND_A_PATH_LINK = 17,
BTRFS_SEND_A_FILE_OFFSET,
BTRFS_SEND_A_DATA,
BTRFS_SEND_A_FILE_OFFSET = 18,
/*
* As of send stream v2, this attribute is special: it must be the last
* attribute in a command, its header contains only the type, and its
* length is implicitly the remaining length of the command.
*/
BTRFS_SEND_A_DATA = 19,
BTRFS_SEND_A_CLONE_UUID,
BTRFS_SEND_A_CLONE_CTRANSID,
BTRFS_SEND_A_CLONE_PATH,
BTRFS_SEND_A_CLONE_OFFSET,
BTRFS_SEND_A_CLONE_LEN,
BTRFS_SEND_A_CLONE_UUID = 20,
BTRFS_SEND_A_CLONE_CTRANSID = 21,
BTRFS_SEND_A_CLONE_PATH = 22,
BTRFS_SEND_A_CLONE_OFFSET = 23,
BTRFS_SEND_A_CLONE_LEN = 24,
__BTRFS_SEND_A_MAX,
BTRFS_SEND_A_MAX_V1 = 24,
/* Version 2 */
BTRFS_SEND_A_FALLOCATE_MODE = 25,
/*
* File attributes from the FS_*_FL namespace (i_flags, xflags),
* translated to BTRFS_INODE_* bits (BTRFS_INODE_FLAG_MASK) and stored
* in btrfs_inode_item::flags (represented by btrfs_inode::flags and
* btrfs_inode::ro_flags).
*/
BTRFS_SEND_A_FILEATTR = 26,
BTRFS_SEND_A_UNENCODED_FILE_LEN = 27,
BTRFS_SEND_A_UNENCODED_LEN = 28,
BTRFS_SEND_A_UNENCODED_OFFSET = 29,
/*
* COMPRESSION and ENCRYPTION default to NONE (0) if omitted from
* BTRFS_SEND_C_ENCODED_WRITE.
*/
BTRFS_SEND_A_COMPRESSION = 30,
BTRFS_SEND_A_ENCRYPTION = 31,
BTRFS_SEND_A_MAX_V2 = 31,
/* End */
BTRFS_SEND_A_MAX = 31,
};
#define BTRFS_SEND_A_MAX (__BTRFS_SEND_A_MAX - 1)
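To make the special BTRFS_SEND_A_DATA rule above concrete, here is a receive-side sketch (not part of this series; it assumes the attribute header starts with the usual 16-bit little-endian type field and that the caller has already advanced to the last attribute of the command):

static int locate_v2_data_attr(const u8 *attr, u32 remaining,
			       const u8 **data, u32 *data_len)
{
	if (remaining < sizeof(__le16))
		return -EINVAL;
	if (get_unaligned_le16(attr) != BTRFS_SEND_A_DATA)
		return -EINVAL;
	/* The header carries only the type; the payload is everything left in the command. */
	*data = attr + sizeof(__le16);
	*data_len = remaining - sizeof(__le16);
	return 0;
}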
#ifdef __KERNEL__
long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg);
#endif
#endif

View File

@ -9,6 +9,7 @@
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"
#include "zoned.h"
/*
* HOW DOES SPACE RESERVATION WORK
@ -187,6 +188,37 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
*/
#define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH (75)
/*
* Calculate chunk size depending on volume type (regular or zoned).
*/
static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
{
if (btrfs_is_zoned(fs_info))
return fs_info->zone_size;
ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
if (flags & BTRFS_BLOCK_GROUP_DATA)
return SZ_1G;
else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
return SZ_32M;
/* Handle BTRFS_BLOCK_GROUP_METADATA */
if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
return SZ_1G;
return SZ_256M;
}
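Read concretely, the helper above means: on a zoned filesystem every chunk matches the zone size; otherwise data chunks default to 1 GiB, system chunks to 32 MiB, and metadata chunks to 256 MiB until the filesystem's writable space exceeds 50 GiB, after which metadata chunks also default to 1 GiB. These defaults seed space_info->chunk_size and are what the new sysfs chunk_size knob later overrides.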
/*
* Update default chunk size.
*/
void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
u64 chunk_size)
{
WRITE_ONCE(space_info->chunk_size, chunk_size);
}
static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
@ -208,6 +240,7 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
INIT_LIST_HEAD(&space_info->tickets);
INIT_LIST_HEAD(&space_info->priority_tickets);
space_info->clamp = 1;
btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));
if (btrfs_is_zoned(info))
space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;
@ -263,7 +296,7 @@ out:
void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
u64 total_bytes, u64 bytes_used,
u64 bytes_readonly, u64 bytes_zone_unusable,
struct btrfs_space_info **space_info)
bool active, struct btrfs_space_info **space_info)
{
struct btrfs_space_info *found;
int factor;
@ -274,6 +307,8 @@ void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
ASSERT(found);
spin_lock(&found->lock);
found->total_bytes += total_bytes;
if (active)
found->active_total_bytes += total_bytes;
found->disk_total += total_bytes * factor;
found->bytes_used += bytes_used;
found->disk_used += bytes_used * factor;
@ -337,6 +372,22 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
return avail;
}
static inline u64 writable_total_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info)
{
/*
* On regular filesystem, all total_bytes are always writable. On zoned
* filesystem, there may be a limitation imposed by max_active_zones.
* For metadata allocation, we cannot finish an existing active block
* group to avoid a deadlock. Thus, we need to consider only the active
* groups to be writable for metadata space.
*/
if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
return space_info->total_bytes;
return space_info->active_total_bytes;
}
int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info, u64 bytes,
enum btrfs_reserve_flush_enum flush)
@ -349,9 +400,12 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
return 0;
used = btrfs_space_info_used(space_info, true);
avail = calc_available_free_space(fs_info, space_info, flush);
if (btrfs_is_zoned(fs_info) && (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
avail = 0;
else
avail = calc_available_free_space(fs_info, space_info, flush);
if (used + bytes < space_info->total_bytes + avail)
if (used + bytes < writable_total_bytes(fs_info, space_info) + avail)
return 1;
return 0;
}
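Combined with writable_total_bytes() above, the net effect for metadata on a zoned filesystem is that overcommit is effectively disabled: avail is forced to 0 and the check reduces to used + bytes < active_total_bytes, so only space in block groups that have already been activated counts toward a reservation.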
@ -387,7 +441,7 @@ again:
ticket = list_first_entry(head, struct reserve_ticket, list);
/* Check and see if our ticket can be satisfied now. */
if ((used + ticket->bytes <= space_info->total_bytes) ||
if ((used + ticket->bytes <= writable_total_bytes(fs_info, space_info)) ||
btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
flush)) {
btrfs_space_info_update_bytes_may_use(fs_info,
@ -671,6 +725,18 @@ static void flush_space(struct btrfs_fs_info *fs_info,
break;
case ALLOC_CHUNK:
case ALLOC_CHUNK_FORCE:
/*
* For metadata space on zoned filesystem, reaching here means we
* don't have enough space left in active_total_bytes. Try to
* activate a block group first, because we may already have an
* inactive block group allocated.
*/
ret = btrfs_zoned_activate_one_bg(fs_info, space_info, false);
if (ret < 0)
break;
else if (ret == 1)
break;
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
@ -681,6 +747,23 @@ static void flush_space(struct btrfs_fs_info *fs_info,
(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
CHUNK_ALLOC_FORCE);
btrfs_end_transaction(trans);
/*
* For metadata space on zoned filesystem, allocating a new chunk
* is not enough. We still need to activate the block group.
* Activate the newly allocated block group by (maybe) finishing
* a block group.
*/
if (ret == 1) {
ret = btrfs_zoned_activate_one_bg(fs_info, space_info, true);
/*
* Revert to the original ret regardless of whether we could
* finish one block group or not.
*/
if (ret >= 0)
ret = 1;
}
if (ret > 0 || ret == -ENOSPC)
ret = 0;
break;
@ -718,6 +801,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
{
u64 used;
u64 avail;
u64 total;
u64 to_reclaim = space_info->reclaim_size;
lockdep_assert_held(&space_info->lock);
@ -732,8 +816,9 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
* space. If that's the case add in our overage so we make sure to put
* appropriate pressure on the flushing state machine.
*/
if (space_info->total_bytes + avail < used)
to_reclaim += used - (space_info->total_bytes + avail);
total = writable_total_bytes(fs_info, space_info);
if (total + avail < used)
to_reclaim += used - (total + avail);
return to_reclaim;
}
@ -743,9 +828,12 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
{
u64 global_rsv_size = fs_info->global_block_rsv.reserved;
u64 ordered, delalloc;
u64 thresh = div_factor_fine(space_info->total_bytes, 90);
u64 total = writable_total_bytes(fs_info, space_info);
u64 thresh;
u64 used;
thresh = div_factor_fine(total, 90);
lockdep_assert_held(&space_info->lock);
/* If we're just plain full then async reclaim just slows us down. */
@ -807,8 +895,8 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
BTRFS_RESERVE_FLUSH_ALL);
used = space_info->bytes_used + space_info->bytes_reserved +
space_info->bytes_readonly + global_rsv_size;
if (used < space_info->total_bytes)
thresh += space_info->total_bytes - used;
if (used < total)
thresh += total - used;
thresh >>= space_info->clamp;
used = space_info->bytes_pinned;
@ -1280,7 +1368,7 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
/*
* This is the priority reclaim path, so to_reclaim could be >0 still
* because we may have only satisified the priority tickets and still
* because we may have only satisfied the priority tickets and still
* left non priority tickets on the list. We would then have
* to_reclaim but ->bytes == 0.
*/
@ -1525,7 +1613,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
* can_overcommit() to ensure we can overcommit to continue.
*/
if (!pending_tickets &&
((used + orig_bytes <= space_info->total_bytes) ||
((used + orig_bytes <= writable_total_bytes(fs_info, space_info)) ||
btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
btrfs_space_info_update_bytes_may_use(fs_info, space_info,
orig_bytes);

View File

@ -19,12 +19,16 @@ struct btrfs_space_info {
u64 bytes_may_use; /* number of bytes that may be used for
delalloc/allocations */
u64 bytes_readonly; /* total bytes that are read only */
/* Total bytes in the space, but only accounts active block groups. */
u64 active_total_bytes;
u64 bytes_zone_unusable; /* total bytes that are unusable until
resetting the device zone */
u64 max_extent_size; /* This will hold the maximum extent size of
the space info if we had an ENOSPC in the
allocator. */
/* Chunk size in bytes */
u64 chunk_size;
/*
* Once a block group drops below this threshold (percents) we'll
@ -122,7 +126,9 @@ int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
u64 total_bytes, u64 bytes_used,
u64 bytes_readonly, u64 bytes_zone_unusable,
struct btrfs_space_info **space_info);
bool active, struct btrfs_space_info **space_info);
void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
u64 chunk_size);
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
u64 flags);
u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,

View File

@ -12,15 +12,10 @@ static bool check_setget_bounds(const struct extent_buffer *eb,
{
const unsigned long member_offset = (unsigned long)ptr + off;
if (member_offset > eb->len) {
if (unlikely(member_offset + size > eb->len)) {
btrfs_warn(eb->fs_info,
"bad eb member start: ptr 0x%lx start %llu member offset %lu size %d",
(unsigned long)ptr, eb->start, member_offset, size);
return false;
}
if (member_offset + size > eb->len) {
btrfs_warn(eb->fs_info,
"bad eb member end: ptr 0x%lx start %llu member offset %lu size %d",
"bad eb member %s: ptr 0x%lx start %llu member offset %lu size %d",
(member_offset > eb->len ? "start" : "end"),
(unsigned long)ptr, eb->start, member_offset, size);
return false;
}

View File

@ -123,7 +123,7 @@ int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
struct btrfs_subpage *subpage;
/*
* We have cases like a dummy extent buffer page, which is not mappped
* We have cases like a dummy extent buffer page, which is not mapped
* and doesn't need to be locked.
*/
if (page->mapping)
@ -731,7 +731,7 @@ void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
* It should not have any subpage::writers count.
* Can be unlocked by unlock_page().
* This is the most common locked page for __extent_writepage() called
* inside extent_write_cache_pages() or extent_write_full_page().
* inside extent_write_cache_pages().
* Rarer cases include the @locked_page from extent_write_locked_range().
*
* - Page locked by lock_delalloc_pages()

View File

@ -48,6 +48,7 @@
#include "block-group.h"
#include "discard.h"
#include "qgroup.h"
#include "raid56.h"
#define CREATE_TRACE_POINTS
#include <trace/events/btrfs.h>
@ -72,7 +73,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data);
#define STATE_STRING_BUF_LEN (sizeof(STATE_STRING_PREFACE) + BTRFS_FS_STATE_COUNT)
/*
* Characters to print to indicate error conditions or uncommon filesystem sate.
* Characters to print to indicate error conditions or uncommon filesystem state.
* RO is not an error.
*/
static const char fs_state_chars[] = {
@ -1931,10 +1932,6 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
btrfs_workqueue_set_max(fs_info->hipri_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->endio_meta_write_workers,
new_pool_size);
btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
@ -2246,12 +2243,8 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
if (type & BTRFS_BLOCK_GROUP_RAID0)
num_stripes = nr_devices;
else if (type & BTRFS_BLOCK_GROUP_RAID1)
num_stripes = 2;
else if (type & BTRFS_BLOCK_GROUP_RAID1C3)
num_stripes = 3;
else if (type & BTRFS_BLOCK_GROUP_RAID1C4)
num_stripes = 4;
else if (type & BTRFS_BLOCK_GROUP_RAID1_MASK)
num_stripes = rattr->ncopies;
else if (type & BTRFS_BLOCK_GROUP_RAID10)
num_stripes = 4;
@ -2275,17 +2268,13 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
avail_space = rounddown(avail_space, BTRFS_STRIPE_LEN);
/*
* In order to avoid overwriting the superblock on the drive,
* btrfs starts at an offset of at least 1MB when doing chunk
* allocation.
*
* This ensures we have at least min_stripe_size free space
* after excluding 1MB.
* Ensure we have at least min_stripe_size on top of the
* reserved space on the device.
*/
if (avail_space <= SZ_1M + min_stripe_size)
if (avail_space <= BTRFS_DEVICE_RANGE_RESERVED + min_stripe_size)
continue;
avail_space -= SZ_1M;
avail_space -= BTRFS_DEVICE_RANGE_RESERVED;
devices_info[i].dev = device;
devices_info[i].max_avail = avail_space;
@ -2703,13 +2692,9 @@ static int __init init_btrfs_fs(void)
if (err)
goto free_delayed_ref;
err = btrfs_end_io_wq_init();
if (err)
goto free_prelim_ref;
err = btrfs_interface_init();
if (err)
goto free_end_io_wq;
goto free_prelim_ref;
btrfs_print_mod_info();
@ -2725,8 +2710,6 @@ static int __init init_btrfs_fs(void)
unregister_ioctl:
btrfs_interface_exit();
free_end_io_wq:
btrfs_end_io_wq_exit();
free_prelim_ref:
btrfs_prelim_ref_exit();
free_delayed_ref:
@ -2764,7 +2747,6 @@ static void __exit exit_btrfs_fs(void)
extent_state_cache_exit();
extent_io_exit();
btrfs_interface_exit();
btrfs_end_io_wq_exit();
unregister_filesystem(&btrfs_fs_type);
btrfs_exit_sysfs();
btrfs_cleanup_fs_uuids();

View File

@ -21,6 +21,7 @@
#include "space-info.h"
#include "block-group.h"
#include "qgroup.h"
#include "misc.h"
/*
* Structure name Path
@ -61,6 +62,10 @@ struct raid_kobject {
.store = _store, \
}
#define BTRFS_ATTR_W(_prefix, _name, _store) \
static struct kobj_attribute btrfs_attr_##_prefix##_##_name = \
__INIT_KOBJ_ATTR(_name, 0200, NULL, _store)
#define BTRFS_ATTR_RW(_prefix, _name, _show, _store) \
static struct kobj_attribute btrfs_attr_##_prefix##_##_name = \
__INIT_KOBJ_ATTR(_name, 0644, _show, _store)
@ -92,6 +97,7 @@ static struct btrfs_feature_attr btrfs_attr_features_##_name = { \
static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj);
static inline struct btrfs_fs_devices *to_fs_devs(struct kobject *kobj);
static struct kobject *get_btrfs_kobj(struct kobject *kobj);
static struct btrfs_feature_attr *to_btrfs_feature_attr(struct kobj_attribute *a)
{
@ -270,12 +276,10 @@ static umode_t btrfs_feature_visible(struct kobject *kobj,
return mode;
}
BTRFS_FEAT_ATTR_INCOMPAT(mixed_backref, MIXED_BACKREF);
BTRFS_FEAT_ATTR_INCOMPAT(default_subvol, DEFAULT_SUBVOL);
BTRFS_FEAT_ATTR_INCOMPAT(mixed_groups, MIXED_GROUPS);
BTRFS_FEAT_ATTR_INCOMPAT(compress_lzo, COMPRESS_LZO);
BTRFS_FEAT_ATTR_INCOMPAT(compress_zstd, COMPRESS_ZSTD);
BTRFS_FEAT_ATTR_INCOMPAT(big_metadata, BIG_METADATA);
BTRFS_FEAT_ATTR_INCOMPAT(extended_iref, EXTENDED_IREF);
BTRFS_FEAT_ATTR_INCOMPAT(raid56, RAID56);
BTRFS_FEAT_ATTR_INCOMPAT(skinny_metadata, SKINNY_METADATA);
@ -283,9 +287,10 @@ BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES);
BTRFS_FEAT_ATTR_INCOMPAT(metadata_uuid, METADATA_UUID);
BTRFS_FEAT_ATTR_COMPAT_RO(free_space_tree, FREE_SPACE_TREE);
BTRFS_FEAT_ATTR_INCOMPAT(raid1c34, RAID1C34);
#ifdef CONFIG_BTRFS_DEBUG
/* Remove once support for zoned allocation is feature complete */
#ifdef CONFIG_BLK_DEV_ZONED
BTRFS_FEAT_ATTR_INCOMPAT(zoned, ZONED);
#endif
#ifdef CONFIG_BTRFS_DEBUG
/* Remove once support for extent tree v2 is feature complete */
BTRFS_FEAT_ATTR_INCOMPAT(extent_tree_v2, EXTENT_TREE_V2);
#endif
@ -296,17 +301,15 @@ BTRFS_FEAT_ATTR_COMPAT_RO(verity, VERITY);
/*
* Features which depend on feature bits and may differ between each fs.
*
* /sys/fs/btrfs/features - all available features implemeted by this version
* /sys/fs/btrfs/features - all available features implemented by this version
* /sys/fs/btrfs/UUID/features - features of the fs which are enabled or
* can be changed on a mounted filesystem.
*/
static struct attribute *btrfs_supported_feature_attrs[] = {
BTRFS_FEAT_ATTR_PTR(mixed_backref),
BTRFS_FEAT_ATTR_PTR(default_subvol),
BTRFS_FEAT_ATTR_PTR(mixed_groups),
BTRFS_FEAT_ATTR_PTR(compress_lzo),
BTRFS_FEAT_ATTR_PTR(compress_zstd),
BTRFS_FEAT_ATTR_PTR(big_metadata),
BTRFS_FEAT_ATTR_PTR(extended_iref),
BTRFS_FEAT_ATTR_PTR(raid56),
BTRFS_FEAT_ATTR_PTR(skinny_metadata),
@ -314,8 +317,10 @@ static struct attribute *btrfs_supported_feature_attrs[] = {
BTRFS_FEAT_ATTR_PTR(metadata_uuid),
BTRFS_FEAT_ATTR_PTR(free_space_tree),
BTRFS_FEAT_ATTR_PTR(raid1c34),
#ifdef CONFIG_BTRFS_DEBUG
#ifdef CONFIG_BLK_DEV_ZONED
BTRFS_FEAT_ATTR_PTR(zoned),
#endif
#ifdef CONFIG_BTRFS_DEBUG
BTRFS_FEAT_ATTR_PTR(extent_tree_v2),
#endif
#ifdef CONFIG_FS_VERITY
@ -709,6 +714,112 @@ static ssize_t btrfs_space_info_show_##field(struct kobject *kobj, \
} \
BTRFS_ATTR(space_info, field, btrfs_space_info_show_##field)
static ssize_t btrfs_chunk_size_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
{
struct btrfs_space_info *sinfo = to_space_info(kobj);
return sysfs_emit(buf, "%llu\n", READ_ONCE(sinfo->chunk_size));
}
/*
* Store new chunk size in space info. Can be called on a read-only filesystem.
*
* If the new chunk size value is larger than 10% of free space it is reduced
* to match that limit. Alignment must be to 256M and the system chunk size
* cannot be set.
*/
static ssize_t btrfs_chunk_size_store(struct kobject *kobj,
struct kobj_attribute *a,
const char *buf, size_t len)
{
struct btrfs_space_info *space_info = to_space_info(kobj);
struct btrfs_fs_info *fs_info = to_fs_info(get_btrfs_kobj(kobj));
char *retptr;
u64 val;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (!fs_info->fs_devices)
return -EINVAL;
if (btrfs_is_zoned(fs_info))
return -EINVAL;
/* System block type must not be changed. */
if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
return -EPERM;
val = memparse(buf, &retptr);
/* There could be trailing '\n', also catch any typos after the value */
retptr = skip_spaces(retptr);
if (*retptr != 0 || val == 0)
return -EINVAL;
val = min(val, BTRFS_MAX_DATA_CHUNK_SIZE);
/* Limit stripe size to 10% of available space. */
val = min(div_factor(fs_info->fs_devices->total_rw_bytes, 1), val);
/* Must be multiple of 256M. */
val &= ~((u64)SZ_256M - 1);
/* Must be at least 256M. */
if (val < SZ_256M)
return -EINVAL;
btrfs_update_space_info_chunk_size(space_info, val);
return len;
}
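A worked example of the clamping above (path and numbers are illustrative): writing 1500M to /sys/fs/btrfs/<UUID>/allocation/data/chunk_size is first capped at BTRFS_MAX_DATA_CHUNK_SIZE and at 10% of total_rw_bytes, then rounded down to a 256 MiB multiple and stored as 1280 MiB; a value that rounds below 256 MiB is rejected with EINVAL, and the system space info rejects the write outright with EPERM.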
#ifdef CONFIG_BTRFS_DEBUG
/*
* Request chunk allocation with current chunk size.
*/
static ssize_t btrfs_force_chunk_alloc_store(struct kobject *kobj,
struct kobj_attribute *a,
const char *buf, size_t len)
{
struct btrfs_space_info *space_info = to_space_info(kobj);
struct btrfs_fs_info *fs_info = to_fs_info(get_btrfs_kobj(kobj));
struct btrfs_trans_handle *trans;
bool val;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (sb_rdonly(fs_info->sb))
return -EROFS;
ret = kstrtobool(buf, &val);
if (ret)
return ret;
if (!val)
return -EINVAL;
/*
* Calling this from sysfs context is unsafe and may cause
* unexpected problems.
*/
trans = btrfs_start_transaction(fs_info->tree_root, 0);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_force_chunk_alloc(trans, space_info->flags);
btrfs_end_transaction(trans);
if (ret == 1)
return len;
return -ENOSPC;
}
BTRFS_ATTR_W(space_info, force_chunk_alloc, btrfs_force_chunk_alloc_store);
#endif
SPACE_INFO_ATTR(flags);
SPACE_INFO_ATTR(total_bytes);
SPACE_INFO_ATTR(bytes_used);
@ -719,6 +830,7 @@ SPACE_INFO_ATTR(bytes_readonly);
SPACE_INFO_ATTR(bytes_zone_unusable);
SPACE_INFO_ATTR(disk_used);
SPACE_INFO_ATTR(disk_total);
BTRFS_ATTR_RW(space_info, chunk_size, btrfs_chunk_size_show, btrfs_chunk_size_store);
static ssize_t btrfs_sinfo_bg_reclaim_threshold_show(struct kobject *kobj,
struct kobj_attribute *a,
@ -773,6 +885,10 @@ static struct attribute *space_info_attrs[] = {
BTRFS_ATTR_PTR(space_info, disk_used),
BTRFS_ATTR_PTR(space_info, disk_total),
BTRFS_ATTR_PTR(space_info, bg_reclaim_threshold),
BTRFS_ATTR_PTR(space_info, chunk_size),
#ifdef CONFIG_BTRFS_DEBUG
BTRFS_ATTR_PTR(space_info, force_chunk_alloc),
#endif
NULL,
};
ATTRIBUTE_GROUPS(space_info);
@ -871,6 +987,48 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show);
static ssize_t btrfs_commit_stats_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
return sysfs_emit(buf,
"commits %llu\n"
"last_commit_ms %llu\n"
"max_commit_ms %llu\n"
"total_commit_ms %llu\n",
fs_info->commit_stats.commit_count,
div_u64(fs_info->commit_stats.last_commit_dur, NSEC_PER_MSEC),
div_u64(fs_info->commit_stats.max_commit_dur, NSEC_PER_MSEC),
div_u64(fs_info->commit_stats.total_commit_dur, NSEC_PER_MSEC));
}
static ssize_t btrfs_commit_stats_store(struct kobject *kobj,
struct kobj_attribute *a,
const char *buf, size_t len)
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
unsigned long val;
int ret;
if (!fs_info)
return -EPERM;
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val)
return -EINVAL;
WRITE_ONCE(fs_info->commit_stats.max_commit_dur, 0);
return len;
}
BTRFS_ATTR_RW(, commit_stats, btrfs_commit_stats_show, btrfs_commit_stats_store);
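Reading the new per-filesystem commit_stats attribute therefore produces four lines in the format shown above, for example (values purely illustrative):

commits 128
last_commit_ms 21
max_commit_ms 340
total_commit_ms 2875

and, per the store handler, a privileged write of 0 resets max_commit_ms while any non-zero value is rejected with EINVAL.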
static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
{
@ -1110,6 +1268,7 @@ static const struct attribute *btrfs_attrs[] = {
BTRFS_ATTR_PTR(, generation),
BTRFS_ATTR_PTR(, read_policy),
BTRFS_ATTR_PTR(, bg_reclaim_threshold),
BTRFS_ATTR_PTR(, commit_stats),
NULL,
};
@ -1140,6 +1299,16 @@ static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj)
return to_fs_devs(kobj)->fs_info;
}
static struct kobject *get_btrfs_kobj(struct kobject *kobj)
{
while (kobj) {
if (kobj->ktype == &btrfs_ktype)
return kobj;
kobj = kobj->parent;
}
return NULL;
}
#define NUM_FEATURE_BITS 64
#define BTRFS_FEATURE_NAME_MAX 13
static char btrfs_unknown_feature_names[FEAT_MAX][NUM_FEATURE_BITS][BTRFS_FEATURE_NAME_MAX];
@ -2106,4 +2275,3 @@ void __cold btrfs_exit_sysfs(void)
#endif
kset_unregister(btrfs_kset);
}

View File

@ -59,6 +59,7 @@ struct inode *btrfs_new_test_inode(void)
return NULL;
inode->i_mode = S_IFREG;
inode->i_ino = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;

View File

@ -47,7 +47,8 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
goto out;
}
path->nodes[0] = eb = alloc_dummy_extent_buffer(fs_info, nodesize);
eb = alloc_dummy_extent_buffer(fs_info, nodesize);
path->nodes[0] = eb;
if (!eb) {
test_std_err(TEST_ALLOC_EXTENT_BUFFER);
ret = -ENOMEM;

View File

@ -10,6 +10,7 @@
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include <linux/timekeeping.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
@ -1831,8 +1832,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
dentry->d_name.len * 2);
parent_inode->i_mtime = parent_inode->i_ctime =
current_time(parent_inode);
parent_inode->i_mtime = current_time(parent_inode);
parent_inode->i_ctime = parent_inode->i_mtime;
ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode));
if (ret) {
btrfs_abort_transaction(trans, ret);
@ -2098,12 +2099,23 @@ static void add_pending_snapshot(struct btrfs_trans_handle *trans)
list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
}
static void update_commit_stats(struct btrfs_fs_info *fs_info, ktime_t interval)
{
fs_info->commit_stats.commit_count++;
fs_info->commit_stats.last_commit_dur = interval;
fs_info->commit_stats.max_commit_dur =
max_t(u64, fs_info->commit_stats.max_commit_dur, interval);
fs_info->commit_stats.total_commit_dur += interval;
}
int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_transaction *prev_trans = NULL;
int ret;
ktime_t start_time;
ktime_t interval;
ASSERT(refcount_read(&trans->use_count) == 1);
@ -2228,6 +2240,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
}
}
/*
* Get the time spent on the work done by the commit thread and not
* the time spent waiting on a previous commit
*/
start_time = ktime_get_ns();
extwriter_counter_dec(cur_trans, trans->type);
ret = btrfs_start_delalloc_flush(fs_info);
@ -2469,6 +2487,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
trace_btrfs_transaction_commit(fs_info);
interval = ktime_get_ns() - start_time;
btrfs_scrub_continue(fs_info);
if (current->journal_info == trans)
@ -2476,6 +2496,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
kmem_cache_free(btrfs_trans_handle_cachep, trans);
update_commit_stats(fs_info, interval);
return ret;
unlock_reloc:

View File

@ -171,7 +171,7 @@ again:
int index = (root->log_transid + 1) % 2;
if (btrfs_need_log_full_commit(trans)) {
ret = -EAGAIN;
ret = BTRFS_LOG_FORCE_COMMIT;
goto out;
}
@ -194,7 +194,7 @@ again:
* writing.
*/
if (zoned && !created) {
ret = -EAGAIN;
ret = BTRFS_LOG_FORCE_COMMIT;
goto out;
}
@ -2287,7 +2287,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
struct btrfs_key location;
/*
* Currenly we only log dir index keys. Even if we replay a log created
* Currently we only log dir index keys. Even if we replay a log created
* by an older kernel that logged both dir index and dir item keys, all
* we need to do is process the dir index keys, we (and our caller) can
* safely ignore dir item keys (key type BTRFS_DIR_ITEM_KEY).
@ -3121,7 +3121,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
/* bail out if we need to do a full commit */
if (btrfs_need_log_full_commit(trans)) {
ret = -EAGAIN;
ret = BTRFS_LOG_FORCE_COMMIT;
mutex_unlock(&root->log_mutex);
goto out;
}
@ -3222,7 +3222,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
}
btrfs_wait_tree_log_extents(log, mark);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
ret = BTRFS_LOG_FORCE_COMMIT;
goto out;
}
@ -3261,7 +3261,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
blk_finish_plug(&plug);
btrfs_wait_tree_log_extents(log, mark);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
ret = BTRFS_LOG_FORCE_COMMIT;
goto out_wake_log_root;
}
@ -5848,7 +5848,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
inode_only == LOG_INODE_ALL &&
inode->last_unlink_trans >= trans->transid) {
btrfs_set_log_full_commit(trans);
ret = 1;
ret = BTRFS_LOG_FORCE_COMMIT;
goto out_unlock;
}
@ -6562,12 +6562,12 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
bool log_dentries = false;
if (btrfs_test_opt(fs_info, NOTREELOG)) {
ret = 1;
ret = BTRFS_LOG_FORCE_COMMIT;
goto end_no_trans;
}
if (btrfs_root_refs(&root->root_item) == 0) {
ret = 1;
ret = BTRFS_LOG_FORCE_COMMIT;
goto end_no_trans;
}
@ -6665,7 +6665,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
end_trans:
if (ret < 0) {
btrfs_set_log_full_commit(trans);
ret = 1;
ret = BTRFS_LOG_FORCE_COMMIT;
}
if (ret)
@ -7029,8 +7029,15 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
* anyone from syncing the log until we have updated both inodes
* in the log.
*/
ret = join_running_log_trans(root);
/*
* At least one of the inodes was logged before, so this should
* not fail, but if it does, it's not serious, just bail out and
* mark the log for a full commit.
*/
if (WARN_ON_ONCE(ret < 0))
goto out;
log_pinned = true;
btrfs_pin_log_trans(root);
path = btrfs_alloc_path();
if (!path) {

View File

@ -12,6 +12,9 @@
/* return value for btrfs_log_dentry_safe that means we don't need to log it at all */
#define BTRFS_NO_LOG_SYNC 256
/* We can't use the tree log for whatever reason, force a transaction commit */
#define BTRFS_LOG_FORCE_COMMIT (1)
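Since this constant replaces both the old -EAGAIN and bare 1 returns in the hunks above, callers can keep treating negative values as real errors and interpret BTRFS_LOG_FORCE_COMMIT as a plain "skip the log sync and fall back to a full transaction commit" signal; that reading is inferred from the hunks, not stated in the patch itself.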
struct btrfs_log_ctx {
int log_ret;
int log_transid;

View File

@ -182,6 +182,13 @@ const char *btrfs_bg_type_to_raid_name(u64 flags)
return btrfs_raid_array[index].raid_name;
}
int btrfs_nr_parity_stripes(u64 type)
{
enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type);
return btrfs_raid_array[index].nparity;
}
/*
* Fill @buf with textual description of @bg_flags, no more than @size_buf
* bytes including terminating null byte.
@ -238,7 +245,6 @@ out_overflow:;
static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
enum btrfs_map_op op,
@ -1396,12 +1402,7 @@ static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
switch (device->fs_devices->chunk_alloc_policy) {
case BTRFS_CHUNK_ALLOC_REGULAR:
/*
* We don't want to overwrite the superblock on the drive nor
* any area used by the boot loader (grub for example), so we
* make sure to start at an offset of at least 1MB.
*/
return max_t(u64, start, SZ_1M);
return max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);
case BTRFS_CHUNK_ALLOC_ZONED:
/*
* We don't care about the starting region like regular
@ -5071,26 +5072,16 @@ static void init_alloc_chunk_ctl_policy_regular(
struct btrfs_fs_devices *fs_devices,
struct alloc_chunk_ctl *ctl)
{
u64 type = ctl->type;
struct btrfs_space_info *space_info;
if (type & BTRFS_BLOCK_GROUP_DATA) {
ctl->max_stripe_size = SZ_1G;
ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
/* For larger filesystems, use larger metadata chunks */
if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
ctl->max_stripe_size = SZ_1G;
else
ctl->max_stripe_size = SZ_256M;
ctl->max_chunk_size = ctl->max_stripe_size;
} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
ctl->max_stripe_size = SZ_32M;
ctl->max_chunk_size = 2 * ctl->max_stripe_size;
ctl->devs_max = min_t(int, ctl->devs_max,
BTRFS_MAX_DEVS_SYS_CHUNK);
} else {
BUG();
}
space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type);
ASSERT(space_info);
ctl->max_chunk_size = READ_ONCE(space_info->chunk_size);
ctl->max_stripe_size = ctl->max_chunk_size;
if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM)
ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
/* We don't want a chunk larger than 10% of writable space */
ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
@ -5720,7 +5711,8 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
struct extent_map *em;
struct map_lookup *map;
int ret;
enum btrfs_raid_types index;
int ret = 1;
em = btrfs_get_chunk_map(fs_info, logical, len);
if (IS_ERR(em))
@ -5733,10 +5725,11 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
return 1;
map = em->map_lookup;
if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
ret = map->num_stripes;
else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
ret = map->sub_stripes;
index = btrfs_bg_flags_to_raid_index(map->type);
/* Non-RAID56, use their ncopies from btrfs_raid_array. */
if (!(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK))
ret = btrfs_raid_array[index].ncopies;
else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
ret = 2;
else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
@ -5748,8 +5741,6 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
* stripe under reconstruction.
*/
ret = map->num_stripes;
else
ret = 1;
free_extent_map(em);
down_read(&fs_info->dev_replace.rwsem);
@ -5768,6 +5759,9 @@ unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
struct map_lookup *map;
unsigned long len = fs_info->sectorsize;
if (!btrfs_fs_incompat(fs_info, RAID56))
return len;
em = btrfs_get_chunk_map(fs_info, logical, len);
if (!WARN_ON(IS_ERR(em))) {
@ -5785,6 +5779,9 @@ int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
struct map_lookup *map;
int ret = 0;
if (!btrfs_fs_incompat(fs_info, RAID56))
return 0;
em = btrfs_get_chunk_map(fs_info, logical, len);
if(!WARN_ON(IS_ERR(em))) {
@ -5917,18 +5914,17 @@ void btrfs_put_bioc(struct btrfs_io_context *bioc)
kfree(bioc);
}
/* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
/*
* Please note that, discard won't be sent to target device of device
* replace.
*/
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
u64 logical, u64 *length_ret,
struct btrfs_io_context **bioc_ret)
struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
u64 logical, u64 *length_ret,
u32 *num_stripes)
{
struct extent_map *em;
struct map_lookup *map;
struct btrfs_io_context *bioc;
struct btrfs_discard_stripe *stripes;
u64 length = *length_ret;
u64 offset;
u64 stripe_nr;
@ -5937,29 +5933,26 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
u64 stripe_cnt;
u64 stripe_len;
u64 stripe_offset;
u64 num_stripes;
u32 stripe_index;
u32 factor = 0;
u32 sub_stripes = 0;
u64 stripes_per_dev = 0;
u32 remaining_stripes = 0;
u32 last_stripe = 0;
int ret = 0;
int ret;
int i;
/* Discard always returns a bioc. */
ASSERT(bioc_ret);
em = btrfs_get_chunk_map(fs_info, logical, length);
if (IS_ERR(em))
return PTR_ERR(em);
return ERR_CAST(em);
map = em->map_lookup;
/* we don't discard raid56 yet */
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
ret = -EOPNOTSUPP;
goto out;
}
goto out_free_map;
}
offset = logical - em->start;
length = min_t(u64, em->start + em->len - logical, length);
@ -5985,7 +5978,7 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
* device we have to walk to find the data, and stripe_index is
* the number of our device in the stripe array
*/
num_stripes = 1;
*num_stripes = 1;
stripe_index = 0;
if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID10)) {
@ -5995,7 +5988,7 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
sub_stripes = map->sub_stripes;
factor = map->num_stripes / sub_stripes;
num_stripes = min_t(u64, map->num_stripes,
*num_stripes = min_t(u64, map->num_stripes,
sub_stripes * stripe_cnt);
stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
stripe_index *= sub_stripes;
@ -6005,31 +5998,30 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
last_stripe *= sub_stripes;
} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
BTRFS_BLOCK_GROUP_DUP)) {
num_stripes = map->num_stripes;
*num_stripes = map->num_stripes;
} else {
stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
&stripe_index);
}
bioc = alloc_btrfs_io_context(fs_info, num_stripes, 0);
if (!bioc) {
stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS);
if (!stripes) {
ret = -ENOMEM;
goto out;
goto out_free_map;
}
for (i = 0; i < num_stripes; i++) {
bioc->stripes[i].physical =
for (i = 0; i < *num_stripes; i++) {
stripes[i].physical =
map->stripes[stripe_index].physical +
stripe_offset + stripe_nr * map->stripe_len;
bioc->stripes[i].dev = map->stripes[stripe_index].dev;
stripes[i].dev = map->stripes[stripe_index].dev;
if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID10)) {
bioc->stripes[i].length = stripes_per_dev *
map->stripe_len;
stripes[i].length = stripes_per_dev * map->stripe_len;
if (i / sub_stripes < remaining_stripes)
bioc->stripes[i].length += map->stripe_len;
stripes[i].length += map->stripe_len;
/*
* Special for the first stripe and
@ -6040,17 +6032,17 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
* off end_off
*/
if (i < sub_stripes)
bioc->stripes[i].length -= stripe_offset;
stripes[i].length -= stripe_offset;
if (stripe_index >= last_stripe &&
stripe_index <= (last_stripe +
sub_stripes - 1))
bioc->stripes[i].length -= stripe_end_offset;
stripes[i].length -= stripe_end_offset;
if (i == sub_stripes - 1)
stripe_offset = 0;
} else {
bioc->stripes[i].length = length;
stripes[i].length = length;
}
stripe_index++;
@ -6060,12 +6052,11 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
}
}
*bioc_ret = bioc;
bioc->map_type = map->type;
bioc->num_stripes = num_stripes;
out:
free_extent_map(em);
return ret;
return stripes;
out_free_map:
free_extent_map(em);
return ERR_PTR(ret);
}
/*
@ -6208,7 +6199,6 @@ static void handle_ops_on_dev_replace(enum btrfs_map_op op,
bioc->stripes + i;
new->physical = old->physical;
new->length = old->length;
new->dev = dev_replace->tgtdev;
bioc->tgtdev_map[i] = index_where_to_add;
index_where_to_add++;
@ -6249,8 +6239,6 @@ static void handle_ops_on_dev_replace(enum btrfs_map_op op,
bioc->stripes + num_stripes;
tgtdev_stripe->physical = physical_of_found;
tgtdev_stripe->length =
bioc->stripes[index_srcdev].length;
tgtdev_stripe->dev = dev_replace->tgtdev;
bioc->tgtdev_map[index_srcdev] = num_stripes;
@ -6472,6 +6460,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
}
} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
ASSERT(map->stripe_len == BTRFS_STRIPE_LEN);
if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
/* push stripe_nr back to the start of the full stripe */
stripe_nr = div64_u64(raid56_full_stripe_start,
@ -6479,9 +6468,12 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
/* RAID[56] write or recovery. Return all stripes */
num_stripes = map->num_stripes;
max_errors = nr_parity_stripes(map);
max_errors = btrfs_chunk_max_errors(map);
*length = map->stripe_len;
/* Return the length to the full stripe end */
*length = min(logical + *length,
raid56_full_stripe_start + em->start +
data_stripes * stripe_len) - logical;
stripe_index = 0;
stripe_offset = 0;
} else {
@ -6604,10 +6596,6 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_io_context **bioc_ret, int mirror_num)
{
if (op == BTRFS_MAP_DISCARD)
return __btrfs_map_block_for_discard(fs_info, logical,
length, bioc_ret);
return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
mirror_num, 0);
}
@ -6620,77 +6608,106 @@ int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1);
}
static inline void btrfs_end_bioc(struct btrfs_io_context *bioc, struct bio *bio)
static struct workqueue_struct *btrfs_end_io_wq(struct btrfs_io_context *bioc)
{
bio->bi_private = bioc->private;
bio->bi_end_io = bioc->end_io;
bio_endio(bio);
if (bioc->orig_bio->bi_opf & REQ_META)
return bioc->fs_info->endio_meta_workers;
return bioc->fs_info->endio_workers;
}
static void btrfs_end_bio_work(struct work_struct *work)
{
struct btrfs_bio *bbio =
container_of(work, struct btrfs_bio, end_io_work);
bio_endio(&bbio->bio);
}
static void btrfs_end_bioc(struct btrfs_io_context *bioc, bool async)
{
struct bio *orig_bio = bioc->orig_bio;
struct btrfs_bio *bbio = btrfs_bio(orig_bio);
bbio->mirror_num = bioc->mirror_num;
orig_bio->bi_private = bioc->private;
orig_bio->bi_end_io = bioc->end_io;
/*
* Only send an error to the higher layers if it is beyond the tolerance
* threshold.
*/
if (atomic_read(&bioc->error) > bioc->max_errors)
orig_bio->bi_status = BLK_STS_IOERR;
else
orig_bio->bi_status = BLK_STS_OK;
if (btrfs_op(orig_bio) == BTRFS_MAP_READ && async) {
INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
queue_work(btrfs_end_io_wq(bioc), &bbio->end_io_work);
} else {
bio_endio(orig_bio);
}
btrfs_put_bioc(bioc);
}
static void btrfs_end_bio(struct bio *bio)
{
struct btrfs_io_context *bioc = bio->bi_private;
int is_orig_bio = 0;
struct btrfs_io_stripe *stripe = bio->bi_private;
struct btrfs_io_context *bioc = stripe->bioc;
if (bio->bi_status) {
atomic_inc(&bioc->error);
if (bio->bi_status == BLK_STS_IOERR ||
bio->bi_status == BLK_STS_TARGET) {
struct btrfs_device *dev = btrfs_bio(bio)->device;
ASSERT(dev->bdev);
if (btrfs_op(bio) == BTRFS_MAP_WRITE)
btrfs_dev_stat_inc_and_print(dev,
btrfs_dev_stat_inc_and_print(stripe->dev,
BTRFS_DEV_STAT_WRITE_ERRS);
else if (!(bio->bi_opf & REQ_RAHEAD))
btrfs_dev_stat_inc_and_print(dev,
btrfs_dev_stat_inc_and_print(stripe->dev,
BTRFS_DEV_STAT_READ_ERRS);
if (bio->bi_opf & REQ_PREFLUSH)
btrfs_dev_stat_inc_and_print(dev,
btrfs_dev_stat_inc_and_print(stripe->dev,
BTRFS_DEV_STAT_FLUSH_ERRS);
}
}
if (bio == bioc->orig_bio)
is_orig_bio = 1;
if (bio != bioc->orig_bio)
bio_put(bio);
btrfs_bio_counter_dec(bioc->fs_info);
if (atomic_dec_and_test(&bioc->stripes_pending)) {
if (!is_orig_bio) {
bio_put(bio);
bio = bioc->orig_bio;
}
btrfs_bio(bio)->mirror_num = bioc->mirror_num;
/* only send an error to the higher layers if it is
* beyond the tolerance of the btrfs bio
*/
if (atomic_read(&bioc->error) > bioc->max_errors) {
bio->bi_status = BLK_STS_IOERR;
} else {
/*
* this bio is actually up to date, we didn't
* go over the max number of errors
*/
bio->bi_status = BLK_STS_OK;
}
btrfs_end_bioc(bioc, bio);
} else if (!is_orig_bio) {
bio_put(bio);
}
if (atomic_dec_and_test(&bioc->stripes_pending))
btrfs_end_bioc(bioc, true);
}
static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio,
u64 physical, struct btrfs_device *dev)
static void submit_stripe_bio(struct btrfs_io_context *bioc,
struct bio *orig_bio, int dev_nr, bool clone)
{
struct btrfs_fs_info *fs_info = bioc->fs_info;
struct btrfs_device *dev = bioc->stripes[dev_nr].dev;
u64 physical = bioc->stripes[dev_nr].physical;
struct bio *bio;
bio->bi_private = bioc;
btrfs_bio(bio)->device = dev;
if (!dev || !dev->bdev ||
test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
(btrfs_op(orig_bio) == BTRFS_MAP_WRITE &&
!test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
atomic_inc(&bioc->error);
if (atomic_dec_and_test(&bioc->stripes_pending))
btrfs_end_bioc(bioc, false);
return;
}
if (clone) {
bio = bio_alloc_clone(dev->bdev, orig_bio, GFP_NOFS, &fs_bio_set);
} else {
bio = orig_bio;
bio_set_dev(bio, dev->bdev);
btrfs_bio(bio)->device = dev;
}
bioc->stripes[dev_nr].bioc = bioc;
bio->bi_private = &bioc->stripes[dev_nr];
bio->bi_end_io = btrfs_end_bio;
bio->bi_iter.bi_sector = physical >> 9;
/*
@ -6708,8 +6725,8 @@ static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio,
}
}
btrfs_debug_in_rcu(fs_info,
"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
"%s: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
__func__, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
(unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
dev->devid, bio->bi_iter.bi_size);
@ -6719,66 +6736,39 @@ static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio,
submit_bio(bio);
}
static void bioc_error(struct btrfs_io_context *bioc, struct bio *bio, u64 logical)
void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror_num)
{
atomic_inc(&bioc->error);
if (atomic_dec_and_test(&bioc->stripes_pending)) {
/* Should be the original bio. */
WARN_ON(bio != bioc->orig_bio);
btrfs_bio(bio)->mirror_num = bioc->mirror_num;
bio->bi_iter.bi_sector = logical >> 9;
if (atomic_read(&bioc->error) > bioc->max_errors)
bio->bi_status = BLK_STS_IOERR;
else
bio->bi_status = BLK_STS_OK;
btrfs_end_bioc(bioc, bio);
}
}
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num)
{
struct btrfs_device *dev;
struct bio *first_bio = bio;
u64 logical = bio->bi_iter.bi_sector << 9;
u64 length = 0;
u64 map_length;
u64 length = bio->bi_iter.bi_size;
u64 map_length = length;
int ret;
int dev_nr;
int total_devs;
struct btrfs_io_context *bioc = NULL;
length = bio->bi_iter.bi_size;
map_length = length;
btrfs_bio_counter_inc_blocked(fs_info);
ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
&map_length, &bioc, mirror_num, 1);
if (ret) {
btrfs_bio_counter_dec(fs_info);
return errno_to_blk_status(ret);
bio->bi_status = errno_to_blk_status(ret);
bio_endio(bio);
return;
}
total_devs = bioc->num_stripes;
bioc->orig_bio = first_bio;
bioc->private = first_bio->bi_private;
bioc->end_io = first_bio->bi_end_io;
atomic_set(&bioc->stripes_pending, bioc->num_stripes);
bioc->orig_bio = bio;
bioc->private = bio->bi_private;
bioc->end_io = bio->bi_end_io;
atomic_set(&bioc->stripes_pending, total_devs);
if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
/* In this case, map_length has been set to the length of
a single stripe; not the whole write */
if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
ret = raid56_parity_write(bio, bioc, map_length);
} else {
ret = raid56_parity_recover(bio, bioc, map_length,
mirror_num, 1);
}
btrfs_bio_counter_dec(fs_info);
return errno_to_blk_status(ret);
if (btrfs_op(bio) == BTRFS_MAP_WRITE)
raid56_parity_write(bio, bioc);
else
raid56_parity_recover(bio, bioc, mirror_num, true);
return;
}
if (map_length < length) {
@ -6789,26 +6779,11 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
}
for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
dev = bioc->stripes[dev_nr].dev;
if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
&dev->dev_state) ||
(btrfs_op(first_bio) == BTRFS_MAP_WRITE &&
!test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
bioc_error(bioc, first_bio, logical);
continue;
}
const bool should_clone = (dev_nr < total_devs - 1);
if (dev_nr < total_devs - 1) {
bio = btrfs_bio_clone(dev->bdev, first_bio);
} else {
bio = first_bio;
bio_set_dev(bio, dev->bdev);
}
submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev);
submit_stripe_bio(bioc, bio, dev_nr, should_clone);
}
btrfs_bio_counter_dec(fs_info);
return BLK_STS_OK;
}
static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
@ -6966,11 +6941,12 @@ static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
devid, uuid);
}
static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
u64 btrfs_calc_stripe_length(const struct extent_map *em)
{
const int data_stripes = calc_data_stripes(type, num_stripes);
const struct map_lookup *map = em->map_lookup;
const int data_stripes = calc_data_stripes(map->type, map->num_stripes);
return div_u64(chunk_len, data_stripes);
return div_u64(em->len, data_stripes);
}
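A quick worked example of what the new helper computes (illustrative numbers, not taken from the patch): a RAID5 chunk striped across four devices has three data stripes, so an extent map with em->len of 3 GiB yields a per-device stripe length of 1 GiB; for SINGLE or DUP chunks data_stripes is 1 and the stripe length simply equals the chunk length.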
#if BITS_PER_LONG == 32
@ -7109,8 +7085,7 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
map->type = type;
map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
map->verified_stripes = 0;
em->orig_block_len = calc_stripe_length(type, em->len,
map->num_stripes);
em->orig_block_len = btrfs_calc_stripe_length(em);
for (i = 0; i < num_stripes; i++) {
map->stripes[i].physical =
btrfs_stripe_offset_nr(leaf, chunk, i);
@ -7236,7 +7211,8 @@ static int read_one_dev(struct extent_buffer *leaf,
u8 fs_uuid[BTRFS_FSID_SIZE];
u8 dev_uuid[BTRFS_UUID_SIZE];
devid = args.devid = btrfs_device_id(leaf, dev_item);
devid = btrfs_device_id(leaf, dev_item);
args.devid = devid;
read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
BTRFS_UUID_SIZE);
read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
@ -7865,11 +7841,7 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
btrfs_dev_stat_inc(dev, index);
btrfs_dev_stat_print_on_error(dev);
}
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
if (!dev->dev_stats_valid)
return;
btrfs_err_rl_in_rcu(dev->fs_info,
@ -8011,7 +7983,7 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
}
map = em->map_lookup;
stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
stripe_len = btrfs_calc_stripe_length(em);
if (physical_len != stripe_len) {
btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
@ -8021,6 +7993,16 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
goto out;
}
/*
* Very old mkfs.btrfs (before v4.1) will not respect the reserved
* space. Although the kernel can handle it without problems, it is
* better to warn the user.
*/
if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED)
btrfs_warn(fs_info,
"devid %llu physical %llu len %llu inside the reserved space",
devid, physical_offset, physical_len);
for (i = 0; i < map->num_stripes; i++) {
if (map->stripes[i].dev->devid == devid &&
map->stripes[i].physical == physical_offset) {
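For context (an editor's note, not text from the patch): the reserved range checked here is the first 1 MiB of every device, which among other things holds the primary superblock at offset 64 KiB, so a dev extent starting below BTRFS_DEVICE_RANGE_RESERVED points at a filesystem created by a very old mkfs and is merely warned about rather than rejected.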


@ -354,6 +354,13 @@ struct btrfs_fs_devices {
- 2 * sizeof(struct btrfs_chunk)) \
/ sizeof(struct btrfs_stripe) + 1)
/*
* Maximum number of sectors for a single bio to limit the size of the
* checksum array. This matches the number of bio_vecs per bio and thus the
* I/O size for buffered I/O.
*/
#define BTRFS_MAX_BIO_SECTORS (256)
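To make the new cap concrete (assuming the common 4 KiB sectorsize): 256 sectors limit a single data bio to 1 MiB and its checksum array to 256 entries, i.e. 1 KiB with crc32c, 2 KiB with xxhash64, or 8 KiB with the 32-byte sha256/blake2b checksums.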
/*
* Additional info to pass along bio.
*
@ -371,6 +378,9 @@ struct btrfs_bio {
u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
struct bvec_iter iter;
/* For read end I/O handling */
struct work_struct end_io_work;
/*
* This member must come last, bio_alloc_bioset will allocate enough
* bytes for entire btrfs_bio but relies on bio being last.
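A minimal sketch of the allocation pattern this comment relies on, with made-up names (example_bio, example_bioset) and not btrfs's actual setup code: the bioset's front_pad reserves space for everything placed in front of the embedded bio, bio_alloc_bioset() hands back the bio, and container_of() recovers the wrapper. The bio has to stay last because its flexible bi_inline_vecs array grows past the end of the struct.

#include <linux/bio.h>

struct example_bio {
	u64 cookie;		/* per-I/O state placed in front of the bio */
	struct bio bio;		/* must be last: bi_inline_vecs follows it */
};

static struct bio_set example_bioset;

static int example_bioset_setup(void)
{
	/* front_pad covers the members that precede the embedded bio */
	return bioset_init(&example_bioset, 16 /* arbitrary pool size */,
			   offsetof(struct example_bio, bio),
			   BIOSET_NEED_BVECS);
}

static struct example_bio *example_bio_alloc(struct block_device *bdev,
					     unsigned short nr_vecs,
					     unsigned int opf)
{
	struct bio *bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_NOFS,
					   &example_bioset);

	/* Recover the wrapper from the embedded bio. */
	return container_of(bio, struct example_bio, bio);
}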
@ -391,10 +401,36 @@ static inline void btrfs_bio_free_csum(struct btrfs_bio *bbio)
}
}
/*
* Iterate through a btrfs_bio (@bbio) on a per-sector basis.
*
* bvl - struct bio_vec
* bbio - struct btrfs_bio
* iter - struct bvec_iter
* bio_offset - unsigned int
*/
#define btrfs_bio_for_each_sector(fs_info, bvl, bbio, iter, bio_offset) \
for ((iter) = (bbio)->iter, (bio_offset) = 0; \
(iter).bi_size && \
(((bvl) = bio_iter_iovec((&(bbio)->bio), (iter))), 1); \
(bio_offset) += fs_info->sectorsize, \
bio_advance_iter_single(&(bbio)->bio, &(iter), \
(fs_info)->sectorsize))
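A hedged usage sketch of the iterator (process_one_sector() is a hypothetical helper invented for the example, not something added by this patch):

/* Hypothetical per-sector handler, purely for illustration. */
static void process_one_sector(struct btrfs_fs_info *fs_info,
			       struct page *page, unsigned int pgoff,
			       u32 bio_offset);

static void example_walk_sectors(struct btrfs_fs_info *fs_info,
				 struct btrfs_bio *bbio)
{
	struct bio_vec bvl;
	struct bvec_iter iter;
	u32 bio_offset;

	/* Visit the payload one fs_info->sectorsize chunk at a time. */
	btrfs_bio_for_each_sector(fs_info, bvl, bbio, iter, bio_offset)
		process_one_sector(fs_info, bvl.bv_page, bvl.bv_offset,
				   bio_offset);
}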
struct btrfs_io_stripe {
struct btrfs_device *dev;
union {
/* Block mapping */
u64 physical;
/* For the endio handler */
struct btrfs_io_context *bioc;
};
};
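Reading note on the union above (an observation from the diff, not extra patch text): the same eight bytes serve both phases of a stripe's life. The mapping code consumes stripes[i].physical, then submit_stripe_bio() stores the owning btrfs_io_context in stripes[i].bioc and points bio->bi_private at the stripe, so the per-stripe end_io handler can reach the context without a separate allocation.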
struct btrfs_discard_stripe {
struct btrfs_device *dev;
u64 physical;
u64 length; /* only used for discard mappings */
u64 length;
};
/*
@ -533,6 +569,9 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_io_context **bioc_ret);
struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
u64 logical, u64 *length_ret,
u32 *num_stripes);
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map,
enum btrfs_map_op op, u64 logical,
struct btrfs_io_geometry *io_geom);
@ -541,8 +580,7 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
u64 type);
void btrfs_mapping_tree_free(struct extent_map_tree *tree);
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num);
void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror_num);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder);
struct btrfs_device *btrfs_scan_one_device(const char *path,
@ -601,6 +639,8 @@ int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
u64 logical, u64 len);
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
u64 logical);
u64 btrfs_calc_stripe_length(const struct extent_map *em);
int btrfs_nr_parity_stripes(u64 type);
int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
struct btrfs_block_group *bg);
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);


@ -97,7 +97,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret;
char *data_in;
char *data_in = NULL;
char *cpage_out;
int nr_pages = 0;
struct page *in_page = NULL;
@ -126,7 +126,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
ret = -ENOMEM;
goto out;
}
cpage_out = kmap(out_page);
cpage_out = page_address(out_page);
pages[0] = out_page;
nr_pages = 1;
@ -148,26 +148,26 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
int i;
for (i = 0; i < in_buf_pages; i++) {
if (in_page) {
kunmap(in_page);
if (data_in) {
kunmap_local(data_in);
put_page(in_page);
}
in_page = find_get_page(mapping,
start >> PAGE_SHIFT);
data_in = kmap(in_page);
data_in = kmap_local_page(in_page);
memcpy(workspace->buf + i * PAGE_SIZE,
data_in, PAGE_SIZE);
start += PAGE_SIZE;
}
workspace->strm.next_in = workspace->buf;
} else {
if (in_page) {
kunmap(in_page);
if (data_in) {
kunmap_local(data_in);
put_page(in_page);
}
in_page = find_get_page(mapping,
start >> PAGE_SHIFT);
data_in = kmap(in_page);
data_in = kmap_local_page(in_page);
start += PAGE_SIZE;
workspace->strm.next_in = data_in;
}
@ -196,9 +196,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
* the stream end if required
*/
if (workspace->strm.avail_out == 0) {
kunmap(out_page);
if (nr_pages == nr_dest_pages) {
out_page = NULL;
ret = -E2BIG;
goto out;
}
@ -207,7 +205,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
ret = -ENOMEM;
goto out;
}
cpage_out = kmap(out_page);
cpage_out = page_address(out_page);
pages[nr_pages] = out_page;
nr_pages++;
workspace->strm.avail_out = PAGE_SIZE;
@ -234,9 +232,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
goto out;
} else if (workspace->strm.avail_out == 0) {
/* get another page for the stream end */
kunmap(out_page);
if (nr_pages == nr_dest_pages) {
out_page = NULL;
ret = -E2BIG;
goto out;
}
@ -245,7 +241,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
ret = -ENOMEM;
goto out;
}
cpage_out = kmap(out_page);
cpage_out = page_address(out_page);
pages[nr_pages] = out_page;
nr_pages++;
workspace->strm.avail_out = PAGE_SIZE;
@ -264,13 +260,11 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
*total_in = workspace->strm.total_in;
out:
*out_pages = nr_pages;
if (out_page)
kunmap(out_page);
if (in_page) {
kunmap(in_page);
if (data_in) {
kunmap_local(data_in);
put_page(in_page);
}
return ret;
}
@ -287,7 +281,7 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
unsigned long buf_start;
struct page **pages_in = cb->compressed_pages;
data_in = kmap(pages_in[page_in_index]);
data_in = kmap_local_page(pages_in[page_in_index]);
workspace->strm.next_in = data_in;
workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
workspace->strm.total_in = 0;
@ -309,7 +303,7 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
pr_warn("BTRFS: inflateInit failed\n");
kunmap(pages_in[page_in_index]);
kunmap_local(data_in);
return -EIO;
}
while (workspace->strm.total_in < srclen) {
@ -336,13 +330,13 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
if (workspace->strm.avail_in == 0) {
unsigned long tmp;
kunmap(pages_in[page_in_index]);
kunmap_local(data_in);
page_in_index++;
if (page_in_index >= total_pages_in) {
data_in = NULL;
break;
}
data_in = kmap(pages_in[page_in_index]);
data_in = kmap_local_page(pages_in[page_in_index]);
workspace->strm.next_in = data_in;
tmp = srclen - workspace->strm.total_in;
workspace->strm.avail_in = min(tmp, PAGE_SIZE);
@ -355,7 +349,7 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
done:
zlib_inflateEnd(&workspace->strm);
if (data_in)
kunmap(pages_in[page_in_index]);
kunmap_local(data_in);
if (!ret)
zero_fill_bio(cb->orig_bio);
return ret;


@ -94,9 +94,9 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
* Possible states of log buffer zones
*
* Empty[0] In use[0] Full[0]
* Empty[1] * x 0
* In use[1] 0 x 0
* Full[1] 1 1 C
* Empty[1] * 0 1
* In use[1] x x 1
* Full[1] 0 0 C
*
* Log position:
* *: Special case, no superblock is written
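An editor's note on the corrected table: in the hunk above the first three rows after the header are the old values and the next three the fixed ones. Columns give the state of zones[0], rows the state of zones[1], and each cell says where the superblock log position comes from (0 = write pointer of zones[0], 1 = write pointer of zones[1], C = compare the superblocks in both zones, x = invalid combination, * = nothing written yet). For example, zones[0] Full with zones[1] In use now yields 1, so the log position follows zones[1].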
@ -415,6 +415,16 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
nr_sectors = bdev_nr_sectors(bdev);
zone_info->zone_size_shift = ilog2(zone_info->zone_size);
zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
/*
* We limit max_zone_append_size also by max_segments *
* PAGE_SIZE. Technically, we can have multiple pages per segment. But,
* since btrfs adds the pages one by one to a bio, and btrfs cannot
* increase the metadata reservation even if it increases the number of
* extents, it is safe to stick with the limit.
*/
zone_info->max_zone_append_size =
min_t(u64, (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT,
(u64)bdev_max_segments(bdev) << PAGE_SHIFT);
if (!IS_ALIGNED(nr_sectors, zone_sectors))
zone_info->nr_zones++;
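To put the clamp in numbers (hypothetical device limits, not from the patch): a zoned drive reporting max_zone_append_sectors = 512 (256 KiB) but only 32 max_segments ends up, with 4 KiB pages, with max_zone_append_size = min(256 KiB, 32 * 4 KiB) = 128 KiB, because btrfs adds one page per segment when building a bio.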
@ -640,6 +650,7 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
u64 zoned_devices = 0;
u64 nr_devices = 0;
u64 zone_size = 0;
u64 max_zone_append_size = 0;
const bool incompat_zoned = btrfs_fs_incompat(fs_info, ZONED);
int ret = 0;
@ -674,6 +685,11 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
ret = -EINVAL;
goto out;
}
if (!max_zone_append_size ||
(zone_info->max_zone_append_size &&
zone_info->max_zone_append_size < max_zone_append_size))
max_zone_append_size =
zone_info->max_zone_append_size;
}
nr_devices++;
}
@ -723,7 +739,11 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
}
fs_info->zone_size = zone_size;
fs_info->max_zone_append_size = ALIGN_DOWN(max_zone_append_size,
fs_info->sectorsize);
fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
if (fs_info->max_zone_append_size < fs_info->max_extent_size)
fs_info->max_extent_size = fs_info->max_zone_append_size;
/*
* Check mount options here, because we might change fs_info->zoned
@ -1829,6 +1849,7 @@ struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
bool btrfs_zone_activate(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_space_info *space_info = block_group->space_info;
struct map_lookup *map;
struct btrfs_device *device;
u64 physical;
@ -1840,6 +1861,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
map = block_group->physical_map;
spin_lock(&space_info->lock);
spin_lock(&block_group->lock);
if (block_group->zone_is_active) {
ret = true;
@ -1868,7 +1890,10 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
/* Successfully activated all the zones */
block_group->zone_is_active = 1;
space_info->active_total_bytes += block_group->length;
spin_unlock(&block_group->lock);
btrfs_try_granting_tickets(fs_info, space_info);
spin_unlock(&space_info->lock);
/* For the active block group list */
btrfs_get_block_group(block_group);
@ -1881,6 +1906,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
out_unlock:
spin_unlock(&block_group->lock);
spin_unlock(&space_info->lock);
return ret;
}
@ -1981,6 +2007,9 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
/* For active_bg_list */
btrfs_put_block_group(block_group);
clear_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
wake_up_all(&fs_info->zone_finish_wait);
return 0;
}
@ -2017,6 +2046,9 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
}
mutex_unlock(&fs_info->chunk_mutex);
if (!ret)
set_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
return ret;
}
@ -2160,3 +2192,96 @@ out:
spin_unlock(&block_group->lock);
btrfs_put_block_group(block_group);
}
int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
{
struct btrfs_block_group *block_group;
struct btrfs_block_group *min_bg = NULL;
u64 min_avail = U64_MAX;
int ret;
spin_lock(&fs_info->zone_active_bgs_lock);
list_for_each_entry(block_group, &fs_info->zone_active_bgs,
active_bg_list) {
u64 avail;
spin_lock(&block_group->lock);
if (block_group->reserved ||
(block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)) {
spin_unlock(&block_group->lock);
continue;
}
avail = block_group->zone_capacity - block_group->alloc_offset;
if (min_avail > avail) {
if (min_bg)
btrfs_put_block_group(min_bg);
min_bg = block_group;
min_avail = avail;
btrfs_get_block_group(min_bg);
}
spin_unlock(&block_group->lock);
}
spin_unlock(&fs_info->zone_active_bgs_lock);
if (!min_bg)
return 0;
ret = btrfs_zone_finish(min_bg);
btrfs_put_block_group(min_bg);
return ret < 0 ? ret : 1;
}
int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
bool do_finish)
{
struct btrfs_block_group *bg;
int index;
if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
return 0;
/* No more block groups to activate */
if (space_info->active_total_bytes == space_info->total_bytes)
return 0;
for (;;) {
int ret;
bool need_finish = false;
down_read(&space_info->groups_sem);
for (index = 0; index < BTRFS_NR_RAID_TYPES; index++) {
list_for_each_entry(bg, &space_info->block_groups[index],
list) {
if (!spin_trylock(&bg->lock))
continue;
if (btrfs_zoned_bg_is_full(bg) || bg->zone_is_active) {
spin_unlock(&bg->lock);
continue;
}
spin_unlock(&bg->lock);
if (btrfs_zone_activate(bg)) {
up_read(&space_info->groups_sem);
return 1;
}
need_finish = true;
}
}
up_read(&space_info->groups_sem);
if (!do_finish || !need_finish)
break;
ret = btrfs_zone_finish_one_bg(fs_info);
if (ret == 0)
break;
if (ret < 0)
return ret;
}
return 0;
}


@ -19,6 +19,7 @@ struct btrfs_zoned_device_info {
*/
u64 zone_size;
u8 zone_size_shift;
u64 max_zone_append_size;
u32 nr_zones;
unsigned int max_active_zones;
atomic_t active_zones_left;
@ -79,6 +80,9 @@ void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info);
bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info);
void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
u64 length);
int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info);
int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info, bool do_finish);
#else /* CONFIG_BLK_DEV_ZONED */
static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
struct blk_zone *zone)
@ -248,6 +252,20 @@ static inline bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
static inline void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info,
u64 logical, u64 length) { }
static inline int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
{
return 1;
}
static inline int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
bool do_finish)
{
/* Consider all block groups to be active */
return 0;
}
#endif
static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)


@ -403,7 +403,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
/* map in the first page of input data */
in_page = find_get_page(mapping, start >> PAGE_SHIFT);
workspace->in_buf.src = kmap(in_page);
workspace->in_buf.src = kmap_local_page(in_page);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
@ -415,7 +415,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
goto out;
}
pages[nr_pages++] = out_page;
workspace->out_buf.dst = kmap(out_page);
workspace->out_buf.dst = page_address(out_page);
workspace->out_buf.pos = 0;
workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
@ -450,9 +450,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
if (workspace->out_buf.pos == workspace->out_buf.size) {
tot_out += PAGE_SIZE;
max_out -= PAGE_SIZE;
kunmap(out_page);
if (nr_pages == nr_dest_pages) {
out_page = NULL;
ret = -E2BIG;
goto out;
}
@ -462,7 +460,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
goto out;
}
pages[nr_pages++] = out_page;
workspace->out_buf.dst = kmap(out_page);
workspace->out_buf.dst = page_address(out_page);
workspace->out_buf.pos = 0;
workspace->out_buf.size = min_t(size_t, max_out,
PAGE_SIZE);
@ -477,13 +475,12 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
/* Check if we need more input */
if (workspace->in_buf.pos == workspace->in_buf.size) {
tot_in += PAGE_SIZE;
kunmap(in_page);
kunmap_local(workspace->in_buf.src);
put_page(in_page);
start += PAGE_SIZE;
len -= PAGE_SIZE;
in_page = find_get_page(mapping, start >> PAGE_SHIFT);
workspace->in_buf.src = kmap(in_page);
workspace->in_buf.src = kmap_local_page(in_page);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
}
@ -510,9 +507,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
tot_out += PAGE_SIZE;
max_out -= PAGE_SIZE;
kunmap(out_page);
if (nr_pages == nr_dest_pages) {
out_page = NULL;
ret = -E2BIG;
goto out;
}
@ -522,7 +517,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
goto out;
}
pages[nr_pages++] = out_page;
workspace->out_buf.dst = kmap(out_page);
workspace->out_buf.dst = page_address(out_page);
workspace->out_buf.pos = 0;
workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
}
@ -537,13 +532,10 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
*total_out = tot_out;
out:
*out_pages = nr_pages;
/* Cleanup */
if (in_page) {
kunmap(in_page);
if (workspace->in_buf.src) {
kunmap_local(workspace->in_buf.src);
put_page(in_page);
}
if (out_page)
kunmap(out_page);
return ret;
}
@ -567,7 +559,7 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
goto done;
}
workspace->in_buf.src = kmap(pages_in[page_in_index]);
workspace->in_buf.src = kmap_local_page(pages_in[page_in_index]);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
@ -603,14 +595,15 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
break;
if (workspace->in_buf.pos == workspace->in_buf.size) {
kunmap(pages_in[page_in_index++]);
kunmap_local(workspace->in_buf.src);
page_in_index++;
if (page_in_index >= total_pages_in) {
workspace->in_buf.src = NULL;
ret = -EIO;
goto done;
}
srclen -= PAGE_SIZE;
workspace->in_buf.src = kmap(pages_in[page_in_index]);
workspace->in_buf.src = kmap_local_page(pages_in[page_in_index]);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
}
@ -619,7 +612,7 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
zero_fill_bio(cb->orig_bio);
done:
if (workspace->in_buf.src)
kunmap(pages_in[page_in_index]);
kunmap_local(workspace->in_buf.src);
return ret;
}


@ -1167,6 +1167,11 @@ bdev_max_zone_append_sectors(struct block_device *bdev)
return queue_max_zone_append_sectors(bdev_get_queue(bdev));
}
static inline unsigned int bdev_max_segments(struct block_device *bdev)
{
return queue_max_segments(bdev_get_queue(bdev));
}
static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
int retval = 512;


@ -8,7 +8,7 @@
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
void kunmap_local_indexed(void *vaddr);
void kunmap_local_indexed(const void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);
@ -89,7 +89,7 @@ static inline void *kmap_local_pfn(unsigned long pfn)
return __kmap_local_pfn_prot(pfn, kmap_prot);
}
static inline void __kunmap_local(void *vaddr)
static inline void __kunmap_local(const void *vaddr)
{
kunmap_local_indexed(vaddr);
}
@ -121,7 +121,7 @@ static inline void *kmap_atomic_pfn(unsigned long pfn)
return __kmap_local_pfn_prot(pfn, kmap_prot);
}
static inline void __kunmap_atomic(void *addr)
static inline void __kunmap_atomic(const void *addr)
{
kunmap_local_indexed(addr);
pagefault_enable();
@ -197,7 +197,7 @@ static inline void *kmap_local_pfn(unsigned long pfn)
return kmap_local_page(pfn_to_page(pfn));
}
static inline void __kunmap_local(void *addr)
static inline void __kunmap_local(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
kunmap_flush_on_unmap(addr);
@ -224,7 +224,7 @@ static inline void *kmap_atomic_pfn(unsigned long pfn)
return kmap_atomic(pfn_to_page(pfn));
}
static inline void __kunmap_atomic(void *addr)
static inline void __kunmap_atomic(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
kunmap_flush_on_unmap(addr);


@ -30,6 +30,8 @@ struct btrfs_qgroup;
struct extent_io_tree;
struct prelim_ref;
struct btrfs_space_info;
struct btrfs_raid_bio;
struct raid56_bio_trace_info;
#define show_ref_type(type) \
__print_symbolic(type, \
@ -596,6 +598,70 @@ DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_put,
TP_ARGS(inode, ordered)
);
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup,
TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered)
);
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_range,
TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered)
);
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_first_range,
TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered)
);
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_for_logging,
TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered)
);
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_first,
TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered)
);
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_split,
TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered)
);
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_dec_test_pending,
TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered)
);
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_mark_finished,
TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered)
);
DECLARE_EVENT_CLASS(btrfs__writepage,
TP_PROTO(const struct page *page, const struct inode *inode,
@ -2258,6 +2324,98 @@ DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned,
TP_ARGS(fs_info, sinfo, old, diff)
);
DECLARE_EVENT_CLASS(btrfs_raid56_bio,
TP_PROTO(const struct btrfs_raid_bio *rbio,
const struct bio *bio,
const struct raid56_bio_trace_info *trace_info),
TP_ARGS(rbio, bio, trace_info),
TP_STRUCT__entry_btrfs(
__field( u64, full_stripe )
__field( u64, physical )
__field( u64, devid )
__field( u32, offset )
__field( u32, len )
__field( u8, opf )
__field( u8, total_stripes )
__field( u8, real_stripes )
__field( u8, nr_data )
__field( u8, stripe_nr )
),
TP_fast_assign_btrfs(rbio->bioc->fs_info,
__entry->full_stripe = rbio->bioc->raid_map[0];
__entry->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
__entry->len = bio->bi_iter.bi_size;
__entry->opf = bio_op(bio);
__entry->devid = trace_info->devid;
__entry->offset = trace_info->offset;
__entry->stripe_nr = trace_info->stripe_nr;
__entry->total_stripes = rbio->bioc->num_stripes;
__entry->real_stripes = rbio->real_stripes;
__entry->nr_data = rbio->nr_data;
),
/*
* For type output, we need to output things like "DATA1"
* (the first data stripe), "DATA2" (the second data stripe),
* "PQ1" (P stripe), "PQ2" (Q stripe), "REPLACE0" (replace target device).
*/
TP_printk_btrfs(
"full_stripe=%llu devid=%lld type=%s%d offset=%d opf=0x%x physical=%llu len=%u",
__entry->full_stripe, __entry->devid,
(__entry->stripe_nr < __entry->nr_data) ? "DATA" :
((__entry->stripe_nr < __entry->real_stripes) ? "PQ" :
"REPLACE"),
(__entry->stripe_nr < __entry->nr_data) ?
(__entry->stripe_nr + 1) :
((__entry->stripe_nr < __entry->real_stripes) ?
(__entry->stripe_nr - __entry->nr_data + 1) : 0),
__entry->offset, __entry->opf, __entry->physical, __entry->len)
);
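A concrete reading of the type/number output above (an example stripe layout, not captured trace output): with a four-device RAID6 full stripe where nr_data = 2 and real_stripes = 4, stripe_nr 0 and 1 print as DATA1 and DATA2, stripe_nr 2 and 3 as PQ1 (the P stripe) and PQ2 (the Q stripe), and a stripe index at or beyond real_stripes — the replace target — prints as REPLACE0.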
DEFINE_EVENT(btrfs_raid56_bio, raid56_read_partial,
TP_PROTO(const struct btrfs_raid_bio *rbio,
const struct bio *bio,
const struct raid56_bio_trace_info *trace_info),
TP_ARGS(rbio, bio, trace_info)
);
DEFINE_EVENT(btrfs_raid56_bio, raid56_write_stripe,
TP_PROTO(const struct btrfs_raid_bio *rbio,
const struct bio *bio,
const struct raid56_bio_trace_info *trace_info),
TP_ARGS(rbio, bio, trace_info)
);
DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_write_stripe,
TP_PROTO(const struct btrfs_raid_bio *rbio,
const struct bio *bio,
const struct raid56_bio_trace_info *trace_info),
TP_ARGS(rbio, bio, trace_info)
);
DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_read,
TP_PROTO(const struct btrfs_raid_bio *rbio,
const struct bio *bio,
const struct raid56_bio_trace_info *trace_info),
TP_ARGS(rbio, bio, trace_info)
);
DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_read_recover,
TP_PROTO(const struct btrfs_raid_bio *rbio,
const struct bio *bio,
const struct raid56_bio_trace_info *trace_info),
TP_ARGS(rbio, bio, trace_info)
);
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */


@ -777,11 +777,19 @@ struct btrfs_ioctl_received_subvol_args {
*/
#define BTRFS_SEND_FLAG_VERSION 0x8
/*
* Send compressed data using the ENCODED_WRITE command instead of decompressing
* the data and sending it with the WRITE command. This requires protocol
* version >= 2.
*/
#define BTRFS_SEND_FLAG_COMPRESSED 0x10
#define BTRFS_SEND_FLAG_MASK \
(BTRFS_SEND_FLAG_NO_FILE_DATA | \
BTRFS_SEND_FLAG_OMIT_STREAM_HEADER | \
BTRFS_SEND_FLAG_OMIT_END_CMD | \
BTRFS_SEND_FLAG_VERSION)
BTRFS_SEND_FLAG_VERSION | \
BTRFS_SEND_FLAG_COMPRESSED)
struct btrfs_ioctl_send_args {
__s64 send_fd; /* in */
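A hedged userspace sketch of how the new flag might be requested (minimal error handling, assuming the version field that accompanies BTRFS_SEND_FLAG_VERSION; this is illustrative code, not btrfs-progs):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

/* Ask for a protocol v2 send stream with compressed extents passed through. */
static int send_subvol_compressed(int subvol_fd, int stream_fd)
{
	struct btrfs_ioctl_send_args args;

	memset(&args, 0, sizeof(args));
	args.send_fd = stream_fd;
	args.flags = BTRFS_SEND_FLAG_VERSION | BTRFS_SEND_FLAG_COMPRESSED;
	args.version = 2;	/* BTRFS_SEND_FLAG_COMPRESSED needs proto >= 2 */

	return ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
}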


@ -561,7 +561,7 @@ void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
}
EXPORT_SYMBOL(__kmap_local_page_prot);
void kunmap_local_indexed(void *vaddr)
void kunmap_local_indexed(const void *vaddr)
{
unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
pte_t *kmap_pte;