[XFS] Complete the pagebuf -> xfs_buf naming convention transition, finally.

SGI-PV: 947038
SGI-Modid: xfs-linux-melb:xfs-kern:24866a

Signed-off-by: Nathan Scott <nathans@sgi.com>
commit ce8e922c0e
parent 68bdb6eabc
Author: Nathan Scott <nathans@sgi.com>
Date:   2006-01-11 15:39:08 +11:00

10 changed files with 867 additions and 1090 deletions
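
The rename is mechanical; as a rough guide to the hunks below (inferred from this diff, not an exhaustive list):

    pagebuf_*() functions        -> xfs_buf_*()
    page_buf_*_t types           -> xfs_buf_*_t
    PBF_* / _PBF_* buffer flags  -> XBF_* / _XBF_*
    PBRW_* transfer modes        -> XBRW_*
    pb_* xfs_buf fields          -> b_*
    pbr_* xfs_buftarg fields     -> bt_*
    PB_PAGES                     -> XB_PAGES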


@@ -273,7 +273,7 @@ xfs_map_at_offset(
lock_buffer(bh);
bh->b_blocknr = bn;
bh->b_bdev = iomapp->iomap_target->pbr_bdev;
bh->b_bdev = iomapp->iomap_target->bt_bdev;
set_buffer_mapped(bh);
clear_buffer_delay(bh);
}
@@ -982,7 +982,7 @@ __linvfs_get_block(
}
/* If this is a realtime file, data might be on a new device */
bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
bh_result->b_bdev = iomap.iomap_target->bt_bdev;
/* If we previously allocated a block out beyond eof and
* we are now coming back to use it then we will need to
@@ -1097,7 +1097,7 @@ linvfs_direct_IO(
iocb->private = xfs_alloc_ioend(inode);
ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
iomap.iomap_target->pbr_bdev,
iomap.iomap_target->bt_bdev,
iov, offset, nr_segs,
linvfs_get_blocks_direct,
linvfs_end_io_direct);

File diff suppressed because it is too large.


@@ -32,44 +32,47 @@
* Base types
*/
#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
#define page_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE)
#define page_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)
#define page_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT)
#define page_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK)
#define xfs_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE)
#define xfs_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT)
#define xfs_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT)
#define xfs_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK)
typedef enum page_buf_rw_e {
PBRW_READ = 1, /* transfer into target memory */
PBRW_WRITE = 2, /* transfer from target memory */
PBRW_ZERO = 3 /* Zero target memory */
} page_buf_rw_t;
typedef enum {
XBRW_READ = 1, /* transfer into target memory */
XBRW_WRITE = 2, /* transfer from target memory */
XBRW_ZERO = 3, /* Zero target memory */
} xfs_buf_rw_t;
typedef enum page_buf_flags_e { /* pb_flags values */
PBF_READ = (1 << 0), /* buffer intended for reading from device */
PBF_WRITE = (1 << 1), /* buffer intended for writing to device */
PBF_MAPPED = (1 << 2), /* buffer mapped (pb_addr valid) */
PBF_ASYNC = (1 << 4), /* initiator will not wait for completion */
PBF_DONE = (1 << 5), /* all pages in the buffer uptodate */
PBF_DELWRI = (1 << 6), /* buffer has dirty pages */
PBF_STALE = (1 << 7), /* buffer has been staled, do not find it */
PBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */
PBF_ORDERED = (1 << 11), /* use ordered writes */
PBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */
typedef enum {
XBF_READ = (1 << 0), /* buffer intended for reading from device */
XBF_WRITE = (1 << 1), /* buffer intended for writing to device */
XBF_MAPPED = (1 << 2), /* buffer mapped (b_addr valid) */
XBF_ASYNC = (1 << 4), /* initiator will not wait for completion */
XBF_DONE = (1 << 5), /* all pages in the buffer uptodate */
XBF_DELWRI = (1 << 6), /* buffer has dirty pages */
XBF_STALE = (1 << 7), /* buffer has been staled, do not find it */
XBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */
XBF_ORDERED = (1 << 11), /* use ordered writes */
XBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */
/* flags used only as arguments to access routines */
PBF_LOCK = (1 << 14), /* lock requested */
PBF_TRYLOCK = (1 << 15), /* lock requested, but do not wait */
PBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */
XBF_LOCK = (1 << 14), /* lock requested */
XBF_TRYLOCK = (1 << 15), /* lock requested, but do not wait */
XBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */
/* flags used only internally */
_PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */
_PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */
_PBF_RUN_QUEUES = (1 << 19),/* run block device task queue */
_PBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */
} page_buf_flags_t;
_XBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */
_XBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */
_XBF_RUN_QUEUES = (1 << 19),/* run block device task queue */
_XBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */
} xfs_buf_flags_t;
typedef enum {
XBT_FORCE_SLEEP = (0 << 1),
XBT_FORCE_FLUSH = (1 << 1),
} xfs_buftarg_flags_t;
typedef struct xfs_bufhash {
struct list_head bh_list;
@@ -77,14 +80,14 @@ typedef struct xfs_bufhash {
} xfs_bufhash_t;
typedef struct xfs_buftarg {
dev_t pbr_dev;
struct block_device *pbr_bdev;
struct address_space *pbr_mapping;
unsigned int pbr_bsize;
unsigned int pbr_sshift;
size_t pbr_smask;
dev_t bt_dev;
struct block_device *bt_bdev;
struct address_space *bt_mapping;
unsigned int bt_bsize;
unsigned int bt_sshift;
size_t bt_smask;
/* per-device buffer hash table */
/* per device buffer hash table */
uint bt_hashmask;
uint bt_hashshift;
xfs_bufhash_t *bt_hash;
@@ -94,469 +97,333 @@ typedef struct xfs_buftarg {
struct list_head bt_list;
struct list_head bt_delwrite_queue;
spinlock_t bt_delwrite_lock;
uint bt_flags;
#define BT_FORCE_SLEEP 1
#define BT_FORCE_FLUSH 2
unsigned long bt_flags;
} xfs_buftarg_t;
/*
* xfs_buf_t: Buffer structure for page cache-based buffers
* xfs_buf_t: Buffer structure for pagecache-based buffers
*
* This buffer structure is used by the page cache buffer management routines
* to refer to an assembly of pages forming a logical buffer. The actual I/O
* is performed with buffer_head structures, as required by drivers.
*
* The buffer structure is used on temporary basis only, and discarded when
* released. The real data storage is recorded in the page cache. Metadata is
* This buffer structure is used by the pagecache buffer management routines
* to refer to an assembly of pages forming a logical buffer.
*
* The buffer structure is used on a temporary basis only, and discarded when
* released. The real data storage is recorded in the pagecache. Buffers are
* hashed to the block device on which the file system resides.
*/
struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
typedef void (*xfs_buf_relse_t)(struct xfs_buf *);
typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *);
/* call-back function on I/O completion */
typedef void (*page_buf_iodone_t)(struct xfs_buf *);
/* call-back function on I/O completion */
typedef void (*page_buf_relse_t)(struct xfs_buf *);
/* pre-write function */
typedef int (*page_buf_bdstrat_t)(struct xfs_buf *);
#define PB_PAGES 2
#define XB_PAGES 2
typedef struct xfs_buf {
struct semaphore pb_sema; /* semaphore for lockables */
unsigned long pb_queuetime; /* time buffer was queued */
atomic_t pb_pin_count; /* pin count */
wait_queue_head_t pb_waiters; /* unpin waiters */
struct list_head pb_list;
page_buf_flags_t pb_flags; /* status flags */
struct list_head pb_hash_list; /* hash table list */
xfs_bufhash_t *pb_hash; /* hash table list start */
xfs_buftarg_t *pb_target; /* buffer target (device) */
atomic_t pb_hold; /* reference count */
xfs_daddr_t pb_bn; /* block number for I/O */
loff_t pb_file_offset; /* offset in file */
size_t pb_buffer_length; /* size of buffer in bytes */
size_t pb_count_desired; /* desired transfer size */
void *pb_addr; /* virtual address of buffer */
struct work_struct pb_iodone_work;
atomic_t pb_io_remaining;/* #outstanding I/O requests */
page_buf_iodone_t pb_iodone; /* I/O completion function */
page_buf_relse_t pb_relse; /* releasing function */
page_buf_bdstrat_t pb_strat; /* pre-write function */
struct semaphore pb_iodonesema; /* Semaphore for I/O waiters */
void *pb_fspriv;
void *pb_fspriv2;
void *pb_fspriv3;
unsigned short pb_error; /* error code on I/O */
unsigned short pb_locked; /* page array is locked */
unsigned int pb_page_count; /* size of page array */
unsigned int pb_offset; /* page offset in first page */
struct page **pb_pages; /* array of page pointers */
struct page *pb_page_array[PB_PAGES]; /* inline pages */
#ifdef PAGEBUF_LOCK_TRACKING
int pb_last_holder;
struct semaphore b_sema; /* semaphore for lockables */
unsigned long b_queuetime; /* time buffer was queued */
atomic_t b_pin_count; /* pin count */
wait_queue_head_t b_waiters; /* unpin waiters */
struct list_head b_list;
xfs_buf_flags_t b_flags; /* status flags */
struct list_head b_hash_list; /* hash table list */
xfs_bufhash_t *b_hash; /* hash table list start */
xfs_buftarg_t *b_target; /* buffer target (device) */
atomic_t b_hold; /* reference count */
xfs_daddr_t b_bn; /* block number for I/O */
xfs_off_t b_file_offset; /* offset in file */
size_t b_buffer_length;/* size of buffer in bytes */
size_t b_count_desired;/* desired transfer size */
void *b_addr; /* virtual address of buffer */
struct work_struct b_iodone_work;
atomic_t b_io_remaining; /* #outstanding I/O requests */
xfs_buf_iodone_t b_iodone; /* I/O completion function */
xfs_buf_relse_t b_relse; /* releasing function */
xfs_buf_bdstrat_t b_strat; /* pre-write function */
struct semaphore b_iodonesema; /* Semaphore for I/O waiters */
void *b_fspriv;
void *b_fspriv2;
void *b_fspriv3;
unsigned short b_error; /* error code on I/O */
unsigned short b_locked; /* page array is locked */
unsigned int b_page_count; /* size of page array */
unsigned int b_offset; /* page offset in first page */
struct page **b_pages; /* array of page pointers */
struct page *b_page_array[XB_PAGES]; /* inline pages */
#ifdef XFS_BUF_LOCK_TRACKING
int b_last_holder;
#endif
} xfs_buf_t;
/* Finding and Reading Buffers */
extern xfs_buf_t *_pagebuf_find( /* find buffer for block if */
/* the block is in memory */
xfs_buftarg_t *, /* inode for block */
loff_t, /* starting offset of range */
size_t, /* length of range */
page_buf_flags_t, /* PBF_LOCK */
xfs_buf_t *); /* newly allocated buffer */
extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t,
xfs_buf_flags_t, xfs_buf_t *);
#define xfs_incore(buftarg,blkno,len,lockit) \
_pagebuf_find(buftarg, blkno ,len, lockit, NULL)
extern xfs_buf_t *xfs_buf_get_flags( /* allocate a buffer */
xfs_buftarg_t *, /* inode for buffer */
loff_t, /* starting offset of range */
size_t, /* length of range */
page_buf_flags_t); /* PBF_LOCK, PBF_READ, */
/* PBF_ASYNC */
_xfs_buf_find(buftarg, blkno ,len, lockit, NULL)
extern xfs_buf_t *xfs_buf_get_flags(xfs_buftarg_t *, xfs_off_t, size_t,
xfs_buf_flags_t);
#define xfs_buf_get(target, blkno, len, flags) \
xfs_buf_get_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED)
extern xfs_buf_t *xfs_buf_read_flags( /* allocate and read a buffer */
xfs_buftarg_t *, /* inode for buffer */
loff_t, /* starting offset of range */
size_t, /* length of range */
page_buf_flags_t); /* PBF_LOCK, PBF_ASYNC */
xfs_buf_get_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
extern xfs_buf_t *xfs_buf_read_flags(xfs_buftarg_t *, xfs_off_t, size_t,
xfs_buf_flags_t);
#define xfs_buf_read(target, blkno, len, flags) \
xfs_buf_read_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED)
xfs_buf_read_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
extern xfs_buf_t *pagebuf_get_empty( /* allocate pagebuf struct with */
/* no memory or disk address */
size_t len,
xfs_buftarg_t *); /* mount point "fake" inode */
extern xfs_buf_t *pagebuf_get_no_daddr(/* allocate pagebuf struct */
/* without disk address */
size_t len,
xfs_buftarg_t *); /* mount point "fake" inode */
extern int pagebuf_associate_memory(
xfs_buf_t *,
void *,
size_t);
extern void pagebuf_hold( /* increment reference count */
xfs_buf_t *); /* buffer to hold */
extern void pagebuf_readahead( /* read ahead into cache */
xfs_buftarg_t *, /* target for buffer (or NULL) */
loff_t, /* starting offset of range */
size_t, /* length of range */
page_buf_flags_t); /* additional read flags */
extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *);
extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *);
extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t);
extern void xfs_buf_hold(xfs_buf_t *);
extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t,
xfs_buf_flags_t);
/* Releasing Buffers */
extern void pagebuf_free( /* deallocate a buffer */
xfs_buf_t *); /* buffer to deallocate */
extern void pagebuf_rele( /* release hold on a buffer */
xfs_buf_t *); /* buffer to release */
extern void xfs_buf_free(xfs_buf_t *);
extern void xfs_buf_rele(xfs_buf_t *);
/* Locking and Unlocking Buffers */
extern int pagebuf_cond_lock( /* lock buffer, if not locked */
/* (returns -EBUSY if locked) */
xfs_buf_t *); /* buffer to lock */
extern int pagebuf_lock_value( /* return count on lock */
xfs_buf_t *); /* buffer to check */
extern int pagebuf_lock( /* lock buffer */
xfs_buf_t *); /* buffer to lock */
extern void pagebuf_unlock( /* unlock buffer */
xfs_buf_t *); /* buffer to unlock */
extern int xfs_buf_cond_lock(xfs_buf_t *);
extern int xfs_buf_lock_value(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
/* Buffer Read and Write Routines */
extern void xfs_buf_ioend(xfs_buf_t *, int);
extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern int xfs_buf_iostart(xfs_buf_t *, xfs_buf_flags_t);
extern int xfs_buf_iorequest(xfs_buf_t *);
extern int xfs_buf_iowait(xfs_buf_t *);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, xfs_caddr_t,
xfs_buf_rw_t);
extern void pagebuf_iodone( /* mark buffer I/O complete */
xfs_buf_t *, /* buffer to mark */
int); /* run completion locally, or in
* a helper thread. */
extern void pagebuf_ioerror( /* mark buffer in error (or not) */
xfs_buf_t *, /* buffer to mark */
int); /* error to store (0 if none) */
extern int pagebuf_iostart( /* start I/O on a buffer */
xfs_buf_t *, /* buffer to start */
page_buf_flags_t); /* PBF_LOCK, PBF_ASYNC, */
/* PBF_READ, PBF_WRITE, */
/* PBF_DELWRI */
extern int pagebuf_iorequest( /* start real I/O */
xfs_buf_t *); /* buffer to convey to device */
extern int pagebuf_iowait( /* wait for buffer I/O done */
xfs_buf_t *); /* buffer to wait on */
extern void pagebuf_iomove( /* move data in/out of pagebuf */
xfs_buf_t *, /* buffer to manipulate */
size_t, /* starting buffer offset */
size_t, /* length in buffer */
caddr_t, /* data pointer */
page_buf_rw_t); /* direction */
static inline int pagebuf_iostrategy(xfs_buf_t *pb)
static inline int xfs_buf_iostrategy(xfs_buf_t *bp)
{
return pb->pb_strat ? pb->pb_strat(pb) : pagebuf_iorequest(pb);
return bp->b_strat ? bp->b_strat(bp) : xfs_buf_iorequest(bp);
}
static inline int pagebuf_geterror(xfs_buf_t *pb)
static inline int xfs_buf_geterror(xfs_buf_t *bp)
{
return pb ? pb->pb_error : ENOMEM;
return bp ? bp->b_error : ENOMEM;
}
/* Buffer Utility Routines */
extern caddr_t pagebuf_offset( /* pointer at offset in buffer */
xfs_buf_t *, /* buffer to offset into */
size_t); /* offset */
extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
/* Pinning Buffer Storage in Memory */
extern void pagebuf_pin( /* pin buffer in memory */
xfs_buf_t *); /* buffer to pin */
extern void pagebuf_unpin( /* unpin buffered data */
xfs_buf_t *); /* buffer to unpin */
extern int pagebuf_ispin( /* check if buffer is pinned */
xfs_buf_t *); /* buffer to check */
extern void xfs_buf_pin(xfs_buf_t *);
extern void xfs_buf_unpin(xfs_buf_t *);
extern int xfs_buf_ispin(xfs_buf_t *);
/* Delayed Write Buffer Routines */
extern void pagebuf_delwri_dequeue(xfs_buf_t *);
extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);
extern int pagebuf_init(void);
extern void pagebuf_terminate(void);
#ifdef PAGEBUF_TRACE
extern ktrace_t *pagebuf_trace_buf;
extern void pagebuf_trace(
xfs_buf_t *, /* buffer being traced */
char *, /* description of operation */
void *, /* arbitrary diagnostic value */
void *); /* return address */
#ifdef XFS_BUF_TRACE
extern ktrace_t *xfs_buf_trace_buf;
extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
#else
# define pagebuf_trace(pb, id, ptr, ra) do { } while (0)
#define xfs_buf_trace(bp,id,ptr,ra) do { } while (0)
#endif
#define pagebuf_target_name(target) \
({ char __b[BDEVNAME_SIZE]; bdevname((target)->pbr_bdev, __b); __b; })
#define xfs_buf_target_name(target) \
({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })
#define XFS_B_ASYNC XBF_ASYNC
#define XFS_B_DELWRI XBF_DELWRI
#define XFS_B_READ XBF_READ
#define XFS_B_WRITE XBF_WRITE
#define XFS_B_STALE XBF_STALE
/* These are just for xfs_syncsub... it sets an internal variable
* then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t
*/
#define XFS_B_ASYNC PBF_ASYNC
#define XFS_B_DELWRI PBF_DELWRI
#define XFS_B_READ PBF_READ
#define XFS_B_WRITE PBF_WRITE
#define XFS_B_STALE PBF_STALE
#define XFS_BUF_TRYLOCK XBF_TRYLOCK
#define XFS_INCORE_TRYLOCK XBF_TRYLOCK
#define XFS_BUF_LOCK XBF_LOCK
#define XFS_BUF_MAPPED XBF_MAPPED
#define XFS_BUF_TRYLOCK PBF_TRYLOCK
#define XFS_INCORE_TRYLOCK PBF_TRYLOCK
#define XFS_BUF_LOCK PBF_LOCK
#define XFS_BUF_MAPPED PBF_MAPPED
#define BUF_BUSY XBF_DONT_BLOCK
#define BUF_BUSY PBF_DONT_BLOCK
#define XFS_BUF_BFLAGS(bp) ((bp)->b_flags)
#define XFS_BUF_ZEROFLAGS(bp) \
((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI))
#define XFS_BUF_BFLAGS(x) ((x)->pb_flags)
#define XFS_BUF_ZEROFLAGS(x) \
((x)->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI))
#define XFS_BUF_STALE(x) ((x)->pb_flags |= XFS_B_STALE)
#define XFS_BUF_UNSTALE(x) ((x)->pb_flags &= ~XFS_B_STALE)
#define XFS_BUF_ISSTALE(x) ((x)->pb_flags & XFS_B_STALE)
#define XFS_BUF_SUPER_STALE(x) do { \
XFS_BUF_STALE(x); \
pagebuf_delwri_dequeue(x); \
XFS_BUF_DONE(x); \
#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE)
#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE)
#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XFS_B_STALE)
#define XFS_BUF_SUPER_STALE(bp) do { \
XFS_BUF_STALE(bp); \
xfs_buf_delwri_dequeue(bp); \
XFS_BUF_DONE(bp); \
} while (0)
#define XFS_BUF_MANAGE PBF_FS_MANAGED
#define XFS_BUF_UNMANAGE(x) ((x)->pb_flags &= ~PBF_FS_MANAGED)
#define XFS_BUF_MANAGE XBF_FS_MANAGED
#define XFS_BUF_UNMANAGE(bp) ((bp)->b_flags &= ~XBF_FS_MANAGED)
#define XFS_BUF_DELAYWRITE(x) ((x)->pb_flags |= PBF_DELWRI)
#define XFS_BUF_UNDELAYWRITE(x) pagebuf_delwri_dequeue(x)
#define XFS_BUF_ISDELAYWRITE(x) ((x)->pb_flags & PBF_DELWRI)
#define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI)
#define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp)
#define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI)
#define XFS_BUF_ERROR(x,no) pagebuf_ioerror(x,no)
#define XFS_BUF_GETERROR(x) pagebuf_geterror(x)
#define XFS_BUF_ISERROR(x) (pagebuf_geterror(x)?1:0)
#define XFS_BUF_ERROR(bp,no) xfs_buf_ioerror(bp,no)
#define XFS_BUF_GETERROR(bp) xfs_buf_geterror(bp)
#define XFS_BUF_ISERROR(bp) (xfs_buf_geterror(bp) ? 1 : 0)
#define XFS_BUF_DONE(x) ((x)->pb_flags |= PBF_DONE)
#define XFS_BUF_UNDONE(x) ((x)->pb_flags &= ~PBF_DONE)
#define XFS_BUF_ISDONE(x) ((x)->pb_flags & PBF_DONE)
#define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE)
#define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE)
#define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE)
#define XFS_BUF_BUSY(x) do { } while (0)
#define XFS_BUF_UNBUSY(x) do { } while (0)
#define XFS_BUF_ISBUSY(x) (1)
#define XFS_BUF_BUSY(bp) do { } while (0)
#define XFS_BUF_UNBUSY(bp) do { } while (0)
#define XFS_BUF_ISBUSY(bp) (1)
#define XFS_BUF_ASYNC(x) ((x)->pb_flags |= PBF_ASYNC)
#define XFS_BUF_UNASYNC(x) ((x)->pb_flags &= ~PBF_ASYNC)
#define XFS_BUF_ISASYNC(x) ((x)->pb_flags & PBF_ASYNC)
#define XFS_BUF_ASYNC(bp) ((bp)->b_flags |= XBF_ASYNC)
#define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC)
#define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC)
#define XFS_BUF_ORDERED(x) ((x)->pb_flags |= PBF_ORDERED)
#define XFS_BUF_UNORDERED(x) ((x)->pb_flags &= ~PBF_ORDERED)
#define XFS_BUF_ISORDERED(x) ((x)->pb_flags & PBF_ORDERED)
#define XFS_BUF_ORDERED(bp) ((bp)->b_flags |= XBF_ORDERED)
#define XFS_BUF_UNORDERED(bp) ((bp)->b_flags &= ~XBF_ORDERED)
#define XFS_BUF_ISORDERED(bp) ((bp)->b_flags & XBF_ORDERED)
#define XFS_BUF_SHUT(x) printk("XFS_BUF_SHUT not implemented yet\n")
#define XFS_BUF_UNSHUT(x) printk("XFS_BUF_UNSHUT not implemented yet\n")
#define XFS_BUF_ISSHUT(x) (0)
#define XFS_BUF_SHUT(bp) do { } while (0)
#define XFS_BUF_UNSHUT(bp) do { } while (0)
#define XFS_BUF_ISSHUT(bp) (0)
#define XFS_BUF_HOLD(x) pagebuf_hold(x)
#define XFS_BUF_READ(x) ((x)->pb_flags |= PBF_READ)
#define XFS_BUF_UNREAD(x) ((x)->pb_flags &= ~PBF_READ)
#define XFS_BUF_ISREAD(x) ((x)->pb_flags & PBF_READ)
#define XFS_BUF_HOLD(bp) xfs_buf_hold(bp)
#define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ)
#define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ)
#define XFS_BUF_ISREAD(bp) ((bp)->b_flags & XBF_READ)
#define XFS_BUF_WRITE(x) ((x)->pb_flags |= PBF_WRITE)
#define XFS_BUF_UNWRITE(x) ((x)->pb_flags &= ~PBF_WRITE)
#define XFS_BUF_ISWRITE(x) ((x)->pb_flags & PBF_WRITE)
#define XFS_BUF_WRITE(bp) ((bp)->b_flags |= XBF_WRITE)
#define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE)
#define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE)
#define XFS_BUF_ISUNINITIAL(x) (0)
#define XFS_BUF_UNUNINITIAL(x) (0)
#define XFS_BUF_ISUNINITIAL(bp) (0)
#define XFS_BUF_UNUNINITIAL(bp) (0)
#define XFS_BUF_BP_ISMAPPED(bp) 1
#define XFS_BUF_BP_ISMAPPED(bp) (1)
#define XFS_BUF_IODONE_FUNC(buf) (buf)->pb_iodone
#define XFS_BUF_SET_IODONE_FUNC(buf, func) \
(buf)->pb_iodone = (func)
#define XFS_BUF_CLR_IODONE_FUNC(buf) \
(buf)->pb_iodone = NULL
#define XFS_BUF_SET_BDSTRAT_FUNC(buf, func) \
(buf)->pb_strat = (func)
#define XFS_BUF_CLR_BDSTRAT_FUNC(buf) \
(buf)->pb_strat = NULL
#define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone)
#define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func))
#define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL)
#define XFS_BUF_SET_BDSTRAT_FUNC(bp, func) ((bp)->b_strat = (func))
#define XFS_BUF_CLR_BDSTRAT_FUNC(bp) ((bp)->b_strat = NULL)
#define XFS_BUF_FSPRIVATE(buf, type) \
((type)(buf)->pb_fspriv)
#define XFS_BUF_SET_FSPRIVATE(buf, value) \
(buf)->pb_fspriv = (void *)(value)
#define XFS_BUF_FSPRIVATE2(buf, type) \
((type)(buf)->pb_fspriv2)
#define XFS_BUF_SET_FSPRIVATE2(buf, value) \
(buf)->pb_fspriv2 = (void *)(value)
#define XFS_BUF_FSPRIVATE3(buf, type) \
((type)(buf)->pb_fspriv3)
#define XFS_BUF_SET_FSPRIVATE3(buf, value) \
(buf)->pb_fspriv3 = (void *)(value)
#define XFS_BUF_SET_START(buf)
#define XFS_BUF_FSPRIVATE(bp, type) ((type)(bp)->b_fspriv)
#define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val))
#define XFS_BUF_FSPRIVATE2(bp, type) ((type)(bp)->b_fspriv2)
#define XFS_BUF_SET_FSPRIVATE2(bp, val) ((bp)->b_fspriv2 = (void*)(val))
#define XFS_BUF_FSPRIVATE3(bp, type) ((type)(bp)->b_fspriv3)
#define XFS_BUF_SET_FSPRIVATE3(bp, val) ((bp)->b_fspriv3 = (void*)(val))
#define XFS_BUF_SET_START(bp) do { } while (0)
#define XFS_BUF_SET_BRELSE_FUNC(bp, func) ((bp)->b_relse = (func))
#define XFS_BUF_SET_BRELSE_FUNC(buf, value) \
(buf)->pb_relse = (value)
#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr)
#define XFS_BUF_SET_PTR(bp, val, cnt) xfs_buf_associate_memory(bp, val, cnt)
#define XFS_BUF_ADDR(bp) ((bp)->b_bn)
#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno))
#define XFS_BUF_OFFSET(bp) ((bp)->b_file_offset)
#define XFS_BUF_SET_OFFSET(bp, off) ((bp)->b_file_offset = (off))
#define XFS_BUF_COUNT(bp) ((bp)->b_count_desired)
#define XFS_BUF_SET_COUNT(bp, cnt) ((bp)->b_count_desired = (cnt))
#define XFS_BUF_SIZE(bp) ((bp)->b_buffer_length)
#define XFS_BUF_SET_SIZE(bp, cnt) ((bp)->b_buffer_length = (cnt))
#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->pb_addr)
#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) do { } while (0)
#define XFS_BUF_SET_VTYPE(bp, type) do { } while (0)
#define XFS_BUF_SET_REF(bp, ref) do { } while (0)
static inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset)
#define XFS_BUF_ISPINNED(bp) xfs_buf_ispin(bp)
#define XFS_BUF_VALUSEMA(bp) xfs_buf_lock_value(bp)
#define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0)
#define XFS_BUF_VSEMA(bp) xfs_buf_unlock(bp)
#define XFS_BUF_PSEMA(bp,x) xfs_buf_lock(bp)
#define XFS_BUF_V_IODONESEMA(bp) up(&bp->b_iodonesema);
#define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target))
#define XFS_BUF_TARGET(bp) ((bp)->b_target)
#define XFS_BUFTARG_NAME(target) xfs_buf_target_name(target)
static inline int xfs_bawrite(void *mp, xfs_buf_t *bp)
{
if (bp->pb_flags & PBF_MAPPED)
return XFS_BUF_PTR(bp) + offset;
return (xfs_caddr_t) pagebuf_offset(bp, offset);
bp->b_fspriv3 = mp;
bp->b_strat = xfs_bdstrat_cb;
xfs_buf_delwri_dequeue(bp);
return xfs_buf_iostart(bp, XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
}
#define XFS_BUF_SET_PTR(bp, val, count) \
pagebuf_associate_memory(bp, val, count)
#define XFS_BUF_ADDR(bp) ((bp)->pb_bn)
#define XFS_BUF_SET_ADDR(bp, blk) \
((bp)->pb_bn = (xfs_daddr_t)(blk))
#define XFS_BUF_OFFSET(bp) ((bp)->pb_file_offset)
#define XFS_BUF_SET_OFFSET(bp, off) \
((bp)->pb_file_offset = (off))
#define XFS_BUF_COUNT(bp) ((bp)->pb_count_desired)
#define XFS_BUF_SET_COUNT(bp, cnt) \
((bp)->pb_count_desired = (cnt))
#define XFS_BUF_SIZE(bp) ((bp)->pb_buffer_length)
#define XFS_BUF_SET_SIZE(bp, cnt) \
((bp)->pb_buffer_length = (cnt))
#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
#define XFS_BUF_SET_VTYPE(bp, type)
#define XFS_BUF_SET_REF(bp, ref)
#define XFS_BUF_ISPINNED(bp) pagebuf_ispin(bp)
#define XFS_BUF_VALUSEMA(bp) pagebuf_lock_value(bp)
#define XFS_BUF_CPSEMA(bp) (pagebuf_cond_lock(bp) == 0)
#define XFS_BUF_VSEMA(bp) pagebuf_unlock(bp)
#define XFS_BUF_PSEMA(bp,x) pagebuf_lock(bp)
#define XFS_BUF_V_IODONESEMA(bp) up(&bp->pb_iodonesema);
/* setup the buffer target from a buftarg structure */
#define XFS_BUF_SET_TARGET(bp, target) \
(bp)->pb_target = (target)
#define XFS_BUF_TARGET(bp) ((bp)->pb_target)
#define XFS_BUFTARG_NAME(target) \
pagebuf_target_name(target)
#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
#define XFS_BUF_SET_VTYPE(bp, type)
#define XFS_BUF_SET_REF(bp, ref)
static inline int xfs_bawrite(void *mp, xfs_buf_t *bp)
static inline void xfs_buf_relse(xfs_buf_t *bp)
{
bp->pb_fspriv3 = mp;
bp->pb_strat = xfs_bdstrat_cb;
pagebuf_delwri_dequeue(bp);
return pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC | _PBF_RUN_QUEUES);
if (!bp->b_relse)
xfs_buf_unlock(bp);
xfs_buf_rele(bp);
}
static inline void xfs_buf_relse(xfs_buf_t *bp)
{
if (!bp->pb_relse)
pagebuf_unlock(bp);
pagebuf_rele(bp);
}
#define xfs_bpin(bp) pagebuf_pin(bp)
#define xfs_bunpin(bp) pagebuf_unpin(bp)
#define xfs_bpin(bp) xfs_buf_pin(bp)
#define xfs_bunpin(bp) xfs_buf_unpin(bp)
#define xfs_buftrace(id, bp) \
pagebuf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
xfs_buf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
#define xfs_biodone(pb) \
pagebuf_iodone(pb, 0)
#define xfs_biodone(bp) xfs_buf_ioend(bp, 0)
#define xfs_biomove(pb, off, len, data, rw) \
pagebuf_iomove((pb), (off), (len), (data), \
((rw) == XFS_B_WRITE) ? PBRW_WRITE : PBRW_READ)
#define xfs_biomove(bp, off, len, data, rw) \
xfs_buf_iomove((bp), (off), (len), (data), \
((rw) == XFS_B_WRITE) ? XBRW_WRITE : XBRW_READ)
#define xfs_biozero(pb, off, len) \
pagebuf_iomove((pb), (off), (len), NULL, PBRW_ZERO)
#define xfs_biozero(bp, off, len) \
xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
static inline int XFS_bwrite(xfs_buf_t *pb)
static inline int XFS_bwrite(xfs_buf_t *bp)
{
int iowait = (pb->pb_flags & PBF_ASYNC) == 0;
int iowait = (bp->b_flags & XBF_ASYNC) == 0;
int error = 0;
if (!iowait)
pb->pb_flags |= _PBF_RUN_QUEUES;
bp->b_flags |= _XBF_RUN_QUEUES;
pagebuf_delwri_dequeue(pb);
pagebuf_iostrategy(pb);
xfs_buf_delwri_dequeue(bp);
xfs_buf_iostrategy(bp);
if (iowait) {
error = pagebuf_iowait(pb);
xfs_buf_relse(pb);
error = xfs_buf_iowait(bp);
xfs_buf_relse(bp);
}
return error;
}
#define XFS_bdwrite(pb) \
pagebuf_iostart(pb, PBF_DELWRI | PBF_ASYNC)
#define XFS_bdwrite(bp) xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC)
static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp)
{
bp->pb_strat = xfs_bdstrat_cb;
bp->pb_fspriv3 = mp;
return pagebuf_iostart(bp, PBF_DELWRI | PBF_ASYNC);
bp->b_strat = xfs_bdstrat_cb;
bp->b_fspriv3 = mp;
return xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC);
}
#define XFS_bdstrat(bp) pagebuf_iorequest(bp)
#define XFS_bdstrat(bp) xfs_buf_iorequest(bp)
#define xfs_iowait(pb) pagebuf_iowait(pb)
#define xfs_iowait(bp) xfs_buf_iowait(bp)
#define xfs_baread(target, rablkno, ralen) \
pagebuf_readahead((target), (rablkno), (ralen), PBF_DONT_BLOCK)
#define xfs_buf_get_empty(len, target) pagebuf_get_empty((len), (target))
#define xfs_buf_get_noaddr(len, target) pagebuf_get_no_daddr((len), (target))
#define xfs_buf_free(bp) pagebuf_free(bp)
xfs_buf_readahead((target), (rablkno), (ralen), XBF_DONT_BLOCK)
/*
* Handling of buftargs.
*/
extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
extern void xfs_free_buftarg(xfs_buftarg_t *, int);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
#define xfs_getsize_buftarg(buftarg) \
block_size((buftarg)->pbr_bdev)
#define xfs_readonly_buftarg(buftarg) \
bdev_read_only((buftarg)->pbr_bdev)
#define xfs_binval(buftarg) \
xfs_flush_buftarg(buftarg, 1)
#define XFS_bflush(buftarg) \
xfs_flush_buftarg(buftarg, 1)
#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
#define xfs_binval(buftarg) xfs_flush_buftarg(buftarg, 1)
#define XFS_bflush(buftarg) xfs_flush_buftarg(buftarg, 1)
#endif /* __XFS_BUF_H__ */
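
For context, a minimal sketch of a caller written against the renamed interfaces declared above. This is illustrative only and not part of the patch; the function name and its arguments are placeholders.

	static int
	example_read_buf(
		xfs_buftarg_t	*target,
		xfs_daddr_t	blkno,
		size_t		len)
	{
		xfs_buf_t	*bp;
		int		error;

		bp = xfs_buf_read(target, blkno, len, XBF_LOCK | XBF_MAPPED);
		if (!bp)				/* allocation failed */
			return ENOMEM;
		error = xfs_buf_geterror(bp);		/* picks up b_error from the read */
		if (!error) {
			/* buffer is locked and mapped; data is at XFS_BUF_PTR(bp) */
		}
		xfs_buf_relse(bp);			/* unlock (if no b_relse handler) and drop the hold */
		return error;
	}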


@@ -750,7 +750,7 @@ xfs_ioctl(
(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
mp->m_rtdev_targp : mp->m_ddev_targp;
da.d_mem = da.d_miniosz = 1 << target->pbr_sshift;
da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
if (copy_to_user(arg, &da, sizeof(da)))


@@ -232,7 +232,7 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
#define xfs_itruncate_data(ip, off) \
(-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off)))
#define xfs_statvfs_fsid(statp, mp) \
({ u64 id = huge_encode_dev((mp)->m_dev); \
({ u64 id = huge_encode_dev((mp)->m_ddev_targp->bt_dev); \
__kernel_fsid_t *fsid = &(statp)->f_fsid; \
(fsid->val[0] = (u32)id, fsid->val[1] = (u32)(id >> 32)); })


@@ -233,8 +233,8 @@ xfs_read(
xfs_buftarg_t *target =
(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
mp->m_rtdev_targp : mp->m_ddev_targp;
if ((*offset & target->pbr_smask) ||
(size & target->pbr_smask)) {
if ((*offset & target->bt_smask) ||
(size & target->bt_smask)) {
if (*offset == ip->i_d.di_size) {
return (0);
}
@@ -618,7 +618,7 @@ xfs_write(
(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
mp->m_rtdev_targp : mp->m_ddev_targp;
if ((pos & target->pbr_smask) || (count & target->pbr_smask))
if ((pos & target->bt_smask) || (count & target->bt_smask))
return XFS_ERROR(-EINVAL);
if (!VN_CACHED(vp) && pos < i_size_read(inode))
@@ -938,7 +938,7 @@ xfs_bdstrat_cb(struct xfs_buf *bp)
mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
if (!XFS_FORCED_SHUTDOWN(mp)) {
pagebuf_iorequest(bp);
xfs_buf_iorequest(bp);
return 0;
} else {
xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
@@ -991,7 +991,7 @@ xfsbdstrat(
* if (XFS_BUF_IS_GRIO(bp)) {
*/
pagebuf_iorequest(bp);
xfs_buf_iorequest(bp);
return 0;
}


@@ -109,15 +109,15 @@ struct xfsstats {
__uint32_t vn_remove; /* # times vn_remove called */
__uint32_t vn_free; /* # times vn_free called */
#define XFSSTAT_END_BUF (XFSSTAT_END_VNODE_OPS+9)
__uint32_t pb_get;
__uint32_t pb_create;
__uint32_t pb_get_locked;
__uint32_t pb_get_locked_waited;
__uint32_t pb_busy_locked;
__uint32_t pb_miss_locked;
__uint32_t pb_page_retries;
__uint32_t pb_page_found;
__uint32_t pb_get_read;
__uint32_t xb_get;
__uint32_t xb_create;
__uint32_t xb_get_locked;
__uint32_t xb_get_locked_waited;
__uint32_t xb_busy_locked;
__uint32_t xb_miss_locked;
__uint32_t xb_page_retries;
__uint32_t xb_page_found;
__uint32_t xb_get_read;
/* Extra precision counters */
__uint64_t xs_xstrat_bytes;
__uint64_t xs_write_bytes;


@@ -309,7 +309,7 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp)
return;
}
if (mp->m_ddev_targp->pbr_bdev->bd_disk->queue->ordered ==
if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
QUEUE_ORDERED_NONE) {
xfs_fs_cmn_err(CE_NOTE, mp,
"Disabling barriers, not supported by the underlying device");
@@ -330,7 +330,7 @@ void
xfs_blkdev_issue_flush(
xfs_buftarg_t *buftarg)
{
blkdev_issue_flush(buftarg->pbr_bdev, NULL);
blkdev_issue_flush(buftarg->bt_bdev, NULL);
}
STATIC struct inode *
@@ -969,9 +969,9 @@ init_xfs_fs( void )
if (error < 0)
goto undo_zones;
error = pagebuf_init();
error = xfs_buf_init();
if (error < 0)
goto undo_pagebuf;
goto undo_buffers;
vn_init();
xfs_init();
@@ -985,9 +985,9 @@ init_xfs_fs( void )
return 0;
undo_register:
pagebuf_terminate();
xfs_buf_terminate();
undo_pagebuf:
undo_buffers:
linvfs_destroy_zones();
undo_zones:
@@ -1001,7 +1001,7 @@ exit_xfs_fs( void )
XFS_DM_EXIT(&xfs_fs_type);
unregister_filesystem(&xfs_fs_type);
xfs_cleanup();
pagebuf_terminate();
xfs_buf_terminate();
linvfs_destroy_zones();
ktrace_uninit();
}


@@ -308,7 +308,6 @@ typedef struct xfs_mount {
xfs_buftarg_t *m_ddev_targp; /* saves taking the address */
xfs_buftarg_t *m_logdev_targp;/* ptr to log device */
xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */
#define m_dev m_ddev_targp->pbr_dev
__uint8_t m_dircook_elog; /* log d-cookie entry bits */
__uint8_t m_blkbit_log; /* blocklog + NBBY */
__uint8_t m_blkbb_log; /* blocklog - BBSHIFT */


@@ -238,6 +238,7 @@ xfs_bioerror_relse(
}
return (EIO);
}
/*
* Prints out an ALERT message about I/O error.
*/
@@ -252,11 +253,9 @@ xfs_ioerror_alert(
"I/O error in filesystem (\"%s\") meta-data dev %s block 0x%llx"
" (\"%s\") error %d buf count %zd",
(!mp || !mp->m_fsname) ? "(fs name not set)" : mp->m_fsname,
XFS_BUFTARG_NAME(bp->pb_target),
(__uint64_t)blkno,
func,
XFS_BUF_GETERROR(bp),
XFS_BUF_COUNT(bp));
XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
(__uint64_t)blkno, func,
XFS_BUF_GETERROR(bp), XFS_BUF_COUNT(bp));
}
/*