 fs/xfs/kmem.c                  | 2 +-
 fs/xfs/libxfs/xfs_alloc.c      | 2 +-
 fs/xfs/libxfs/xfs_attr_leaf.c  | 2 +-
 fs/xfs/libxfs/xfs_da_format.h  | 2 +-
 fs/xfs/libxfs/xfs_fs.h         | 2 +-
 fs/xfs/libxfs/xfs_log_format.h | 4 ++--
 fs/xfs/xfs_buf.c               | 2 +-
 fs/xfs/xfs_log_cil.c           | 4 ++--
 fs/xfs/xfs_symlink.h           | 2 +-
 fs/xfs/xfs_trans_ail.c         | 8 ++++----
 10 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index da031b93e182..1da94237a8cf 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -32,7 +32,7 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
 
 
 /*
- * __vmalloc() will allocate data pages and auxillary structures (e.g.
+ * __vmalloc() will allocate data pages and auxiliary structures (e.g.
  * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context here. Hence
  * we need to tell memory reclaim that we are in such a context via
  * PF_MEMALLOC_NOFS to prevent memory reclaim re-entering the filesystem here
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index f7a4b54c5bc2..b39bd81d78bd 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1488,7 +1488,7 @@ xfs_alloc_ag_vextent_near(
 	dofirst = prandom_u32() & 1;
 #endif
 
-	/* handle unitialized agbno range so caller doesn't have to */
+	/* handle uninitialized agbno range so caller doesn't have to */
 	if (!args->min_agbno && !args->max_agbno)
 		args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
 	ASSERT(args->min_agbno <= args->max_agbno);
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index dca8840496ea..8ba3ae89f07e 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -829,7 +829,7 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args)
 }
 
 /*
- * Retreive the attribute value and length.
+ * Retrieve the attribute value and length.
  *
  * If ATTR_KERNOVAL is specified, only the length needs to be returned.
  * Unlike a lookup, we only return an error if the attribute does not
diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
index ae654e06b2fb..6702a0849f75 100644
--- a/fs/xfs/libxfs/xfs_da_format.h
+++ b/fs/xfs/libxfs/xfs_da_format.h
@@ -482,7 +482,7 @@ xfs_dir2_leaf_bests_p(struct xfs_dir2_leaf_tail *ltp)
 }
 
 /*
- * Free space block defintions for the node format.
+ * Free space block definitions for the node format.
  */
 
 /*
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index 39dd2b908106..45f9e93ac570 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -416,7 +416,7 @@ struct xfs_bulkstat {
 
 /*
  * Project quota id helpers (previously projid was 16bit only
- * and using two 16bit values to hold new 32bit projid was choosen
+ * and using two 16bit values to hold new 32bit projid was chosen
  * to retain compatibility with "old" filesystems).
  */
 static inline uint32_t
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index e5f97c69b320..8ef31d71a9c7 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -432,9 +432,9 @@ static inline uint xfs_log_dinode_size(int version)
 }
 
 /*
- * Buffer Log Format defintions
+ * Buffer Log Format definitions
  *
- * These are the physical dirty bitmap defintions for the log format structure.
+ * These are the physical dirty bitmap definitions for the log format structure.
  */
 #define XFS_BLF_CHUNK	128
 #define XFS_BLF_SHIFT	7
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 1e63dd3d1257..2ed3c65c602f 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -461,7 +461,7 @@ _xfs_buf_map_pages(
 		unsigned nofs_flag;
 
 		/*
-		 * vm_map_ram() will allocate auxillary structures (e.g.
+		 * vm_map_ram() will allocate auxiliary structures (e.g.
 		 * pagetables) with GFP_KERNEL, yet we are likely to be under
 		 * GFP_NOFS context here. Hence we need to tell memory reclaim
 		 * that we are in such a context via PF_MEMALLOC_NOFS to prevent
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index a1204424a938..48435cf2aa16 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -179,7 +179,7 @@ xlog_cil_alloc_shadow_bufs(
 
 		/*
 		 * We free and allocate here as a realloc would copy
-		 * unecessary data. We don't use kmem_zalloc() for the
+		 * unnecessary data. We don't use kmem_zalloc() for the
 		 * same reason - we don't need to zero the data area in
 		 * the buffer, only the log vector header and the iovec
 		 * storage.
@@ -682,7 +682,7 @@ xlog_cil_push(
 	}
 
-	/* check for a previously pushed seqeunce */
+	/* check for a previously pushed sequence */
 	if (push_seq < cil->xc_ctx->sequence) {
 		spin_unlock(&cil->xc_push_lock);
 		goto out_skip;
diff --git a/fs/xfs/xfs_symlink.h b/fs/xfs/xfs_symlink.h
index 9743d8c9394b..b1fa091427e6 100644
--- a/fs/xfs/xfs_symlink.h
+++ b/fs/xfs/xfs_symlink.h
@@ -5,7 +5,7 @@
 #ifndef __XFS_SYMLINK_H
 #define __XFS_SYMLINK_H 1
 
-/* Kernel only symlink defintions */
+/* Kernel only symlink definitions */
 
 int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
 		const char *target_path, umode_t mode, struct xfs_inode **ipp);
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index aea71ee189f5..00cc5b8734be 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -427,15 +427,15 @@ xfsaild_push(
 		case XFS_ITEM_FLUSHING:
 			/*
-			 * The item or its backing buffer is already beeing
+			 * The item or its backing buffer is already being
 			 * flushed. The typical reason for that is that an
 			 * inode buffer is locked because we already pushed the
 			 * updates to it as part of inode clustering.
 			 *
 			 * We do not want to to stop flushing just because lots
-			 * of items are already beeing flushed, but we need to
+			 * of items are already being flushed, but we need to
 			 * re-try the flushing relatively soon if most of the
-			 * AIL is beeing flushed.
+			 * AIL is being flushed.
 			 */
 			XFS_STATS_INC(mp, xs_push_ail_flushing);
 			trace_xfs_ail_flushing(lip);
@@ -612,7 +612,7 @@ xfsaild(
  * The push is run asynchronously in a workqueue, which means the caller needs
  * to handle waiting on the async flush for space to become available.
  * We don't want to interrupt any push that is in progress, hence we only queue
- * work if we set the pushing bit approriately.
+ * work if we set the pushing bit appropriately.
  *
  * We do this unlocked - we only need to know whether there is anything in the
  * AIL at the time we are called. We don't need to access the contents of
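
The comments corrected in kmem.c and xfs_buf.c both describe the same allocation pattern: __vmalloc() and vm_map_ram() build their auxiliary structures (page tables and so on) with GFP_KERNEL, so a caller that may already hold filesystem locks has to flag itself with PF_MEMALLOC_NOFS first. A minimal sketch of that pattern, assuming the memalloc_nofs_save()/memalloc_nofs_restore() helpers from <linux/sched/mm.h> and the modern two-argument __vmalloc() signature; nofs_vmalloc() is a hypothetical name, not an XFS function:

#include <linux/sched/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical helper illustrating the comment, not the actual XFS code. */
static void *
nofs_vmalloc(size_t size)
{
	unsigned int	nofs_flag;
	void		*ptr;

	/*
	 * With PF_MEMALLOC_NOFS set, every GFP_KERNEL allocation that
	 * __vmalloc() performs internally is implicitly treated as
	 * GFP_NOFS, so memory reclaim cannot re-enter the filesystem
	 * and deadlock on locks this task already holds.
	 */
	nofs_flag = memalloc_nofs_save();
	ptr = __vmalloc(size, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	return ptr;
}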
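
The xfs_fs.h hunk fixes the comment explaining why a 32-bit project quota id is split across two 16-bit fields. The compatibility argument is easiest to see in code; the sketch below uses made-up field and helper names (the real helpers sit right under the corrected comment):

#include <stdint.h>

/* Illustration only; names are hypothetical. */
struct projid_pair {
	uint16_t	projid_lo;	/* the original 16-bit projid slot */
	uint16_t	projid_hi;	/* high bits; zero on "old" filesystems */
};

static inline uint32_t
projid_get(const struct projid_pair *p)
{
	return ((uint32_t)p->projid_hi << 16) | p->projid_lo;
}

static inline void
projid_set(struct projid_pair *p, uint32_t projid)
{
	p->projid_lo = (uint16_t)(projid & 0xffff);
	p->projid_hi = (uint16_t)(projid >> 16);
}

An old filesystem that only ever wrote the 16-bit field round-trips unchanged, since its high half reads back as zero.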
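
The xlog_cil_alloc_shadow_bufs() comment states why the code frees and reallocates instead of reallocating in place: a realloc copies the old contents into the new buffer, and here they are about to be overwritten anyway (for the same reason a zeroing allocator is not used either). In generic kernel terms the trade-off looks like this; buf and new_size are placeholders:

	/* what the comment argues against: pays for a copy nobody needs */
	buf = krealloc(buf, new_size, GFP_NOFS);

	/* what the code does instead (sketch, not the XFS kmem wrappers) */
	kfree(buf);
	buf = kmalloc(new_size, GFP_NOFS);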
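
Finally, the xfsaild() comment block describes an unlocked gate: work is queued only by the caller that actually manages to set the pushing bit, so a push already in progress is never interrupted or queued twice. A generic sketch of that idiom, with hypothetical names rather than the xfsaild ones:

#include <linux/bitops.h>
#include <linux/workqueue.h>

#define MY_PUSHING_BIT	0	/* hypothetical flag bit */

static void
maybe_queue_push(unsigned long *flags, struct workqueue_struct *wq,
		 struct work_struct *work)
{
	/*
	 * test_and_set_bit() is atomic: only the caller that flips the
	 * bit from 0 to 1 queues the work. The work function clears
	 * the bit again once the push completes.
	 */
	if (!test_and_set_bit(MY_PUSHING_BIT, flags))
		queue_work(wq, work);
}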