xfs: shorten xfs_repair_ prefix to xrep_

Shorten all the metadata repair xfs_repair_* symbols to xrep_.
Whitespace damage will be fixed by a subsequent patch.  There are no
functional changes.
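
The rename is purely mechanical and covers functions, structures, trace
points, and helper macros alike; for example (all drawn from the diff
below):

	xfs_repair_attempt()              -> xrep_attempt()
	struct xfs_repair_extent          -> struct xrep_extent
	trace_xfs_repair_done()           -> trace_xrep_done()
	for_each_xfs_repair_extent_safe() -> for_each_xrep_extent_safe()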

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>

diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index 5eccd89..7e6a56a 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -41,21 +41,21 @@
  * and will set *fixed to true if it thinks it repaired anything.
  */
 int
-xfs_repair_attempt(
+xrep_attempt(
 	struct xfs_inode		*ip,
 	struct xfs_scrub_context	*sc,
 	bool				*fixed)
 {
 	int				error = 0;
 
-	trace_xfs_repair_attempt(ip, sc->sm, error);
+	trace_xrep_attempt(ip, sc->sm, error);
 
 	xchk_ag_btcur_free(&sc->sa);
 
 	/* Repair whatever's broken. */
 	ASSERT(sc->ops->repair);
 	error = sc->ops->repair(sc);
-	trace_xfs_repair_done(ip, sc->sm, error);
+	trace_xrep_done(ip, sc->sm, error);
 	switch (error) {
 	case 0:
 		/*
@@ -93,7 +93,7 @@ xfs_repair_attempt(
  * structure to track rate limiting information.
  */
 void
-xfs_repair_failure(
+xrep_failure(
 	struct xfs_mount		*mp)
 {
 	xfs_alert_ratelimited(mp,
@@ -105,7 +105,7 @@ xfs_repair_failure(
  * given mountpoint.
  */
 int
-xfs_repair_probe(
+xrep_probe(
 	struct xfs_scrub_context	*sc)
 {
 	int				error = 0;
@@ -121,7 +121,7 @@ xfs_repair_probe(
  * the btree cursors.
  */
 int
-xfs_repair_roll_ag_trans(
+xrep_roll_ag_trans(
 	struct xfs_scrub_context	*sc)
 {
 	int				error;
@@ -162,7 +162,7 @@ xfs_repair_roll_ag_trans(
  * in AG reservations) to construct a whole btree.
  */
 bool
-xfs_repair_ag_has_space(
+xrep_ag_has_space(
 	struct xfs_perag		*pag,
 	xfs_extlen_t			nr_blocks,
 	enum xfs_ag_resv_type		type)
@@ -178,7 +178,7 @@ xfs_repair_ag_has_space(
  * any type of per-AG btree.
  */
 xfs_extlen_t
-xfs_repair_calc_ag_resblks(
+xrep_calc_ag_resblks(
 	struct xfs_scrub_context	*sc)
 {
 	struct xfs_mount		*mp = sc->mp;
@@ -231,7 +231,7 @@ xfs_repair_calc_ag_resblks(
 	}
 	xfs_perag_put(pag);
 
-	trace_xfs_repair_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
+	trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
 			freelen, usedlen);
 
 	/*
@@ -270,7 +270,7 @@ xfs_repair_calc_ag_resblks(
 		rmapbt_sz = 0;
 	}
 
-	trace_xfs_repair_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
+	trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
 			inobt_sz, rmapbt_sz, refcbt_sz);
 
 	return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));
@@ -278,7 +278,7 @@ xfs_repair_calc_ag_resblks(
 
 /* Allocate a block in an AG. */
 int
-xfs_repair_alloc_ag_block(
+xrep_alloc_ag_block(
 	struct xfs_scrub_context	*sc,
 	struct xfs_owner_info		*oinfo,
 	xfs_fsblock_t			*fsbno,
@@ -329,7 +329,7 @@ xfs_repair_alloc_ag_block(
 
 /* Initialize a new AG btree root block with zero entries. */
 int
-xfs_repair_init_btblock(
+xrep_init_btblock(
 	struct xfs_scrub_context	*sc,
 	xfs_fsblock_t			fsb,
 	struct xfs_buf			**bpp,
@@ -340,7 +340,7 @@ xfs_repair_init_btblock(
 	struct xfs_mount		*mp = sc->mp;
 	struct xfs_buf			*bp;
 
-	trace_xfs_repair_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb),
+	trace_xrep_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb),
 			XFS_FSB_TO_AGBNO(mp, fsb), btnum);
 
 	ASSERT(XFS_FSB_TO_AGNO(mp, fsb) == sc->sa.agno);
@@ -384,19 +384,19 @@ xfs_repair_init_btblock(
 
 /* Collect a dead btree extent for later disposal. */
 int
-xfs_repair_collect_btree_extent(
+xrep_collect_btree_extent(
 	struct xfs_scrub_context	*sc,
-	struct xfs_repair_extent_list	*exlist,
+	struct xrep_extent_list		*exlist,
 	xfs_fsblock_t			fsbno,
 	xfs_extlen_t			len)
 {
-	struct xfs_repair_extent	*rex;
+	struct xrep_extent		*rex;
 
-	trace_xfs_repair_collect_btree_extent(sc->mp,
+	trace_xrep_collect_btree_extent(sc->mp,
 			XFS_FSB_TO_AGNO(sc->mp, fsbno),
 			XFS_FSB_TO_AGBNO(sc->mp, fsbno), len);
 
-	rex = kmem_alloc(sizeof(struct xfs_repair_extent), KM_MAYFAIL);
+	rex = kmem_alloc(sizeof(struct xrep_extent), KM_MAYFAIL);
 	if (!rex)
 		return -ENOMEM;
 
@@ -414,14 +414,14 @@ xfs_repair_collect_btree_extent(
  * Therefore, free all the memory associated with the list so we can die.
  */
 void
-xfs_repair_cancel_btree_extents(
+xrep_cancel_btree_extents(
 	struct xfs_scrub_context	*sc,
-	struct xfs_repair_extent_list	*exlist)
+	struct xrep_extent_list		*exlist)
 {
-	struct xfs_repair_extent	*rex;
-	struct xfs_repair_extent	*n;
+	struct xrep_extent		*rex;
+	struct xrep_extent		*n;
 
-	for_each_xfs_repair_extent_safe(rex, n, exlist) {
+	for_each_xrep_extent_safe(rex, n, exlist) {
 		list_del(&rex->list);
 		kmem_free(rex);
 	}
@@ -429,16 +429,16 @@ xfs_repair_cancel_btree_extents(
 
 /* Compare two btree extents. */
 static int
-xfs_repair_btree_extent_cmp(
+xrep_btree_extent_cmp(
 	void				*priv,
 	struct list_head		*a,
 	struct list_head		*b)
 {
-	struct xfs_repair_extent	*ap;
-	struct xfs_repair_extent	*bp;
+	struct xrep_extent		*ap;
+	struct xrep_extent		*bp;
 
-	ap = container_of(a, struct xfs_repair_extent, list);
-	bp = container_of(b, struct xfs_repair_extent, list);
+	ap = container_of(a, struct xrep_extent, list);
+	bp = container_of(b, struct xrep_extent, list);
 
 	if (ap->fsbno > bp->fsbno)
 		return 1;
@@ -462,15 +462,15 @@ xfs_repair_btree_extent_cmp(
 #define LEFT_ALIGNED	(1 << 0)
 #define RIGHT_ALIGNED	(1 << 1)
 int
-xfs_repair_subtract_extents(
+xrep_subtract_extents(
 	struct xfs_scrub_context	*sc,
-	struct xfs_repair_extent_list	*exlist,
-	struct xfs_repair_extent_list	*sublist)
+	struct xrep_extent_list		*exlist,
+	struct xrep_extent_list		*sublist)
 {
 	struct list_head		*lp;
-	struct xfs_repair_extent	*ex;
-	struct xfs_repair_extent	*newex;
-	struct xfs_repair_extent	*subex;
+	struct xrep_extent		*ex;
+	struct xrep_extent		*newex;
+	struct xrep_extent		*subex;
 	xfs_fsblock_t			sub_fsb;
 	xfs_extlen_t			sub_len;
 	int				state;
@@ -480,8 +480,8 @@ xfs_repair_subtract_extents(
 		return 0;
 	ASSERT(!list_empty(&sublist->list));
 
-	list_sort(NULL, &exlist->list, xfs_repair_btree_extent_cmp);
-	list_sort(NULL, &sublist->list, xfs_repair_btree_extent_cmp);
+	list_sort(NULL, &exlist->list, xrep_btree_extent_cmp);
+	list_sort(NULL, &sublist->list, xrep_btree_extent_cmp);
 
 	/*
 	 * Now that we've sorted both lists, we iterate exlist once, rolling
@@ -491,11 +491,11 @@ xfs_repair_subtract_extents(
 	 * list traversal is similar to merge sort, but we're deleting
 	 * instead.  In this manner we avoid O(n^2) operations.
 	 */
-	subex = list_first_entry(&sublist->list, struct xfs_repair_extent,
+	subex = list_first_entry(&sublist->list, struct xrep_extent,
 			list);
 	lp = exlist->list.next;
 	while (lp != &exlist->list) {
-		ex = list_entry(lp, struct xfs_repair_extent, list);
+		ex = list_entry(lp, struct xrep_extent, list);
 
 		/*
 		 * Advance subex and/or ex until we find a pair that
@@ -548,7 +548,7 @@ xfs_repair_subtract_extents(
 			 * Deleting from the middle: add the new right extent
 			 * and then shrink the left extent.
 			 */
-			newex = kmem_alloc(sizeof(struct xfs_repair_extent),
+			newex = kmem_alloc(sizeof(struct xrep_extent),
 					KM_MAYFAIL);
 			if (!newex) {
 				error = -ENOMEM;
@@ -619,12 +619,12 @@ xfs_repair_subtract_extents(
  * is not intended for use with file data repairs; we have bunmapi for that.
  */
 int
-xfs_repair_invalidate_blocks(
+xrep_invalidate_blocks(
 	struct xfs_scrub_context	*sc,
-	struct xfs_repair_extent_list	*exlist)
+	struct xrep_extent_list		*exlist)
 {
-	struct xfs_repair_extent	*rex;
-	struct xfs_repair_extent	*n;
+	struct xrep_extent		*rex;
+	struct xrep_extent		*n;
 	struct xfs_buf			*bp;
 	xfs_fsblock_t			fsbno;
 	xfs_agblock_t			i;
@@ -637,7 +637,7 @@ xfs_repair_invalidate_blocks(
 	 * because we never own those; and if we can't TRYLOCK the buffer we
 	 * assume it's owned by someone else.
 	 */
-	for_each_xfs_repair_extent_safe(rex, n, exlist) {
+	for_each_xrep_extent_safe(rex, n, exlist) {
 		for (fsbno = rex->fsbno, i = rex->len; i > 0; fsbno++, i--) {
 			/* Skip AG headers and post-EOFS blocks */
 			if (!xfs_verify_fsbno(sc->mp, fsbno))
@@ -657,7 +657,7 @@ xfs_repair_invalidate_blocks(
 
 /* Ensure the freelist is the correct size. */
 int
-xfs_repair_fix_freelist(
+xrep_fix_freelist(
 	struct xfs_scrub_context	*sc,
 	bool				can_shrink)
 {
@@ -677,7 +677,7 @@ xfs_repair_fix_freelist(
  * Put a block back on the AGFL.
  */
 STATIC int
-xfs_repair_put_freelist(
+xrep_put_freelist(
 	struct xfs_scrub_context	*sc,
 	xfs_agblock_t			agbno)
 {
@@ -685,7 +685,7 @@ xfs_repair_put_freelist(
 	int				error;
 
 	/* Make sure there's space on the freelist. */
-	error = xfs_repair_fix_freelist(sc, true);
+	error = xrep_fix_freelist(sc, true);
 	if (error)
 		return error;
 
@@ -713,7 +713,7 @@ xfs_repair_put_freelist(
 
 /* Dispose of a single metadata block. */
 STATIC int
-xfs_repair_dispose_btree_block(
+xrep_dispose_btree_block(
 	struct xfs_scrub_context	*sc,
 	xfs_fsblock_t			fsbno,
 	struct xfs_owner_info		*oinfo,
@@ -767,7 +767,7 @@ xfs_repair_dispose_btree_block(
 	if (has_other_rmap)
 		error = xfs_rmap_free(sc->tp, agf_bp, agno, agbno, 1, oinfo);
 	else if (resv == XFS_AG_RESV_AGFL)
-		error = xfs_repair_put_freelist(sc, agbno);
+		error = xrep_put_freelist(sc, agbno);
 	else
 		error = xfs_free_extent(sc->tp, fsbno, 1, oinfo, resv);
 	if (agf_bp != sc->sa.agf_bp)
@@ -777,7 +777,7 @@ xfs_repair_dispose_btree_block(
 
 	if (sc->ip)
 		return xfs_trans_roll_inode(&sc->tp, sc->ip);
-	return xfs_repair_roll_ag_trans(sc);
+	return xrep_roll_ag_trans(sc);
 
 out_free:
 	if (agf_bp != sc->sa.agf_bp)
@@ -787,29 +787,29 @@ xfs_repair_dispose_btree_block(
 
 /* Dispose of btree blocks from an old per-AG btree. */
 int
-xfs_repair_reap_btree_extents(
+xrep_reap_btree_extents(
 	struct xfs_scrub_context	*sc,
-	struct xfs_repair_extent_list	*exlist,
+	struct xrep_extent_list		*exlist,
 	struct xfs_owner_info		*oinfo,
 	enum xfs_ag_resv_type		type)
 {
-	struct xfs_repair_extent	*rex;
-	struct xfs_repair_extent	*n;
+	struct xrep_extent		*rex;
+	struct xrep_extent		*n;
 	int				error = 0;
 
 	ASSERT(xfs_sb_version_hasrmapbt(&sc->mp->m_sb));
 
 	/* Dispose of every block from the old btree. */
-	for_each_xfs_repair_extent_safe(rex, n, exlist) {
+	for_each_xrep_extent_safe(rex, n, exlist) {
 		ASSERT(sc->ip != NULL ||
 		       XFS_FSB_TO_AGNO(sc->mp, rex->fsbno) == sc->sa.agno);
 
-		trace_xfs_repair_dispose_btree_extent(sc->mp,
+		trace_xrep_dispose_btree_extent(sc->mp,
 				XFS_FSB_TO_AGNO(sc->mp, rex->fsbno),
 				XFS_FSB_TO_AGBNO(sc->mp, rex->fsbno), rex->len);
 
 		for (; rex->len > 0; rex->len--, rex->fsbno++) {
-			error = xfs_repair_dispose_btree_block(sc, rex->fsbno,
+			error = xrep_dispose_btree_block(sc, rex->fsbno,
 					oinfo, type);
 			if (error)
 				goto out;
@@ -819,7 +819,7 @@ xfs_repair_reap_btree_extents(
 	}
 
 out:
-	xfs_repair_cancel_btree_extents(sc, exlist);
+	xrep_cancel_btree_extents(sc, exlist);
 	return error;
 }
 
@@ -831,12 +831,12 @@ xfs_repair_reap_btree_extents(
  * btree roots.  This is not guaranteed to work if the AG is heavily damaged
  * or the rmap data are corrupt.
  *
- * Callers of xfs_repair_find_ag_btree_roots must lock the AGF and AGFL
+ * Callers of xrep_find_ag_btree_roots must lock the AGF and AGFL
  * buffers if the AGF is being rebuilt; or the AGF and AGI buffers if the
  * AGI is being rebuilt.  It must maintain these locks until it's safe for
  * other threads to change the btrees' shapes.  The caller provides
  * information about the btrees to look for by passing in an array of
- * xfs_repair_find_ag_btree with the (rmap owner, buf_ops, magic) fields set.
+ * xrep_find_ag_btree with the (rmap owner, buf_ops, magic) fields set.
  * The (root, height) fields will be set on return if anything is found.  The
  * last element of the array should have a NULL buf_ops to mark the end of the
  * array.
@@ -850,16 +850,16 @@ xfs_repair_reap_btree_extents(
  * should be the roots.
  */
 
-struct xfs_repair_findroot {
+struct xrep_findroot {
 	struct xfs_scrub_context	*sc;
 	struct xfs_buf			*agfl_bp;
 	struct xfs_agf			*agf;
-	struct xfs_repair_find_ag_btree	*btree_info;
+	struct xrep_find_ag_btree	*btree_info;
 };
 
 /* See if our block is in the AGFL. */
 STATIC int
-xfs_repair_findroot_agfl_walk(
+xrep_findroot_agfl_walk(
 	struct xfs_mount		*mp,
 	xfs_agblock_t			bno,
 	void				*priv)
@@ -871,9 +871,9 @@ xfs_repair_findroot_agfl_walk(
 
 /* Does this block match the btree information passed in? */
 STATIC int
-xfs_repair_findroot_block(
-	struct xfs_repair_findroot	*ri,
-	struct xfs_repair_find_ag_btree	*fab,
+xrep_findroot_block(
+	struct xrep_findroot		*ri,
+	struct xrep_find_ag_btree	*fab,
 	uint64_t			owner,
 	xfs_agblock_t			agbno,
 	bool				*found_it)
@@ -894,7 +894,7 @@ xfs_repair_findroot_block(
 	 */
 	if (owner == XFS_RMAP_OWN_AG) {
 		error = xfs_agfl_walk(mp, ri->agf, ri->agfl_bp,
-				xfs_repair_findroot_agfl_walk, &agbno);
+				xrep_findroot_agfl_walk, &agbno);
 		if (error == XFS_BTREE_QUERY_RANGE_ABORT)
 			return 0;
 		if (error)
@@ -932,7 +932,7 @@ xfs_repair_findroot_block(
 	fab->height = xfs_btree_get_level(btblock) + 1;
 	*found_it = true;
 
-	trace_xfs_repair_findroot_block(mp, ri->sc->sa.agno, agbno,
+	trace_xrep_findroot_block(mp, ri->sc->sa.agno, agbno,
 			be32_to_cpu(btblock->bb_magic), fab->height - 1);
 out:
 	xfs_trans_brelse(ri->sc->tp, bp);
@@ -944,13 +944,13 @@ xfs_repair_findroot_block(
  * looking for?
  */
 STATIC int
-xfs_repair_findroot_rmap(
+xrep_findroot_rmap(
 	struct xfs_btree_cur		*cur,
 	struct xfs_rmap_irec		*rec,
 	void				*priv)
 {
-	struct xfs_repair_findroot	*ri = priv;
-	struct xfs_repair_find_ag_btree	*fab;
+	struct xrep_findroot		*ri = priv;
+	struct xrep_find_ag_btree	*fab;
 	xfs_agblock_t			b;
 	bool				found_it;
 	int				error = 0;
@@ -965,7 +965,7 @@ xfs_repair_findroot_rmap(
 		for (fab = ri->btree_info; fab->buf_ops; fab++) {
 			if (rec->rm_owner != fab->rmap_owner)
 				continue;
-			error = xfs_repair_findroot_block(ri, fab,
+			error = xrep_findroot_block(ri, fab,
 					rec->rm_owner, rec->rm_startblock + b,
 					&found_it);
 			if (error)
@@ -980,15 +980,15 @@ xfs_repair_findroot_rmap(
 
 /* Find the roots of the per-AG btrees described in btree_info. */
 int
-xfs_repair_find_ag_btree_roots(
+xrep_find_ag_btree_roots(
 	struct xfs_scrub_context	*sc,
 	struct xfs_buf			*agf_bp,
-	struct xfs_repair_find_ag_btree	*btree_info,
+	struct xrep_find_ag_btree	*btree_info,
 	struct xfs_buf			*agfl_bp)
 {
 	struct xfs_mount		*mp = sc->mp;
-	struct xfs_repair_findroot	ri;
-	struct xfs_repair_find_ag_btree	*fab;
+	struct xrep_findroot		ri;
+	struct xrep_find_ag_btree	*fab;
 	struct xfs_btree_cur		*cur;
 	int				error;
 
@@ -1007,7 +1007,7 @@ xfs_repair_find_ag_btree_roots(
 	}
 
 	cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
-	error = xfs_rmap_query_all(cur, xfs_repair_findroot_rmap, &ri);
+	error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri);
 	xfs_btree_del_cursor(cur, error);
 
 	return error;
@@ -1015,7 +1015,7 @@ xfs_repair_find_ag_btree_roots(
 
 /* Force a quotacheck the next time we mount. */
 void
-xfs_repair_force_quotacheck(
+xrep_force_quotacheck(
 	struct xfs_scrub_context	*sc,
 	uint				dqtype)
 {
@@ -1043,7 +1043,7 @@ xfs_repair_force_quotacheck(
  * repair corruptions in the quota metadata.
  */
 int
-xfs_repair_ino_dqattach(
+xrep_ino_dqattach(
 	struct xfs_scrub_context	*sc)
 {
 	int				error;
@@ -1057,11 +1057,11 @@ xfs_repair_ino_dqattach(
 "inode %llu repair encountered quota error %d, quotacheck forced.",
 				(unsigned long long)sc->ip->i_ino, error);
 		if (XFS_IS_UQUOTA_ON(sc->mp) && !sc->ip->i_udquot)
-			xfs_repair_force_quotacheck(sc, XFS_DQ_USER);
+			xrep_force_quotacheck(sc, XFS_DQ_USER);
 		if (XFS_IS_GQUOTA_ON(sc->mp) && !sc->ip->i_gdquot)
-			xfs_repair_force_quotacheck(sc, XFS_DQ_GROUP);
+			xrep_force_quotacheck(sc, XFS_DQ_GROUP);
 		if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot)
-			xfs_repair_force_quotacheck(sc, XFS_DQ_PROJ);
+			xrep_force_quotacheck(sc, XFS_DQ_PROJ);
 		/* fall through */
 	case -ESRCH:
 		error = 0;