[kernel] r22419 - in dists/sid/linux/debian: . patches patches/debian

Ben Hutchings benh at moszumanska.debian.org
Tue Feb 24 21:07:32 UTC 2015


Author: benh
Date: Tue Feb 24 21:07:23 2015
New Revision: 22419

Log:
Fix module ABI changes from 3.16.7-ckt{5,6}

Revert the quota ops change as it will take some effort to avoid an
ABI change.  All the others can be fixed or ignored.

Added:
   dists/sid/linux/debian/patches/debian/mm-fix-pagecache_get_page-abi-change-in-3.16.7-ckt6.patch
   dists/sid/linux/debian/patches/debian/perf-fix-abi-change-in-3.16.7-ckt6.patch
   dists/sid/linux/debian/patches/debian/revert-quota-switch-get_dqblk-and-set_dqblk-to-use-b.patch
   dists/sid/linux/debian/patches/debian/rmap-fix-abi-change-in-3.16.7-ckt5.patch
Modified:
   dists/sid/linux/debian/changelog
   dists/sid/linux/debian/patches/series

Modified: dists/sid/linux/debian/changelog
==============================================================================
--- dists/sid/linux/debian/changelog	Tue Feb 24 16:29:31 2015	(r22418)
+++ dists/sid/linux/debian/changelog	Tue Feb 24 21:07:23 2015	(r22419)
@@ -137,6 +137,11 @@
   * [x86] HPET force enable for e6xx based systems (Closes: #772951)
   * vfs: read file_handle only once in handle_to_path (CVE-2015-1420)
   * ASLR: fix stack randomization on 64-bit systems (CVE-2015-1593)
+  * Revert "quota: Switch ->get_dqblk() and ->set_dqblk() to use bytes as
+    space units" to avoid ABI change
+  * rmap: Fix ABI change in 3.16.7-ckt5
+  * perf: Fix ABI change in 3.16.7-ckt6
+  * mm: Fix pagecache_get_page() ABI change in 3.16.7-ckt6
 
   [ Helge Deller ]
   * [alpha] build debian-installer udeb packages

Added: dists/sid/linux/debian/patches/debian/mm-fix-pagecache_get_page-abi-change-in-3.16.7-ckt6.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux/debian/patches/debian/mm-fix-pagecache_get_page-abi-change-in-3.16.7-ckt6.patch	Tue Feb 24 21:07:23 2015	(r22419)
@@ -0,0 +1,46 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Tue, 24 Feb 2015 20:05:05 +0000
+Subject: mm: Fix pagecache_get_page() ABI change in 3.16.7-ckt6
+Forwarded: not-needed
+
+The last parameter to pagecache_get_page() was dropped, as some
+callers passed the wrong value and it is really redundant.
+
+As most filesystems need this function, we should keep the old
+function signature for OOT modules.  Rename the function, add a
+wrapper with the old name and parameters, and add a macro to make all
+new callers use the new function.
+
+---
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -267,7 +267,12 @@ pgoff_t page_cache_prev_hole(struct addr
+ #define FGP_NOWAIT		0x00000020
+ 
+ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
+-		int fgp_flags, gfp_t cache_gfp_mask);
++				int fgp_flags, gfp_t cache_gfp_mask,
++				gfp_t radix_gfp_mask);
++struct page *
++pagecache_get_page_fixed(struct address_space *mapping, pgoff_t offset,
++			 int fgp_flags, gfp_t cache_gfp_mask);
++#define pagecache_get_page pagecache_get_page_fixed
+ 
+ /**
+  * find_get_page - find and get a page reference
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2674,3 +2674,13 @@ int try_to_release_page(struct page *pag
+ }
+ 
+ EXPORT_SYMBOL(try_to_release_page);
++
++#undef pagecache_get_page
++struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
++				int fgp_flags, gfp_t cache_gfp_mask,
++				gfp_t radix_gfp_mask __always_unused)
++{
++	return pagecache_get_page_fixed(mapping, offset, fgp_flags,
++					cache_gfp_mask);
++}
++EXPORT_SYMBOL(pagecache_get_page);
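
For readers unfamiliar with this kind of ABI shim, here is a rough,
stand-alone C sketch of the pattern the patch above applies to
pagecache_get_page(): the real implementation moves to a new name, a
wrapper keeping the old name and old argument list stays exported for
out-of-tree modules, and a macro points freshly compiled callers at the
new function.  The names do_lookup/do_lookup_fixed below are invented
for illustration and are not kernel symbols.

#include <stdio.h>

/* New implementation: the redundant trailing argument is gone. */
static int do_lookup_fixed(const char *key, int flags)
{
	printf("lookup %s (flags %d)\n", key, flags);
	return 0;
}

/*
 * Wrapper keeping the old name and the old argument list, so modules
 * built against the previous ABI keep linking and calling it; the
 * obsolete argument is accepted but ignored.
 */
int do_lookup(const char *key, int flags, int unused_mask)
{
	(void)unused_mask;
	return do_lookup_fixed(key, flags);
}

/*
 * In the header this rename makes every freshly compiled caller use the
 * new function; the one file that defines the wrapper #undefs it first,
 * as filemap.c does in the patch above.
 */
#define do_lookup do_lookup_fixed

int main(void)
{
	return do_lookup("example", 1);	/* expands to do_lookup_fixed() */
}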

Added: dists/sid/linux/debian/patches/debian/perf-fix-abi-change-in-3.16.7-ckt6.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux/debian/patches/debian/perf-fix-abi-change-in-3.16.7-ckt6.patch	Tue Feb 24 21:07:23 2015	(r22419)
@@ -0,0 +1,32 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Tue, 24 Feb 2015 09:53:48 +0000
+Subject: perf: Fix ABI change in 3.16.7-ckt6
+Forwarded: not-needed
+
+Commit c3c87e770458 ('perf: Tighten (and fix) the grouping condition')
+removed perf_event_context::type and its type definition.  Add them
+back.
+
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -453,6 +453,12 @@ struct perf_event {
+ #endif /* CONFIG_PERF_EVENTS */
+ };
+ 
++/* now unused */
++enum perf_event_context_type {
++	task_context,
++	cpu_context,
++};
++
+ /**
+  * struct perf_event_context - event context structure
+  *
+@@ -460,6 +466,7 @@ struct perf_event {
+  */
+ struct perf_event_context {
+ 	struct pmu			*pmu;
++	enum perf_event_context_type	type; /* now unused */
+ 	/*
+ 	 * Protect the states of the events in the list,
+ 	 * nr_active, and the list:

Added: dists/sid/linux/debian/patches/debian/revert-quota-switch-get_dqblk-and-set_dqblk-to-use-b.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux/debian/patches/debian/revert-quota-switch-get_dqblk-and-set_dqblk-to-use-b.patch	Tue Feb 24 21:07:23 2015	(r22419)
@@ -0,0 +1,940 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Tue, 24 Feb 2015 06:11:44 +0000
+Subject: Revert "quota: Switch ->get_dqblk() and ->set_dqblk() to use bytes as space units"
+Forwarded: not-needed
+
+This reverts commit e5c1cda088832f58c8477ac993ed70ec98fa1221, which was
+based on commit 14bf61ffe6ac54afcd1e888a4407fe16054483db upstream.  It
+makes a module ABI change and doesn't seem worth the trouble.  We can
+probably fix the issue later in a somewhat uglier way without the ABI
+change.
+---
+ fs/gfs2/quota.c          |  49 +++++++-------
+ fs/quota/dquot.c         |  83 ++++++++++++------------
+ fs/quota/quota.c         | 162 ++++++++---------------------------------------
+ fs/xfs/xfs_qm.h          |   4 +-
+ fs/xfs/xfs_qm_syscalls.c | 156 ++++++++++++++++++++++++++-------------------
+ fs/xfs/xfs_quotaops.c    |   8 +--
+ include/linux/quota.h    |  47 +-------------
+ include/linux/quotaops.h |   4 +-
+ 8 files changed, 195 insertions(+), 318 deletions(-)
+
+diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
+index dc482ff..64b29f7 100644
+--- a/fs/gfs2/quota.c
++++ b/fs/gfs2/quota.c
+@@ -667,7 +667,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
+ 
+ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
+ 			     s64 change, struct gfs2_quota_data *qd,
+-			     struct qc_dqblk *fdq)
++			     struct fs_disk_quota *fdq)
+ {
+ 	struct inode *inode = &ip->i_inode;
+ 	struct gfs2_sbd *sdp = GFS2_SB(inode);
+@@ -697,16 +697,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
+ 	be64_add_cpu(&q.qu_value, change);
+ 	qd->qd_qb.qb_value = q.qu_value;
+ 	if (fdq) {
+-		if (fdq->d_fieldmask & QC_SPC_SOFT) {
+-			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
++		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
++			q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
+ 			qd->qd_qb.qb_warn = q.qu_warn;
+ 		}
+-		if (fdq->d_fieldmask & QC_SPC_HARD) {
+-			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
++		if (fdq->d_fieldmask & FS_DQ_BHARD) {
++			q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
+ 			qd->qd_qb.qb_limit = q.qu_limit;
+ 		}
+-		if (fdq->d_fieldmask & QC_SPACE) {
+-			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
++		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
++			q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
+ 			qd->qd_qb.qb_value = q.qu_value;
+ 		}
+ 	}
+@@ -1502,7 +1502,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
+ }
+ 
+ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
+-			  struct qc_dqblk *fdq)
++			  struct fs_disk_quota *fdq)
+ {
+ 	struct gfs2_sbd *sdp = sb->s_fs_info;
+ 	struct gfs2_quota_lvb *qlvb;
+@@ -1510,7 +1510,7 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
+ 	struct gfs2_holder q_gh;
+ 	int error;
+ 
+-	memset(fdq, 0, sizeof(*fdq));
++	memset(fdq, 0, sizeof(struct fs_disk_quota));
+ 
+ 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
+ 		return -ESRCH; /* Crazy XFS error code */
+@@ -1527,9 +1527,12 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
+ 		goto out;
+ 
+ 	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
+-	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
+-	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
+-	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
++	fdq->d_version = FS_DQUOT_VERSION;
++	fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
++	fdq->d_id = from_kqid_munged(current_user_ns(), qid);
++	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
++	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
++	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
+ 
+ 	gfs2_glock_dq_uninit(&q_gh);
+ out:
+@@ -1538,10 +1541,10 @@ out:
+ }
+ 
+ /* GFS2 only supports a subset of the XFS fields */
+-#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
++#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
+ 
+ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
+-			  struct qc_dqblk *fdq)
++			  struct fs_disk_quota *fdq)
+ {
+ 	struct gfs2_sbd *sdp = sb->s_fs_info;
+ 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
+@@ -1585,17 +1588,17 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
+ 		goto out_i;
+ 
+ 	/* If nothing has changed, this is a no-op */
+-	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
+-	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
+-		fdq->d_fieldmask ^= QC_SPC_SOFT;
++	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
++	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
++		fdq->d_fieldmask ^= FS_DQ_BSOFT;
+ 
+-	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
+-	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
+-		fdq->d_fieldmask ^= QC_SPC_HARD;
++	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
++	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
++		fdq->d_fieldmask ^= FS_DQ_BHARD;
+ 
+-	if ((fdq->d_fieldmask & QC_SPACE) &&
+-	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
+-		fdq->d_fieldmask ^= QC_SPACE;
++	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
++	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
++		fdq->d_fieldmask ^= FS_DQ_BCOUNT;
+ 
+ 	if (fdq->d_fieldmask == 0)
+ 		goto out_i;
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 6a35035..f56a357 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -2407,25 +2407,30 @@ static inline qsize_t stoqb(qsize_t space)
+ }
+ 
+ /* Generic routine for getting common part of quota structure */
+-static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
++static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+ {
+ 	struct mem_dqblk *dm = &dquot->dq_dqb;
+ 
+ 	memset(di, 0, sizeof(*di));
++	di->d_version = FS_DQUOT_VERSION;
++	di->d_flags = dquot->dq_id.type == USRQUOTA ?
++			FS_USER_QUOTA : FS_GROUP_QUOTA;
++	di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id);
++
+ 	spin_lock(&dq_data_lock);
+-	di->d_spc_hardlimit = dm->dqb_bhardlimit;
+-	di->d_spc_softlimit = dm->dqb_bsoftlimit;
++	di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
++	di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
+ 	di->d_ino_hardlimit = dm->dqb_ihardlimit;
+ 	di->d_ino_softlimit = dm->dqb_isoftlimit;
+-	di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
+-	di->d_ino_count = dm->dqb_curinodes;
+-	di->d_spc_timer = dm->dqb_btime;
+-	di->d_ino_timer = dm->dqb_itime;
++	di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
++	di->d_icount = dm->dqb_curinodes;
++	di->d_btimer = dm->dqb_btime;
++	di->d_itimer = dm->dqb_itime;
+ 	spin_unlock(&dq_data_lock);
+ }
+ 
+ int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
+-		    struct qc_dqblk *di)
++		    struct fs_disk_quota *di)
+ {
+ 	struct dquot *dquot;
+ 
+@@ -2439,70 +2444,70 @@ int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
+ }
+ EXPORT_SYMBOL(dquot_get_dqblk);
+ 
+-#define VFS_QC_MASK \
+-	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
+-	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
+-	 QC_SPC_TIMER | QC_INO_TIMER)
++#define VFS_FS_DQ_MASK \
++	(FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
++	 FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
++	 FS_DQ_BTIMER | FS_DQ_ITIMER)
+ 
+ /* Generic routine for setting common part of quota structure */
+-static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
++static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+ {
+ 	struct mem_dqblk *dm = &dquot->dq_dqb;
+ 	int check_blim = 0, check_ilim = 0;
+ 	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
+ 
+-	if (di->d_fieldmask & ~VFS_QC_MASK)
++	if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
+ 		return -EINVAL;
+ 
+-	if (((di->d_fieldmask & QC_SPC_SOFT) &&
+-	     stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) ||
+-	    ((di->d_fieldmask & QC_SPC_HARD) &&
+-	     stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) ||
+-	    ((di->d_fieldmask & QC_INO_SOFT) &&
++	if (((di->d_fieldmask & FS_DQ_BSOFT) &&
++	     (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
++	    ((di->d_fieldmask & FS_DQ_BHARD) &&
++	     (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
++	    ((di->d_fieldmask & FS_DQ_ISOFT) &&
+ 	     (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
+-	    ((di->d_fieldmask & QC_INO_HARD) &&
++	    ((di->d_fieldmask & FS_DQ_IHARD) &&
+ 	     (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
+ 		return -ERANGE;
+ 
+ 	spin_lock(&dq_data_lock);
+-	if (di->d_fieldmask & QC_SPACE) {
+-		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
++	if (di->d_fieldmask & FS_DQ_BCOUNT) {
++		dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
+ 		check_blim = 1;
+ 		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
+ 	}
+ 
+-	if (di->d_fieldmask & QC_SPC_SOFT)
+-		dm->dqb_bsoftlimit = di->d_spc_softlimit;
+-	if (di->d_fieldmask & QC_SPC_HARD)
+-		dm->dqb_bhardlimit = di->d_spc_hardlimit;
+-	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
++	if (di->d_fieldmask & FS_DQ_BSOFT)
++		dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
++	if (di->d_fieldmask & FS_DQ_BHARD)
++		dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
++	if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
+ 		check_blim = 1;
+ 		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
+ 	}
+ 
+-	if (di->d_fieldmask & QC_INO_COUNT) {
+-		dm->dqb_curinodes = di->d_ino_count;
++	if (di->d_fieldmask & FS_DQ_ICOUNT) {
++		dm->dqb_curinodes = di->d_icount;
+ 		check_ilim = 1;
+ 		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
+ 	}
+ 
+-	if (di->d_fieldmask & QC_INO_SOFT)
++	if (di->d_fieldmask & FS_DQ_ISOFT)
+ 		dm->dqb_isoftlimit = di->d_ino_softlimit;
+-	if (di->d_fieldmask & QC_INO_HARD)
++	if (di->d_fieldmask & FS_DQ_IHARD)
+ 		dm->dqb_ihardlimit = di->d_ino_hardlimit;
+-	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
++	if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
+ 		check_ilim = 1;
+ 		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
+ 	}
+ 
+-	if (di->d_fieldmask & QC_SPC_TIMER) {
+-		dm->dqb_btime = di->d_spc_timer;
++	if (di->d_fieldmask & FS_DQ_BTIMER) {
++		dm->dqb_btime = di->d_btimer;
+ 		check_blim = 1;
+ 		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
+ 	}
+ 
+-	if (di->d_fieldmask & QC_INO_TIMER) {
+-		dm->dqb_itime = di->d_ino_timer;
++	if (di->d_fieldmask & FS_DQ_ITIMER) {
++		dm->dqb_itime = di->d_itimer;
+ 		check_ilim = 1;
+ 		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
+ 	}
+@@ -2512,7 +2517,7 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
+ 		    dm->dqb_curspace < dm->dqb_bsoftlimit) {
+ 			dm->dqb_btime = 0;
+ 			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
+-		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
++		} else if (!(di->d_fieldmask & FS_DQ_BTIMER))
+ 			/* Set grace only if user hasn't provided his own... */
+ 			dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
+ 	}
+@@ -2521,7 +2526,7 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
+ 		    dm->dqb_curinodes < dm->dqb_isoftlimit) {
+ 			dm->dqb_itime = 0;
+ 			clear_bit(DQ_INODES_B, &dquot->dq_flags);
+-		} else if (!(di->d_fieldmask & QC_INO_TIMER))
++		} else if (!(di->d_fieldmask & FS_DQ_ITIMER))
+ 			/* Set grace only if user hasn't provided his own... */
+ 			dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
+ 	}
+@@ -2537,7 +2542,7 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
+ }
+ 
+ int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
+-		  struct qc_dqblk *di)
++		  struct fs_disk_quota *di)
+ {
+ 	struct dquot *dquot;
+ 	int rc;
+diff --git a/fs/quota/quota.c b/fs/quota/quota.c
+index 47bd897..ff3f0b3 100644
+--- a/fs/quota/quota.c
++++ b/fs/quota/quota.c
+@@ -115,27 +115,17 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
+ 	return sb->s_qcop->set_info(sb, type, &info);
+ }
+ 
+-static inline qsize_t qbtos(qsize_t blocks)
+-{
+-	return blocks << QIF_DQBLKSIZE_BITS;
+-}
+-
+-static inline qsize_t stoqb(qsize_t space)
+-{
+-	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
+-}
+-
+-static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
++static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
+ {
+ 	memset(dst, 0, sizeof(*dst));
+-	dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
+-	dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
+-	dst->dqb_curspace = src->d_space;
++	dst->dqb_bhardlimit = src->d_blk_hardlimit;
++	dst->dqb_bsoftlimit = src->d_blk_softlimit;
++	dst->dqb_curspace = src->d_bcount;
+ 	dst->dqb_ihardlimit = src->d_ino_hardlimit;
+ 	dst->dqb_isoftlimit = src->d_ino_softlimit;
+-	dst->dqb_curinodes = src->d_ino_count;
+-	dst->dqb_btime = src->d_spc_timer;
+-	dst->dqb_itime = src->d_ino_timer;
++	dst->dqb_curinodes = src->d_icount;
++	dst->dqb_btime = src->d_btimer;
++	dst->dqb_itime = src->d_itimer;
+ 	dst->dqb_valid = QIF_ALL;
+ }
+ 
+@@ -143,7 +133,7 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
+ 			  void __user *addr)
+ {
+ 	struct kqid qid;
+-	struct qc_dqblk fdq;
++	struct fs_disk_quota fdq;
+ 	struct if_dqblk idq;
+ 	int ret;
+ 
+@@ -161,36 +151,36 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
+ 	return 0;
+ }
+ 
+-static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
++static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
+ {
+-	dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
+-	dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
+-	dst->d_space = src->dqb_curspace;
++	dst->d_blk_hardlimit = src->dqb_bhardlimit;
++	dst->d_blk_softlimit  = src->dqb_bsoftlimit;
++	dst->d_bcount = src->dqb_curspace;
+ 	dst->d_ino_hardlimit = src->dqb_ihardlimit;
+ 	dst->d_ino_softlimit = src->dqb_isoftlimit;
+-	dst->d_ino_count = src->dqb_curinodes;
+-	dst->d_spc_timer = src->dqb_btime;
+-	dst->d_ino_timer = src->dqb_itime;
++	dst->d_icount = src->dqb_curinodes;
++	dst->d_btimer = src->dqb_btime;
++	dst->d_itimer = src->dqb_itime;
+ 
+ 	dst->d_fieldmask = 0;
+ 	if (src->dqb_valid & QIF_BLIMITS)
+-		dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
++		dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
+ 	if (src->dqb_valid & QIF_SPACE)
+-		dst->d_fieldmask |= QC_SPACE;
++		dst->d_fieldmask |= FS_DQ_BCOUNT;
+ 	if (src->dqb_valid & QIF_ILIMITS)
+-		dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
++		dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
+ 	if (src->dqb_valid & QIF_INODES)
+-		dst->d_fieldmask |= QC_INO_COUNT;
++		dst->d_fieldmask |= FS_DQ_ICOUNT;
+ 	if (src->dqb_valid & QIF_BTIME)
+-		dst->d_fieldmask |= QC_SPC_TIMER;
++		dst->d_fieldmask |= FS_DQ_BTIMER;
+ 	if (src->dqb_valid & QIF_ITIME)
+-		dst->d_fieldmask |= QC_INO_TIMER;
++		dst->d_fieldmask |= FS_DQ_ITIMER;
+ }
+ 
+ static int quota_setquota(struct super_block *sb, int type, qid_t id,
+ 			  void __user *addr)
+ {
+-	struct qc_dqblk fdq;
++	struct fs_disk_quota fdq;
+ 	struct if_dqblk idq;
+ 	struct kqid qid;
+ 
+@@ -254,78 +244,10 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr)
+ 	return ret;
+ }
+ 
+-/*
+- * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
+- * out of there as xfsprogs rely on definitions being in that header file. So
+- * just define same functions here for quota purposes.
+- */
+-#define XFS_BB_SHIFT 9
+-
+-static inline u64 quota_bbtob(u64 blocks)
+-{
+-	return blocks << XFS_BB_SHIFT;
+-}
+-
+-static inline u64 quota_btobb(u64 bytes)
+-{
+-	return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
+-}
+-
+-static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
+-{
+-	dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
+-	dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
+-	dst->d_ino_hardlimit = src->d_ino_hardlimit;
+-	dst->d_ino_softlimit = src->d_ino_softlimit;
+-	dst->d_space = quota_bbtob(src->d_bcount);
+-	dst->d_ino_count = src->d_icount;
+-	dst->d_ino_timer = src->d_itimer;
+-	dst->d_spc_timer = src->d_btimer;
+-	dst->d_ino_warns = src->d_iwarns;
+-	dst->d_spc_warns = src->d_bwarns;
+-	dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
+-	dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
+-	dst->d_rt_space = quota_bbtob(src->d_rtbcount);
+-	dst->d_rt_spc_timer = src->d_rtbtimer;
+-	dst->d_rt_spc_warns = src->d_rtbwarns;
+-	dst->d_fieldmask = 0;
+-	if (src->d_fieldmask & FS_DQ_ISOFT)
+-		dst->d_fieldmask |= QC_INO_SOFT;
+-	if (src->d_fieldmask & FS_DQ_IHARD)
+-		dst->d_fieldmask |= QC_INO_HARD;
+-	if (src->d_fieldmask & FS_DQ_BSOFT)
+-		dst->d_fieldmask |= QC_SPC_SOFT;
+-	if (src->d_fieldmask & FS_DQ_BHARD)
+-		dst->d_fieldmask |= QC_SPC_HARD;
+-	if (src->d_fieldmask & FS_DQ_RTBSOFT)
+-		dst->d_fieldmask |= QC_RT_SPC_SOFT;
+-	if (src->d_fieldmask & FS_DQ_RTBHARD)
+-		dst->d_fieldmask |= QC_RT_SPC_HARD;
+-	if (src->d_fieldmask & FS_DQ_BTIMER)
+-		dst->d_fieldmask |= QC_SPC_TIMER;
+-	if (src->d_fieldmask & FS_DQ_ITIMER)
+-		dst->d_fieldmask |= QC_INO_TIMER;
+-	if (src->d_fieldmask & FS_DQ_RTBTIMER)
+-		dst->d_fieldmask |= QC_RT_SPC_TIMER;
+-	if (src->d_fieldmask & FS_DQ_BWARNS)
+-		dst->d_fieldmask |= QC_SPC_WARNS;
+-	if (src->d_fieldmask & FS_DQ_IWARNS)
+-		dst->d_fieldmask |= QC_INO_WARNS;
+-	if (src->d_fieldmask & FS_DQ_RTBWARNS)
+-		dst->d_fieldmask |= QC_RT_SPC_WARNS;
+-	if (src->d_fieldmask & FS_DQ_BCOUNT)
+-		dst->d_fieldmask |= QC_SPACE;
+-	if (src->d_fieldmask & FS_DQ_ICOUNT)
+-		dst->d_fieldmask |= QC_INO_COUNT;
+-	if (src->d_fieldmask & FS_DQ_RTBCOUNT)
+-		dst->d_fieldmask |= QC_RT_SPACE;
+-}
+-
+ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
+ 			   void __user *addr)
+ {
+ 	struct fs_disk_quota fdq;
+-	struct qc_dqblk qdq;
+ 	struct kqid qid;
+ 
+ 	if (copy_from_user(&fdq, addr, sizeof(fdq)))
+@@ -335,44 +257,13 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
+ 	qid = make_kqid(current_user_ns(), type, id);
+ 	if (!qid_valid(qid))
+ 		return -EINVAL;
+-	copy_from_xfs_dqblk(&qdq, &fdq);
+-	return sb->s_qcop->set_dqblk(sb, qid, &qdq);
+-}
+-
+-static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
+-			      int type, qid_t id)
+-{
+-	memset(dst, 0, sizeof(*dst));
+-	dst->d_version = FS_DQUOT_VERSION;
+-	dst->d_id = id;
+-	if (type == USRQUOTA)
+-		dst->d_flags = FS_USER_QUOTA;
+-	else if (type == PRJQUOTA)
+-		dst->d_flags = FS_PROJ_QUOTA;
+-	else
+-		dst->d_flags = FS_GROUP_QUOTA;
+-	dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
+-	dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
+-	dst->d_ino_hardlimit = src->d_ino_hardlimit;
+-	dst->d_ino_softlimit = src->d_ino_softlimit;
+-	dst->d_bcount = quota_btobb(src->d_space);
+-	dst->d_icount = src->d_ino_count;
+-	dst->d_itimer = src->d_ino_timer;
+-	dst->d_btimer = src->d_spc_timer;
+-	dst->d_iwarns = src->d_ino_warns;
+-	dst->d_bwarns = src->d_spc_warns;
+-	dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
+-	dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
+-	dst->d_rtbcount = quota_btobb(src->d_rt_space);
+-	dst->d_rtbtimer = src->d_rt_spc_timer;
+-	dst->d_rtbwarns = src->d_rt_spc_warns;
++	return sb->s_qcop->set_dqblk(sb, qid, &fdq);
+ }
+ 
+ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
+ 			   void __user *addr)
+ {
+ 	struct fs_disk_quota fdq;
+-	struct qc_dqblk qdq;
+ 	struct kqid qid;
+ 	int ret;
+ 
+@@ -381,11 +272,8 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
+ 	qid = make_kqid(current_user_ns(), type, id);
+ 	if (!qid_valid(qid))
+ 		return -EINVAL;
+-	ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
+-	if (ret)
+-		return ret;
+-	copy_to_xfs_dqblk(&fdq, &qdq, type, id);
+-	if (copy_to_user(addr, &fdq, sizeof(fdq)))
++	ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
++	if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
+ 		return -EFAULT;
+ 	return ret;
+ }
+diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
+index f251bed..797fd46 100644
+--- a/fs/xfs/xfs_qm.h
++++ b/fs/xfs/xfs_qm.h
+@@ -167,9 +167,9 @@ extern void		xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
+ /* quota ops */
+ extern int		xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint);
+ extern int		xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t,
+-					uint, struct qc_dqblk *);
++					uint, struct fs_disk_quota *);
+ extern int		xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint,
+-					struct qc_dqblk *);
++					struct fs_disk_quota *);
+ extern int		xfs_qm_scall_getqstat(struct xfs_mount *,
+ 					struct fs_quota_stat *);
+ extern int		xfs_qm_scall_getqstatv(struct xfs_mount *,
+diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
+index 670882e..bbc813c 100644
+--- a/fs/xfs/xfs_qm_syscalls.c
++++ b/fs/xfs/xfs_qm_syscalls.c
+@@ -40,6 +40,7 @@ STATIC int	xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
+ STATIC int	xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
+ 					uint);
+ STATIC uint	xfs_qm_export_flags(uint);
++STATIC uint	xfs_qm_export_qtype_flags(uint);
+ 
+ /*
+  * Turn off quota accounting and/or enforcement for all udquots and/or
+@@ -573,8 +574,8 @@ xfs_qm_scall_getqstatv(
+ 	return 0;
+ }
+ 
+-#define XFS_QC_MASK \
+-	(QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
++#define XFS_DQ_MASK \
++	(FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
+ 
+ /*
+  * Adjust quota limits, and start/stop timers accordingly.
+@@ -584,7 +585,7 @@ xfs_qm_scall_setqlim(
+ 	struct xfs_mount	*mp,
+ 	xfs_dqid_t		id,
+ 	uint			type,
+-	struct qc_dqblk		*newlim)
++	fs_disk_quota_t		*newlim)
+ {
+ 	struct xfs_quotainfo	*q = mp->m_quotainfo;
+ 	struct xfs_disk_dquot	*ddq;
+@@ -593,9 +594,9 @@ xfs_qm_scall_setqlim(
+ 	int			error;
+ 	xfs_qcnt_t		hard, soft;
+ 
+-	if (newlim->d_fieldmask & ~XFS_QC_MASK)
++	if (newlim->d_fieldmask & ~XFS_DQ_MASK)
+ 		return EINVAL;
+-	if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
++	if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
+ 		return 0;
+ 
+ 	/*
+@@ -633,11 +634,11 @@ xfs_qm_scall_setqlim(
+ 	/*
+ 	 * Make sure that hardlimits are >= soft limits before changing.
+ 	 */
+-	hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
+-		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
++	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
++		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
+ 			be64_to_cpu(ddq->d_blk_hardlimit);
+-	soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
+-		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
++	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
++		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
+ 			be64_to_cpu(ddq->d_blk_softlimit);
+ 	if (hard == 0 || hard >= soft) {
+ 		ddq->d_blk_hardlimit = cpu_to_be64(hard);
+@@ -650,11 +651,11 @@ xfs_qm_scall_setqlim(
+ 	} else {
+ 		xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
+ 	}
+-	hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
+-		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
++	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
++		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
+ 			be64_to_cpu(ddq->d_rtb_hardlimit);
+-	soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
+-		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
++	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
++		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
+ 			be64_to_cpu(ddq->d_rtb_softlimit);
+ 	if (hard == 0 || hard >= soft) {
+ 		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
+@@ -667,10 +668,10 @@ xfs_qm_scall_setqlim(
+ 		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
+ 	}
+ 
+-	hard = (newlim->d_fieldmask & QC_INO_HARD) ?
++	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
+ 		(xfs_qcnt_t) newlim->d_ino_hardlimit :
+ 			be64_to_cpu(ddq->d_ino_hardlimit);
+-	soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
++	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
+ 		(xfs_qcnt_t) newlim->d_ino_softlimit :
+ 			be64_to_cpu(ddq->d_ino_softlimit);
+ 	if (hard == 0 || hard >= soft) {
+@@ -687,12 +688,12 @@ xfs_qm_scall_setqlim(
+ 	/*
+ 	 * Update warnings counter(s) if requested
+ 	 */
+-	if (newlim->d_fieldmask & QC_SPC_WARNS)
+-		ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
+-	if (newlim->d_fieldmask & QC_INO_WARNS)
+-		ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
+-	if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
+-		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);
++	if (newlim->d_fieldmask & FS_DQ_BWARNS)
++		ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
++	if (newlim->d_fieldmask & FS_DQ_IWARNS)
++		ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
++	if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
++		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);
+ 
+ 	if (id == 0) {
+ 		/*
+@@ -702,24 +703,24 @@ xfs_qm_scall_setqlim(
+ 		 * soft and hard limit values (already done, above), and
+ 		 * for warnings.
+ 		 */
+-		if (newlim->d_fieldmask & QC_SPC_TIMER) {
+-			q->qi_btimelimit = newlim->d_spc_timer;
+-			ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
++		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
++			q->qi_btimelimit = newlim->d_btimer;
++			ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
+ 		}
+-		if (newlim->d_fieldmask & QC_INO_TIMER) {
+-			q->qi_itimelimit = newlim->d_ino_timer;
+-			ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
++		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
++			q->qi_itimelimit = newlim->d_itimer;
++			ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
+ 		}
+-		if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
+-			q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
+-			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
++		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
++			q->qi_rtbtimelimit = newlim->d_rtbtimer;
++			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
+ 		}
+-		if (newlim->d_fieldmask & QC_SPC_WARNS)
+-			q->qi_bwarnlimit = newlim->d_spc_warns;
+-		if (newlim->d_fieldmask & QC_INO_WARNS)
+-			q->qi_iwarnlimit = newlim->d_ino_warns;
+-		if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
+-			q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
++		if (newlim->d_fieldmask & FS_DQ_BWARNS)
++			q->qi_bwarnlimit = newlim->d_bwarns;
++		if (newlim->d_fieldmask & FS_DQ_IWARNS)
++			q->qi_iwarnlimit = newlim->d_iwarns;
++		if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
++			q->qi_rtbwarnlimit = newlim->d_rtbwarns;
+ 	} else {
+ 		/*
+ 		 * If the user is now over quota, start the timelimit.
+@@ -830,7 +831,7 @@ xfs_qm_scall_getquota(
+ 	struct xfs_mount	*mp,
+ 	xfs_dqid_t		id,
+ 	uint			type,
+-	struct qc_dqblk		*dst)
++	struct fs_disk_quota	*dst)
+ {
+ 	struct xfs_dquot	*dqp;
+ 	int			error;
+@@ -854,25 +855,28 @@ xfs_qm_scall_getquota(
+ 	}
+ 
+ 	memset(dst, 0, sizeof(*dst));
+-	dst->d_spc_hardlimit =
+-		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
+-	dst->d_spc_softlimit =
+-		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
++	dst->d_version = FS_DQUOT_VERSION;
++	dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
++	dst->d_id = be32_to_cpu(dqp->q_core.d_id);
++	dst->d_blk_hardlimit =
++		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
++	dst->d_blk_softlimit =
++		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
+ 	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
+ 	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
+-	dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
+-	dst->d_ino_count = dqp->q_res_icount;
+-	dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
+-	dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
+-	dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
+-	dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
+-	dst->d_rt_spc_hardlimit =
+-		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
+-	dst->d_rt_spc_softlimit =
+-		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
+-	dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
+-	dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
+-	dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
++	dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
++	dst->d_icount = dqp->q_res_icount;
++	dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
++	dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
++	dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
++	dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
++	dst->d_rtb_hardlimit =
++		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
++	dst->d_rtb_softlimit =
++		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
++	dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
++	dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
++	dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);
+ 
+ 	/*
+ 	 * Internally, we don't reset all the timers when quota enforcement
+@@ -885,23 +889,23 @@ xfs_qm_scall_getquota(
+ 	     dqp->q_core.d_flags == XFS_DQ_GROUP) ||
+ 	    (!XFS_IS_PQUOTA_ENFORCED(mp) &&
+ 	     dqp->q_core.d_flags == XFS_DQ_PROJ)) {
+-		dst->d_spc_timer = 0;
+-		dst->d_ino_timer = 0;
+-		dst->d_rt_spc_timer = 0;
++		dst->d_btimer = 0;
++		dst->d_itimer = 0;
++		dst->d_rtbtimer = 0;
+ 	}
+ 
+ #ifdef DEBUG
+-	if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
+-	     (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
+-	     (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
+-	    id != 0) {
+-		if ((dst->d_space > dst->d_spc_softlimit) &&
+-		    (dst->d_spc_softlimit > 0)) {
+-			ASSERT(dst->d_spc_timer != 0);
++	if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
++	     (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) ||
++	     (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) &&
++	    dst->d_id != 0) {
++		if ((dst->d_bcount > dst->d_blk_softlimit) &&
++		    (dst->d_blk_softlimit > 0)) {
++			ASSERT(dst->d_btimer != 0);
+ 		}
+-		if ((dst->d_ino_count > dst->d_ino_softlimit) &&
++		if ((dst->d_icount > dst->d_ino_softlimit) &&
+ 		    (dst->d_ino_softlimit > 0)) {
+-			ASSERT(dst->d_ino_timer != 0);
++			ASSERT(dst->d_itimer != 0);
+ 		}
+ 	}
+ #endif
+@@ -911,6 +915,26 @@ out_put:
+ }
+ 
+ STATIC uint
++xfs_qm_export_qtype_flags(
++	uint flags)
++{
++	/*
++	 * Can't be more than one, or none.
++	 */
++	ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
++		(FS_PROJ_QUOTA | FS_USER_QUOTA));
++	ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
++		(FS_PROJ_QUOTA | FS_GROUP_QUOTA));
++	ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
++		(FS_USER_QUOTA | FS_GROUP_QUOTA));
++	ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
++
++	return (flags & XFS_DQ_USER) ?
++		FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
++			FS_PROJ_QUOTA : FS_GROUP_QUOTA;
++}
++
++STATIC uint
+ xfs_qm_export_flags(
+ 	uint flags)
+ {
+diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
+index ece0f31..2ad1b98 100644
+--- a/fs/xfs/xfs_quotaops.c
++++ b/fs/xfs/xfs_quotaops.c
+@@ -133,7 +133,7 @@ STATIC int
+ xfs_fs_get_dqblk(
+ 	struct super_block	*sb,
+ 	struct kqid		qid,
+-	struct qc_dqblk		*qdq)
++	struct fs_disk_quota	*fdq)
+ {
+ 	struct xfs_mount	*mp = XFS_M(sb);
+ 
+@@ -143,14 +143,14 @@ xfs_fs_get_dqblk(
+ 		return -ESRCH;
+ 
+ 	return -xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid),
+-				      xfs_quota_type(qid.type), qdq);
++				      xfs_quota_type(qid.type), fdq);
+ }
+ 
+ STATIC int
+ xfs_fs_set_dqblk(
+ 	struct super_block	*sb,
+ 	struct kqid		qid,
+-	struct qc_dqblk		*qdq)
++	struct fs_disk_quota	*fdq)
+ {
+ 	struct xfs_mount	*mp = XFS_M(sb);
+ 
+@@ -162,7 +162,7 @@ xfs_fs_set_dqblk(
+ 		return -ESRCH;
+ 
+ 	return -xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid),
+-				     xfs_quota_type(qid.type), qdq);
++				     xfs_quota_type(qid.type), fdq);
+ }
+ 
+ const struct quotactl_ops xfs_quotactl_operations = {
+diff --git a/include/linux/quota.h b/include/linux/quota.h
+index 6724c12..0f3c5d3 100644
+--- a/include/linux/quota.h
++++ b/include/linux/quota.h
+@@ -316,49 +316,6 @@ struct dquot_operations {
+ 
+ struct path;
+ 
+-/* Structure for communicating via ->get_dqblk() & ->set_dqblk() */
+-struct qc_dqblk {
+-	int d_fieldmask;	/* mask of fields to change in ->set_dqblk() */
+-	u64 d_spc_hardlimit;	/* absolute limit on used space */
+-	u64 d_spc_softlimit;	/* preferred limit on used space */
+-	u64 d_ino_hardlimit;	/* maximum # allocated inodes */
+-	u64 d_ino_softlimit;	/* preferred inode limit */
+-	u64 d_space;		/* Space owned by the user */
+-	u64 d_ino_count;	/* # inodes owned by the user */
+-	s64 d_ino_timer;	/* zero if within inode limits */
+-				/* if not, we refuse service */
+-	s64 d_spc_timer;	/* similar to above; for space */
+-	int d_ino_warns;	/* # warnings issued wrt num inodes */
+-	int d_spc_warns;	/* # warnings issued wrt used space */
+-	u64 d_rt_spc_hardlimit;	/* absolute limit on realtime space */
+-	u64 d_rt_spc_softlimit;	/* preferred limit on RT space */
+-	u64 d_rt_space;		/* realtime space owned */
+-	s64 d_rt_spc_timer;	/* similar to above; for RT space */
+-	int d_rt_spc_warns;	/* # warnings issued wrt RT space */
+-};
+-
+-/* Field specifiers for ->set_dqblk() in struct qc_dqblk */
+-#define	QC_INO_SOFT	(1<<0)
+-#define	QC_INO_HARD	(1<<1)
+-#define	QC_SPC_SOFT	(1<<2)
+-#define	QC_SPC_HARD	(1<<3)
+-#define	QC_RT_SPC_SOFT	(1<<4)
+-#define	QC_RT_SPC_HARD	(1<<5)
+-#define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \
+-		       QC_RT_SPC_SOFT | QC_RT_SPC_HARD)
+-#define	QC_SPC_TIMER	(1<<6)
+-#define	QC_INO_TIMER	(1<<7)
+-#define	QC_RT_SPC_TIMER	(1<<8)
+-#define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER)
+-#define	QC_SPC_WARNS	(1<<9)
+-#define	QC_INO_WARNS	(1<<10)
+-#define	QC_RT_SPC_WARNS	(1<<11)
+-#define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS)
+-#define	QC_SPACE	(1<<12)
+-#define	QC_INO_COUNT	(1<<13)
+-#define	QC_RT_SPACE	(1<<14)
+-#define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE)
+-
+ /* Operations handling requests from userspace */
+ struct quotactl_ops {
+ 	int (*quota_on)(struct super_block *, int, int, struct path *);
+@@ -367,8 +324,8 @@ struct quotactl_ops {
+ 	int (*quota_sync)(struct super_block *, int);
+ 	int (*get_info)(struct super_block *, int, struct if_dqinfo *);
+ 	int (*set_info)(struct super_block *, int, struct if_dqinfo *);
+-	int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
+-	int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
++	int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
++	int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
+ 	int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
+ 	int (*set_xstate)(struct super_block *, unsigned int, int);
+ 	int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
+diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
+index bfaf713..1d3eee5 100644
+--- a/include/linux/quotaops.h
++++ b/include/linux/quotaops.h
+@@ -98,9 +98,9 @@ int dquot_quota_sync(struct super_block *sb, int type);
+ int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
+ int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
+ int dquot_get_dqblk(struct super_block *sb, struct kqid id,
+-		struct qc_dqblk *di);
++		struct fs_disk_quota *di);
+ int dquot_set_dqblk(struct super_block *sb, struct kqid id,
+-		struct qc_dqblk *di);
++		struct fs_disk_quota *di);
+ 
+ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
+ int dquot_transfer(struct inode *inode, struct iattr *iattr);

Added: dists/sid/linux/debian/patches/debian/rmap-fix-abi-change-in-3.16.7-ckt5.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux/debian/patches/debian/rmap-fix-abi-change-in-3.16.7-ckt5.patch	Tue Feb 24 21:07:23 2015	(r22419)
@@ -0,0 +1,50 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Mon, 23 Feb 2015 05:09:41 +0000
+Subject: rmap: Fix ABI change in 3.16.7-ckt5
+Forwarded: not-needed
+
+'mm: prevent endless growth of anon_vma hierarchy' in 3.16.7-ckt5
+added two new members to struct anon_vma.
+
+This structure is always allocated by anon_vma_alloc() so it is OK to
+add members, but we need to add them at the end and hide them from
+genksyms.
+
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -37,16 +37,6 @@ struct anon_vma {
+ 	atomic_t refcount;
+ 
+ 	/*
+-	 * Count of child anon_vmas and VMAs which points to this anon_vma.
+-	 *
+-	 * This counter is used for making decision about reusing anon_vma
+-	 * instead of forking new one. See comments in function anon_vma_clone.
+-	 */
+-	unsigned degree;
+-
+-	struct anon_vma *parent;	/* Parent of this anon_vma */
+-
+-	/*
+ 	 * NOTE: the LSB of the rb_root.rb_node is set by
+ 	 * mm_take_all_locks() _after_ taking the above lock. So the
+ 	 * rb_root must only be read/written after taking the above lock
+@@ -55,6 +45,18 @@ struct anon_vma {
+ 	 * mm_take_all_locks() (mm_all_locks_mutex).
+ 	 */
+ 	struct rb_root rb_root;	/* Interval tree of private "related" vmas */
++
++#ifndef __GENKSYMS__
++	/*
++	 * Count of child anon_vmas and VMAs which points to this anon_vma.
++	 *
++	 * This counter is used for making decision about reusing anon_vma
++	 * instead of forking new one. See comments in function anon_vma_clone.
++	 */
++	unsigned degree;
++
++	struct anon_vma *parent;	/* Parent of this anon_vma */
++#endif
+ };
+ 
+ /*
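
The guarded-member trick above boils down to the following rough,
self-contained sketch: new fields are appended at the end of the
structure and wrapped in #ifndef __GENKSYMS__, so the symbol-version
checksum computed by genksyms still reflects the old layout (and
existing out-of-tree modules keep loading), while normal compilation
sees the full structure.  The structure and field names below are
invented for illustration only.

#include <stdio.h>

struct tracked_object {
	int refcount;		/* members that existed before the backport */
	void *tree_root;

#ifndef __GENKSYMS__
	/*
	 * New members go at the end, hidden from genksyms so the CRC is
	 * unchanged.  This is only safe because the structure is always
	 * allocated by core code, which is rebuilt and sees the full
	 * layout -- the same argument the patch description makes for
	 * anon_vma_alloc().
	 */
	unsigned int degree;
	struct tracked_object *parent;
#endif
};

int main(void)
{
	/* Only the genksyms pass (run with __GENKSYMS__ defined) sees the shorter definition. */
	printf("sizeof(struct tracked_object) = %zu\n",
	       sizeof(struct tracked_object));
	return 0;
}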

Modified: dists/sid/linux/debian/patches/series
==============================================================================
--- dists/sid/linux/debian/patches/series	Tue Feb 24 16:29:31 2015	(r22418)
+++ dists/sid/linux/debian/patches/series	Tue Feb 24 21:07:23 2015	(r22419)
@@ -493,3 +493,7 @@
 bugfix/x86/x86-hpet-force-enable-for-e6xx-based-systems.patch
 bugfix/all/vfs-read-file_handle-only-once-in-handle_to_path.patch
 bugfix/all/aslr-fix-stack-randomization-on-64-bit-systems.patch
+debian/revert-quota-switch-get_dqblk-and-set_dqblk-to-use-b.patch
+debian/rmap-fix-abi-change-in-3.16.7-ckt5.patch
+debian/perf-fix-abi-change-in-3.16.7-ckt6.patch
+debian/mm-fix-pagecache_get_page-abi-change-in-3.16.7-ckt6.patch


