[v10,052/108] KVM: x86/tdp_mmu: Ignore unsupported mmu operation on private GFNs
Commit Message
From: Isaku Yamahata <isaku.yamahata@intel.com>
Some KVM MMU operations (dirty page logging, page migration, page aging)
aren't supported for private GFNs (yet) with the first generation of TDX.
Silently return on unsupported TDX KVM MMU operations.
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
---
arch/x86/kvm/mmu/mmu.c     |  3 ++
arch/x86/kvm/mmu/tdp_mmu.c | 73 +++++++++++++++++++++++++++++++++++---
arch/x86/kvm/x86.c         |  3 ++
3 files changed, 74 insertions(+), 5 deletions(-)
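Note: the checks in this patch gate on kvm_arch_dirty_log_supported(), which
the diff uses but does not define; the helper comes from elsewhere in the
series. As a rough sketch of the shape such a helper could take (the vm_type
test below is an assumption for illustration, not the series' actual
definition):

/*
 * Hypothetical sketch only -- the real helper is added elsewhere in the
 * series.  The idea: report dirty logging as unsupported for VM types
 * whose private mappings cannot be write-protected, i.e. TDX guests.
 * kvm->arch.vm_type and KVM_X86_TDX_VM are assumptions here.
 */
static inline bool kvm_arch_dirty_log_supported(struct kvm *kvm)
{
	return kvm->arch.vm_type != KVM_X86_TDX_VM;
}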
Comments
On Sunday, October 30, 2022 2:23 PM, Yamahata, Isaku wrote:
> From: Isaku Yamahata <isaku.yamahata@intel.com>
>
> Some KVM MMU operations (dirty page logging, page migration, page aging)
> aren't supported for private GFNs (yet) with the first generation of TDX.
> Silently return on unsupported TDX KVM MMU operations.
>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
> ---
> arch/x86/kvm/mmu/mmu.c     |  3 ++
> arch/x86/kvm/mmu/tdp_mmu.c | 73 +++++++++++++++++++++++++++++++++++---
> arch/x86/kvm/x86.c         |  3 ++
> 3 files changed, 74 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 02e7b5cf3231..efc3b3f2dd12 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -6588,6 +6588,9 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
>  	for_each_rmap_spte(rmap_head, &iter, sptep) {
>  		sp = sptep_to_sp(sptep);
>
> +		/* Private page dirty logging is not supported yet. */
> +		KVM_BUG_ON(is_private_sptep(sptep), kvm);
> +
>  		/*
>  		 * We cannot do huge page mapping for indirect shadow pages,
>  		 * which are found on the last rmap (level = 1) when not using
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index 0e053b96444a..4b207ce83ffe 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -1469,7 +1469,8 @@ typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
>
>  static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
>  						   struct kvm_gfn_range *range,
> -						   tdp_handler_t handler)
> +						   tdp_handler_t handler,
> +						   bool only_shared)
>  {
>  	struct kvm_mmu_page *root;
>  	struct tdp_iter iter;
> @@ -1480,9 +1481,23 @@ static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
>  	 * into this helper allow blocking; it'd be dead, wasteful code.
>  	 */
>  	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
> +		gfn_t start;
> +		gfn_t end;
> +
> +		if (only_shared && is_private_sp(root))
> +			continue;
> +
>  		rcu_read_lock();
>
> -		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
> +		/*
> +		 * For TDX shared mapping, set the GFN shared bit on the range,
> +		 * so the handler() doesn't need to set it, to avoid duplicated
> +		 * code in multiple handler()s.
> +		 */
> +		start = kvm_gfn_for_root(kvm, root, range->start);
> +		end = kvm_gfn_for_root(kvm, root, range->end);
> +
> +		tdp_root_for_each_leaf_pte(iter, root, start, end)
>  			ret |= handler(kvm, &iter, range);
>
>  		rcu_read_unlock();
> @@ -1526,7 +1541,12 @@ static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
>
>  bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
>  {
> -	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
> +	/*
> +	 * The first TDX generation doesn't support clearing the A bit for
> +	 * private mappings, since there's no secure EPT API to support it.
> +	 * However, it's a legitimate request for a TDX guest.
> +	 */
> +	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range, true);
>  }
>
>  static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
> @@ -1537,7 +1557,8 @@ static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
>
>  bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
>  {
> -	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
> +	/* The first TDX generation doesn't support the A bit. */
> +	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn, true);
>  }
>
>  static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
> @@ -1582,8 +1603,11 @@ bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
>  	 * No need to handle the remote TLB flush under RCU protection, the
>  	 * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
>  	 * shadow page. See the WARN on pfn_changed in __handle_changed_spte().
> +	 *
> +	 * The .change_pte() callback should not happen for private pages,
> +	 * because for now TDX private pages are pinned for the VM's lifetime.
>  	 */
> -	return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
> +	return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn, true);
>  }
>
>  /*
> @@ -1637,6 +1661,14 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
>
>  	lockdep_assert_held_read(&kvm->mmu_lock);
>
> +	/*
> +	 * Because the first TDX generation doesn't support write-protecting
> +	 * private mappings, kvm_arch_dirty_log_supported(kvm) is false and
> +	 * it's a bug to reach here for a guest TD.
> +	 */
> +	if (WARN_ON_ONCE(!kvm_arch_dirty_log_supported(kvm)))
> +		return false;
> +
>  	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
>  		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
>  			     slot->base_gfn + slot->npages, min_level);
> @@ -1902,6 +1934,14 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
>
>  	lockdep_assert_held_read(&kvm->mmu_lock);
>
> +	/*
> +	 * The first TDX generation doesn't support clearing the dirty bit,
> +	 * since there's no secure EPT API to support it. It is a bug to
> +	 * reach here for a TDX guest.
> +	 */
> +	if (WARN_ON_ONCE(!kvm_arch_dirty_log_supported(kvm)))
> +		return false;
> +
It might not be a good choice to intercept everywhere in kvm_mmu just because
TDX doesn't support the operation. I'm thinking maybe we could do the check in
tdx.c instead, which is much simpler. For example:
@@ -2592,6 +2605,12 @@ static void tdx_handle_changed_private_spte(struct kvm *kvm,
lockdep_assert_held(&kvm->mmu_lock);
if (change->new.is_present) {
+ /* Only flags change. This isn't supported currently. */
+ KVM_BUG_ON(change->old.is_present, kvm);
Then we can have kvm_arch_dirty_log_supported completely removed.
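Fleshed out, the suggested consolidation might look something like the sketch
below. Only change->old.is_present and change->new.is_present come from the
snippet above; the parameter list, the kvm_spte_change layout, and the
tdx_sept_set_private_spte() call are assumptions for illustration, not the
series' actual code:

static void tdx_handle_changed_private_spte(struct kvm *kvm, gfn_t gfn,
					    const struct kvm_spte_change *change)
{
	lockdep_assert_held(&kvm->mmu_lock);

	if (change->new.is_present) {
		/*
		 * A present -> present transition means only the SPTE flags
		 * changed (write protection, A/D bit clearing, ...), which the
		 * first TDX generation can't do for private mappings.  One
		 * KVM_BUG_ON here covers every MMU path that could attempt it.
		 */
		KVM_BUG_ON(change->old.is_present, kvm);

		/* Assumed name for the map-new-private-page path. */
		tdx_sept_set_private_spte(kvm, gfn, change->level,
					  change->new.pfn);
	}
}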
On Mon, Dec 05, 2022 at 02:23:35PM +0000,
"Wang, Wei W" <wei.w.wang@intel.com> wrote:
> On Sunday, October 30, 2022 2:23 PM, Yamahata, Isaku wrote:
> > [...]
> > @@ -1902,6 +1934,14 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
> >
> >  	lockdep_assert_held_read(&kvm->mmu_lock);
> >
> > +	/*
> > +	 * The first TDX generation doesn't support clearing the dirty bit,
> > +	 * since there's no secure EPT API to support it. It is a bug to
> > +	 * reach here for a TDX guest.
> > +	 */
> > +	if (WARN_ON_ONCE(!kvm_arch_dirty_log_supported(kvm)))
> > +		return false;
> > +
>
> It might not be a good choice to intercept everywhere in kvm_mmu just because
> TDX doesn't support the operation. I'm thinking maybe we could do the check
> in tdx.c instead, which is much simpler. For example:
>
> @@ -2592,6 +2605,12 @@ static void tdx_handle_changed_private_spte(struct kvm *kvm,
> lockdep_assert_held(&kvm->mmu_lock);
>
> if (change->new.is_present) {
> + /* Only flags change. This isn't supported currently. */
> + KVM_BUG_ON(change->old.is_present, kvm);
>
> Then we can have kvm_arch_dirty_log_supported completely removed.
Do you mean WARN_ON_ONCE()? If so, they can be removed from this patch because
the code should be blocked by "if (!kvm_arch_dirty_log_supported(kvm))" at the
caller.
On Friday, December 16, 2022 7:21 AM, Yamahata, Isaku wrote:
> > > +	/*
> > > +	 * The first TDX generation doesn't support clearing the dirty bit,
> > > +	 * since there's no secure EPT API to support it. It is a bug to
> > > +	 * reach here for a TDX guest.
> > > +	 */
> > > +	if (WARN_ON_ONCE(!kvm_arch_dirty_log_supported(kvm)))
> > > +		return false;
> > > +
> >
> > It might not be a good choice to intercept everywhere in kvm_mmu just
> > because TDX doesn't support the operation. I'm thinking maybe we could
> > do the check in tdx.c instead, which is much simpler. For example:
> >
> > @@ -2592,6 +2605,12 @@ static void tdx_handle_changed_private_spte(struct kvm *kvm,
> > lockdep_assert_held(&kvm->mmu_lock);
> >
> > if (change->new.is_present) {
> > + /* Only flags change. This isn't supported currently. */
> > + KVM_BUG_ON(change->old.is_present, kvm);
> >
> > Then we can have kvm_arch_dirty_log_supported completely removed.
>
> Do you mean WARN_ON_ONCE()? If so, they can be removed from this
> patch because the code should be blocked by "if
> (!kvm_arch_dirty_log_supported(kvm))" at the caller.
>
As you also mentioned in the comment "It is a bug to reach here", we could keep
using KVM_BUG_ON. The suggestion is that we don't need to add such checks in
all the callers, as it is more complicated (and error-prone) to consider all of
them.

Why not let callers run into tdx_handle_changed_private_spte() to do the check
in that one place? From what I understand, we don't support an SPTE change
that only updates flags in this version. This would be much simpler.
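A minimal illustration of the transition that check would catch; the
is_present fields come from the snippet earlier in the thread, while the
initializer values and surrounding comments are hypothetical:

/*
 * Hypothetical example: a private SPTE that stays present while only its
 * flags change, e.g. the W bit being cleared for dirty-page logging.
 */
struct kvm_spte_change change = {
	.old = { .is_present = true },	/* already mapped */
	.new = { .is_present = true },	/* still mapped, only flags differ */
};

/*
 * With the suggested consolidation, KVM_BUG_ON(change->old.is_present, kvm)
 * in tdx_handle_changed_private_spte() fires on this transition, flagging
 * the unsupported flags-only update in one central place instead of in
 * every TDP MMU entry point.
 */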
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 02e7b5cf3231..efc3b3f2dd12 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6588,6 +6588,9 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
for_each_rmap_spte(rmap_head, &iter, sptep) {
sp = sptep_to_sp(sptep);
+ /* Private page dirty logging is not supported yet. */
+ KVM_BUG_ON(is_private_sptep(sptep), kvm);
+
/*
* We cannot do huge page mapping for indirect shadow pages,
* which are found on the last rmap (level = 1) when not using
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 0e053b96444a..4b207ce83ffe 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1469,7 +1469,8 @@ typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
struct kvm_gfn_range *range,
- tdp_handler_t handler)
+ tdp_handler_t handler,
+ bool only_shared)
{
struct kvm_mmu_page *root;
struct tdp_iter iter;
@@ -1480,9 +1481,23 @@ static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
* into this helper allow blocking; it'd be dead, wasteful code.
*/
for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
+ gfn_t start;
+ gfn_t end;
+
+ if (only_shared && is_private_sp(root))
+ continue;
+
rcu_read_lock();
- tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
+ /*
+ * For TDX shared mapping, set the GFN shared bit on the range,
+ * so the handler() doesn't need to set it, to avoid duplicated
+ * code in multiple handler()s.
+ */
+ start = kvm_gfn_for_root(kvm, root, range->start);
+ end = kvm_gfn_for_root(kvm, root, range->end);
+
+ tdp_root_for_each_leaf_pte(iter, root, start, end)
ret |= handler(kvm, &iter, range);
rcu_read_unlock();
@@ -1526,7 +1541,12 @@ static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
- return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
+ /*
+ * The first TDX generation doesn't support clearing the A bit for
+ * private mappings, since there's no secure EPT API to support it.
+ * However, it's a legitimate request for a TDX guest.
+ */
+ return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range, true);
}
static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
@@ -1537,7 +1557,8 @@ static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
+ /* The first TDX generation doesn't support the A bit. */
+ return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn, true);
}
static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
@@ -1582,8 +1603,11 @@ bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
* No need to handle the remote TLB flush under RCU protection, the
* target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
* shadow page. See the WARN on pfn_changed in __handle_changed_spte().
+ *
+ * The .change_pte() callback should not happen for private pages,
+ * because for now TDX private pages are pinned for the VM's lifetime.
*/
- return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
+ return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn, true);
}
/*
@@ -1637,6 +1661,14 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
lockdep_assert_held_read(&kvm->mmu_lock);
+ /*
+ * Because the first TDX generation doesn't support write-protecting
+ * private mappings, kvm_arch_dirty_log_supported(kvm) is false and
+ * it's a bug to reach here for a guest TD.
+ */
+ if (WARN_ON_ONCE(!kvm_arch_dirty_log_supported(kvm)))
+ return false;
+
for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages, min_level);
@@ -1902,6 +1934,14 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
lockdep_assert_held_read(&kvm->mmu_lock);
+ /*
+ * The first TDX generation doesn't support clearing the dirty bit,
+ * since there's no secure EPT API to support it. It is a bug to
+ * reach here for a TDX guest.
+ */
+ if (WARN_ON_ONCE(!kvm_arch_dirty_log_supported(kvm)))
+ return false;
+
for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages);
@@ -1968,6 +2008,13 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_mmu_page *root;
lockdep_assert_held_write(&kvm->mmu_lock);
+ /*
+ * The first TDX generation doesn't support clearing the dirty bit,
+ * since there's no secure EPT API to support it. For now, silently
+ * ignore KVM_CLEAR_DIRTY_LOG.
+ */
+ if (!kvm_arch_dirty_log_supported(kvm))
+ return;
for_each_tdp_mmu_root(kvm, root, slot->as_id)
clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
}
@@ -2034,6 +2081,13 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
lockdep_assert_held_read(&kvm->mmu_lock);
+ /*
+ * This should only be reachable when dirty-log is supported. It's
+ * a bug to reach here otherwise.
+ */
+ if (WARN_ON_ONCE(!kvm_arch_dirty_log_supported(kvm)))
+ return;
+
for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
zap_collapsible_spte_range(kvm, root, slot);
}
@@ -2087,6 +2141,15 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
bool spte_set = false;
lockdep_assert_held_write(&kvm->mmu_lock);
+
+ /*
+ * The first TDX generation doesn't support write-protecting private
+ * mappings; silently ignore the request. KVM_GET_DIRTY_LOG etc. can
+ * reach here, so don't warn.
+ */
+ if (!kvm_arch_dirty_log_supported(kvm))
+ return false;
+
for_each_tdp_mmu_root(kvm, root, slot->as_id)
spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12897,6 +12897,9 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
u32 new_flags = new ? new->flags : 0;
bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES;
+ if (!kvm_arch_dirty_log_supported(kvm) && log_dirty_pages)
+ return;
+
/*
* Update CPU dirty logging if dirty logging is being toggled. This
* applies to all operations.
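For reference, the kvm_tdp_mmu_handle_gfn() hunk above leans on
kvm_gfn_for_root() to stamp the shared bit onto the range when walking a
shared root. That helper is introduced elsewhere in the series; a plausible
sketch of the mapping it performs, assuming the series' kvm_gfn_shared_mask()
helper and the is_private_sp() check already used in this patch:

/*
 * Plausible sketch only -- the real helper lives elsewhere in the series.
 * Map a GFN into the alias space of a given root: a private root walks
 * GFNs without the shared bit, a shared root walks GFNs with the shared
 * bit set, so individual handlers never have to set it themselves.
 */
static gfn_t kvm_gfn_for_root(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t gfn)
{
	gfn &= ~kvm_gfn_shared_mask(kvm);	/* strip any existing alias bit */

	return is_private_sp(root) ? gfn : gfn | kvm_gfn_shared_mask(kvm);
}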