From patchwork Fri Mar 17 21:33:02 2023
X-Patchwork-Submitter: Tejun Heo
X-Patchwork-Id: 71466
From: Tejun Heo
To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org,
    juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com,
    rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com,
    vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org,
    martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com,
    derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com,
    dskarlat@cs.cmu.edu, riel@surriel.com
Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com,
    Tejun Heo
Subject: [PATCH 01/32] cgroup: Implement cgroup_show_cftypes()
Date: Fri, 17 Mar 2023 11:33:02 -1000
Message-Id: <20230317213333.2174969-2-tj@kernel.org>
In-Reply-To: <20230317213333.2174969-1-tj@kernel.org>
References: <20230317213333.2174969-1-tj@kernel.org>

Implement cgroup_show_cftypes() which shows and hides all cgroup files
associated with the specified set of cgroup file types. The CFTYPE_HIDDEN
flag is added so that files can be created hidden from the get-go.

cgroup_show_cftypes() can be used whether the cftypes are added or not. It
also combines with cgroup_file_show() so that a given file is visible iff
both its cftype and cfile are visible.

This will be used by a new sched_class to selectively show and hide CPU
controller interface files depending on whether they're supported.

Signed-off-by: Tejun Heo
Reviewed-by: David Vernet
Acked-by: Josh Don
Acked-by: Hao Luo
Acked-by: Barret Rhoden
---
 include/linux/cgroup-defs.h |  8 +++
 include/linux/cgroup.h      |  1 +
 kernel/cgroup/cgroup.c      | 97 ++++++++++++++++++++++++++++++++++---
 3 files changed, 99 insertions(+), 7 deletions(-)

diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 8a0d5466c7be..8af1e7d487cb 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -127,12 +127,18 @@ enum {
 	CFTYPE_WORLD_WRITABLE	= (1 << 4),	/* (DON'T USE FOR NEW FILES) S_IWUGO */
 	CFTYPE_DEBUG		= (1 << 5),	/* create when cgroup_debug */
 
+	CFTYPE_HIDDEN		= (1 << 6),	/* file type hidden, see cgroup_show_cftypes() */
+
 	/* internal flags, do not use outside cgroup core proper */
 	__CFTYPE_ONLY_ON_DFL	= (1 << 16),	/* only on default hierarchy */
 	__CFTYPE_NOT_ON_DFL	= (1 << 17),	/* not on default hierarchy */
 	__CFTYPE_ADDED		= (1 << 18),
 };
 
+enum cfile_flags {
+	CFILE_HIDDEN		= (1 << 0),	/* file instance hidden */
+};
+
 /*
  * cgroup_file is the handle for a file instance created in a cgroup which
  * is used, for example, to generate file changed notifications. This can
@@ -140,7 +146,9 @@ enum {
  */
 struct cgroup_file {
 	/* do not access any fields from outside cgroup core */
+	struct cftype *cft;
 	struct kernfs_node *kn;
+	unsigned int flags;
 	unsigned long notified_at;
 	struct timer_list notify_timer;
 };

diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 3410aecffdb4..a8c6982c2c24 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -115,6 +115,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 int cgroup_rm_cftypes(struct cftype *cfts);
+void cgroup_show_cftype(struct cftype *cft, bool show);
 void cgroup_file_notify(struct cgroup_file *cfile);
 void cgroup_file_show(struct cgroup_file *cfile, bool show);

diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 8a5294f4ce72..f762bfb78f4a 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -4206,10 +4206,13 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
 		return ret;
 	}
 
+	kernfs_show(kn, !(cft->flags & CFTYPE_HIDDEN));
+
 	if (cft->file_offset) {
 		struct cgroup_file *cfile = (void *)css + cft->file_offset;
 
 		timer_setup(&cfile->notify_timer, cgroup_file_notify_timer, 0);
+		cfile->cft = cft;
 
 		spin_lock_irq(&cgroup_file_kn_lock);
 		cfile->kn = kn;
@@ -4488,6 +4491,24 @@ void cgroup_file_notify(struct cgroup_file *cfile)
 	spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
 }
 
+static struct kernfs_node *cfile_kn_get(struct cgroup_file *cfile)
+{
+	struct kernfs_node *kn;
+
+	spin_lock_irq(&cgroup_file_kn_lock);
+	kn = cfile->kn;
+	kernfs_get(kn);
+	spin_unlock_irq(&cgroup_file_kn_lock);
+
+	return kn;
+}
+
+static bool cfile_visible(struct cgroup_file *cfile)
+{
+	return !(cfile->cft->flags & CFTYPE_HIDDEN) &&
+		!(cfile->flags & CFILE_HIDDEN);
+}
+
 /**
  * cgroup_file_show - show or hide a hidden cgroup file
  * @cfile: target cgroup_file obtained by setting cftype->file_offset
@@ -4497,15 +4518,20 @@ void cgroup_file_show(struct cgroup_file *cfile, bool show)
 {
 	struct kernfs_node *kn;
 
-	spin_lock_irq(&cgroup_file_kn_lock);
-	kn = cfile->kn;
-	kernfs_get(kn);
-	spin_unlock_irq(&cgroup_file_kn_lock);
+	mutex_lock(&cgroup_mutex);
 
-	if (kn)
-		kernfs_show(kn, show);
+	if (show)
+		cfile->flags &= ~CFILE_HIDDEN;
+	else
+		cfile->flags |= CFILE_HIDDEN;
 
-	kernfs_put(kn);
+	kn = cfile_kn_get(cfile);
+	if (kn) {
+		kernfs_show(kn, cfile_visible(cfile));
+		kernfs_put(kn);
+	}
+
+	mutex_unlock(&cgroup_mutex);
 }
 
 /**
@@ -5519,6 +5545,63 @@ static void offline_css(struct cgroup_subsys_state *css)
 	wake_up_all(&css->cgroup->offline_waitq);
 }
 
+/**
+ * cgroup_show_cftype - show or hide a cgroup file type
+ * @cft: cftype to show or hide
+ * @show: whether to show or hide
+ *
+ * Sets %CFTYPE_HIDDEN and shows/hides the matching files according to @show.
+ * @cft may or may not be added at the time of this call. After hiding, it's
+ * guaranteed that there are no in-flight operations on the hidden files.
+ */
+void cgroup_show_cftype(struct cftype *cft, bool show)
+{
+	struct cgroup_subsys *ss = cft->ss;
+	struct cgroup *root = ss ? &ss->root->cgrp : &cgrp_dfl_root.cgrp;
+	struct cgroup_subsys_state *css;
+
+	mutex_lock(&cgroup_mutex);
+
+	if (show)
+		cft->flags &= ~CFTYPE_HIDDEN;
+	else
+		cft->flags |= CFTYPE_HIDDEN;
+
+	if (!(cft->flags & __CFTYPE_ADDED))
+		goto out_unlock;
+
+	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
+		struct cgroup *cgrp = css->cgroup;
+		struct kernfs_node *kn;
+
+		if (!(css->flags & CSS_VISIBLE))
+			continue;
+
+		if (cft->file_offset) {
+			struct cgroup_file *cfile =
+				(void *)css + cft->file_offset;
+
+			kn = cfile_kn_get(cfile);
+			if (kn) {
+				kernfs_show(kn, cfile_visible(cfile));
+				kernfs_put(kn);
+			}
+		} else {
+			char buf[CGROUP_FILE_NAME_MAX];
+
+			kn = kernfs_find_and_get(cgrp->kn,
+					cgroup_file_name(cgrp, cft, buf));
+			if (kn) {
+				kernfs_show(kn, show);
+				kernfs_put(kn);
+			}
+		}
+	}
+
+out_unlock:
+	mutex_unlock(&cgroup_mutex);
+}
+
 /**
  * css_create - create a cgroup_subsys_state
  * @cgrp: the cgroup new css will be associated with
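For orientation, here is a minimal sketch of how a controller could combine
CFTYPE_HIDDEN with cgroup_show_cftype(). The "dummy" cftype, its seq_show
handler and the toggle helper are hypothetical illustrations, not part of this
patch:

	/* Hypothetical illustration of CFTYPE_HIDDEN + cgroup_show_cftype(). */
	static struct cftype dummy_files[] = {
		{
			.name		= "dummy.enabled",
			.flags		= CFTYPE_HIDDEN,	/* created hidden */
			.seq_show	= dummy_enabled_show,	/* hypothetical handler */
		},
		{ }	/* terminator */
	};

	/* Later, once the feature is known to be (un)supported: */
	static void dummy_set_supported(bool supported)
	{
		/*
		 * Flips CFTYPE_HIDDEN on the cftype and kernfs_show()s every
		 * matching file in the hierarchy; safe to call whether or not
		 * the cftype has been added yet.
		 */
		cgroup_show_cftype(&dummy_files[0], supported);
	}

A file ends up visible only if both the cftype and, where cftype->file_offset
is set, the per-instance cgroup_file are un-hidden, which is what
cfile_visible() above encodes.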
From patchwork Fri Mar 17 21:33:03 2023
X-Patchwork-Submitter: Tejun Heo
X-Patchwork-Id: 71497
From: Tejun Heo
Subject: [PATCH 02/32] sched: Encapsulate task attribute change sequence into a helper macro
Date: Fri, 17 Mar 2023 11:33:03 -1000
Message-Id: <20230317213333.2174969-3-tj@kernel.org>
In-Reply-To: <20230317213333.2174969-1-tj@kernel.org>

A task needs to be dequeued and put before an attribute change and then
restored afterwards. This is currently open-coded in multiple places.

This patch encapsulates the preparation and restoration sequences into
SCHED_CHANGE_BLOCK which allows the actual attribute changes to be put
inside its nested block. While the conversions are generally
straightforward, there are some subtleties:

* If a variable is specified for the flags argument, it can be modified
  from inside the block body to allow using a different flags value for
  re-enqueueing. This is used by rt_mutex_setprio() and
  __sched_setscheduler().

* __sched_setscheduler() used to set ENQUEUE_HEAD only if the task is
  queued. After the conversion, it sets the flag whether the task is
  queued or not. This doesn't cause any behavioral differences and is
  simpler than accessing the internal state of the helper.

* In a similar vein, sched_move_task() tests task_current() again after
  the change block instead of carrying over the test result from inside
  the change block.

This patch is adapted from Peter Zijlstra's draft patch linked below. The
changes are:

* Call fini explicitly from for() instead of using the __cleanup__
  attribute.

* Allow the queue flag variable to be modified directly so that the user
  doesn't have to poke into the sched_change_guard struct. Also, in the
  original patch, rt_mutex_setprio() was incorrectly updating its
  queue_flag instead of cg.flags.

* Some cosmetic changes.
Signed-off-by: Tejun Heo
Original-patch-by: Peter Zijlstra
Link: https://lore.kernel.org/all/20220330162228.GH14330@worktop.programming.kicks-ass.net/T/#u
Reviewed-by: David Vernet
---
 kernel/sched/core.c | 260 ++++++++++++++++++++++----------------------
 1 file changed, 130 insertions(+), 130 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index af017e038b48..fb080ca54d80 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2096,6 +2096,76 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 	dequeue_task(rq, p, flags);
 }
 
+struct sched_change_guard {
+	struct task_struct	*p;
+	struct rq		*rq;
+	bool			queued;
+	bool			running;
+	bool			done;
+};
+
+static struct sched_change_guard
+sched_change_guard_init(struct rq *rq, struct task_struct *p, int flags)
+{
+	struct sched_change_guard cg = {
+		.rq = rq,
+		.p = p,
+		.queued = task_on_rq_queued(p),
+		.running = task_current(rq, p),
+	};
+
+	if (cg.queued) {
+		/*
+		 * __kthread_bind() may call this on blocked tasks without
+		 * holding rq->lock through __do_set_cpus_allowed(). Assert @rq
+		 * locked iff @p is queued.
+		 */
+		lockdep_assert_rq_held(rq);
+		dequeue_task(rq, p, flags);
+	}
+	if (cg.running)
+		put_prev_task(rq, p);
+
+	return cg;
+}
+
+static void sched_change_guard_fini(struct sched_change_guard *cg, int flags)
+{
+	if (cg->queued)
+		enqueue_task(cg->rq, cg->p, flags | ENQUEUE_NOCLOCK);
+	if (cg->running)
+		set_next_task(cg->rq, cg->p);
+	cg->done = true;
+}
+
+/**
+ * SCHED_CHANGE_BLOCK - Nested block for task attribute updates
+ * @__rq: Runqueue the target task belongs to
+ * @__p: Target task
+ * @__flags: DEQUEUE/ENQUEUE_* flags
+ *
+ * A task may need to be dequeued and put_prev_task'd for attribute updates and
+ * set_next_task'd and re-enqueued afterwards. This helper defines a nested
+ * block which automatically handles these preparation and cleanup operations.
+ *
+ *  SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK) {
+ *          update_attribute(p);
+ *          ...
+ *  }
+ *
+ * If @__flags is a variable, the variable may be updated in the block body and
+ * the updated value will be used when re-enqueueing @p.
+ *
+ * If %DEQUEUE_NOCLOCK is specified, the caller is responsible for calling
+ * update_rq_clock() beforehand. Otherwise, the rq clock is automatically
+ * updated iff the task needs to be dequeued and re-enqueued. Only the former
+ * case guarantees that the rq clock is up-to-date inside and after the block.
+ */
+#define SCHED_CHANGE_BLOCK(__rq, __p, __flags)				\
+	for (struct sched_change_guard __cg =				\
+			sched_change_guard_init(__rq, __p, __flags);	\
+	     !__cg.done; sched_change_guard_fini(&__cg, __flags))
+
 static inline int __normal_prio(int policy, int rt_prio, int nice)
 {
 	int prio;
@@ -2554,7 +2624,6 @@ static void
 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
 {
 	struct rq *rq = task_rq(p);
-	bool queued, running;
 
 	/*
 	 * This here violates the locking rules for affinity, since we're only
@@ -2573,26 +2642,9 @@ __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
 	else
 		lockdep_assert_held(&p->pi_lock);
 
-	queued = task_on_rq_queued(p);
-	running = task_current(rq, p);
-
-	if (queued) {
-		/*
-		 * Because __kthread_bind() calls this on blocked tasks without
-		 * holding rq->lock.
-		 */
-		lockdep_assert_rq_held(rq);
-		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
+	SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK) {
+		p->sched_class->set_cpus_allowed(p, ctx);
 	}
-	if (running)
-		put_prev_task(rq, p);
-
-	p->sched_class->set_cpus_allowed(p, ctx);
-
-	if (queued)
-		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
-	if (running)
-		set_next_task(rq, p);
 }
 
 /*
@@ -6989,7 +7041,7 @@ static inline int rt_effective_prio(struct task_struct *p, int prio)
  */
 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
 {
-	int prio, oldprio, queued, running, queue_flag =
+	int prio, oldprio, queue_flag =
 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
 	const struct sched_class *prev_class;
 	struct rq_flags rf;
@@ -7049,49 +7101,39 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
 		queue_flag &= ~DEQUEUE_MOVE;
 
 	prev_class = p->sched_class;
-	queued = task_on_rq_queued(p);
-	running = task_current(rq, p);
-	if (queued)
-		dequeue_task(rq, p, queue_flag);
-	if (running)
-		put_prev_task(rq, p);
-
-	/*
-	 * Boosting condition are:
-	 * 1. -rt task is running and holds mutex A
-	 *      --> -dl task blocks on mutex A
-	 *
-	 * 2. -dl task is running and holds mutex A
-	 *      --> -dl task blocks on mutex A and could preempt the
-	 *          running task
-	 */
-	if (dl_prio(prio)) {
-		if (!dl_prio(p->normal_prio) ||
-		    (pi_task && dl_prio(pi_task->prio) &&
-		     dl_entity_preempt(&pi_task->dl, &p->dl))) {
-			p->dl.pi_se = pi_task->dl.pi_se;
-			queue_flag |= ENQUEUE_REPLENISH;
+	SCHED_CHANGE_BLOCK(rq, p, queue_flag) {
+		/*
+		 * Boosting condition are:
+		 * 1. -rt task is running and holds mutex A
+		 *      --> -dl task blocks on mutex A
+		 *
+		 * 2. -dl task is running and holds mutex A
+		 *      --> -dl task blocks on mutex A and could preempt the
+		 *          running task
+		 */
+		if (dl_prio(prio)) {
+			if (!dl_prio(p->normal_prio) ||
+			    (pi_task && dl_prio(pi_task->prio) &&
+			     dl_entity_preempt(&pi_task->dl, &p->dl))) {
+				p->dl.pi_se = pi_task->dl.pi_se;
+				queue_flag |= ENQUEUE_REPLENISH;
+			} else {
+				p->dl.pi_se = &p->dl;
+			}
+		} else if (rt_prio(prio)) {
+			if (dl_prio(oldprio))
+				p->dl.pi_se = &p->dl;
+			if (oldprio < prio)
+				queue_flag |= ENQUEUE_HEAD;
 		} else {
-			p->dl.pi_se = &p->dl;
+			if (dl_prio(oldprio))
+				p->dl.pi_se = &p->dl;
+			if (rt_prio(oldprio))
+				p->rt.timeout = 0;
 		}
-	} else if (rt_prio(prio)) {
-		if (dl_prio(oldprio))
-			p->dl.pi_se = &p->dl;
-		if (oldprio < prio)
-			queue_flag |= ENQUEUE_HEAD;
-	} else {
-		if (dl_prio(oldprio))
-			p->dl.pi_se = &p->dl;
-		if (rt_prio(oldprio))
-			p->rt.timeout = 0;
-	}
-
-	__setscheduler_prio(p, prio);
 
-	if (queued)
-		enqueue_task(rq, p, queue_flag);
-	if (running)
-		set_next_task(rq, p);
+		__setscheduler_prio(p, prio);
+	}
 
 	check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
@@ -7113,7 +7155,6 @@ static inline int rt_effective_prio(struct task_struct *p, int prio)
 
 void set_user_nice(struct task_struct *p, long nice)
 {
-	bool queued, running;
 	int old_prio;
 	struct rq_flags rf;
 	struct rq *rq;
@@ -7137,22 +7178,13 @@ void set_user_nice(struct task_struct *p, long nice)
 		p->static_prio = NICE_TO_PRIO(nice);
 		goto out_unlock;
 	}
-	queued = task_on_rq_queued(p);
-	running = task_current(rq, p);
-	if (queued)
-		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
-	if (running)
-		put_prev_task(rq, p);
-
-	p->static_prio = NICE_TO_PRIO(nice);
-	set_load_weight(p, true);
-	old_prio = p->prio;
-	p->prio = effective_prio(p);
-
-	if (queued)
-		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
-	if (running)
-		set_next_task(rq, p);
+
+	SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK) {
+		p->static_prio = NICE_TO_PRIO(nice);
+		set_load_weight(p, true);
+		old_prio = p->prio;
+		p->prio = effective_prio(p);
+	}
 
 	/*
 	 * If the task increased its priority or is running and
@@ -7536,7 +7568,7 @@ static int __sched_setscheduler(struct task_struct *p,
 				bool user, bool pi)
 {
 	int oldpolicy = -1, policy = attr->sched_policy;
-	int retval, oldprio, newprio, queued, running;
+	int retval, oldprio, newprio;
 	const struct sched_class *prev_class;
 	struct balance_callback *head;
 	struct rq_flags rf;
@@ -7701,33 +7733,22 @@ static int __sched_setscheduler(struct task_struct *p,
 		queue_flags &= ~DEQUEUE_MOVE;
 	}
 
-	queued = task_on_rq_queued(p);
-	running = task_current(rq, p);
-	if (queued)
-		dequeue_task(rq, p, queue_flags);
-	if (running)
-		put_prev_task(rq, p);
-
-	prev_class = p->sched_class;
+	SCHED_CHANGE_BLOCK(rq, p, queue_flags) {
+		prev_class = p->sched_class;
 
-	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
-		__setscheduler_params(p, attr);
-		__setscheduler_prio(p, newprio);
-	}
-	__setscheduler_uclamp(p, attr);
+		if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
+			__setscheduler_params(p, attr);
+			__setscheduler_prio(p, newprio);
+		}
+		__setscheduler_uclamp(p, attr);
 
-	if (queued) {
 		/*
 		 * We enqueue to tail when the priority of a task is
 		 * increased (user space view).
 		 */
 		if (oldprio < p->prio)
 			queue_flags |= ENQUEUE_HEAD;
-
-		enqueue_task(rq, p, queue_flags);
 	}
-	if (running)
-		set_next_task(rq, p);
 
 	check_class_changed(rq, p, prev_class, oldprio);
@@ -9250,25 +9271,15 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
  */
 void sched_setnuma(struct task_struct *p, int nid)
 {
-	bool queued, running;
 	struct rq_flags rf;
 	struct rq *rq;
 
 	rq = task_rq_lock(p, &rf);
-	queued = task_on_rq_queued(p);
-	running = task_current(rq, p);
-
-	if (queued)
-		dequeue_task(rq, p, DEQUEUE_SAVE);
-	if (running)
-		put_prev_task(rq, p);
-
-	p->numa_preferred_nid = nid;
+	SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE) {
+		p->numa_preferred_nid = nid;
+	}
 
-	if (queued)
-		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
-	if (running)
-		set_next_task(rq, p);
 	task_rq_unlock(rq, p, &rf);
 }
 #endif /* CONFIG_NUMA_BALANCING */
@@ -10360,35 +10371,24 @@ static void sched_change_group(struct task_struct *tsk)
  */
 void sched_move_task(struct task_struct *tsk)
 {
-	int queued, running, queue_flags =
-		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
 	struct rq_flags rf;
 	struct rq *rq;
 
 	rq = task_rq_lock(tsk, &rf);
 	update_rq_clock(rq);
 
-	running = task_current(rq, tsk);
-	queued = task_on_rq_queued(tsk);
-
-	if (queued)
-		dequeue_task(rq, tsk, queue_flags);
-	if (running)
-		put_prev_task(rq, tsk);
-
-	sched_change_group(tsk);
+	SCHED_CHANGE_BLOCK(rq, tsk,
+			   DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK) {
+		sched_change_group(tsk);
+	}
 
-	if (queued)
-		enqueue_task(rq, tsk, queue_flags);
-	if (running) {
-		set_next_task(rq, tsk);
-		/*
-		 * After changing group, the running task may have joined a
-		 * throttled one but it's still the running task. Trigger a
-		 * resched to make sure that task can still run.
-		 */
+	/*
+	 * After changing group, the running task may have joined a throttled
+	 * one but it's still the running task. Trigger a resched to make sure
+	 * that task can still run.
+	 */
+	if (task_current(rq, tsk))
 		resched_curr(rq);
-	}
 
 	task_rq_unlock(rq, tsk, &rf);
 }
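As a rough before/after sketch of the conversion pattern this patch applies
(change_some_attribute() is a stand-in, not a real kernel function):

	/* Before: the open-coded prepare/restore sequence. */
	queued = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	if (running)
		put_prev_task(rq, p);

	change_some_attribute(p);

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);

	/* After: the same sequence expressed with the helper. */
	SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK) {
		change_some_attribute(p);
	}

The macro expands to a single-iteration for loop: sched_change_guard_init()
runs before the body, and the loop's increment expression calls
sched_change_guard_fini(), which sets cg.done so the loop exits after one pass.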
From patchwork Fri Mar 17 21:33:04 2023
X-Patchwork-Submitter: Tejun Heo
X-Patchwork-Id: 71490
From: Tejun Heo
Subject: [PATCH 03/32] sched: Restructure sched_class order sanity checks in sched_init()
Date: Fri, 17 Mar 2023 11:33:04 -1000
Message-Id: <20230317213333.2174969-4-tj@kernel.org>
In-Reply-To: <20230317213333.2174969-1-tj@kernel.org>

Currently, sched_init() checks that the sched_class'es are in the expected
order by testing each adjacency, which is a bit brittle and makes it
cumbersome to add optional sched_class'es. Instead, let's verify that
they're in the expected order using sched_class_above(), which is what
actually matters.

Signed-off-by: Tejun Heo
Suggested-by: Peter Zijlstra
Reviewed-by: David Vernet
---
 kernel/sched/core.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fb080ca54d80..efac96fd6cfd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9794,12 +9794,12 @@ void __init sched_init(void)
 	int i;
 
 	/* Make sure the linker didn't screw up */
-	BUG_ON(&idle_sched_class != &fair_sched_class + 1 ||
-	       &fair_sched_class != &rt_sched_class + 1 ||
-	       &rt_sched_class != &dl_sched_class + 1);
 #ifdef CONFIG_SMP
-	BUG_ON(&dl_sched_class != &stop_sched_class + 1);
+	BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class));
 #endif
+	BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class));
+	BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class));
+	BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class));
 
 	wait_bit_init();
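The point of switching to ordering checks is that they keep holding when an
optional class is slotted in between two existing ones. sched_class_above()
is assumed here to be the pointer-order comparison over the linker-placed
sched_class objects in kernel/sched/sched.h, and ext_sched_class below is a
hypothetical optional class used only for illustration:

	/* Adjacency breaks as soon as anything sits between fair and idle: */
	BUG_ON(&idle_sched_class != &fair_sched_class + 1);

	/* Ordering still holds with an optional class in between: */
	BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class));
	BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));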
From patchwork Fri Mar 17 21:33:05 2023
X-Patchwork-Submitter: Tejun Heo
X-Patchwork-Id: 71492
From: Tejun Heo
Subject: [PATCH 04/32] sched: Allow sched_cgroup_fork() to fail and introduce sched_cancel_fork()
Date: Fri, 17 Mar 2023 11:33:05 -1000
Message-Id: <20230317213333.2174969-5-tj@kernel.org>
In-Reply-To: <20230317213333.2174969-1-tj@kernel.org>

A new BPF extensible sched_class will need more control over the forking
process. It wants to be able to fail from sched_cgroup_fork() after the new
task's sched_task_group is initialized so that the loaded BPF program can
prepare the task with its cgroup association established and reject the
fork if e.g. allocation fails.

Allow sched_cgroup_fork() to fail by making it return int instead of void
and add sched_cancel_fork() to undo sched_fork() in the error path.
sched_cgroup_fork() doesn't fail yet and this patch shouldn't cause any
behavior changes.

v2: Patch description updated to detail the expected use.

Signed-off-by: Tejun Heo
Reviewed-by: David Vernet
Acked-by: Josh Don
Acked-by: Hao Luo
Acked-by: Barret Rhoden
---
 include/linux/sched/task.h |  3 ++-
 kernel/fork.c              | 15 ++++++++++-----
 kernel/sched/core.c        |  8 +++++++-
 3 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 357e0068497c..dcff721170c3 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -58,7 +58,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 
 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
-extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern void sched_cancel_fork(struct task_struct *p);
 extern void sched_post_fork(struct task_struct *p);
 extern void sched_dead(struct task_struct *p);

diff --git a/kernel/fork.c b/kernel/fork.c
index f68954d05e89..0d166537a1a3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2242,7 +2242,7 @@ static __latent_entropy struct task_struct *copy_process(
 
 	retval = perf_event_init_task(p, clone_flags);
 	if (retval)
-		goto bad_fork_cleanup_policy;
+		goto bad_fork_sched_cancel_fork;
 	retval = audit_alloc(p);
 	if (retval)
 		goto bad_fork_cleanup_perf;
@@ -2383,7 +2383,9 @@ static __latent_entropy struct task_struct *copy_process(
 	 * cgroup specific, it unconditionally needs to place the task on a
 	 * runqueue.
 	 */
-	sched_cgroup_fork(p, args);
+	retval = sched_cgroup_fork(p, args);
+	if (retval)
+		goto bad_fork_cancel_cgroup;
 
 	/*
 	 * From this point on we must avoid any synchronous user-space
@@ -2429,13 +2431,13 @@ static __latent_entropy struct task_struct *copy_process(
 	/* Don't start children in a dying pid namespace */
 	if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
 		retval = -ENOMEM;
-		goto bad_fork_cancel_cgroup;
+		goto bad_fork_core_free;
 	}
 
 	/* Let kill terminate clone/fork in the middle */
 	if (fatal_signal_pending(current)) {
 		retval = -EINTR;
-		goto bad_fork_cancel_cgroup;
+		goto bad_fork_core_free;
 	}
 
 	/* No more failure paths after this point. */
@@ -2510,10 +2512,11 @@ static __latent_entropy struct task_struct *copy_process(
 
 	return p;
 
-bad_fork_cancel_cgroup:
+bad_fork_core_free:
 	sched_core_free(p);
 	spin_unlock(&current->sighand->siglock);
 	write_unlock_irq(&tasklist_lock);
+bad_fork_cancel_cgroup:
 	cgroup_cancel_fork(p, args);
 bad_fork_put_pidfd:
 	if (clone_flags & CLONE_PIDFD) {
@@ -2552,6 +2555,8 @@ static __latent_entropy struct task_struct *copy_process(
 	audit_free(p);
 bad_fork_cleanup_perf:
 	perf_event_free_task(p);
+bad_fork_sched_cancel_fork:
+	sched_cancel_fork(p);
 bad_fork_cleanup_policy:
 	lockdep_free_task(p);
 #ifdef CONFIG_NUMA

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index efac96fd6cfd..fdf4dba12a7e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4768,7 +4768,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 	return 0;
 }
 
-void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
+int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
 {
 	unsigned long flags;
 
@@ -4795,6 +4795,12 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
 	if (p->sched_class->task_fork)
 		p->sched_class->task_fork(p);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+	return 0;
+}
+
+void sched_cancel_fork(struct task_struct *p)
+{
 }
 
 void sched_post_fork(struct task_struct *p)
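A sketch of how this plumbing is meant to be used eventually. The
fork_may_fail() callback below is hypothetical and does not exist in this
patch (sched_cgroup_fork() still always returns 0 here); it only shows where
a future sched_class could veto the fork once the task's group association
is set up:

	/*
	 * Hypothetical future addition inside sched_cgroup_fork(), after
	 * p's sched_task_group has been set.
	 */
	if (p->sched_class->fork_may_fail) {
		int ret = p->sched_class->fork_may_fail(p);

		if (ret)
			return ret;	/* copy_process() unwinds and eventually
					 * reaches sched_cancel_fork() via its
					 * fall-through cleanup labels */
	}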
From patchwork Fri Mar 17 21:33:06 2023
X-Patchwork-Submitter: Tejun Heo
X-Patchwork-Id: 71486
From: Tejun Heo
Subject: [PATCH 05/32] sched: Add sched_class->reweight_task()
Date: Fri, 17 Mar 2023 11:33:06 -1000
Message-Id: <20230317213333.2174969-6-tj@kernel.org>
In-Reply-To: <20230317213333.2174969-1-tj@kernel.org>

Currently, during a task weight change, sched core directly calls
reweight_task() defined in fair.c if @p is on CFS. Let's make it a proper
sched_class operation instead. CFS's reweight_task() is renamed to
reweight_task_fair() and now called through sched_class.

While it turns a direct call into an indirect one, set_load_weight() isn't
called from a hot path and this change shouldn't cause any noticeable
difference.

This will be used to implement reweight_task() for a new BPF extensible
sched_class so that it can keep its cached task weight up-to-date.

Signed-off-by: Tejun Heo
Reviewed-by: David Vernet
Acked-by: Josh Don
Acked-by: Hao Luo
Acked-by: Barret Rhoden
---
 kernel/sched/core.c  | 4 ++--
 kernel/sched/fair.c  | 3 ++-
 kernel/sched/sched.h | 4 ++--
 3 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fdf4dba12a7e..5a6f4884384e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1277,8 +1277,8 @@ static void set_load_weight(struct task_struct *p, bool update_load)
 	 * SCHED_OTHER tasks have to update their load when changing their
 	 * weight
 	 */
-	if (update_load && p->sched_class == &fair_sched_class) {
-		reweight_task(p, prio);
+	if (update_load && p->sched_class->reweight_task) {
+		p->sched_class->reweight_task(task_rq(p), p, prio);
 	} else {
 		load->weight = scale_load(sched_prio_to_weight[prio]);
 		load->inv_weight = sched_prio_to_wmult[prio];

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7a1b1f855b96..681ab0dd0bc1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3342,7 +3342,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 
 }
 
-void reweight_task(struct task_struct *p, int prio)
+static void reweight_task_fair(struct rq *rq, struct task_struct *p, int prio)
 {
 	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -12547,6 +12547,7 @@ DEFINE_SCHED_CLASS(fair) = {
 	.task_tick		= task_tick_fair,
 	.task_fork		= task_fork_fair,
 
+	.reweight_task		= reweight_task_fair,
 	.prio_changed		= prio_changed_fair,
 	.switched_from		= switched_from_fair,
 	.switched_to		= switched_to_fair,

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3e8df6d31c1e..7934b597053d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2213,6 +2213,8 @@ struct sched_class {
 	 */
 	void (*switched_from)(struct rq *this_rq, struct task_struct *task);
 	void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
+	void (*reweight_task)(struct rq *this_rq, struct task_struct *task,
+			      int newprio);
 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
 			      int oldprio);
 
@@ -2365,8 +2367,6 @@ extern void init_sched_dl_class(void);
 extern void init_sched_rt_class(void);
 extern void init_sched_fair_class(void);
 
-extern void reweight_task(struct task_struct *p, int prio);
-
 extern void resched_curr(struct rq *rq);
 extern void resched_cpu(int cpu);
:references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=a5RYcwx9iQWgNlT0187t5NhcGeh1nY2e8DTY6uu1F9k=; b=BjFlvKI0AcZWW/rc4IQpbf/Ze0wAHMhFiQzcftBNEQSXyVSpi2qoZ/Edcy9W/r2hDA WPYpbxbPefQBBf43ELKM+0heGknwGREl8lH8HoGgYv9+YeKCLSLcmXBnuK65VH864xGt htsOp+f5JCy+9cUQks01FQcQvbq51O9QrgctlAUbRzE5FnPYuNIGdnXtmWs0g3GSkBQ/ kIDvjQsUEeoCshWR71AFocuvZ4y0ITi6xeRqKDYA21fkqtGi1muO9c1ReOXP2siZvI8a iqvS7NBYCGm+XJtajmPcZJLA8vTISWycOVeY8Jq4M3jo1ubuQaVlvAW+RlH7Qn4zQWFw a0rQ== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=KnGUtjGl; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: from out1.vger.email (out1.vger.email. [2620:137:e000::1:20]) by mx.google.com with ESMTP id 34-20020a630b22000000b0050bf219bfd7si3316193pgl.236.2023.03.17.14.52.27; Fri, 17 Mar 2023 14:52:43 -0700 (PDT) Received-SPF: pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) client-ip=2620:137:e000::1:20; Authentication-Results: mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=KnGUtjGl; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S229746AbjCQVi2 (ORCPT + 99 others); Fri, 17 Mar 2023 17:38:28 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:52148 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231217AbjCQVhV (ORCPT ); Fri, 17 Mar 2023 17:37:21 -0400 Received: from mail-pj1-f54.google.com (mail-pj1-f54.google.com [209.85.216.54]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id D0C7438460; Fri, 17 Mar 2023 14:36:16 -0700 (PDT) Received: by mail-pj1-f54.google.com with SMTP id qe8-20020a17090b4f8800b0023f07253a2cso6625400pjb.3; Fri, 17 Mar 2023 14:36:16 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; t=1679088834; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=a5RYcwx9iQWgNlT0187t5NhcGeh1nY2e8DTY6uu1F9k=; b=KnGUtjGld2eavhY0nV7HDE35mBtuIWhB1vCYI+6hUw1whbxTqlRP/HVJmAOtjw7CGZ XoWk5NzaCey8siqObwBHeMVno3GGcGi4CAK+QwrlQst1f1lHFnfGtNV383iJH3S8jmeM p/JpSju7bNUUNqbMjhRbUYxugQ0mWk4TW5zXlXu5WhpH/p3OekuAk9x8Bw+QcSmWddi8 /TjOVylJKCiOFluMtlAJTnWsYQOxInqnnu29NpvVD51xajyLUdR5rdnPgwWmJ8ErCinV LEW1km3yY39wF2LwyETTG6c7BpVXOpcOBXrwh/zG72JYDv+a2mPrAmTcBll5NQdZmh6o E8ug== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; t=1679088834; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=a5RYcwx9iQWgNlT0187t5NhcGeh1nY2e8DTY6uu1F9k=; b=bkcBxNzV9NpkmovC9yQaYi0l6l2WdL1aj5UV9qmvgHjAqGADwA9GPOXu87HGR47432 LzczaxUyFatvBRewvjkxDeIDjZKZiC0ikOQ7eEODLsyzCXRhNS+xzb3m59Syy6P0+OOV V1D0SnWAz14wG1ve2i2wXSRmMW6G+b6IfuK/3yrctqKWk077rxMNK7VFIl5l24/JZbZL ROR2sqnYJH4lGhsnHtEY9/7PfZOgIswd+PQGFgbdfx24DpdNSNhTIdpat4nDQo54rYMv hUfoxinoaR7F3Y+B3qe0TYPM5b2inxVi29b1oPYe9q1v7cKWyI65psZnBxcKSM9KwwLx 3LwA== 
X-Gm-Message-State: AO0yUKVQZYnmUm6Fu4cOX0pQtOajlu5+7qwmscov1k5mzGa+SK3isIlP Ir5cBQ6XasRdzlabYCD5dJMIEfdtz+A= X-Received: by 2002:a17:90b:1b4c:b0:23d:2532:ae34 with SMTP id nv12-20020a17090b1b4c00b0023d2532ae34mr4736986pjb.2.1679088833872; Fri, 17 Mar 2023 14:33:53 -0700 (PDT) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id fa23-20020a17090af0d700b00231227781d5sm5560031pjb.2.2023.03.17.14.33.53 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:33:53 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 06/32] sched: Add sched_class->switching_to() and expose check_class_changing/changed() Date: Fri, 17 Mar 2023 11:33:07 -1000 Message-Id: <20230317213333.2174969-7-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,RCVD_IN_MSPIKE_H3, RCVD_IN_MSPIKE_WL,SPF_HELO_NONE,SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760653438045624801?= X-GMAIL-MSGID: =?utf-8?q?1760653438045624801?= When a task switches to a new sched_class, the prev and new classes are notified through ->switched_from() and ->switched_to(), respectively, after the switching is done. A new BPF extensible sched_class will have callbacks that allow the BPF scheduler to keep track of relevant task states (like priority and cpumask). Those callbacks aren't called while a task is on a different sched_class. When a task comes back, we wanna tell the BPF progs the up-to-date state before the task gets enqueued, so we need a hook which is called before the switching is committed. This patch adds ->switching_to() which is called during sched_class switch through check_class_changing() before the task is restored. Also, this patch exposes check_class_changing/changed() in kernel/sched/sched.h. They will be used by the new BPF extensible sched_class to implement implicit sched_class switching which is used e.g. when falling back to CFS when the BPF scheduler fails or unloads. This is a prep patch and doesn't cause any behavior changes. The new operation and exposed functions aren't used yet. v2: Improve patch description w/ details on planned use. 
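To make the ordering concrete: in the hunks below, check_class_changing() runs right after __setscheduler_prio() picks the new class, i.e. while the task is still dequeued, whereas check_class_changed() keeps running after the change is committed and the task has been re-enqueued. A hypothetical ->switching_to() implementation (the ext_* names are illustrative and not part of this series; only the hook's signature and locking context come from the patch) would then look like:

    /* called by check_class_changing() with pi_lock and rq_lock held */
    static void switching_to_ext(struct rq *rq, struct task_struct *p)
    {
            /*
             * Refresh cached priority/cpumask state now so that it is
             * already valid when ->enqueue_task() later sees @p on the
             * new class.
             */
            ext_refresh_task_state(p);      /* hypothetical helper */
    }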
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- kernel/sched/core.c | 20 +++++++++++++++++--- kernel/sched/sched.h | 7 +++++++ 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 5a6f4884384e..a378e8e09061 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2223,6 +2223,17 @@ inline int task_curr(const struct task_struct *p) return cpu_curr(task_cpu(p)) == p; } +/* + * ->switching_to() is called with the pi_lock and rq_lock held and must not + * mess with locking. + */ +void check_class_changing(struct rq *rq, struct task_struct *p, + const struct sched_class *prev_class) +{ + if (prev_class != p->sched_class && p->sched_class->switching_to) + p->sched_class->switching_to(rq, p); +} + /* * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock, * use the balance_callback list if you want balancing. @@ -2230,9 +2241,9 @@ inline int task_curr(const struct task_struct *p) * this means any call to check_class_changed() must be followed by a call to * balance_callback(). */ -static inline void check_class_changed(struct rq *rq, struct task_struct *p, - const struct sched_class *prev_class, - int oldprio) +void check_class_changed(struct rq *rq, struct task_struct *p, + const struct sched_class *prev_class, + int oldprio) { if (prev_class != p->sched_class) { if (prev_class->switched_from) @@ -7139,6 +7150,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) } __setscheduler_prio(p, prio); + check_class_changing(rq, p, prev_class); } check_class_changed(rq, p, prev_class, oldprio); @@ -7748,6 +7760,8 @@ static int __sched_setscheduler(struct task_struct *p, } __setscheduler_uclamp(p, attr); + check_class_changing(rq, p, prev_class); + /* * We enqueue to tail when the priority of a task is * increased (user space view). diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 7934b597053d..1545779c5db8 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2211,6 +2211,7 @@ struct sched_class { * cannot assume the switched_from/switched_to pair is serialized by * rq->lock. They are however serialized by p->pi_lock. 
*/ + void (*switching_to) (struct rq *this_rq, struct task_struct *task); void (*switched_from)(struct rq *this_rq, struct task_struct *task); void (*switched_to) (struct rq *this_rq, struct task_struct *task); void (*reweight_task)(struct rq *this_rq, struct task_struct *task, @@ -2447,6 +2448,12 @@ static inline void sub_nr_running(struct rq *rq, unsigned count) extern void activate_task(struct rq *rq, struct task_struct *p, int flags); extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); +extern void check_class_changing(struct rq *rq, struct task_struct *p, + const struct sched_class *prev_class); +extern void check_class_changed(struct rq *rq, struct task_struct *p, + const struct sched_class *prev_class, + int oldprio); + extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); #ifdef CONFIG_PREEMPT_RT From patchwork Fri Mar 17 21:33:08 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71467 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp2664wrt; Fri, 17 Mar 2023 14:38:58 -0700 (PDT) X-Google-Smtp-Source: AK7set/hnBW1a5rVQ2PjmZkTcr/JXXK5nS0e8VsQ8AsDJjmmGGY1Fd+pZJzN7C2hIgGl1c0baF/q X-Received: by 2002:a05:6a20:6914:b0:cd:2c0a:6ec0 with SMTP id q20-20020a056a20691400b000cd2c0a6ec0mr9093766pzj.3.1679089138344; Fri, 17 Mar 2023 14:38:58 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679089138; cv=none; d=google.com; s=arc-20160816; b=WZXYDfp5MOkL3eG5OBPa0C5pL9vNuMoSzHIk4MGFa+DM/gTStktMxZ8dxKCPCSoFrb SYeBWx7HY2r1WISp6hBvagslUrl67ihkQ6kzuC6aYwaidaTA/oiEFgv29rrTqCjMmBhj Kg0GiYdy0h7PgREEhRsvKB2Fx9m0yz+3eI09yWyvjO3pHTLI1wA/8OSYDrxTW4QBl9vu HcJ/tuv5ymfDlyXOsjnfNB9XvqLU5Pvme1r7EfXVCAZXJwJ9sJpKAh4HtK+pmp0GG0U/ NmFJ/DvMMOxgmmO07xIuBLqKGGABNzClCofJtI4d1xCPrdsTd7muCvJSQYWCK1Ud5fTm FrjA== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=xVdIfb6F5UB3YBDHRdzW1feeALJRZ0rog2R5eaNMiiY=; b=gb9hDcxGjG6Xc+jFJAHQ/SJNTG9io6TNH03iMjGNrWS4/NE0n0XhwpGMp2C0WDb5Yb PWbCHY66fvWohsZ8lSs4CdUZZc+3Y19k+AoMdlbJeyIvpoQOItECaM6qhXJ/CvXk7xs6 tSKKQ4mc+kafvrAU5MzOH4URx7+/pGYfgQN8PTbWEWn6Ajvign1zx2mgdzKeFpXGbVhk PFqc0fvuVuPgYHjkoZAugB52cVigezKu5fPoCVtWtTJpcKHR48/fxLBPi45woOthRXAi qbfoTsuFERHQEVCNsmXLbAodCh2CH3me/KxGpUocdPDfLIlAYmslA3p9cpB6DF/LG5wz wqCg== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=RJmQKieu; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: from out1.vger.email (out1.vger.email. 
[2620:137:e000::1:20]) by mx.google.com with ESMTP id d11-20020a056a0010cb00b005a8c65d57a0si3527298pfu.257.2023.03.17.14.38.43; Fri, 17 Mar 2023 14:38:58 -0700 (PDT) Received-SPF: pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) client-ip=2620:137:e000::1:20; Authentication-Results: mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=RJmQKieu; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S229560AbjCQVgU (ORCPT + 99 others); Fri, 17 Mar 2023 17:36:20 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:51862 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S230197AbjCQVgJ (ORCPT ); Fri, 17 Mar 2023 17:36:09 -0400 Received: from mail-pj1-x102b.google.com (mail-pj1-x102b.google.com [IPv6:2607:f8b0:4864:20::102b]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 72C7C38E8A; Fri, 17 Mar 2023 14:35:25 -0700 (PDT) Received: by mail-pj1-x102b.google.com with SMTP id fy10-20020a17090b020a00b0023b4bcf0727so6659407pjb.0; Fri, 17 Mar 2023 14:35:25 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; t=1679088836; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=xVdIfb6F5UB3YBDHRdzW1feeALJRZ0rog2R5eaNMiiY=; b=RJmQKieuoBpzebcHh2VLzlzU09dKDuNr31pYdWA7spYf07t01yCuqSOO3b1EfKdXuS 9DeI7YtFajTo1jm29L5bL27jHQnXaSZo3GrFOgc+8+DlhCfAgw6ijtdvHXKwkQC+2+H9 bdYhVwlbCnu/KkzqRpp5q3Hwtbpk7Ybj6kBaE1wjimDvMBbefdhKh2bQMFQ2pGTCSvic HmZpuPOKYKbXydjuERTJT5IselTQ0ZeK2D4Fozjwin0AlMny6cn+ZgI+lfgOMoml39o6 prWd++S3lMSsL2ZWeQkdy5/XI4Rz3BgvtKR9alGLssQRdmwBG98Mx/ipb40IcnSITpo5 Fg5w== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; t=1679088836; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=xVdIfb6F5UB3YBDHRdzW1feeALJRZ0rog2R5eaNMiiY=; b=Kf0QZd+bFCiCkGIIXw3ZZikx2YblmpKFZPTue1Bb9IvvZJbkcV6LoKTvlzm9Wi1bxV gVIg7vwkJaA1TMj/p5h4/me8nqauSuCgQ5owev91TEKctukqrFJwJ6FqOL18tGdg0oqu 8Qt+nw0pNu4ATuokD/hK2/kV3RgpFKdSp/1qqvjiXdazAqvYwkZecEdxGwm7WvvC5u4i Gs6guhPm77CZwq5SYSB2bHgL8olAk0po5eJEavH9v8t7vGVKiplIfeLRIgFVj99smaeK X8U1ORYbDj1+j/20qeqQcB1rUqUSeaMVaItGx822AkiTGmPHAqWucKSQ6xxvtLV0VL64 IWqQ== X-Gm-Message-State: AO0yUKW+fenmQ/V/Md8qy1gzS8NLfw+bFq50j4/jzBsm2iJz3k/zQy9J VlHXWmyJOTZnCmiblp9ft9c= X-Received: by 2002:a17:90b:3e8e:b0:234:ba6f:c980 with SMTP id rj14-20020a17090b3e8e00b00234ba6fc980mr9358653pjb.17.1679088835772; Fri, 17 Mar 2023 14:33:55 -0700 (PDT) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id q2-20020a63d602000000b00476dc914262sm1949664pgg.1.2023.03.17.14.33.55 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:33:55 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 07/32] sched: Factor out cgroup weight conversion functions Date: Fri, 17 Mar 2023 11:33:08 -1000 Message-Id: <20230317213333.2174969-8-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE, SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760652572617383597?= X-GMAIL-MSGID: =?utf-8?q?1760652572617383597?= Factor out sched_weight_from/to_cgroup() which convert between scheduler shares and cgroup weight. No functional change. The factored-out functions will be used by a new BPF extensible sched_class so that the weights can be exposed to the BPF programs in a way which is consistent with cgroup weights and easier to interpret. The weight conversions will be used regardless of cgroup usage. It's just borrowing the cgroup weight range as it's more intuitive. CGROUP_WEIGHT_MIN/DFL/MAX constants are moved outside CONFIG_CGROUPS so that the conversion helpers can always be defined. v2: The helpers are now defined regardless of CONFIG_CGROUPS. Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- include/linux/cgroup.h | 4 ++-- kernel/sched/core.c | 28 +++++++++++++--------------- kernel/sched/sched.h | 18 ++++++++++++++++++ 3 files changed, 33 insertions(+), 17 deletions(-) diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index a8c6982c2c24..5080dfc8ee48 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -29,8 +29,6 @@ struct kernel_clone_args; -#ifdef CONFIG_CGROUPS - /* * All weight knobs on the default hierarchy should use the following min, * default and max values.
The default value is the logarithmic center of @@ -40,6 +38,8 @@ struct kernel_clone_args; #define CGROUP_WEIGHT_DFL 100 #define CGROUP_WEIGHT_MAX 10000 +#ifdef CONFIG_CGROUPS + /* walk only threadgroup leaders */ #define CSS_TASK_ITER_PROCS (1U << 0) /* walk all threaded css_sets in the domain */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a378e8e09061..fc7008095249 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -11145,29 +11145,27 @@ static int cpu_extra_stat_show(struct seq_file *sf, } #ifdef CONFIG_FAIR_GROUP_SCHED + +static unsigned long tg_weight(struct task_group *tg) +{ + return scale_load_down(tg->shares); +} + static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) { - struct task_group *tg = css_tg(css); - u64 weight = scale_load_down(tg->shares); - - return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024); + return sched_weight_to_cgroup(tg_weight(css_tg(css))); } static int cpu_weight_write_u64(struct cgroup_subsys_state *css, - struct cftype *cft, u64 weight) + struct cftype *cft, u64 cgrp_weight) { - /* - * cgroup weight knobs should use the common MIN, DFL and MAX - * values which are 1, 100 and 10000 respectively. While it loses - * a bit of range on both ends, it maps pretty well onto the shares - * value used by scheduler and the round-trip conversions preserve - * the original value over the entire range. - */ - if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX) + unsigned long weight; + + if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX) return -ERANGE; - weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL); + weight = sched_weight_from_cgroup(cgrp_weight); return sched_group_set_shares(css_tg(css), scale_load(weight)); } @@ -11175,7 +11173,7 @@ static int cpu_weight_write_u64(struct cgroup_subsys_state *css, static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) { - unsigned long weight = scale_load_down(css_tg(css)->shares); + unsigned long weight = tg_weight(css_tg(css)); int last_delta = INT_MAX; int prio, delta; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 1545779c5db8..9a6cba6f9299 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -232,6 +232,24 @@ static inline void update_avg(u64 *avg, u64 sample) #define shr_bound(val, shift) \ (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1)) +/* + * cgroup weight knobs should use the common MIN, DFL and MAX values which are + * 1, 100 and 10000 respectively. While it loses a bit of range on both ends, it + * maps pretty well onto the shares value used by scheduler and the round-trip + * conversions preserve the original value over the entire range. + */ +static inline unsigned long sched_weight_from_cgroup(unsigned long cgrp_weight) +{ + return DIV_ROUND_CLOSEST_ULL(cgrp_weight * 1024, CGROUP_WEIGHT_DFL); +} + +static inline unsigned long sched_weight_to_cgroup(unsigned long weight) +{ + return clamp_t(unsigned long, + DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024), + CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX); +} + /* * !! For sched_setattr_nocheck() (kernel) only !! 
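Plugging the boundary values into the helpers added above shows the round-trip preservation the comment refers to (CGROUP_WEIGHT_DFL is 100; the arithmetic below is only a worked illustration of the DIV_ROUND_CLOSEST_ULL() expressions in the patch, not part of the series):

    cgroup weight 1     -> shares: round(1 * 1024 / 100)      = 10
                           back:   round(10 * 100 / 1024)     = 1
    cgroup weight 100   -> shares: round(100 * 1024 / 100)    = 1024
                           back:   round(1024 * 100 / 1024)   = 100
    cgroup weight 10000 -> shares: round(10000 * 1024 / 100)  = 102400
                           back:   round(102400 * 100 / 1024) = 10000

so the min, default and max cgroup weights all survive the conversion to scheduler shares and back.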
* From patchwork Fri Mar 17 21:33:09 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71484 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp10378wrt; Fri, 17 Mar 2023 15:04:52 -0700 (PDT) X-Google-Smtp-Source: AK7set8NEap3+M1hduHizIRj7L/d7215POnEQ04zvM1JejHmXaW6k7JztD8u+IL0NK9FtXDClPB8 X-Received: by 2002:a17:902:7296:b0:1a0:463d:fd09 with SMTP id d22-20020a170902729600b001a0463dfd09mr8194686pll.1.1679090691937; Fri, 17 Mar 2023 15:04:51 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679090691; cv=none; d=google.com; s=arc-20160816; b=K57kQ3tUftS5UYiD9+ukAoBpLYsSZ8xoY7XdfkETak28CfkH9fPuR14u+CZS3/mwuc +YzfhE1ozuMAwsnB3zMTx+Zlg8bXJ6mMBRROPnBfoLaTZqIlSyOREBh873cxYAHkmVcd cxuZSXWh5fRsoa3BSC3hx18mmvKBYv1cJrUEnixW1TYdBXuhnaRQJYgoD5+EJgksbvhW qM7xlOE8tM10nug9rcFrvJDYvLCGlEJ9jaNlpu8lOHAEq9HfSgC0rfuYSezfGz/4Ae5c tKNDX0uTOoWG8CraavNBnhHIdYjdBk4aYe7WZpel0V5Jx/wFDd+ujFzsEix5aFWcTQzk c/jQ== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=Ek2xB039Z37SY+SNJqL7xVXjLO75eTCaHSia7ob4yw8=; b=NZOsuekkINDwKapCLtLw1TSLMYy56QvJSovaJFjjOQmU17DPJFBrZACk2a3IE635cS /tro+ttG18OnluO5nZ2oLSJDTfrtgQot7E/xvaMzB/zlgKsRM9qjwYu2ZHayobjyfNlR fwWKSJp11QCy6Zg5VlPXutYrMOjiESm1g6U98TtxWyP3H9WACKgjHZsi4CVRAUQvzkBk KDWTmmATfC4yCUOwSylS1PQPtoRI3bPPFRvAdX05nsj+53wvbVD1os+KFezntbGVT5AT dqurK1C0v04ryluVrTbJMOlEBkvdyCM7g3Vum4DumlOIxl8FgyV1dH57wUG12wkzlDsN l5Dg== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=PcmKwOuw; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: from out1.vger.email (out1.vger.email. 
[2620:137:e000::1:20]) by mx.google.com with ESMTP id g7-20020a170902868700b0019e9e57f92esi3209888plo.571.2023.03.17.15.03.50; Fri, 17 Mar 2023 15:04:51 -0700 (PDT) Received-SPF: pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) client-ip=2620:137:e000::1:20; Authentication-Results: mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=PcmKwOuw; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231337AbjCQVia (ORCPT + 99 others); Fri, 17 Mar 2023 17:38:30 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:54040 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231318AbjCQVhV (ORCPT ); Fri, 17 Mar 2023 17:37:21 -0400 Received: from mail-pl1-f180.google.com (mail-pl1-f180.google.com [209.85.214.180]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 3982962FD8; Fri, 17 Mar 2023 14:36:18 -0700 (PDT) Received: by mail-pl1-f180.google.com with SMTP id h8so6660037plf.10; Fri, 17 Mar 2023 14:36:18 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; t=1679088838; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=Ek2xB039Z37SY+SNJqL7xVXjLO75eTCaHSia7ob4yw8=; b=PcmKwOuwLfBUobUpytlBICTcmmbYezLZ2HfCN7STw9plQvvPtS8XdFhZ8mdx1kkMkO vdVV0GQBpCN5MVQHMHI55XHWiBJw4JOGDaIMCuxPCt31Jjn4ZcRnNkTCibwDW2OZGbrA 1VrSYyzvIS9B/T3MqJsxnn/t8ZAEIF2qZQ1/XeXUEVkm/mia/jJ3D+JkJSlBRGjsovmu pdIXV7J1+70COvQIs1ui6h51LNoOVW8XXknD2oH/50DZMX5HWcdBqdWawrQisXNHbm6x oRaKpGJuZM01RDQSwvYHheXH0sP0ygsladF5BlF9nce5pZwZ5Tn5N32izHTXA1CoZeM7 ICjw== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; t=1679088838; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=Ek2xB039Z37SY+SNJqL7xVXjLO75eTCaHSia7ob4yw8=; b=5zy+XHnsyoXU9DFz+Q1gBE2Tj3NhV1VO7tOg5KdFVreLl08K9JoH38hMON1jt6z49N XtFfRlAZqqmxplKqjLsUFYzQTqTB8H4UCGeEBWjjnxYjzb3517FhI4ZUk/4heebhHArA EDhoh3CH9rK86o8MCRcic3GZF/68uWu+AJDu9WIUAH46GKN6rcxxAZ1oG6Q1NJyb9RXk LKkm46qerQBdC+prn5Zmkm5k/AnRUA5PIfdHiNgCxvb+iQNxWV4tRMNkcBe6yfA0iVy3 Enimc4ZaVDUoeSBW0FbfMw1VJP2rzZb5PKhYvieOfA6kSvOY3tTWCWsVgzh3UqYcFMGk pz2g== X-Gm-Message-State: AO0yUKVpNIMzj10lqoLY19Lxi03WH8mSBMt1t002K7qlB/I2IDgAWpaD KLnTzLoTdekMo+C+kqKY5f0= X-Received: by 2002:a05:6a20:bc88:b0:d5:58df:fb7a with SMTP id fx8-20020a056a20bc8800b000d558dffb7amr5112510pzb.3.1679088837521; Fri, 17 Mar 2023 14:33:57 -0700 (PDT) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id d16-20020aa78150000000b005825b8e0540sm1948805pfn.204.2023.03.17.14.33.56 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:33:57 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo , kernel test robot Subject: [PATCH 08/32] sched: Expose css_tg(), __setscheduler_prio() and SCHED_CHANGE_BLOCK() Date: Fri, 17 Mar 2023 11:33:09 -1000 Message-Id: <20230317213333.2174969-9-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,RCVD_IN_MSPIKE_H3, RCVD_IN_MSPIKE_WL,SPF_HELO_NONE,SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760654201313264864?= X-GMAIL-MSGID: =?utf-8?q?1760654201313264864?= These will be used by a new BPF extensible sched_class. css_tg() will be used in the init and exit paths to visit all task_groups by walking cgroups. __setscheduler_prio() is used to pick the sched_class matching the current prio of the task. For the new BPF extensible sched_class, the mapping from the task configuration to sched_class isn't static and depends on a few factors - e.g. whether the BPF progs implementing the scheduler are loaded and in a serviceable state. That mapping logic will be added to __setscheduler_prio(). When the BPF scheduler progs get loaded and unloaded, the mapping changes and the new sched_class will walk the tasks applying the new mapping using SCHED_CHANGE_BLOCK() and __setscheduler_prio(). v2: Expose SCHED_CHANGE_BLOCK() too and update the description. 
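As a sketch of how the exposed pieces fit together once the class mapping changes (e.g. when such a BPF scheduler is loaded or unloaded), re-picking the class of a single task could look roughly like the following; the ext_* function name is hypothetical and error handling is omitted, while the helpers, macro and flags are the ones exposed by this and the preceding patches:

    /* sketch only: reapply __setscheduler_prio()'s class mapping to @p */
    static void ext_requeue_task_class(struct task_struct *p)
    {
            const struct sched_class *prev_class = p->sched_class;
            struct rq_flags rf;
            struct rq *rq = task_rq_lock(p, &rf);

            SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_MOVE) {
                    __setscheduler_prio(p, p->prio);
                    check_class_changing(rq, p, prev_class);
            }
            check_class_changed(rq, p, prev_class, p->prio);

            task_rq_unlock(rq, p, &rf);
    }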
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden Reported-by: kernel test robot --- kernel/sched/core.c | 47 +++---------------------------------------- kernel/sched/sched.h | 48 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 44 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index fc7008095249..2a602f93f5f8 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2096,15 +2096,7 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags) dequeue_task(rq, p, flags); } -struct sched_change_guard { - struct task_struct *p; - struct rq *rq; - bool queued; - bool running; - bool done; -}; - -static struct sched_change_guard +struct sched_change_guard sched_change_guard_init(struct rq *rq, struct task_struct *p, int flags) { struct sched_change_guard cg = { @@ -2129,7 +2121,7 @@ sched_change_guard_init(struct rq *rq, struct task_struct *p, int flags) return cg; } -static void sched_change_guard_fini(struct sched_change_guard *cg, int flags) +void sched_change_guard_fini(struct sched_change_guard *cg, int flags) { if (cg->queued) enqueue_task(cg->rq, cg->p, flags | ENQUEUE_NOCLOCK); @@ -2138,34 +2130,6 @@ static void sched_change_guard_fini(struct sched_change_guard *cg, int flags) cg->done = true; } -/** - * SCHED_CHANGE_BLOCK - Nested block for task attribute updates - * @__rq: Runqueue the target task belongs to - * @__p: Target task - * @__flags: DEQUEUE/ENQUEUE_* flags - * - * A task may need to be dequeued and put_prev_task'd for attribute updates and - * set_next_task'd and re-enqueued afterwards. This helper defines a nested - * block which automatically handles these preparation and cleanup operations. - * - * SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK) { - * update_attribute(p); - * ... - * } - * - * If @__flags is a variable, the variable may be updated in the block body and - * the updated value will be used when re-enqueueing @p. - * - * If %DEQUEUE_NOCLOCK is specified, the caller is responsible for calling - * update_rq_clock() beforehand. Otherwise, the rq clock is automatically - * updated iff the task needs to be dequeued and re-enqueued. Only the former - * case guarantees that the rq clock is up-to-date inside and after the block. - */ -#define SCHED_CHANGE_BLOCK(__rq, __p, __flags) \ - for (struct sched_change_guard __cg = \ - sched_change_guard_init(__rq, __p, __flags); \ - !__cg.done; sched_change_guard_fini(&__cg, __flags)) - static inline int __normal_prio(int policy, int rt_prio, int nice) { int prio; @@ -7016,7 +6980,7 @@ int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flag } EXPORT_SYMBOL(default_wake_function); -static void __setscheduler_prio(struct task_struct *p, int prio) +void __setscheduler_prio(struct task_struct *p, int prio) { if (dl_prio(prio)) p->sched_class = &dl_sched_class; @@ -10413,11 +10377,6 @@ void sched_move_task(struct task_struct *tsk) task_rq_unlock(rq, tsk, &rf); } -static inline struct task_group *css_tg(struct cgroup_subsys_state *css) -{ - return css ? 
container_of(css, struct task_group, css) : NULL; -} - static struct cgroup_subsys_state * cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) { diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 9a6cba6f9299..866ce69a445e 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -469,6 +469,11 @@ static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) return walk_tg_tree_from(&root_task_group, down, up, data); } +static inline struct task_group *css_tg(struct cgroup_subsys_state *css) +{ + return css ? container_of(css, struct task_group, css) : NULL; +} + extern int tg_nop(struct task_group *tg, void *data); extern void free_fair_sched_group(struct task_group *tg); @@ -2386,6 +2391,8 @@ extern void init_sched_dl_class(void); extern void init_sched_rt_class(void); extern void init_sched_fair_class(void); +extern void __setscheduler_prio(struct task_struct *p, int prio); + extern void resched_curr(struct rq *rq); extern void resched_cpu(int cpu); @@ -2466,6 +2473,47 @@ static inline void sub_nr_running(struct rq *rq, unsigned count) extern void activate_task(struct rq *rq, struct task_struct *p, int flags); extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); +struct sched_change_guard { + struct task_struct *p; + struct rq *rq; + bool queued; + bool running; + bool done; +}; + +extern struct sched_change_guard +sched_change_guard_init(struct rq *rq, struct task_struct *p, int flags); + +extern void sched_change_guard_fini(struct sched_change_guard *cg, int flags); + +/** + * SCHED_CHANGE_BLOCK - Nested block for task attribute updates + * @__rq: Runqueue the target task belongs to + * @__p: Target task + * @__flags: DEQUEUE/ENQUEUE_* flags + * + * A task may need to be dequeued and put_prev_task'd for attribute updates and + * set_next_task'd and re-enqueued afterwards. This helper defines a nested + * block which automatically handles these preparation and cleanup operations. + * + * SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK) { + * update_attribute(p); + * ... + * } + * + * If @__flags is a variable, the variable may be updated in the block body and + * the updated value will be used when re-enqueueing @p. + * + * If %DEQUEUE_NOCLOCK is specified, the caller is responsible for calling + * update_rq_clock() beforehand. Otherwise, the rq clock is automatically + * updated iff the task needs to be dequeued and re-enqueued. Only the former + * case guarantees that the rq clock is up-to-date inside and after the block. 
+ */ +#define SCHED_CHANGE_BLOCK(__rq, __p, __flags) \ + for (struct sched_change_guard __cg = \ + sched_change_guard_init(__rq, __p, __flags); \ + !__cg.done; sched_change_guard_fini(&__cg, __flags)) + extern void check_class_changing(struct rq *rq, struct task_struct *p, const struct sched_class *prev_class); extern void check_class_changed(struct rq *rq, struct task_struct *p, From patchwork Fri Mar 17 21:33:10 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71491 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp16157wrt; Fri, 17 Mar 2023 15:21:20 -0700 (PDT) X-Google-Smtp-Source: AK7set+OR9IkA+WaU7Zr/zpDfWDARp9OBvdPPNuXvrOi3zuAUZPzq5UVOe433Z3Lon/CYd0ZhB9o X-Received: by 2002:a05:6a20:7f8c:b0:c7:af88:3dd8 with SMTP id d12-20020a056a207f8c00b000c7af883dd8mr11461838pzj.6.1679091679892; Fri, 17 Mar 2023 15:21:19 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679091679; cv=none; d=google.com; s=arc-20160816; b=sWtcS42OgKmRtBPBPGcjRcZaZZV+tNVoPcNjvwP5ESz9nyKgpTj/yNr2P90QDB4dDD hYYI2xjrPXidDiAzyf/+tZGLfPfOWuiL/5sPu+b0PHEreqfELO8aDkg/DFR6mz+84TQJ L8NYn2IOxfanBM9mZ1Fflj7VxW52HI5lmer642tqsBglnJBFFTtMW8tAW4wtljEVMSPD oNMSKRiO9RG/Y0Mhjx4pISCtvEnCMvIJv0ZG/vP5brWNiCQW+eVG2d87q5u91yc71oyB svu7qDrJaon/8PbjJukA01FshIDPt89ufgFP6AklG5+7bz5Q/SZWD0UlExp3vzAb1ntu ZRpQ== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=G334uj+j3RQjKOT2zxeoftYC9/ECccSqiye4Nkect10=; b=E5j1KTUj39z7aMZG+H93PSfrN1JoCZkjb2Y7XXbfIsvJch6sm89xpizL/+MVhWWKEP c6KwbhBKDBONYUJOiCrimnKTVT3XVk2tGnselDxUwOOxTuwZxWcORjslRedhwDyyDhi0 PDhliaZ5ZGesNN+UDeqhdbSGLmoX1aFVlerZsxXtMfxaVJ1CzSgoyVSHn4yPpe+ontpo 1AMz2NdbOoLSmCtxjRDT/wFqSBB6y/IjCcmWrSMr8UnqLysmDdQL9bz8sm91sdMBhTWG NpjSQbaJrKnHbrs++8OHlhT3tkFlQhU1EC//D0qPYRw0D0Q8NQj5g40p4S4zZJrEC9Mb n9Bg== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=hmMg4kTn; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: from out1.vger.email (out1.vger.email. 
[2620:137:e000::1:20]) by mx.google.com with ESMTP id i69-20020a638748000000b0050c0de283bcsi3246703pge.777.2023.03.17.15.21.03; Fri, 17 Mar 2023 15:21:19 -0700 (PDT) Received-SPF: pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) client-ip=2620:137:e000::1:20; Authentication-Results: mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=hmMg4kTn; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S230517AbjCQVg1 (ORCPT + 99 others); Fri, 17 Mar 2023 17:36:27 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:51946 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S230452AbjCQVgK (ORCPT ); Fri, 17 Mar 2023 17:36:10 -0400 Received: from mail-pj1-x102f.google.com (mail-pj1-x102f.google.com [IPv6:2607:f8b0:4864:20::102f]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 608F9211C1; Fri, 17 Mar 2023 14:35:27 -0700 (PDT) Received: by mail-pj1-x102f.google.com with SMTP id d13so6698035pjh.0; Fri, 17 Mar 2023 14:35:27 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; t=1679088839; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=G334uj+j3RQjKOT2zxeoftYC9/ECccSqiye4Nkect10=; b=hmMg4kTnuGkoCJwMWYzyR+RqBxdIAsEeDJAtlfJ2+mSUxNxrUn4mE2AxJA2w8DRluY WuyIojchrFdRDlHsX92lHGMnBwKjqDDOL+yUl2mAQcExggynHW0nE0kBxw3sUf+NXy1K x2KsRzDHCOKO3txmd3Di50qDL7xQaFn29ZSHotpYVnTvipugeuFO4U6YT+H+MR3dVhBb +xWehPmw0vhajWZUiRm5oykf8+Yhwnt0ygbdJupLXwh0eEwjcafXQWYuP/UC6JuwSW+g SIYbPA3A7NLSLTmHplddA4ETYkPyv5DXYLoIluxP9QtO1Y0PB61IVUFbdMquXLYhcgws GySA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; t=1679088839; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=G334uj+j3RQjKOT2zxeoftYC9/ECccSqiye4Nkect10=; b=CqLE/+HUhI7cYrEtnRix2kOmgV/1+SnAj4yPVWf/rZifkwob17HGK3xveIWGYxNim9 b9mtuiHLU44dAzyqhcViQ4aMH1wNtr2S26bT+NMIo1BGySBcWbcpUt4FmQEHjT64xkAd 24oZHHY5IJq8y4zA5O1PAELqt4IotqxIJI1dnUaV7MT8Bn1EZqB0jO5qa+zDjPdxBLfY AJegRNIQbXSRs6fz3/WVx40jwGIgco1M+E6nLpRncGbIBzqjV1eGmal32BZXaW5TVMPc fN78Tqw2+HKD1kWAkKQfmC9Tlo/vzBCPxnXs7rROLvk1PI85ltchCVQKBrce1x6OYIev jIyw== X-Gm-Message-State: AO0yUKUz7dqKOi4EdhUcdmJj6k42zM15ki2pfiwNSL2GBWdTdoWrjbzb HN8h7oEmnVFPmfBhtLSbFtE= X-Received: by 2002:a17:90b:4b84:b0:23d:3c7b:8684 with SMTP id lr4-20020a17090b4b8400b0023d3c7b8684mr9915122pjb.41.1679088839311; Fri, 17 Mar 2023 14:33:59 -0700 (PDT) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id gj15-20020a17090b108f00b00233864f21a7sm5383549pjb.51.2023.03.17.14.33.58 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:33:59 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 09/32] sched: Enumerate CPU cgroup file types Date: Fri, 17 Mar 2023 11:33:10 -1000 Message-Id: <20230317213333.2174969-10-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE, SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760655237471511554?= X-GMAIL-MSGID: =?utf-8?q?1760655237471511554?= Rename cpu[_legacy]_files to cpu[_legacy]_cftypes for clarity and add cpu_cftype_id which enumerates every cgroup2 interface file type. This doesn't make any functional difference now. The enums will be used to access specific cftypes by a new BPF extensible sched_class to selectively show and hide CPU controller interface files depending on the capability of the currently loaded BPF scheduler progs. 
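For illustration only (not part of the patch), a later user can now reach a specific cgroup2 interface file by its enum id instead of scanning the array for a matching name:

    struct cftype *weight_cft = &cpu_cftypes[CPU_CFTYPE_WEIGHT];

    /* cpu_cftypes[] is indexed by cpu_cftype_id, so this is "cpu.weight" */
    WARN_ON_ONCE(strcmp(weight_cft->name, "weight"));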
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- kernel/sched/core.c | 22 +++++++++++----------- kernel/sched/sched.h | 21 +++++++++++++++++++++ 2 files changed, 32 insertions(+), 11 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 2a602f93f5f8..59136fafa94c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -11014,7 +11014,7 @@ static int cpu_idle_write_s64(struct cgroup_subsys_state *css, } #endif -static struct cftype cpu_legacy_files[] = { +static struct cftype cpu_legacy_cftypes[] = { #ifdef CONFIG_FAIR_GROUP_SCHED { .name = "shares", @@ -11221,21 +11221,21 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, } #endif -static struct cftype cpu_files[] = { +struct cftype cpu_cftypes[CPU_CFTYPE_CNT + 1] = { #ifdef CONFIG_FAIR_GROUP_SCHED - { + [CPU_CFTYPE_WEIGHT] = { .name = "weight", .flags = CFTYPE_NOT_ON_ROOT, .read_u64 = cpu_weight_read_u64, .write_u64 = cpu_weight_write_u64, }, - { + [CPU_CFTYPE_WEIGHT_NICE] = { .name = "weight.nice", .flags = CFTYPE_NOT_ON_ROOT, .read_s64 = cpu_weight_nice_read_s64, .write_s64 = cpu_weight_nice_write_s64, }, - { + [CPU_CFTYPE_IDLE] = { .name = "idle", .flags = CFTYPE_NOT_ON_ROOT, .read_s64 = cpu_idle_read_s64, @@ -11243,13 +11243,13 @@ static struct cftype cpu_files[] = { }, #endif #ifdef CONFIG_CFS_BANDWIDTH - { + [CPU_CFTYPE_MAX] = { .name = "max", .flags = CFTYPE_NOT_ON_ROOT, .seq_show = cpu_max_show, .write = cpu_max_write, }, - { + [CPU_CFTYPE_MAX_BURST] = { .name = "max.burst", .flags = CFTYPE_NOT_ON_ROOT, .read_u64 = cpu_cfs_burst_read_u64, @@ -11257,13 +11257,13 @@ static struct cftype cpu_files[] = { }, #endif #ifdef CONFIG_UCLAMP_TASK_GROUP - { + [CPU_CFTYPE_UCLAMP_MIN] = { .name = "uclamp.min", .flags = CFTYPE_NOT_ON_ROOT, .seq_show = cpu_uclamp_min_show, .write = cpu_uclamp_min_write, }, - { + [CPU_CFTYPE_UCLAMP_MAX] = { .name = "uclamp.max", .flags = CFTYPE_NOT_ON_ROOT, .seq_show = cpu_uclamp_max_show, @@ -11283,8 +11283,8 @@ struct cgroup_subsys cpu_cgrp_subsys = { .can_attach = cpu_cgroup_can_attach, #endif .attach = cpu_cgroup_attach, - .legacy_cftypes = cpu_legacy_files, - .dfl_cftypes = cpu_files, + .legacy_cftypes = cpu_legacy_cftypes, + .dfl_cftypes = cpu_cftypes, .early_init = true, .threaded = true, }; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 866ce69a445e..67f7f1149630 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3379,4 +3379,25 @@ static inline void switch_mm_cid(struct task_struct *prev, struct task_struct *n static inline void switch_mm_cid(struct task_struct *prev, struct task_struct *next) { } #endif +#ifdef CONFIG_CGROUP_SCHED +enum cpu_cftype_id { +#ifdef CONFIG_FAIR_GROUP_SCHED + CPU_CFTYPE_WEIGHT, + CPU_CFTYPE_WEIGHT_NICE, + CPU_CFTYPE_IDLE, +#endif +#ifdef CONFIG_CFS_BANDWIDTH + CPU_CFTYPE_MAX, + CPU_CFTYPE_MAX_BURST, +#endif +#ifdef CONFIG_UCLAMP_TASK_GROUP + CPU_CFTYPE_UCLAMP_MIN, + CPU_CFTYPE_UCLAMP_MAX, +#endif + CPU_CFTYPE_CNT, +}; + +extern struct cftype cpu_cftypes[CPU_CFTYPE_CNT + 1]; +#endif /* CONFIG_CGROUP_SCHED */ + #endif /* _KERNEL_SCHED_SCHED_H */ From patchwork Fri Mar 17 21:33:11 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71468 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp2733wrt; Fri, 17 Mar 2023 14:39:09 -0700 (PDT) X-Google-Smtp-Source: 
AK7set9jQ/65gkSeRAee9azTGKECelwWB+hy+fI3IuLprQuBXb41mtjcTF3DQVEzhNvxZvavOYw9 X-Received: by 2002:a17:90a:7182:b0:236:7144:669f with SMTP id i2-20020a17090a718200b002367144669fmr10550669pjk.2.1679089149616; Fri, 17 Mar 2023 14:39:09 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679089149; cv=none; d=google.com; s=arc-20160816; b=tznMROOyyhJ+30EnURaD3daPMViPWZrYaQq0ryBAUWUOCbOUXT7PvXBYoAD6HAgX1M xLDgaZ2+UCDOc+RW5wb98IEn9FFM0ygefehBhm9t4jSFAnqQmy/rDjW7V6K24AQiBlll 9LCim7voLKobE/NY8BQA4jS3F3CbiYRSIXsewGvEg4ICYhos0ggRhiEx08eBU4Q1GixN 9Nj2CBIpIx/atXmVrXd0a/Har8pqFMe9kdiDL0BAdKrH6lacYp78021OiAWHDRSLaw7Y Lo+mHBzp2H9o2Lt8IXxEe4kT8jXiJYLbwRyfKNojkllpVo2AE1PbcZkfGjYnFcxBQ8Yp AsXA== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=YWw2v1UnTVnbdS72T7wGx5fNyybAmUxe9bUw37LCRvo=; b=U3Zv+6b02kfU82b37GaIsNjNZIRKNc/AQlv5b5YQcyYXB5YZ9MxVAA0c7Ktplz+dCc uRJUsdYwaT/HkwAxkBE33RGkK1KzGEv9CBJLppl9JLXDd8LQvti6piBUoB7eZp2kr1FN Cu8FiCG5NRYgKuKm5RQbxb0HZnwtlmqY9YUnSDKes6IQs7LkqhVmn1de1nofqIQiCLNV 30V/mAq9nR1JVqc9kutzDZInVngyCYYq08FIkQl1S44F2VXUaGKjLuODOdEzj3PWpw5L waPxK+lhjX3fkm/sReQ+kR8twNXbwIBeuoSFPCTPOuQeDocMpesi9wrT2HeeqWj8UwIu 5Ckg== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=Agw5iz0e; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: from out1.vger.email (out1.vger.email. [2620:137:e000::1:20]) by mx.google.com with ESMTP id t5-20020a17090a950500b0023b576f6ac0si3607041pjo.26.2023.03.17.14.38.56; Fri, 17 Mar 2023 14:39:09 -0700 (PDT) Received-SPF: pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) client-ip=2620:137:e000::1:20; Authentication-Results: mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=Agw5iz0e; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231214AbjCQVgb (ORCPT + 99 others); Fri, 17 Mar 2023 17:36:31 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:51994 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S230461AbjCQVgL (ORCPT ); Fri, 17 Mar 2023 17:36:11 -0400 Received: from mail-pj1-x1032.google.com (mail-pj1-x1032.google.com [IPv6:2607:f8b0:4864:20::1032]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 945C14AD34; Fri, 17 Mar 2023 14:35:26 -0700 (PDT) Received: by mail-pj1-x1032.google.com with SMTP id e15-20020a17090ac20f00b0023d1b009f52so10569536pjt.2; Fri, 17 Mar 2023 14:35:26 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; t=1679088841; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=YWw2v1UnTVnbdS72T7wGx5fNyybAmUxe9bUw37LCRvo=; b=Agw5iz0eFlCMtSGY5mRtsHN6rrRxdhNbZnER8AKAoat/O5xaHjX7SL3c7GZ3qItUsL hKON97uP+aKGWt92sQDNkmCOwFR8wv2wZhO0Ktu3Sh4A8hHGoJlxnGYWXYZ56LTFmfiA 
LYq1c1+/Qous0sUaovLvwojE/SPl1jgbceikPlt1UYOawglhpTlW4SBNL0Ar7TfIIEI4 Qj0WmyY9QbrBY/dTd1N7FtbcrYl9hG4BM8FQJ32sEhqzqwAfRDS88DZke5fqsLtmqaAV 5TNVeHqUlE8j29hQaGSMr8xR4zkIEN2/t0RDYS9sHR8Wc0LWq6cWKdDY+1RFdJqyk83+ IODw== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; t=1679088841; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=YWw2v1UnTVnbdS72T7wGx5fNyybAmUxe9bUw37LCRvo=; b=fvoJTAs1grpzOVprgPTyizQGWP8SQZxcxfJHsbyAMGhQ8Pbrc7rrR8jV1Q3Kkqxg7q TVbwMpyViGlWtCNvDT4JnV+v/rsoCWWUMaS5N6ye+Bdb/iRyXlmwzfod3a4ug2y+Ro0g vNFGYSHA+LJwXzXhABMWtIT8dLToXE71UnX57Vg3cMwx/dJRIMLDs1ASUB+1S9roIEvw IaSwT/SzBBof7t8xZ/uTxNdVQTO/H6nNfJfrsa6lGhmw04feFXYUJT/PjqJ/DSD6kCkY 4CHQpb/Vn6jTZk6NA7FUaV0iIt/I7gi9z6170VJE3UR59BzTFZGjXu6qTrN2aZqV0sNv iECA== X-Gm-Message-State: AO0yUKV+BZcFl/cnkwNypEDvIgKXeYT5t4p4FvEGgi4yf4MWfGoA6ykV 9qh4Ghry2uIbjZb3IC+0mf8= X-Received: by 2002:a17:90b:4f90:b0:23b:4bf6:bbfa with SMTP id qe16-20020a17090b4f9000b0023b4bf6bbfamr10391873pjb.11.1679088841085; Fri, 17 Mar 2023 14:34:01 -0700 (PDT) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id w23-20020a17090a15d700b00233b5d6b4b5sm5326472pjd.16.2023.03.17.14.34.00 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:34:00 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 10/32] sched: Add @reason to sched_class->rq_{on|off}line() Date: Fri, 17 Mar 2023 11:33:11 -1000 Message-Id: <20230317213333.2174969-11-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE, SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760652584099587982?= X-GMAIL-MSGID: =?utf-8?q?1760652584099587982?= ->rq_{on|off}line are called either during CPU hotplug or cpuset partition updates. A planned BPF extensible sched_class wants to tell the BPF scheduler progs about CPU hotplug events in a way that's synchronized with rq state changes. As the BPF scheduler progs aren't necessarily affected by cpuset partition updates, we need a way to distinguish the two types of events. Let's add an argument to tell them apart. v2: Patch description updated to detail the expected use. 
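A hypothetical ->rq_online() implementation in the planned class could then filter on the new argument (the ext_* names below are illustrative; the signature and the enum values come from the patch):

    static void rq_online_ext(struct rq *rq, enum rq_onoff_reason reason)
    {
            /* forward only real hotplug events to the BPF scheduler progs */
            if (reason == RQ_ONOFF_HOTPLUG)
                    ext_notify_cpu_online(rq->cpu); /* hypothetical helper */
    }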
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- kernel/sched/core.c | 12 ++++++------ kernel/sched/deadline.c | 4 ++-- kernel/sched/fair.c | 4 ++-- kernel/sched/rt.c | 4 ++-- kernel/sched/sched.h | 13 +++++++++---- kernel/sched/topology.c | 4 ++-- 6 files changed, 23 insertions(+), 18 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 59136fafa94c..aa63371aa84c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -9428,7 +9428,7 @@ static inline void balance_hotplug_wait(void) #endif /* CONFIG_HOTPLUG_CPU */ -void set_rq_online(struct rq *rq) +void set_rq_online(struct rq *rq, enum rq_onoff_reason reason) { if (!rq->online) { const struct sched_class *class; @@ -9438,19 +9438,19 @@ void set_rq_online(struct rq *rq) for_each_class(class) { if (class->rq_online) - class->rq_online(rq); + class->rq_online(rq, reason); } } } -void set_rq_offline(struct rq *rq) +void set_rq_offline(struct rq *rq, enum rq_onoff_reason reason) { if (rq->online) { const struct sched_class *class; for_each_class(class) { if (class->rq_offline) - class->rq_offline(rq); + class->rq_offline(rq, reason); } cpumask_clear_cpu(rq->cpu, rq->rd->online); @@ -9546,7 +9546,7 @@ int sched_cpu_activate(unsigned int cpu) rq_lock_irqsave(rq, &rf); if (rq->rd) { BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); - set_rq_online(rq); + set_rq_online(rq, RQ_ONOFF_HOTPLUG); } rq_unlock_irqrestore(rq, &rf); @@ -9591,7 +9591,7 @@ int sched_cpu_deactivate(unsigned int cpu) if (rq->rd) { update_rq_clock(rq); BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); - set_rq_offline(rq); + set_rq_offline(rq, RQ_ONOFF_HOTPLUG); } rq_unlock_irqrestore(rq, &rf); diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 71b24371a6f7..a7bb573c4c82 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2518,7 +2518,7 @@ static void set_cpus_allowed_dl(struct task_struct *p, } /* Assumes rq->lock is held */ -static void rq_online_dl(struct rq *rq) +static void rq_online_dl(struct rq *rq, enum rq_onoff_reason reason) { if (rq->dl.overloaded) dl_set_overload(rq); @@ -2529,7 +2529,7 @@ static void rq_online_dl(struct rq *rq) } /* Assumes rq->lock is held */ -static void rq_offline_dl(struct rq *rq) +static void rq_offline_dl(struct rq *rq, enum rq_onoff_reason reason) { if (rq->dl.overloaded) dl_clear_overload(rq); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 681ab0dd0bc1..28204472a3f1 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -11805,14 +11805,14 @@ void trigger_load_balance(struct rq *rq) nohz_balancer_kick(rq); } -static void rq_online_fair(struct rq *rq) +static void rq_online_fair(struct rq *rq, enum rq_onoff_reason reason) { update_sysctl(); update_runtime_enabled(rq); } -static void rq_offline_fair(struct rq *rq) +static void rq_offline_fair(struct rq *rq, enum rq_onoff_reason reason) { update_sysctl(); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 0a11f44adee5..2b4c769438a1 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2473,7 +2473,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p) } /* Assumes rq->lock is held */ -static void rq_online_rt(struct rq *rq) +static void rq_online_rt(struct rq *rq, enum rq_onoff_reason reason) { if (rq->rt.overloaded) rt_set_overload(rq); @@ -2484,7 +2484,7 @@ static void rq_online_rt(struct rq *rq) } /* Assumes rq->lock is held */ -static void rq_offline_rt(struct rq *rq) +static void rq_offline_rt(struct rq *rq, enum 
rq_onoff_reason reason) { if (rq->rt.overloaded) rt_clear_overload(rq); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 67f7f1149630..958613dd8290 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2183,6 +2183,11 @@ extern const u32 sched_prio_to_wmult[40]; #define RETRY_TASK ((void *)-1UL) +enum rq_onoff_reason { + RQ_ONOFF_HOTPLUG, /* CPU is going on/offline */ + RQ_ONOFF_TOPOLOGY, /* sched domain topology update */ +}; + struct affinity_context { const struct cpumask *new_mask; struct cpumask *user_mask; @@ -2219,8 +2224,8 @@ struct sched_class { void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx); - void (*rq_online)(struct rq *rq); - void (*rq_offline)(struct rq *rq); + void (*rq_online)(struct rq *rq, enum rq_onoff_reason reason); + void (*rq_offline)(struct rq *rq, enum rq_onoff_reason reason); struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq); #endif @@ -2787,8 +2792,8 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) raw_spin_rq_unlock(rq1); } -extern void set_rq_online (struct rq *rq); -extern void set_rq_offline(struct rq *rq); +extern void set_rq_online (struct rq *rq, enum rq_onoff_reason reason); +extern void set_rq_offline(struct rq *rq, enum rq_onoff_reason reason); extern bool sched_smp_initialized; #else /* CONFIG_SMP */ diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 051aaf65c749..155c4e7e0f08 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -495,7 +495,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd) old_rd = rq->rd; if (cpumask_test_cpu(rq->cpu, old_rd->online)) - set_rq_offline(rq); + set_rq_offline(rq, RQ_ONOFF_TOPOLOGY); cpumask_clear_cpu(rq->cpu, old_rd->span); @@ -513,7 +513,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd) cpumask_set_cpu(rq->cpu, rd->span); if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) - set_rq_online(rq); + set_rq_online(rq, RQ_ONOFF_TOPOLOGY); raw_spin_rq_unlock_irqrestore(rq, flags); From patchwork Fri Mar 17 21:33:12 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71494 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp16626wrt; Fri, 17 Mar 2023 15:22:41 -0700 (PDT) X-Google-Smtp-Source: AK7set/m9KvG16bZpUZ/k+X2B+2KPSlYAg49wIXRthfMb3TpyEgk8uHe3oeJXCHOeyOP4vfX7CKn X-Received: by 2002:a17:902:9a92:b0:1a0:4405:5787 with SMTP id w18-20020a1709029a9200b001a044055787mr8819779plp.0.1679091761226; Fri, 17 Mar 2023 15:22:41 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679091761; cv=none; d=google.com; s=arc-20160816; b=bBIvimfTXDM2ClAwmWqUkGHfrTKOsUIL1+TK6P4gr1aRwrZxpdQRUJnpQr3iZC5lzJ 49+9ew5tW7xPOKmyRI/dsWj7IK9IsQ6qlvvqGKpN+b6QnnFGKqWu2g3KFsZNAh4x2D7i qNRfCJEyKkuvNzZ1FYgj+jIvM1w3hWx/wFbOZaU1z1gnYa5ExiQkhZiLoSlf5Nks8I7M eQnC7ahbb+KIQ8bPzLUL5Mxb3v8kAyhbuqizNxAryJwlWXFX+ABzwpMN9eddiebx9va5 nUAA8oc/N1COjyhthT72SXJ6cdgvybryf6nk15mwDQuVx03sp0oMOm+Sg6cjRKLPej+q 3cwg== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=X6qz8Axre0zWemU+wtTjt8NnBoL1lg5OlFMGApoRxNc=; b=PBNq4ywe/hi063P2qU00DN7sKWP9HrwOyKlaBmlLGkTOq8ZnydOj9bMtiJxyqQe0Xw bY+hk3plh/iNQ54O3ww9bbugB3XuYGHmIls0uLSw+BTX7kdAB2dVm537JKYI/dvvr4px 
(2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id t7-20020a170902bc4700b0019a91895cdfsm2002559plz.50.2023.03.17.14.34.02 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:34:02 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 11/32] sched: Add normal_policy() Date: Fri, 17 Mar 2023 11:33:12 -1000 Message-Id: <20230317213333.2174969-12-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE, SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760655323019389630?= X-GMAIL-MSGID: =?utf-8?q?1760655323019389630?= A new BPF extensible sched_class will need to dynamically change how a task picks its sched_class. For example, if the loaded BPF scheduler progs fail, the tasks will be forced back on CFS even if the task's policy is set to the new sched_class. To support such mapping, add normal_policy() which wraps testing for %SCHED_NORMAL. This doesn't cause any behavior changes. v2: Update the description with more details on the expected use. 
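To illustrate the kind of dynamic mapping the wrapper enables (a purely hypothetical sketch, not part of this patch; SCHED_EXT and scx_enabled() are only introduced by later patches in this series), a follow-up change could make SCHED_EXT tasks fall back to CFS while no BPF scheduler is loaded by widening the test in one place:

/* hypothetical follow-up, shown only to motivate the new helper */
static inline int normal_policy(int policy)
{
	if (policy == SCHED_NORMAL)
		return 1;
	/* treat SCHED_EXT as SCHED_NORMAL while no BPF scheduler is loaded */
	return policy == SCHED_EXT && !scx_enabled();
}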
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- kernel/sched/fair.c | 2 +- kernel/sched/sched.h | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 28204472a3f1..ea3788ef9686 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7806,7 +7806,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ * Batch and idle tasks do not preempt non-idle tasks (their preemption * is driven by the tick): */ - if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) + if (unlikely(!normal_policy(p->policy)) || !sched_feat(WAKEUP_PREEMPTION)) return; find_matching_se(&se, &pse); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 958613dd8290..6397843b4482 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -182,9 +182,15 @@ static inline int idle_policy(int policy) { return policy == SCHED_IDLE; } + +static inline int normal_policy(int policy) +{ + return policy == SCHED_NORMAL; +} + static inline int fair_policy(int policy) { - return policy == SCHED_NORMAL || policy == SCHED_BATCH; + return normal_policy(policy) || policy == SCHED_BATCH; } static inline int rt_policy(int policy) From patchwork Fri Mar 17 21:33:13 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71485 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp10444wrt; Fri, 17 Mar 2023 15:05:02 -0700 (PDT) X-Google-Smtp-Source: AK7set9VXHdXfIPQx3mdM94956NdIkutvdu1FC4u+lk+X8lGMJ1XsEZs1jd4E5Nqc4zDHE/Aj/nw X-Received: by 2002:a17:90a:f815:b0:23d:19ea:734a with SMTP id ij21-20020a17090af81500b0023d19ea734amr10579952pjb.3.1679090702141; Fri, 17 Mar 2023 15:05:02 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679090702; cv=none; d=google.com; s=arc-20160816; b=qsHM29uElcnx+Gi20Uzq/XPt5UhQH+/jDPepHP96LjAfFTtr5Zt8O7gY3YrvPAItFL dvF5M+P3sQx4rZ9kHcvEPKS+w1lTYpKAV7yL8Hc2Rl07SJA29GrASCy7ru1et3tXhmCE 1/mjLC4CfJJ7EnYES4AnoOqTNxuKF8sLMrjXnBJI6LYNh0v73ySsnUyDBMy1//JSbawl TkRumpN+glajUv1vkDeISNNk11P+xR5ZxkdwsT8eQ7D+W77FFfCeqedGqU19ZUYS1tIH o/E4+h8jETgIAimkN7Tf223m1TvQlHUBXTNsQvmarDLaK+/2wja0XxrHLhTRgM/uRF8Y PZLQ== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=QK2aJVKovuD8X9fkbu87j/H7Z7JuonUqKoeh6yiYhnU=; b=W4ysdj5echVKMy9XBlY2WHZaSF5ocpUzh3cVh6W2mebsj84D9aUEQmKNO+Nwu6F5oN ojfvnF+NOaWSmn4vRDZBpxl151Gu4qrBTuonxvaMjDxysuG3tjCDjeE6VTGCyW2AaR8c qHsseXQHXjxnOR54a6sIAPGYoLfPgvSOWg33fRKYzEA1mkM3QeDzeZBJUMxzA6YK3/x8 mfSsLYhSv3VG4CkvgAXJMDkLTbvcd+oYDe0qx8eJxiIf6axo55c1kh1CisuScIJq+3tg IBA+sZx5H24OJD9cev6mQRRbNgzB49gb0pL7C8zBzK9jH4rQG6Qvyfrv96VcrQuxwbg9 r6Lg== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b="DWP/A6JL"; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: from out1.vger.email (out1.vger.email. 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id x3-20020a17090abc8300b0023f4274bd9asm1837666pjr.29.2023.03.17.14.34.04 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:34:04 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 12/32] sched_ext: Add boilerplate for extensible scheduler class Date: Fri, 17 Mar 2023 11:33:13 -1000 Message-Id: <20230317213333.2174969-13-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,RCVD_IN_MSPIKE_H3, RCVD_IN_MSPIKE_WL,SPF_HELO_NONE,SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760654212203761510?= X-GMAIL-MSGID: =?utf-8?q?1760654212203761510?= This adds dummy implementations of sched_ext interfaces which interact with the scheduler core and hook them in the correct places. As they're all dummies, this doesn't cause any behavior changes. This is split out to help reviewing. v2: balance_scx_on_up() dropped. This will be handled in sched_ext proper. 
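The shape of the boilerplate is simple: every hook gets a no-op stub when CONFIG_SCHED_CLASS_EXT is off, so the call sites added to the core scheduler need no #ifdefs and compile away entirely. A condensed sketch of the pattern (the call site below is illustrative, not taken from the patch):

#ifdef CONFIG_SCHED_CLASS_EXT
#error "NOT IMPLEMENTED YET"		/* filled in by a later patch */
#else
#define scx_enabled()	false
static inline void scx_pre_fork(struct task_struct *p) {}
static inline int scx_fork(struct task_struct *p) { return 0; }
#endif

/* an unconditional call site; becomes a no-op when SCX is compiled out */
static int example_fork_path(struct task_struct *p)
{
	scx_pre_fork(p);
	return scx_fork(p);
}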
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- include/linux/sched/ext.h | 12 ++++++++++++ kernel/fork.c | 2 ++ kernel/sched/core.c | 32 ++++++++++++++++++++++++-------- kernel/sched/ext.h | 24 ++++++++++++++++++++++++ kernel/sched/idle.c | 2 ++ kernel/sched/sched.h | 2 ++ 6 files changed, 66 insertions(+), 8 deletions(-) create mode 100644 include/linux/sched/ext.h create mode 100644 kernel/sched/ext.h diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h new file mode 100644 index 000000000000..a05dfcf533b0 --- /dev/null +++ b/include/linux/sched/ext.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_EXT_H +#define _LINUX_SCHED_EXT_H + +#ifdef CONFIG_SCHED_CLASS_EXT +#error "NOT IMPLEMENTED YET" +#else /* !CONFIG_SCHED_CLASS_EXT */ + +static inline void sched_ext_free(struct task_struct *p) {} + +#endif /* CONFIG_SCHED_CLASS_EXT */ +#endif /* _LINUX_SCHED_EXT_H */ diff --git a/kernel/fork.c b/kernel/fork.c index 0d166537a1a3..68d08701acd0 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -843,6 +844,7 @@ void __put_task_struct(struct task_struct *tsk) WARN_ON(refcount_read(&tsk->usage)); WARN_ON(tsk == current); + sched_ext_free(tsk); io_uring_free(tsk); cgroup_free(tsk); task_numa_free(tsk, true); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index aa63371aa84c..9ecee40eb0bc 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4682,6 +4682,8 @@ late_initcall(sched_core_sysctl_init); */ int sched_fork(unsigned long clone_flags, struct task_struct *p) { + int ret; + __sched_fork(clone_flags, p); /* * We mark the process as NEW here. This guarantees that @@ -4718,12 +4720,16 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) p->sched_reset_on_fork = 0; } - if (dl_prio(p->prio)) - return -EAGAIN; - else if (rt_prio(p->prio)) + scx_pre_fork(p); + + if (dl_prio(p->prio)) { + ret = -EAGAIN; + goto out_cancel; + } else if (rt_prio(p->prio)) { p->sched_class = &rt_sched_class; - else + } else { p->sched_class = &fair_sched_class; + } init_entity_runnable_average(&p->se); @@ -4741,6 +4747,10 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) RB_CLEAR_NODE(&p->pushable_dl_tasks); #endif return 0; + +out_cancel: + scx_cancel_fork(p); + return ret; } int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) @@ -4771,16 +4781,18 @@ int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) p->sched_class->task_fork(p); raw_spin_unlock_irqrestore(&p->pi_lock, flags); - return 0; + return scx_fork(p); } void sched_cancel_fork(struct task_struct *p) { + scx_cancel_fork(p); } void sched_post_fork(struct task_struct *p) { uclamp_post_fork(p); + scx_post_fork(p); } unsigned long to_ratio(u64 period, u64 runtime) @@ -5935,7 +5947,7 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, * We can terminate the balance pass as soon as we know there is * a runnable task of @class priority or higher. 
*/ - for_class_range(class, prev->sched_class, &idle_sched_class) { + for_balance_class_range(class, prev->sched_class, &idle_sched_class) { if (class->balance(rq, prev, rf)) break; } @@ -5953,6 +5965,9 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) const struct sched_class *class; struct task_struct *p; + if (scx_enabled()) + goto restart; + /* * Optimization: we know that if all tasks are in the fair class we can * call that function directly, but only if the @prev task wasn't of a @@ -5978,7 +5993,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) restart: put_prev_task_balance(rq, prev, rf); - for_each_class(class) { + for_each_active_class(class) { p = class->pick_next_task(rq); if (p) return p; @@ -6011,7 +6026,7 @@ static inline struct task_struct *pick_task(struct rq *rq) const struct sched_class *class; struct task_struct *p; - for_each_class(class) { + for_each_active_class(class) { p = class->pick_task(rq); if (p) return p; @@ -9953,6 +9968,7 @@ void __init sched_init(void) balance_push_set(smp_processor_id(), false); #endif init_sched_fair_class(); + init_sched_ext_class(); psi_init(); diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h new file mode 100644 index 000000000000..6a93c4825339 --- /dev/null +++ b/kernel/sched/ext.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifdef CONFIG_SCHED_CLASS_EXT +#error "NOT IMPLEMENTED YET" +#else /* CONFIG_SCHED_CLASS_EXT */ + +#define scx_enabled() false + +static inline void scx_pre_fork(struct task_struct *p) {} +static inline int scx_fork(struct task_struct *p) { return 0; } +static inline void scx_post_fork(struct task_struct *p) {} +static inline void scx_cancel_fork(struct task_struct *p) {} +static inline void init_sched_ext_class(void) {} + +#define for_each_active_class for_each_class +#define for_balance_class_range for_class_range + +#endif /* CONFIG_SCHED_CLASS_EXT */ + +#if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP) +#error "NOT IMPLEMENTED YET" +#else +static inline void scx_update_idle(struct rq *rq, bool idle) {} +#endif diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index e9ef66be2870..65378f0be8dc 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -407,11 +407,13 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) { + scx_update_idle(rq, false); } static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first) { update_idle_core(rq); + scx_update_idle(rq, true); schedstat_inc(rq->sched_goidle); } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 6397843b4482..6c42b042daa4 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3411,4 +3411,6 @@ enum cpu_cftype_id { extern struct cftype cpu_cftypes[CPU_CFTYPE_CNT + 1]; #endif /* CONFIG_CGROUP_SCHED */ +#include "ext.h" + #endif /* _KERNEL_SCHED_SCHED_H */ From patchwork Fri Mar 17 21:33:14 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71476 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp5351wrt; Fri, 17 Mar 2023 14:49:26 -0700 (PDT) X-Google-Smtp-Source: AK7set9f+HXjejzGdfFGfgodex/LmgPI9Dzm3v31HF22kDIe9inHeg6c51KuYGkq+Xo1Gwz4X37R X-Received: by 2002:a05:6a00:844:b0:625:6439:657a with SMTP id q4-20020a056a00084400b006256439657amr9115810pfk.0.1679089766504; 
s=20210112; t=1679088849; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=4BNXeu7tkVjZi2X41yUQIbCY15sJpeqw1t+i0mbdqLY=; b=SSvTMR9a1QoD6nTyAwIS3Gomf8kV0tc7Upnk2c+cNmt22cRDmvBKErVGGPYYzOYHk4 oemH/FU4bXOlVAF2Vzcd3t39BCM4YWFOIXwFQtL+2mjry7koxyFeoSKyaNcsgjrKe9Fo MdG1ppdeON01nFZqKpOPuZP2az2QcN4nocfIC3JYularfUdneH+a8ersymDu9uaplCVe HPijwybcOkpjzc6J5mbVfwykUJIZiVD4GDYiOPTzCK6oOV3/QyF1lK3NKFAvg4HMWyD3 fqmcNDtNUtWDpiqDB0k8Fu6z4ZgvSPmBHbAWQgPqmTIo2Q7BVWBCD+3+HA+OAIvYCZLZ GzwQ== X-Gm-Message-State: AO0yUKUYT0YQ3xinoXDYlbPxXcnuMEVaIP3vt+O27XLVSGKEW39XDtTJ wjkYjcI2ZYoTL5VdCK0wChI= X-Received: by 2002:aa7:9f96:0:b0:623:e5fa:24ba with SMTP id z22-20020aa79f96000000b00623e5fa24bamr7190032pfr.10.1679088846853; Fri, 17 Mar 2023 14:34:06 -0700 (PDT) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id p18-20020a62ab12000000b005a8f1d76d46sm2043013pff.13.2023.03.17.14.34.05 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:34:06 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 13/32] sched_ext: Implement BPF extensible scheduler class Date: Fri, 17 Mar 2023 11:33:14 -1000 Message-Id: <20230317213333.2174969-14-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE, SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760653231397166673?= X-GMAIL-MSGID: =?utf-8?q?1760653231397166673?= Implement a new scheduler class sched_ext (SCX), which allows scheduling policies to be implemented as BPF programs to achieve the following: 1. Ease of experimentation and exploration: Enabling rapid iteration of new scheduling policies. 2. Customization: Building application-specific schedulers which implement policies that are not applicable to general-purpose schedulers. 3. Rapid scheduler deployments: Non-disruptive swap outs of scheduling policies in production environments. sched_ext leverages BPF’s struct_ops feature to define a structure which exports function callbacks and flags to BPF programs that wish to implement scheduling policies. The struct_ops structure exported by sched_ext is struct sched_ext_ops, and is conceptually similar to struct sched_class. 
The role of sched_ext is to map the complex sched_class callbacks to the simpler and more ergonomic struct sched_ext_ops callbacks. For more detailed discussion on the motivations and overview, please refer to the cover letter. Later patches will also add several example schedulers and documentation. This patch implements the minimum core framework to enable implementation of BPF schedulers. Subsequent patches will gradually add functionalities including safety guarantee mechanisms, nohz and cgroup support. include/linux/sched/ext.h defines struct sched_ext_ops. With the comment on top, each operation should be self-explanatory. The following points are worth noting:

* Both "sched_ext" and its shorthand "scx" are used. If the identifier already has "sched" in it, "ext" is used; otherwise, "scx".

* In sched_ext_ops, only .name is mandatory. Every operation is optional and if omitted a simple but functional default behavior is provided.

* A new policy constant SCHED_EXT is added and a task can select sched_ext by invoking sched_setscheduler(2) with the new policy constant. However, if the BPF scheduler is not loaded, SCHED_EXT is the same as SCHED_NORMAL and the task is scheduled by CFS. When the BPF scheduler is loaded, all tasks which have the SCHED_EXT policy are switched to sched_ext.

* To bridge the workflow imbalance between the scheduler core and sched_ext_ops callbacks, sched_ext uses simple FIFOs called dispatch queues (dsq's). By default, there is one global dsq (SCX_DSQ_GLOBAL), and one local per-CPU dsq (SCX_DSQ_LOCAL). SCX_DSQ_GLOBAL is provided for convenience and need not be used by a scheduler that doesn't require it. SCX_DSQ_LOCAL is the per-CPU FIFO that sched_ext pulls from when putting the next task on the CPU. The BPF scheduler can manage an arbitrary number of dsq's using scx_bpf_create_dsq() and scx_bpf_destroy_dsq(). (A minimal usage sketch follows the changelog below.)

* sched_ext guarantees system integrity no matter what the BPF scheduler does. To enable this, each task's ownership is tracked through p->scx.ops_state and all tasks are put on the scx_tasks list. The disable path can always recover and revert all tasks back to CFS. See p->scx.ops_state and scx_tasks.

* A task is not tied to its rq while enqueued. This decouples CPU selection from queueing and allows sharing a scheduling queue across an arbitrary subset of CPUs. This adds some complexity as a task may need to be bounced between rq's right before it starts executing. See dispatch_to_local_dsq() and move_task_to_local_dsq().

* One complication that arises from the above weak association between task and rq is that synchronizing with dequeue() gets complicated as dequeue() may happen anytime while the task is enqueued and the dispatch path might need to release the rq lock to transfer the task. Solving this requires a bit of complexity. See the logic around p->scx.sticky_cpu and p->scx.ops_qseq.

* Both enable and disable paths are a bit complicated. The enable path switches all tasks without blocking to avoid issues which can arise from partially switched states (e.g. the switching task itself being starved). The disable path can't trust the BPF scheduler at all, so it also has to guarantee forward progress without blocking. See scx_ops_enable() and scx_ops_disable_workfn().

* When sched_ext is disabled, static_branches are used to shut down the entry points from hot paths.

v3:

* ops.set_weight() added to allow BPF schedulers to track weight changes without polling p->scx.weight.
* move_task_to_local_dsq() was losing SCX-specific enq_flags when enqueueing the task on the target dsq because it goes through activate_task() which loses the upper 32 bits of the flags. Carry the flags through rq->scx.extra_enq_flags.

* scx_bpf_dispatch(), scx_bpf_pick_idle_cpu(), scx_bpf_task_running() and scx_bpf_task_cpu() now use the new KF_RCU instead of KF_TRUSTED_ARGS to make it easier for BPF schedulers to call them.

* The kfunc helper access control mechanism implemented through sched_ext_entity.kf_mask is improved. Now SCX_CALL_OP*() is always used when invoking scx_ops operations.

v2:

* balance_scx_on_up() is dropped. Instead, on UP, balance_scx() is called from put_prev_task_scx() and pick_next_task_scx() as necessary. To determine whether balance_scx() should be called from put_prev_task_scx(), the SCX_TASK_DEQD_FOR_SLEEP flag is added. See the comment in put_prev_task_scx() for details.

* sched_deq_and_put_task() / sched_enq_and_set_task() sequences replaced with SCHED_CHANGE_BLOCK().

* Unused all_dsqs list removed. This was a left-over from previous iterations.

* p->scx.kf_mask is added to track and enforce which kfunc helpers are allowed. Also, init/exit sequences are updated to make some kfuncs always safe to call regardless of the current BPF scheduler state. Combined, this should make all the kfuncs safe.

* BPF now supports sleepable struct_ops operations. Hacky workaround removed and operations and kfunc helpers are tagged appropriately.

* BPF now supports bitmask / cpumask helpers. scx_bpf_get_idle_cpumask() and friends are added so that BPF schedulers can use the idle masks with the generic helpers. This replaces the hacky kfunc helpers added by a separate patch in V1.

* CONFIG_SCHED_CLASS_EXT can no longer be enabled if SCHED_CORE is enabled. This restriction will be removed by a later patch which adds core-sched support.

* Add MAINTAINERS entries and other misc changes.
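To make the interface concrete, here is a minimal sketch of a BPF scheduler written against the ops and kfuncs described above -- a global-FIFO policy in the spirit of the example schedulers added later in the series. It is illustrative only: the include path and the BPF_STRUCT_OPS() convenience macro are assumptions about the BPF-side tooling rather than part of this patch.

/* minimal_fifo.bpf.c -- hypothetical minimal sched_ext scheduler */
#include "scx_common.bpf.h"		/* assumed BPF-side helper header */

char _license[] SEC("license") = "GPL";

/*
 * Put every runnable task on the shared global FIFO with the default
 * slice. All other callbacks are omitted, so the built-in defaults
 * handle CPU selection and pulling tasks from SCX_DSQ_GLOBAL into each
 * CPU's local DSQ.
 */
void BPF_STRUCT_OPS(fifo_enqueue, struct task_struct *p, u64 enq_flags)
{
	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}

SEC(".struct_ops")
struct sched_ext_ops fifo_ops = {
	.enqueue	= (void *)fifo_enqueue,
	.name		= "minimal_fifo",	/* only .name is mandatory */
};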
Signed-off-by: Tejun Heo Co-authored-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- MAINTAINERS | 3 + include/asm-generic/vmlinux.lds.h | 1 + include/linux/sched.h | 5 + include/linux/sched/ext.h | 401 +++- include/uapi/linux/sched.h | 1 + init/init_task.c | 10 + kernel/Kconfig.preempt | 22 +- kernel/bpf/bpf_struct_ops_types.h | 4 + kernel/sched/build_policy.c | 4 + kernel/sched/core.c | 26 + kernel/sched/debug.c | 6 + kernel/sched/ext.c | 3042 +++++++++++++++++++++++++++++ kernel/sched/ext.h | 97 +- kernel/sched/sched.h | 16 + 14 files changed, 3634 insertions(+), 4 deletions(-) create mode 100644 kernel/sched/ext.c diff --git a/MAINTAINERS b/MAINTAINERS index edd3d562beee..5273c5f895f6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -18605,6 +18605,8 @@ R: Ben Segall (CONFIG_CFS_BANDWIDTH) R: Mel Gorman (CONFIG_NUMA_BALANCING) R: Daniel Bristot de Oliveira (SCHED_DEADLINE) R: Valentin Schneider (TOPOLOGY) +R: Tejun Heo (SCHED_EXT) +R: David Vernet (SCHED_EXT) L: linux-kernel@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core @@ -18613,6 +18615,7 @@ F: include/linux/sched.h F: include/linux/wait.h F: include/uapi/linux/sched.h F: kernel/sched/ +F: tools/sched_ext/ SCR24X CHIP CARD INTERFACE DRIVER M: Lubomir Rintel diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index d1f57e4868ed..cd5a718ba49f 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -131,6 +131,7 @@ *(__dl_sched_class) \ *(__rt_sched_class) \ *(__fair_sched_class) \ + *(__ext_sched_class) \ *(__idle_sched_class) \ __sched_class_lowest = .; diff --git a/include/linux/sched.h b/include/linux/sched.h index b11b4517760f..d014c1681cdc 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -70,6 +70,8 @@ struct signal_struct; struct task_delay_info; struct task_group; +#include + /* * Task state bitmask. NOTE! These bits are also * encoded in fs/proc/array.c: get_task_state(). @@ -788,6 +790,9 @@ struct task_struct { struct sched_entity se; struct sched_rt_entity rt; struct sched_dl_entity dl; +#ifdef CONFIG_SCHED_CLASS_EXT + struct sched_ext_entity scx; +#endif const struct sched_class *sched_class; #ifdef CONFIG_SCHED_CORE diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index a05dfcf533b0..45bf24a23c61 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -1,9 +1,408 @@ /* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ #ifndef _LINUX_SCHED_EXT_H #define _LINUX_SCHED_EXT_H #ifdef CONFIG_SCHED_CLASS_EXT -#error "NOT IMPLEMENTED YET" + +#include +#include + +enum scx_consts { + SCX_OPS_NAME_LEN = 128, + SCX_EXIT_REASON_LEN = 128, + SCX_EXIT_BT_LEN = 64, + SCX_EXIT_MSG_LEN = 1024, + + SCX_SLICE_DFL = 20 * NSEC_PER_MSEC, +}; + +/* + * DSQ (dispatch queue) IDs are 64bit of the format: + * + * Bits: [63] [62 .. 0] + * [ B] [ ID ] + * + * B: 1 for IDs for built-in DSQs, 0 for ops-created user DSQs + * ID: 63 bit ID + * + * Built-in IDs: + * + * Bits: [63] [62] [61..32] [31 .. 0] + * [ 1] [ L] [ R ] [ V ] + * + * 1: 1 for built-in DSQs. + * L: 1 for LOCAL_ON DSQ IDs, 0 for others + * V: For LOCAL_ON DSQ IDs, a CPU number. For others, a pre-defined value. 
+ */ +enum scx_dsq_id_flags { + SCX_DSQ_FLAG_BUILTIN = 1LLU << 63, + SCX_DSQ_FLAG_LOCAL_ON = 1LLU << 62, + + SCX_DSQ_INVALID = SCX_DSQ_FLAG_BUILTIN | 0, + SCX_DSQ_GLOBAL = SCX_DSQ_FLAG_BUILTIN | 1, + SCX_DSQ_LOCAL = SCX_DSQ_FLAG_BUILTIN | 2, + SCX_DSQ_LOCAL_ON = SCX_DSQ_FLAG_BUILTIN | SCX_DSQ_FLAG_LOCAL_ON, + SCX_DSQ_LOCAL_CPU_MASK = 0xffffffffLLU, +}; + +enum scx_exit_type { + SCX_EXIT_NONE, + SCX_EXIT_DONE, + + SCX_EXIT_UNREG = 64, /* BPF unregistration */ + + SCX_EXIT_ERROR = 1024, /* runtime error, error msg contains details */ + SCX_EXIT_ERROR_BPF, /* ERROR but triggered through scx_bpf_error() */ +}; + +/* + * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is + * being disabled. + */ +struct scx_exit_info { + /* %SCX_EXIT_* - broad category of the exit reason */ + enum scx_exit_type type; + /* textual representation of the above */ + char reason[SCX_EXIT_REASON_LEN]; + /* number of entries in the backtrace */ + u32 bt_len; + /* backtrace if exiting due to an error */ + unsigned long bt[SCX_EXIT_BT_LEN]; + /* extra message */ + char msg[SCX_EXIT_MSG_LEN]; +}; + +/* sched_ext_ops.flags */ +enum scx_ops_flags { + /* + * Keep built-in idle tracking even if ops.update_idle() is implemented. + */ + SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0, + + /* + * By default, if there are no other task to run on the CPU, ext core + * keeps running the current task even after its slice expires. If this + * flag is specified, such tasks are passed to ops.enqueue() with + * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info. + */ + SCX_OPS_ENQ_LAST = 1LLU << 1, + + /* + * An exiting task may schedule after PF_EXITING is set. In such cases, + * bpf_task_from_pid() may not be able to find the task and if the BPF + * scheduler depends on pid lookup for dispatching, the task will be + * lost leading to various issues including RCU grace period stalls. + * + * To mask this problem, by default, unhashed tasks are automatically + * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't + * depend on pid lookups and wants to handle these tasks directly, the + * following flag can be used. + */ + SCX_OPS_ENQ_EXITING = 1LLU << 2, + + SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE | + SCX_OPS_ENQ_LAST | + SCX_OPS_ENQ_EXITING, +}; + +/* argument container for ops.enable() and friends */ +struct scx_enable_args { + /* empty for now */ +}; + +/** + * struct sched_ext_ops - Operation table for BPF scheduler implementation + * + * Userland can implement an arbitrary scheduling policy by implementing and + * loading operations in this table. + */ +struct sched_ext_ops { + /** + * select_cpu - Pick the target CPU for a task which is being woken up + * @p: task being woken up + * @prev_cpu: the cpu @p was on before sleeping + * @wake_flags: SCX_WAKE_* + * + * Decision made here isn't final. @p may be moved to any CPU while it + * is getting dispatched for execution later. However, as @p is not on + * the rq at this point, getting the eventual execution CPU right here + * saves a small bit of overhead down the line. + * + * If an idle CPU is returned, the CPU is kicked and will try to + * dispatch. While an explicit custom mechanism can be added, + * select_cpu() serves as the default way to wake up idle CPUs. + */ + s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags); + + /** + * enqueue - Enqueue a task on the BPF scheduler + * @p: task being enqueued + * @enq_flags: %SCX_ENQ_* + * + * @p is ready to run. 
Dispatch directly by calling scx_bpf_dispatch() + * or enqueue on the BPF scheduler. If not directly dispatched, the bpf + * scheduler owns @p and if it fails to dispatch @p, the task will + * stall. + */ + void (*enqueue)(struct task_struct *p, u64 enq_flags); + + /** + * dequeue - Remove a task from the BPF scheduler + * @p: task being dequeued + * @deq_flags: %SCX_DEQ_* + * + * Remove @p from the BPF scheduler. This is usually called to isolate + * the task while updating its scheduling properties (e.g. priority). + * + * The ext core keeps track of whether the BPF side owns a given task or + * not and can gracefully ignore spurious dispatches from BPF side, + * which makes it safe to not implement this method. However, depending + * on the scheduling logic, this can lead to confusing behaviors - e.g. + * scheduling position not being updated across a priority change. + */ + void (*dequeue)(struct task_struct *p, u64 deq_flags); + + /** + * dispatch - Dispatch tasks from the BPF scheduler and/or consume DSQs + * @cpu: CPU to dispatch tasks for + * @prev: previous task being switched out + * + * Called when a CPU's local dsq is empty. The operation should dispatch + * one or more tasks from the BPF scheduler into the DSQs using + * scx_bpf_dispatch() and/or consume user DSQs into the local DSQ using + * scx_bpf_consume(). + * + * The maximum number of times scx_bpf_dispatch() can be called without + * an intervening scx_bpf_consume() is specified by + * ops.dispatch_max_batch. See the comments on top of the two functions + * for more details. + * + * When not %NULL, @prev is an SCX task with its slice depleted. If + * @prev is still runnable as indicated by set %SCX_TASK_QUEUED in + * @prev->scx.flags, it is not enqueued yet and will be enqueued after + * ops.dispatch() returns. To keep executing @prev, return without + * dispatching or consuming any tasks. Also see %SCX_OPS_ENQ_LAST. + */ + void (*dispatch)(s32 cpu, struct task_struct *prev); + + /** + * yield - Yield CPU + * @from: yielding task + * @to: optional yield target task + * + * If @to is NULL, @from is yielding the CPU to other runnable tasks. + * The BPF scheduler should ensure that other available tasks are + * dispatched before the yielding task. Return value is ignored in this + * case. + * + * If @to is not-NULL, @from wants to yield the CPU to @to. If the bpf + * scheduler can implement the request, return %true; otherwise, %false. + */ + bool (*yield)(struct task_struct *from, struct task_struct *to); + + /** + * set_weight - Set task weight + * @p: task to set weight for + * @weight: new eight [1..10000] + * + * Update @p's weight to @weight. + */ + void (*set_weight)(struct task_struct *p, u32 weight); + + /** + * set_cpumask - Set CPU affinity + * @p: task to set CPU affinity for + * @cpumask: cpumask of cpus that @p can run on + * + * Update @p's CPU affinity to @cpumask. + */ + void (*set_cpumask)(struct task_struct *p, struct cpumask *cpumask); + + /** + * update_idle - Update the idle state of a CPU + * @cpu: CPU to udpate the idle state for + * @idle: whether entering or exiting the idle state + * + * This operation is called when @rq's CPU goes or leaves the idle + * state. 
By default, implementing this operation disables the built-in + * idle CPU tracking and the following helpers become unavailable: + * + * - scx_bpf_select_cpu_dfl() + * - scx_bpf_test_and_clear_cpu_idle() + * - scx_bpf_pick_idle_cpu() + * - scx_bpf_any_idle_cpu() + * + * The user also must implement ops.select_cpu() as the default + * implementation relies on scx_bpf_select_cpu_dfl(). + * + * If you keep the built-in idle tracking, specify the + * %SCX_OPS_KEEP_BUILTIN_IDLE flag. + */ + void (*update_idle)(s32 cpu, bool idle); + + /** + * prep_enable - Prepare to enable BPF scheduling for a task + * @p: task to prepare BPF scheduling for + * @args: enable arguments, see the struct definition + * + * Either we're loading a BPF scheduler or a new task is being forked. + * Prepare BPF scheduling for @p. This operation may block and can be + * used for allocations. + * + * Return 0 for success, -errno for failure. An error return while + * loading will abort loading of the BPF scheduler. During a fork, will + * abort the specific fork. + */ + s32 (*prep_enable)(struct task_struct *p, struct scx_enable_args *args); + + /** + * enable - Enable BPF scheduling for a task + * @p: task to enable BPF scheduling for + * @args: enable arguments, see the struct definition + * + * Enable @p for BPF scheduling. @p will start running soon. + */ + void (*enable)(struct task_struct *p, struct scx_enable_args *args); + + /** + * cancel_enable - Cancel prep_enable() + * @p: task being canceled + * @args: enable arguments, see the struct definition + * + * @p was prep_enable()'d but failed before reaching enable(). Undo the + * preparation. + */ + void (*cancel_enable)(struct task_struct *p, + struct scx_enable_args *args); + + /** + * disable - Disable BPF scheduling for a task + * @p: task to disable BPF scheduling for + * + * @p is exiting, leaving SCX or the BPF scheduler is being unloaded. + * Disable BPF scheduling for @p. + */ + void (*disable)(struct task_struct *p); + + /* + * All online ops must come before ops.init(). + */ + + /** + * init - Initialize the BPF scheduler + */ + s32 (*init)(void); + + /** + * exit - Clean up after the BPF scheduler + * @info: Exit info + */ + void (*exit)(struct scx_exit_info *info); + + /** + * dispatch_max_batch - Max nr of tasks that dispatch() can dispatch + */ + u32 dispatch_max_batch; + + /** + * flags - %SCX_OPS_* flags + */ + u64 flags; + + /** + * name - BPF scheduler's name + * + * Must be a non-zero valid BPF object name including only isalnum(), + * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the + * BPF scheduler is enabled. + */ + char name[SCX_OPS_NAME_LEN]; +}; + +/* + * Dispatch queue (dsq) is a simple FIFO which is used to buffer between the + * scheduler core and the BPF scheduler. See the documentation for more details. 
+ */ +struct scx_dispatch_q { + raw_spinlock_t lock; + struct list_head fifo; /* processed in dispatching order */ + u32 nr; + u64 id; + struct rhash_head hash_node; + struct llist_node free_node; + struct rcu_head rcu; +}; + +/* scx_entity.flags */ +enum scx_ent_flags { + SCX_TASK_QUEUED = 1 << 0, /* on ext runqueue */ + SCX_TASK_BAL_KEEP = 1 << 1, /* balance decided to keep current */ + SCX_TASK_ENQ_LOCAL = 1 << 2, /* used by scx_select_cpu_dfl() to set SCX_ENQ_LOCAL */ + + SCX_TASK_OPS_PREPPED = 1 << 8, /* prepared for BPF scheduler enable */ + SCX_TASK_OPS_ENABLED = 1 << 9, /* task has BPF scheduler enabled */ + + SCX_TASK_DEQD_FOR_SLEEP = 1 << 17, /* last dequeue was for SLEEP */ + + SCX_TASK_CURSOR = 1 << 31, /* iteration cursor, not a task */ +}; + +/* + * Mask bits for scx_entity.kf_mask. Not all kfuncs can be called from + * everywhere and the following bits track which kfunc sets are currently + * allowed for %current. This simple per-task tracking works because SCX ops + * nest in a limited way. BPF will likely implement a way to allow and disallow + * kfuncs depending on the calling context which will replace this manual + * mechanism. See scx_kf_allow(). + */ +enum scx_kf_mask { + SCX_KF_UNLOCKED = 0, /* not sleepable, not rq locked */ + /* all non-sleepables may be nested inside INIT and SLEEPABLE */ + SCX_KF_INIT = 1 << 0, /* running ops.init() */ + SCX_KF_SLEEPABLE = 1 << 1, /* other sleepable init operations */ + /* ops.dequeue (in REST) may be nested inside DISPATCH */ + SCX_KF_DISPATCH = 1 << 3, /* ops.dispatch() */ + SCX_KF_ENQUEUE = 1 << 4, /* ops.enqueue() */ + SCX_KF_REST = 1 << 5, /* other rq-locked operations */ + + __SCX_KF_RQ_LOCKED = SCX_KF_DISPATCH | SCX_KF_ENQUEUE | SCX_KF_REST, +}; + +/* + * The following is embedded in task_struct and contains all fields necessary + * for a task to be scheduled by SCX. + */ +struct sched_ext_entity { + struct scx_dispatch_q *dsq; + struct list_head dsq_node; + u32 flags; /* protected by rq lock */ + u32 weight; + s32 sticky_cpu; + s32 holding_cpu; + u32 kf_mask; /* see scx_kf_mask above */ + atomic64_t ops_state; + + /* BPF scheduler modifiable fields */ + + /* + * Runtime budget in nsecs. This is usually set through + * scx_bpf_dispatch() but can also be modified directly by the BPF + * scheduler. Automatically decreased by SCX as the task executes. On + * depletion, a scheduling event is triggered. 
+ */ + u64 slice; + + /* cold fields */ + struct list_head tasks_node; +}; + +void sched_ext_free(struct task_struct *p); + #else /* !CONFIG_SCHED_CLASS_EXT */ static inline void sched_ext_free(struct task_struct *p) {} diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h index 3bac0a8ceab2..359a14cc76a4 100644 --- a/include/uapi/linux/sched.h +++ b/include/uapi/linux/sched.h @@ -118,6 +118,7 @@ struct clone_args { /* SCHED_ISO: reserved but not implemented yet */ #define SCHED_IDLE 5 #define SCHED_DEADLINE 6 +#define SCHED_EXT 7 /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */ #define SCHED_RESET_ON_FORK 0x40000000 diff --git a/init/init_task.c b/init/init_task.c index ff6c4b9bfe6b..bdbc663107bf 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -101,6 +102,15 @@ struct task_struct init_task #endif #ifdef CONFIG_CGROUP_SCHED .sched_task_group = &root_task_group, +#endif +#ifdef CONFIG_SCHED_CLASS_EXT + .scx = { + .dsq_node = LIST_HEAD_INIT(init_task.scx.dsq_node), + .sticky_cpu = -1, + .holding_cpu = -1, + .ops_state = ATOMIC_INIT(0), + .slice = SCX_SLICE_DFL, + }, #endif .ptraced = LIST_HEAD_INIT(init_task.ptraced), .ptrace_entry = LIST_HEAD_INIT(init_task.ptrace_entry), diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index c2f1fd95a821..0afcda19bc50 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -133,4 +133,24 @@ config SCHED_CORE which is the likely usage by Linux distributions, there should be no measurable impact on performance. - +config SCHED_CLASS_EXT + bool "Extensible Scheduling Class" + depends on BPF_SYSCALL && BPF_JIT && !SCHED_CORE + help + This option enables a new scheduler class sched_ext (SCX), which + allows scheduling policies to be implemented as BPF programs to + achieve the following: + + - Ease of experimentation and exploration: Enabling rapid + iteration of new scheduling policies. + - Customization: Building application-specific schedulers which + implement policies that are not applicable to general-purpose + schedulers. + - Rapid scheduler deployments: Non-disruptive swap outs of + scheduling policies in production environments. + + sched_ext leverages BPF’s struct_ops feature to define a structure + which exports function callbacks and flags to BPF programs that + wish to implement scheduling policies. The struct_ops structure + exported by sched_ext is struct sched_ext_ops, and is conceptually + similar to struct sched_class. 
diff --git a/kernel/bpf/bpf_struct_ops_types.h b/kernel/bpf/bpf_struct_ops_types.h index 5678a9ddf817..3618769d853d 100644 --- a/kernel/bpf/bpf_struct_ops_types.h +++ b/kernel/bpf/bpf_struct_ops_types.h @@ -9,4 +9,8 @@ BPF_STRUCT_OPS_TYPE(bpf_dummy_ops) #include BPF_STRUCT_OPS_TYPE(tcp_congestion_ops) #endif +#ifdef CONFIG_SCHED_CLASS_EXT +#include +BPF_STRUCT_OPS_TYPE(sched_ext_ops) +#endif #endif diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c index d9dc9ab3773f..4c658b21f603 100644 --- a/kernel/sched/build_policy.c +++ b/kernel/sched/build_policy.c @@ -28,6 +28,7 @@ #include #include #include +#include #include @@ -52,3 +53,6 @@ #include "cputime.c" #include "deadline.c" +#ifdef CONFIG_SCHED_CLASS_EXT +# include "ext.c" +#endif diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9ecee40eb0bc..a3fb6a05d131 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4480,6 +4480,18 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) p->rt.on_rq = 0; p->rt.on_list = 0; +#ifdef CONFIG_SCHED_CLASS_EXT + p->scx.dsq = NULL; + INIT_LIST_HEAD(&p->scx.dsq_node); + p->scx.flags = 0; + p->scx.weight = 0; + p->scx.sticky_cpu = -1; + p->scx.holding_cpu = -1; + p->scx.kf_mask = 0; + atomic64_set(&p->scx.ops_state, 0); + p->scx.slice = SCX_SLICE_DFL; +#endif + #ifdef CONFIG_PREEMPT_NOTIFIERS INIT_HLIST_HEAD(&p->preempt_notifiers); #endif @@ -4727,6 +4739,10 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) goto out_cancel; } else if (rt_prio(p->prio)) { p->sched_class = &rt_sched_class; +#ifdef CONFIG_SCHED_CLASS_EXT + } else if (task_on_scx(p)) { + p->sched_class = &ext_sched_class; +#endif } else { p->sched_class = &fair_sched_class; } @@ -7001,6 +7017,10 @@ void __setscheduler_prio(struct task_struct *p, int prio) p->sched_class = &dl_sched_class; else if (rt_prio(prio)) p->sched_class = &rt_sched_class; +#ifdef CONFIG_SCHED_CLASS_EXT + else if (task_on_scx(p)) + p->sched_class = &ext_sched_class; +#endif else p->sched_class = &fair_sched_class; @@ -8927,6 +8947,7 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy) case SCHED_NORMAL: case SCHED_BATCH: case SCHED_IDLE: + case SCHED_EXT: ret = 0; break; } @@ -8954,6 +8975,7 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy) case SCHED_NORMAL: case SCHED_BATCH: case SCHED_IDLE: + case SCHED_EXT: ret = 0; } return ret; @@ -9799,6 +9821,10 @@ void __init sched_init(void) BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class)); BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class)); BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class)); +#ifdef CONFIG_SCHED_CLASS_EXT + BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class)); + BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class)); +#endif wait_bit_init(); diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 1637b65ba07a..814ed80b8ff6 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -338,6 +338,9 @@ static __init int sched_init_debug(void) debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops); +#ifdef CONFIG_SCHED_CLASS_EXT + debugfs_create_file("ext", 0444, debugfs_sched, NULL, &sched_ext_fops); +#endif return 0; } late_initcall(sched_init_debug); @@ -1047,6 +1050,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, P(dl.runtime); P(dl.deadline); } +#ifdef CONFIG_SCHED_CLASS_EXT + __PS("ext.enabled", p->sched_class == &ext_sched_class); +#endif #undef PN_SCHEDSTAT #undef P_SCHEDSTAT diff --git 
a/kernel/sched/ext.c b/kernel/sched/ext.c new file mode 100644 index 000000000000..738ae1d7a8ee --- /dev/null +++ b/kernel/sched/ext.c @@ -0,0 +1,3042 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#define SCX_OP_IDX(op) (offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void))) + +enum scx_internal_consts { + SCX_NR_ONLINE_OPS = SCX_OP_IDX(init), + SCX_DSP_DFL_MAX_BATCH = 32, +}; + +enum scx_ops_enable_state { + SCX_OPS_PREPPING, + SCX_OPS_ENABLING, + SCX_OPS_ENABLED, + SCX_OPS_DISABLING, + SCX_OPS_DISABLED, +}; + +/* + * sched_ext_entity->ops_state + * + * Used to track the task ownership between the SCX core and the BPF scheduler. + * State transitions look as follows: + * + * NONE -> QUEUEING -> QUEUED -> DISPATCHING + * ^ | | + * | v v + * \-------------------------------/ + * + * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call + * sites for explanations on the conditions being waited upon and why they are + * safe. Transitions out of them into NONE or QUEUED must store_release and the + * waiters should load_acquire. + * + * Tracking scx_ops_state enables sched_ext core to reliably determine whether + * any given task can be dispatched by the BPF scheduler at all times and thus + * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler + * to try to dispatch any task anytime regardless of its state as the SCX core + * can safely reject invalid dispatches. + */ +enum scx_ops_state { + SCX_OPSS_NONE, /* owned by the SCX core */ + SCX_OPSS_QUEUEING, /* in transit to the BPF scheduler */ + SCX_OPSS_QUEUED, /* owned by the BPF scheduler */ + SCX_OPSS_DISPATCHING, /* in transit back to the SCX core */ + + /* + * QSEQ brands each QUEUED instance so that, when dispatch races + * dequeue/requeue, the dispatcher can tell whether it still has a claim + * on the task being dispatched. + */ + SCX_OPSS_QSEQ_SHIFT = 2, + SCX_OPSS_STATE_MASK = (1LLU << SCX_OPSS_QSEQ_SHIFT) - 1, + SCX_OPSS_QSEQ_MASK = ~SCX_OPSS_STATE_MASK, +}; + +/* + * During exit, a task may schedule after losing its PIDs. When disabling the + * BPF scheduler, we need to be able to iterate tasks in every state to + * guarantee system safety. Maintain a dedicated task list which contains every + * task between its fork and eventual free. + */ +static DEFINE_SPINLOCK(scx_tasks_lock); +static LIST_HEAD(scx_tasks); + +/* ops enable/disable */ +static struct kthread_worker *scx_ops_helper; +static DEFINE_MUTEX(scx_ops_enable_mutex); +DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled); +DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem); +static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED); +static struct sched_ext_ops scx_ops; +static bool scx_warned_zero_slice; + +static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last); +static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting); +static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled); + +struct static_key_false scx_has_op[SCX_NR_ONLINE_OPS] = + { [0 ... 
SCX_NR_ONLINE_OPS-1] = STATIC_KEY_FALSE_INIT }; + +static atomic_t scx_exit_type = ATOMIC_INIT(SCX_EXIT_DONE); +static struct scx_exit_info scx_exit_info; + +static atomic64_t scx_nr_rejected = ATOMIC64_INIT(0); + +/* idle tracking */ +#ifdef CONFIG_SMP +#ifdef CONFIG_CPUMASK_OFFSTACK +#define CL_ALIGNED_IF_ONSTACK +#else +#define CL_ALIGNED_IF_ONSTACK __cacheline_aligned_in_smp +#endif + +static struct { + cpumask_var_t cpu; + cpumask_var_t smt; +} idle_masks CL_ALIGNED_IF_ONSTACK; + +static bool __cacheline_aligned_in_smp scx_has_idle_cpus; +#endif /* CONFIG_SMP */ + +/* + * Direct dispatch marker. + * + * Non-NULL values are used for direct dispatch from enqueue path. A valid + * pointer points to the task currently being enqueued. An ERR_PTR value is used + * to indicate that direct dispatch has already happened. + */ +static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task); + +/* dispatch queues */ +static struct scx_dispatch_q __cacheline_aligned_in_smp scx_dsq_global; + +static const struct rhashtable_params dsq_hash_params = { + .key_len = 8, + .key_offset = offsetof(struct scx_dispatch_q, id), + .head_offset = offsetof(struct scx_dispatch_q, hash_node), +}; + +static struct rhashtable dsq_hash; +static LLIST_HEAD(dsqs_to_free); + +/* dispatch buf */ +struct scx_dsp_buf_ent { + struct task_struct *task; + u64 qseq; + u64 dsq_id; + u64 enq_flags; +}; + +static u32 scx_dsp_max_batch; +static struct scx_dsp_buf_ent __percpu *scx_dsp_buf; + +struct scx_dsp_ctx { + struct rq *rq; + struct rq_flags *rf; + u32 buf_cursor; + u32 nr_tasks; +}; + +static DEFINE_PER_CPU(struct scx_dsp_ctx, scx_dsp_ctx); + +void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, + u64 enq_flags); +__printf(2, 3) static void scx_ops_error_type(enum scx_exit_type type, + const char *fmt, ...); +#define scx_ops_error(fmt, args...) \ + scx_ops_error_type(SCX_EXIT_ERROR, fmt, ##args) + +struct scx_task_iter { + struct sched_ext_entity cursor; + struct task_struct *locked; + struct rq *rq; + struct rq_flags rf; +}; + +#define SCX_HAS_OP(op) static_branch_likely(&scx_has_op[SCX_OP_IDX(op)]) + +/* if the highest set bit is N, return a mask with bits [N+1, 31] set */ +static u32 higher_bits(u32 flags) +{ + return ~((1 << fls(flags)) - 1); +} + +/* return the mask with only the highest bit set */ +static u32 highest_bit(u32 flags) +{ + int bit = fls(flags); + return bit ? 1 << (bit - 1) : 0; +} + +/* + * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX + * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate + * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check + * whether it's running from an allowed context. + * + * @mask is constant, always inline to cull the mask calculations. + */ +static __always_inline void scx_kf_allow(u32 mask) +{ + /* nesting is allowed only in increasing scx_kf_mask order */ + WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask, + "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n", + current->scx.kf_mask, mask); + current->scx.kf_mask |= mask; +} + +static void scx_kf_disallow(u32 mask) +{ + current->scx.kf_mask &= ~mask; +} + +#define SCX_CALL_OP(mask, op, args...) \ +do { \ + if (mask) { \ + scx_kf_allow(mask); \ + scx_ops.op(args); \ + scx_kf_disallow(mask); \ + } else { \ + scx_ops.op(args); \ + } \ +} while (0) + +#define SCX_CALL_OP_RET(mask, op, args...) 
\ +({ \ + __typeof__(scx_ops.op(args)) __ret; \ + if (mask) { \ + scx_kf_allow(mask); \ + __ret = scx_ops.op(args); \ + scx_kf_disallow(mask); \ + } else { \ + __ret = scx_ops.op(args); \ + } \ + __ret; \ +}) + +/* @mask is constant, always inline to cull unnecessary branches */ +static __always_inline bool scx_kf_allowed(u32 mask) +{ + if (unlikely(!(current->scx.kf_mask & mask))) { + scx_ops_error("kfunc with mask 0x%x called from an operation only allowing 0x%x", + mask, current->scx.kf_mask); + return false; + } + + if (unlikely((mask & (SCX_KF_INIT | SCX_KF_SLEEPABLE)) && + in_interrupt())) { + scx_ops_error("sleepable kfunc called from non-sleepable context"); + return false; + } + + /* + * Enforce nesting boundaries. e.g. A kfunc which can be called from + * DISPATCH must not be called if we're running DEQUEUE which is nested + * inside ops.dispatch(). We don't need to check the SCX_KF_SLEEPABLE + * boundary thanks to the above in_interrupt() check. + */ + if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH && + (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) { + scx_ops_error("dispatch kfunc called from a nested operation"); + return false; + } + + return true; +} + +/** + * scx_task_iter_init - Initialize a task iterator + * @iter: iterator to init + * + * Initialize @iter. Must be called with scx_tasks_lock held. Once initialized, + * @iter must eventually be exited with scx_task_iter_exit(). + * + * scx_tasks_lock may be released between this and the first next() call or + * between any two next() calls. If scx_tasks_lock is released between two + * next() calls, the caller is responsible for ensuring that the task being + * iterated remains accessible either through RCU read lock or obtaining a + * reference count. + * + * All tasks which existed when the iteration started are guaranteed to be + * visited as long as they still exist. + */ +static void scx_task_iter_init(struct scx_task_iter *iter) +{ + lockdep_assert_held(&scx_tasks_lock); + + iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR }; + list_add(&iter->cursor.tasks_node, &scx_tasks); + iter->locked = NULL; +} + +/** + * scx_task_iter_exit - Exit a task iterator + * @iter: iterator to exit + * + * Exit a previously initialized @iter. Must be called with scx_tasks_lock held. + * If the iterator holds a task's rq lock, that rq lock is released. See + * scx_task_iter_init() for details. + */ +static void scx_task_iter_exit(struct scx_task_iter *iter) +{ + struct list_head *cursor = &iter->cursor.tasks_node; + + lockdep_assert_held(&scx_tasks_lock); + + if (iter->locked) { + task_rq_unlock(iter->rq, iter->locked, &iter->rf); + iter->locked = NULL; + } + + if (list_empty(cursor)) + return; + + list_del_init(cursor); +} + +/** + * scx_task_iter_next - Next task + * @iter: iterator to walk + * + * Visit the next task. See scx_task_iter_init() for details. 
+ */ +static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter) +{ + struct list_head *cursor = &iter->cursor.tasks_node; + struct sched_ext_entity *pos; + + lockdep_assert_held(&scx_tasks_lock); + + list_for_each_entry(pos, cursor, tasks_node) { + if (&pos->tasks_node == &scx_tasks) + return NULL; + if (!(pos->flags & SCX_TASK_CURSOR)) { + list_move(cursor, &pos->tasks_node); + return container_of(pos, struct task_struct, scx); + } + } + + /* can't happen, should always terminate at scx_tasks above */ + BUG(); +} + +/** + * scx_task_iter_next_filtered - Next non-idle task + * @iter: iterator to walk + * + * Visit the next non-idle task. See scx_task_iter_init() for details. + */ +static struct task_struct * +scx_task_iter_next_filtered(struct scx_task_iter *iter) +{ + struct task_struct *p; + + while ((p = scx_task_iter_next(iter))) { + if (!is_idle_task(p)) + return p; + } + return NULL; +} + +/** + * scx_task_iter_next_filtered_locked - Next non-idle task with its rq locked + * @iter: iterator to walk + * + * Visit the next non-idle task with its rq lock held. See scx_task_iter_init() + * for details. + */ +static struct task_struct * +scx_task_iter_next_filtered_locked(struct scx_task_iter *iter) +{ + struct task_struct *p; + + if (iter->locked) { + task_rq_unlock(iter->rq, iter->locked, &iter->rf); + iter->locked = NULL; + } + + p = scx_task_iter_next_filtered(iter); + if (!p) + return NULL; + + iter->rq = task_rq_lock(p, &iter->rf); + iter->locked = p; + return p; +} + +static enum scx_ops_enable_state scx_ops_enable_state(void) +{ + return atomic_read(&scx_ops_enable_state_var); +} + +static enum scx_ops_enable_state +scx_ops_set_enable_state(enum scx_ops_enable_state to) +{ + return atomic_xchg(&scx_ops_enable_state_var, to); +} + +static bool scx_ops_tryset_enable_state(enum scx_ops_enable_state to, + enum scx_ops_enable_state from) +{ + int from_v = from; + + return atomic_try_cmpxchg(&scx_ops_enable_state_var, &from_v, to); +} + +static bool scx_ops_disabling(void) +{ + return unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING); +} + +/** + * wait_ops_state - Busy-wait the specified ops state to end + * @p: target task + * @opss: state to wait the end of + * + * Busy-wait for @p to transition out of @opss. This can only be used when the + * state part of @opss is %SCX_QUEUEING or %SCX_DISPATCHING. This function also + * has load_acquire semantics to ensure that the caller can see the updates made + * in the enqueueing and dispatching paths. + */ +static void wait_ops_state(struct task_struct *p, u64 opss) +{ + do { + cpu_relax(); + } while (atomic64_read_acquire(&p->scx.ops_state) == opss); +} + +/** + * ops_cpu_valid - Verify a cpu number + * @cpu: cpu number which came from a BPF ops + * + * @cpu is a cpu number which came from the BPF scheduler and can be any value. + * Verify that it is in range and one of the possible cpus. + */ +static bool ops_cpu_valid(s32 cpu) +{ + return likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu)); +} + +/** + * ops_sanitize_err - Sanitize a -errno value + * @ops_name: operation to blame on failure + * @err: -errno value to sanitize + * + * Verify @err is a valid -errno. If not, trigger scx_ops_error() and return + * -%EPROTO. This is necessary because returning a rogue -errno up the chain can + * cause misbehaviors. 
For example, a large negative return from + * ops.prep_enable() triggers an oops when passed up the call chain because the + * value fails the IS_ERR() test after being encoded with ERR_PTR() and then is + * handled as a pointer. + */ +static int ops_sanitize_err(const char *ops_name, s32 err) +{ + if (err < 0 && err >= -MAX_ERRNO) + return err; + + scx_ops_error("ops.%s() returned an invalid errno %d", ops_name, err); + return -EPROTO; +} + +static void update_curr_scx(struct rq *rq) +{ + struct task_struct *curr = rq->curr; + u64 now = rq_clock_task(rq); + u64 delta_exec; + + if (time_before_eq64(now, curr->se.exec_start)) + return; + + delta_exec = now - curr->se.exec_start; + curr->se.exec_start = now; + curr->se.sum_exec_runtime += delta_exec; + account_group_exec_runtime(curr, delta_exec); + cgroup_account_cputime(curr, delta_exec); + + curr->scx.slice -= min(curr->scx.slice, delta_exec); +} + +static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p, + u64 enq_flags) +{ + bool is_local = dsq->id == SCX_DSQ_LOCAL; + + WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_node)); + + if (!is_local) { + raw_spin_lock(&dsq->lock); + if (unlikely(dsq->id == SCX_DSQ_INVALID)) { + scx_ops_error("attempting to dispatch to a destroyed dsq"); + /* fall back to the global dsq */ + raw_spin_unlock(&dsq->lock); + dsq = &scx_dsq_global; + raw_spin_lock(&dsq->lock); + } + } + + if (enq_flags & SCX_ENQ_HEAD) + list_add(&p->scx.dsq_node, &dsq->fifo); + else + list_add_tail(&p->scx.dsq_node, &dsq->fifo); + dsq->nr++; + p->scx.dsq = dsq; + + /* + * We're transitioning out of QUEUEING or DISPATCHING. store_release to + * match waiters' load_acquire. + */ + if (enq_flags & SCX_ENQ_CLEAR_OPSS) + atomic64_set_release(&p->scx.ops_state, SCX_OPSS_NONE); + + if (is_local) { + struct rq *rq = container_of(dsq, struct rq, scx.local_dsq); + + if (sched_class_above(&ext_sched_class, rq->curr->sched_class)) + resched_curr(rq); + } else { + raw_spin_unlock(&dsq->lock); + } +} + +static void dispatch_dequeue(struct scx_rq *scx_rq, struct task_struct *p) +{ + struct scx_dispatch_q *dsq = p->scx.dsq; + bool is_local = dsq == &scx_rq->local_dsq; + + if (!dsq) { + WARN_ON_ONCE(!list_empty(&p->scx.dsq_node)); + /* + * When dispatching directly from the BPF scheduler to a local + * DSQ, the task isn't associated with any DSQ but + * @p->scx.holding_cpu may be set under the protection of + * %SCX_OPSS_DISPATCHING. + */ + if (p->scx.holding_cpu >= 0) + p->scx.holding_cpu = -1; + return; + } + + if (!is_local) + raw_spin_lock(&dsq->lock); + + /* + * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_node + * can't change underneath us. + */ + if (p->scx.holding_cpu < 0) { + /* @p must still be on @dsq, dequeue */ + WARN_ON_ONCE(list_empty(&p->scx.dsq_node)); + list_del_init(&p->scx.dsq_node); + dsq->nr--; + } else { + /* + * We're racing against dispatch_to_local_dsq() which already + * removed @p from @dsq and set @p->scx.holding_cpu. Clear the + * holding_cpu which tells dispatch_to_local_dsq() that it lost + * the race.
+ */ + WARN_ON_ONCE(!list_empty(&p->scx.dsq_node)); + p->scx.holding_cpu = -1; + } + p->scx.dsq = NULL; + + if (!is_local) + raw_spin_unlock(&dsq->lock); +} + +static struct scx_dispatch_q *find_non_local_dsq(u64 dsq_id) +{ + lockdep_assert(rcu_read_lock_any_held()); + + if (dsq_id == SCX_DSQ_GLOBAL) + return &scx_dsq_global; + else + return rhashtable_lookup_fast(&dsq_hash, &dsq_id, + dsq_hash_params); +} + +static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id, + struct task_struct *p) +{ + struct scx_dispatch_q *dsq; + + if (dsq_id == SCX_DSQ_LOCAL) + return &rq->scx.local_dsq; + + dsq = find_non_local_dsq(dsq_id); + if (unlikely(!dsq)) { + scx_ops_error("non-existent DSQ 0x%llx for %s[%d]", + dsq_id, p->comm, p->pid); + return &scx_dsq_global; + } + + return dsq; +} + +static void direct_dispatch(struct task_struct *ddsp_task, struct task_struct *p, + u64 dsq_id, u64 enq_flags) +{ + struct scx_dispatch_q *dsq; + + /* @p must match the task which is being enqueued */ + if (unlikely(p != ddsp_task)) { + if (IS_ERR(ddsp_task)) + scx_ops_error("%s[%d] already direct-dispatched", + p->comm, p->pid); + else + scx_ops_error("enqueueing %s[%d] but trying to direct-dispatch %s[%d]", + ddsp_task->comm, ddsp_task->pid, + p->comm, p->pid); + return; + } + + /* + * %SCX_DSQ_LOCAL_ON is not supported during direct dispatch because + * dispatching to the local DSQ of a different CPU requires unlocking + * the current rq which isn't allowed in the enqueue path. Use + * ops.select_cpu() to be on the target CPU and then %SCX_DSQ_LOCAL. + */ + if (unlikely((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON)) { + scx_ops_error("SCX_DSQ_LOCAL_ON can't be used for direct-dispatch"); + return; + } + + dsq = find_dsq_for_dispatch(task_rq(p), dsq_id, p); + dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS); + + /* + * Mark that dispatch already happened by spoiling direct_dispatch_task + * with a non-NULL value which can never match a valid task pointer. + */ + __this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH)); +} + +static bool test_rq_online(struct rq *rq) +{ +#ifdef CONFIG_SMP + return rq->online; +#else + return true; +#endif +} + +static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags, + int sticky_cpu) +{ + struct task_struct **ddsp_taskp; + u64 qseq; + + WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED)); + + if (p->scx.flags & SCX_TASK_ENQ_LOCAL) { + enq_flags |= SCX_ENQ_LOCAL; + p->scx.flags &= ~SCX_TASK_ENQ_LOCAL; + } + + /* rq migration */ + if (sticky_cpu == cpu_of(rq)) + goto local_norefill; + + /* + * If !rq->online, we already told the BPF scheduler that the CPU is + * offline. We're just trying to on/offline the CPU. Don't bother the + * BPF scheduler. 
+ */ + if (unlikely(!test_rq_online(rq))) + goto local; + + /* see %SCX_OPS_ENQ_EXITING */ + if (!static_branch_unlikely(&scx_ops_enq_exiting) && + unlikely(p->flags & PF_EXITING)) + goto local; + + /* see %SCX_OPS_ENQ_LAST */ + if (!static_branch_unlikely(&scx_ops_enq_last) && + (enq_flags & SCX_ENQ_LAST)) + goto local; + + if (!SCX_HAS_OP(enqueue)) { + if (enq_flags & SCX_ENQ_LOCAL) + goto local; + else + goto global; + } + + /* DSQ bypass didn't trigger, enqueue on the BPF scheduler */ + qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT; + + WARN_ON_ONCE(atomic64_read(&p->scx.ops_state) != SCX_OPSS_NONE); + atomic64_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq); + + ddsp_taskp = this_cpu_ptr(&direct_dispatch_task); + WARN_ON_ONCE(*ddsp_taskp); + *ddsp_taskp = p; + + SCX_CALL_OP(SCX_KF_ENQUEUE, enqueue, p, enq_flags); + + /* + * If not directly dispatched, QUEUEING isn't clear yet and dispatch or + * dequeue may be waiting. The store_release matches their load_acquire. + */ + if (*ddsp_taskp == p) + atomic64_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq); + *ddsp_taskp = NULL; + return; + +local: + p->scx.slice = SCX_SLICE_DFL; +local_norefill: + dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags); + return; + +global: + p->scx.slice = SCX_SLICE_DFL; + dispatch_enqueue(&scx_dsq_global, p, enq_flags); +} + +static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags) +{ + int sticky_cpu = p->scx.sticky_cpu; + + enq_flags |= rq->scx.extra_enq_flags; + + if (sticky_cpu >= 0) + p->scx.sticky_cpu = -1; + + /* + * Restoring a running task will be immediately followed by + * set_next_task_scx() which expects the task to not be on the BPF + * scheduler as tasks can only start running through local DSQs. Force + * direct-dispatch into the local DSQ by setting the sticky_cpu. + */ + if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p)) + sticky_cpu = cpu_of(rq); + + if (p->scx.flags & SCX_TASK_QUEUED) + return; + + p->scx.flags |= SCX_TASK_QUEUED; + rq->scx.nr_running++; + add_nr_running(rq, 1); + + do_enqueue_task(rq, p, enq_flags, sticky_cpu); +} + +static void ops_dequeue(struct task_struct *p, u64 deq_flags) +{ + u64 opss; + + /* acquire ensures that we see the preceding updates on QUEUED */ + opss = atomic64_read_acquire(&p->scx.ops_state); + + switch (opss & SCX_OPSS_STATE_MASK) { + case SCX_OPSS_NONE: + break; + case SCX_OPSS_QUEUEING: + /* + * QUEUEING is started and finished while holding @p's rq lock. + * As we're holding the rq lock now, we shouldn't see QUEUEING. + */ + BUG(); + case SCX_OPSS_QUEUED: + if (SCX_HAS_OP(dequeue)) + SCX_CALL_OP(SCX_KF_REST, dequeue, p, deq_flags); + + if (atomic64_try_cmpxchg(&p->scx.ops_state, &opss, + SCX_OPSS_NONE)) + break; + fallthrough; + case SCX_OPSS_DISPATCHING: + /* + * If @p is being dispatched from the BPF scheduler to a DSQ, + * wait for the transfer to complete so that @p doesn't get + * added to its DSQ after dequeueing is complete. + * + * As we're waiting on DISPATCHING with the rq locked, the + * dispatching side shouldn't try to lock the rq while + * DISPATCHING is set. See dispatch_to_local_dsq(). + * + * DISPATCHING shouldn't have qseq set and control can reach + * here with NONE @opss from the above QUEUED case block. + * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss. 
+ */ + wait_ops_state(p, SCX_OPSS_DISPATCHING); + BUG_ON(atomic64_read(&p->scx.ops_state) != SCX_OPSS_NONE); + break; + } +} + +static void dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags) +{ + struct scx_rq *scx_rq = &rq->scx; + + if (!(p->scx.flags & SCX_TASK_QUEUED)) + return; + + ops_dequeue(p, deq_flags); + + if (deq_flags & SCX_DEQ_SLEEP) + p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP; + else + p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP; + + p->scx.flags &= ~SCX_TASK_QUEUED; + scx_rq->nr_running--; + sub_nr_running(rq, 1); + + dispatch_dequeue(scx_rq, p); +} + +static void yield_task_scx(struct rq *rq) +{ + struct task_struct *p = rq->curr; + + if (SCX_HAS_OP(yield)) + SCX_CALL_OP_RET(SCX_KF_REST, yield, p, NULL); + else + p->scx.slice = 0; +} + +static bool yield_to_task_scx(struct rq *rq, struct task_struct *to) +{ + struct task_struct *from = rq->curr; + + if (SCX_HAS_OP(yield)) + return SCX_CALL_OP_RET(SCX_KF_REST, yield, from, to); + else + return false; +} + +#ifdef CONFIG_SMP +/** + * move_task_to_local_dsq - Move a task from a different rq to a local DSQ + * @rq: rq to move the task into, currently locked + * @p: task to move + * @enq_flags: %SCX_ENQ_* + * + * Move @p which is currently on a different rq to @rq's local DSQ. The caller + * must: + * + * 1. Start with exclusive access to @p either through its DSQ lock or + * %SCX_OPSS_DISPATCHING flag. + * + * 2. Set @p->scx.holding_cpu to raw_smp_processor_id(). + * + * 3. Remember task_rq(@p). Release the exclusive access so that we don't + * deadlock with dequeue. + * + * 4. Lock @rq and the task_rq from #3. + * + * 5. Call this function. + * + * Returns %true if @p was successfully moved. %false after racing dequeue and + * losing. + */ +static bool move_task_to_local_dsq(struct rq *rq, struct task_struct *p, + u64 enq_flags) +{ + struct rq *task_rq; + + lockdep_assert_rq_held(rq); + + /* + * If dequeue got to @p while we were trying to lock both rq's, it'd + * have cleared @p->scx.holding_cpu to -1. While other cpus may have + * updated it to different values afterwards, as this operation can't be + * preempted or recurse, @p->scx.holding_cpu can never become + * raw_smp_processor_id() again before we're done. Thus, we can tell + * whether we lost to dequeue by testing whether @p->scx.holding_cpu is + * still raw_smp_processor_id(). + * + * See dispatch_dequeue() for the counterpart. + */ + if (unlikely(p->scx.holding_cpu != raw_smp_processor_id())) + return false; + + /* @p->rq couldn't have changed if we're still the holding cpu */ + task_rq = task_rq(p); + lockdep_assert_rq_held(task_rq); + + WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(rq), p->cpus_ptr)); + deactivate_task(task_rq, p, 0); + set_task_cpu(p, cpu_of(rq)); + p->scx.sticky_cpu = cpu_of(rq); + + /* + * We want to pass scx-specific enq_flags but activate_task() will + * truncate the upper 32 bit. As we own @rq, we can pass them through + * @rq->scx.extra_enq_flags instead. + */ + WARN_ON_ONCE(rq->scx.extra_enq_flags); + rq->scx.extra_enq_flags = enq_flags; + activate_task(rq, p, 0); + rq->scx.extra_enq_flags = 0; + + return true; +} + +/** + * dispatch_to_local_dsq_lock - Ensure source and destination rq's are locked + * @rq: current rq which is locked + * @rf: rq_flags to use when unlocking @rq + * @src_rq: rq to move task from + * @dst_rq: rq to move task to + * + * We're holding @rq lock and trying to dispatch a task from @src_rq to + * @dst_rq's local DSQ and thus need to lock both @src_rq and @dst_rq.
Whether + * @rq stays locked isn't important as long as the state is restored after + * dispatch_to_local_dsq_unlock(). + */ +static void dispatch_to_local_dsq_lock(struct rq *rq, struct rq_flags *rf, + struct rq *src_rq, struct rq *dst_rq) +{ + rq_unpin_lock(rq, rf); + + if (src_rq == dst_rq) { + raw_spin_rq_unlock(rq); + raw_spin_rq_lock(dst_rq); + } else if (rq == src_rq) { + double_lock_balance(rq, dst_rq); + rq_repin_lock(rq, rf); + } else if (rq == dst_rq) { + double_lock_balance(rq, src_rq); + rq_repin_lock(rq, rf); + } else { + raw_spin_rq_unlock(rq); + double_rq_lock(src_rq, dst_rq); + } +} + +/** + * dispatch_to_local_dsq_unlock - Undo dispatch_to_local_dsq_lock() + * @rq: current rq which is locked + * @rf: rq_flags to use when unlocking @rq + * @src_rq: rq to move task from + * @dst_rq: rq to move task to + * + * Unlock @src_rq and @dst_rq and ensure that @rq is locked on return. + */ +static void dispatch_to_local_dsq_unlock(struct rq *rq, struct rq_flags *rf, + struct rq *src_rq, struct rq *dst_rq) +{ + if (src_rq == dst_rq) { + raw_spin_rq_unlock(dst_rq); + raw_spin_rq_lock(rq); + rq_repin_lock(rq, rf); + } else if (rq == src_rq) { + double_unlock_balance(rq, dst_rq); + } else if (rq == dst_rq) { + double_unlock_balance(rq, src_rq); + } else { + double_rq_unlock(src_rq, dst_rq); + raw_spin_rq_lock(rq); + rq_repin_lock(rq, rf); + } +} +#endif /* CONFIG_SMP */ + + +static bool consume_dispatch_q(struct rq *rq, struct rq_flags *rf, + struct scx_dispatch_q *dsq) +{ + struct scx_rq *scx_rq = &rq->scx; + struct task_struct *p; + struct rq *task_rq; + bool moved = false; +retry: + if (list_empty(&dsq->fifo)) + return false; + + raw_spin_lock(&dsq->lock); + list_for_each_entry(p, &dsq->fifo, scx.dsq_node) { + task_rq = task_rq(p); + if (rq == task_rq) + goto this_rq; + if (likely(test_rq_online(rq)) && !is_migration_disabled(p) && + cpumask_test_cpu(cpu_of(rq), p->cpus_ptr)) + goto remote_rq; + } + raw_spin_unlock(&dsq->lock); + return false; + +this_rq: + /* @dsq is locked and @p is on this rq */ + WARN_ON_ONCE(p->scx.holding_cpu >= 0); + list_move_tail(&p->scx.dsq_node, &scx_rq->local_dsq.fifo); + dsq->nr--; + scx_rq->local_dsq.nr++; + p->scx.dsq = &scx_rq->local_dsq; + raw_spin_unlock(&dsq->lock); + return true; + +remote_rq: +#ifdef CONFIG_SMP + /* + * @dsq is locked and @p is on a remote rq. @p is currently protected by + * @dsq->lock. We want to pull @p to @rq but may deadlock if we grab + * @task_rq while holding @dsq and @rq locks. As dequeue can't drop the + * rq lock or fail, do a little dancing from our side. See + * move_task_to_local_dsq(). 
+ */ + WARN_ON_ONCE(p->scx.holding_cpu >= 0); + list_del_init(&p->scx.dsq_node); + dsq->nr--; + p->scx.holding_cpu = raw_smp_processor_id(); + raw_spin_unlock(&dsq->lock); + + rq_unpin_lock(rq, rf); + double_lock_balance(rq, task_rq); + rq_repin_lock(rq, rf); + + moved = move_task_to_local_dsq(rq, p, 0); + + double_unlock_balance(rq, task_rq); +#endif /* CONFIG_SMP */ + if (likely(moved)) + return true; + goto retry; +} + +enum dispatch_to_local_dsq_ret { + DTL_DISPATCHED, /* successfully dispatched */ + DTL_LOST, /* lost race to dequeue */ + DTL_NOT_LOCAL, /* destination is not a local DSQ */ + DTL_INVALID, /* invalid local dsq_id */ +}; + +/** + * dispatch_to_local_dsq - Dispatch a task to a local dsq + * @rq: current rq which is locked + * @rf: rq_flags to use when unlocking @rq + * @dsq_id: destination dsq ID + * @p: task to dispatch + * @enq_flags: %SCX_ENQ_* + * + * We're holding @rq lock and want to dispatch @p to the local DSQ identified by + * @dsq_id. This function performs all the synchronization dancing needed + * because local DSQs are protected with rq locks. + * + * The caller must have exclusive ownership of @p (e.g. through + * %SCX_OPSS_DISPATCHING). + */ +static enum dispatch_to_local_dsq_ret +dispatch_to_local_dsq(struct rq *rq, struct rq_flags *rf, u64 dsq_id, + struct task_struct *p, u64 enq_flags) +{ + struct rq *src_rq = task_rq(p); + struct rq *dst_rq; + + /* + * We're synchronized against dequeue through DISPATCHING. As @p can't + * be dequeued, its task_rq and cpus_allowed are stable too. + */ + if (dsq_id == SCX_DSQ_LOCAL) { + dst_rq = rq; + } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) { + s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK; + + if (!ops_cpu_valid(cpu)) { + scx_ops_error("invalid cpu %d in SCX_DSQ_LOCAL_ON verdict for %s[%d]", + cpu, p->comm, p->pid); + return DTL_INVALID; + } + dst_rq = cpu_rq(cpu); + } else { + return DTL_NOT_LOCAL; + } + + /* if dispatching to @rq that @p is already on, no lock dancing needed */ + if (rq == src_rq && rq == dst_rq) { + dispatch_enqueue(&dst_rq->scx.local_dsq, p, + enq_flags | SCX_ENQ_CLEAR_OPSS); + return DTL_DISPATCHED; + } + +#ifdef CONFIG_SMP + if (cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr)) { + struct rq *locked_dst_rq = dst_rq; + bool dsp; + + /* + * @p is on a possibly remote @src_rq which we need to lock to + * move the task. If dequeue is in progress, it'd be locking + * @src_rq and waiting on DISPATCHING, so we can't grab @src_rq + * lock while holding DISPATCHING. + * + * As DISPATCHING guarantees that @p is wholly ours, we can + * pretend that we're moving from a DSQ and use the same + * mechanism - mark the task under transfer with holding_cpu, + * release DISPATCHING and then follow the same protocol. + */ + p->scx.holding_cpu = raw_smp_processor_id(); + + /* store_release ensures that dequeue sees the above */ + atomic64_set_release(&p->scx.ops_state, SCX_OPSS_NONE); + + dispatch_to_local_dsq_lock(rq, rf, src_rq, locked_dst_rq); + + /* + * We don't require the BPF scheduler to avoid dispatching to + * offline CPUs mostly for convenience but also because CPUs can + * go offline between scx_bpf_dispatch() calls and here. If @p + * is destined to an offline CPU, queue it on its current CPU + * instead, which should always be safe. As this is an allowed + * behavior, don't trigger an ops error. 
+ */ + if (unlikely(!test_rq_online(dst_rq))) + dst_rq = src_rq; + + if (src_rq == dst_rq) { + /* + * As @p is staying on the same rq, there's no need to + * go through the full deactivate/activate cycle. + * Optimize by abbreviating the operations in + * move_task_to_local_dsq(). + */ + dsp = p->scx.holding_cpu == raw_smp_processor_id(); + if (likely(dsp)) { + p->scx.holding_cpu = -1; + dispatch_enqueue(&dst_rq->scx.local_dsq, p, + enq_flags); + } + } else { + dsp = move_task_to_local_dsq(dst_rq, p, enq_flags); + } + + /* if the destination CPU is idle, wake it up */ + if (dsp && p->sched_class > dst_rq->curr->sched_class) + resched_curr(dst_rq); + + dispatch_to_local_dsq_unlock(rq, rf, src_rq, locked_dst_rq); + + return dsp ? DTL_DISPATCHED : DTL_LOST; + } +#endif /* CONFIG_SMP */ + + scx_ops_error("SCX_DSQ_LOCAL[_ON] verdict target cpu %d not allowed for %s[%d]", + cpu_of(dst_rq), p->comm, p->pid); + return DTL_INVALID; +} + +/** + * finish_dispatch - Asynchronously finish dispatching a task + * @rq: current rq which is locked + * @rf: rq_flags to use when unlocking @rq + * @p: task to finish dispatching + * @qseq_at_dispatch: qseq when @p started getting dispatched + * @dsq_id: destination DSQ ID + * @enq_flags: %SCX_ENQ_* + * + * Dispatching to local DSQs may need to wait for queueing to complete or + * require rq lock dancing. As we don't wanna do either while inside + * ops.dispatch() to avoid locking order inversion, we split dispatching into + * two parts. scx_bpf_dispatch() which is called by ops.dispatch() records the + * task and its qseq. Once ops.dispatch() returns, this function is called to + * finish up. + * + * There is no guarantee that @p is still valid for dispatching or even that it + * was valid in the first place. Make sure that the task is still owned by the + * BPF scheduler and claim the ownership before dispatching. + */ +static void finish_dispatch(struct rq *rq, struct rq_flags *rf, + struct task_struct *p, u64 qseq_at_dispatch, + u64 dsq_id, u64 enq_flags) +{ + struct scx_dispatch_q *dsq; + u64 opss; + +retry: + /* + * No need for _acquire here. @p is accessed only after a successful + * try_cmpxchg to DISPATCHING. + */ + opss = atomic64_read(&p->scx.ops_state); + + switch (opss & SCX_OPSS_STATE_MASK) { + case SCX_OPSS_DISPATCHING: + case SCX_OPSS_NONE: + /* someone else already got to it */ + return; + case SCX_OPSS_QUEUED: + /* + * If qseq doesn't match, @p has gone through at least one + * dispatch/dequeue and re-enqueue cycle between + * scx_bpf_dispatch() and here and we have no claim on it. + */ + if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch) + return; + + /* + * While we know @p is accessible, we don't yet have a claim on + * it - the BPF scheduler is allowed to dispatch tasks + * spuriously and there can be a racing dequeue attempt. Let's + * claim @p by atomically transitioning it from QUEUED to + * DISPATCHING. + */ + if (likely(atomic64_try_cmpxchg(&p->scx.ops_state, &opss, + SCX_OPSS_DISPATCHING))) + break; + goto retry; + case SCX_OPSS_QUEUEING: + /* + * do_enqueue_task() is in the process of transferring the task + * to the BPF scheduler while holding @p's rq lock. As we aren't + * holding any kernel or BPF resource that the enqueue path may + * depend upon, it's safe to wait. 
+ */ + wait_ops_state(p, opss); + goto retry; + } + + BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED)); + + switch (dispatch_to_local_dsq(rq, rf, dsq_id, p, enq_flags)) { + case DTL_DISPATCHED: + break; + case DTL_LOST: + break; + case DTL_INVALID: + dsq_id = SCX_DSQ_GLOBAL; + fallthrough; + case DTL_NOT_LOCAL: + dsq = find_dsq_for_dispatch(cpu_rq(raw_smp_processor_id()), + dsq_id, p); + dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS); + break; + } +} + +static void flush_dispatch_buf(struct rq *rq, struct rq_flags *rf) +{ + struct scx_dsp_ctx *dspc = this_cpu_ptr(&scx_dsp_ctx); + u32 u; + + for (u = 0; u < dspc->buf_cursor; u++) { + struct scx_dsp_buf_ent *ent = &this_cpu_ptr(scx_dsp_buf)[u]; + + finish_dispatch(rq, rf, ent->task, ent->qseq, ent->dsq_id, + ent->enq_flags); + } + + dspc->nr_tasks += dspc->buf_cursor; + dspc->buf_cursor = 0; +} + +static int balance_scx(struct rq *rq, struct task_struct *prev, + struct rq_flags *rf) +{ + struct scx_rq *scx_rq = &rq->scx; + struct scx_dsp_ctx *dspc = this_cpu_ptr(&scx_dsp_ctx); + bool prev_on_scx = prev->sched_class == &ext_sched_class; + + lockdep_assert_rq_held(rq); + + if (prev_on_scx) { + WARN_ON_ONCE(prev->scx.flags & SCX_TASK_BAL_KEEP); + update_curr_scx(rq); + + /* + * If @prev is runnable & has slice left, it has priority and + * fetching more just increases latency for the fetched tasks. + * Tell put_prev_task_scx() to put @prev on local_dsq. + * + * See scx_ops_disable_workfn() for the explanation on the + * disabling() test. + */ + if ((prev->scx.flags & SCX_TASK_QUEUED) && + prev->scx.slice && !scx_ops_disabling()) { + prev->scx.flags |= SCX_TASK_BAL_KEEP; + return 1; + } + } + + /* if there already are tasks to run, nothing to do */ + if (scx_rq->local_dsq.nr) + return 1; + + if (consume_dispatch_q(rq, rf, &scx_dsq_global)) + return 1; + + if (!SCX_HAS_OP(dispatch)) + return 0; + + dspc->rq = rq; + dspc->rf = rf; + + /* + * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock, + * the local DSQ might still end up empty after a successful + * ops.dispatch(). If the local DSQ is empty even after ops.dispatch() + * produced some tasks, retry. The BPF scheduler may depend on this + * looping behavior to simplify its implementation. + */ + do { + dspc->nr_tasks = 0; + + SCX_CALL_OP(SCX_KF_DISPATCH, dispatch, cpu_of(rq), + prev_on_scx ? prev : NULL); + + flush_dispatch_buf(rq, rf); + + if (scx_rq->local_dsq.nr) + return 1; + if (consume_dispatch_q(rq, rf, &scx_dsq_global)) + return 1; + } while (dspc->nr_tasks); + + return 0; +} + +static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first) +{ + if (p->scx.flags & SCX_TASK_QUEUED) { + WARN_ON_ONCE(atomic64_read(&p->scx.ops_state) != SCX_OPSS_NONE); + dispatch_dequeue(&rq->scx, p); + } + + p->se.exec_start = rq_clock_task(rq); +} + +static void put_prev_task_scx(struct rq *rq, struct task_struct *p) +{ +#ifndef CONFIG_SMP + /* + * UP workaround. + * + * Because SCX may transfer tasks across CPUs during dispatch, dispatch + * is performed from its balance operation which isn't called in UP. + * Let's work around by calling it from the operations which come right + * after. + * + * 1. If the prev task is on SCX, pick_next_task() calls + * .put_prev_task() right after. As .put_prev_task() is also called + * from other places, we need to distinguish the calls which can be + * done by looking at the previous task's state - if still queued or + * dequeued with %SCX_DEQ_SLEEP, the caller must be pick_next_task(). + * This case is handled here. + * + * 2. 
If the prev task is not on SCX, the first following call into SCX + * will be .pick_next_task(), which is covered by calling + * balance_scx() from pick_next_task_scx(). + * + * Note that we can't merge the first case into the second as + * balance_scx() must be called before the previous SCX task goes + * through put_prev_task_scx(). + * + * As UP doesn't transfer tasks around, balance_scx() doesn't need @rf. + * Pass in %NULL. + */ + if (p->scx.flags & (SCX_TASK_QUEUED | SCX_TASK_DEQD_FOR_SLEEP)) + balance_scx(rq, p, NULL); +#endif + + update_curr_scx(rq); + + /* + * If we're being called from put_prev_task_balance(), balance_scx() may + * have decided that @p should keep running. + */ + if (p->scx.flags & SCX_TASK_BAL_KEEP) { + p->scx.flags &= ~SCX_TASK_BAL_KEEP; + dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD); + return; + } + + if (p->scx.flags & SCX_TASK_QUEUED) { + /* + * If @p has slice left and balance_scx() didn't tag it for + * keeping, @p is getting preempted by a higher priority + * scheduler class. Leave it at the head of the local DSQ. + */ + if (p->scx.slice && !scx_ops_disabling()) { + dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD); + return; + } + + /* + * If we're in the pick_next_task path, balance_scx() should + * have already populated the local DSQ if there are any other + * available tasks. If empty, tell ops.enqueue() that @p is the + * only one available for this cpu. ops.enqueue() should put it + * on the local DSQ so that the subsequent pick_next_task_scx() + * can find the task unless it wants to trigger a separate + * follow-up scheduling event. + */ + if (list_empty(&rq->scx.local_dsq.fifo)) + do_enqueue_task(rq, p, SCX_ENQ_LAST | SCX_ENQ_LOCAL, -1); + else + do_enqueue_task(rq, p, 0, -1); + } +} + +static struct task_struct *first_local_task(struct rq *rq) +{ + return list_first_entry_or_null(&rq->scx.local_dsq.fifo, + struct task_struct, scx.dsq_node); +} + +static struct task_struct *pick_next_task_scx(struct rq *rq) +{ + struct task_struct *p; + +#ifndef CONFIG_SMP + /* UP workaround - see the comment at the head of put_prev_task_scx() */ + if (unlikely(rq->curr->sched_class != &ext_sched_class)) + balance_scx(rq, rq->curr, NULL); +#endif + + p = first_local_task(rq); + if (!p) + return NULL; + + if (unlikely(!p->scx.slice)) { + if (!scx_ops_disabling() && !scx_warned_zero_slice) { + printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in pick_next_task_scx()\n", + p->comm, p->pid); + scx_warned_zero_slice = true; + } + p->scx.slice = SCX_SLICE_DFL; + } + + set_next_task_scx(rq, p, true); + + return p; +} + +#ifdef CONFIG_SMP + +static bool test_and_clear_cpu_idle(int cpu) +{ + if (cpumask_test_and_clear_cpu(cpu, idle_masks.cpu)) { + if (cpumask_empty(idle_masks.cpu)) + scx_has_idle_cpus = false; + return true; + } else { + return false; + } +} + +static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed) +{ + int cpu; + + do { + cpu = cpumask_any_and_distribute(idle_masks.smt, cpus_allowed); + if (cpu < nr_cpu_ids) { + const struct cpumask *sbm = topology_sibling_cpumask(cpu); + + /* + * If offline, @cpu is not its own sibling and we can + * get caught in an infinite loop as @cpu is never + * cleared from idle_masks.smt. Clear @cpu directly in + * such cases. 
+ */ + if (likely(cpumask_test_cpu(cpu, sbm))) + cpumask_andnot(idle_masks.smt, idle_masks.smt, sbm); + else + cpumask_andnot(idle_masks.smt, idle_masks.smt, cpumask_of(cpu)); + } else { + cpu = cpumask_any_and_distribute(idle_masks.cpu, cpus_allowed); + if (cpu >= nr_cpu_ids) + return -EBUSY; + } + } while (!test_and_clear_cpu_idle(cpu)); + + return cpu; +} + +static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags) +{ + s32 cpu; + + if (!static_branch_likely(&scx_builtin_idle_enabled)) { + scx_ops_error("built-in idle tracking is disabled"); + return prev_cpu; + } + + /* + * If WAKE_SYNC and the machine isn't fully saturated, wake up @p to the + * local DSQ of the waker. + */ + if ((wake_flags & SCX_WAKE_SYNC) && p->nr_cpus_allowed > 1 && + scx_has_idle_cpus && !(current->flags & PF_EXITING)) { + cpu = smp_processor_id(); + if (cpumask_test_cpu(cpu, p->cpus_ptr)) { + p->scx.flags |= SCX_TASK_ENQ_LOCAL; + return cpu; + } + } + + /* if the previous CPU is idle, dispatch directly to it */ + if (test_and_clear_cpu_idle(prev_cpu)) { + p->scx.flags |= SCX_TASK_ENQ_LOCAL; + return prev_cpu; + } + + if (p->nr_cpus_allowed == 1) + return prev_cpu; + + cpu = scx_pick_idle_cpu(p->cpus_ptr); + if (cpu >= 0) { + p->scx.flags |= SCX_TASK_ENQ_LOCAL; + return cpu; + } + + return prev_cpu; +} + +static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags) +{ + if (SCX_HAS_OP(select_cpu)) { + s32 cpu; + + cpu = SCX_CALL_OP_RET(SCX_KF_REST, select_cpu, p, prev_cpu, + wake_flags); + if (ops_cpu_valid(cpu)) { + return cpu; + } else { + scx_ops_error("select_cpu returned invalid cpu %d", cpu); + return prev_cpu; + } + } else { + return scx_select_cpu_dfl(p, prev_cpu, wake_flags); + } +} + +static void set_cpus_allowed_scx(struct task_struct *p, + struct affinity_context *ac) +{ + set_cpus_allowed_common(p, ac); + + /* + * The effective cpumask is stored in @p->cpus_ptr which may temporarily + * differ from the configured one in @p->cpus_mask. Always tell the bpf + * scheduler the effective one. + * + * Fine-grained memory write control is enforced by BPF making the const + * designation pointless. Cast it away when calling the operation. + */ + if (SCX_HAS_OP(set_cpumask)) + SCX_CALL_OP(SCX_KF_REST, set_cpumask, p, + (struct cpumask *)p->cpus_ptr); +} + +static void reset_idle_masks(void) +{ + /* consider all cpus idle, should converge to the actual state quickly */ + cpumask_setall(idle_masks.cpu); + cpumask_setall(idle_masks.smt); + scx_has_idle_cpus = true; +} + +void __scx_update_idle(struct rq *rq, bool idle) +{ + int cpu = cpu_of(rq); + struct cpumask *sib_mask = topology_sibling_cpumask(cpu); + + if (SCX_HAS_OP(update_idle)) { + SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle); + if (!static_branch_unlikely(&scx_builtin_idle_enabled)) + return; + } + + if (idle) { + cpumask_set_cpu(cpu, idle_masks.cpu); + if (!scx_has_idle_cpus) + scx_has_idle_cpus = true; + + /* + * idle_masks.smt handling is racy but that's fine as it's only + * for optimization and self-correcting. 
+ */ + for_each_cpu(cpu, sib_mask) { + if (!cpumask_test_cpu(cpu, idle_masks.cpu)) + return; + } + cpumask_or(idle_masks.smt, idle_masks.smt, sib_mask); + } else { + cpumask_clear_cpu(cpu, idle_masks.cpu); + if (scx_has_idle_cpus && cpumask_empty(idle_masks.cpu)) + scx_has_idle_cpus = false; + + cpumask_andnot(idle_masks.smt, idle_masks.smt, sib_mask); + } +} + +#else /* !CONFIG_SMP */ + +static bool test_and_clear_cpu_idle(int cpu) { return false; } +static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed) { return -EBUSY; } +static void reset_idle_masks(void) {} + +#endif /* CONFIG_SMP */ + +static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued) +{ + update_curr_scx(rq); + + /* + * While disabling, always resched as we can't trust the slice + * management. + */ + if (scx_ops_disabling()) + curr->scx.slice = 0; + + if (!curr->scx.slice) + resched_curr(rq); +} + +static int scx_ops_prepare_task(struct task_struct *p, struct task_group *tg) +{ + int ret; + + WARN_ON_ONCE(p->scx.flags & SCX_TASK_OPS_PREPPED); + + if (SCX_HAS_OP(prep_enable)) { + struct scx_enable_args args = { }; + + ret = SCX_CALL_OP_RET(SCX_KF_SLEEPABLE, prep_enable, p, &args); + if (unlikely(ret)) { + ret = ops_sanitize_err("prep_enable", ret); + return ret; + } + } + + p->scx.flags |= SCX_TASK_OPS_PREPPED; + return 0; +} + +static void scx_ops_enable_task(struct task_struct *p) +{ + lockdep_assert_rq_held(task_rq(p)); + WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_OPS_PREPPED)); + + if (SCX_HAS_OP(enable)) { + struct scx_enable_args args = { }; + SCX_CALL_OP(SCX_KF_REST, enable, p, &args); + } + p->scx.flags &= ~SCX_TASK_OPS_PREPPED; + p->scx.flags |= SCX_TASK_OPS_ENABLED; +} + +static void scx_ops_disable_task(struct task_struct *p) +{ + lockdep_assert_rq_held(task_rq(p)); + + if (p->scx.flags & SCX_TASK_OPS_PREPPED) { + if (SCX_HAS_OP(cancel_enable)) { + struct scx_enable_args args = { }; + SCX_CALL_OP(SCX_KF_REST, cancel_enable, p, &args); + } + p->scx.flags &= ~SCX_TASK_OPS_PREPPED; + } else if (p->scx.flags & SCX_TASK_OPS_ENABLED) { + if (SCX_HAS_OP(disable)) + SCX_CALL_OP(SCX_KF_REST, disable, p); + p->scx.flags &= ~SCX_TASK_OPS_ENABLED; + } +} + +/** + * refresh_scx_weight - Refresh a task's ext weight + * @p: task to refresh ext weight for + * + * @p->scx.weight carries the task's static priority in cgroup weight scale to + * enable easy access from the BPF scheduler. To keep it synchronized with the + * current task priority, this function should be called when a new task is + * created, priority is changed for a task on sched_ext, and a task is switched + * to sched_ext from other classes. + */ +static void refresh_scx_weight(struct task_struct *p) +{ + u32 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO]; + + p->scx.weight = sched_weight_to_cgroup(weight); + if (SCX_HAS_OP(set_weight)) + SCX_CALL_OP(SCX_KF_REST, set_weight, p, p->scx.weight); +} + +void scx_pre_fork(struct task_struct *p) +{ + /* + * BPF scheduler enable/disable paths want to be able to iterate and + * update all tasks which can become complex when racing forks. As + * enable/disable are very cold paths, let's use a percpu_rwsem to + * exclude forks. 
+ */ + percpu_down_read(&scx_fork_rwsem); +} + +int scx_fork(struct task_struct *p) +{ + percpu_rwsem_assert_held(&scx_fork_rwsem); + + if (scx_enabled()) + return scx_ops_prepare_task(p, task_group(p)); + else + return 0; +} + +void scx_post_fork(struct task_struct *p) +{ + refresh_scx_weight(p); + + if (scx_enabled()) { + struct rq_flags rf; + struct rq *rq; + + rq = task_rq_lock(p, &rf); + scx_ops_enable_task(p); + task_rq_unlock(rq, p, &rf); + } + + spin_lock_irq(&scx_tasks_lock); + list_add_tail(&p->scx.tasks_node, &scx_tasks); + spin_unlock_irq(&scx_tasks_lock); + + percpu_up_read(&scx_fork_rwsem); +} + +void scx_cancel_fork(struct task_struct *p) +{ + if (scx_enabled()) + scx_ops_disable_task(p); + percpu_up_read(&scx_fork_rwsem); +} + +void sched_ext_free(struct task_struct *p) +{ + unsigned long flags; + + spin_lock_irqsave(&scx_tasks_lock, flags); + list_del_init(&p->scx.tasks_node); + spin_unlock_irqrestore(&scx_tasks_lock, flags); + + /* + * @p is off scx_tasks and wholly ours. scx_ops_enable()'s PREPPED -> + * ENABLED transitions can't race us. Disable ops for @p. + */ + if (p->scx.flags & (SCX_TASK_OPS_PREPPED | SCX_TASK_OPS_ENABLED)) { + struct rq_flags rf; + struct rq *rq; + + rq = task_rq_lock(p, &rf); + scx_ops_disable_task(p); + task_rq_unlock(rq, p, &rf); + } +} + +static void reweight_task_scx(struct rq *rq, struct task_struct *p, int newprio) +{ + refresh_scx_weight(p); +} + +static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio) +{ +} + +static void switching_to_scx(struct rq *rq, struct task_struct *p) +{ + refresh_scx_weight(p); + + /* + * set_cpus_allowed_scx() is not called while @p is associated with a + * different scheduler class. Keep the BPF scheduler up-to-date. + */ + if (SCX_HAS_OP(set_cpumask)) + SCX_CALL_OP(SCX_KF_REST, set_cpumask, p, + (struct cpumask *)p->cpus_ptr); +} + +static void check_preempt_curr_scx(struct rq *rq, struct task_struct *p, int wake_flags) {} +static void switched_to_scx(struct rq *rq, struct task_struct *p) {} + +/* + * Omitted operations: + * + * - check_preempt_curr: NOOP as it isn't useful in the wakeup path because the + * task isn't tied to the CPU at that point. + * + * - migrate_task_rq: Unnecessary as task to cpu mapping is transient. + * + * - task_fork/dead: We need fork/dead notifications for all tasks regardless of + * their current sched_class. Call them directly from sched core instead. + * + * - task_woken, switched_from: Unnecessary.
+ */ +DEFINE_SCHED_CLASS(ext) = { + .enqueue_task = enqueue_task_scx, + .dequeue_task = dequeue_task_scx, + .yield_task = yield_task_scx, + .yield_to_task = yield_to_task_scx, + + .check_preempt_curr = check_preempt_curr_scx, + + .pick_next_task = pick_next_task_scx, + + .put_prev_task = put_prev_task_scx, + .set_next_task = set_next_task_scx, + +#ifdef CONFIG_SMP + .balance = balance_scx, + .select_task_rq = select_task_rq_scx, + .set_cpus_allowed = set_cpus_allowed_scx, +#endif + + .task_tick = task_tick_scx, + + .switching_to = switching_to_scx, + .switched_to = switched_to_scx, + .reweight_task = reweight_task_scx, + .prio_changed = prio_changed_scx, + + .update_curr = update_curr_scx, + +#ifdef CONFIG_UCLAMP_TASK + .uclamp_enabled = 0, +#endif +}; + +static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id) +{ + memset(dsq, 0, sizeof(*dsq)); + + raw_spin_lock_init(&dsq->lock); + INIT_LIST_HEAD(&dsq->fifo); + dsq->id = dsq_id; +} + +static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node) +{ + struct scx_dispatch_q *dsq; + int ret; + + if (dsq_id & SCX_DSQ_FLAG_BUILTIN) + return ERR_PTR(-EINVAL); + + dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node); + if (!dsq) + return ERR_PTR(-ENOMEM); + + init_dsq(dsq, dsq_id); + + ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node, + dsq_hash_params); + if (ret) { + kfree(dsq); + return ERR_PTR(ret); + } + return dsq; +} + +static void free_dsq_irq_workfn(struct irq_work *irq_work) +{ + struct llist_node *to_free = llist_del_all(&dsqs_to_free); + struct scx_dispatch_q *dsq, *tmp_dsq; + + llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node) + kfree_rcu(dsq); +} + +static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn); + +static void destroy_dsq(u64 dsq_id) +{ + struct scx_dispatch_q *dsq; + unsigned long flags; + + rcu_read_lock(); + + dsq = rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params); + if (!dsq) + goto out_unlock_rcu; + + raw_spin_lock_irqsave(&dsq->lock, flags); + + if (dsq->nr) { + scx_ops_error("attempting to destroy in-use dsq 0x%016llx (nr=%u)", + dsq->id, dsq->nr); + goto out_unlock_dsq; + } + + if (rhashtable_remove_fast(&dsq_hash, &dsq->hash_node, dsq_hash_params)) + goto out_unlock_dsq; + + /* + * Mark dead by invalidating ->id to prevent dispatch_enqueue() from + * queueing more tasks. As this function can be called from anywhere, + * freeing is bounced through an irq work to avoid nesting RCU + * operations inside scheduler locks. + */ + dsq->id = SCX_DSQ_INVALID; + llist_add(&dsq->free_node, &dsqs_to_free); + irq_work_queue(&free_dsq_irq_work); + +out_unlock_dsq: + raw_spin_unlock_irqrestore(&dsq->lock, flags); +out_unlock_rcu: + rcu_read_unlock(); +} + +/* + * Used by sched_fork() and __setscheduler_prio() to pick the matching + * sched_class. dl/rt are already handled. 
+ */ +bool task_on_scx(struct task_struct *p) +{ + if (!scx_enabled() || scx_ops_disabling()) + return false; + return p->policy == SCHED_EXT; +} + +static void scx_ops_fallback_enqueue(struct task_struct *p, u64 enq_flags) +{ + if (enq_flags & SCX_ENQ_LAST) + scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags); + else + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags); +} + +static void scx_ops_fallback_dispatch(s32 cpu, struct task_struct *prev) {} + +static void scx_ops_disable_workfn(struct kthread_work *work) +{ + struct scx_exit_info *ei = &scx_exit_info; + struct scx_task_iter sti; + struct task_struct *p; + struct rhashtable_iter rht_iter; + struct scx_dispatch_q *dsq; + const char *reason; + int i, type; + + type = atomic_read(&scx_exit_type); + while (true) { + /* + * NONE indicates that a new scx_ops has been registered since + * disable was scheduled - don't kill the new ops. DONE + * indicates that the ops has already been disabled. + */ + if (type == SCX_EXIT_NONE || type == SCX_EXIT_DONE) + return; + if (atomic_try_cmpxchg(&scx_exit_type, &type, SCX_EXIT_DONE)) + break; + } + + switch (type) { + case SCX_EXIT_UNREG: + reason = "BPF scheduler unregistered"; + break; + case SCX_EXIT_ERROR: + reason = "runtime error"; + break; + case SCX_EXIT_ERROR_BPF: + reason = "scx_bpf_error"; + break; + default: + reason = ""; + } + + ei->type = type; + strlcpy(ei->reason, reason, sizeof(ei->reason)); + + switch (scx_ops_set_enable_state(SCX_OPS_DISABLING)) { + case SCX_OPS_DISABLED: + pr_warn("sched_ext: ops error detected without ops (%s)\n", + scx_exit_info.msg); + WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) != + SCX_OPS_DISABLING); + return; + case SCX_OPS_PREPPING: + goto forward_progress_guaranteed; + case SCX_OPS_DISABLING: + /* shouldn't happen but handle it like ENABLING if it does */ + WARN_ONCE(true, "sched_ext: duplicate disabling instance?"); + fallthrough; + case SCX_OPS_ENABLING: + case SCX_OPS_ENABLED: + break; + } + + /* + * DISABLING is set and ops was either ENABLING or ENABLED indicating + * that the ops and static branches are set. + * + * We must guarantee that all runnable tasks make forward progress + * without trusting the BPF scheduler. We can't grab any mutexes or + * rwsems as they might be held by tasks that the BPF scheduler is + * forgetting to run, which unfortunately also excludes toggling the + * static branches. + * + * Let's work around by overriding a couple ops and modifying behaviors + * based on the DISABLING state and then cycling the tasks through + * dequeue/enqueue to force global FIFO scheduling. + * + * a. ops.enqueue() and .dispatch() are overridden for simple global + * FIFO scheduling. + * + * b. balance_scx() never sets %SCX_TASK_BAL_KEEP as the slice value + * can't be trusted. Whenever a tick triggers, the running task is + * rotated to the tail of the queue. + * + * c. pick_next_task() suppresses zero slice warning. 
+ */ + scx_ops.enqueue = scx_ops_fallback_enqueue; + scx_ops.dispatch = scx_ops_fallback_dispatch; + + spin_lock_irq(&scx_tasks_lock); + scx_task_iter_init(&sti); + while ((p = scx_task_iter_next_filtered_locked(&sti))) { + if (READ_ONCE(p->__state) != TASK_DEAD) { + SCHED_CHANGE_BLOCK(task_rq(p), p, + DEQUEUE_SAVE | DEQUEUE_MOVE) { + /* cycling deq/enq is enough, see above */ + } + } + } + scx_task_iter_exit(&sti); + spin_unlock_irq(&scx_tasks_lock); + +forward_progress_guaranteed: + /* + * Here, every runnable task is guaranteed to make forward progress and + * we can safely use blocking synchronization constructs. Actually + * disable ops. + */ + mutex_lock(&scx_ops_enable_mutex); + + /* avoid racing against fork */ + cpus_read_lock(); + percpu_down_write(&scx_fork_rwsem); + + spin_lock_irq(&scx_tasks_lock); + scx_task_iter_init(&sti); + while ((p = scx_task_iter_next_filtered_locked(&sti))) { + const struct sched_class *old_class = p->sched_class; + struct rq *rq = task_rq(p); + bool alive = READ_ONCE(p->__state) != TASK_DEAD; + + update_rq_clock(rq); + + SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_MOVE | + DEQUEUE_NOCLOCK) { + p->scx.slice = min_t(u64, p->scx.slice, SCX_SLICE_DFL); + + __setscheduler_prio(p, p->prio); + if (alive) + check_class_changing(task_rq(p), p, old_class); + } + + if (alive) + check_class_changed(task_rq(p), p, old_class, p->prio); + + scx_ops_disable_task(p); + } + scx_task_iter_exit(&sti); + spin_unlock_irq(&scx_tasks_lock); + + /* no task is on scx, turn off all the switches and flush in-progress calls */ + static_branch_disable_cpuslocked(&__scx_ops_enabled); + for (i = 0; i < SCX_NR_ONLINE_OPS; i++) + static_branch_disable_cpuslocked(&scx_has_op[i]); + static_branch_disable_cpuslocked(&scx_ops_enq_last); + static_branch_disable_cpuslocked(&scx_ops_enq_exiting); + static_branch_disable_cpuslocked(&scx_builtin_idle_enabled); + synchronize_rcu(); + + percpu_up_write(&scx_fork_rwsem); + cpus_read_unlock(); + + if (ei->type >= SCX_EXIT_ERROR) { + printk(KERN_ERR "sched_ext: BPF scheduler \"%s\" errored, disabling\n", scx_ops.name); + + if (ei->msg[0] == '\0') + printk(KERN_ERR "sched_ext: %s\n", ei->reason); + else + printk(KERN_ERR "sched_ext: %s (%s)\n", ei->reason, ei->msg); + + stack_trace_print(ei->bt, ei->bt_len, 2); + } + + if (scx_ops.exit) + SCX_CALL_OP(SCX_KF_UNLOCKED, exit, ei); + + memset(&scx_ops, 0, sizeof(scx_ops)); + + rhashtable_walk_enter(&dsq_hash, &rht_iter); + do { + rhashtable_walk_start(&rht_iter); + + while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq)) + destroy_dsq(dsq->id); + + rhashtable_walk_stop(&rht_iter); + } while (dsq == ERR_PTR(-EAGAIN)); + rhashtable_walk_exit(&rht_iter); + + free_percpu(scx_dsp_buf); + scx_dsp_buf = NULL; + scx_dsp_max_batch = 0; + + mutex_unlock(&scx_ops_enable_mutex); + + WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) != + SCX_OPS_DISABLING); +} + +static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn); + +static void schedule_scx_ops_disable_work(void) +{ + struct kthread_worker *helper = READ_ONCE(scx_ops_helper); + + /* + * We may be called spuriously before the first bpf_sched_ext_reg(). If + * scx_ops_helper isn't set up yet, there's nothing to do. 
+ */ + if (helper) + kthread_queue_work(helper, &scx_ops_disable_work); +} + +static void scx_ops_disable(enum scx_exit_type type) +{ + int none = SCX_EXIT_NONE; + + if (WARN_ON_ONCE(type == SCX_EXIT_NONE || type == SCX_EXIT_DONE)) + type = SCX_EXIT_ERROR; + + atomic_try_cmpxchg(&scx_exit_type, &none, type); + + schedule_scx_ops_disable_work(); +} + +static void scx_ops_error_irq_workfn(struct irq_work *irq_work) +{ + schedule_scx_ops_disable_work(); +} + +static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn); + +__printf(2, 3) static void scx_ops_error_type(enum scx_exit_type type, + const char *fmt, ...) +{ + struct scx_exit_info *ei = &scx_exit_info; + int none = SCX_EXIT_NONE; + va_list args; + + if (!atomic_try_cmpxchg(&scx_exit_type, &none, type)) + return; + + ei->bt_len = stack_trace_save(ei->bt, ARRAY_SIZE(ei->bt), 1); + + va_start(args, fmt); + vscnprintf(ei->msg, ARRAY_SIZE(ei->msg), fmt, args); + va_end(args); + + irq_work_queue(&scx_ops_error_irq_work); +} + +static struct kthread_worker *scx_create_rt_helper(const char *name) +{ + struct kthread_worker *helper; + + helper = kthread_create_worker(0, name); + if (helper) + sched_set_fifo(helper->task); + return helper; +} + +static int scx_ops_enable(struct sched_ext_ops *ops) +{ + struct scx_task_iter sti; + struct task_struct *p; + int i, ret; + + mutex_lock(&scx_ops_enable_mutex); + + if (!scx_ops_helper) { + WRITE_ONCE(scx_ops_helper, + scx_create_rt_helper("sched_ext_ops_helper")); + if (!scx_ops_helper) { + ret = -ENOMEM; + goto err_unlock; + } + } + + if (scx_ops_enable_state() != SCX_OPS_DISABLED) { + ret = -EBUSY; + goto err_unlock; + } + + /* + * Set scx_ops, transition to PREPPING and clear exit info to arm the + * disable path. Failure triggers full disabling from here on. + */ + scx_ops = *ops; + + WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_PREPPING) != + SCX_OPS_DISABLED); + + memset(&scx_exit_info, 0, sizeof(scx_exit_info)); + atomic_set(&scx_exit_type, SCX_EXIT_NONE); + scx_warned_zero_slice = false; + + atomic64_set(&scx_nr_rejected, 0); + + /* + * Keep CPUs stable during enable so that the BPF scheduler can track + * online CPUs by watching ->on/offline_cpu() after ->init(). + */ + cpus_read_lock(); + + if (scx_ops.init) { + ret = SCX_CALL_OP_RET(SCX_KF_INIT, init); + if (ret) { + ret = ops_sanitize_err("init", ret); + goto err_disable; + } + + /* + * Exit early if ops.init() triggered scx_bpf_error(). Not + * strictly necessary as we'll fail transitioning into ENABLING + * later but that'd be after calling ops.prep_enable() on all + * tasks and with -EBUSY which isn't very intuitive. Let's exit + * early with success so that the condition is notified through + * ops.exit() like other scx_bpf_error() invocations. + */ + if (atomic_read(&scx_exit_type) != SCX_EXIT_NONE) + goto err_disable; + } + + WARN_ON_ONCE(scx_dsp_buf); + scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH; + scx_dsp_buf = __alloc_percpu(sizeof(scx_dsp_buf[0]) * scx_dsp_max_batch, + __alignof__(scx_dsp_buf[0])); + if (!scx_dsp_buf) { + ret = -ENOMEM; + goto err_disable; + } + + /* + * Lock out forks before opening the floodgate so that they don't wander + * into the operations prematurely. 
+ */ + percpu_down_write(&scx_fork_rwsem); + + for (i = 0; i < SCX_NR_ONLINE_OPS; i++) + if (((void (**)(void))ops)[i]) + static_branch_enable_cpuslocked(&scx_has_op[i]); + + if (ops->flags & SCX_OPS_ENQ_LAST) + static_branch_enable_cpuslocked(&scx_ops_enq_last); + + if (ops->flags & SCX_OPS_ENQ_EXITING) + static_branch_enable_cpuslocked(&scx_ops_enq_exiting); + + if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) { + reset_idle_masks(); + static_branch_enable_cpuslocked(&scx_builtin_idle_enabled); + } else { + static_branch_disable_cpuslocked(&scx_builtin_idle_enabled); + } + + static_branch_enable_cpuslocked(&__scx_ops_enabled); + + /* + * Enable ops for every task. Fork is excluded by scx_fork_rwsem + * preventing new tasks from being added. No need to exclude tasks + * leaving as sched_ext_free() can handle both prepped and enabled + * tasks. Prep all tasks first and then enable them with preemption + * disabled. + */ + spin_lock_irq(&scx_tasks_lock); + + scx_task_iter_init(&sti); + while ((p = scx_task_iter_next_filtered(&sti))) { + get_task_struct(p); + spin_unlock_irq(&scx_tasks_lock); + + ret = scx_ops_prepare_task(p, task_group(p)); + if (ret) { + put_task_struct(p); + spin_lock_irq(&scx_tasks_lock); + scx_task_iter_exit(&sti); + spin_unlock_irq(&scx_tasks_lock); + pr_err("sched_ext: ops.prep_enable() failed (%d) for %s[%d] while loading\n", + ret, p->comm, p->pid); + goto err_disable_unlock; + } + + put_task_struct(p); + spin_lock_irq(&scx_tasks_lock); + } + scx_task_iter_exit(&sti); + + /* + * All tasks are prepped but are still ops-disabled. Ensure that + * %current can't be scheduled out and switch everyone. + * preempt_disable() is necessary because we can't guarantee that + * %current won't be starved if scheduled out while switching. + */ + preempt_disable(); + + /* + * From here on, the disable path must assume that tasks have ops + * enabled and need to be recovered. + */ + if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLING, SCX_OPS_PREPPING)) { + preempt_enable(); + spin_unlock_irq(&scx_tasks_lock); + ret = -EBUSY; + goto err_disable_unlock; + } + + /* + * We're fully committed and can't fail. The PREPPED -> ENABLED + * transitions here are synchronized against sched_ext_free() through + * scx_tasks_lock. 
+ */ + scx_task_iter_init(&sti); + while ((p = scx_task_iter_next_filtered_locked(&sti))) { + if (READ_ONCE(p->__state) != TASK_DEAD) { + const struct sched_class *old_class = p->sched_class; + struct rq *rq = task_rq(p); + + update_rq_clock(rq); + + SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_MOVE | + DEQUEUE_NOCLOCK) { + scx_ops_enable_task(p); + __setscheduler_prio(p, p->prio); + check_class_changing(task_rq(p), p, old_class); + } + + check_class_changed(task_rq(p), p, old_class, p->prio); + } else { + scx_ops_disable_task(p); + } + } + scx_task_iter_exit(&sti); + + spin_unlock_irq(&scx_tasks_lock); + preempt_enable(); + percpu_up_write(&scx_fork_rwsem); + + if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) { + ret = -EBUSY; + goto err_disable_unlock; + } + + cpus_read_unlock(); + mutex_unlock(&scx_ops_enable_mutex); + + return 0; + +err_unlock: + mutex_unlock(&scx_ops_enable_mutex); + return ret; + +err_disable_unlock: + percpu_up_write(&scx_fork_rwsem); +err_disable: + cpus_read_unlock(); + mutex_unlock(&scx_ops_enable_mutex); + /* must be fully disabled before returning */ + scx_ops_disable(SCX_EXIT_ERROR); + kthread_flush_work(&scx_ops_disable_work); + return ret; +} + +#ifdef CONFIG_SCHED_DEBUG +static const char *scx_ops_enable_state_str[] = { + [SCX_OPS_PREPPING] = "prepping", + [SCX_OPS_ENABLING] = "enabling", + [SCX_OPS_ENABLED] = "enabled", + [SCX_OPS_DISABLING] = "disabling", + [SCX_OPS_DISABLED] = "disabled", +}; + +static int scx_debug_show(struct seq_file *m, void *v) +{ + mutex_lock(&scx_ops_enable_mutex); + seq_printf(m, "%-30s: %s\n", "ops", scx_ops.name); + seq_printf(m, "%-30s: %ld\n", "enabled", scx_enabled()); + seq_printf(m, "%-30s: %s\n", "enable_state", + scx_ops_enable_state_str[scx_ops_enable_state()]); + seq_printf(m, "%-30s: %llu\n", "nr_rejected", + atomic64_read(&scx_nr_rejected)); + mutex_unlock(&scx_ops_enable_mutex); + return 0; +} + +static int scx_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, scx_debug_show, NULL); +} + +const struct file_operations sched_ext_fops = { + .open = scx_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +/******************************************************************************** + * bpf_struct_ops plumbing. 
+ */ +#include +#include +#include + +extern struct btf *btf_vmlinux; +static const struct btf_type *task_struct_type; + +static bool bpf_scx_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) + return false; + if (type != BPF_READ) + return false; + if (off % size != 0) + return false; + + return btf_ctx_access(off, size, type, prog, info); +} + +static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log, + const struct bpf_reg_state *reg, int off, + int size, enum bpf_access_type atype, + u32 *next_btf_id, enum bpf_type_flag *flag) +{ + const struct btf_type *t; + + t = btf_type_by_id(reg->btf, reg->btf_id); + if (t == task_struct_type) { + if (off >= offsetof(struct task_struct, scx.slice) && + off + size <= offsetofend(struct task_struct, scx.slice)) + return SCALAR_VALUE; + } + + if (atype == BPF_READ) + return btf_struct_access(log, reg, off, size, atype, + next_btf_id, flag); + + bpf_log(log, "only read is supported\n"); + return -EACCES; +} + +static const struct bpf_func_proto * +bpf_scx_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) +{ + switch (func_id) { + case BPF_FUNC_task_storage_get: + return &bpf_task_storage_get_proto; + case BPF_FUNC_task_storage_delete: + return &bpf_task_storage_delete_proto; + default: + return bpf_base_func_proto(func_id); + } +} + +const struct bpf_verifier_ops bpf_scx_verifier_ops = { + .get_func_proto = bpf_scx_get_func_proto, + .is_valid_access = bpf_scx_is_valid_access, + .btf_struct_access = bpf_scx_btf_struct_access, +}; + +static int bpf_scx_init_member(const struct btf_type *t, + const struct btf_member *member, + void *kdata, const void *udata) +{ + const struct sched_ext_ops *uops = udata; + struct sched_ext_ops *ops = kdata; + u32 moff = __btf_member_bit_offset(t, member) / 8; + int ret; + + switch (moff) { + case offsetof(struct sched_ext_ops, dispatch_max_batch): + if (*(u32 *)(udata + moff) > INT_MAX) + return -E2BIG; + ops->dispatch_max_batch = *(u32 *)(udata + moff); + return 1; + case offsetof(struct sched_ext_ops, flags): + if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS) + return -EINVAL; + ops->flags = *(u64 *)(udata + moff); + return 1; + case offsetof(struct sched_ext_ops, name): + ret = bpf_obj_name_cpy(ops->name, uops->name, + sizeof(ops->name)); + if (ret < 0) + return ret; + if (ret == 0) + return -EINVAL; + return 1; + } + + return 0; +} + +static int bpf_scx_check_member(const struct btf_type *t, + const struct btf_member *member, + const struct bpf_prog *prog) +{ + u32 moff = __btf_member_bit_offset(t, member) / 8; + + switch (moff) { + case offsetof(struct sched_ext_ops, prep_enable): + case offsetof(struct sched_ext_ops, init): + case offsetof(struct sched_ext_ops, exit): + break; + default: + if (prog->aux->sleepable) + return -EINVAL; + } + + return 0; +} + +static int bpf_scx_reg(void *kdata) +{ + return scx_ops_enable(kdata); +} + +static void bpf_scx_unreg(void *kdata) +{ + scx_ops_disable(SCX_EXIT_UNREG); + kthread_flush_work(&scx_ops_disable_work); +} + +static int bpf_scx_init(struct btf *btf) +{ + u32 type_id; + + type_id = btf_find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT); + if (type_id < 0) + return -EINVAL; + task_struct_type = btf_type_by_id(btf, type_id); + + return 0; +} + +/* "extern" to avoid sparse warning, only used in this file */ +extern struct bpf_struct_ops bpf_sched_ext_ops; + +struct bpf_struct_ops bpf_sched_ext_ops 
= { + .verifier_ops = &bpf_scx_verifier_ops, + .reg = bpf_scx_reg, + .unreg = bpf_scx_unreg, + .check_member = bpf_scx_check_member, + .init_member = bpf_scx_init_member, + .init = bpf_scx_init, + .name = "sched_ext_ops", +}; + +void __init init_sched_ext_class(void) +{ + int cpu; + u32 v; + + /* + * The following is to prevent the compiler from optimizing out the enum + * definitions so that BPF scheduler implementations can use them + * through the generated vmlinux.h. + */ + WRITE_ONCE(v, SCX_WAKE_EXEC | SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP); + + BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params)); + init_dsq(&scx_dsq_global, SCX_DSQ_GLOBAL); +#ifdef CONFIG_SMP + BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL)); + BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL)); +#endif + for_each_possible_cpu(cpu) { + struct rq *rq = cpu_rq(cpu); + + init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL); + } +} + + +/******************************************************************************** + * Helpers that can be called from the BPF scheduler. + */ +#include + +/* Disables missing prototype warnings for kfuncs */ +__diag_push(); +__diag_ignore_all("-Wmissing-prototypes", + "Global functions as their definitions will be in vmlinux BTF"); + +/** + * scx_bpf_create_dsq - Create a custom DSQ + * @dsq_id: DSQ to create + * @node: NUMA node to allocate from + * + * Create a custom DSQ identified by @dsq_id. Can be called from ops.init() and + * ops.prep_enable(). + */ +s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) +{ + if (!scx_kf_allowed(SCX_KF_INIT | SCX_KF_SLEEPABLE)) + return -EINVAL; + + if (unlikely(node >= (int)nr_node_ids || + (node < 0 && node != NUMA_NO_NODE))) + return -EINVAL; + return PTR_ERR_OR_ZERO(create_dsq(dsq_id, node)); +} + +BTF_SET8_START(scx_kfunc_ids_sleepable) +BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE) +BTF_SET8_END(scx_kfunc_ids_sleepable) + +static const struct btf_kfunc_id_set scx_kfunc_set_sleepable = { + .owner = THIS_MODULE, + .set = &scx_kfunc_ids_sleepable, +}; + +static bool scx_dispatch_preamble(struct task_struct *p, u64 enq_flags) +{ + if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH)) + return false; + + lockdep_assert_irqs_disabled(); + + if (unlikely(!p)) { + scx_ops_error("called with NULL task"); + return false; + } + + if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) { + scx_ops_error("invalid enq_flags 0x%llx", enq_flags); + return false; + } + + return true; +} + +static void scx_dispatch_commit(struct task_struct *p, u64 dsq_id, u64 enq_flags) +{ + struct task_struct *ddsp_task; + int idx; + + ddsp_task = __this_cpu_read(direct_dispatch_task); + if (ddsp_task) { + direct_dispatch(ddsp_task, p, dsq_id, enq_flags); + return; + } + + idx = __this_cpu_read(scx_dsp_ctx.buf_cursor); + if (unlikely(idx >= scx_dsp_max_batch)) { + scx_ops_error("dispatch buffer overflow"); + return; + } + + this_cpu_ptr(scx_dsp_buf)[idx] = (struct scx_dsp_buf_ent){ + .task = p, + .qseq = atomic64_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK, + .dsq_id = dsq_id, + .enq_flags = enq_flags, + }; + __this_cpu_inc(scx_dsp_ctx.buf_cursor); +} + +/** + * scx_bpf_dispatch - Dispatch a task into the FIFO queue of a DSQ + * @p: task_struct to dispatch + * @dsq_id: DSQ to dispatch to + * @slice: duration @p can run for in nsecs + * @enq_flags: SCX_ENQ_* + * + * Dispatch @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe + * to call this function spuriously. Can be called from ops.enqueue() and + * ops.dispatch(). 
+ *
+ * When called from ops.enqueue(), it's for direct dispatch and @p must match
+ * the task being enqueued. Also, %SCX_DSQ_LOCAL_ON can't be used to target the
+ * local DSQ of a CPU other than the enqueueing one. Use ops.select_cpu() to be
+ * on the target CPU in the first place.
+ *
+ * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
+ * and this function can be called up to ops.dispatch_max_batch times to
+ * dispatch multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of
+ * remaining slots. scx_bpf_consume() flushes the batch and resets the counter.
+ *
+ * This function doesn't have any locking restrictions and may be called under
+ * BPF locks (in the future when BPF introduces more flexible locking).
+ *
+ * @p is allowed to run for @slice. The scheduling path is triggered on slice
+ * exhaustion. If zero, the current residual slice is maintained. If
+ * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
+ * scx_bpf_kick_cpu() to trigger scheduling.
+ */
+void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
+		      u64 enq_flags)
+{
+	if (!scx_dispatch_preamble(p, enq_flags))
+		return;
+
+	if (slice)
+		p->scx.slice = slice;
+	else
+		p->scx.slice = p->scx.slice ?: 1;
+
+	scx_dispatch_commit(p, dsq_id, enq_flags);
+}
+
+BTF_SET8_START(scx_kfunc_ids_enqueue_dispatch)
+BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
+BTF_SET8_END(scx_kfunc_ids_enqueue_dispatch)
+
+static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
+	.owner			= THIS_MODULE,
+	.set			= &scx_kfunc_ids_enqueue_dispatch,
+};
+
+/**
+ * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
+ *
+ * Can only be called from ops.dispatch().
+ */
+u32 scx_bpf_dispatch_nr_slots(void)
+{
+	if (!scx_kf_allowed(SCX_KF_DISPATCH))
+		return 0;
+
+	return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx.buf_cursor);
+}
+
+/**
+ * scx_bpf_consume - Transfer a task from a DSQ to the current CPU's local DSQ
+ * @dsq_id: DSQ to consume
+ *
+ * Consume a task from the non-local DSQ identified by @dsq_id and transfer it
+ * to the current CPU's local DSQ for execution. Can only be called from
+ * ops.dispatch().
+ *
+ * This function flushes the in-flight dispatches from scx_bpf_dispatch() before
+ * trying to consume the specified DSQ. It may also grab rq locks and thus can't
+ * be called under any BPF locks.
+ *
+ * Returns %true if a task has been consumed, %false if there isn't any task to
+ * consume.
+ */
+bool scx_bpf_consume(u64 dsq_id)
+{
+	struct scx_dsp_ctx *dspc = this_cpu_ptr(&scx_dsp_ctx);
+	struct scx_dispatch_q *dsq;
+
+	if (!scx_kf_allowed(SCX_KF_DISPATCH))
+		return false;
+
+	flush_dispatch_buf(dspc->rq, dspc->rf);
+
+	dsq = find_non_local_dsq(dsq_id);
+	if (unlikely(!dsq)) {
+		scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id);
+		return false;
+	}
+
+	if (consume_dispatch_q(dspc->rq, dspc->rf, dsq)) {
+		/*
+		 * A successfully consumed task can be dequeued before it starts
+		 * running while the CPU is trying to migrate other dispatched
+		 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
+		 * local DSQ.
+ */ + dspc->nr_tasks++; + return true; + } else { + return false; + } +} + +BTF_SET8_START(scx_kfunc_ids_dispatch) +BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots) +BTF_ID_FLAGS(func, scx_bpf_consume) +BTF_SET8_END(scx_kfunc_ids_dispatch) + +static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = { + .owner = THIS_MODULE, + .set = &scx_kfunc_ids_dispatch, +}; + +/** + * scx_bpf_dsq_nr_queued - Return the number of queued tasks + * @dsq_id: id of the DSQ + * + * Return the number of tasks in the DSQ matching @dsq_id. If not found, + * -%ENOENT is returned. Can be called from any non-sleepable online scx_ops + * operations. + */ +s32 scx_bpf_dsq_nr_queued(u64 dsq_id) +{ + struct scx_dispatch_q *dsq; + + lockdep_assert(rcu_read_lock_any_held()); + + if (dsq_id == SCX_DSQ_LOCAL) { + return this_rq()->scx.local_dsq.nr; + } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) { + s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK; + + if (ops_cpu_valid(cpu)) + return cpu_rq(cpu)->scx.local_dsq.nr; + } else { + dsq = find_non_local_dsq(dsq_id); + if (dsq) + return dsq->nr; + } + return -ENOENT; +} + +/** + * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state + * @cpu: cpu to test and clear idle for + * + * Returns %true if @cpu was idle and its idle state was successfully cleared. + * %false otherwise. + * + * Unavailable if ops.update_idle() is implemented and + * %SCX_OPS_KEEP_BUILTIN_IDLE is not set. + */ +bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) +{ + if (!static_branch_likely(&scx_builtin_idle_enabled)) { + scx_ops_error("built-in idle tracking is disabled"); + return false; + } + + if (ops_cpu_valid(cpu)) + return test_and_clear_cpu_idle(cpu); + else + return false; +} + +/** + * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu + * @cpus_allowed: Allowed cpumask + * + * Pick and claim an idle cpu which is also in @cpus_allowed. Returns the picked + * idle cpu number on success. -%EBUSY if no matching cpu was found. + * + * Unavailable if ops.update_idle() is implemented and + * %SCX_OPS_KEEP_BUILTIN_IDLE is not set. + */ +s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed) +{ + if (!static_branch_likely(&scx_builtin_idle_enabled)) { + scx_ops_error("built-in idle tracking is disabled"); + return -EBUSY; + } + + return scx_pick_idle_cpu(cpus_allowed); +} + +/** + * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking + * per-CPU cpumask. + * + * Returns NULL if idle tracking is not enabled, or running on a UP kernel. + */ +const struct cpumask *scx_bpf_get_idle_cpumask(void) +{ + if (!static_branch_likely(&scx_builtin_idle_enabled)) { + scx_ops_error("built-in idle tracking is disabled"); + return cpu_none_mask; + } + +#ifdef CONFIG_SMP + return idle_masks.cpu; +#else + return cpu_none_mask; +#endif +} + +/** + * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking, + * per-physical-core cpumask. Can be used to determine if an entire physical + * core is free. + * + * Returns NULL if idle tracking is not enabled, or running on a UP kernel. + */ +const struct cpumask *scx_bpf_get_idle_smtmask(void) +{ + if (!static_branch_likely(&scx_builtin_idle_enabled)) { + scx_ops_error("built-in idle tracking is disabled"); + return cpu_none_mask; + } + +#ifdef CONFIG_SMP + return idle_masks.smt; +#else + return cpu_none_mask; +#endif +} + +/** + * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to + * either the percpu, or SMT idle-tracking cpumask. 
+ */
+void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
+{
+	/*
+	 * Empty function body because we aren't actually acquiring or
+	 * releasing a reference to a global idle cpumask, which is read-only
+	 * in the caller and is never released. The acquire / release semantics
+	 * here are just used to make the cpumask a trusted pointer in the
+	 * caller.
+	 */
+}
+
+struct scx_bpf_error_bstr_bufs {
+	u64 data[MAX_BPRINTF_VARARGS];
+	char msg[SCX_EXIT_MSG_LEN];
+};
+
+static DEFINE_PER_CPU(struct scx_bpf_error_bstr_bufs, scx_bpf_error_bstr_bufs);
+
+/**
+ * scx_bpf_error_bstr - Indicate fatal error
+ * @fmt: error message format string
+ * @data: format string parameters packaged using ___bpf_fill() macro
+ * @data__sz: @data len, must end in '__sz' for the verifier
+ *
+ * Indicate that the BPF scheduler encountered a fatal error and initiate ops
+ * disabling.
+ */
+void scx_bpf_error_bstr(char *fmt, unsigned long long *data, u32 data__sz)
+{
+	struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
+	struct scx_bpf_error_bstr_bufs *bufs;
+	unsigned long flags;
+	int ret;
+
+	local_irq_save(flags);
+	bufs = this_cpu_ptr(&scx_bpf_error_bstr_bufs);
+
+	if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
+	    (data__sz && !data)) {
+		scx_ops_error("invalid data=%p and data__sz=%u",
+			      (void *)data, data__sz);
+		goto out_restore;
+	}
+
+	ret = copy_from_kernel_nofault(bufs->data, data, data__sz);
+	if (ret) {
+		scx_ops_error("failed to read data fields (%d)", ret);
+		goto out_restore;
+	}
+
+	ret = bpf_bprintf_prepare(fmt, UINT_MAX, bufs->data, data__sz / 8,
+				  &bprintf_data);
+	if (ret < 0) {
+		scx_ops_error("format preparation failed (%d)", ret);
+		goto out_restore;
+	}
+
+	ret = bstr_printf(bufs->msg, sizeof(bufs->msg), fmt,
+			  bprintf_data.bin_args);
+	bpf_bprintf_cleanup(&bprintf_data);
+	if (ret < 0) {
+		scx_ops_error("scx_ops_error(\"%s\", %p, %u) failed to format",
+			      fmt, data, data__sz);
+		goto out_restore;
+	}
+
+	scx_ops_error_type(SCX_EXIT_ERROR_BPF, "%s", bufs->msg);
+out_restore:
+	local_irq_restore(flags);
+}
+
+/**
+ * scx_bpf_destroy_dsq - Destroy a custom DSQ
+ * @dsq_id: DSQ to destroy
+ *
+ * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
+ * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
+ * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
+ * which doesn't exist. Can be called from any online scx_ops operations.
+ */
+void scx_bpf_destroy_dsq(u64 dsq_id)
+{
+	destroy_dsq(dsq_id);
+}
+
+/**
+ * scx_bpf_task_running - Is task currently running?
+ * @p: task of interest + */ +bool scx_bpf_task_running(const struct task_struct *p) +{ + return task_rq(p)->curr == p; +} + +/** + * scx_bpf_task_cpu - CPU a task is currently associated with + * @p: task of interest + */ +s32 scx_bpf_task_cpu(const struct task_struct *p) +{ + return task_cpu(p); +} + +BTF_SET8_START(scx_kfunc_ids_any) +BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued) +BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle) +BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU) +BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE) +BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE) +BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE) +BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, scx_bpf_destroy_dsq) +BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU) +BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU) +BTF_SET8_END(scx_kfunc_ids_any) + +static const struct btf_kfunc_id_set scx_kfunc_set_any = { + .owner = THIS_MODULE, + .set = &scx_kfunc_ids_any, +}; + +__diag_pop(); + +/* + * This can't be done from init_sched_ext_class() as register_btf_kfunc_id_set() + * needs most of the system to be up. + */ +static int __init register_ext_kfuncs(void) +{ + int ret; + + /* + * Some kfuncs are context-sensitive and can only be called from + * specific SCX ops. They are grouped into BTF sets accordingly. + * Unfortunately, BPF currently doesn't have a way of enforcing such + * restrictions. Eventually, the verifier should be able to enforce + * them. For now, register them the same and make each kfunc explicitly + * check using scx_kf_allowed(). + */ + if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, + &scx_kfunc_set_sleepable)) || + (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, + &scx_kfunc_set_enqueue_dispatch)) || + (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, + &scx_kfunc_set_dispatch)) || + (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, + &scx_kfunc_set_any))) { + pr_err("sched_ext: failed to register kfunc sets (%d)\n", ret); + return ret; + } + + return 0; +} +__initcall(register_ext_kfuncs); diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index 6a93c4825339..f8d5682deacf 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -1,7 +1,94 @@ /* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +enum scx_wake_flags { + /* expose select WF_* flags as enums */ + SCX_WAKE_EXEC = WF_EXEC, + SCX_WAKE_FORK = WF_FORK, + SCX_WAKE_TTWU = WF_TTWU, + SCX_WAKE_SYNC = WF_SYNC, +}; + +enum scx_enq_flags { + /* expose select ENQUEUE_* flags as enums */ + SCX_ENQ_WAKEUP = ENQUEUE_WAKEUP, + SCX_ENQ_HEAD = ENQUEUE_HEAD, + + /* high 32bits are SCX specific */ + + /* + * The task being enqueued is the only task available for the cpu. By + * default, ext core keeps executing such tasks but when + * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with + * %SCX_ENQ_LAST and %SCX_ENQ_LOCAL flags set. + * + * If the BPF scheduler wants to continue executing the task, + * ops.enqueue() should dispatch the task to %SCX_DSQ_LOCAL immediately. + * If the task gets queued on a different dsq or the BPF side, the BPF + * scheduler is responsible for triggering a follow-up scheduling event. + * Otherwise, Execution may stall. + */ + SCX_ENQ_LAST = 1LLU << 41, + + /* + * A hint indicating that it's advisable to enqueue the task on the + * local dsq of the currently selected CPU. 
Currently used by + * select_cpu_dfl() and together with %SCX_ENQ_LAST. + */ + SCX_ENQ_LOCAL = 1LLU << 42, + + /* high 8 bits are internal */ + __SCX_ENQ_INTERNAL_MASK = 0xffLLU << 56, + + SCX_ENQ_CLEAR_OPSS = 1LLU << 56, +}; + +enum scx_deq_flags { + /* expose select DEQUEUE_* flags as enums */ + SCX_DEQ_SLEEP = DEQUEUE_SLEEP, +}; #ifdef CONFIG_SCHED_CLASS_EXT -#error "NOT IMPLEMENTED YET" + +extern const struct sched_class ext_sched_class; +extern const struct bpf_verifier_ops bpf_sched_ext_verifier_ops; +extern const struct file_operations sched_ext_fops; + +DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled); +#define scx_enabled() static_branch_unlikely(&__scx_ops_enabled) + +bool task_on_scx(struct task_struct *p); +void scx_pre_fork(struct task_struct *p); +int scx_fork(struct task_struct *p); +void scx_post_fork(struct task_struct *p); +void scx_cancel_fork(struct task_struct *p); +void init_sched_ext_class(void); + +static inline const struct sched_class *next_active_class(const struct sched_class *class) +{ + class++; + if (!scx_enabled() && class == &ext_sched_class) + class++; + return class; +} + +#define for_active_class_range(class, _from, _to) \ + for (class = (_from); class != (_to); class = next_active_class(class)) + +#define for_each_active_class(class) \ + for_active_class_range(class, __sched_class_highest, __sched_class_lowest) + +/* + * SCX requires a balance() call before every pick_next_task() call including + * when waking up from idle. + */ +#define for_balance_class_range(class, prev_class, end_class) \ + for_active_class_range(class, (prev_class) > &ext_sched_class ? \ + &ext_sched_class : (prev_class), (end_class)) + #else /* CONFIG_SCHED_CLASS_EXT */ #define scx_enabled() false @@ -18,7 +105,13 @@ static inline void init_sched_ext_class(void) {} #endif /* CONFIG_SCHED_CLASS_EXT */ #if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP) -#error "NOT IMPLEMENTED YET" +void __scx_update_idle(struct rq *rq, bool idle); + +static inline void scx_update_idle(struct rq *rq, bool idle) +{ + if (scx_enabled()) + __scx_update_idle(rq, idle); +} #else static inline void scx_update_idle(struct rq *rq, bool idle) {} #endif diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 6c42b042daa4..ae4cd306bf28 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -185,6 +185,10 @@ static inline int idle_policy(int policy) static inline int normal_policy(int policy) { +#ifdef CONFIG_SCHED_CLASS_EXT + if (policy == SCHED_EXT) + return true; +#endif return policy == SCHED_NORMAL; } @@ -681,6 +685,15 @@ struct cfs_rq { #endif /* CONFIG_FAIR_GROUP_SCHED */ }; +#ifdef CONFIG_SCHED_CLASS_EXT +struct scx_rq { + struct scx_dispatch_q local_dsq; + u64 ops_qseq; + u64 extra_enq_flags; /* see move_task_to_local_dsq() */ + u32 nr_running; +}; +#endif /* CONFIG_SCHED_CLASS_EXT */ + static inline int rt_bandwidth_enabled(void) { return sysctl_sched_rt_runtime >= 0; @@ -1022,6 +1035,9 @@ struct rq { struct cfs_rq cfs; struct rt_rq rt; struct dl_rq dl; +#ifdef CONFIG_SCHED_CLASS_EXT + struct scx_rq scx; +#endif #ifdef CONFIG_FAIR_GROUP_SCHED /* list of leaf cfs_rq on this CPU: */ From patchwork Fri Mar 17 21:33:15 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71489 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp13785wrt; Fri, 17 Mar 2023 15:14:07 -0700 (PDT) X-Google-Smtp-Source: 
From: Tejun Heo
To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org,
    juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com,
    rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com,
    vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org,
    martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com,
    derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com,
    dskarlat@cs.cmu.edu, riel@surriel.com
Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo
Subject: [PATCH 14/32] sched_ext: Add scx_example_simple and scx_example_qmap example schedulers
Date: Fri, 17 Mar 2023 11:33:15 -1000
Message-Id: <20230317213333.2174969-15-tj@kernel.org>
X-Mailer: git-send-email 2.39.2
In-Reply-To: <20230317213333.2174969-1-tj@kernel.org>
References: <20230317213333.2174969-1-tj@kernel.org>
MIME-Version: 1.0

Add two simple example BPF schedulers - simple and qmap.

* simple: In terms of scheduling, it behaves identically to not having any
  operation implemented at all. The two operations it implements are only to
  improve visibility and exit handling. On certain homogeneous configurations,
  this actually can perform pretty well.

* qmap: A fixed five-level priority scheduler to demonstrate queueing PIDs on
  BPF maps for scheduling. While not very practical, this is useful as a simple
  example and will be used to demonstrate different features.
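For a feel of the overall shape, a minimal scheduler in this style could look
roughly like the sketch below. This is illustrative only and not the actual
scx_example_simple.bpf.c; the "sketch" names are made up, and it assumes the
BPF_STRUCT_OPS() and uei_record() helpers provided by scx_common.bpf.h and
user_exit_info.h, which this patch adds:

  /*
   * Minimal global-FIFO sketch: every runnable task is dispatched to the
   * shared global DSQ, which CPUs fall back to when their local DSQs are
   * empty.
   */
  #include "scx_common.bpf.h"

  char _license[] SEC("license") = "GPL";

  struct user_exit_info uei;

  void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
  {
  	/* FIFO order on the global DSQ with the default time slice */
  	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
  }

  void BPF_STRUCT_OPS(sketch_exit, struct scx_exit_info *ei)
  {
  	/* record why the scheduler was unloaded for the userspace loader */
  	uei_record(&uei, ei);
  }

  SEC(".struct_ops")
  struct sched_ext_ops sketch_ops = {
  	.enqueue	= (void *)sketch_enqueue,
  	.exit		= (void *)sketch_exit,
  	.name		= "sketch",
  };

Unregistering such a scheduler (or hitting an error in it) goes through the
disable path shown earlier: tasks are first forced into global FIFO scheduling
and then handed back to the normal sched classes.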
v3: * Rename scx_example_dummy to scx_example_simple and restructure a bit to ease later additions. Comment updates. * Added declarations for BPF inline iterators. In the future, hopefully, these will be consolidated into a generic BPF header so that they don't need to be replicated here. v2: * Updated with the generic BPF cpumask helpers. Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- tools/sched_ext/.gitignore | 5 + tools/sched_ext/Makefile | 188 +++++++++++++++ tools/sched_ext/gnu/stubs.h | 1 + tools/sched_ext/scx_common.bpf.h | 284 +++++++++++++++++++++++ tools/sched_ext/scx_example_qmap.bpf.c | 241 +++++++++++++++++++ tools/sched_ext/scx_example_qmap.c | 84 +++++++ tools/sched_ext/scx_example_simple.bpf.c | 56 +++++ tools/sched_ext/scx_example_simple.c | 93 ++++++++ tools/sched_ext/user_exit_info.h | 50 ++++ 9 files changed, 1002 insertions(+) create mode 100644 tools/sched_ext/.gitignore create mode 100644 tools/sched_ext/Makefile create mode 100644 tools/sched_ext/gnu/stubs.h create mode 100644 tools/sched_ext/scx_common.bpf.h create mode 100644 tools/sched_ext/scx_example_qmap.bpf.c create mode 100644 tools/sched_ext/scx_example_qmap.c create mode 100644 tools/sched_ext/scx_example_simple.bpf.c create mode 100644 tools/sched_ext/scx_example_simple.c create mode 100644 tools/sched_ext/user_exit_info.h diff --git a/tools/sched_ext/.gitignore b/tools/sched_ext/.gitignore new file mode 100644 index 000000000000..2ad3d86caf79 --- /dev/null +++ b/tools/sched_ext/.gitignore @@ -0,0 +1,5 @@ +scx_example_simple +scx_example_qmap +*.skel.h +*.subskel.h +/tools/ diff --git a/tools/sched_ext/Makefile b/tools/sched_ext/Makefile new file mode 100644 index 000000000000..8f0f14bb59ff --- /dev/null +++ b/tools/sched_ext/Makefile @@ -0,0 +1,188 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2022 Meta Platforms, Inc. and affiliates. +include ../build/Build.include +include ../scripts/Makefile.arch +include ../scripts/Makefile.include + +ifneq ($(LLVM),) +ifneq ($(filter %/,$(LLVM)),) +LLVM_PREFIX := $(LLVM) +else ifneq ($(filter -%,$(LLVM)),) +LLVM_SUFFIX := $(LLVM) +endif + +CLANG_TARGET_FLAGS_arm := arm-linux-gnueabi +CLANG_TARGET_FLAGS_arm64 := aarch64-linux-gnu +CLANG_TARGET_FLAGS_hexagon := hexagon-linux-musl +CLANG_TARGET_FLAGS_m68k := m68k-linux-gnu +CLANG_TARGET_FLAGS_mips := mipsel-linux-gnu +CLANG_TARGET_FLAGS_powerpc := powerpc64le-linux-gnu +CLANG_TARGET_FLAGS_riscv := riscv64-linux-gnu +CLANG_TARGET_FLAGS_s390 := s390x-linux-gnu +CLANG_TARGET_FLAGS_x86 := x86_64-linux-gnu +CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(ARCH)) + +ifeq ($(CROSS_COMPILE),) +ifeq ($(CLANG_TARGET_FLAGS),) +$(error Specify CROSS_COMPILE or add '--target=' option to lib.mk +else +CLANG_FLAGS += --target=$(CLANG_TARGET_FLAGS) +endif # CLANG_TARGET_FLAGS +else +CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%)) +endif # CROSS_COMPILE + +CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX) $(CLANG_FLAGS) -fintegrated-as +else +CC := $(CROSS_COMPILE)gcc +endif # LLVM + +CURDIR := $(abspath .) +TOOLSDIR := $(abspath ..) 
+LIBDIR := $(TOOLSDIR)/lib +BPFDIR := $(LIBDIR)/bpf +TOOLSINCDIR := $(TOOLSDIR)/include +BPFTOOLDIR := $(TOOLSDIR)/bpf/bpftool +APIDIR := $(TOOLSINCDIR)/uapi +GENDIR := $(abspath ../../include/generated) +GENHDR := $(GENDIR)/autoconf.h + +SCRATCH_DIR := $(CURDIR)/tools +BUILD_DIR := $(SCRATCH_DIR)/build +INCLUDE_DIR := $(SCRATCH_DIR)/include +BPFOBJ_DIR := $(BUILD_DIR)/libbpf +BPFOBJ := $(BPFOBJ_DIR)/libbpf.a +ifneq ($(CROSS_COMPILE),) +HOST_BUILD_DIR := $(BUILD_DIR)/host +HOST_SCRATCH_DIR := host-tools +HOST_INCLUDE_DIR := $(HOST_SCRATCH_DIR)/include +else +HOST_BUILD_DIR := $(BUILD_DIR) +HOST_SCRATCH_DIR := $(SCRATCH_DIR) +HOST_INCLUDE_DIR := $(INCLUDE_DIR) +endif +HOST_BPFOBJ := $(HOST_BUILD_DIR)/libbpf/libbpf.a +RESOLVE_BTFIDS := $(HOST_BUILD_DIR)/resolve_btfids/resolve_btfids +DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool + +VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \ + $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \ + ../../vmlinux \ + /sys/kernel/btf/vmlinux \ + /boot/vmlinux-$(shell uname -r) +VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS)))) +ifeq ($(VMLINUX_BTF),) +$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)") +endif + +BPFTOOL ?= $(DEFAULT_BPFTOOL) + +ifneq ($(wildcard $(GENHDR)),) + GENFLAGS := -DHAVE_GENHDR +endif + +CFLAGS += -g -O2 -rdynamic -pthread -Wall -Werror $(GENFLAGS) \ + -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \ + -I$(TOOLSINCDIR) -I$(APIDIR) + +# Silence some warnings when compiled with clang +ifneq ($(LLVM),) +CFLAGS += -Wno-unused-command-line-argument +endif + +LDFLAGS = -lelf -lz -lpthread + +IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - &1 \ + | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \ +$(shell $(1) -dM -E - $@ +else + $(call msg,CP,,$@) + $(Q)cp "$(VMLINUX_H)" $@ +endif + +%.bpf.o: %.bpf.c $(INCLUDE_DIR)/vmlinux.h scx_common.bpf.h user_exit_info.h \ + | $(BPFOBJ) + $(call msg,CLNG-BPF,,$@) + $(Q)$(CLANG) $(BPF_CFLAGS) -target bpf -c $< -o $@ + +%.skel.h: %.bpf.o $(BPFTOOL) + $(call msg,GEN-SKEL,,$@) + $(Q)$(BPFTOOL) gen object $(<:.o=.linked1.o) $< + $(Q)$(BPFTOOL) gen object $(<:.o=.linked2.o) $(<:.o=.linked1.o) + $(Q)$(BPFTOOL) gen object $(<:.o=.linked3.o) $(<:.o=.linked2.o) + $(Q)diff $(<:.o=.linked2.o) $(<:.o=.linked3.o) + $(Q)$(BPFTOOL) gen skeleton $(<:.o=.linked3.o) name $(<:.bpf.o=) > $@ + $(Q)$(BPFTOOL) gen subskeleton $(<:.o=.linked3.o) name $(<:.bpf.o=) > $(@:.skel.h=.subskel.h) + +scx_example_simple: scx_example_simple.c scx_example_simple.skel.h user_exit_info.h + $(CC) $(CFLAGS) -c $< -o $@.o + $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) + +scx_example_qmap: scx_example_qmap.c scx_example_qmap.skel.h user_exit_info.h + $(CC) $(CFLAGS) -c $< -o $@.o + $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) + +clean: + rm -rf $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) + rm -f *.o *.bpf.o *.skel.h *.subskel.h + rm -f scx_example_simple scx_example_qmap + +.PHONY: all clean + +# delete failed targets +.DELETE_ON_ERROR: + +# keep intermediate (.skel.h, .bpf.o, etc) targets +.SECONDARY: diff --git a/tools/sched_ext/gnu/stubs.h b/tools/sched_ext/gnu/stubs.h new file mode 100644 index 000000000000..719225b16626 --- /dev/null +++ b/tools/sched_ext/gnu/stubs.h @@ -0,0 +1 @@ +/* dummy .h to trick /usr/include/features.h to work with 'clang -target bpf' */ diff --git a/tools/sched_ext/scx_common.bpf.h b/tools/sched_ext/scx_common.bpf.h new file mode 100644 index 000000000000..69fd41a55a3c --- /dev/null +++ b/tools/sched_ext/scx_common.bpf.h @@ -0,0 +1,284 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#ifndef __SCHED_EXT_COMMON_BPF_H +#define __SCHED_EXT_COMMON_BPF_H + +#include "vmlinux.h" +#include +#include +#include +#include "user_exit_info.h" + +#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ +#define PF_EXITING 0x00000004 +#define CLOCK_MONOTONIC 1 + +/* + * Earlier versions of clang/pahole lost upper 32bits in 64bit enums which can + * lead to really confusing misbehaviors. Let's trigger a build failure. + */ +static inline void ___vmlinux_h_sanity_check___(void) +{ + _Static_assert(SCX_DSQ_FLAG_BUILTIN, + "bpftool generated vmlinux.h is missing high bits for 64bit enums, upgrade clang and pahole"); +} + +void scx_bpf_error_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym; + +static inline __attribute__((format(printf, 1, 2))) +void ___scx_bpf_error_format_checker(const char *fmt, ...) {} + +/* + * scx_bpf_error() wraps the scx_bpf_error_bstr() kfunc with variadic arguments + * instead of an array of u64. Note that __param[] must have at least one + * element to keep the verifier happy. + */ +#define scx_bpf_error(fmt, args...) \ +({ \ + static char ___fmt[] = fmt; \ + unsigned long long ___param[___bpf_narg(args) ?: 1] = {}; \ + \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + ___bpf_fill(___param, args); \ + _Pragma("GCC diagnostic pop") \ + \ + scx_bpf_error_bstr(___fmt, ___param, sizeof(___param)); \ + \ + ___scx_bpf_error_format_checker(fmt, ##args); \ +}) + +s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym; +bool scx_bpf_consume(u64 dsq_id) __ksym; +u32 scx_bpf_dispatch_nr_slots(void) __ksym; +void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym; +s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym; +bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym; +s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed) __ksym; +const struct cpumask *scx_bpf_get_idle_cpumask(void) __ksym; +const struct cpumask *scx_bpf_get_idle_smtmask(void) __ksym; +void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym; +void scx_bpf_destroy_dsq(u64 dsq_id) __ksym; +bool scx_bpf_task_running(const struct task_struct *p) __ksym; +s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym; + +#define BPF_STRUCT_OPS(name, args...) \ +SEC("struct_ops/"#name) \ +BPF_PROG(name, ##args) + +#define BPF_STRUCT_OPS_SLEEPABLE(name, args...) \ +SEC("struct_ops.s/"#name) \ +BPF_PROG(name, ##args) + +/** + * MEMBER_VPTR - Obtain the verified pointer to a struct or array member + * @base: struct or array to index + * @member: dereferenced member (e.g. ->field, [idx0][idx1], ...) + * + * The verifier often gets confused by the instruction sequence the compiler + * generates for indexing struct fields or arrays. This macro forces the + * compiler to generate a code sequence which first calculates the byte offset, + * checks it against the struct or array size and add that byte offset to + * generate the pointer to the member to help the verifier. + * + * Ideally, we want to abort if the calculated offset is out-of-bounds. However, + * BPF currently doesn't support abort, so evaluate to NULL instead. The caller + * must check for NULL and take appropriate action to appease the verifier. To + * avoid confusing the verifier, it's best to check for NULL and dereference + * immediately. 
+ * + * vptr = MEMBER_VPTR(my_array, [i][j]); + * if (!vptr) + * return error; + * *vptr = new_value; + */ +#define MEMBER_VPTR(base, member) (typeof(base member) *)({ \ + u64 __base = (u64)base; \ + u64 __addr = (u64)&(base member) - __base; \ + asm volatile ( \ + "if %0 <= %[max] goto +2\n" \ + "%0 = 0\n" \ + "goto +1\n" \ + "%0 += %1\n" \ + : "+r"(__addr) \ + : "r"(__base), \ + [max]"i"(sizeof(base) - sizeof(base member))); \ + __addr; \ +}) + +/* + * BPF core and other generic helpers + */ + +/* list and rbtree */ +#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node))) +#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8))) + +void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym; +void bpf_obj_drop_impl(void *kptr, void *meta) __ksym; + +#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL)) +#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL) + +void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node) __ksym; +void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node) __ksym; +struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym; +struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym; +struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, + struct bpf_rb_node *node) __ksym; +void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node, + bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b)) __ksym; +struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym; + +/* task */ +struct task_struct *bpf_task_from_pid(s32 pid) __ksym; +struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym; +void bpf_task_release(struct task_struct *p) __ksym; + +/* cgroup */ +struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym; +void bpf_cgroup_release(struct cgroup *cgrp) __ksym; +struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym; + +/* cpumask */ +struct bpf_cpumask *bpf_cpumask_create(void) __ksym; +struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym; +struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **map_value) __ksym; +void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym; +u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym; +u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym; +void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; +void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; +bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym; +bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; +bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; +void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym; +void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym; +bool bpf_cpumask_and(struct bpf_cpumask *dst, const struct cpumask *src1, + const struct cpumask *src2) __ksym; +void bpf_cpumask_or(struct bpf_cpumask *dst, const struct cpumask *src1, + const struct cpumask *src2) __ksym; +void bpf_cpumask_xor(struct bpf_cpumask *dst, const struct cpumask *src1, + const struct cpumask *src2) __ksym; +bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym; +bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym; +bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) 
__ksym; +bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym; +bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym; +void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym; +u32 bpf_cpumask_any(const struct cpumask *cpumask) __ksym; +u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2) __ksym; + +/* rcu */ +void bpf_rcu_read_lock(void) __ksym; +void bpf_rcu_read_unlock(void) __ksym; + +/* BPF core iterators from tools/testing/selftests/bpf/progs/bpf_misc.h */ +struct bpf_iter_num; + +extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym; +extern int *bpf_iter_num_next(struct bpf_iter_num *it) __ksym; +extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym; + +#ifndef bpf_for_each +/* bpf_for_each(iter_type, cur_elem, args...) provides generic construct for + * using BPF open-coded iterators without having to write mundane explicit + * low-level loop logic. Instead, it provides for()-like generic construct + * that can be used pretty naturally. E.g., for some hypothetical cgroup + * iterator, you'd write: + * + * struct cgroup *cg, *parent_cg = <...>; + * + * bpf_for_each(cgroup, cg, parent_cg, CG_ITER_CHILDREN) { + * bpf_printk("Child cgroup id = %d", cg->cgroup_id); + * if (cg->cgroup_id == 123) + * break; + * } + * + * I.e., it looks almost like high-level for each loop in other languages, + * supports continue/break, and is verifiable by BPF verifier. + * + * For iterating integers, the difference betwen bpf_for_each(num, i, N, M) + * and bpf_for(i, N, M) is in that bpf_for() provides additional proof to + * verifier that i is in [N, M) range, and in bpf_for_each() case i is `int + * *`, not just `int`. So for integers bpf_for() is more convenient. + * + * Note: this macro relies on C99 feature of allowing to declare variables + * inside for() loop, bound to for() loop lifetime. It also utilizes GCC + * extension: __attribute__((cleanup())), supported by both GCC and + * Clang. + */ +#define bpf_for_each(type, cur, args...) for ( \ + /* initialize and define destructor */ \ + struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */, \ + cleanup(bpf_iter_##type##_destroy))), \ + /* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \ + *___p __attribute__((unused)) = ( \ + bpf_iter_##type##_new(&___it, ##args), \ + /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ + /* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \ + (void)bpf_iter_##type##_destroy, (void *)0); \ + /* iteration and termination check */ \ + (((cur) = bpf_iter_##type##_next(&___it))); \ +) +#endif /* bpf_for_each */ + +#ifndef bpf_for +/* bpf_for(i, start, end) implements a for()-like looping construct that sets + * provided integer variable *i* to values starting from *start* through, + * but not including, *end*. It also proves to BPF verifier that *i* belongs + * to range [start, end), so this can be used for accessing arrays without + * extra checks. + * + * Note: *start* and *end* are assumed to be expressions with no side effects + * and whose values do not change throughout bpf_for() loop execution. They do + * not have to be statically known or constant, though. + * + * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for() + * loop bound variables and cleanup attribute, supported by GCC and Clang. 
+ */ +#define bpf_for(i, start, end) for ( \ + /* initialize and define destructor */ \ + struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ + cleanup(bpf_iter_num_destroy))), \ + /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ + *___p __attribute__((unused)) = ( \ + bpf_iter_num_new(&___it, (start), (end)), \ + /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ + /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ + (void)bpf_iter_num_destroy, (void *)0); \ + ({ \ + /* iteration step */ \ + int *___t = bpf_iter_num_next(&___it); \ + /* termination and bounds check */ \ + (___t && ((i) = *___t, (i) >= (start) && (i) < (end))); \ + }); \ +) +#endif /* bpf_for */ + +#ifndef bpf_repeat +/* bpf_repeat(N) performs N iterations without exposing iteration number + * + * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for() + * loop bound variables and cleanup attribute, supported by GCC and Clang. + */ +#define bpf_repeat(N) for ( \ + /* initialize and define destructor */ \ + struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ + cleanup(bpf_iter_num_destroy))), \ + /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ + *___p __attribute__((unused)) = ( \ + bpf_iter_num_new(&___it, 0, (N)), \ + /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ + /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ + (void)bpf_iter_num_destroy, (void *)0); \ + bpf_iter_num_next(&___it); \ + /* nothing here */ \ +) +#endif /* bpf_repeat */ + +#endif /* __SCHED_EXT_COMMON_BPF_H */ diff --git a/tools/sched_ext/scx_example_qmap.bpf.c b/tools/sched_ext/scx_example_qmap.bpf.c new file mode 100644 index 000000000000..60e260577a3a --- /dev/null +++ b/tools/sched_ext/scx_example_qmap.bpf.c @@ -0,0 +1,241 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * A simple five-level FIFO queue scheduler. + * + * There are five FIFOs implemented using BPF_MAP_TYPE_QUEUE. A task gets + * assigned to one depending on its compound weight. Each CPU round robins + * through the FIFOs and dispatches more from FIFOs with higher indices - 1 from + * queue0, 2 from queue1, 4 from queue2 and so on. + * + * This scheduler demonstrates: + * + * - BPF-side queueing using PIDs. + * - Sleepable per-task storage allocation using ops.prep_enable(). + * + * This scheduler is primarily for demonstration and testing of sched_ext + * features and unlikely to be useful for actual workloads. + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
+ * Copyright (c) 2022 Tejun Heo
+ * Copyright (c) 2022 David Vernet
+ */
+#include "scx_common.bpf.h"
+#include
+
+char _license[] SEC("license") = "GPL";
+
+const volatile u64 slice_ns = SCX_SLICE_DFL;
+
+u32 test_error_cnt;
+
+struct user_exit_info uei;
+
+struct qmap {
+	__uint(type, BPF_MAP_TYPE_QUEUE);
+	__uint(max_entries, 4096);
+	__type(value, u32);
+} queue0 SEC(".maps"),
+  queue1 SEC(".maps"),
+  queue2 SEC(".maps"),
+  queue3 SEC(".maps"),
+  queue4 SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+	__uint(max_entries, 5);
+	__type(key, int);
+	__array(values, struct qmap);
+} queue_arr SEC(".maps") = {
+	.values = {
+		[0] = &queue0,
+		[1] = &queue1,
+		[2] = &queue2,
+		[3] = &queue3,
+		[4] = &queue4,
+	},
+};
+
+/* Per-task scheduling context */
+struct task_ctx {
+	bool force_local;	/* Dispatch directly to local_dsq */
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, struct task_ctx);
+} task_ctx_stor SEC(".maps");
+
+/* Per-cpu dispatch index and remaining count */
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, 2);
+	__type(key, u32);
+	__type(value, u64);
+} dispatch_idx_cnt SEC(".maps");
+
+/* Statistics */
+unsigned long nr_enqueued, nr_dispatched, nr_dequeued;
+
+s32 BPF_STRUCT_OPS(qmap_select_cpu, struct task_struct *p,
+		   s32 prev_cpu, u64 wake_flags)
+{
+	struct task_ctx *tctx;
+	s32 cpu;
+
+	tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0);
+	if (!tctx) {
+		scx_bpf_error("task_ctx lookup failed");
+		return -ESRCH;
+	}
+
+	if (p->nr_cpus_allowed == 1 ||
+	    scx_bpf_test_and_clear_cpu_idle(prev_cpu)) {
+		tctx->force_local = true;
+		return prev_cpu;
+	}
+
+	cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr);
+	if (cpu >= 0)
+		return cpu;
+
+	return prev_cpu;
+}
+
+static int weight_to_idx(u32 weight)
+{
+	/* Coarsely map the compound weight to a FIFO. */
+	if (weight <= 25)
+		return 0;
+	else if (weight <= 50)
+		return 1;
+	else if (weight < 200)
+		return 2;
+	else if (weight < 400)
+		return 3;
+	else
+		return 4;
+}
+
+void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags)
+{
+	struct task_ctx *tctx;
+	u32 pid = p->pid;
+	int idx = weight_to_idx(p->scx.weight);
+	void *ring;
+
+	if (test_error_cnt && !--test_error_cnt)
+		scx_bpf_error("test triggering error");
+
+	tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0);
+	if (!tctx) {
+		scx_bpf_error("task_ctx lookup failed");
+		return;
+	}
+
+	/* Is select_cpu() telling us to enqueue locally? */
+	if (tctx->force_local) {
+		tctx->force_local = false;
+		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, slice_ns, enq_flags);
+		return;
+	}
+
+	ring = bpf_map_lookup_elem(&queue_arr, &idx);
+	if (!ring) {
+		scx_bpf_error("failed to find ring %d", idx);
+		return;
+	}
+
+	/* Queue on the selected FIFO. If the FIFO overflows, punt to global. */
+	if (bpf_map_push_elem(ring, &pid, 0)) {
+		scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, slice_ns, enq_flags);
+		return;
+	}
+
+	__sync_fetch_and_add(&nr_enqueued, 1);
+}
+
+/*
+ * The BPF queue map doesn't support removal and sched_ext can handle spurious
+ * dispatches. qmap_dequeue() is only used to collect statistics.
+ */ +void BPF_STRUCT_OPS(qmap_dequeue, struct task_struct *p, u64 deq_flags) +{ + __sync_fetch_and_add(&nr_dequeued, 1); +} + +void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) +{ + u32 zero = 0, one = 1; + u64 *idx = bpf_map_lookup_elem(&dispatch_idx_cnt, &zero); + u64 *cnt = bpf_map_lookup_elem(&dispatch_idx_cnt, &one); + void *fifo; + s32 pid; + int i; + + if (!idx || !cnt) { + scx_bpf_error("failed to lookup idx[%p], cnt[%p]", idx, cnt); + return; + } + + for (i = 0; i < 5; i++) { + /* Advance the dispatch cursor and pick the fifo. */ + if (!*cnt) { + *idx = (*idx + 1) % 5; + *cnt = 1 << *idx; + } + (*cnt)--; + + fifo = bpf_map_lookup_elem(&queue_arr, idx); + if (!fifo) { + scx_bpf_error("failed to find ring %llu", *idx); + return; + } + + /* Dispatch or advance. */ + if (!bpf_map_pop_elem(fifo, &pid)) { + struct task_struct *p; + + p = bpf_task_from_pid(pid); + if (p) { + __sync_fetch_and_add(&nr_dispatched, 1); + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, slice_ns, 0); + bpf_task_release(p); + return; + } + } + + *cnt = 0; + } +} + +s32 BPF_STRUCT_OPS(qmap_prep_enable, struct task_struct *p, + struct scx_enable_args *args) +{ + /* + * @p is new. Let's ensure that its task_ctx is available. We can sleep + * in this function and the following will automatically use GFP_KERNEL. + */ + if (bpf_task_storage_get(&task_ctx_stor, p, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE)) + return 0; + else + return -ENOMEM; +} + +void BPF_STRUCT_OPS(qmap_exit, struct scx_exit_info *ei) +{ + uei_record(&uei, ei); +} + +SEC(".struct_ops") +struct sched_ext_ops qmap_ops = { + .select_cpu = (void *)qmap_select_cpu, + .enqueue = (void *)qmap_enqueue, + .dequeue = (void *)qmap_dequeue, + .dispatch = (void *)qmap_dispatch, + .prep_enable = (void *)qmap_prep_enable, + .exit = (void *)qmap_exit, + .name = "qmap", +}; diff --git a/tools/sched_ext/scx_example_qmap.c b/tools/sched_ext/scx_example_qmap.c new file mode 100644 index 000000000000..56c85c9fa979 --- /dev/null +++ b/tools/sched_ext/scx_example_qmap.c @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
+ * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include "user_exit_info.h" +#include "scx_example_qmap.skel.h" + +const char help_fmt[] = +"A simple five-level FIFO queue sched_ext scheduler.\n" +"\n" +"See the top-level comment in .bpf.c for more details.\n" +"\n" +"Usage: %s [-s SLICE_US] [-e COUNT]\n" +"\n" +" -s SLICE_US Override slice duration\n" +" -e COUNT Trigger scx_bpf_error() after COUNT enqueues\n" +" -h Display this help and exit\n"; + +static volatile int exit_req; + +static void sigint_handler(int dummy) +{ + exit_req = 1; +} + +int main(int argc, char **argv) +{ + struct scx_example_qmap *skel; + struct bpf_link *link; + int opt; + + signal(SIGINT, sigint_handler); + signal(SIGTERM, sigint_handler); + + libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + + skel = scx_example_qmap__open(); + assert(skel); + + while ((opt = getopt(argc, argv, "s:e:tTd:h")) != -1) { + switch (opt) { + case 's': + skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000; + break; + case 'e': + skel->bss->test_error_cnt = strtoul(optarg, NULL, 0); + break; + default: + fprintf(stderr, help_fmt, basename(argv[0])); + return opt != 'h'; + } + } + + assert(!scx_example_qmap__load(skel)); + + link = bpf_map__attach_struct_ops(skel->maps.qmap_ops); + assert(link); + + while (!exit_req && !uei_exited(&skel->bss->uei)) { + long nr_enqueued = skel->bss->nr_enqueued; + long nr_dispatched = skel->bss->nr_dispatched; + + printf("enq=%lu, dsp=%lu, delta=%ld, deq=%lu\n", + nr_enqueued, nr_dispatched, nr_enqueued - nr_dispatched, + skel->bss->nr_dequeued); + fflush(stdout); + sleep(1); + } + + bpf_link__destroy(link); + uei_print(&skel->bss->uei); + scx_example_qmap__destroy(skel); + return 0; +} diff --git a/tools/sched_ext/scx_example_simple.bpf.c b/tools/sched_ext/scx_example_simple.bpf.c new file mode 100644 index 000000000000..74716d0dd08d --- /dev/null +++ b/tools/sched_ext/scx_example_simple.bpf.c @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * A simple scheduler. + * + * A simple global FIFO scheduler. It also demonstrates the following niceties. + * + * - Statistics tracking how many tasks are queued to local and global dsq's. + * - Termination notification for userspace. + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
+ * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#include "scx_common.bpf.h" + +char _license[] SEC("license") = "GPL"; + +struct user_exit_info uei; + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(u64)); + __uint(max_entries, 2); /* [local, global] */ +} stats SEC(".maps"); + +static void stat_inc(u32 idx) +{ + u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx); + if (cnt_p) + (*cnt_p)++; +} + +void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags) +{ + if (enq_flags & SCX_ENQ_LOCAL) { + stat_inc(0); /* count local queueing */ + scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags); + return; + } + + stat_inc(1); /* count global queueing */ + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags); +} + +void BPF_STRUCT_OPS(simple_exit, struct scx_exit_info *ei) +{ + uei_record(&uei, ei); +} + +SEC(".struct_ops") +struct sched_ext_ops simple_ops = { + .enqueue = (void *)simple_enqueue, + .exit = (void *)simple_exit, + .name = "simple", +}; diff --git a/tools/sched_ext/scx_example_simple.c b/tools/sched_ext/scx_example_simple.c new file mode 100644 index 000000000000..2f1ee40f7e5a --- /dev/null +++ b/tools/sched_ext/scx_example_simple.c @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include "user_exit_info.h" +#include "scx_example_simple.skel.h" + +const char help_fmt[] = +"A simple sched_ext scheduler.\n" +"\n" +"See the top-level comment in .bpf.c for more details.\n" +"\n" +"Usage: %s\n" +"\n" +" -h Display this help and exit\n"; + +static volatile int exit_req; + +static void sigint_handler(int simple) +{ + exit_req = 1; +} + +static void read_stats(struct scx_example_simple *skel, u64 *stats) +{ + int nr_cpus = libbpf_num_possible_cpus(); + u64 cnts[2][nr_cpus]; + u32 idx; + + memset(stats, 0, sizeof(stats[0]) * 2); + + for (idx = 0; idx < 2; idx++) { + int ret, cpu; + + ret = bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats), + &idx, cnts[idx]); + if (ret < 0) + continue; + for (cpu = 0; cpu < nr_cpus; cpu++) + stats[idx] += cnts[idx][cpu]; + } +} + +int main(int argc, char **argv) +{ + struct scx_example_simple *skel; + struct bpf_link *link; + u32 opt; + + signal(SIGINT, sigint_handler); + signal(SIGTERM, sigint_handler); + + libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + + skel = scx_example_simple__open(); + assert(skel); + + while ((opt = getopt(argc, argv, "h")) != -1) { + switch (opt) { + default: + fprintf(stderr, help_fmt, basename(argv[0])); + return opt != 'h'; + } + } + + assert(!scx_example_simple__load(skel)); + + link = bpf_map__attach_struct_ops(skel->maps.simple_ops); + assert(link); + + while (!exit_req && !uei_exited(&skel->bss->uei)) { + u64 stats[2]; + + read_stats(skel, stats); + printf("local=%lu global=%lu\n", stats[0], stats[1]); + fflush(stdout); + sleep(1); + } + + bpf_link__destroy(link); + uei_print(&skel->bss->uei); + scx_example_simple__destroy(skel); + return 0; +} diff --git a/tools/sched_ext/user_exit_info.h b/tools/sched_ext/user_exit_info.h new file mode 100644 index 000000000000..e701ef0e0b86 --- /dev/null +++ b/tools/sched_ext/user_exit_info.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Define struct user_exit_info which is shared between BPF and userspace parts + * to communicate 
exit status and other information. + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#ifndef __USER_EXIT_INFO_H +#define __USER_EXIT_INFO_H + +struct user_exit_info { + int type; + char reason[128]; + char msg[1024]; +}; + +#ifdef __bpf__ + +#include "vmlinux.h" +#include + +static inline void uei_record(struct user_exit_info *uei, + const struct scx_exit_info *ei) +{ + bpf_probe_read_kernel_str(uei->reason, sizeof(uei->reason), ei->reason); + bpf_probe_read_kernel_str(uei->msg, sizeof(uei->msg), ei->msg); + /* use __sync to force memory barrier */ + __sync_val_compare_and_swap(&uei->type, uei->type, ei->type); +} + +#else /* !__bpf__ */ + +static inline bool uei_exited(struct user_exit_info *uei) +{ + /* use __sync to force memory barrier */ + return __sync_val_compare_and_swap(&uei->type, -1, -1); +} + +static inline void uei_print(const struct user_exit_info *uei) +{ + fprintf(stderr, "EXIT: %s", uei->reason); + if (uei->msg[0] != '\0') + fprintf(stderr, " (%s)", uei->msg); + fputs("\n", stderr); +} + +#endif /* __bpf__ */ +#endif /* __USER_EXIT_INFO_H */ From patchwork Fri Mar 17 21:33:16 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71469 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp2810wrt; Fri, 17 Mar 2023 14:39:25 -0700 (PDT) X-Google-Smtp-Source: AK7set+yfyTirr6fTlacLszkMNPVuqVU2MvKUHhnHK7B9SjW1usPqQt8AtspTOUwAHYXPVgouVyj X-Received: by 2002:a17:902:ee4d:b0:19a:839d:b67a with SMTP id 13-20020a170902ee4d00b0019a839db67amr8636942plo.5.1679089164828; Fri, 17 Mar 2023 14:39:24 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679089164; cv=none; d=google.com; s=arc-20160816; b=pI7luC2tnWaidFUOy6wIe9KvdpKkMMZGlU6hbpa1kdmUqnG3NdEB6L7FzUt2soDhKW GJ4TYvTPP8CoVgUQeS3o96KSZiLupyh7wRSTQb8U+hlwKg4SFQBl3GEAnJCEbl2hUBYW aJqheY6r1VHnTsJD8+WErp9tPdnir7FCTfzKDb8Df/jgkmdmF5U8BPZipImJ/Hd/ObFH USqXr2Ol4Ty0+s79Ew5R0gTMAvEmzKDlZNy+so+hYzmCUlVR8BhR1CSd335k9/uK91Yl T7WZsW/jJ29mm+/vtSCQhLSlzNfwvI8gfVOJS91BpCyC278Qqt+gFjnTMzRUNnF4xZy3 EYWQ== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=1K7uT5MTAAWZJBuE8qEaFUDCULD6bsJ7gIDLvBZb3/E=; b=Uow1Tb4esjSMprRCFzpt/NNSoCa7+q8OWRb9IPXB98iCEJfab+rYAYjHmtBaSMePU9 VAs5sLJ+HMq7kydxyxzWC2kBOFa9wNA8MvUhqcY/BsPBx0YMtDF5lRE5LH/n5bO810u3 Pkqv2D8JFl62djJXInWqydPoNhJB8LeuDg4tdQg5dGJuQTVFDCh00ReBhPhekbUKa0ty lRlNTAxBKWj2sR/RAjm4t2XdFPKmz8k+bp6Z4FB3bNUGYFRJp8oqofCWx5QuUoZfwaRA +BVlNtNX3k6Bdozjye/9Y+KXvpVBW947ybZQpHRqbJfHTwKwS5vOiezywOZ4r7Rt24TU gNQw== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b="Oe/grPgb"; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: from out1.vger.email (out1.vger.email. 
[2620:137:e000::1:20]) by mx.google.com with ESMTP id j17-20020a170902da9100b0019894a59be0si3964169plx.566.2023.03.17.14.39.09; Fri, 17 Mar 2023 14:39:24 -0700 (PDT) Received-SPF: pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) client-ip=2620:137:e000::1:20; Authentication-Results: mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b="Oe/grPgb"; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S230461AbjCQVge (ORCPT + 99 others); Fri, 17 Mar 2023 17:36:34 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:51952 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231145AbjCQVgO (ORCPT ); Fri, 17 Mar 2023 17:36:14 -0400 Received: from mail-pj1-x102e.google.com (mail-pj1-x102e.google.com [IPv6:2607:f8b0:4864:20::102e]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 91ED02DE51; Fri, 17 Mar 2023 14:35:30 -0700 (PDT) Received: by mail-pj1-x102e.google.com with SMTP id lr16-20020a17090b4b9000b0023f187954acso6631116pjb.2; Fri, 17 Mar 2023 14:35:30 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; t=1679088851; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=1K7uT5MTAAWZJBuE8qEaFUDCULD6bsJ7gIDLvBZb3/E=; b=Oe/grPgbOTYv/R7zibXuXsAM8Zrl3ckCZ56OLBvR2ChKOGmbhe0AVapP9Uj7NEjgxs T7gZt1VpGQR0rJ98GNhcQDMT7VSHR01oHGIojr9H7RWxS4PybzKt9Vqo/g9T7Ur90YLb GHgTMyx40348aKZYRDndi2oDlD0+gEpCsHpliC5LaWgxkyd4bmGK2hN489QKK/8ZKqVy ++zdt4jahFxNAuEQM88Qpk61vBzrVxgktcQlLOuvouNF3OAiBePKb3s4KFanJbD9gtf7 Zm2fwYVC4W0SPZ9Lc+Uy1tvAUU1wz/t96uoNi1WIlmrWm1C9OFSp8BXA2dAM/yxTbyoB 6SRw== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; t=1679088851; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=1K7uT5MTAAWZJBuE8qEaFUDCULD6bsJ7gIDLvBZb3/E=; b=29SvO4QFUxq3sr/0OWit1h1MJ11QAXMKk9nDOaSenzGvSlccSTM2nNjZojdxc7HhZO VUIc7eqj5cfk4bTK+wRQZ/nIeRVJDDaudEZrSo4wUb+MuPItB2VOhHOYPL7UvdGgpvwe e3hcFAG0/IGF15DPAu2d34yFQQL6VR3E/u4+EWHM9MTHhmw8qdBZNE1c91Zf6Ye5N5tp Bqs6X4kWidrgmcEMJcde79obUuWurubnIhPKwsJ3IfATxW5cVj7irmpsP5mB/cvZJUZN KOhi7w9BtUcVWsIDknAjozXiz7u9A68Daeae0IZpGVWtWu/Gl6xo92iVE+06+TnSQ/bN oI/Q== X-Gm-Message-State: AO0yUKUMptH48FW3otQlnIZyVVK+gGBMM3fwioN3ZGN6aI4y7bJN5pnf TplLuzWmGvj1siOv8GtEK54= X-Received: by 2002:a17:902:c651:b0:1a1:8b54:ab58 with SMTP id s17-20020a170902c65100b001a18b54ab58mr7079692pls.19.1679088850715; Fri, 17 Mar 2023 14:34:10 -0700 (PDT) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id x21-20020a170902ea9500b0019c2cf12d15sm1967084plb.116.2023.03.17.14.34.10 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:34:10 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 15/32] sched_ext: Add sysrq-S which disables the BPF scheduler Date: Fri, 17 Mar 2023 11:33:16 -1000 Message-Id: <20230317213333.2174969-16-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE, SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760652600089771319?= X-GMAIL-MSGID: =?utf-8?q?1760652600089771319?= This enables the admin to abort the BPF scheduler and revert to CFS anytime. 
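For example, with a BPF scheduler such as scx_example_qmap attached, the admin can force it off from a shell (an illustrative session; it assumes CONFIG_MAGIC_SYSRQ=y, and note that writes to /proc/sysrq-trigger invoke the handler directly while the keyboard chord is additionally gated by the sysrq enable mask):

# echo S > /proc/sysrq-trigger

The handler calls scx_ops_disable(SCX_EXIT_SYSRQ), the scheduler is torn down with the reason "disabled by sysrq-S", and all tasks revert to CFS.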
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- drivers/tty/sysrq.c | 1 + include/linux/sched/ext.h | 1 + kernel/sched/build_policy.c | 1 + kernel/sched/ext.c | 20 ++++++++++++++++++++ 4 files changed, 23 insertions(+) diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index b6e70c5cfa17..ddfcdb6aecd7 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -520,6 +520,7 @@ static const struct sysrq_key_op *sysrq_key_table[62] = { NULL, /* P */ NULL, /* Q */ NULL, /* R */ + /* S: May be registered by sched_ext for resetting */ NULL, /* S */ NULL, /* T */ NULL, /* U */ diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 45bf24a23c61..4b6b9386e2f8 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -55,6 +55,7 @@ enum scx_exit_type { SCX_EXIT_DONE, SCX_EXIT_UNREG = 64, /* BPF unregistration */ + SCX_EXIT_SYSRQ, /* requested by 'S' sysrq */ SCX_EXIT_ERROR = 1024, /* runtime error, error msg contains details */ SCX_EXIT_ERROR_BPF, /* ERROR but triggered through scx_bpf_error() */ diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c index 4c658b21f603..005025f55bea 100644 --- a/kernel/sched/build_policy.c +++ b/kernel/sched/build_policy.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 738ae1d7a8ee..9406869fbc90 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1930,6 +1930,9 @@ static void scx_ops_disable_workfn(struct kthread_work *work) case SCX_EXIT_UNREG: reason = "BPF scheduler unregistered"; break; + case SCX_EXIT_SYSRQ: + reason = "disabled by sysrq-S"; + break; case SCX_EXIT_ERROR: reason = "runtime error"; break; @@ -2539,6 +2542,21 @@ struct bpf_struct_ops bpf_sched_ext_ops = { .name = "sched_ext_ops", }; +static void sysrq_handle_sched_ext_reset(int key) +{ + if (scx_ops_helper) + scx_ops_disable(SCX_EXIT_SYSRQ); + else + pr_info("sched_ext: BPF scheduler not yet used\n"); +} + +static const struct sysrq_key_op sysrq_sched_ext_reset_op = { + .handler = sysrq_handle_sched_ext_reset, + .help_msg = "reset-sched-ext(S)", + .action_msg = "Disable sched_ext and revert all tasks to CFS", + .enable_mask = SYSRQ_ENABLE_RTNICE, +}; + void __init init_sched_ext_class(void) { int cpu; @@ -2562,6 +2580,8 @@ void __init init_sched_ext_class(void) init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL); } + + register_sysrq_key('S', &sysrq_sched_ext_reset_op); } From patchwork Fri Mar 17 21:33:17 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71481 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp7873wrt; Fri, 17 Mar 2023 14:59:05 -0700 (PDT) X-Google-Smtp-Source: AK7set+TxX19tCT3YyemeXRSucjUy33t1hNrB/kMt9eJNzPCfvb8q6TJ/rwj11a1pBxz8FHpQCJM X-Received: by 2002:a05:6a00:3186:b0:5a9:cebd:7b79 with SMTP id bj6-20020a056a00318600b005a9cebd7b79mr8014718pfb.0.1679090344823; Fri, 17 Mar 2023 14:59:04 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679090344; cv=none; d=google.com; s=arc-20160816; b=j5n2dzbC7gFNW7WrhjIA/zronGNqiGKRkWZ9W5tVU6d/8gaPygTwnGwM0VAiVmBBGe skFV6anaovTgAXMNogtKtSiBBfkbVjg4dI+vRnLIwzdb1R5cNxeB4zpkuANDO3Hw4yqx Cld8wEuH48j7tu0eaI7TkLT/qLe6dCMV9yMSBI7TL9OOVEL5dA0WeOflVVBQsqJmyTMZ tbL6AZtOOcPZSctDyU5NCLGkAKobmCAmHK1tG7q/Slv96WV8eKjFivbXvMjhPUh6a8vM BmPOPI6HwaOLwRq9qPdmiDaL3ny2C50A/uNTsUBvwIypVJNxxWZVHvTD/XeEyoPLXntb XDTg== 
ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=ddU7EzNdrNtTneaU1bR/pMeEcdkBa5htb4ly2GB2puk=; b=pLMs8zhIyqWryKk1WZ+nxKZjndQUP0Lg5QUN29IRmt/IoWg7iTyPDmaE+2DbTagV2O 3BE8FiN5HaHb8C8WgimAz3c8VSlct8c9J0FcIXVEiHzUCOD2ttGOkxuqhDqOrMEFJGQX A1BirSG7HJloepZ0EOGnz9sLSHeCElPHJEeEWKQXXI8i7K+zCzliULEeCADGZ1uBavrd uIhQEDdcp20e20HIDSJ0lhM9JqL1e7mGZP1HSPyo3gUtHemYeZjJsAyMiNrnyp4bnmox xmGlZagutvfXh4zWezh/+LoHFs1UOMGzldSWqLsZm/haAExIkM+ii1j8IYG24bejKULm rvEg== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=EiO8C1WZ; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: from out1.vger.email (out1.vger.email. [2620:137:e000::1:20]) by mx.google.com with ESMTP id 66-20020a630145000000b004f2920ecea3si3513659pgb.623.2023.03.17.14.58.44; Fri, 17 Mar 2023 14:59:04 -0700 (PDT) Received-SPF: pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) client-ip=2620:137:e000::1:20; Authentication-Results: mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=EiO8C1WZ; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231265AbjCQVgm (ORCPT + 99 others); Fri, 17 Mar 2023 17:36:42 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:52286 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231175AbjCQVgR (ORCPT ); Fri, 17 Mar 2023 17:36:17 -0400 Received: from mail-pj1-x102e.google.com (mail-pj1-x102e.google.com [IPv6:2607:f8b0:4864:20::102e]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id B7EA13D08A; Fri, 17 Mar 2023 14:35:33 -0700 (PDT) Received: by mail-pj1-x102e.google.com with SMTP id qe8-20020a17090b4f8800b0023f07253a2cso6626092pjb.3; Fri, 17 Mar 2023 14:35:33 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; t=1679088853; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=ddU7EzNdrNtTneaU1bR/pMeEcdkBa5htb4ly2GB2puk=; b=EiO8C1WZNYn4zXQg78CrR4tD9s3SXrAVlranqGvZ5f5t9uBVM8ESjCeI4Nm/rVqOx3 mvyf295FoWK2gF3cGyGhq0O28rxt1CoPj6EZEs0OLBqimwuoT40zWl/tI6lc12o7Yvhz vdGtOIp+ba2o2Wn8MDh0ed+Md0s8O97vdcADzVD/U25fytGEiHZYV56IWkwo7nEZB4tY J4awSSstdAOTfBz1OuPDRYK1rEbBfDLR+Pa5h74s4O1hiRdqqvqb/wStQn/EVoh4V6yH TLV5+kw9D4mSbzpmj79PzOA33NlVTFGSAxVTBg6zFI6sbT7n4nT9IAHTDBfrK40thH+b pYtA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; t=1679088853; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=ddU7EzNdrNtTneaU1bR/pMeEcdkBa5htb4ly2GB2puk=; b=SJYC//zx3kJJlbMyx+jOeFzyPCTP9mT752DKXv7dAxE5lAqQ7xSQEiZspS/5IrYTmZ HrWyIe9vLdAZN3q/23W7nhDAPnTOGIqs4OQAMZoYm7X51xkdQiYj9xVqslH+4mssz1bg 
o3Vpi7O2R88dwEPO7rrbPFJTHVUVPoC4U8z3Ry2gQH5xr3k//h399pO094G368XtZvao MSqljzGM5O0sUwA3I344UNnLfmwOhZa1RoK8vi6UGTxvqjAyI9hbIqU3fr+PAdY+3hW5 lA7lVMOh36A/qb0vQYxvFzjPYIEb204sr/RJXh+mIM8RBQBSi1wX+4MCcZCWpwDlbSV2 nwNQ== X-Gm-Message-State: AO0yUKXbRuzbPHMi+XSx36M6aaAn/BV/bXpWdh7Vkl1dUz1Br2rhlEnK HqZV9FGQM8xjwazTOK+Mvj8= X-Received: by 2002:a17:903:28c4:b0:1a1:a7f7:9718 with SMTP id kv4-20020a17090328c400b001a1a7f79718mr2864910plb.38.1679088852530; Fri, 17 Mar 2023 14:34:12 -0700 (PDT) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id e5-20020a170902b78500b0019f789cddccsm1990972pls.19.2023.03.17.14.34.11 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:34:12 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo , Julia Lawall Subject: [PATCH 16/32] sched_ext: Implement runnable task stall watchdog Date: Fri, 17 Mar 2023 11:33:17 -1000 Message-Id: <20230317213333.2174969-17-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE, SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760653837356391976?= X-GMAIL-MSGID: =?utf-8?q?1760653837356391976?= From: David Vernet The most common and critical way that a BPF scheduler can misbehave is by failing to run runnable tasks for too long. This patch implements a watchdog. * All tasks record when they become runnable. * A watchdog work periodically scans all runnable tasks. If any task has stayed runnable for too long, the BPF scheduler is aborted. * scheduler_tick() monitors whether the watchdog itself is stuck. If so, the BPF scheduler is aborted. Because the watchdog only scans the tasks which are currently runnable and usually very infrequently, the overhead should be negligible. scx_example_qmap is updated so that it can be told to stall user and/or kernel tasks. 
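For example, the watchdog can be exercised with the knob added to scx_example_qmap (an illustrative invocation; the count of 100 is arbitrary):

# ./scx_example_qmap -t 100

Every 100th user thread is silently skipped in ops.enqueue() and is never dispatched, so it eventually exceeds qmap's 5 second timeout_ms and the watchdog aborts the BPF scheduler.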
A detected task stall looks like the following: sched_ext: BPF scheduler "qmap" errored, disabling sched_ext: runnable task stall (dbus-daemon[953] failed to run for 6.478s) scx_check_timeout_workfn+0x10e/0x1b0 process_one_work+0x287/0x560 worker_thread+0x234/0x420 kthread+0xe9/0x100 ret_from_fork+0x1f/0x30 A detected watchdog stall: sched_ext: BPF scheduler "qmap" errored, disabling sched_ext: runnable task stall (watchdog failed to check in for 5.001s) scheduler_tick+0x2eb/0x340 update_process_times+0x7a/0x90 tick_sched_timer+0xd8/0x130 __hrtimer_run_queues+0x178/0x3b0 hrtimer_interrupt+0xfc/0x390 __sysvec_apic_timer_interrupt+0xb7/0x2b0 sysvec_apic_timer_interrupt+0x90/0xb0 asm_sysvec_apic_timer_interrupt+0x1b/0x20 default_idle+0x14/0x20 arch_cpu_idle+0xf/0x20 default_idle_call+0x50/0x90 do_idle+0xe8/0x240 cpu_startup_entry+0x1d/0x20 kernel_init+0x0/0x190 start_kernel+0x0/0x392 start_kernel+0x324/0x392 x86_64_start_reservations+0x2a/0x2c x86_64_start_kernel+0x104/0x109 secondary_startup_64_no_verify+0xce/0xdb Note that this patch exposes scx_ops_error[_type]() in kernel/sched/ext.h to inline scx_notify_sched_tick(). v2: Julia Lawall noticed that the watchdog code was mixing msecs and jiffies. Fix by using jiffies for everything. Signed-off-by: David Vernet Reviewed-by: Tejun Heo Signed-off-by: Tejun Heo Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden Cc: Julia Lawall --- include/linux/sched/ext.h | 13 +++ init/init_task.c | 2 + kernel/sched/core.c | 3 + kernel/sched/ext.c | 128 +++++++++++++++++++++++-- kernel/sched/ext.h | 25 +++++ kernel/sched/sched.h | 1 + tools/sched_ext/scx_example_qmap.bpf.c | 12 +++ tools/sched_ext/scx_example_qmap.c | 12 ++- 8 files changed, 185 insertions(+), 11 deletions(-) diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 4b6b9386e2f8..7a4d088a2378 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -59,6 +59,7 @@ enum scx_exit_type { SCX_EXIT_ERROR = 1024, /* runtime error, error msg contains details */ SCX_EXIT_ERROR_BPF, /* ERROR but triggered through scx_bpf_error() */ + SCX_EXIT_ERROR_STALL, /* watchdog detected stalled runnable tasks */ }; /* @@ -315,6 +316,15 @@ struct sched_ext_ops { */ u64 flags; + /** + * timeout_ms - The maximum amount of time, in milliseconds, that a + * runnable task should be able to wait before being scheduled. The + * maximum timeout may not exceed the default timeout of 30 seconds. + * + * Defaults to the maximum allowed timeout value of 30 seconds. 
+ */ + u32 timeout_ms; + /** * name - BPF scheduler's name * @@ -348,6 +358,7 @@ enum scx_ent_flags { SCX_TASK_OPS_PREPPED = 1 << 8, /* prepared for BPF scheduler enable */ SCX_TASK_OPS_ENABLED = 1 << 9, /* task has BPF scheduler enabled */ + SCX_TASK_WATCHDOG_RESET = 1 << 16, /* task watchdog counter should be reset */ SCX_TASK_DEQD_FOR_SLEEP = 1 << 17, /* last dequeue was for SLEEP */ SCX_TASK_CURSOR = 1 << 31, /* iteration cursor, not a task */ @@ -381,12 +392,14 @@ enum scx_kf_mask { struct sched_ext_entity { struct scx_dispatch_q *dsq; struct list_head dsq_node; + struct list_head watchdog_node; u32 flags; /* protected by rq lock */ u32 weight; s32 sticky_cpu; s32 holding_cpu; u32 kf_mask; /* see scx_kf_mask above */ atomic64_t ops_state; + unsigned long runnable_at; /* BPF scheduler modifiable fields */ diff --git a/init/init_task.c b/init/init_task.c index bdbc663107bf..913194aab623 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -106,9 +106,11 @@ struct task_struct init_task #ifdef CONFIG_SCHED_CLASS_EXT .scx = { .dsq_node = LIST_HEAD_INIT(init_task.scx.dsq_node), + .watchdog_node = LIST_HEAD_INIT(init_task.scx.watchdog_node), .sticky_cpu = -1, .holding_cpu = -1, .ops_state = ATOMIC_INIT(0), + .runnable_at = INITIAL_JIFFIES, .slice = SCX_SLICE_DFL, }, #endif diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a3fb6a05d131..9f721df512f0 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4483,12 +4483,14 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) #ifdef CONFIG_SCHED_CLASS_EXT p->scx.dsq = NULL; INIT_LIST_HEAD(&p->scx.dsq_node); + INIT_LIST_HEAD(&p->scx.watchdog_node); p->scx.flags = 0; p->scx.weight = 0; p->scx.sticky_cpu = -1; p->scx.holding_cpu = -1; p->scx.kf_mask = 0; atomic64_set(&p->scx.ops_state, 0); + p->scx.runnable_at = INITIAL_JIFFIES; p->scx.slice = SCX_SLICE_DFL; #endif @@ -5651,6 +5653,7 @@ void scheduler_tick(void) if (sched_feat(LATENCY_WARN) && resched_latency) resched_latency_warn(cpu, resched_latency); + scx_notify_sched_tick(); perf_event_task_tick(); #ifdef CONFIG_SMP diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 9406869fbc90..92e0780b182f 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -9,6 +9,7 @@ enum scx_internal_consts { SCX_NR_ONLINE_OPS = SCX_OP_IDX(init), SCX_DSP_DFL_MAX_BATCH = 32, + SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ, }; enum scx_ops_enable_state { @@ -87,6 +88,23 @@ static struct scx_exit_info scx_exit_info; static atomic64_t scx_nr_rejected = ATOMIC64_INIT(0); +/* + * The maximum amount of time in jiffies that a task may be runnable without + * being scheduled on a CPU. If this timeout is exceeded, it will trigger + * scx_ops_error(). + */ +unsigned long scx_watchdog_timeout; + +/* + * The last time the delayed work was run. This delayed work relies on + * ksoftirqd being able to run to service timer interrupts, so it's possible + * that this work itself could get wedged. To account for this, we check that + * it's not stalled in the timer tick, and trigger an error if it is. + */ +unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES; + +static struct delayed_work scx_watchdog_work; + /* idle tracking */ #ifdef CONFIG_SMP #ifdef CONFIG_CPUMASK_OFFSTACK @@ -146,10 +164,6 @@ static DEFINE_PER_CPU(struct scx_dsp_ctx, scx_dsp_ctx); void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags); -__printf(2, 3) static void scx_ops_error_type(enum scx_exit_type type, - const char *fmt, ...); -#define scx_ops_error(fmt, args...) 
\ - scx_ops_error_type(SCX_EXIT_ERROR, fmt, ##args) struct scx_task_iter { struct sched_ext_entity cursor; @@ -699,6 +713,27 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags, dispatch_enqueue(&scx_dsq_global, p, enq_flags); } +static bool watchdog_task_watched(const struct task_struct *p) +{ + return !list_empty(&p->scx.watchdog_node); +} + +static void watchdog_watch_task(struct rq *rq, struct task_struct *p) +{ + lockdep_assert_rq_held(rq); + if (p->scx.flags & SCX_TASK_WATCHDOG_RESET) + p->scx.runnable_at = jiffies; + p->scx.flags &= ~SCX_TASK_WATCHDOG_RESET; + list_add_tail(&p->scx.watchdog_node, &rq->scx.watchdog_list); +} + +static void watchdog_unwatch_task(struct task_struct *p, bool reset_timeout) +{ + list_del_init(&p->scx.watchdog_node); + if (reset_timeout) + p->scx.flags |= SCX_TASK_WATCHDOG_RESET; +} + static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags) { int sticky_cpu = p->scx.sticky_cpu; @@ -717,9 +752,12 @@ static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p)) sticky_cpu = cpu_of(rq); - if (p->scx.flags & SCX_TASK_QUEUED) + if (p->scx.flags & SCX_TASK_QUEUED) { + WARN_ON_ONCE(!watchdog_task_watched(p)); return; + } + watchdog_watch_task(rq, p); p->scx.flags |= SCX_TASK_QUEUED; rq->scx.nr_running++; add_nr_running(rq, 1); @@ -731,6 +769,8 @@ static void ops_dequeue(struct task_struct *p, u64 deq_flags) { u64 opss; + watchdog_unwatch_task(p, false); + /* acquire ensures that we see the preceding updates on QUEUED */ opss = atomic64_read_acquire(&p->scx.ops_state); @@ -775,8 +815,10 @@ static void dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags { struct scx_rq *scx_rq = &rq->scx; - if (!(p->scx.flags & SCX_TASK_QUEUED)) + if (!(p->scx.flags & SCX_TASK_QUEUED)) { + WARN_ON_ONCE(watchdog_task_watched(p)); return; + } ops_dequeue(p, deq_flags); @@ -1300,6 +1342,8 @@ static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first) } p->se.exec_start = rq_clock_task(rq); + + watchdog_unwatch_task(p, true); } static void put_prev_task_scx(struct rq *rq, struct task_struct *p) @@ -1343,11 +1387,14 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p) */ if (p->scx.flags & SCX_TASK_BAL_KEEP) { p->scx.flags &= ~SCX_TASK_BAL_KEEP; + watchdog_watch_task(rq, p); dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD); return; } if (p->scx.flags & SCX_TASK_QUEUED) { + watchdog_watch_task(rq, p); + /* * If @p has slice left and balance_scx() didn't tag it for * keeping, @p is getting preempted by a higher priority @@ -1576,6 +1623,49 @@ static void reset_idle_masks(void) {} #endif /* CONFIG_SMP */ +static bool check_rq_for_timeouts(struct rq *rq) +{ + struct task_struct *p; + struct rq_flags rf; + bool timed_out = false; + + rq_lock_irqsave(rq, &rf); + list_for_each_entry(p, &rq->scx.watchdog_list, scx.watchdog_node) { + unsigned long last_runnable = p->scx.runnable_at; + + if (unlikely(time_after(jiffies, + last_runnable + scx_watchdog_timeout))) { + u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable); + + scx_ops_error_type(SCX_EXIT_ERROR_STALL, + "%s[%d] failed to run for %u.%03us", + p->comm, p->pid, + dur_ms / 1000, dur_ms % 1000); + timed_out = true; + break; + } + } + rq_unlock_irqrestore(rq, &rf); + + return timed_out; +} + +static void scx_watchdog_workfn(struct work_struct *work) +{ + int cpu; + + scx_watchdog_timestamp = jiffies; + + for_each_online_cpu(cpu) { + if 
(unlikely(check_rq_for_timeouts(cpu_rq(cpu)))) + break; + + cond_resched(); + } + queue_delayed_work(system_unbound_wq, to_delayed_work(work), + scx_watchdog_timeout / 2); +} + static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued) { update_curr_scx(rq); @@ -1607,7 +1697,7 @@ static int scx_ops_prepare_task(struct task_struct *p, struct task_group *tg) } } - p->scx.flags |= SCX_TASK_OPS_PREPPED; + p->scx.flags |= (SCX_TASK_OPS_PREPPED | SCX_TASK_WATCHDOG_RESET); return 0; } @@ -1926,6 +2016,8 @@ static void scx_ops_disable_workfn(struct kthread_work *work) break; } + cancel_delayed_work_sync(&scx_watchdog_work); + switch (type) { case SCX_EXIT_UNREG: reason = "BPF scheduler unregistered"; @@ -1939,6 +2031,9 @@ static void scx_ops_disable_workfn(struct kthread_work *work) case SCX_EXIT_ERROR_BPF: reason = "scx_bpf_error"; break; + case SCX_EXIT_ERROR_STALL: + reason = "runnable task stall"; + break; default: reason = ""; } @@ -2123,8 +2218,8 @@ static void scx_ops_error_irq_workfn(struct irq_work *irq_work) static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn); -__printf(2, 3) static void scx_ops_error_type(enum scx_exit_type type, - const char *fmt, ...) +__printf(2, 3) void scx_ops_error_type(enum scx_exit_type type, + const char *fmt, ...) { struct scx_exit_info *ei = &scx_exit_info; int none = SCX_EXIT_NONE; @@ -2223,6 +2318,14 @@ static int scx_ops_enable(struct sched_ext_ops *ops) goto err_disable; } + scx_watchdog_timeout = SCX_WATCHDOG_MAX_TIMEOUT; + if (ops->timeout_ms) + scx_watchdog_timeout = msecs_to_jiffies(ops->timeout_ms); + + scx_watchdog_timestamp = jiffies; + queue_delayed_work(system_unbound_wq, &scx_watchdog_work, + scx_watchdog_timeout / 2); + /* * Lock out forks before opening the floodgate so that they don't wander * into the operations prematurely. @@ -2482,6 +2585,11 @@ static int bpf_scx_init_member(const struct btf_type *t, if (ret == 0) return -EINVAL; return 1; + case offsetof(struct sched_ext_ops, timeout_ms): + if (*(u32 *)(udata + moff) > SCX_WATCHDOG_MAX_TIMEOUT) + return -E2BIG; + ops->timeout_ms = *(u32 *)(udata + moff); + return 1; } return 0; @@ -2579,9 +2687,11 @@ void __init init_sched_ext_class(void) struct rq *rq = cpu_rq(cpu); init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL); + INIT_LIST_HEAD(&rq->scx.watchdog_list); } register_sysrq_key('S', &sysrq_sched_ext_reset_op); + INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn); } diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index f8d5682deacf..7dfa7b888487 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -56,6 +56,8 @@ enum scx_deq_flags { extern const struct sched_class ext_sched_class; extern const struct bpf_verifier_ops bpf_sched_ext_verifier_ops; extern const struct file_operations sched_ext_fops; +extern unsigned long scx_watchdog_timeout; +extern unsigned long scx_watchdog_timestamp; DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled); #define scx_enabled() static_branch_unlikely(&__scx_ops_enabled) @@ -67,6 +69,28 @@ void scx_post_fork(struct task_struct *p); void scx_cancel_fork(struct task_struct *p); void init_sched_ext_class(void); +__printf(2, 3) void scx_ops_error_type(enum scx_exit_type type, + const char *fmt, ...); +#define scx_ops_error(fmt, args...) 
\ + scx_ops_error_type(SCX_EXIT_ERROR, fmt, ##args) + +static inline void scx_notify_sched_tick(void) +{ + unsigned long last_check; + + if (!scx_enabled()) + return; + + last_check = scx_watchdog_timestamp; + if (unlikely(time_after(jiffies, last_check + scx_watchdog_timeout))) { + u32 dur_ms = jiffies_to_msecs(jiffies - last_check); + + scx_ops_error_type(SCX_EXIT_ERROR_STALL, + "watchdog failed to check in for %u.%03us", + dur_ms / 1000, dur_ms % 1000); + } +} + static inline const struct sched_class *next_active_class(const struct sched_class *class) { class++; @@ -98,6 +122,7 @@ static inline int scx_fork(struct task_struct *p) { return 0; } static inline void scx_post_fork(struct task_struct *p) {} static inline void scx_cancel_fork(struct task_struct *p) {} static inline void init_sched_ext_class(void) {} +static inline void scx_notify_sched_tick(void) {} #define for_each_active_class for_each_class #define for_balance_class_range for_class_range diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index ae4cd306bf28..bd9851ee0257 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -688,6 +688,7 @@ struct cfs_rq { #ifdef CONFIG_SCHED_CLASS_EXT struct scx_rq { struct scx_dispatch_q local_dsq; + struct list_head watchdog_list; u64 ops_qseq; u64 extra_enq_flags; /* see move_task_to_local_dsq() */ u32 nr_running; diff --git a/tools/sched_ext/scx_example_qmap.bpf.c b/tools/sched_ext/scx_example_qmap.bpf.c index 60e260577a3a..2a969c68a2e4 100644 --- a/tools/sched_ext/scx_example_qmap.bpf.c +++ b/tools/sched_ext/scx_example_qmap.bpf.c @@ -25,6 +25,8 @@ char _license[] SEC("license") = "GPL"; const volatile u64 slice_ns = SCX_SLICE_DFL; +const volatile u32 stall_user_nth; +const volatile u32 stall_kernel_nth; u32 test_error_cnt; @@ -120,11 +122,20 @@ static int weight_to_idx(u32 weight) void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags) { + static u32 user_cnt, kernel_cnt; struct task_ctx *tctx; u32 pid = p->pid; int idx = weight_to_idx(p->scx.weight); void *ring; + if (p->flags & PF_KTHREAD) { + if (stall_kernel_nth && !(++kernel_cnt % stall_kernel_nth)) + return; + } else { + if (stall_user_nth && !(++user_cnt % stall_user_nth)) + return; + } + if (test_error_cnt && !--test_error_cnt) scx_bpf_error("test triggering error"); @@ -237,5 +248,6 @@ struct sched_ext_ops qmap_ops = { .dispatch = (void *)qmap_dispatch, .prep_enable = (void *)qmap_prep_enable, .exit = (void *)qmap_exit, + .timeout_ms = 5000U, .name = "qmap", }; diff --git a/tools/sched_ext/scx_example_qmap.c b/tools/sched_ext/scx_example_qmap.c index 56c85c9fa979..3f98534c2a9c 100644 --- a/tools/sched_ext/scx_example_qmap.c +++ b/tools/sched_ext/scx_example_qmap.c @@ -20,10 +20,12 @@ const char help_fmt[] = "\n" "See the top-level comment in .bpf.c for more details.\n" "\n" -"Usage: %s [-s SLICE_US] [-e COUNT]\n" +"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT]\n" "\n" " -s SLICE_US Override slice duration\n" " -e COUNT Trigger scx_bpf_error() after COUNT enqueues\n" +" -t COUNT Stall every COUNT'th user thread\n" +" -T COUNT Stall every COUNT'th kernel thread\n" " -h Display this help and exit\n"; static volatile int exit_req; @@ -47,7 +49,7 @@ int main(int argc, char **argv) skel = scx_example_qmap__open(); assert(skel); - while ((opt = getopt(argc, argv, "s:e:tTd:h")) != -1) { + while ((opt = getopt(argc, argv, "s:e:t:T:d:h")) != -1) { switch (opt) { case 's': skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000; @@ -55,6 +57,12 @@ int main(int argc, char **argv) case 'e': 
skel->bss->test_error_cnt = strtoul(optarg, NULL, 0); break; + case 't': + skel->rodata->stall_user_nth = strtoul(optarg, NULL, 0); + break; + case 'T': + skel->rodata->stall_kernel_nth = strtoul(optarg, NULL, 0); + break; default: fprintf(stderr, help_fmt, basename(argv[0])); return opt != 'h'; From patchwork Fri Mar 17 21:33:18 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71495 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp16636wrt; Fri, 17 Mar 2023 15:22:43 -0700 (PDT) X-Google-Smtp-Source: AK7set8PKuoopcxWZOYvHYXPq0jSHUzjDYi+yeJKuUhinCBLcDYpvzUymc8OP4DL/ycyA8GwvvM7 X-Received: by 2002:a17:903:2292:b0:197:8e8e:f15 with SMTP id b18-20020a170903229200b001978e8e0f15mr11426347plh.6.1679091762710; Fri, 17 Mar 2023 15:22:42 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679091762; cv=none; d=google.com; s=arc-20160816; b=qyunUV9qGHDMVOVb4eqfDjcZBB5Ha5CuvJADDOtA2+Rl5DrAM8+Yhy5nS/WR8whQjZ TK4QnpUOyZuiwg1OhFQBEKYlH/q9ZqQiHC4Xw26WivFKofthi66KoL2T8YtyA0QoSFV7 mYkDwzqyPqIy70BLIddET4it3m5iw85rkiTezjgYL5CmoJUeqUCxLcr0/8GZVefm4VrS M+ik1w8B8wJYbzg8LJeaR1sc6ewVAMKddpOfAvflTvTCGjcGtLoInc4JKXMdFWC5S3Xd qfpVdxZswdMBqp3h+bkSEBaMm7FL6cuzyeQSB2BNhzEF6lSCh/zb30sdNSuGLhpL5P25 oOUg== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=0ptHP2lhItNL95igmnoTpFfZGEIfkLjzOV4nlaJyR3w=; b=E3kk7I/p4sf2kQGeJFcjJ/xvx6nXmcSddHTdqqDotgjmAA2JqSWmnSPZav0a7s1X8l HhUprVBVDmYRj6Tvd9AckCRonS2Ufr8YZRBi/pWNTObTe6h6Ej9Mh6iAHsoq2DOuNnlf GG3amQxQ0ESqPjxMcCbxycJQmDSck/tSd+ItZK0ExiEWfb12GZYPemCgjCtKELehcgh8 CplZEcJQ8TP1HtYEP1pAeG7xlTInHeE2xNnrnDbQFTUca/qf8377a66SlDMngHoVA5Wz ohkmBeAG6gqmVEf/iBTMhf9y1zrTmkNTPkOowl+dpO6bcbB1tet8IeZIQGLo8lW+iQ7t IffQ== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=mxraJSkI; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: from out1.vger.email (out1.vger.email. 
[2620:137:e000::1:20]) by mx.google.com with ESMTP id k8-20020a170902c40800b001a1b6562148si130628plk.96.2023.03.17.15.22.26; Fri, 17 Mar 2023 15:22:42 -0700 (PDT) Received-SPF: pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) client-ip=2620:137:e000::1:20; Authentication-Results: mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=mxraJSkI; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231246AbjCQVgh (ORCPT + 99 others); Fri, 17 Mar 2023 17:36:37 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:52234 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231161AbjCQVgP (ORCPT ); Fri, 17 Mar 2023 17:36:15 -0400 Received: from mail-pj1-x1029.google.com (mail-pj1-x1029.google.com [IPv6:2607:f8b0:4864:20::1029]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 07F6A38477; Fri, 17 Mar 2023 14:35:32 -0700 (PDT) Received: by mail-pj1-x1029.google.com with SMTP id cn6so6680695pjb.2; Fri, 17 Mar 2023 14:35:32 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; t=1679088854; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=0ptHP2lhItNL95igmnoTpFfZGEIfkLjzOV4nlaJyR3w=; b=mxraJSkI3+hxKN6bG72vHdJdSZ4hDbKdmWm9TFLuqXrdKZc7NiXj1HW0RvduPhVRTY 5CB/I4/yZ+4I6H1sJbKwEmpG0+jbTJxpGlNP76Obo9AJbvyUHg6ENP22a0OirCXFFRzV xXvKEzZOgQFtIpv8uFVJCAlcdrQ6gE98jUPym1doZYt+GqjU94WXl0fOIZqYrW5bf1vw DQv71EqYaU0hWeJ9oEB1BktmHrRUAEzkf8lSlmiUaI2Oq3F8lYGr9QtBMoH5qjI9ReTR w78aPHRuwkmmqh4MvVfybuzF7QdDgrivtE47gWCWqCvg0ragNLa9delfVCVGNBnQb7Z6 lrxQ== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; t=1679088854; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=0ptHP2lhItNL95igmnoTpFfZGEIfkLjzOV4nlaJyR3w=; b=5lURyX0b+rgLjx473oMXBOoImVPGVpmtgjBHGvKmNHIuZUdFuHinD3izoM2kSuc2vC afiYL8arjnj4MoQLzKTamZGgH0ACcQeO2dU3MvDdkq9URO0Xngz//2pWOXvbgSsMNGty oH+gqYJovzaw2P4cs4bDit4DjhpPZgm5eyyu1DVQpr0EVkn6FXypQZt2tMo9Owq+1nuI wJql5uiggLykwORbTg2TaDuhm5EZM3ALpZn4HTBWDSoqqBga0iFeJyrpxG5WMe20SmK1 YejPHB4y7AgJZiOMcxi8Xp1SBotn1DyXO2EMaC8+6A9tze4u5LEHagAduMhaHAm92D8U ITXw== X-Gm-Message-State: AO0yUKUj53oQ4vB+amb+7HTVuflMK3htosT7wbvoUgW53glcLgr51pU+ YilxFd/xxgiK03DS5JQ1b5xnuvXpmDc= X-Received: by 2002:a17:902:c944:b0:19c:94ad:cbe8 with SMTP id i4-20020a170902c94400b0019c94adcbe8mr11913651pla.36.1679088854326; Fri, 17 Mar 2023 14:34:14 -0700 (PDT) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id q12-20020a170902b10c00b0019a70a42b0asm1985258plr.169.2023.03.17.14.34.13 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:34:13 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 17/32] sched_ext: Allow BPF schedulers to disallow specific tasks from joining SCHED_EXT Date: Fri, 17 Mar 2023 11:33:18 -1000 Message-Id: <20230317213333.2174969-18-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE, SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760655324437491994?= X-GMAIL-MSGID: =?utf-8?q?1760655324437491994?= BPF schedulers might not want to schedule certain tasks - e.g. kernel threads. This patch adds p->scx.disallow which can be set by BPF schedulers in such cases. The field can be changed anytime and setting it in ops.prep_enable() guarantees that the task can never be scheduled by sched_ext. scx_example_qmap is updated with the -d option to disallow a specific PID: # echo $$ 1092 # egrep '(policy)|(ext\.enabled)' /proc/self/sched policy : 0 ext.enabled : 0 # ./set-scx 1092 # egrep '(policy)|(ext\.enabled)' /proc/self/sched policy : 7 ext.enabled : 0 Run "scx_example_qmap -d 1092" in another terminal. # grep rejected /sys/kernel/debug/sched/ext nr_rejected : 1 # egrep '(policy)|(ext\.enabled)' /proc/self/sched policy : 0 ext.enabled : 0 # ./set-scx 1092 setparam failed for 1092 (Permission denied) Signed-off-by: Tejun Heo Suggested-by: Barret Rhoden Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- include/linux/sched/ext.h | 12 ++++++++ kernel/sched/core.c | 4 +++ kernel/sched/ext.c | 38 ++++++++++++++++++++++++++ kernel/sched/ext.h | 3 ++ tools/sched_ext/scx_example_qmap.bpf.c | 4 +++ tools/sched_ext/scx_example_qmap.c | 8 +++++- 6 files changed, 68 insertions(+), 1 deletion(-) diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 7a4d088a2378..da85bc3751ad 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -411,6 +411,18 @@ struct sched_ext_entity { */ u64 slice; + /* + * If set, reject future sched_setscheduler(2) calls updating the policy + * to %SCHED_EXT with -%EACCES. 
+ * + * If set from ops.prep_enable() and the task's policy is already + * %SCHED_EXT, which can happen while the BPF scheduler is being loaded + * or by inhering the parent's policy during fork, the task's policy is + * rejected and forcefully reverted to %SCHED_NORMAL. The number of such + * events are reported through /sys/kernel/debug/sched_ext::nr_rejected. + */ + bool disallow; /* reject switching into SCX */ + /* cold fields */ struct list_head tasks_node; }; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9f721df512f0..08a72f146f9d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7665,6 +7665,10 @@ static int __sched_setscheduler(struct task_struct *p, goto unlock; } + retval = scx_check_setscheduler(p, policy); + if (retval) + goto unlock; + /* * If not changing anything there's no need to proceed further, * but store a possible modification of reset_on_fork. diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 92e0780b182f..52744d7dd181 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1687,6 +1687,8 @@ static int scx_ops_prepare_task(struct task_struct *p, struct task_group *tg) WARN_ON_ONCE(p->scx.flags & SCX_TASK_OPS_PREPPED); + p->scx.disallow = false; + if (SCX_HAS_OP(prep_enable)) { struct scx_enable_args args = { }; @@ -1697,6 +1699,27 @@ static int scx_ops_prepare_task(struct task_struct *p, struct task_group *tg) } } + if (p->scx.disallow) { + struct rq *rq; + struct rq_flags rf; + + rq = task_rq_lock(p, &rf); + + /* + * We're either in fork or load path and @p->policy will be + * applied right after. Reverting @p->policy here and rejecting + * %SCHED_EXT transitions from scx_check_setscheduler() + * guarantees that if ops.prep_enable() sets @p->disallow, @p + * can never be in SCX. + */ + if (p->policy == SCHED_EXT) { + p->policy = SCHED_NORMAL; + atomic64_inc(&scx_nr_rejected); + } + + task_rq_unlock(rq, p, &rf); + } + p->scx.flags |= (SCX_TASK_OPS_PREPPED | SCX_TASK_WATCHDOG_RESET); return 0; } @@ -1845,6 +1868,18 @@ static void switching_to_scx(struct rq *rq, struct task_struct *p) static void check_preempt_curr_scx(struct rq *rq, struct task_struct *p,int wake_flags) {} static void switched_to_scx(struct rq *rq, struct task_struct *p) {} +int scx_check_setscheduler(struct task_struct *p, int policy) +{ + lockdep_assert_rq_held(task_rq(p)); + + /* if disallow, reject transitioning into SCX */ + if (scx_enabled() && READ_ONCE(p->scx.disallow) && + p->policy != policy && policy == SCHED_EXT) + return -EACCES; + + return 0; +} + /* * Omitted operations: * @@ -2528,6 +2563,9 @@ static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log, if (off >= offsetof(struct task_struct, scx.slice) && off + size <= offsetofend(struct task_struct, scx.slice)) return SCALAR_VALUE; + if (off >= offsetof(struct task_struct, scx.disallow) && + off + size <= offsetofend(struct task_struct, scx.disallow)) + return SCALAR_VALUE; } if (atype == BPF_READ) diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index 7dfa7b888487..76c94babd19e 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -67,6 +67,7 @@ void scx_pre_fork(struct task_struct *p); int scx_fork(struct task_struct *p); void scx_post_fork(struct task_struct *p); void scx_cancel_fork(struct task_struct *p); +int scx_check_setscheduler(struct task_struct *p, int policy); void init_sched_ext_class(void); __printf(2, 3) void scx_ops_error_type(enum scx_exit_type type, @@ -121,6 +122,8 @@ static inline void scx_pre_fork(struct task_struct *p) {} static inline int 
scx_fork(struct task_struct *p) { return 0; } static inline void scx_post_fork(struct task_struct *p) {} static inline void scx_cancel_fork(struct task_struct *p) {} +static inline int scx_check_setscheduler(struct task_struct *p, + int policy) { return 0; } static inline void init_sched_ext_class(void) {} static inline void scx_notify_sched_tick(void) {} diff --git a/tools/sched_ext/scx_example_qmap.bpf.c b/tools/sched_ext/scx_example_qmap.bpf.c index 2a969c68a2e4..0e4cccf878f5 100644 --- a/tools/sched_ext/scx_example_qmap.bpf.c +++ b/tools/sched_ext/scx_example_qmap.bpf.c @@ -27,6 +27,7 @@ char _license[] SEC("license") = "GPL"; const volatile u64 slice_ns = SCX_SLICE_DFL; const volatile u32 stall_user_nth; const volatile u32 stall_kernel_nth; +const volatile s32 disallow_tgid; u32 test_error_cnt; @@ -224,6 +225,9 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) s32 BPF_STRUCT_OPS(qmap_prep_enable, struct task_struct *p, struct scx_enable_args *args) { + if (p->tgid == disallow_tgid) + p->scx.disallow = true; + /* * @p is new. Let's ensure that its task_ctx is available. We can sleep * in this function and the following will automatically use GFP_KERNEL. diff --git a/tools/sched_ext/scx_example_qmap.c b/tools/sched_ext/scx_example_qmap.c index 3f98534c2a9c..d080a0c853c0 100644 --- a/tools/sched_ext/scx_example_qmap.c +++ b/tools/sched_ext/scx_example_qmap.c @@ -20,12 +20,13 @@ const char help_fmt[] = "\n" "See the top-level comment in .bpf.c for more details.\n" "\n" -"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT]\n" +"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-d PID]\n" "\n" " -s SLICE_US Override slice duration\n" " -e COUNT Trigger scx_bpf_error() after COUNT enqueues\n" " -t COUNT Stall every COUNT'th user thread\n" " -T COUNT Stall every COUNT'th kernel thread\n" +" -d PID Disallow a process from switching into SCHED_EXT (-1 for self)\n" " -h Display this help and exit\n"; static volatile int exit_req; @@ -63,6 +64,11 @@ int main(int argc, char **argv) case 'T': skel->rodata->stall_kernel_nth = strtoul(optarg, NULL, 0); break; + case 'd': + skel->rodata->disallow_tgid = strtol(optarg, NULL, 0); + if (skel->rodata->disallow_tgid < 0) + skel->rodata->disallow_tgid = getpid(); + break; default: fprintf(stderr, help_fmt, basename(argv[0])); return opt != 'h'; From patchwork Fri Mar 17 21:33:19 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71482 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp8141wrt; Fri, 17 Mar 2023 15:00:00 -0700 (PDT) X-Google-Smtp-Source: AK7set8Trsuui+yoPNwOjHtrP/hT5q9wyDrUnjGwbKrE3x8xoUMBsNl8h2yicHL8ch1XdbItMBoM X-Received: by 2002:a05:6a00:409a:b0:626:fe8b:48a6 with SMTP id bw26-20020a056a00409a00b00626fe8b48a6mr1935774pfb.3.1679090400141; Fri, 17 Mar 2023 15:00:00 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679090400; cv=none; d=google.com; s=arc-20160816; b=fljCQZsz9/HzV/8dpPhdefnwi0OMoGxyxJLAtPazkYez+ayZPzOfqiPlWdlj5w0xhi AWWnpX5aHQrVOwEWxOdY0mB0NJjy9T51LBjhY4IWFIrOjZ71CqgFuAIH4Fi4EqZfCWjm WCZnNp9xXK5vwTwi9tEeQYvsyLARsiYPrM+oUTUYq5cPSue/HZj1mFRbroWxzVlwFauS gtlD38uQMXtO+C7DitbnPf/g+uyOk8frd+GMZyK+DJXa+tAqq3MqaRWpRtrFECKRzxKp yyBlYy7Wv+iKYtOCUrvPwCbD6vXimRO9FFtGVMdCb+P4dwWHMcYeoaTmeLkOpWKX+DOO npTA== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version 
X-Gm-Message-State: AO0yUKWgeVm+EgfFE458HAi52YUwcwndAzANCRq43vnKOuWjYy3OZGyB c+r/v+X/uXValEruhk9mCyA= X-Received: by 2002:a05:6a20:4295:b0:cb:6e9e:e6df with SMTP id o21-20020a056a20429500b000cb6e9ee6dfmr10452098pzj.14.1679088856213; Fri, 17 Mar 2023 14:34:16 -0700 (PDT) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id z5-20020a6552c5000000b0050301521335sm1879410pgp.11.2023.03.17.14.34.15 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:34:15 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 18/32] sched_ext: Allow BPF schedulers to switch all eligible tasks into sched_ext Date: Fri, 17 Mar 2023 11:33:19 -1000 Message-Id: <20230317213333.2174969-19-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,RCVD_IN_MSPIKE_H3, RCVD_IN_MSPIKE_WL,SPF_HELO_NONE,SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760653895011331130?= X-GMAIL-MSGID: =?utf-8?q?1760653895011331130?= Currently, to use sched_ext, each task has to be put into sched_ext using sched_setscheduler(2). However, some BPF schedulers and use cases might prefer to service all eligible tasks. This patch adds a new kfunc helper, scx_bpf_switch_all(), that BPF schedulers can call from ops.init() to switch all SCHED_NORMAL, SCHED_BATCH and SCHED_IDLE tasks into sched_ext. This has the benefit that the scheduler swaps are transparent to the users and applications. As we know that CFS is not being used when scx_bpf_switch_all() is used, we can also disable hot path entry points with static_branches. Both the simple and qmap example schedulers are updated to switch all tasks by default to ease testing. '-p' option is added which enables the original behavior of switching only tasks which are explicitly on SCHED_EXT. v2: In the example schedulers, switch all tasks by default. 
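For reference, the BPF-scheduler side of this interface boils down to the sketch below. It is distilled from the example schedulers updated by this patch and is not itself part of the patch; the "minimal" names are illustrative and the enqueue/dispatch callbacks are omitted. The only requirement is that scx_bpf_switch_all() is called from ops.init().

/* Minimal sketch: opt all eligible tasks into SCX from ops.init(). */
#include "scx_common.bpf.h"

char _license[] SEC("license") = "GPL";

/* set from userspace before load to keep the old opt-in behavior */
const volatile bool switch_partial;

s32 BPF_STRUCT_OPS(minimal_init)
{
	if (!switch_partial)
		scx_bpf_switch_all();
	return 0;
}

SEC(".struct_ops")
struct sched_ext_ops minimal_ops = {
	/* enqueue/dispatch/exit callbacks omitted for brevity */
	.init		= (void *)minimal_init,
	.name		= "minimal",
};
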
Signed-off-by: Tejun Heo Suggested-by: Barret Rhoden Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- kernel/sched/core.c | 8 +++-- kernel/sched/ext.c | 45 ++++++++++++++++++++++++ kernel/sched/ext.h | 5 +++ tools/sched_ext/scx_common.bpf.h | 1 + tools/sched_ext/scx_example_qmap.bpf.c | 9 +++++ tools/sched_ext/scx_example_qmap.c | 8 +++-- tools/sched_ext/scx_example_simple.bpf.c | 10 ++++++ tools/sched_ext/scx_example_simple.c | 8 +++-- 8 files changed, 87 insertions(+), 7 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 08a72f146f9d..bc90327f950d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1206,7 +1206,7 @@ bool sched_can_stop_tick(struct rq *rq) * if there's more than one we need the tick for involuntary * preemption. */ - if (rq->nr_running > 1) + if (!scx_switched_all() && rq->nr_running > 1) return false; return true; @@ -5657,8 +5657,10 @@ void scheduler_tick(void) perf_event_task_tick(); #ifdef CONFIG_SMP - rq->idle_balance = idle_cpu(cpu); - trigger_load_balance(rq); + if (!scx_switched_all()) { + rq->idle_balance = idle_cpu(cpu); + trigger_load_balance(rq); + } #endif } diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 52744d7dd181..ed2f13d90fcb 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -73,6 +73,10 @@ static DEFINE_MUTEX(scx_ops_enable_mutex); DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled); DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem); static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED); +static bool scx_switch_all_req; +static bool scx_switching_all; +DEFINE_STATIC_KEY_FALSE(__scx_switched_all); + static struct sched_ext_ops scx_ops; static bool scx_warned_zero_slice; @@ -2015,6 +2019,8 @@ bool task_on_scx(struct task_struct *p) { if (!scx_enabled() || scx_ops_disabling()) return false; + if (READ_ONCE(scx_switching_all)) + return true; return p->policy == SCHED_EXT; } @@ -2141,6 +2147,9 @@ static void scx_ops_disable_workfn(struct kthread_work *work) */ mutex_lock(&scx_ops_enable_mutex); + static_branch_disable(&__scx_switched_all); + WRITE_ONCE(scx_switching_all, false); + /* avoid racing against fork */ cpus_read_lock(); percpu_down_write(&scx_fork_rwsem); @@ -2325,6 +2334,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops) */ cpus_read_lock(); + scx_switch_all_req = false; if (scx_ops.init) { ret = SCX_CALL_OP_RET(SCX_KF_INIT, init); if (ret) { @@ -2440,6 +2450,8 @@ static int scx_ops_enable(struct sched_ext_ops *ops) * transitions here are synchronized against sched_ext_free() through * scx_tasks_lock. 
*/ + WRITE_ONCE(scx_switching_all, scx_switch_all_req); + scx_task_iter_init(&sti); while ((p = scx_task_iter_next_filtered_locked(&sti))) { if (READ_ONCE(p->__state) != TASK_DEAD) { @@ -2471,6 +2483,9 @@ static int scx_ops_enable(struct sched_ext_ops *ops) goto err_disable_unlock; } + if (scx_switch_all_req) + static_branch_enable_cpuslocked(&__scx_switched_all); + cpus_read_unlock(); mutex_unlock(&scx_ops_enable_mutex); @@ -2505,6 +2520,9 @@ static int scx_debug_show(struct seq_file *m, void *v) mutex_lock(&scx_ops_enable_mutex); seq_printf(m, "%-30s: %s\n", "ops", scx_ops.name); seq_printf(m, "%-30s: %ld\n", "enabled", scx_enabled()); + seq_printf(m, "%-30s: %d\n", "switching_all", + READ_ONCE(scx_switching_all)); + seq_printf(m, "%-30s: %ld\n", "switched_all", scx_switched_all()); seq_printf(m, "%-30s: %s\n", "enable_state", scx_ops_enable_state_str[scx_ops_enable_state()]); seq_printf(m, "%-30s: %llu\n", "nr_rejected", @@ -2743,6 +2761,31 @@ __diag_push(); __diag_ignore_all("-Wmissing-prototypes", "Global functions as their definitions will be in vmlinux BTF"); +/** + * scx_bpf_switch_all - Switch all tasks into SCX + * @into_scx: switch direction + * + * If @into_scx is %true, all existing and future non-dl/rt tasks are switched + * to SCX. If %false, only tasks which have %SCHED_EXT explicitly set are put on + * SCX. The actual switching is asynchronous. Can be called from ops.init(). + */ +void scx_bpf_switch_all(void) +{ + if (!scx_kf_allowed(SCX_KF_INIT)) + return; + + scx_switch_all_req = true; +} + +BTF_SET8_START(scx_kfunc_ids_init) +BTF_ID_FLAGS(func, scx_bpf_switch_all) +BTF_SET8_END(scx_kfunc_ids_init) + +static const struct btf_kfunc_id_set scx_kfunc_set_init = { + .owner = THIS_MODULE, + .set = &scx_kfunc_ids_init, +}; + /** * scx_bpf_create_dsq - Create a custom DSQ * @dsq_id: DSQ to create @@ -3194,6 +3237,8 @@ static int __init register_ext_kfuncs(void) * check using scx_kf_allowed(). 
*/ if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, + &scx_kfunc_set_init)) || + (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_sleepable)) || (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_enqueue_dispatch)) || diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index 76c94babd19e..a4fe649e649d 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -60,7 +60,9 @@ extern unsigned long scx_watchdog_timeout; extern unsigned long scx_watchdog_timestamp; DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled); +DECLARE_STATIC_KEY_FALSE(__scx_switched_all); #define scx_enabled() static_branch_unlikely(&__scx_ops_enabled) +#define scx_switched_all() static_branch_unlikely(&__scx_switched_all) bool task_on_scx(struct task_struct *p); void scx_pre_fork(struct task_struct *p); @@ -95,6 +97,8 @@ static inline void scx_notify_sched_tick(void) static inline const struct sched_class *next_active_class(const struct sched_class *class) { class++; + if (scx_switched_all() && class == &fair_sched_class) + class++; if (!scx_enabled() && class == &ext_sched_class) class++; return class; @@ -117,6 +121,7 @@ static inline const struct sched_class *next_active_class(const struct sched_cla #else /* CONFIG_SCHED_CLASS_EXT */ #define scx_enabled() false +#define scx_switched_all() false static inline void scx_pre_fork(struct task_struct *p) {} static inline int scx_fork(struct task_struct *p) { return 0; } diff --git a/tools/sched_ext/scx_common.bpf.h b/tools/sched_ext/scx_common.bpf.h index 69fd41a55a3c..5efb58e1eb6a 100644 --- a/tools/sched_ext/scx_common.bpf.h +++ b/tools/sched_ext/scx_common.bpf.h @@ -52,6 +52,7 @@ void ___scx_bpf_error_format_checker(const char *fmt, ...) {} ___scx_bpf_error_format_checker(fmt, ##args); \ }) +void scx_bpf_switch_all(void) __ksym; s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym; bool scx_bpf_consume(u64 dsq_id) __ksym; u32 scx_bpf_dispatch_nr_slots(void) __ksym; diff --git a/tools/sched_ext/scx_example_qmap.bpf.c b/tools/sched_ext/scx_example_qmap.bpf.c index 0e4cccf878f5..abb134fb18ce 100644 --- a/tools/sched_ext/scx_example_qmap.bpf.c +++ b/tools/sched_ext/scx_example_qmap.bpf.c @@ -25,6 +25,7 @@ char _license[] SEC("license") = "GPL"; const volatile u64 slice_ns = SCX_SLICE_DFL; +const volatile bool switch_partial; const volatile u32 stall_user_nth; const volatile u32 stall_kernel_nth; const volatile s32 disallow_tgid; @@ -239,6 +240,13 @@ s32 BPF_STRUCT_OPS(qmap_prep_enable, struct task_struct *p, return -ENOMEM; } +s32 BPF_STRUCT_OPS(qmap_init) +{ + if (!switch_partial) + scx_bpf_switch_all(); + return 0; +} + void BPF_STRUCT_OPS(qmap_exit, struct scx_exit_info *ei) { uei_record(&uei, ei); @@ -251,6 +259,7 @@ struct sched_ext_ops qmap_ops = { .dequeue = (void *)qmap_dequeue, .dispatch = (void *)qmap_dispatch, .prep_enable = (void *)qmap_prep_enable, + .init = (void *)qmap_init, .exit = (void *)qmap_exit, .timeout_ms = 5000U, .name = "qmap", diff --git a/tools/sched_ext/scx_example_qmap.c b/tools/sched_ext/scx_example_qmap.c index d080a0c853c0..f94fd39c4ed8 100644 --- a/tools/sched_ext/scx_example_qmap.c +++ b/tools/sched_ext/scx_example_qmap.c @@ -20,13 +20,14 @@ const char help_fmt[] = "\n" "See the top-level comment in .bpf.c for more details.\n" "\n" -"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-d PID]\n" +"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-d PID] [-p]\n" "\n" " -s SLICE_US Override slice duration\n" " -e COUNT Trigger scx_bpf_error() after COUNT enqueues\n" " -t 
COUNT Stall every COUNT'th user thread\n" " -T COUNT Stall every COUNT'th kernel thread\n" " -d PID Disallow a process from switching into SCHED_EXT (-1 for self)\n" +" -p Switch only tasks on SCHED_EXT policy intead of all\n" " -h Display this help and exit\n"; static volatile int exit_req; @@ -50,7 +51,7 @@ int main(int argc, char **argv) skel = scx_example_qmap__open(); assert(skel); - while ((opt = getopt(argc, argv, "s:e:t:T:d:h")) != -1) { + while ((opt = getopt(argc, argv, "s:e:t:T:d:ph")) != -1) { switch (opt) { case 's': skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000; @@ -69,6 +70,9 @@ int main(int argc, char **argv) if (skel->rodata->disallow_tgid < 0) skel->rodata->disallow_tgid = getpid(); break; + case 'p': + skel->rodata->switch_partial = true; + break; default: fprintf(stderr, help_fmt, basename(argv[0])); return opt != 'h'; diff --git a/tools/sched_ext/scx_example_simple.bpf.c b/tools/sched_ext/scx_example_simple.bpf.c index 74716d0dd08d..fa5ae683ace1 100644 --- a/tools/sched_ext/scx_example_simple.bpf.c +++ b/tools/sched_ext/scx_example_simple.bpf.c @@ -15,6 +15,8 @@ char _license[] SEC("license") = "GPL"; +const volatile bool switch_partial; + struct user_exit_info uei; struct { @@ -43,6 +45,13 @@ void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags) scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags); } +s32 BPF_STRUCT_OPS(simple_init) +{ + if (!switch_partial) + scx_bpf_switch_all(); + return 0; +} + void BPF_STRUCT_OPS(simple_exit, struct scx_exit_info *ei) { uei_record(&uei, ei); @@ -51,6 +60,7 @@ void BPF_STRUCT_OPS(simple_exit, struct scx_exit_info *ei) SEC(".struct_ops") struct sched_ext_ops simple_ops = { .enqueue = (void *)simple_enqueue, + .init = (void *)simple_init, .exit = (void *)simple_exit, .name = "simple", }; diff --git a/tools/sched_ext/scx_example_simple.c b/tools/sched_ext/scx_example_simple.c index 2f1ee40f7e5a..868fd39e45c7 100644 --- a/tools/sched_ext/scx_example_simple.c +++ b/tools/sched_ext/scx_example_simple.c @@ -19,8 +19,9 @@ const char help_fmt[] = "\n" "See the top-level comment in .bpf.c for more details.\n" "\n" -"Usage: %s\n" +"Usage: %s [-p]\n" "\n" +" -p Switch only tasks on SCHED_EXT policy intead of all\n" " -h Display this help and exit\n"; static volatile int exit_req; @@ -64,8 +65,11 @@ int main(int argc, char **argv) skel = scx_example_simple__open(); assert(skel); - while ((opt = getopt(argc, argv, "h")) != -1) { + while ((opt = getopt(argc, argv, "ph")) != -1) { switch (opt) { + case 'p': + skel->rodata->switch_partial = true; + break; default: fprintf(stderr, help_fmt, basename(argv[0])); return opt != 'h'; From patchwork Fri Mar 17 21:33:20 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71471 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp2885wrt; Fri, 17 Mar 2023 14:39:40 -0700 (PDT) X-Google-Smtp-Source: AK7set+M4F9VA6zE4eZn0GReLgI4QEc4iVDH0IiyDfeDBJIg88sE3I/vCcte4JQJA8+uxlyeHTUG X-Received: by 2002:a05:6a21:99a9:b0:cd:fc47:dd74 with SMTP id ve41-20020a056a2199a900b000cdfc47dd74mr9925758pzb.4.1679089179618; Fri, 17 Mar 2023 14:39:39 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679089179; cv=none; d=google.com; s=arc-20160816; b=f9oz+WhkWqUhT8ncIUTPJNO5o3HGdmhKW94XQZg+P4hqSX7nq9bec2rQuxD9uhq0P4 +s4FK6MuRzqV3jcjaw1d40gw2wVMxLa5pJf3YdAXn5gXP/0K2dGXDAb3TJezxlqjth2a e46kzU2TeMenz4gWYYB4ZihnE/UXWyR8s4ArmA5mQLl2lgC435xHV8ykkuBEgDRUsuth 
b=HXrZ8/JB2BcuCfVpwpkiBrbQNJRuUBolT+rXnTQmjGLwxh0ar6gozPI/CqoRU78MR5 c7+CHZShw+rKyb9IGjLgoB0rzsLn5NGZ9DLl6OpsvEUHrghQ7GfhxomUGrIXAhBeRGBz y0xgiAKzkzckvYX1Opr3FzHqlcCCErPEbWCuoV6rYvMUQa1nEsn9LWaOuCya6kuv/wni I3z4EUY1IbICustBn5q0uJR11g7gp88Rg97zPBzUF/sdz6NJdFULpPw/YEoZFPyv+ZzX LpHUBPo5xz9Cws1KFsqnIQMj+CjbWiFrz8c2S0VmQ2T0Zteb3vNDmRinYJS6H3CfbbmZ LPpQ== X-Gm-Message-State: AO0yUKXucbzYmJN4A6COSoEceagznyUP0mKDJogmEvlMkCk4II/Z1pug lP8PhcliopAAtzRJb3X1StM= X-Received: by 2002:a17:90b:4a07:b0:234:1d1d:6ae6 with SMTP id kk7-20020a17090b4a0700b002341d1d6ae6mr9761936pjb.1.1679088857922; Fri, 17 Mar 2023 14:34:17 -0700 (PDT) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id x34-20020a17090a6c2500b002339195a47bsm5380946pjj.53.2023.03.17.14.34.17 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:34:17 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo , Julia Lawall Subject: [PATCH 19/32] sched_ext: Implement scx_bpf_kick_cpu() and task preemption support Date: Fri, 17 Mar 2023 11:33:20 -1000 Message-Id: <20230317213333.2174969-20-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE, SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760652615399114229?= X-GMAIL-MSGID: =?utf-8?q?1760652615399114229?= It's often useful to wake up and/or trigger reschedule on other CPUs. This patch adds scx_bpf_kick_cpu() kfunc helper that BPF scheduler can call to kick the target CPU into the scheduling path. As a sched_ext task relinquishes its CPU only after its slice is depleted, this patch also adds SCX_KICK_PREEMPT and SCX_ENQ_PREEMPT which clears the slice of the target CPU's current task to guarantee that sched_ext's scheduling path runs on the CPU. This patch also adds a new example scheduler, scx_example_central, which demonstrates central scheduling where one CPU is responsible for making all scheduling decisions in the system. The central CPU makes scheduling decisions for all CPUs in the system, queues tasks on the appropriate local dsq's and preempts the worker CPUs. The worker CPUs in turn preempt the central CPU when it needs tasks to run. Currently, every CPU depends on its own tick to expire the current task. 
A follow-up patch implementing tickless support for sched_ext will allow the worker CPUs to go full tickless so that they can run completely undisturbed. v3: * Make scx_example_central switch all tasks by default. * Convert to BPF inline iterators. v2: * Julia Lawall reported that scx_example_central can overflow the dispatch buffer and malfunction. As scheduling for other CPUs can't be handled by the automatic retry mechanism, fix by implementing an explicit overflow and retry handling. * Updated to use generic BPF cpumask helpers. Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden Cc: Julia Lawall --- include/linux/sched/ext.h | 4 + kernel/sched/ext.c | 82 +++++++- kernel/sched/ext.h | 12 ++ kernel/sched/sched.h | 3 + tools/sched_ext/.gitignore | 1 + tools/sched_ext/Makefile | 8 +- tools/sched_ext/scx_common.bpf.h | 1 + tools/sched_ext/scx_example_central.bpf.c | 225 ++++++++++++++++++++++ tools/sched_ext/scx_example_central.c | 93 +++++++++ 9 files changed, 424 insertions(+), 5 deletions(-) create mode 100644 tools/sched_ext/scx_example_central.bpf.c create mode 100644 tools/sched_ext/scx_example_central.c diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index da85bc3751ad..ac19b720374d 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -408,6 +408,10 @@ struct sched_ext_entity { * scx_bpf_dispatch() but can also be modified directly by the BPF * scheduler. Automatically decreased by SCX as the task executes. On * depletion, a scheduling event is triggered. + * + * This value is cleared to zero if the task is preempted by + * %SCX_KICK_PREEMPT and shouldn't be used to determine how long the + * task ran. Use p->se.sum_exec_runtime instead. */ u64 slice; diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index ed2f13d90fcb..d1f09ab51c4a 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -497,7 +497,7 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p, } } - if (enq_flags & SCX_ENQ_HEAD) + if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) list_add(&p->scx.dsq_node, &dsq->fifo); else list_add_tail(&p->scx.dsq_node, &dsq->fifo); @@ -513,8 +513,16 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p, if (is_local) { struct rq *rq = container_of(dsq, struct rq, scx.local_dsq); + bool preempt = false; - if (sched_class_above(&ext_sched_class, rq->curr->sched_class)) + if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr && + rq->curr->sched_class == &ext_sched_class) { + rq->curr->scx.slice = 0; + preempt = true; + } + + if (preempt || sched_class_above(&ext_sched_class, + rq->curr->sched_class)) resched_curr(rq); } else { raw_spin_unlock(&dsq->lock); @@ -1888,7 +1896,9 @@ int scx_check_setscheduler(struct task_struct *p, int policy) * Omitted operations: * * - check_preempt_curr: NOOP as it isn't useful in the wakeup path because the - * task isn't tied to the CPU at that point. + * task isn't tied to the CPU at that point. Preemption is implemented by + * resetting the victim task's slice to 0 and triggering reschedule on the + * target CPU. * * - migrate_task_rq: Unncessary as task to cpu mapping is transient. 
* @@ -2721,6 +2731,32 @@ static const struct sysrq_key_op sysrq_sched_ext_reset_op = { .enable_mask = SYSRQ_ENABLE_RTNICE, }; +static void kick_cpus_irq_workfn(struct irq_work *irq_work) +{ + struct rq *this_rq = this_rq(); + int this_cpu = cpu_of(this_rq); + int cpu; + + for_each_cpu(cpu, this_rq->scx.cpus_to_kick) { + struct rq *rq = cpu_rq(cpu); + unsigned long flags; + + raw_spin_rq_lock_irqsave(rq, flags); + + if (cpu_online(cpu) || cpu == this_cpu) { + if (cpumask_test_cpu(cpu, this_rq->scx.cpus_to_preempt) && + rq->curr->sched_class == &ext_sched_class) + rq->curr->scx.slice = 0; + resched_curr(rq); + } + + raw_spin_rq_unlock_irqrestore(rq, flags); + } + + cpumask_clear(this_rq->scx.cpus_to_kick); + cpumask_clear(this_rq->scx.cpus_to_preempt); +} + void __init init_sched_ext_class(void) { int cpu; @@ -2744,6 +2780,10 @@ void __init init_sched_ext_class(void) init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL); INIT_LIST_HEAD(&rq->scx.watchdog_list); + + BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL)); + BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL)); + init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn); } register_sysrq_key('S', &sysrq_sched_ext_reset_op); @@ -2980,6 +3020,41 @@ static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = { .set = &scx_kfunc_ids_dispatch, }; +/** + * scx_bpf_kick_cpu - Trigger reschedule on a CPU + * @cpu: cpu to kick + * @flags: SCX_KICK_* flags + * + * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or + * trigger rescheduling on a busy CPU. This can be called from any online + * scx_ops operation and the actual kicking is performed asynchronously through + * an irq work. + */ +void scx_bpf_kick_cpu(s32 cpu, u64 flags) +{ + struct rq *rq; + + if (!ops_cpu_valid(cpu)) { + scx_ops_error("invalid cpu %d", cpu); + return; + } + + preempt_disable(); + rq = this_rq(); + + /* + * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting + * rq locks. We can probably be smarter and avoid bouncing if called + * from ops which don't hold a rq lock. + */ + cpumask_set_cpu(cpu, rq->scx.cpus_to_kick); + if (flags & SCX_KICK_PREEMPT) + cpumask_set_cpu(cpu, rq->scx.cpus_to_preempt); + + irq_work_queue(&rq->scx.kick_cpus_irq_work); + preempt_enable(); +} + /** * scx_bpf_dsq_nr_queued - Return the number of queued tasks * @dsq_id: id of the DSQ @@ -3201,6 +3276,7 @@ s32 scx_bpf_task_cpu(const struct task_struct *p) } BTF_SET8_START(scx_kfunc_ids_any) +BTF_ID_FLAGS(func, scx_bpf_kick_cpu) BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued) BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle) BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU) diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index a4fe649e649d..0b04626e8ca2 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -19,6 +19,14 @@ enum scx_enq_flags { /* high 32bits are SCX specific */ + /* + * Set the following to trigger preemption when calling + * scx_bpf_dispatch() with a local dsq as the target. The slice of the + * current task is cleared to zero and the CPU is kicked into the + * scheduling path. Implies %SCX_ENQ_HEAD. + */ + SCX_ENQ_PREEMPT = 1LLU << 32, + /* * The task being enqueued is the only task available for the cpu. 
By * default, ext core keeps executing such tasks but when @@ -51,6 +59,10 @@ enum scx_deq_flags { SCX_DEQ_SLEEP = DEQUEUE_SLEEP, }; +enum scx_kick_flags { + SCX_KICK_PREEMPT = 1LLU << 0, /* force scheduling on the CPU */ +}; + #ifdef CONFIG_SCHED_CLASS_EXT extern const struct sched_class ext_sched_class; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index bd9851ee0257..cbdfc7b61225 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -692,6 +692,9 @@ struct scx_rq { u64 ops_qseq; u64 extra_enq_flags; /* see move_task_to_local_dsq() */ u32 nr_running; + cpumask_var_t cpus_to_kick; + cpumask_var_t cpus_to_preempt; + struct irq_work kick_cpus_irq_work; }; #endif /* CONFIG_SCHED_CLASS_EXT */ diff --git a/tools/sched_ext/.gitignore b/tools/sched_ext/.gitignore index 2ad3d86caf79..3d8ec46ca304 100644 --- a/tools/sched_ext/.gitignore +++ b/tools/sched_ext/.gitignore @@ -1,5 +1,6 @@ scx_example_simple scx_example_qmap +scx_example_central *.skel.h *.subskel.h /tools/ diff --git a/tools/sched_ext/Makefile b/tools/sched_ext/Makefile index 8f0f14bb59ff..bcec7c1fb7b1 100644 --- a/tools/sched_ext/Makefile +++ b/tools/sched_ext/Makefile @@ -115,7 +115,7 @@ BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) \ -Wall -Wno-compare-distinct-pointer-types \ -O2 -mcpu=v3 -all: scx_example_simple scx_example_qmap +all: scx_example_simple scx_example_qmap scx_example_central # sort removes libbpf duplicates when not cross-building MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \ @@ -174,10 +174,14 @@ scx_example_qmap: scx_example_qmap.c scx_example_qmap.skel.h user_exit_info.h $(CC) $(CFLAGS) -c $< -o $@.o $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) +scx_example_central: scx_example_central.c scx_example_central.skel.h user_exit_info.h + $(CC) $(CFLAGS) -c $< -o $@.o + $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) + clean: rm -rf $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) rm -f *.o *.bpf.o *.skel.h *.subskel.h - rm -f scx_example_simple scx_example_qmap + rm -f scx_example_simple scx_example_qmap scx_example_central .PHONY: all clean diff --git a/tools/sched_ext/scx_common.bpf.h b/tools/sched_ext/scx_common.bpf.h index 5efb58e1eb6a..e6f6171edf3c 100644 --- a/tools/sched_ext/scx_common.bpf.h +++ b/tools/sched_ext/scx_common.bpf.h @@ -57,6 +57,7 @@ s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym; bool scx_bpf_consume(u64 dsq_id) __ksym; u32 scx_bpf_dispatch_nr_slots(void) __ksym; void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym; +void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym; s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym; bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym; s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed) __ksym; diff --git a/tools/sched_ext/scx_example_central.bpf.c b/tools/sched_ext/scx_example_central.bpf.c new file mode 100644 index 000000000000..443504fa68f8 --- /dev/null +++ b/tools/sched_ext/scx_example_central.bpf.c @@ -0,0 +1,225 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * A central FIFO sched_ext scheduler which demonstrates the followings: + * + * a. Making all scheduling decisions from one CPU: + * + * The central CPU is the only one making scheduling decisions. All other + * CPUs kick the central CPU when they run out of tasks to run. + * + * There is one global BPF queue and the central CPU schedules all CPUs by + * dispatching from the global queue to each CPU's local dsq from dispatch(). + * This isn't the most straightforward. e.g. It'd be easier to bounce + * through per-CPU BPF queues. 
The current design is chosen to maximally + * utilize and verify various SCX mechanisms such as LOCAL_ON dispatching. + * + * b. Preemption + * + * SCX_KICK_PREEMPT is used to trigger scheduling and CPUs to move to the + * next tasks. + * + * This scheduler is designed to maximize usage of various SCX mechanisms. A + * more practical implementation would likely put the scheduling loop outside + * the central CPU's dispatch() path and add some form of priority mechanism. + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#include "scx_common.bpf.h" + +char _license[] SEC("license") = "GPL"; + +enum { + FALLBACK_DSQ_ID = 0, + MAX_CPUS = 4096, + MS_TO_NS = 1000LLU * 1000, + TIMER_INTERVAL_NS = 1 * MS_TO_NS, +}; + +const volatile bool switch_partial; +const volatile s32 central_cpu; +const volatile u32 nr_cpu_ids = 64; /* !0 for veristat, set during init */ + +u64 nr_total, nr_locals, nr_queued, nr_lost_pids; +u64 nr_dispatches, nr_mismatches, nr_retries; +u64 nr_overflows; + +struct user_exit_info uei; + +struct { + __uint(type, BPF_MAP_TYPE_QUEUE); + __uint(max_entries, 4096); + __type(value, s32); +} central_q SEC(".maps"); + +/* can't use percpu map due to bad lookups */ +static bool cpu_gimme_task[MAX_CPUS]; + +struct central_timer { + struct bpf_timer timer; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, u32); + __type(value, struct central_timer); +} central_timer SEC(".maps"); + +s32 BPF_STRUCT_OPS(central_select_cpu, struct task_struct *p, + s32 prev_cpu, u64 wake_flags) +{ + /* + * Steer wakeups to the central CPU as much as possible to avoid + * disturbing other CPUs. It's safe to blindly return the central cpu as + * select_cpu() is a hint and if @p can't be on it, the kernel will + * automatically pick a fallback CPU. + */ + return central_cpu; +} + +void BPF_STRUCT_OPS(central_enqueue, struct task_struct *p, u64 enq_flags) +{ + s32 pid = p->pid; + + __sync_fetch_and_add(&nr_total, 1); + + if (bpf_map_push_elem(¢ral_q, &pid, 0)) { + __sync_fetch_and_add(&nr_overflows, 1); + scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_DFL, enq_flags); + return; + } + + __sync_fetch_and_add(&nr_queued, 1); + + if (!scx_bpf_task_running(p)) + scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT); +} + +static bool dispatch_to_cpu(s32 cpu) +{ + struct task_struct *p; + s32 pid; + + bpf_repeat(BPF_MAX_LOOPS) { + if (bpf_map_pop_elem(¢ral_q, &pid)) + break; + + __sync_fetch_and_sub(&nr_queued, 1); + + p = bpf_task_from_pid(pid); + if (!p) { + __sync_fetch_and_add(&nr_lost_pids, 1); + continue; + } + + /* + * If we can't run the task at the top, do the dumb thing and + * bounce it to the fallback dsq. 
+ */ + if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) { + __sync_fetch_and_add(&nr_mismatches, 1); + scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_DFL, 0); + bpf_task_release(p); + continue; + } + + /* dispatch to local and mark that @cpu doesn't need more */ + scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL, 0); + + if (cpu != central_cpu) + scx_bpf_kick_cpu(cpu, 0); + + bpf_task_release(p); + return true; + } + + return false; +} + +void BPF_STRUCT_OPS(central_dispatch, s32 cpu, struct task_struct *prev) +{ + if (cpu == central_cpu) { + /* dispatch for all other CPUs first */ + __sync_fetch_and_add(&nr_dispatches, 1); + + bpf_for(cpu, 0, nr_cpu_ids) { + bool *gimme; + + if (!scx_bpf_dispatch_nr_slots()) + break; + + /* central's gimme is never set */ + gimme = MEMBER_VPTR(cpu_gimme_task, [cpu]); + if (gimme && !*gimme) + continue; + + if (dispatch_to_cpu(cpu)) + *gimme = false; + } + + /* + * Retry if we ran out of dispatch buffer slots as we might have + * skipped some CPUs and also need to dispatch for self. The ext + * core automatically retries if the local dsq is empty but we + * can't rely on that as we're dispatching for other CPUs too. + * Kick self explicitly to retry. + */ + if (!scx_bpf_dispatch_nr_slots()) { + __sync_fetch_and_add(&nr_retries, 1); + scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT); + return; + } + + /* look for a task to run on the central CPU */ + if (scx_bpf_consume(FALLBACK_DSQ_ID)) + return; + dispatch_to_cpu(central_cpu); + } else { + bool *gimme; + + if (scx_bpf_consume(FALLBACK_DSQ_ID)) + return; + + gimme = MEMBER_VPTR(cpu_gimme_task, [cpu]); + if (gimme) + *gimme = true; + + /* + * Force dispatch on the scheduling CPU so that it finds a task + * to run for us. + */ + scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT); + } +} + +int BPF_STRUCT_OPS_SLEEPABLE(central_init) +{ + if (!switch_partial) + scx_bpf_switch_all(); + + return scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1); +} + +void BPF_STRUCT_OPS(central_exit, struct scx_exit_info *ei) +{ + uei_record(&uei, ei); +} + +SEC(".struct_ops") +struct sched_ext_ops central_ops = { + /* + * We are offloading all scheduling decisions to the central CPU and + * thus being the last task on a given CPU doesn't mean anything + * special. Enqueue the last tasks like any other tasks. + */ + .flags = SCX_OPS_ENQ_LAST, + + .select_cpu = (void *)central_select_cpu, + .enqueue = (void *)central_enqueue, + .dispatch = (void *)central_dispatch, + .init = (void *)central_init, + .exit = (void *)central_exit, + .name = "central", +}; diff --git a/tools/sched_ext/scx_example_central.c b/tools/sched_ext/scx_example_central.c new file mode 100644 index 000000000000..959b305a93a9 --- /dev/null +++ b/tools/sched_ext/scx_example_central.c @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
+ * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include "user_exit_info.h" +#include "scx_example_central.skel.h" + +const char help_fmt[] = +"A central FIFO sched_ext scheduler.\n" +"\n" +"See the top-level comment in .bpf.c for more details.\n" +"\n" +"Usage: %s [-c CPU] [-p]\n" +"\n" +" -c CPU Override the central CPU (default: 0)\n" +" -p Switch only tasks on SCHED_EXT policy intead of all\n" +" -h Display this help and exit\n"; + +static volatile int exit_req; + +static void sigint_handler(int dummy) +{ + exit_req = 1; +} + +int main(int argc, char **argv) +{ + struct scx_example_central *skel; + struct bpf_link *link; + u64 seq = 0; + s32 opt; + + signal(SIGINT, sigint_handler); + signal(SIGTERM, sigint_handler); + + libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + + skel = scx_example_central__open(); + assert(skel); + + skel->rodata->central_cpu = 0; + skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus(); + + while ((opt = getopt(argc, argv, "c:ph")) != -1) { + switch (opt) { + case 'c': + skel->rodata->central_cpu = strtoul(optarg, NULL, 0); + break; + case 'p': + skel->rodata->switch_partial = true; + break; + default: + fprintf(stderr, help_fmt, basename(argv[0])); + return opt != 'h'; + } + } + + assert(!scx_example_central__load(skel)); + + link = bpf_map__attach_struct_ops(skel->maps.central_ops); + assert(link); + + while (!exit_req && !uei_exited(&skel->bss->uei)) { + printf("[SEQ %lu]\n", seq++); + printf("total :%10lu local:%10lu queued:%10lu lost:%10lu\n", + skel->bss->nr_total, + skel->bss->nr_locals, + skel->bss->nr_queued, + skel->bss->nr_lost_pids); + printf(" dispatch:%10lu mismatch:%10lu retry:%10lu\n", + skel->bss->nr_dispatches, + skel->bss->nr_mismatches, + skel->bss->nr_retries); + printf("overflow:%10lu\n", + skel->bss->nr_overflows); + fflush(stdout); + sleep(1); + } + + bpf_link__destroy(link); + uei_print(&skel->bss->uei); + scx_example_central__destroy(skel); + return 0; +} From patchwork Fri Mar 17 21:33:21 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71488 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp12762wrt; Fri, 17 Mar 2023 15:10:57 -0700 (PDT) X-Google-Smtp-Source: AK7set9X5vyWw1inEDVLpnJ95shJeaoJ3Y88kthNYTsuYEu9lKjakAf9TNGApxjknhZScdx+8BfX X-Received: by 2002:a05:6a00:3186:b0:5a9:cebd:7b79 with SMTP id bj6-20020a056a00318600b005a9cebd7b79mr8042014pfb.0.1679091057387; Fri, 17 Mar 2023 15:10:57 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679091057; cv=none; d=google.com; s=arc-20160816; b=IrRzSfI4tfq0cGReiH4IyminBL1o5UM4uAODaABSUhOVSh2UwJFxCe/hioDB5CmjT0 eewlHk1LZWLIsNBJoh9en5AnrimJutDhW3lQagjKyFGcQHLagBLAcE05MAcPr6Q5HhoZ D7zU7XHLYDi2HAQ0Edf8x+Qt+qQKGFLpXG3l6l05dJzkSzMGu5JOgo1DKfu6qRuB9/wE nhdBvzYK5DfSpI+OzaIU0k55pwscA9rBgqKKYQnsHzlaFe4myKpYiN2CXgVtZuNnX7eu ALtcuvja+p0lm+6fB2McZvS2erJNxpLrHGjxW7GARdu4EDrqyAayTHJbx6eR5Dd9DmG8 5BDQ== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=Mk9CUHzVKpOlxfASIcLRI047WvC8d0hSApNZ1eNw/Uk=; b=aWJHvamxmObfSwprUCKNVD8Fpi1vRWcmM19I7BGK5s3J7LlLQXymohk6aNtWQeAo5O 46xR2H2xVsBoj8kSrg6tEdFYSAvRn6nZw6e8Z5Nk0TTCzXLQnvrbyB2bDT9OZDjf121h 
-0700 (PDT) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id b23-20020a170902d89700b0019c912c19d3sm1710371plz.62.2023.03.17.14.34.19 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:34:19 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 20/32] sched_ext: Make watchdog handle ops.dispatch() looping stall Date: Fri, 17 Mar 2023 11:33:21 -1000 Message-Id: <20230317213333.2174969-21-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE, SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760654584657693042?= X-GMAIL-MSGID: =?utf-8?q?1760654584657693042?= The dispatch path retries if the local DSQ is still empty after ops.dispatch() either dispatched or consumed a task. This is both out of necessity and for convenience. It has to retry because the dispatch path might lose the tasks to dequeue while the rq lock is released while trying to migrate tasks across CPUs, and the retry mechanism makes ops.dispatch() implementation easier as it only needs to make some forward progress each iteration. However, this makes it possible for ops.dispatch() to stall CPUs by repeatedly dispatching ineligible tasks. If all CPUs are stalled that way, the watchdog or sysrq handler can't run and the system can't be saved. Let's address the issue by breaking out of the dispatch loop after 32 iterations. It is unlikely but not impossible for ops.dispatch() to legitimately go over the iteration limit. We want to come back to the dispatch path in such cases as not doing so risks stalling the CPU by idling with runnable tasks pending. As the previous task is still current in balance_scx(), resched_curr() doesn't do anything - it will just get cleared. Let's instead use scx_kick_bpf() which will trigger reschedule after switching to the next task which will likely be the idle task. 
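The deferred kick referred to above is scx_bpf_kick_cpu(), added by the previous patch. For reference, the BPF-scheduler-side usage pattern looks roughly like the sketch below. It is modeled on scx_example_central (which pairs scx_bpf_task_running() with the kick in the same way) and is not part of this patch; the names are illustrative. SCX_KICK_PREEMPT additionally clears the slice of the target CPU's current SCX task so the scheduling path runs right away, while flags == 0 only triggers a plain reschedule.

/* Sketch: queue the task, then kick the CPU expected to dispatch it. */
#include "scx_common.bpf.h"

char _license[] SEC("license") = "GPL";

/* illustrative: the CPU that makes dispatch decisions, set before load */
const volatile s32 central_cpu;

void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
{
	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);

	/* make sure the dispatching CPU notices the new work immediately */
	if (!scx_bpf_task_running(p))
		scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
}
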
Signed-off-by: Tejun Heo Reviewed-by: David Vernet --- kernel/sched/ext.c | 17 +++++++++++++++++ tools/sched_ext/scx_example_qmap.bpf.c | 17 +++++++++++++++++ tools/sched_ext/scx_example_qmap.c | 8 ++++++-- 3 files changed, 40 insertions(+), 2 deletions(-) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index d1f09ab51c4a..56fa5eed33ac 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -9,6 +9,7 @@ enum scx_internal_consts { SCX_NR_ONLINE_OPS = SCX_OP_IDX(init), SCX_DSP_DFL_MAX_BATCH = 32, + SCX_DSP_MAX_LOOPS = 32, SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ, }; @@ -168,6 +169,7 @@ static DEFINE_PER_CPU(struct scx_dsp_ctx, scx_dsp_ctx); void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags); +void scx_bpf_kick_cpu(s32 cpu, u64 flags); struct scx_task_iter { struct sched_ext_entity cursor; @@ -1287,6 +1289,7 @@ static int balance_scx(struct rq *rq, struct task_struct *prev, struct scx_rq *scx_rq = &rq->scx; struct scx_dsp_ctx *dspc = this_cpu_ptr(&scx_dsp_ctx); bool prev_on_scx = prev->sched_class == &ext_sched_class; + int nr_loops = SCX_DSP_MAX_LOOPS; lockdep_assert_rq_held(rq); @@ -1341,6 +1344,20 @@ static int balance_scx(struct rq *rq, struct task_struct *prev, return 1; if (consume_dispatch_q(rq, rf, &scx_dsq_global)) return 1; + + /* + * ops.dispatch() can trap us in this loop by repeatedly + * dispatching ineligible tasks. Break out once in a while to + * allow the watchdog to run. As IRQ can't be enabled in + * balance(), we want to complete this scheduling cycle and then + * start a new one. IOW, we want to call resched_curr() on the + * next, most likely idle, task, not the current one. Use + * scx_bpf_kick_cpu() for deferred kicking. + */ + if (unlikely(!--nr_loops)) { + scx_bpf_kick_cpu(cpu_of(rq), 0); + break; + } } while (dspc->nr_tasks); return 0; diff --git a/tools/sched_ext/scx_example_qmap.bpf.c b/tools/sched_ext/scx_example_qmap.bpf.c index abb134fb18ce..ed704a4024c0 100644 --- a/tools/sched_ext/scx_example_qmap.bpf.c +++ b/tools/sched_ext/scx_example_qmap.bpf.c @@ -28,6 +28,7 @@ const volatile u64 slice_ns = SCX_SLICE_DFL; const volatile bool switch_partial; const volatile u32 stall_user_nth; const volatile u32 stall_kernel_nth; +const volatile u32 dsp_inf_loop_after; const volatile s32 disallow_tgid; u32 test_error_cnt; @@ -187,6 +188,22 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) s32 pid; int i; + if (dsp_inf_loop_after && nr_dispatched > dsp_inf_loop_after) { + struct task_struct *p; + + /* + * PID 2 should be kthreadd which should mostly be idle and off + * the scheduler. Let's keep dispatching it to force the kernel + * to call this function over and over again. 
+ */ + p = bpf_task_from_pid(2); + if (p) { + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, slice_ns, 0); + bpf_task_release(p); + return; + } + } + if (!idx || !cnt) { scx_bpf_error("failed to lookup idx[%p], cnt[%p]", idx, cnt); return; diff --git a/tools/sched_ext/scx_example_qmap.c b/tools/sched_ext/scx_example_qmap.c index f94fd39c4ed8..3f68dae47bd0 100644 --- a/tools/sched_ext/scx_example_qmap.c +++ b/tools/sched_ext/scx_example_qmap.c @@ -20,12 +20,13 @@ const char help_fmt[] = "\n" "See the top-level comment in .bpf.c for more details.\n" "\n" -"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-d PID] [-p]\n" +"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-l COUNT] [-d PID] [-p]\n" "\n" " -s SLICE_US Override slice duration\n" " -e COUNT Trigger scx_bpf_error() after COUNT enqueues\n" " -t COUNT Stall every COUNT'th user thread\n" " -T COUNT Stall every COUNT'th kernel thread\n" +" -l COUNT Trigger dispatch infinite looping after COUNT dispatches\n" " -d PID Disallow a process from switching into SCHED_EXT (-1 for self)\n" " -p Switch only tasks on SCHED_EXT policy intead of all\n" " -h Display this help and exit\n"; @@ -51,7 +52,7 @@ int main(int argc, char **argv) skel = scx_example_qmap__open(); assert(skel); - while ((opt = getopt(argc, argv, "s:e:t:T:d:ph")) != -1) { + while ((opt = getopt(argc, argv, "s:e:t:T:l:d:ph")) != -1) { switch (opt) { case 's': skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000; @@ -65,6 +66,9 @@ int main(int argc, char **argv) case 'T': skel->rodata->stall_kernel_nth = strtoul(optarg, NULL, 0); break; + case 'l': + skel->rodata->dsp_inf_loop_after = strtoul(optarg, NULL, 0); + break; case 'd': skel->rodata->disallow_tgid = strtol(optarg, NULL, 0); if (skel->rodata->disallow_tgid < 0) From patchwork Fri Mar 17 21:33:22 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71470 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp2887wrt; Fri, 17 Mar 2023 14:39:40 -0700 (PDT) X-Google-Smtp-Source: AK7set8k32x6IWVFX6Hkzj6qLK6c2/V5/TQcxiNFTOTHYJ5F9mCXBzRDMpGJpCoqKga4lCmPf5vI X-Received: by 2002:a05:6a00:4211:b0:623:5c0f:b24a with SMTP id cd17-20020a056a00421100b006235c0fb24amr7908509pfb.2.1679089180002; Fri, 17 Mar 2023 14:39:40 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679089179; cv=none; d=google.com; s=arc-20160816; b=pXF5EaGVZ2Uej4IFegSl1kSBPECMZmqW63qCE9DEZF7bfUq9omjLJbjT7hszYyCf20 s3xp+LRJFmBJj8dpyqoBdDztBTHsqf//OAMKVfyS2qIw+ZXIF4kCxsRVRVVt1FuTMsfF 9RnMso/h/qQwKzm2jja/TsG7+tmtvITRLXIGiHYsPlMDW5kuiwMLnGP2lmkUj0ossDS/ ADZ0XmKPTy8kc0521h+flumUmx5Ij3vb8I2De5d5UoGcjYsqja60fXy+YhCRC+AFa6U1 w8MqqO2YquQaf6s3oTW8T/oA2yUYRsTnBB5XXJLOHDJbYPyIzCf8lAYbkpZ8aczFAnl2 BdJA== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=+9y1dItfFtvTCcd2jMRX78Y3CvuU5tntYbyJY+7/t8A=; b=zjHVJt7RdsSV9DOecvqTkWmsQQc8dAi2xAhifNkQnu5R6ASES8FUu0iX/yGNcCsZ+j 0OUUTnuMnnUjEkbOwo+TLKTa8+MunrvIfJ+bQ1dDN8RgCx+aCfjh9z0ytg1O8+Bc29Xr KWt7REDHZWI5RQp6WdI3kI6YYYJqK/FrbF3XpIGYjae9/Tplp9wvmdU9NZ0BWxYT6mTZ 9+2CcOI26qqfYKSji7VRfSBl/ROSE4S9Os8SHCtinORYU1j/KSKwaLJfcZXo+8BbPD5z QEsLE4ArRwDDYwhnGptCrhBxdY0Vz+qo6CQkBTlqM6X38EJnW3m7D65+xGHEsyva5aH2 RwRw== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id j10-20020a63fc0a000000b00503000f0492sm1873374pgi.14.2023.03.17.14.34.20 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:34:21 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 21/32] sched_ext: Add task state tracking operations Date: Fri, 17 Mar 2023 11:33:22 -1000 Message-Id: <20230317213333.2174969-22-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE, SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760652615961524446?= X-GMAIL-MSGID: =?utf-8?q?1760652615961524446?= Being able to track the task runnable and running state transitions are useful for a variety of purposes including latency tracking and load factor calculation. Currently, BPF schedulers don't have a good way of tracking these transitions. Becoming runnable can be determined from ops.enqueue() but becoming quiescent can only be inferred from the lack of subsequent enqueue. Also, as the local dsq can have multiple tasks and some events are handled in the sched_ext core, it's difficult to determine when a given task starts and stops executing. This patch adds sched_ext_ops.runnable(), .running(), .stopping() and .quiescent() operations to track the task runnable and running state transitions. They're mostly self explanatory; however, we want to ensure that running <-> stopping transitions are always contained within runnable <-> quiescent transitions which is a bit different from how the scheduler core behaves. This adds a bit of complication. See the comment in dequeue_task_scx(). Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- include/linux/sched/ext.h | 65 +++++++++++++++++++++++++++++++++++++++ kernel/sched/ext.c | 31 +++++++++++++++++++ 2 files changed, 96 insertions(+) diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index ac19b720374d..16eb54635e35 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -192,6 +192,71 @@ struct sched_ext_ops { */ void (*dispatch)(s32 cpu, struct task_struct *prev); + /** + * runnable - A task is becoming runnable on its associated CPU + * @p: task becoming runnable + * @enq_flags: %SCX_ENQ_* + * + * This and the following three functions can be used to track a task's + * execution state transitions. 
A task becomes ->runnable() on a CPU, + * and then goes through one or more ->running() and ->stopping() pairs + * as it runs on the CPU, and eventually becomes ->quiescent() when it's + * done running on the CPU. + * + * @p is becoming runnable on the CPU because it's + * + * - waking up (%SCX_ENQ_WAKEUP) + * - being moved from another CPU + * - being restored after temporarily taken off the queue for an + * attribute change. + * + * This and ->enqueue() are related but not coupled. This operation + * notifies @p's state transition and may not be followed by ->enqueue() + * e.g. when @p is being dispatched to a remote CPU. Likewise, a task + * may be ->enqueue()'d without being preceded by this operation e.g. + * after exhausting its slice. + */ + void (*runnable)(struct task_struct *p, u64 enq_flags); + + /** + * running - A task is starting to run on its associated CPU + * @p: task starting to run + * + * See ->runnable() for explanation on the task state notifiers. + */ + void (*running)(struct task_struct *p); + + /** + * stopping - A task is stopping execution + * @p: task stopping to run + * @runnable: is task @p still runnable? + * + * See ->runnable() for explanation on the task state notifiers. If + * !@runnable, ->quiescent() will be invoked after this operation + * returns. + */ + void (*stopping)(struct task_struct *p, bool runnable); + + /** + * quiescent - A task is becoming not runnable on its associated CPU + * @p: task becoming not runnable + * @deq_flags: %SCX_DEQ_* + * + * See ->runnable() for explanation on the task state notifiers. + * + * @p is becoming quiescent on the CPU because it's + * + * - sleeping (%SCX_DEQ_SLEEP) + * - being moved to another CPU + * - being temporarily taken off the queue for an attribute change + * (%SCX_DEQ_SAVE) + * + * This and ->dequeue() are related but not coupled. This operation + * notifies @p's state transition and may not be preceded by ->dequeue() + * e.g. when @p is being dispatched to a remote CPU. + */ + void (*quiescent)(struct task_struct *p, u64 deq_flags); + /** * yield - Yield CPU * @from: yielding task diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 56fa5eed33ac..c9aa74e55999 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -776,6 +776,9 @@ static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags rq->scx.nr_running++; add_nr_running(rq, 1); + if (SCX_HAS_OP(runnable)) + SCX_CALL_OP(SCX_KF_REST, runnable, p, enq_flags); + do_enqueue_task(rq, p, enq_flags, sticky_cpu); } @@ -836,6 +839,26 @@ static void dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags ops_dequeue(p, deq_flags); + /* + * A currently running task which is going off @rq first gets dequeued + * and then stops running. As we want running <-> stopping transitions + * to be contained within runnable <-> quiescent transitions, trigger + * ->stopping() early here instead of in put_prev_task_scx(). + * + * @p may go through multiple stopping <-> running transitions between + * here and put_prev_task_scx() if task attribute changes occur while + * balance_scx() leaves @rq unlocked. However, they don't contain any + * information meaningful to the BPF scheduler and can be suppressed by + * skipping the callbacks if the task is !QUEUED. 
+ */ + if (SCX_HAS_OP(stopping) && task_current(rq, p)) { + update_curr_scx(rq); + SCX_CALL_OP(SCX_KF_REST, stopping, p, false); + } + + if (SCX_HAS_OP(quiescent)) + SCX_CALL_OP(SCX_KF_REST, quiescent, p, deq_flags); + if (deq_flags & SCX_DEQ_SLEEP) p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP; else @@ -1372,6 +1395,10 @@ static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first) p->se.exec_start = rq_clock_task(rq); + /* see dequeue_task_scx() on why we skip when !QUEUED */ + if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED)) + SCX_CALL_OP(SCX_KF_REST, running, p); + watchdog_unwatch_task(p, true); } @@ -1410,6 +1437,10 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p) update_curr_scx(rq); + /* see dequeue_task_scx() on why we skip when !QUEUED */ + if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED)) + SCX_CALL_OP(SCX_KF_REST, stopping, p, true); + /* * If we're being called from put_prev_task_balance(), balance_scx() may * have decided that @p should keep running. From patchwork Fri Mar 17 21:33:23 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71483 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp8755wrt; Fri, 17 Mar 2023 15:01:14 -0700 (PDT) X-Google-Smtp-Source: AK7set85WYdN5YLJz0X5oU4nuY+cXrgxzbFkxG++5vOkb0F0e90sgiksNhggzjdoNfUcqNv+fjFH X-Received: by 2002:a17:902:d505:b0:19a:723a:8405 with SMTP id b5-20020a170902d50500b0019a723a8405mr8929082plg.6.1679090474078; Fri, 17 Mar 2023 15:01:14 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679090474; cv=none; d=google.com; s=arc-20160816; b=AyoShRS0KZVHgpwl8+CT/UhVe0fI7vXDZIPgEsRZNmt1c1bu3RyDImXdMoTsUDQ3QL VsvvT79P1hVd1pHY7xtGI9fl+flEr2IIqw/T3+uRL9rMRpx1EofCp0BL2iIf1yZkbcqX GYkatCWwalX1YgV9hruPugmQIaoqCdOj8HH+HUE4VKZuFW1d4xJYYav/SO2+PJiw+rtD F7WqjCc8fUAGdZ6DhLJIp5npIKAPjs5v9OIJVHPvUAZJHc9Mi9nlWFpjJJ9VA5HE07zt TBOd7oTuJ49SRcBbE/Yy9Pi5UqBjuFgltmB7XadWunfr/OmASv9SakH6oeWcOTKKJlUZ Zh5g== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=6r2fOL3rD+vduV0LHS7egjhJ2wEMiRGrFAoqb6nik1o=; b=1GpxUd/IMIAsxSwNCjXldyVeCepKKOQD6KjKTO9ryJ0BmVJyzbq4iiaoCiHqYVjTHz gi27DuYMmsmn1FR/KKDeRwnn44Up4TJTMKR44XJPS0eWcXErzl4qS52WcGTDf8q96VME mPBI9QVfkCvu2hheCxuZ9/f9is6TFutnBhsJ5cSq8GbHniB0xHXXdTorajrJhHUTW7nh /oiQ4PP4FPaS+p10B3VqA1BUHBTUqwdX4COaRkY2VD0dVTXerJKXg1GHiD3DoeB16nsl DKuvqNEZnM0acygZ1Ojgjthxb9Kzu377lUg+cJlVn5knLYUDqSZBjksCh1uR3ORPaTln 5Qag== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=XG1RhO3n; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: from out1.vger.email (out1.vger.email. 
From: Tejun Heo
To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com
Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo
Subject: [PATCH 22/32] sched_ext: Implement tickless support
Date: Fri, 17 Mar 2023 11:33:23 -1000
Message-Id: <20230317213333.2174969-23-tj@kernel.org>
In-Reply-To: <20230317213333.2174969-1-tj@kernel.org>
References: <20230317213333.2174969-1-tj@kernel.org>

Allow BPF schedulers to indicate tickless operation by setting p->scx.slice to SCX_SLICE_INF. A CPU whose current task has an infinite slice goes into tickless operation.

scx_example_central is updated to use tickless operation for all tasks and instead use a BPF timer to expire slices. This also uses the SCX_ENQ_PREEMPT and task state tracking added by the previous patches.

Currently, there is no way to pin the timer on the central CPU, so it may end up on one of the worker CPUs; however, outside of that, the worker CPUs can go tickless both while running sched_ext tasks and while idling.

With schbench running, scx_example_central shows:

root@test ~# grep ^LOC /proc/interrupts; sleep 10; grep ^LOC /proc/interrupts
LOC: 142024 656 664 449 Local timer interrupts
LOC: 161663 663 665 449 Local timer interrupts

Without it:

root@test ~ [SIGINT]# grep ^LOC /proc/interrupts; sleep 10; grep ^LOC /proc/interrupts
LOC: 188778 3142 3793 3993 Local timer interrupts
LOC: 198993 5314 6323 6438 Local timer interrupts

While scx_example_central itself is too barebones to be useful as a production scheduler, a more featureful central scheduler can be built using the same approach. Google's experience shows that such an approach can have significant benefits for certain applications such as VM hosting.

v2:
* Convert to BPF inline iterators.
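As a minimal illustration of the interface (kept much simpler than the scx_example_central changes below), the fragment below shows how a BPF scheduler opts into tickless operation. It is a hypothetical, incomplete sketch that assumes the usual tools/sched_ext build environment (scx_common.bpf.h); something else - such as the BPF timer used by scx_example_central - still has to preempt CPUs once a task has run long enough.

#include "scx_common.bpf.h"

char _license[] SEC("license") = "GPL";

/*
 * Sketch only: queue every task on the local DSQ with an infinite slice so
 * that the CPU can stop its tick.  Slice expiry must then be enforced
 * externally, e.g. by a timer calling scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT).
 */
void BPF_STRUCT_OPS(tickless_enqueue, struct task_struct *p, u64 enq_flags)
{
	scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_INF, enq_flags);
}

On a CONFIG_NO_HZ_FULL kernel booted with an appropriate nohz_full= parameter, the effect shows up as the flat LOC counts in the /proc/interrupts numbers above.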
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- include/linux/sched/ext.h | 1 + kernel/sched/core.c | 9 +- kernel/sched/ext.c | 43 +++++++- kernel/sched/ext.h | 2 + kernel/sched/sched.h | 6 ++ tools/sched_ext/scx_example_central.bpf.c | 121 ++++++++++++++++++++-- tools/sched_ext/scx_example_central.c | 3 +- 7 files changed, 173 insertions(+), 12 deletions(-) diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 16eb54635e35..2f2ee3e05904 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -19,6 +19,7 @@ enum scx_consts { SCX_EXIT_MSG_LEN = 1024, SCX_SLICE_DFL = 20 * NSEC_PER_MSEC, + SCX_SLICE_INF = U64_MAX, /* infinite, implies nohz */ }; /* diff --git a/kernel/sched/core.c b/kernel/sched/core.c index bc90327f950d..a7e0725c2469 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1202,13 +1202,16 @@ bool sched_can_stop_tick(struct rq *rq) return true; /* - * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left; - * if there's more than one we need the tick for involuntary - * preemption. + * If there are no DL,RR/FIFO tasks, there must only be CFS or SCX tasks + * left. For CFS, if there's more than one we need the tick for + * involuntary preemption. For SCX, ask. */ if (!scx_switched_all() && rq->nr_running > 1) return false; + if (scx_enabled() && !scx_can_stop_tick(rq)) + return false; + return true; } #endif /* CONFIG_NO_HZ_FULL */ diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index c9aa74e55999..ed35b5575b9f 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -478,7 +478,8 @@ static void update_curr_scx(struct rq *rq) account_group_exec_runtime(curr, delta_exec); cgroup_account_cputime(curr, delta_exec); - curr->scx.slice -= min(curr->scx.slice, delta_exec); + if (curr->scx.slice != SCX_SLICE_INF) + curr->scx.slice -= min(curr->scx.slice, delta_exec); } static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p, @@ -1400,6 +1401,20 @@ static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first) SCX_CALL_OP(SCX_KF_REST, running, p); watchdog_unwatch_task(p, true); + + /* + * @p is getting newly scheduled or got kicked after someone updated its + * slice. Refresh whether tick can be stopped. See can_stop_tick_scx(). + */ + if ((p->scx.slice == SCX_SLICE_INF) != + (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) { + if (p->scx.slice == SCX_SLICE_INF) + rq->scx.flags |= SCX_RQ_CAN_STOP_TICK; + else + rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK; + + sched_update_tick_dependency(rq); + } } static void put_prev_task_scx(struct rq *rq, struct task_struct *p) @@ -1940,6 +1955,26 @@ int scx_check_setscheduler(struct task_struct *p, int policy) return 0; } +#ifdef CONFIG_NO_HZ_FULL +bool scx_can_stop_tick(struct rq *rq) +{ + struct task_struct *p = rq->curr; + + if (scx_ops_disabling()) + return false; + + if (p->sched_class != &ext_sched_class) + return true; + + /* + * @rq can dispatch from different DSQs, so we can't tell whether it + * needs the tick or not by looking at nr_running. Allow stopping ticks + * iff the BPF scheduler indicated so. See set_next_task_scx(). 
+ */ + return rq->scx.flags & SCX_RQ_CAN_STOP_TICK; +} +#endif + /* * Omitted operations: * @@ -2100,7 +2135,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work) struct rhashtable_iter rht_iter; struct scx_dispatch_q *dsq; const char *reason; - int i, type; + int i, cpu, type; type = atomic_read(&scx_exit_type); while (true) { @@ -2197,6 +2232,10 @@ static void scx_ops_disable_workfn(struct kthread_work *work) scx_task_iter_exit(&sti); spin_unlock_irq(&scx_tasks_lock); + /* kick all CPUs to restore ticks */ + for_each_possible_cpu(cpu) + resched_cpu(cpu); + forward_progress_guaranteed: /* * Here, every runnable task is guaranteed to make forward progress and diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index 0b04626e8ca2..9c9284f91e38 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -82,6 +82,7 @@ int scx_fork(struct task_struct *p); void scx_post_fork(struct task_struct *p); void scx_cancel_fork(struct task_struct *p); int scx_check_setscheduler(struct task_struct *p, int policy); +bool scx_can_stop_tick(struct rq *rq); void init_sched_ext_class(void); __printf(2, 3) void scx_ops_error_type(enum scx_exit_type type, @@ -141,6 +142,7 @@ static inline void scx_post_fork(struct task_struct *p) {} static inline void scx_cancel_fork(struct task_struct *p) {} static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; } +static inline bool scx_can_stop_tick(struct rq *rq) { return true; } static inline void init_sched_ext_class(void) {} static inline void scx_notify_sched_tick(void) {} diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index cbdfc7b61225..e6dacf488a20 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -686,12 +686,18 @@ struct cfs_rq { }; #ifdef CONFIG_SCHED_CLASS_EXT +/* scx_rq->flags, protected by the rq lock */ +enum scx_rq_flags { + SCX_RQ_CAN_STOP_TICK = 1 << 0, +}; + struct scx_rq { struct scx_dispatch_q local_dsq; struct list_head watchdog_list; u64 ops_qseq; u64 extra_enq_flags; /* see move_task_to_local_dsq() */ u32 nr_running; + u32 flags; cpumask_var_t cpus_to_kick; cpumask_var_t cpus_to_preempt; struct irq_work kick_cpus_irq_work; diff --git a/tools/sched_ext/scx_example_central.bpf.c b/tools/sched_ext/scx_example_central.bpf.c index 443504fa68f8..4cec04b4c2ed 100644 --- a/tools/sched_ext/scx_example_central.bpf.c +++ b/tools/sched_ext/scx_example_central.bpf.c @@ -13,7 +13,26 @@ * through per-CPU BPF queues. The current design is chosen to maximally * utilize and verify various SCX mechanisms such as LOCAL_ON dispatching. * - * b. Preemption + * b. Tickless operation + * + * All tasks are dispatched with the infinite slice which allows stopping the + * ticks on CONFIG_NO_HZ_FULL kernels running with the proper nohz_full + * parameter. The tickless operation can be observed through + * /proc/interrupts. + * + * Periodic switching is enforced by a periodic timer checking all CPUs and + * preempting them as necessary. Unfortunately, BPF timer currently doesn't + * have a way to pin to a specific CPU, so the periodic timer isn't pinned to + * the central CPU. + * + * c. Preemption + * + * Kthreads are unconditionally queued to the head of a matching local dsq + * and dispatched with SCX_DSQ_PREEMPT. This ensures that a kthread is always + * prioritized over user threads, which is required for ensuring forward + * progress as e.g. the periodic timer may run on a ksoftirqd and if the + * ksoftirqd gets starved by a user thread, there may not be anything else to + * vacate that user thread. 
* * SCX_KICK_PREEMPT is used to trigger scheduling and CPUs to move to the * next tasks. @@ -42,7 +61,7 @@ const volatile s32 central_cpu; const volatile u32 nr_cpu_ids = 64; /* !0 for veristat, set during init */ u64 nr_total, nr_locals, nr_queued, nr_lost_pids; -u64 nr_dispatches, nr_mismatches, nr_retries; +u64 nr_timers, nr_dispatches, nr_mismatches, nr_retries; u64 nr_overflows; struct user_exit_info uei; @@ -55,6 +74,7 @@ struct { /* can't use percpu map due to bad lookups */ static bool cpu_gimme_task[MAX_CPUS]; +static u64 cpu_started_at[MAX_CPUS]; struct central_timer { struct bpf_timer timer; @@ -67,6 +87,11 @@ struct { __type(value, struct central_timer); } central_timer SEC(".maps"); +static bool vtime_before(u64 a, u64 b) +{ + return (s64)(a - b) < 0; +} + s32 BPF_STRUCT_OPS(central_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags) { @@ -85,9 +110,22 @@ void BPF_STRUCT_OPS(central_enqueue, struct task_struct *p, u64 enq_flags) __sync_fetch_and_add(&nr_total, 1); + /* + * Push per-cpu kthreads at the head of local dsq's and preempt the + * corresponding CPU. This ensures that e.g. ksoftirqd isn't blocked + * behind other threads which is necessary for forward progress + * guarantee as we depend on the BPF timer which may run from ksoftirqd. + */ + if ((p->flags & PF_KTHREAD) && p->nr_cpus_allowed == 1) { + __sync_fetch_and_add(&nr_locals, 1); + scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_INF, + enq_flags | SCX_ENQ_PREEMPT); + return; + } + if (bpf_map_push_elem(¢ral_q, &pid, 0)) { __sync_fetch_and_add(&nr_overflows, 1); - scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_DFL, enq_flags); + scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, enq_flags); return; } @@ -120,13 +158,13 @@ static bool dispatch_to_cpu(s32 cpu) */ if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) { __sync_fetch_and_add(&nr_mismatches, 1); - scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_DFL, 0); + scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, 0); bpf_task_release(p); continue; } /* dispatch to local and mark that @cpu doesn't need more */ - scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL, 0); + scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_INF, 0); if (cpu != central_cpu) scx_bpf_kick_cpu(cpu, 0); @@ -194,12 +232,81 @@ void BPF_STRUCT_OPS(central_dispatch, s32 cpu, struct task_struct *prev) } } +void BPF_STRUCT_OPS(central_running, struct task_struct *p) +{ + s32 cpu = scx_bpf_task_cpu(p); + u64 *started_at = MEMBER_VPTR(cpu_started_at, [cpu]); + if (started_at) + *started_at = bpf_ktime_get_ns() ?: 1; /* 0 indicates idle */ +} + +void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable) +{ + s32 cpu = scx_bpf_task_cpu(p); + u64 *started_at = MEMBER_VPTR(cpu_started_at, [cpu]); + if (started_at) + *started_at = 0; +} + +static int central_timerfn(void *map, int *key, struct bpf_timer *timer) +{ + u64 now = bpf_ktime_get_ns(); + u64 nr_to_kick = nr_queued; + s32 i; + + bpf_for(i, 0, nr_cpu_ids) { + s32 cpu = (nr_timers + i) % nr_cpu_ids; + u64 *started_at; + + if (cpu == central_cpu) + continue; + + /* kick iff the current one exhausted its slice */ + started_at = MEMBER_VPTR(cpu_started_at, [cpu]); + if (started_at && *started_at && + vtime_before(now, *started_at + SCX_SLICE_DFL)) + continue; + + /* and there's something pending */ + if (scx_bpf_dsq_nr_queued(FALLBACK_DSQ_ID) || + scx_bpf_dsq_nr_queued(SCX_DSQ_LOCAL_ON | cpu)) + ; + else if (nr_to_kick) + nr_to_kick--; + else + continue; + + scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT); + } + + 
scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT); + + bpf_timer_start(timer, TIMER_INTERVAL_NS, 0); + __sync_fetch_and_add(&nr_timers, 1); + return 0; +} + int BPF_STRUCT_OPS_SLEEPABLE(central_init) { + u32 key = 0; + struct bpf_timer *timer; + int ret; + if (!switch_partial) scx_bpf_switch_all(); - return scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1); + ret = scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1); + if (ret) + return ret; + + timer = bpf_map_lookup_elem(¢ral_timer, &key); + if (!timer) + return -ESRCH; + + bpf_timer_init(timer, ¢ral_timer, CLOCK_MONOTONIC); + bpf_timer_set_callback(timer, central_timerfn); + ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, 0); + return ret; } void BPF_STRUCT_OPS(central_exit, struct scx_exit_info *ei) @@ -219,6 +326,8 @@ struct sched_ext_ops central_ops = { .select_cpu = (void *)central_select_cpu, .enqueue = (void *)central_enqueue, .dispatch = (void *)central_dispatch, + .running = (void *)central_running, + .stopping = (void *)central_stopping, .init = (void *)central_init, .exit = (void *)central_exit, .name = "central", diff --git a/tools/sched_ext/scx_example_central.c b/tools/sched_ext/scx_example_central.c index 959b305a93a9..7ad591cbdc65 100644 --- a/tools/sched_ext/scx_example_central.c +++ b/tools/sched_ext/scx_example_central.c @@ -76,7 +76,8 @@ int main(int argc, char **argv) skel->bss->nr_locals, skel->bss->nr_queued, skel->bss->nr_lost_pids); - printf(" dispatch:%10lu mismatch:%10lu retry:%10lu\n", + printf("timer :%10lu dispatch:%10lu mismatch:%10lu retry:%10lu\n", + skel->bss->nr_timers, skel->bss->nr_dispatches, skel->bss->nr_mismatches, skel->bss->nr_retries); From patchwork Fri Mar 17 21:33:24 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71473 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp3690wrt; Fri, 17 Mar 2023 14:42:53 -0700 (PDT) X-Google-Smtp-Source: AK7set9MlUyE0rUtM92PGrsSEyqDJtjAUEKWdnoKxNFjrpUlyslWRHu9jsdz2vOJS5YFx9FZ+efh X-Received: by 2002:a17:902:9a92:b0:1a0:4405:5787 with SMTP id w18-20020a1709029a9200b001a044055787mr8730087plp.0.1679089373727; Fri, 17 Mar 2023 14:42:53 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679089373; cv=none; d=google.com; s=arc-20160816; b=YD2k7MEswmblhrHqVQBL/nC4XC9m147kbt+UH22g8gZmnyuY/w3BnT2ABb/IcRyw0O 8mKmnyWCl9WuZ+eL+5l4ID18lr63gmAZioWWVv0j05WKO7eRrqd7ZrphSTjC8XnsZOiQ wPxaKy1IKw/QwhBQ/ZAbgrAM65ZS7IZ0hazwoY/K4X1pPWtvkFi9jqfclNKu/Jph4i22 +w+DBl+vQ1hzsAHnsPzhexHBlELq2QVBvKcS5EoulAtHffsvedLl77tI8JJwl16LHtyt qg2TRroPWbG5Ruu521sCfSHmr9+ARm00J/uoJGRZecemR2CVw24n8RGqrMcsITAKKPE1 Fjpw== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=RSrWwn78A8NkyoI+B6fTIg1qB48gzCqnhlfC8zaKofU=; b=xjm+tXitPNm2xdZyHr9/LTuZXzc6Yh2Bw8Hg1YB1cOpFJQITG8MQcQcxWv/2GDQxrA Od6xO60movwyJVfTABa5jqzmET6dYjqe4sT8SE8hTYkObPQgPEbFAhlpsJ2CuhldgCRP biQY2PRcPRxg0KFIKFvuH7hIo+OnmbnfFSYl121FPqjzyi9oHtCLrohMz/nsAr64YwUm 6d4TfMJItJMaOU5cpkDrUmahcmFIOQAJsd5jqmbxqnylCejBImYz0EZj2tOlmbErmJ7e 7PXZs446ySMx7Ri7V/3Tvu2OMg4H9oM2EJg1su3IeCWQgMGMf6O1UQSfPxgDL/0c/3Op kLIA== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=bAJustyi; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) 
From: Tejun Heo
To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com
Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo
Subject: [PATCH 23/32] sched_ext: Track tasks that are subjects of the in-flight SCX operation
Date: Fri, 17 Mar 2023 11:33:24 -1000
Message-Id: <20230317213333.2174969-24-tj@kernel.org>
In-Reply-To: <20230317213333.2174969-1-tj@kernel.org>
References: <20230317213333.2174969-1-tj@kernel.org>

When some SCX operations are in flight, it is known that the subject task's rq lock is held throughout, which makes it safe to access certain fields of the task - e.g. its current task_group. We want to add SCX kfunc helpers that can make use of this guarantee - e.g. to help determine the currently associated CPU cgroup from the task's current task_group.

As it'd be dangerous to call such a helper on a task which isn't rq lock protected, the helper should be able to verify the input task and reject it accordingly.

This patch adds sched_ext_entity.kf_tasks[], which tracks the tasks that are currently being operated on by a terminal SCX operation. The new SCX_CALL_OP_[2]TASK[_RET]() macros can be used when invoking SCX operations which take tasks as arguments, and scx_kf_allowed_on_arg_tasks() can be used by kfunc helpers to verify the input task status.

Note that as sched_ext_entity.kf_tasks[] can't handle nesting, the tracking is currently limited to terminal SCX operations. If needed in the future, this restriction can be removed by moving the tracking to the task side with a couple of per-task counters.
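As an illustration of how the new check is intended to be used, a kernel-side kfunc that is only safe on the task currently being operated on would gate itself roughly as in the sketch below. The helper shown is hypothetical and simplified - scx_bpf_task_cgroup(), added later in the series, is the kind of real user this infrastructure is for - and it assumes it lives in kernel/sched code where task_group() is available.

/*
 * Hypothetical sketch, not part of this patch: a kfunc that relies on the
 * subject task's rq lock being held rejects any task that isn't a subject
 * of the in-flight operation.
 */
struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
{
	if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p))
		return NULL;	/* scx_ops_error() has already been raised */

	/*
	 * Safe: @p is kf_tasks[0] or kf_tasks[1] of the in-flight operation,
	 * so its rq lock is held and its task_group association can't change
	 * under us.
	 */
	return task_group(p)->css.cgroup;
}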
Signed-off-by: Tejun Heo Reviewed-by: David Vernet --- include/linux/sched/ext.h | 2 + kernel/sched/ext.c | 91 +++++++++++++++++++++++++++++++-------- 2 files changed, 76 insertions(+), 17 deletions(-) diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 2f2ee3e05904..1ed07b4bdb24 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -449,6 +449,7 @@ enum scx_kf_mask { SCX_KF_REST = 1 << 5, /* other rq-locked operations */ __SCX_KF_RQ_LOCKED = SCX_KF_DISPATCH | SCX_KF_ENQUEUE | SCX_KF_REST, + __SCX_KF_TERMINAL = SCX_KF_ENQUEUE | SCX_KF_REST, }; /* @@ -464,6 +465,7 @@ struct sched_ext_entity { s32 sticky_cpu; s32 holding_cpu; u32 kf_mask; /* see scx_kf_mask above */ + struct task_struct *kf_tasks[2]; /* see SCX_CALL_OP_TASK() */ atomic64_t ops_state; unsigned long runnable_at; diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index ed35b5575b9f..ac7b2d57b656 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -239,6 +239,47 @@ do { \ __ret; \ }) +/* + * Some kfuncs are allowed only on the tasks that are subjects of the + * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such + * restrictions, the following SCX_CALL_OP_*() variants should be used when + * invoking scx_ops operations that take task arguments. These can only be used + * for non-nesting operations due to the way the tasks are tracked. + * + * kfuncs which can only operate on such tasks can in turn use + * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on + * the specific task. + */ +#define SCX_CALL_OP_TASK(mask, op, task, args...) \ +do { \ + BUILD_BUG_ON(mask & ~__SCX_KF_TERMINAL); \ + current->scx.kf_tasks[0] = task; \ + SCX_CALL_OP(mask, op, task, ##args); \ + current->scx.kf_tasks[0] = NULL; \ +} while (0) + +#define SCX_CALL_OP_TASK_RET(mask, op, task, args...) \ +({ \ + __typeof__(scx_ops.op(task, ##args)) __ret; \ + BUILD_BUG_ON(mask & ~__SCX_KF_TERMINAL); \ + current->scx.kf_tasks[0] = task; \ + __ret = SCX_CALL_OP_RET(mask, op, task, ##args); \ + current->scx.kf_tasks[0] = NULL; \ + __ret; \ +}) + +#define SCX_CALL_OP_2TASKS_RET(mask, op, task0, task1, args...) 
\ +({ \ + __typeof__(scx_ops.op(task0, task1, ##args)) __ret; \ + BUILD_BUG_ON(mask & ~__SCX_KF_TERMINAL); \ + current->scx.kf_tasks[0] = task0; \ + current->scx.kf_tasks[1] = task1; \ + __ret = SCX_CALL_OP_RET(mask, op, task0, task1, ##args); \ + current->scx.kf_tasks[0] = NULL; \ + current->scx.kf_tasks[1] = NULL; \ + __ret; \ +}) + /* @mask is constant, always inline to cull unnecessary branches */ static __always_inline bool scx_kf_allowed(u32 mask) { @@ -269,6 +310,22 @@ static __always_inline bool scx_kf_allowed(u32 mask) return true; } +/* see SCX_CALL_OP_TASK() */ +static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask, + struct task_struct *p) +{ + if (!scx_kf_allowed(__SCX_KF_RQ_LOCKED)) + return false; + + if (unlikely((p != current->scx.kf_tasks[0] && + p != current->scx.kf_tasks[1]))) { + scx_ops_error("called on a task not being operated on"); + return false; + } + + return true; +} + /** * scx_task_iter_init - Initialize a task iterator * @iter: iterator to init @@ -706,7 +763,7 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags, WARN_ON_ONCE(*ddsp_taskp); *ddsp_taskp = p; - SCX_CALL_OP(SCX_KF_ENQUEUE, enqueue, p, enq_flags); + SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags); /* * If not directly dispatched, QUEUEING isn't clear yet and dispatch or @@ -778,7 +835,7 @@ static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags add_nr_running(rq, 1); if (SCX_HAS_OP(runnable)) - SCX_CALL_OP(SCX_KF_REST, runnable, p, enq_flags); + SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags); do_enqueue_task(rq, p, enq_flags, sticky_cpu); } @@ -803,7 +860,7 @@ static void ops_dequeue(struct task_struct *p, u64 deq_flags) BUG(); case SCX_OPSS_QUEUED: if (SCX_HAS_OP(dequeue)) - SCX_CALL_OP(SCX_KF_REST, dequeue, p, deq_flags); + SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, p, deq_flags); if (atomic64_try_cmpxchg(&p->scx.ops_state, &opss, SCX_OPSS_NONE)) @@ -854,11 +911,11 @@ static void dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags */ if (SCX_HAS_OP(stopping) && task_current(rq, p)) { update_curr_scx(rq); - SCX_CALL_OP(SCX_KF_REST, stopping, p, false); + SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, false); } if (SCX_HAS_OP(quiescent)) - SCX_CALL_OP(SCX_KF_REST, quiescent, p, deq_flags); + SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, p, deq_flags); if (deq_flags & SCX_DEQ_SLEEP) p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP; @@ -877,7 +934,7 @@ static void yield_task_scx(struct rq *rq) struct task_struct *p = rq->curr; if (SCX_HAS_OP(yield)) - SCX_CALL_OP_RET(SCX_KF_REST, yield, p, NULL); + SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, p, NULL); else p->scx.slice = 0; } @@ -887,7 +944,7 @@ static bool yield_to_task_scx(struct rq *rq, struct task_struct *to) struct task_struct *from = rq->curr; if (SCX_HAS_OP(yield)) - return SCX_CALL_OP_RET(SCX_KF_REST, yield, from, to); + return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, from, to); else return false; } @@ -1398,7 +1455,7 @@ static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first) /* see dequeue_task_scx() on why we skip when !QUEUED */ if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED)) - SCX_CALL_OP(SCX_KF_REST, running, p); + SCX_CALL_OP_TASK(SCX_KF_REST, running, p); watchdog_unwatch_task(p, true); @@ -1454,7 +1511,7 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p) /* see dequeue_task_scx() on why we skip when !QUEUED */ if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED)) - SCX_CALL_OP(SCX_KF_REST, 
stopping, p, true); + SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, true); /* * If we're being called from put_prev_task_balance(), balance_scx() may @@ -1617,8 +1674,8 @@ static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flag if (SCX_HAS_OP(select_cpu)) { s32 cpu; - cpu = SCX_CALL_OP_RET(SCX_KF_REST, select_cpu, p, prev_cpu, - wake_flags); + cpu = SCX_CALL_OP_TASK_RET(SCX_KF_REST, select_cpu, p, prev_cpu, + wake_flags); if (ops_cpu_valid(cpu)) { return cpu; } else { @@ -1644,8 +1701,8 @@ static void set_cpus_allowed_scx(struct task_struct *p, * designation pointless. Cast it away when calling the operation. */ if (SCX_HAS_OP(set_cpumask)) - SCX_CALL_OP(SCX_KF_REST, set_cpumask, p, - (struct cpumask *)p->cpus_ptr); + SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p, + (struct cpumask *)p->cpus_ptr); } static void reset_idle_masks(void) @@ -1806,7 +1863,7 @@ static void scx_ops_enable_task(struct task_struct *p) if (SCX_HAS_OP(enable)) { struct scx_enable_args args = { }; - SCX_CALL_OP(SCX_KF_REST, enable, p, &args); + SCX_CALL_OP_TASK(SCX_KF_REST, enable, p, &args); } p->scx.flags &= ~SCX_TASK_OPS_PREPPED; p->scx.flags |= SCX_TASK_OPS_ENABLED; @@ -1845,7 +1902,7 @@ static void refresh_scx_weight(struct task_struct *p) p->scx.weight = sched_weight_to_cgroup(weight); if (SCX_HAS_OP(set_weight)) - SCX_CALL_OP(SCX_KF_REST, set_weight, p, p->scx.weight); + SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight); } void scx_pre_fork(struct task_struct *p) @@ -1936,8 +1993,8 @@ static void switching_to_scx(struct rq *rq, struct task_struct *p) * different scheduler class. Keep the BPF scheduler up-to-date. */ if (SCX_HAS_OP(set_cpumask)) - SCX_CALL_OP(SCX_KF_REST, set_cpumask, p, - (struct cpumask *)p->cpus_ptr); + SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p, + (struct cpumask *)p->cpus_ptr); } static void check_preempt_curr_scx(struct rq *rq, struct task_struct *p,int wake_flags) {} From patchwork Fri Mar 17 21:33:25 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71479 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp6166wrt; Fri, 17 Mar 2023 14:52:37 -0700 (PDT) X-Google-Smtp-Source: AK7set9YsAluaPXx1VNZxFSBU/H780A68wgKs56nd+PKUzSQejmX9bwb79F+K8oNb2t68VcZc4AQ X-Received: by 2002:a17:902:c40c:b0:19a:a815:2864 with SMTP id k12-20020a170902c40c00b0019aa8152864mr8857042plk.4.1679089957457; Fri, 17 Mar 2023 14:52:37 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679089957; cv=none; d=google.com; s=arc-20160816; b=GearMg5dJJrPmQpoLnDN5Umyxkj6BwtkHEBQWvByJj56MfRRH+zf7G9xlKq4g6p6bM qjvn8nc71/M6eu/OH1ZtZ3nZKMR+v/5LQgzDHOcR4l3aKU2WUMi7Ae1yKDcS+TwhUt9c 2k8x0dAsWxCuxQC9N6Kj2oXqffDhRInAvHM96fVrZrC49kI6KpBVj+kfQLmY+YQNcgqR t+vZePA1GMlk2mCK3kChQ22TCSxCKzW+6R5HmBrW+UBzTI1KLmJTJnDpWOSI2lX73fUD S7GgG4sCfe312WjeI9K2CcOf3pqO3pvKtBLRA3AM6CIbgPwpC0uDbuNgkhdbu/LHriTu 8hAQ== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=URop9Rwoa/oriNypEYg1VhPsA6VU2wwgSg2A31vlxYM=; b=fljUU0zj/g7xGOQZ6+3nluXxEOkGx6OPD63PluQ/t4IjPCOStq1/3RTCO1V7xUAVKL cOECS6xgnL6hS4OPFBaA70CXN1vSjIAjXZr46ORnkbQRxtroGroRGG0FoDsydceWo0qE m6W/CGx9jp+OKuQv6R4vCPRLv95RFwydWsj6s1V8zuQVpVFoM90FZD1UjuRx/WrogrZq blT+33H35f7Pm+GftqgjXdmbjfYETEZiMHu+T++lyEDoJ4/c6sNFrPNxe4KWTzkFoZk3 
From: Tejun Heo
To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com
Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo, kernel test robot
Subject: [PATCH 24/32] sched_ext: Add cgroup support
Date: Fri, 17 Mar 2023 11:33:25 -1000
Message-Id: <20230317213333.2174969-25-tj@kernel.org>
In-Reply-To: <20230317213333.2174969-1-tj@kernel.org>
References: <20230317213333.2174969-1-tj@kernel.org>

Add sched_ext_ops operations to init/exit cgroups, and track task migrations and config changes. Because different BPF schedulers may implement different subsets of CPU control features, allow BPF schedulers to pick which cgroup interface files to enable using SCX_OPS_CGROUP_KNOB_* flags. For now, only the weight knobs are supported, but adding more should be straightforward.

While a BPF scheduler is being enabled or disabled, relevant cgroup operations are locked out using scx_cgroup_rwsem. This avoids situations like task prep taking place while the task is being moved across cgroups, making things easier for BPF schedulers.

This patch also adds scx_example_pair, which implements a variant of core scheduling where a hyperthread pair only runs tasks from the same cgroup. The BPF scheduler achieves this by putting tasks into per-cgroup queues, time-slicing the cgroup to run for each pair first, and then scheduling within the cgroup. See the header comment in scx_example_pair.bpf.c for more details.

Note that scx_example_pair's cgroup-boundary guarantee breaks down for tasks running in higher priority scheduler classes. This will be addressed by a followup patch which implements a mechanism to track CPU preemption.

v3:
* Make scx_example_pair switch all tasks by default.
* Convert to BPF inline iterators.
* scx_bpf_task_cgroup() is added to determine the current cgroup from the CPU controller's POV. This allows BPF schedulers to accurately track CPU cgroup membership.
* scx_example_flatcg added. This demonstrates a flattened-hierarchy implementation of CPU cgroup control and shows significant performance improvement when cgroups nested multiple levels deep are competing.

v2:
* Build fixes for different CONFIG combinations.
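For a sense of what the new hooks look like from the BPF side, the fragment below keeps a per-cgroup weight table up to date. It is a hypothetical sketch - the map layout and names are invented here, struct_ops registration is omitted, and it is far simpler than the scx_example_pair and scx_example_flatcg schedulers added by the patch - and it assumes the usual scx_common.bpf.h environment.

#include "scx_common.bpf.h"

char _license[] SEC("license") = "GPL";

struct cgrp_weight {
	u32 weight;		/* cpu.weight, [1..10000] */
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 16384);
	__type(key, u64);	/* cgroup ID */
	__type(value, struct cgrp_weight);
} cgrp_weights SEC(".maps");

s32 BPF_STRUCT_OPS_SLEEPABLE(sketch_cgroup_init, struct cgroup *cgrp,
			     struct scx_cgroup_init_args *args)
{
	u64 cgid = cgrp->kn->id;
	struct cgrp_weight w = { .weight = args->weight };

	/* called both at scheduler load time and on cgroup creation */
	return bpf_map_update_elem(&cgrp_weights, &cgid, &w, BPF_ANY);
}

void BPF_STRUCT_OPS(sketch_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
{
	u64 cgid = cgrp->kn->id;
	struct cgrp_weight *w = bpf_map_lookup_elem(&cgrp_weights, &cgid);

	if (w)
		w->weight = weight;
}

void BPF_STRUCT_OPS(sketch_cgroup_exit, struct cgroup *cgrp)
{
	u64 cgid = cgrp->kn->id;

	bpf_map_delete_elem(&cgrp_weights, &cgid);
}

Per the commit message above, the cpu.weight interface files themselves are only exposed when the scheduler also sets SCX_OPS_CGROUP_KNOB_WEIGHT in its ops flags.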
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden Reported-by: kernel test robot --- include/linux/sched/ext.h | 100 ++- init/Kconfig | 5 + kernel/sched/core.c | 70 +- kernel/sched/ext.c | 391 ++++++++++- kernel/sched/ext.h | 25 + kernel/sched/sched.h | 12 +- tools/sched_ext/.gitignore | 2 + tools/sched_ext/Makefile | 14 +- tools/sched_ext/scx_common.bpf.h | 1 + tools/sched_ext/scx_example_flatcg.bpf.c | 824 +++++++++++++++++++++++ tools/sched_ext/scx_example_flatcg.c | 228 +++++++ tools/sched_ext/scx_example_flatcg.h | 49 ++ tools/sched_ext/scx_example_pair.bpf.c | 536 +++++++++++++++ tools/sched_ext/scx_example_pair.c | 143 ++++ tools/sched_ext/scx_example_pair.h | 10 + 15 files changed, 2386 insertions(+), 24 deletions(-) create mode 100644 tools/sched_ext/scx_example_flatcg.bpf.c create mode 100644 tools/sched_ext/scx_example_flatcg.c create mode 100644 tools/sched_ext/scx_example_flatcg.h create mode 100644 tools/sched_ext/scx_example_pair.bpf.c create mode 100644 tools/sched_ext/scx_example_pair.c create mode 100644 tools/sched_ext/scx_example_pair.h diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 1ed07b4bdb24..9e47e320369d 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -12,6 +12,8 @@ #include #include +struct cgroup; + enum scx_consts { SCX_OPS_NAME_LEN = 128, SCX_EXIT_REASON_LEN = 128, @@ -108,14 +110,29 @@ enum scx_ops_flags { */ SCX_OPS_ENQ_EXITING = 1LLU << 2, + /* + * CPU cgroup knob enable flags + */ + SCX_OPS_CGROUP_KNOB_WEIGHT = 1LLU << 16, /* cpu.weight */ + SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE | SCX_OPS_ENQ_LAST | - SCX_OPS_ENQ_EXITING, + SCX_OPS_ENQ_EXITING | + SCX_OPS_CGROUP_KNOB_WEIGHT, }; /* argument container for ops.enable() and friends */ struct scx_enable_args { - /* empty for now */ +#ifdef CONFIG_EXT_GROUP_SCHED + /* the cgroup the task is joining */ + struct cgroup *cgroup; +#endif +}; + +/* argument container for ops->cgroup_init() */ +struct scx_cgroup_init_args { + /* the weight of the cgroup [1..10000] */ + u32 weight; }; /** @@ -333,7 +350,8 @@ struct sched_ext_ops { * @p: task to enable BPF scheduling for * @args: enable arguments, see the struct definition * - * Enable @p for BPF scheduling. @p will start running soon. + * Enable @p for BPF scheduling. @p is now in the cgroup specified for + * the preceding prep_enable() and will start running soon. */ void (*enable)(struct task_struct *p, struct scx_enable_args *args); @@ -357,6 +375,79 @@ struct sched_ext_ops { */ void (*disable)(struct task_struct *p); +#ifdef CONFIG_EXT_GROUP_SCHED + /** + * cgroup_init - Initialize a cgroup + * @cgrp: cgroup being initialized + * @args: init arguments, see the struct definition + * + * Either the BPF scheduler is being loaded or @cgrp created, initialize + * @cgrp for sched_ext. This operation may block. + * + * Return 0 for success, -errno for failure. An error return while + * loading will abort loading of the BPF scheduler. During cgroup + * creation, it will abort the specific cgroup creation. + */ + s32 (*cgroup_init)(struct cgroup *cgrp, + struct scx_cgroup_init_args *args); + + /** + * cgroup_exit - Exit a cgroup + * @cgrp: cgroup being exited + * + * Either the BPF scheduler is being unloaded or @cgrp destroyed, exit + * @cgrp for sched_ext. This operation my block. 
+ */ + void (*cgroup_exit)(struct cgroup *cgrp); + + /** + * cgroup_prep_move - Prepare a task to be moved to a different cgroup + * @p: task being moved + * @from: cgroup @p is being moved from + * @to: cgroup @p is being moved to + * + * Prepare @p for move from cgroup @from to @to. This operation may + * block and can be used for allocations. + * + * Return 0 for success, -errno for failure. An error return aborts the + * migration. + */ + s32 (*cgroup_prep_move)(struct task_struct *p, + struct cgroup *from, struct cgroup *to); + + /** + * cgroup_move - Commit cgroup move + * @p: task being moved + * @from: cgroup @p is being moved from + * @to: cgroup @p is being moved to + * + * Commit the move. @p is dequeued during this operation. + */ + void (*cgroup_move)(struct task_struct *p, + struct cgroup *from, struct cgroup *to); + + /** + * cgroup_cancel_move - Cancel cgroup move + * @p: task whose cgroup move is being canceled + * @from: cgroup @p was being moved from + * @to: cgroup @p was being moved to + * + * @p was cgroup_prep_move()'d but failed before reaching cgroup_move(). + * Undo the preparation. + */ + void (*cgroup_cancel_move)(struct task_struct *p, + struct cgroup *from, struct cgroup *to); + + /** + * cgroup_set_weight - A cgroup's weight is being changed + * @cgrp: cgroup whose weight is being updated + * @weight: new weight [1..10000] + * + * Update @tg's weight to @weight. + */ + void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight); +#endif /* CONFIG_CGROUPS */ + /* * All online ops must come before ops.init(). */ @@ -497,6 +588,9 @@ struct sched_ext_entity { /* cold fields */ struct list_head tasks_node; +#ifdef CONFIG_EXT_GROUP_SCHED + struct cgroup *cgrp_moving_from; +#endif }; void sched_ext_free(struct task_struct *p); diff --git a/init/Kconfig b/init/Kconfig index 1fb5f313d18f..375e9c6557b6 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1039,6 +1039,11 @@ config RT_GROUP_SCHED realtime bandwidth for them. See Documentation/scheduler/sched-rt-group.rst for more information. 
+config EXT_GROUP_SCHED + bool + depends on SCHED_CLASS_EXT && CGROUP_SCHED + default y + endif #CGROUP_SCHED config SCHED_MM_CID diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a7e0725c2469..0780414f3c15 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -9859,6 +9859,9 @@ void __init sched_init(void) root_task_group.shares = ROOT_TASK_GROUP_LOAD; init_cfs_bandwidth(&root_task_group.cfs_bandwidth); #endif /* CONFIG_FAIR_GROUP_SCHED */ +#ifdef CONFIG_EXT_GROUP_SCHED + root_task_group.scx_weight = CGROUP_WEIGHT_DFL; +#endif /* CONFIG_EXT_GROUP_SCHED */ #ifdef CONFIG_RT_GROUP_SCHED root_task_group.rt_se = (struct sched_rt_entity **)ptr; ptr += nr_cpu_ids * sizeof(void **); @@ -10315,6 +10318,7 @@ struct task_group *sched_create_group(struct task_group *parent) if (!alloc_rt_sched_group(tg, parent)) goto err; + scx_group_set_weight(tg, CGROUP_WEIGHT_DFL); alloc_uclamp_sched_group(tg, parent); return tg; @@ -10418,6 +10422,7 @@ void sched_move_task(struct task_struct *tsk) SCHED_CHANGE_BLOCK(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK) { sched_change_group(tsk); + scx_move_task(tsk); } /* @@ -10454,6 +10459,11 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) { struct task_group *tg = css_tg(css); struct task_group *parent = css_tg(css->parent); + int ret; + + ret = scx_tg_online(tg); + if (ret) + return ret; if (parent) sched_online_group(tg, parent); @@ -10470,6 +10480,13 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) return 0; } +static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css) +{ + struct task_group *tg = css_tg(css); + + scx_tg_offline(tg); +} + static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) { struct task_group *tg = css_tg(css); @@ -10487,9 +10504,10 @@ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) sched_unregister_group(tg); } -#ifdef CONFIG_RT_GROUP_SCHED +#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_EXT_GROUP_SCHED) static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) { +#ifdef CONFIG_RT_GROUP_SCHED struct task_struct *task; struct cgroup_subsys_state *css; @@ -10497,7 +10515,8 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) if (!sched_rt_can_attach(css_tg(css), task)) return -EINVAL; } - return 0; +#endif + return scx_cgroup_can_attach(tset); } #endif @@ -10508,8 +10527,17 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset) cgroup_taskset_for_each(task, css, tset) sched_move_task(task); + + scx_cgroup_finish_attach(); } +#ifdef CONFIG_EXT_GROUP_SCHED +static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset) +{ + scx_cgroup_cancel_attach(tset); +} +#endif + #ifdef CONFIG_UCLAMP_TASK_GROUP static void cpu_util_update_eff(struct cgroup_subsys_state *css) { @@ -10691,9 +10719,15 @@ static int cpu_uclamp_max_show(struct seq_file *sf, void *v) static int cpu_shares_write_u64(struct cgroup_subsys_state *css, struct cftype *cftype, u64 shareval) { + int ret; + if (shareval > scale_load_down(ULONG_MAX)) shareval = MAX_SHARES; - return sched_group_set_shares(css_tg(css), scale_load(shareval)); + ret = sched_group_set_shares(css_tg(css), scale_load(shareval)); + if (!ret) + scx_group_set_weight(css_tg(css), + sched_weight_to_cgroup(shareval)); + return ret; } static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, @@ -11157,11 +11191,15 @@ static int cpu_extra_stat_show(struct seq_file *sf, return 0; } -#ifdef CONFIG_FAIR_GROUP_SCHED +#if defined(CONFIG_FAIR_GROUP_SCHED) || 
defined(CONFIG_EXT_GROUP_SCHED) static unsigned long tg_weight(struct task_group *tg) { +#ifdef CONFIG_FAIR_GROUP_SCHED return scale_load_down(tg->shares); +#else + return sched_weight_from_cgroup(tg->scx_weight); +#endif } static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, @@ -11174,13 +11212,17 @@ static int cpu_weight_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, u64 cgrp_weight) { unsigned long weight; + int ret; if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX) return -ERANGE; weight = sched_weight_from_cgroup(cgrp_weight); - return sched_group_set_shares(css_tg(css), scale_load(weight)); + ret = sched_group_set_shares(css_tg(css), scale_load(weight)); + if (!ret) + scx_group_set_weight(css_tg(css), cgrp_weight); + return ret; } static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, @@ -11205,7 +11247,7 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, s64 nice) { unsigned long weight; - int idx; + int idx, ret; if (nice < MIN_NICE || nice > MAX_NICE) return -ERANGE; @@ -11214,7 +11256,11 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, idx = array_index_nospec(idx, 40); weight = sched_prio_to_weight[idx]; - return sched_group_set_shares(css_tg(css), scale_load(weight)); + ret = sched_group_set_shares(css_tg(css), scale_load(weight)); + if (!ret) + scx_group_set_weight(css_tg(css), + sched_weight_to_cgroup(weight)); + return ret; } #endif @@ -11276,7 +11322,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, #endif struct cftype cpu_cftypes[CPU_CFTYPE_CNT + 1] = { -#ifdef CONFIG_FAIR_GROUP_SCHED +#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_EXT_GROUP_SCHED) [CPU_CFTYPE_WEIGHT] = { .name = "weight", .flags = CFTYPE_NOT_ON_ROOT, @@ -11289,6 +11335,8 @@ struct cftype cpu_cftypes[CPU_CFTYPE_CNT + 1] = { .read_s64 = cpu_weight_nice_read_s64, .write_s64 = cpu_weight_nice_write_s64, }, +#endif +#ifdef CONFIG_FAIR_GROUP_SCHED [CPU_CFTYPE_IDLE] = { .name = "idle", .flags = CFTYPE_NOT_ON_ROOT, @@ -11330,13 +11378,17 @@ struct cftype cpu_cftypes[CPU_CFTYPE_CNT + 1] = { struct cgroup_subsys cpu_cgrp_subsys = { .css_alloc = cpu_cgroup_css_alloc, .css_online = cpu_cgroup_css_online, + .css_offline = cpu_cgroup_css_offline, .css_released = cpu_cgroup_css_released, .css_free = cpu_cgroup_css_free, .css_extra_stat_show = cpu_extra_stat_show, -#ifdef CONFIG_RT_GROUP_SCHED +#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_EXT_GROUP_SCHED) .can_attach = cpu_cgroup_can_attach, #endif .attach = cpu_cgroup_attach, +#ifdef CONFIG_EXT_GROUP_SCHED + .cancel_attach = cpu_cgroup_cancel_attach, +#endif .legacy_cftypes = cpu_legacy_cftypes, .dfl_cftypes = cpu_cftypes, .early_init = true, diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index ac7b2d57b656..736f764d0f62 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1813,6 +1813,28 @@ static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued) resched_curr(rq); } +#ifdef CONFIG_EXT_GROUP_SCHED +static struct cgroup *tg_cgrp(struct task_group *tg) +{ + /* + * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup, + * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the + * root cgroup. 
+ */ + if (tg && tg->css.cgroup) + return tg->css.cgroup; + else + return &cgrp_dfl_root.cgrp; +} + +#define SCX_ENABLE_ARGS_INIT_CGROUP(tg) .cgroup = tg_cgrp(tg), + +#else /* CONFIG_EXT_GROUP_SCHED */ + +#define SCX_ENABLE_ARGS_INIT_CGROUP(tg) + +#endif /* CONFIG_EXT_GROUP_SCHED */ + static int scx_ops_prepare_task(struct task_struct *p, struct task_group *tg) { int ret; @@ -1822,7 +1844,9 @@ static int scx_ops_prepare_task(struct task_struct *p, struct task_group *tg) p->scx.disallow = false; if (SCX_HAS_OP(prep_enable)) { - struct scx_enable_args args = { }; + struct scx_enable_args args = { + SCX_ENABLE_ARGS_INIT_CGROUP(tg) + }; ret = SCX_CALL_OP_RET(SCX_KF_SLEEPABLE, prep_enable, p, &args); if (unlikely(ret)) { @@ -1862,7 +1886,9 @@ static void scx_ops_enable_task(struct task_struct *p) WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_OPS_PREPPED)); if (SCX_HAS_OP(enable)) { - struct scx_enable_args args = { }; + struct scx_enable_args args = { + SCX_ENABLE_ARGS_INIT_CGROUP(task_group(p)) + }; SCX_CALL_OP_TASK(SCX_KF_REST, enable, p, &args); } p->scx.flags &= ~SCX_TASK_OPS_PREPPED; @@ -1875,7 +1901,9 @@ static void scx_ops_disable_task(struct task_struct *p) if (p->scx.flags & SCX_TASK_OPS_PREPPED) { if (SCX_HAS_OP(cancel_enable)) { - struct scx_enable_args args = { }; + struct scx_enable_args args = { + SCX_ENABLE_ARGS_INIT_CGROUP(task_group(p)) + }; SCX_CALL_OP(SCX_KF_REST, cancel_enable, p, &args); } p->scx.flags &= ~SCX_TASK_OPS_PREPPED; @@ -2032,6 +2060,166 @@ bool scx_can_stop_tick(struct rq *rq) } #endif +#ifdef CONFIG_EXT_GROUP_SCHED + +DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem); + +int scx_tg_online(struct task_group *tg) +{ + int ret = 0; + + WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED)); + + percpu_down_read(&scx_cgroup_rwsem); + + if (SCX_HAS_OP(cgroup_init)) { + struct scx_cgroup_init_args args = { .weight = tg->scx_weight }; + + ret = SCX_CALL_OP_RET(SCX_KF_SLEEPABLE, cgroup_init, + tg->css.cgroup, &args); + if (!ret) + tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED; + else + ret = ops_sanitize_err("cgroup_init", ret); + } else { + tg->scx_flags |= SCX_TG_ONLINE; + } + + percpu_up_read(&scx_cgroup_rwsem); + return ret; +} + +void scx_tg_offline(struct task_group *tg) +{ + WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE)); + + percpu_down_read(&scx_cgroup_rwsem); + + if (SCX_HAS_OP(cgroup_exit) && (tg->scx_flags & SCX_TG_INITED)) + SCX_CALL_OP(SCX_KF_SLEEPABLE, cgroup_exit, tg->css.cgroup); + tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED); + + percpu_up_read(&scx_cgroup_rwsem); +} + +int scx_cgroup_can_attach(struct cgroup_taskset *tset) +{ + struct cgroup_subsys_state *css; + struct task_struct *p; + int ret; + + /* released in scx_finish/cancel_attach() */ + percpu_down_read(&scx_cgroup_rwsem); + + if (!scx_enabled()) + return 0; + + cgroup_taskset_for_each(p, css, tset) { + struct cgroup *from = tg_cgrp(task_group(p)); + + if (SCX_HAS_OP(cgroup_prep_move)) { + ret = SCX_CALL_OP_RET(SCX_KF_SLEEPABLE, cgroup_prep_move, + p, from, css->cgroup); + if (ret) + goto err; + } + + WARN_ON_ONCE(p->scx.cgrp_moving_from); + p->scx.cgrp_moving_from = from; + } + + return 0; + +err: + cgroup_taskset_for_each(p, css, tset) { + if (!p->scx.cgrp_moving_from) + break; + if (SCX_HAS_OP(cgroup_cancel_move)) + SCX_CALL_OP(SCX_KF_SLEEPABLE, cgroup_cancel_move, p, + p->scx.cgrp_moving_from, css->cgroup); + p->scx.cgrp_moving_from = NULL; + } + + percpu_up_read(&scx_cgroup_rwsem); + return ops_sanitize_err("cgroup_prep_move", ret); +} + +void scx_move_task(struct task_struct *p) +{ + 
/* + * We're called from sched_move_task() which handles both cgroup and + * autogroup moves. Ignore the latter. + */ + if (task_group_is_autogroup(task_group(p))) + return; + + if (!scx_enabled()) + return; + + if (SCX_HAS_OP(cgroup_move)) { + WARN_ON_ONCE(!p->scx.cgrp_moving_from); + SCX_CALL_OP_TASK(SCX_KF_UNLOCKED, cgroup_move, p, + p->scx.cgrp_moving_from, tg_cgrp(task_group(p))); + } + p->scx.cgrp_moving_from = NULL; +} + +void scx_cgroup_finish_attach(void) +{ + percpu_up_read(&scx_cgroup_rwsem); +} + +void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) +{ + struct cgroup_subsys_state *css; + struct task_struct *p; + + if (!scx_enabled()) + goto out_unlock; + + cgroup_taskset_for_each(p, css, tset) { + if (SCX_HAS_OP(cgroup_cancel_move)) { + WARN_ON_ONCE(!p->scx.cgrp_moving_from); + SCX_CALL_OP(SCX_KF_SLEEPABLE, cgroup_cancel_move, p, + p->scx.cgrp_moving_from, css->cgroup); + } + p->scx.cgrp_moving_from = NULL; + } +out_unlock: + percpu_up_read(&scx_cgroup_rwsem); +} + +void scx_group_set_weight(struct task_group *tg, unsigned long weight) +{ + percpu_down_read(&scx_cgroup_rwsem); + + if (tg->scx_weight != weight) { + if (SCX_HAS_OP(cgroup_set_weight)) + SCX_CALL_OP(SCX_KF_SLEEPABLE, cgroup_set_weight, + tg_cgrp(tg), weight); + tg->scx_weight = weight; + } + + percpu_up_read(&scx_cgroup_rwsem); +} + +static void scx_cgroup_lock(void) +{ + percpu_down_write(&scx_cgroup_rwsem); +} + +static void scx_cgroup_unlock(void) +{ + percpu_up_write(&scx_cgroup_rwsem); +} + +#else /* CONFIG_EXT_GROUP_SCHED */ + +static inline void scx_cgroup_lock(void) {} +static inline void scx_cgroup_unlock(void) {} + +#endif /* CONFIG_EXT_GROUP_SCHED */ + /* * Omitted operations: * @@ -2161,6 +2349,131 @@ static void destroy_dsq(u64 dsq_id) rcu_read_unlock(); } +#ifdef CONFIG_EXT_GROUP_SCHED +static void scx_cgroup_exit(void) +{ + struct cgroup_subsys_state *css; + + percpu_rwsem_assert_held(&scx_cgroup_rwsem); + + /* + * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk + * cgroups and exit all the inited ones, all online cgroups are exited. + */ + rcu_read_lock(); + css_for_each_descendant_post(css, &root_task_group.css) { + struct task_group *tg = css_tg(css); + + if (!(tg->scx_flags & SCX_TG_INITED)) + continue; + tg->scx_flags &= ~SCX_TG_INITED; + + if (!scx_ops.cgroup_exit) + continue; + + if (WARN_ON_ONCE(!css_tryget(css))) + continue; + rcu_read_unlock(); + + SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, css->cgroup); + + rcu_read_lock(); + css_put(css); + } + rcu_read_unlock(); +} + +static int scx_cgroup_init(void) +{ + struct cgroup_subsys_state *css; + int ret; + + percpu_rwsem_assert_held(&scx_cgroup_rwsem); + + /* + * scx_tg_on/offline() are excluded thorugh scx_cgroup_rwsem. If we walk + * cgroups and init, all online cgroups are initialized. 
+ */ + rcu_read_lock(); + css_for_each_descendant_pre(css, &root_task_group.css) { + struct task_group *tg = css_tg(css); + struct scx_cgroup_init_args args = { .weight = tg->scx_weight }; + + if ((tg->scx_flags & + (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE) + continue; + + if (!scx_ops.cgroup_init) { + tg->scx_flags |= SCX_TG_INITED; + continue; + } + + if (WARN_ON_ONCE(!css_tryget(css))) + continue; + rcu_read_unlock(); + + ret = SCX_CALL_OP_RET(SCX_KF_SLEEPABLE, cgroup_init, + css->cgroup, &args); + if (ret) { + css_put(css); + return ret; + } + tg->scx_flags |= SCX_TG_INITED; + + rcu_read_lock(); + css_put(css); + } + rcu_read_unlock(); + + return 0; +} + +static void scx_cgroup_config_knobs(void) +{ + static DEFINE_MUTEX(cgintf_mutex); + DECLARE_BITMAP(mask, CPU_CFTYPE_CNT) = { }; + u64 knob_flags; + int i; + + /* + * Called from both class switch and ops enable/disable paths, + * synchronize internally. + */ + mutex_lock(&cgintf_mutex); + + /* if fair is in use, all knobs should be shown */ + if (!scx_switched_all()) { + bitmap_fill(mask, CPU_CFTYPE_CNT); + goto apply; + } + + /* + * On ext, only show the supported knobs. Otherwise, show all possible + * knobs so that configuration attempts succeed and the states are + * remembered while ops is not loaded. + */ + if (scx_enabled()) + knob_flags = scx_ops.flags; + else + knob_flags = SCX_OPS_ALL_FLAGS; + + if (knob_flags & SCX_OPS_CGROUP_KNOB_WEIGHT) { + __set_bit(CPU_CFTYPE_WEIGHT, mask); + __set_bit(CPU_CFTYPE_WEIGHT_NICE, mask); + } +apply: + for (i = 0; i < CPU_CFTYPE_CNT; i++) + cgroup_show_cftype(&cpu_cftypes[i], test_bit(i, mask)); + + mutex_unlock(&cgintf_mutex); +} + +#else +static void scx_cgroup_exit(void) {} +static int scx_cgroup_init(void) { return 0; } +static void scx_cgroup_config_knobs(void) {} +#endif + /* * Used by sched_fork() and __setscheduler_prio() to pick the matching * sched_class. dl/rt are already handled. @@ -2304,9 +2617,10 @@ static void scx_ops_disable_workfn(struct kthread_work *work) static_branch_disable(&__scx_switched_all); WRITE_ONCE(scx_switching_all, false); - /* avoid racing against fork */ + /* avoid racing against fork and cgroup changes */ cpus_read_lock(); percpu_down_write(&scx_fork_rwsem); + scx_cgroup_lock(); spin_lock_irq(&scx_tasks_lock); scx_task_iter_init(&sti); @@ -2343,6 +2657,9 @@ static void scx_ops_disable_workfn(struct kthread_work *work) static_branch_disable_cpuslocked(&scx_builtin_idle_enabled); synchronize_rcu(); + scx_cgroup_exit(); + + scx_cgroup_unlock(); percpu_up_write(&scx_fork_rwsem); cpus_read_unlock(); @@ -2381,6 +2698,8 @@ static void scx_ops_disable_workfn(struct kthread_work *work) WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) != SCX_OPS_DISABLING); + + scx_cgroup_config_knobs(); } static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn); @@ -2526,10 +2845,11 @@ static int scx_ops_enable(struct sched_ext_ops *ops) scx_watchdog_timeout / 2); /* - * Lock out forks before opening the floodgate so that they don't wander - * into the operations prematurely. + * Lock out forks, cgroup on/offlining and moves before opening the + * floodgate so that they don't wander into the operations prematurely. */ percpu_down_write(&scx_fork_rwsem); + scx_cgroup_lock(); for (i = 0; i < SCX_NR_ONLINE_OPS; i++) if (((void (**)(void))ops)[i]) @@ -2548,6 +2868,14 @@ static int scx_ops_enable(struct sched_ext_ops *ops) static_branch_disable_cpuslocked(&scx_builtin_idle_enabled); } + /* + * All cgroups should be initialized before letting in tasks. 
cgroup + * on/offlining and task migrations are already locked out. + */ + ret = scx_cgroup_init(); + if (ret) + goto err_disable_unlock; + static_branch_enable_cpuslocked(&__scx_ops_enabled); /* @@ -2630,6 +2958,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops) spin_unlock_irq(&scx_tasks_lock); preempt_enable(); + scx_cgroup_unlock(); percpu_up_write(&scx_fork_rwsem); if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) { @@ -2643,6 +2972,8 @@ static int scx_ops_enable(struct sched_ext_ops *ops) cpus_read_unlock(); mutex_unlock(&scx_ops_enable_mutex); + scx_cgroup_config_knobs(); + return 0; err_unlock: @@ -2650,6 +2981,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops) return ret; err_disable_unlock: + scx_cgroup_unlock(); percpu_up_write(&scx_fork_rwsem); err_disable: cpus_read_unlock(); @@ -2813,6 +3145,11 @@ static int bpf_scx_check_member(const struct btf_type *t, switch (moff) { case offsetof(struct sched_ext_ops, prep_enable): +#ifdef CONFIG_EXT_GROUP_SCHED + case offsetof(struct sched_ext_ops, cgroup_init): + case offsetof(struct sched_ext_ops, cgroup_exit): + case offsetof(struct sched_ext_ops, cgroup_prep_move): +#endif case offsetof(struct sched_ext_ops, init): case offsetof(struct sched_ext_ops, exit): break; @@ -2911,7 +3248,8 @@ void __init init_sched_ext_class(void) * definitions so that BPF scheduler implementations can use them * through the generated vmlinux.h. */ - WRITE_ONCE(v, SCX_WAKE_EXEC | SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP); + WRITE_ONCE(v, SCX_WAKE_EXEC | SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | + SCX_TG_ONLINE); BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params)); init_dsq(&scx_dsq_global, SCX_DSQ_GLOBAL); @@ -2932,6 +3270,7 @@ void __init init_sched_ext_class(void) register_sysrq_key('S', &sysrq_sched_ext_reset_op); INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn); + scx_cgroup_config_knobs(); } @@ -2975,8 +3314,8 @@ static const struct btf_kfunc_id_set scx_kfunc_set_init = { * @dsq_id: DSQ to create * @node: NUMA node to allocate from * - * Create a custom DSQ identified by @dsq_id. Can be called from ops.init() and - * ops.prep_enable(). + * Create a custom DSQ identified by @dsq_id. Can be called from ops.init(), + * ops.prep_enable(), ops.cgroup_init() and ops.cgroup_prep_move(). */ s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) { @@ -3419,6 +3758,39 @@ s32 scx_bpf_task_cpu(const struct task_struct *p) return task_cpu(p); } +/** + * scx_bpf_task_cgroup - Return the sched cgroup of a task + * @p: task of interest + * + * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with + * from the scheduler's POV. SCX operations should use this function to + * determine @p's current cgroup as, unlike following @p->cgroups, + * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all + * rq-locked operations. Can be called on the parameter tasks of rq-locked + * operations. The restriction guarantees that @p's rq is locked by the caller. + */ +struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) +{ + struct task_group *tg = p->sched_task_group; + struct cgroup *cgrp = &cgrp_dfl_root.cgrp; + + if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p)) + goto out; + + /* + * A task_group may either be a cgroup or an autogroup. In the latter + * case, @tg->css.cgroup is %NULL. A task_group can't become the other + * kind once created. 
+ */ + if (tg && tg->css.cgroup) + cgrp = tg->css.cgroup; + else + cgrp = &cgrp_dfl_root.cgrp; +out: + cgroup_get(cgrp); + return cgrp; +} + BTF_SET8_START(scx_kfunc_ids_any) BTF_ID_FLAGS(func, scx_bpf_kick_cpu) BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued) @@ -3431,6 +3803,7 @@ BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, scx_bpf_destroy_dsq) BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU) BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU) +BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE) BTF_SET8_END(scx_kfunc_ids_any) static const struct btf_kfunc_id_set scx_kfunc_set_any = { diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index 9c9284f91e38..0c5a109e7e6d 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -59,6 +59,11 @@ enum scx_deq_flags { SCX_DEQ_SLEEP = DEQUEUE_SLEEP, }; +enum scx_tg_flags { + SCX_TG_ONLINE = 1U << 0, + SCX_TG_INITED = 1U << 1, +}; + enum scx_kick_flags { SCX_KICK_PREEMPT = 1LLU << 0, /* force scheduling on the CPU */ }; @@ -162,3 +167,23 @@ static inline void scx_update_idle(struct rq *rq, bool idle) #else static inline void scx_update_idle(struct rq *rq, bool idle) {} #endif + +#ifdef CONFIG_CGROUP_SCHED +#ifdef CONFIG_EXT_GROUP_SCHED +int scx_tg_online(struct task_group *tg); +void scx_tg_offline(struct task_group *tg); +int scx_cgroup_can_attach(struct cgroup_taskset *tset); +void scx_move_task(struct task_struct *p); +void scx_cgroup_finish_attach(void); +void scx_cgroup_cancel_attach(struct cgroup_taskset *tset); +void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight); +#else /* CONFIG_EXT_GROUP_SCHED */ +static inline int scx_tg_online(struct task_group *tg) { return 0; } +static inline void scx_tg_offline(struct task_group *tg) {} +static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; } +static inline void scx_move_task(struct task_struct *p) {} +static inline void scx_cgroup_finish_attach(void) {} +static inline void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) {} +static inline void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight) {} +#endif /* CONFIG_EXT_GROUP_SCHED */ +#endif /* CONFIG_CGROUP_SCHED */ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index e6dacf488a20..3571cfda62a3 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -424,6 +424,11 @@ struct task_group { struct rt_bandwidth rt_bandwidth; #endif +#ifdef CONFIG_EXT_GROUP_SCHED + u32 scx_flags; /* SCX_TG_* */ + u32 scx_weight; +#endif + struct rcu_head rcu; struct list_head list; @@ -528,6 +533,11 @@ extern void set_task_rq_fair(struct sched_entity *se, static inline void set_task_rq_fair(struct sched_entity *se, struct cfs_rq *prev, struct cfs_rq *next) { } #endif /* CONFIG_SMP */ +#else /* CONFIG_FAIR_GROUP_SCHED */ +static inline int sched_group_set_shares(struct task_group *tg, unsigned long shares) +{ + return 0; +} #endif /* CONFIG_FAIR_GROUP_SCHED */ #else /* CONFIG_CGROUP_SCHED */ @@ -3418,7 +3428,7 @@ static inline void switch_mm_cid(struct task_struct *prev, struct task_struct *n #ifdef CONFIG_CGROUP_SCHED enum cpu_cftype_id { -#ifdef CONFIG_FAIR_GROUP_SCHED +#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_EXT_GROUP_SCHED) CPU_CFTYPE_WEIGHT, CPU_CFTYPE_WEIGHT_NICE, CPU_CFTYPE_IDLE, diff --git a/tools/sched_ext/.gitignore b/tools/sched_ext/.gitignore index 3d8ec46ca304..769bc6f35cc6 100644 --- a/tools/sched_ext/.gitignore +++ b/tools/sched_ext/.gitignore @@ -1,6 +1,8 @@ scx_example_simple scx_example_qmap scx_example_central 
+scx_example_pair +scx_example_flatcg *.skel.h *.subskel.h /tools/ diff --git a/tools/sched_ext/Makefile b/tools/sched_ext/Makefile index bcec7c1fb7b1..8c7543bbff8d 100644 --- a/tools/sched_ext/Makefile +++ b/tools/sched_ext/Makefile @@ -115,7 +115,8 @@ BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) \ -Wall -Wno-compare-distinct-pointer-types \ -O2 -mcpu=v3 -all: scx_example_simple scx_example_qmap scx_example_central +all: scx_example_simple scx_example_qmap scx_example_central scx_example_pair \ + scx_example_flatcg # sort removes libbpf duplicates when not cross-building MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \ @@ -178,10 +179,19 @@ scx_example_central: scx_example_central.c scx_example_central.skel.h user_exit_ $(CC) $(CFLAGS) -c $< -o $@.o $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) +scx_example_pair: scx_example_pair.c scx_example_pair.skel.h user_exit_info.h + $(CC) $(CFLAGS) -c $< -o $@.o + $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) + +scx_example_flatcg: scx_example_flatcg.c scx_example_flatcg.skel.h user_exit_info.h + $(CC) $(CFLAGS) -c $< -o $@.o + $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) + clean: rm -rf $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) rm -f *.o *.bpf.o *.skel.h *.subskel.h - rm -f scx_example_simple scx_example_qmap scx_example_central + rm -f scx_example_simple scx_example_qmap scx_example_central \ + scx_example_pair scx_example_flatcg .PHONY: all clean diff --git a/tools/sched_ext/scx_common.bpf.h b/tools/sched_ext/scx_common.bpf.h index e6f6171edf3c..3f58737d80b1 100644 --- a/tools/sched_ext/scx_common.bpf.h +++ b/tools/sched_ext/scx_common.bpf.h @@ -67,6 +67,7 @@ void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym; void scx_bpf_destroy_dsq(u64 dsq_id) __ksym; bool scx_bpf_task_running(const struct task_struct *p) __ksym; s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym; +struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym; #define BPF_STRUCT_OPS(name, args...) \ SEC("struct_ops/"#name) \ diff --git a/tools/sched_ext/scx_example_flatcg.bpf.c b/tools/sched_ext/scx_example_flatcg.bpf.c new file mode 100644 index 000000000000..9632bab7f164 --- /dev/null +++ b/tools/sched_ext/scx_example_flatcg.bpf.c @@ -0,0 +1,824 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * A demo sched_ext flattened cgroup hierarchy scheduler. It implements + * hierarchical weight-based cgroup CPU control by flattening the cgroup + * hierarchy into a single layer by compounding the active weight share at each + * level. Consider the following hierarchy with weights in parentheses: + * + * R + A (100) + B (100) + * | \ C (100) + * \ D (200) + * + * Ignoring the root and threaded cgroups, only B, C and D can contain tasks. + * Let's say all three have runnable tasks. The total share that each of these + * three cgroups is entitled to can be calculated by compounding its share at + * each level. + * + * For example, B is competing against C and in that competition its share is + * 100/(100+100) == 1/2. At its parent level, A is competing against D and A's + * share in that competition is 200/(200+100) == 1/3. B's eventual share in the + * system can be calculated by multiplying the two shares, 1/2 * 1/3 == 1/6. C's + * eventual shaer is the same at 1/6. D is only competing at the top level and + * its share is 200/(100+200) == 2/3. 
+ * + * So, instead of hierarchically scheduling level-by-level, we can consider it + * as B, C and D competing each other with respective share of 1/6, 1/6 and 2/3 + * and keep updating the eventual shares as the cgroups' runnable states change. + * + * This flattening of hierarchy can bring a substantial performance gain when + * the cgroup hierarchy is nested multiple levels. in a simple benchmark using + * wrk[8] on apache serving a CGI script calculating sha1sum of a small file, it + * outperforms CFS by ~3% with CPU controller disabled and by ~10% with two + * apache instances competing with 2:1 weight ratio nested four level deep. + * + * However, the gain comes at the cost of not being able to properly handle + * thundering herd of cgroups. For example, if many cgroups which are nested + * behind a low priority parent cgroup wake up around the same time, they may be + * able to consume more CPU cycles than they are entitled to. In many use cases, + * this isn't a real concern especially given the performance gain. Also, there + * are ways to mitigate the problem further by e.g. introducing an extra + * scheduling layer on cgroup delegation boundaries. + */ +#include "scx_common.bpf.h" +#include "user_exit_info.h" +#include "scx_example_flatcg.h" + +char _license[] SEC("license") = "GPL"; + +const volatile u32 nr_cpus = 32; /* !0 for veristat, set during init */ +const volatile u64 cgrp_slice_ns = SCX_SLICE_DFL; +const volatile bool switch_partial; + +u64 cvtime_now; +struct user_exit_info uei; + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, u64); + __uint(max_entries, FCG_NR_STATS); +} stats SEC(".maps"); + +static void stat_inc(enum fcg_stat_idx idx) +{ + u32 idx_v = idx; + + u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx_v); + if (cnt_p) + (*cnt_p)++; +} + +struct fcg_cpu_ctx { + u64 cur_cgid; + u64 cur_at; +}; + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, struct fcg_cpu_ctx); + __uint(max_entries, 1); +} cpu_ctx SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_CGRP_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct fcg_cgrp_ctx); +} cgrp_ctx SEC(".maps"); + +struct cgv_node { + struct bpf_rb_node rb_node; + __u64 cvtime; + __u64 cgid; +}; + +private(CGV_TREE) struct bpf_spin_lock cgv_tree_lock; +private(CGV_TREE) struct bpf_rb_root cgv_tree __contains(cgv_node, rb_node); + +struct cgv_node_stash { + struct cgv_node __kptr *node; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 16384); + __type(key, __u64); + __type(value, struct cgv_node_stash); +} cgv_node_stash SEC(".maps"); + +struct fcg_task_ctx { + u64 bypassed_at; +}; + +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct fcg_task_ctx); +} task_ctx SEC(".maps"); + +/* gets inc'd on weight tree changes to expire the cached hweights */ +unsigned long hweight_gen = 1; + +static u64 div_round_up(u64 dividend, u64 divisor) +{ + return (dividend + divisor - 1) / divisor; +} + +static bool vtime_before(u64 a, u64 b) +{ + return (s64)(a - b) < 0; +} + +static bool cgv_node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct cgv_node *cgc_a, *cgc_b; + + cgc_a = container_of(a, struct cgv_node, rb_node); + cgc_b = container_of(b, struct cgv_node, rb_node); + + return cgc_a->cvtime < cgc_b->cvtime; +} + +static struct fcg_cpu_ctx *find_cpu_ctx(void) +{ + struct fcg_cpu_ctx *cpuc; 
+ u32 idx = 0; + + cpuc = bpf_map_lookup_elem(&cpu_ctx, &idx); + if (!cpuc) { + scx_bpf_error("cpu_ctx lookup failed"); + return NULL; + } + return cpuc; +} + +static struct fcg_cgrp_ctx *find_cgrp_ctx(struct cgroup *cgrp) +{ + struct fcg_cgrp_ctx *cgc; + + cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0); + if (!cgc) { + scx_bpf_error("cgrp_ctx lookup failed for cgid %llu", cgrp->kn->id); + return NULL; + } + return cgc; +} + +static struct fcg_cgrp_ctx *find_ancestor_cgrp_ctx(struct cgroup *cgrp, int level) +{ + struct fcg_cgrp_ctx *cgc; + + cgrp = bpf_cgroup_ancestor(cgrp, level); + if (!cgrp) { + scx_bpf_error("ancestor cgroup lookup failed"); + return NULL; + } + + cgc = find_cgrp_ctx(cgrp); + if (!cgc) + scx_bpf_error("ancestor cgrp_ctx lookup failed"); + bpf_cgroup_release(cgrp); + return cgc; +} + +static void cgrp_refresh_hweight(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc) +{ + int level; + + if (!cgc->nr_active) { + stat_inc(FCG_STAT_HWT_SKIP); + return; + } + + if (cgc->hweight_gen == hweight_gen) { + stat_inc(FCG_STAT_HWT_CACHE); + return; + } + + stat_inc(FCG_STAT_HWT_UPDATES); + bpf_for(level, 0, cgrp->level + 1) { + struct fcg_cgrp_ctx *cgc; + bool is_active; + + cgc = find_ancestor_cgrp_ctx(cgrp, level); + if (!cgc) + break; + + if (!level) { + cgc->hweight = FCG_HWEIGHT_ONE; + cgc->hweight_gen = hweight_gen; + } else { + struct fcg_cgrp_ctx *pcgc; + + pcgc = find_ancestor_cgrp_ctx(cgrp, level - 1); + if (!pcgc) + break; + + /* + * We can be oppotunistic here and not grab the + * cgv_tree_lock and deal with the occasional races. + * However, hweight updates are already cached and + * relatively low-frequency. Let's just do the + * straightforward thing. + */ + bpf_spin_lock(&cgv_tree_lock); + is_active = cgc->nr_active; + if (is_active) { + cgc->hweight_gen = pcgc->hweight_gen; + cgc->hweight = + div_round_up(pcgc->hweight * cgc->weight, + pcgc->child_weight_sum); + } + bpf_spin_unlock(&cgv_tree_lock); + + if (!is_active) { + stat_inc(FCG_STAT_HWT_RACE); + break; + } + } + } +} + +static void cgrp_cap_budget(struct cgv_node *cgv_node, struct fcg_cgrp_ctx *cgc) +{ + u64 delta, cvtime, max_budget; + + /* + * A node which is on the rbtree can't be pointed to from elsewhere yet + * and thus can't be updated and repositioned. Instead, we collect the + * vtime deltas separately and apply it asynchronously here. + */ + delta = cgc->cvtime_delta; + __sync_fetch_and_sub(&cgc->cvtime_delta, delta); + cvtime = cgv_node->cvtime + delta; + + /* + * Allow a cgroup to carry the maximum budget proportional to its + * hweight such that a full-hweight cgroup can immediately take up half + * of the CPUs at the most while staying at the front of the rbtree. 
+ */ + max_budget = (cgrp_slice_ns * nr_cpus * cgc->hweight) / + (2 * FCG_HWEIGHT_ONE); + if (vtime_before(cvtime, cvtime_now - max_budget)) + cvtime = cvtime_now - max_budget; + + cgv_node->cvtime = cvtime; +} + +static void cgrp_enqueued(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc) +{ + struct cgv_node_stash *stash; + struct cgv_node *cgv_node; + u64 cgid = cgrp->kn->id; + + /* paired with cmpxchg in try_pick_next_cgroup() */ + if (__sync_val_compare_and_swap(&cgc->queued, 0, 1)) { + stat_inc(FCG_STAT_ENQ_SKIP); + return; + } + + stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid); + if (!stash) { + scx_bpf_error("cgv_node lookup failed for cgid %llu", cgid); + return; + } + + /* NULL if the node is already on the rbtree */ + cgv_node = bpf_kptr_xchg(&stash->node, NULL); + if (!cgv_node) { + stat_inc(FCG_STAT_ENQ_RACE); + return; + } + + bpf_spin_lock(&cgv_tree_lock); + cgrp_cap_budget(cgv_node, cgc); + bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less); + bpf_spin_unlock(&cgv_tree_lock); +} + +void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags) +{ + struct fcg_task_ctx *taskc; + struct cgroup *cgrp; + struct fcg_cgrp_ctx *cgc; + + taskc = bpf_task_storage_get(&task_ctx, p, 0, 0); + if (!taskc) { + scx_bpf_error("task_ctx lookup failed"); + return; + } + + /* + * If select_cpu_dfl() is recommending local enqueue, the target CPU is + * idle. Follow it and charge the cgroup later in fcg_stopping() after + * the fact. Use the same mechanism to deal with tasks with custom + * affinities so that we don't have to worry about per-cgroup dq's + * containing tasks that can't be executed from some CPUs. + */ + if ((enq_flags & SCX_ENQ_LOCAL) || p->nr_cpus_allowed != nr_cpus) { + /* + * Tell fcg_stopping() that this bypassed the regular scheduling + * path and should be force charged to the cgroup. 0 is used to + * indicate that the task isn't bypassing, so if the current + * runtime is 0, go back by one nanosecond. + */ + taskc->bypassed_at = p->se.sum_exec_runtime ?: (u64)-1; + + /* + * The global dq is deprioritized as we don't want to let tasks + * to boost themselves by constraining its cpumask. The + * deprioritization is rather severe, so let's not apply that to + * per-cpu kernel threads. This is ham-fisted. We probably wanna + * implement per-cgroup fallback dq's instead so that we have + * more control over when tasks with custom cpumask get issued. + */ + if ((enq_flags & SCX_ENQ_LOCAL) || + (p->nr_cpus_allowed == 1 && (p->flags & PF_KTHREAD))) { + stat_inc(FCG_STAT_LOCAL); + scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags); + } else { + stat_inc(FCG_STAT_GLOBAL); + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags); + } + return; + } + + cgrp = scx_bpf_task_cgroup(p); + cgc = find_cgrp_ctx(cgrp); + if (!cgc) + goto out_release; + + scx_bpf_dispatch(p, cgrp->kn->id, SCX_SLICE_DFL, enq_flags); + + cgrp_enqueued(cgrp, cgc); +out_release: + bpf_cgroup_release(cgrp); +} + +/* + * Walk the cgroup tree to update the active weight sums as tasks wake up and + * sleep. The weight sums are used as the base when calculating the proportion a + * given cgroup or task is entitled to at each level. + */ +static void update_active_weight_sums(struct cgroup *cgrp, bool runnable) +{ + struct fcg_cgrp_ctx *cgc; + bool updated = false; + int idx; + + cgc = find_cgrp_ctx(cgrp); + if (!cgc) + return; + + /* + * In most cases, a hot cgroup would have multiple threads going to + * sleep and waking up while the whole cgroup stays active. 
In leaf + * cgroups, ->nr_runnable which is updated with __sync operations gates + * ->nr_active updates, so that we don't have to grab the cgv_tree_lock + * repeatedly for a busy cgroup which is staying active. + */ + if (runnable) { + if (__sync_fetch_and_add(&cgc->nr_runnable, 1)) + return; + stat_inc(FCG_STAT_ACT); + } else { + if (__sync_sub_and_fetch(&cgc->nr_runnable, 1)) + return; + stat_inc(FCG_STAT_DEACT); + } + + /* + * If @cgrp is becoming runnable, its hweight should be refreshed after + * it's added to the weight tree so that enqueue has the up-to-date + * value. If @cgrp is becoming quiescent, the hweight should be + * refreshed before it's removed from the weight tree so that the usage + * charging which happens afterwards has access to the latest value. + */ + if (!runnable) + cgrp_refresh_hweight(cgrp, cgc); + + /* propagate upwards */ + bpf_for(idx, 0, cgrp->level) { + int level = cgrp->level - idx; + struct fcg_cgrp_ctx *cgc, *pcgc = NULL; + bool propagate = false; + + cgc = find_ancestor_cgrp_ctx(cgrp, level); + if (!cgc) + break; + if (level) { + pcgc = find_ancestor_cgrp_ctx(cgrp, level - 1); + if (!pcgc) + break; + } + + /* + * We need the propagation protected by a lock to synchronize + * against weight changes. There's no reason to drop the lock at + * each level but bpf_spin_lock() doesn't want any function + * calls while locked. + */ + bpf_spin_lock(&cgv_tree_lock); + + if (runnable) { + if (!cgc->nr_active++) { + updated = true; + if (pcgc) { + propagate = true; + pcgc->child_weight_sum += cgc->weight; + } + } + } else { + if (!--cgc->nr_active) { + updated = true; + if (pcgc) { + propagate = true; + pcgc->child_weight_sum -= cgc->weight; + } + } + } + + bpf_spin_unlock(&cgv_tree_lock); + + if (!propagate) + break; + } + + if (updated) + __sync_fetch_and_add(&hweight_gen, 1); + + if (runnable) + cgrp_refresh_hweight(cgrp, cgc); +} + +void BPF_STRUCT_OPS(fcg_runnable, struct task_struct *p, u64 enq_flags) +{ + struct cgroup *cgrp; + + cgrp = scx_bpf_task_cgroup(p); + update_active_weight_sums(cgrp, true); + bpf_cgroup_release(cgrp); +} + +void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable) +{ + struct fcg_task_ctx *taskc; + struct cgroup *cgrp; + struct fcg_cgrp_ctx *cgc; + + taskc = bpf_task_storage_get(&task_ctx, p, 0, 0); + if (!taskc) { + scx_bpf_error("task_ctx lookup failed"); + return; + } + + if (!taskc->bypassed_at) + return; + + cgrp = scx_bpf_task_cgroup(p); + cgc = find_cgrp_ctx(cgrp); + if (cgc) { + __sync_fetch_and_add(&cgc->cvtime_delta, + p->se.sum_exec_runtime - taskc->bypassed_at); + taskc->bypassed_at = 0; + } + bpf_cgroup_release(cgrp); +} + +void BPF_STRUCT_OPS(fcg_quiescent, struct task_struct *p, u64 deq_flags) +{ + struct cgroup *cgrp; + + cgrp = scx_bpf_task_cgroup(p); + update_active_weight_sums(cgrp, false); + bpf_cgroup_release(cgrp); +} + +void BPF_STRUCT_OPS(fcg_cgroup_set_weight, struct cgroup *cgrp, u32 weight) +{ + struct fcg_cgrp_ctx *cgc, *pcgc = NULL; + + cgc = find_cgrp_ctx(cgrp); + if (!cgc) + return; + + if (cgrp->level) { + pcgc = find_ancestor_cgrp_ctx(cgrp, cgrp->level - 1); + if (!pcgc) + return; + } + + bpf_spin_lock(&cgv_tree_lock); + if (pcgc && cgc->nr_active) + pcgc->child_weight_sum += (s64)weight - cgc->weight; + cgc->weight = weight; + bpf_spin_unlock(&cgv_tree_lock); +} + +static bool try_pick_next_cgroup(u64 *cgidp) +{ + struct bpf_rb_node *rb_node; + struct cgv_node_stash *stash; + struct cgv_node *cgv_node; + struct fcg_cgrp_ctx *cgc; + struct cgroup *cgrp; + u64 cgid; + + /* pop the 
front cgroup and wind cvtime_now accordingly */ + bpf_spin_lock(&cgv_tree_lock); + + rb_node = bpf_rbtree_first(&cgv_tree); + if (!rb_node) { + bpf_spin_unlock(&cgv_tree_lock); + stat_inc(FCG_STAT_PNC_NO_CGRP); + *cgidp = 0; + return true; + } + + rb_node = bpf_rbtree_remove(&cgv_tree, rb_node); + bpf_spin_unlock(&cgv_tree_lock); + + cgv_node = container_of(rb_node, struct cgv_node, rb_node); + cgid = cgv_node->cgid; + + if (vtime_before(cvtime_now, cgv_node->cvtime)) + cvtime_now = cgv_node->cvtime; + + /* + * If lookup fails, the cgroup's gone. Free and move on. See + * fcg_cgroup_exit(). + */ + cgrp = bpf_cgroup_from_id(cgid); + if (!cgrp) { + stat_inc(FCG_STAT_PNC_GONE); + goto out_free; + } + + cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0); + if (!cgc) { + bpf_cgroup_release(cgrp); + stat_inc(FCG_STAT_PNC_GONE); + goto out_free; + } + + if (!scx_bpf_consume(cgid)) { + bpf_cgroup_release(cgrp); + stat_inc(FCG_STAT_PNC_EMPTY); + goto out_stash; + } + + /* + * Successfully consumed from the cgroup. This will be our current + * cgroup for the new slice. Refresh its hweight. + */ + cgrp_refresh_hweight(cgrp, cgc); + + bpf_cgroup_release(cgrp); + + /* + * As the cgroup may have more tasks, add it back to the rbtree. Note + * that here we charge the full slice upfront and then exact later + * according to the actual consumption. This prevents lowpri thundering + * herd from saturating the machine. + */ + bpf_spin_lock(&cgv_tree_lock); + cgv_node->cvtime += cgrp_slice_ns * FCG_HWEIGHT_ONE / (cgc->hweight ?: 1); + cgrp_cap_budget(cgv_node, cgc); + bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less); + bpf_spin_unlock(&cgv_tree_lock); + + *cgidp = cgid; + stat_inc(FCG_STAT_PNC_NEXT); + return true; + +out_stash: + stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid); + if (!stash) { + stat_inc(FCG_STAT_PNC_GONE); + goto out_free; + } + + /* + * Paired with cmpxchg in cgrp_enqueued(). If they see the following + * transition, they'll enqueue the cgroup. If they are earlier, we'll + * see their task in the dq below and requeue the cgroup. + */ + __sync_val_compare_and_swap(&cgc->queued, 1, 0); + + if (scx_bpf_dsq_nr_queued(cgid)) { + bpf_spin_lock(&cgv_tree_lock); + bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less); + bpf_spin_unlock(&cgv_tree_lock); + } else { + cgv_node = bpf_kptr_xchg(&stash->node, cgv_node); + if (cgv_node) { + scx_bpf_error("unexpected !NULL cgv_node stash"); + goto out_free; + } + } + + return false; + +out_free: + bpf_obj_drop(cgv_node); + return false; +} + +void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev) +{ + struct fcg_cpu_ctx *cpuc; + struct fcg_cgrp_ctx *cgc; + struct cgroup *cgrp; + u64 now = bpf_ktime_get_ns(); + + cpuc = find_cpu_ctx(); + if (!cpuc) + return; + + if (!cpuc->cur_cgid) + goto pick_next_cgroup; + + if (vtime_before(now, cpuc->cur_at + cgrp_slice_ns)) { + if (scx_bpf_consume(cpuc->cur_cgid)) { + stat_inc(FCG_STAT_CNS_KEEP); + return; + } + stat_inc(FCG_STAT_CNS_EMPTY); + } else { + stat_inc(FCG_STAT_CNS_EXPIRE); + } + + /* + * The current cgroup is expiring. It was already charged a full slice. + * Calculate the actual usage and accumulate the delta. + */ + cgrp = bpf_cgroup_from_id(cpuc->cur_cgid); + if (!cgrp) { + stat_inc(FCG_STAT_CNS_GONE); + goto pick_next_cgroup; + } + + cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0); + if (cgc) { + /* + * We want to update the vtime delta and then look for the next + * cgroup to execute but the latter needs to be done in a loop + * and we can't keep the lock held. 
Oh well... + */ + bpf_spin_lock(&cgv_tree_lock); + __sync_fetch_and_add(&cgc->cvtime_delta, + (cpuc->cur_at + cgrp_slice_ns - now) * + FCG_HWEIGHT_ONE / (cgc->hweight ?: 1)); + bpf_spin_unlock(&cgv_tree_lock); + } else { + stat_inc(FCG_STAT_CNS_GONE); + } + + bpf_cgroup_release(cgrp); + +pick_next_cgroup: + cpuc->cur_at = now; + + if (scx_bpf_consume(SCX_DSQ_GLOBAL)) { + cpuc->cur_cgid = 0; + return; + } + + bpf_repeat(BPF_MAX_LOOPS) { + if (try_pick_next_cgroup(&cpuc->cur_cgid)) + break; + } +} + +s32 BPF_STRUCT_OPS(fcg_prep_enable, struct task_struct *p, + struct scx_enable_args *args) +{ + struct fcg_task_ctx *taskc; + + /* + * @p is new. Let's ensure that its task_ctx is available. We can sleep + * in this function and the following will automatically use GFP_KERNEL. + */ + taskc = bpf_task_storage_get(&task_ctx, p, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!taskc) + return -ENOMEM; + + taskc->bypassed_at = 0; + return 0; +} + +int BPF_STRUCT_OPS_SLEEPABLE(fcg_cgroup_init, struct cgroup *cgrp, + struct scx_cgroup_init_args *args) +{ + struct fcg_cgrp_ctx *cgc; + struct cgv_node *cgv_node; + struct cgv_node_stash empty_stash = {}, *stash; + u64 cgid = cgrp->kn->id; + int ret; + + /* + * Technically incorrect as cgroup ID is full 64bit while dq ID is + * 63bit. Should not be a problem in practice and easy to spot in the + * unlikely case that it breaks. + */ + ret = scx_bpf_create_dsq(cgid, -1); + if (ret) + return ret; + + cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!cgc) { + ret = -ENOMEM; + goto err_destroy_dsq; + } + + cgc->weight = args->weight; + cgc->hweight = FCG_HWEIGHT_ONE; + + ret = bpf_map_update_elem(&cgv_node_stash, &cgid, &empty_stash, + BPF_NOEXIST); + if (ret) { + if (ret != -ENOMEM) + scx_bpf_error("unexpected stash creation error (%d)", + ret); + goto err_destroy_dsq; + } + + stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid); + if (!stash) { + scx_bpf_error("unexpected cgv_node stash lookup failure"); + ret = -ENOENT; + goto err_destroy_dsq; + } + + cgv_node = bpf_obj_new(struct cgv_node); + if (!cgv_node) { + ret = -ENOMEM; + goto err_del_cgv_node; + } + + cgv_node->cgid = cgid; + cgv_node->cvtime = cvtime_now; + + cgv_node = bpf_kptr_xchg(&stash->node, cgv_node); + if (cgv_node) { + scx_bpf_error("unexpected !NULL cgv_node stash"); + ret = -EBUSY; + goto err_drop; + } + + return 0; + +err_drop: + bpf_obj_drop(cgv_node); +err_del_cgv_node: + bpf_map_delete_elem(&cgv_node_stash, &cgid); +err_destroy_dsq: + scx_bpf_destroy_dsq(cgid); + return ret; +} + +void BPF_STRUCT_OPS(fcg_cgroup_exit, struct cgroup *cgrp) +{ + u64 cgid = cgrp->kn->id; + + /* + * For now, there's no way find and remove the cgv_node if it's on the + * cgv_tree. Let's drain them in the dispatch path as they get popped + * off the front of the tree. 
+ */ + bpf_map_delete_elem(&cgv_node_stash, &cgid); + scx_bpf_destroy_dsq(cgid); +} + +s32 BPF_STRUCT_OPS(fcg_init) +{ + if (!switch_partial) + scx_bpf_switch_all(); + return 0; +} + +void BPF_STRUCT_OPS(fcg_exit, struct scx_exit_info *ei) +{ + uei_record(&uei, ei); +} + +SEC(".struct_ops") +struct sched_ext_ops flatcg_ops = { + .enqueue = (void *)fcg_enqueue, + .dispatch = (void *)fcg_dispatch, + .runnable = (void *)fcg_runnable, + .stopping = (void *)fcg_stopping, + .quiescent = (void *)fcg_quiescent, + .prep_enable = (void *)fcg_prep_enable, + .cgroup_set_weight = (void *)fcg_cgroup_set_weight, + .cgroup_init = (void *)fcg_cgroup_init, + .cgroup_exit = (void *)fcg_cgroup_exit, + .init = (void *)fcg_init, + .exit = (void *)fcg_exit, + .flags = SCX_OPS_CGROUP_KNOB_WEIGHT | SCX_OPS_ENQ_EXITING, + .name = "flatcg", +}; diff --git a/tools/sched_ext/scx_example_flatcg.c b/tools/sched_ext/scx_example_flatcg.c new file mode 100644 index 000000000000..150f7e16996e --- /dev/null +++ b/tools/sched_ext/scx_example_flatcg.c @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2023 Tejun Heo + * Copyright (c) 2023 David Vernet + */ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include "user_exit_info.h" +#include "scx_example_flatcg.h" +#include "scx_example_flatcg.skel.h" + +#ifndef FILEID_KERNFS +#define FILEID_KERNFS 0xfe +#endif + +const char help_fmt[] = +"A flattened cgroup hierarchy sched_ext scheduler.\n" +"\n" +"See the top-level comment in .bpf.c for more details.\n" +"\n" +"Usage: %s [-s SLICE_US] [-i INTERVAL] [-p]\n" +"\n" +" -s SLICE_US Override slice duration\n" +" -i INTERVAL Report interval\n" +" -p Switch only tasks on SCHED_EXT policy intead of all\n" +" -h Display this help and exit\n"; + +static volatile int exit_req; + +static void sigint_handler(int dummy) +{ + exit_req = 1; +} + +static float read_cpu_util(__u64 *last_sum, __u64 *last_idle) +{ + FILE *fp; + char buf[4096]; + char *line, *cur = NULL, *tok; + __u64 sum = 0, idle = 0; + __u64 delta_sum, delta_idle; + int idx; + + fp = fopen("/proc/stat", "r"); + if (!fp) { + perror("fopen(\"/proc/stat\")"); + return 0.0; + } + + if (!fgets(buf, sizeof(buf), fp)) { + perror("fgets(\"/proc/stat\")"); + fclose(fp); + return 0.0; + } + fclose(fp); + + line = buf; + for (idx = 0; (tok = strtok_r(line, " \n", &cur)); idx++) { + char *endp = NULL; + __u64 v; + + if (idx == 0) { + line = NULL; + continue; + } + v = strtoull(tok, &endp, 0); + if (!endp || *endp != '\0') { + fprintf(stderr, "failed to parse %dth field of /proc/stat (\"%s\")\n", + idx, tok); + continue; + } + sum += v; + if (idx == 4) + idle = v; + } + + delta_sum = sum - *last_sum; + delta_idle = idle - *last_idle; + *last_sum = sum; + *last_idle = idle; + + return delta_sum ? 
(float)(delta_sum - delta_idle) / delta_sum : 0.0; +} + +static void fcg_read_stats(struct scx_example_flatcg *skel, __u64 *stats) +{ + __u64 cnts[FCG_NR_STATS][skel->rodata->nr_cpus]; + __u32 idx; + + memset(stats, 0, sizeof(stats[0]) * FCG_NR_STATS); + + for (idx = 0; idx < FCG_NR_STATS; idx++) { + int ret, cpu; + + ret = bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats), + &idx, cnts[idx]); + if (ret < 0) + continue; + for (cpu = 0; cpu < skel->rodata->nr_cpus; cpu++) + stats[idx] += cnts[idx][cpu]; + } +} + +int main(int argc, char **argv) +{ + struct scx_example_flatcg *skel; + struct bpf_link *link; + struct timespec intv_ts = { .tv_sec = 2, .tv_nsec = 0 }; + bool dump_cgrps = false; + __u64 last_cpu_sum = 0, last_cpu_idle = 0; + __u64 last_stats[FCG_NR_STATS] = {}; + unsigned long seq = 0; + s32 opt; + + signal(SIGINT, sigint_handler); + signal(SIGTERM, sigint_handler); + + libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + + skel = scx_example_flatcg__open(); + if (!skel) { + fprintf(stderr, "Failed to open: %s\n", strerror(errno)); + return 1; + } + + skel->rodata->nr_cpus = libbpf_num_possible_cpus(); + + while ((opt = getopt(argc, argv, "s:i:dfph")) != -1) { + double v; + + switch (opt) { + case 's': + v = strtod(optarg, NULL); + skel->rodata->cgrp_slice_ns = v * 1000; + break; + case 'i': + v = strtod(optarg, NULL); + intv_ts.tv_sec = v; + intv_ts.tv_nsec = (v - (float)intv_ts.tv_sec) * 1000000000; + break; + case 'd': + dump_cgrps = true; + break; + case 'p': + skel->rodata->switch_partial = true; + break; + case 'h': + default: + fprintf(stderr, help_fmt, basename(argv[0])); + return opt != 'h'; + } + } + + printf("slice=%.1lfms intv=%.1lfs dump_cgrps=%d", + (double)skel->rodata->cgrp_slice_ns / 1000000.0, + (double)intv_ts.tv_sec + (double)intv_ts.tv_nsec / 1000000000.0, + dump_cgrps); + + if (scx_example_flatcg__load(skel)) { + fprintf(stderr, "Failed to load: %s\n", strerror(errno)); + return 1; + } + + link = bpf_map__attach_struct_ops(skel->maps.flatcg_ops); + if (!link) { + fprintf(stderr, "Failed to attach_struct_ops: %s\n", + strerror(errno)); + return 1; + } + + while (!exit_req && !uei_exited(&skel->bss->uei)) { + __u64 acc_stats[FCG_NR_STATS]; + __u64 stats[FCG_NR_STATS]; + float cpu_util; + int i; + + cpu_util = read_cpu_util(&last_cpu_sum, &last_cpu_idle); + + fcg_read_stats(skel, acc_stats); + for (i = 0; i < FCG_NR_STATS; i++) + stats[i] = acc_stats[i] - last_stats[i]; + + memcpy(last_stats, acc_stats, sizeof(acc_stats)); + + printf("\n[SEQ %6lu cpu=%5.1lf hweight_gen=%lu]\n", + seq++, cpu_util * 100.0, skel->data->hweight_gen); + printf(" act:%6llu deact:%6llu local:%6llu global:%6llu\n", + stats[FCG_STAT_ACT], + stats[FCG_STAT_DEACT], + stats[FCG_STAT_LOCAL], + stats[FCG_STAT_GLOBAL]); + printf("HWT skip:%6llu race:%6llu cache:%6llu update:%6llu\n", + stats[FCG_STAT_HWT_SKIP], + stats[FCG_STAT_HWT_RACE], + stats[FCG_STAT_HWT_CACHE], + stats[FCG_STAT_HWT_UPDATES]); + printf("ENQ skip:%6llu race:%6llu\n", + stats[FCG_STAT_ENQ_SKIP], + stats[FCG_STAT_ENQ_RACE]); + printf("CNS keep:%6llu expire:%6llu empty:%6llu gone:%6llu\n", + stats[FCG_STAT_CNS_KEEP], + stats[FCG_STAT_CNS_EXPIRE], + stats[FCG_STAT_CNS_EMPTY], + stats[FCG_STAT_CNS_GONE]); + printf("PNC nocgrp:%6llu next:%6llu empty:%6llu gone:%6llu\n", + stats[FCG_STAT_PNC_NO_CGRP], + stats[FCG_STAT_PNC_NEXT], + stats[FCG_STAT_PNC_EMPTY], + stats[FCG_STAT_PNC_GONE]); + printf("BAD remove:%6llu\n", + acc_stats[FCG_STAT_BAD_REMOVAL]); + + nanosleep(&intv_ts, NULL); + } + + bpf_link__destroy(link); + 
uei_print(&skel->bss->uei); + scx_example_flatcg__destroy(skel); + return 0; +} diff --git a/tools/sched_ext/scx_example_flatcg.h b/tools/sched_ext/scx_example_flatcg.h new file mode 100644 index 000000000000..490758ed41f0 --- /dev/null +++ b/tools/sched_ext/scx_example_flatcg.h @@ -0,0 +1,49 @@ +#ifndef __SCX_EXAMPLE_FLATCG_H +#define __SCX_EXAMPLE_FLATCG_H + +enum { + FCG_HWEIGHT_ONE = 1LLU << 16, +}; + +enum fcg_stat_idx { + FCG_STAT_ACT, + FCG_STAT_DEACT, + FCG_STAT_LOCAL, + FCG_STAT_GLOBAL, + + FCG_STAT_HWT_UPDATES, + FCG_STAT_HWT_CACHE, + FCG_STAT_HWT_SKIP, + FCG_STAT_HWT_RACE, + + FCG_STAT_ENQ_SKIP, + FCG_STAT_ENQ_RACE, + + FCG_STAT_CNS_KEEP, + FCG_STAT_CNS_EXPIRE, + FCG_STAT_CNS_EMPTY, + FCG_STAT_CNS_GONE, + + FCG_STAT_PNC_NO_CGRP, + FCG_STAT_PNC_NEXT, + FCG_STAT_PNC_EMPTY, + FCG_STAT_PNC_GONE, + + FCG_STAT_BAD_REMOVAL, + + FCG_NR_STATS, +}; + +struct fcg_cgrp_ctx { + u32 nr_active; + u32 nr_runnable; + u32 queued; + u32 weight; + u32 hweight; + u64 child_weight_sum; + u64 hweight_gen; + s64 cvtime_delta; + u64 tvtime_now; +}; + +#endif /* __SCX_EXAMPLE_FLATCG_H */ diff --git a/tools/sched_ext/scx_example_pair.bpf.c b/tools/sched_ext/scx_example_pair.bpf.c new file mode 100644 index 000000000000..e5ff39083181 --- /dev/null +++ b/tools/sched_ext/scx_example_pair.bpf.c @@ -0,0 +1,536 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * A demo sched_ext core-scheduler which always makes every sibling CPU pair + * execute from the same CPU cgroup. + * + * This scheduler is a minimal implementation and would need some form of + * priority handling both inside each cgroup and across the cgroups to be + * practically useful. + * + * Each CPU in the system is paired with exactly one other CPU, according to a + * "stride" value that can be specified when the BPF scheduler program is first + * loaded. Throughout the runtime of the scheduler, these CPU pairs guarantee + * that they will only ever schedule tasks that belong to the same CPU cgroup. + * + * Scheduler Initialization + * ------------------------ + * + * The scheduler BPF program is first initialized from user space, before it is + * enabled. During this initialization process, each CPU on the system is + * assigned several values that are constant throughout its runtime: + * + * 1. *Pair CPU*: The CPU that it synchronizes with when making scheduling + * decisions. Paired CPUs always schedule tasks from the same + * CPU cgroup, and synchronize with each other to guarantee + * that this constraint is not violated. + * 2. *Pair ID*: Each CPU pair is assigned a Pair ID, which is used to access + * a struct pair_ctx object that is shared between the pair. + * 3. *In-pair-index*: An index, 0 or 1, that is assigned to each core in the + * pair. Each struct pair_ctx has an active_mask field, + * which is a bitmap used to indicate whether each core + * in the pair currently has an actively running task. + * This index specifies which entry in the bitmap corresponds + * to each CPU in the pair. + * + * During this initialization, the CPUs are paired according to a "stride" that + * may be specified when invoking the user space program that initializes and + * loads the scheduler. By default, the stride is 1/2 the total number of CPUs. + * + * Tasks and cgroups + * ----------------- + * + * Every cgroup in the system is registered with the scheduler using the + * pair_cgroup_init() callback, and every task in the system is associated with + * exactly one cgroup. 
At a high level, the idea with the pair scheduler is to + * always schedule tasks from the same cgroup within a given CPU pair. When a + * task is enqueued (i.e. passed to the pair_enqueue() callback function), its + * cgroup ID is read from its task struct, and then a corresponding queue map + * is used to FIFO-enqueue the task for that cgroup. + * + * If you look through the implementation of the scheduler, you'll notice that + * there is quite a bit of complexity involved with looking up the per-cgroup + * FIFO queue that we enqueue tasks in. For example, there is a cgrp_q_idx_hash + * BPF hash map that is used to map a cgroup ID to a globally unique ID that's + * allocated in the BPF program. This is done because we use separate maps to + * store the FIFO queue of tasks, and the length of that map, per cgroup. This + * complexity is only present because of current deficiencies in BPF that will + * soon be addressed. The main point to keep in mind is that newly enqueued + * tasks are added to their cgroup's FIFO queue. + * + * Dispatching tasks + * ----------------- + * + * This section will describe how enqueued tasks are dispatched and scheduled. + * Tasks are dispatched in pair_dispatch(), and at a high level the workflow is + * as follows: + * + * 1. Fetch the struct pair_ctx for the current CPU. As mentioned above, this is + * the structure that's used to synchronize amongst the two pair CPUs in their + * scheduling decisions. After any of the following events have occurred: + * + * - The cgroup's slice run has expired, or + * - The cgroup becomes empty, or + * - Either CPU in the pair is preempted by a higher priority scheduling class + * + * The cgroup transitions to the draining state and stops executing new tasks + * from the cgroup. + * + * 2. If the pair is still executing a task, mark the pair_ctx as draining, and + * wait for the pair CPU to be preempted. + * + * 3. Otherwise, if the pair CPU is not running a task, we can move onto + * scheduling new tasks. Pop the next cgroup id from the top_q queue. + * + * 4. Pop a task from that cgroup's FIFO task queue, and begin executing it. + * + * Note again that this scheduling behavior is simple, but the implementation + * is complex mostly because this it hits several BPF shortcomings and has to + * work around in often awkward ways. Most of the shortcomings are expected to + * be resolved in the near future which should allow greatly simplifying this + * scheduler. + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#include "scx_common.bpf.h" +#include "scx_example_pair.h" + +char _license[] SEC("license") = "GPL"; + +const volatile bool switch_partial; + +/* !0 for veristat, set during init */ +const volatile u32 nr_cpu_ids = 64; + +/* a pair of CPUs stay on a cgroup for this duration */ +const volatile u32 pair_batch_dur_ns = SCX_SLICE_DFL; + +/* cpu ID -> pair cpu ID */ +const volatile s32 pair_cpu[MAX_CPUS] = { [0 ... 
MAX_CPUS - 1] = -1 }; + +/* cpu ID -> pair_id */ +const volatile u32 pair_id[MAX_CPUS]; + +/* CPU ID -> CPU # in the pair (0 or 1) */ +const volatile u32 in_pair_idx[MAX_CPUS]; + +struct pair_ctx { + struct bpf_spin_lock lock; + + /* the cgroup the pair is currently executing */ + u64 cgid; + + /* the pair started executing the current cgroup at */ + u64 started_at; + + /* whether the current cgroup is draining */ + bool draining; + + /* the CPUs that are currently active on the cgroup */ + u32 active_mask; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, MAX_CPUS / 2); + __type(key, u32); + __type(value, struct pair_ctx); +} pair_ctx SEC(".maps"); + +/* queue of cgrp_q's possibly with tasks on them */ +struct { + __uint(type, BPF_MAP_TYPE_QUEUE); + /* + * Because it's difficult to build strong synchronization encompassing + * multiple non-trivial operations in BPF, this queue is managed in an + * opportunistic way so that we guarantee that a cgroup w/ active tasks + * is always on it but possibly multiple times. Once we have more robust + * synchronization constructs and e.g. linked list, we should be able to + * do this in a prettier way but for now just size it big enough. + */ + __uint(max_entries, 4 * MAX_CGRPS); + __type(value, u64); +} top_q SEC(".maps"); + +/* per-cgroup q which FIFOs the tasks from the cgroup */ +struct cgrp_q { + __uint(type, BPF_MAP_TYPE_QUEUE); + __uint(max_entries, MAX_QUEUED); + __type(value, u32); +}; + +/* + * Ideally, we want to allocate cgrp_q and cgrq_q_len in the cgroup local + * storage; however, a cgroup local storage can only be accessed from the BPF + * progs attached to the cgroup. For now, work around by allocating array of + * cgrp_q's and then allocating per-cgroup indices. + * + * Another caveat: It's difficult to populate a large array of maps statically + * or from BPF. Initialize it from userland. + */ +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, MAX_CGRPS); + __type(key, s32); + __array(values, struct cgrp_q); +} cgrp_q_arr SEC(".maps"); + +static u64 cgrp_q_len[MAX_CGRPS]; + +/* + * This and cgrp_q_idx_hash combine into a poor man's IDR. This likely would be + * useful to have as a map type. + */ +static u32 cgrp_q_idx_cursor; +static u64 cgrp_q_idx_busy[MAX_CGRPS]; + +/* + * All added up, the following is what we do: + * + * 1. When a cgroup is enabled, RR cgroup_q_idx_busy array doing cmpxchg looking + * for a free ID. If not found, fail cgroup creation with -EBUSY. + * + * 2. Hash the cgroup ID to the allocated cgrp_q_idx in the following + * cgrp_q_idx_hash. + * + * 3. Whenever a cgrp_q needs to be accessed, first look up the cgrp_q_idx from + * cgrp_q_idx_hash and then access the corresponding entry in cgrp_q_arr. + * + * This is sadly complicated for something pretty simple. Hopefully, we should + * be able to simplify in the future. 
+ */ +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, MAX_CGRPS); + __uint(key_size, sizeof(u64)); /* cgrp ID */ + __uint(value_size, sizeof(s32)); /* cgrp_q idx */ +} cgrp_q_idx_hash SEC(".maps"); + +/* statistics */ +u64 nr_total, nr_dispatched, nr_missing, nr_kicks, nr_preemptions; +u64 nr_exps, nr_exp_waits, nr_exp_empty; +u64 nr_cgrp_next, nr_cgrp_coll, nr_cgrp_empty; + +struct user_exit_info uei; + +static bool time_before(u64 a, u64 b) +{ + return (s64)(a - b) < 0; +} + +void BPF_STRUCT_OPS(pair_enqueue, struct task_struct *p, u64 enq_flags) +{ + struct cgroup *cgrp; + struct cgrp_q *cgq; + s32 pid = p->pid; + u64 cgid; + u32 *q_idx; + u64 *cgq_len; + + __sync_fetch_and_add(&nr_total, 1); + + cgrp = scx_bpf_task_cgroup(p); + cgid = cgrp->kn->id; + bpf_cgroup_release(cgrp); + + /* find the cgroup's q and push @p into it */ + q_idx = bpf_map_lookup_elem(&cgrp_q_idx_hash, &cgid); + if (!q_idx) { + scx_bpf_error("failed to lookup q_idx for cgroup[%llu]", cgid); + return; + } + + cgq = bpf_map_lookup_elem(&cgrp_q_arr, q_idx); + if (!cgq) { + scx_bpf_error("failed to lookup q_arr for cgroup[%llu] q_idx[%u]", + cgid, *q_idx); + return; + } + + if (bpf_map_push_elem(cgq, &pid, 0)) { + scx_bpf_error("cgroup[%llu] queue overflow", cgid); + return; + } + + /* bump q len, if going 0 -> 1, queue cgroup into the top_q */ + cgq_len = MEMBER_VPTR(cgrp_q_len, [*q_idx]); + if (!cgq_len) { + scx_bpf_error("MEMBER_VTPR malfunction"); + return; + } + + if (!__sync_fetch_and_add(cgq_len, 1) && + bpf_map_push_elem(&top_q, &cgid, 0)) { + scx_bpf_error("top_q overflow"); + return; + } +} + +static int lookup_pairc_and_mask(s32 cpu, struct pair_ctx **pairc, u32 *mask) +{ + u32 *vptr; + + vptr = (u32 *)MEMBER_VPTR(pair_id, [cpu]); + if (!vptr) + return -EINVAL; + + *pairc = bpf_map_lookup_elem(&pair_ctx, vptr); + if (!(*pairc)) + return -EINVAL; + + vptr = (u32 *)MEMBER_VPTR(in_pair_idx, [cpu]); + if (!vptr) + return -EINVAL; + + *mask = 1U << *vptr; + + return 0; +} + +static int try_dispatch(s32 cpu) +{ + struct pair_ctx *pairc; + struct bpf_map *cgq_map; + struct task_struct *p; + u64 now = bpf_ktime_get_ns(); + bool kick_pair = false; + bool expired; + u32 *vptr, in_pair_mask; + s32 pid, q_idx; + u64 cgid; + int ret; + + ret = lookup_pairc_and_mask(cpu, &pairc, &in_pair_mask); + if (ret) { + scx_bpf_error("failed to lookup pairc and in_pair_mask for cpu[%d]", + cpu); + return -ENOENT; + } + + bpf_spin_lock(&pairc->lock); + pairc->active_mask &= ~in_pair_mask; + + expired = time_before(pairc->started_at + pair_batch_dur_ns, now); + if (expired || pairc->draining) { + u64 new_cgid = 0; + + __sync_fetch_and_add(&nr_exps, 1); + + /* + * We're done with the current cgid. An obvious optimization + * would be not draining if the next cgroup is the current one. + * For now, be dumb and always expire. + */ + pairc->draining = true; + + if (pairc->active_mask) { + /* + * The other CPU is still active We want to wait until + * this cgroup expires. + * + * If the pair controls its CPU, and the time already + * expired, kick. When the other CPU arrives at + * dispatch and clears its active mask, it'll push the + * pair to the next cgroup and kick this CPU. + */ + __sync_fetch_and_add(&nr_exp_waits, 1); + bpf_spin_unlock(&pairc->lock); + if (expired) + kick_pair = true; + goto out_maybe_kick; + } + + bpf_spin_unlock(&pairc->lock); + + /* + * Pick the next cgroup. 
It'd be easier / cleaner to not drop + * pairc->lock and use stronger synchronization here especially + * given that we'll be switching cgroups significantly less + * frequently than tasks. Unfortunately, bpf_spin_lock can't + * really protect anything non-trivial. Let's do opportunistic + * operations instead. + */ + bpf_repeat(BPF_MAX_LOOPS) { + u32 *q_idx; + u64 *cgq_len; + + if (bpf_map_pop_elem(&top_q, &new_cgid)) { + /* no active cgroup, go idle */ + __sync_fetch_and_add(&nr_exp_empty, 1); + return 0; + } + + q_idx = bpf_map_lookup_elem(&cgrp_q_idx_hash, &new_cgid); + if (!q_idx) + continue; + + /* + * This is the only place where empty cgroups are taken + * off the top_q. + */ + cgq_len = MEMBER_VPTR(cgrp_q_len, [*q_idx]); + if (!cgq_len || !*cgq_len) + continue; + + /* + * If it has any tasks, requeue as we may race and not + * execute it. + */ + bpf_map_push_elem(&top_q, &new_cgid, 0); + break; + } + + bpf_spin_lock(&pairc->lock); + + /* + * The other CPU may already have started on a new cgroup while + * we dropped the lock. Make sure that we're still draining and + * start on the new cgroup. + */ + if (pairc->draining && !pairc->active_mask) { + __sync_fetch_and_add(&nr_cgrp_next, 1); + pairc->cgid = new_cgid; + pairc->started_at = now; + pairc->draining = false; + kick_pair = true; + } else { + __sync_fetch_and_add(&nr_cgrp_coll, 1); + } + } + + cgid = pairc->cgid; + pairc->active_mask |= in_pair_mask; + bpf_spin_unlock(&pairc->lock); + + /* again, it'd be better to do all these with the lock held, oh well */ + vptr = bpf_map_lookup_elem(&cgrp_q_idx_hash, &cgid); + if (!vptr) { + scx_bpf_error("failed to lookup q_idx for cgroup[%llu]", cgid); + return -ENOENT; + } + q_idx = *vptr; + + /* claim one task from cgrp_q w/ q_idx */ + bpf_repeat(BPF_MAX_LOOPS) { + u64 *cgq_len, len; + + cgq_len = MEMBER_VPTR(cgrp_q_len, [q_idx]); + if (!cgq_len || !(len = *(volatile u64 *)cgq_len)) { + /* the cgroup must be empty, expire and repeat */ + __sync_fetch_and_add(&nr_cgrp_empty, 1); + bpf_spin_lock(&pairc->lock); + pairc->draining = true; + pairc->active_mask &= ~in_pair_mask; + bpf_spin_unlock(&pairc->lock); + return -EAGAIN; + } + + if (__sync_val_compare_and_swap(cgq_len, len, len - 1) != len) + continue; + + break; + } + + cgq_map = bpf_map_lookup_elem(&cgrp_q_arr, &q_idx); + if (!cgq_map) { + scx_bpf_error("failed to lookup cgq_map for cgroup[%llu] q_idx[%d]", + cgid, q_idx); + return -ENOENT; + } + + if (bpf_map_pop_elem(cgq_map, &pid)) { + scx_bpf_error("cgq_map is empty for cgroup[%llu] q_idx[%d]", + cgid, q_idx); + return -ENOENT; + } + + p = bpf_task_from_pid(pid); + if (p) { + __sync_fetch_and_add(&nr_dispatched, 1); + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0); + bpf_task_release(p); + } else { + /* we don't handle dequeues, retry on lost tasks */ + __sync_fetch_and_add(&nr_missing, 1); + return -EAGAIN; + } + +out_maybe_kick: + if (kick_pair) { + s32 *pair = (s32 *)MEMBER_VPTR(pair_cpu, [cpu]); + if (pair) { + __sync_fetch_and_add(&nr_kicks, 1); + scx_bpf_kick_cpu(*pair, SCX_KICK_PREEMPT); + } + } + return 0; +} + +void BPF_STRUCT_OPS(pair_dispatch, s32 cpu, struct task_struct *prev) +{ + bpf_repeat(BPF_MAX_LOOPS) { + if (try_dispatch(cpu) != -EAGAIN) + break; + } +} + +s32 BPF_STRUCT_OPS(pair_cgroup_init, struct cgroup *cgrp) +{ + u64 cgid = cgrp->kn->id; + s32 i, q_idx; + + bpf_for(i, 0, MAX_CGRPS) { + q_idx = __sync_fetch_and_add(&cgrp_q_idx_cursor, 1) % MAX_CGRPS; + if (!__sync_val_compare_and_swap(&cgrp_q_idx_busy[q_idx], 0, 1)) + break; + } + if (i == 
MAX_CGRPS) + return -EBUSY; + + if (bpf_map_update_elem(&cgrp_q_idx_hash, &cgid, &q_idx, BPF_ANY)) { + u64 *busy = MEMBER_VPTR(cgrp_q_idx_busy, [q_idx]); + if (busy) + *busy = 0; + return -EBUSY; + } + + return 0; +} + +void BPF_STRUCT_OPS(pair_cgroup_exit, struct cgroup *cgrp) +{ + u64 cgid = cgrp->kn->id; + s32 *q_idx; + + q_idx = bpf_map_lookup_elem(&cgrp_q_idx_hash, &cgid); + if (q_idx) { + u64 *busy = MEMBER_VPTR(cgrp_q_idx_busy, [*q_idx]); + if (busy) + *busy = 0; + bpf_map_delete_elem(&cgrp_q_idx_hash, &cgid); + } +} + +s32 BPF_STRUCT_OPS(pair_init) +{ + if (!switch_partial) + scx_bpf_switch_all(); + return 0; +} + +void BPF_STRUCT_OPS(pair_exit, struct scx_exit_info *ei) +{ + uei_record(&uei, ei); +} + +SEC(".struct_ops") +struct sched_ext_ops pair_ops = { + .enqueue = (void *)pair_enqueue, + .dispatch = (void *)pair_dispatch, + .cgroup_init = (void *)pair_cgroup_init, + .cgroup_exit = (void *)pair_cgroup_exit, + .init = (void *)pair_init, + .exit = (void *)pair_exit, + .name = "pair", +}; diff --git a/tools/sched_ext/scx_example_pair.c b/tools/sched_ext/scx_example_pair.c new file mode 100644 index 000000000000..18e032bbc173 --- /dev/null +++ b/tools/sched_ext/scx_example_pair.c @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include "user_exit_info.h" +#include "scx_example_pair.h" +#include "scx_example_pair.skel.h" + +const char help_fmt[] = +"A demo sched_ext core-scheduler which always makes every sibling CPU pair\n" +"execute from the same CPU cgroup.\n" +"\n" +"See the top-level comment in .bpf.c for more details.\n" +"\n" +"Usage: %s [-S STRIDE] [-p]\n" +"\n" +" -S STRIDE Override CPU pair stride (default: nr_cpus_ids / 2)\n" +" -p Switch only tasks on SCHED_EXT policy intead of all\n" +" -h Display this help and exit\n"; + +static volatile int exit_req; + +static void sigint_handler(int dummy) +{ + exit_req = 1; +} + +int main(int argc, char **argv) +{ + struct scx_example_pair *skel; + struct bpf_link *link; + u64 seq = 0; + s32 stride, i, opt, outer_fd; + + signal(SIGINT, sigint_handler); + signal(SIGTERM, sigint_handler); + + libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + + skel = scx_example_pair__open(); + assert(skel); + + skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus(); + + /* pair up the earlier half to the latter by default, override with -s */ + stride = skel->rodata->nr_cpu_ids / 2; + + while ((opt = getopt(argc, argv, "S:ph")) != -1) { + switch (opt) { + case 'S': + stride = strtoul(optarg, NULL, 0); + break; + case 'p': + skel->rodata->switch_partial = true; + break; + default: + fprintf(stderr, help_fmt, basename(argv[0])); + return opt != 'h'; + } + } + + for (i = 0; i < skel->rodata->nr_cpu_ids; i++) { + if (skel->rodata->pair_cpu[i] < 0) { + skel->rodata->pair_cpu[i] = i + stride; + skel->rodata->pair_cpu[i + stride] = i; + skel->rodata->pair_id[i] = i; + skel->rodata->pair_id[i + stride] = i; + skel->rodata->in_pair_idx[i] = 0; + skel->rodata->in_pair_idx[i + stride] = 1; + } + } + + assert(!scx_example_pair__load(skel)); + + /* + * Populate the cgrp_q_arr map which is an array containing per-cgroup + * queues. It'd probably be better to do this from BPF but there are too + * many to initialize statically and there's no way to dynamically + * populate from BPF. 
+ */ + outer_fd = bpf_map__fd(skel->maps.cgrp_q_arr); + assert(outer_fd >= 0); + + printf("Initializing"); + for (i = 0; i < MAX_CGRPS; i++) { + s32 inner_fd; + + if (exit_req) + break; + + inner_fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, NULL, 0, + sizeof(u32), MAX_QUEUED, NULL); + assert(inner_fd >= 0); + assert(!bpf_map_update_elem(outer_fd, &i, &inner_fd, BPF_ANY)); + close(inner_fd); + + if (!(i % 10)) + printf("."); + fflush(stdout); + } + printf("\n"); + + /* + * Fully initialized, attach and run. + */ + link = bpf_map__attach_struct_ops(skel->maps.pair_ops); + assert(link); + + while (!exit_req && !uei_exited(&skel->bss->uei)) { + printf("[SEQ %lu]\n", seq++); + printf(" total:%10lu dispatch:%10lu missing:%10lu\n", + skel->bss->nr_total, + skel->bss->nr_dispatched, + skel->bss->nr_missing); + printf(" kicks:%10lu preemptions:%7lu\n", + skel->bss->nr_kicks, + skel->bss->nr_preemptions); + printf(" exp:%10lu exp_wait:%10lu exp_empty:%10lu\n", + skel->bss->nr_exps, + skel->bss->nr_exp_waits, + skel->bss->nr_exp_empty); + printf("cgnext:%10lu cgcoll:%10lu cgempty:%10lu\n", + skel->bss->nr_cgrp_next, + skel->bss->nr_cgrp_coll, + skel->bss->nr_cgrp_empty); + fflush(stdout); + sleep(1); + } + + bpf_link__destroy(link); + uei_print(&skel->bss->uei); + scx_example_pair__destroy(skel); + return 0; +} diff --git a/tools/sched_ext/scx_example_pair.h b/tools/sched_ext/scx_example_pair.h new file mode 100644 index 000000000000..f60b824272f7 --- /dev/null +++ b/tools/sched_ext/scx_example_pair.h @@ -0,0 +1,10 @@ +#ifndef __SCX_EXAMPLE_PAIR_H +#define __SCX_EXAMPLE_PAIR_H + +enum { + MAX_CPUS = 4096, + MAX_QUEUED = 4096, + MAX_CGRPS = 4096, +}; + +#endif /* __SCX_EXAMPLE_PAIR_H */ From patchwork Fri Mar 17 21:33:26 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71478 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp5998wrt; Fri, 17 Mar 2023 14:52:00 -0700 (PDT) X-Google-Smtp-Source: AK7set+M3SdC1JlL0gVqDM3L0fr8zWnS26Smltt4PUPP3UoRRP1S3yWwV0etMVuu8tTfcX9z1KZ8 X-Received: by 2002:a05:6a20:6914:b0:cc:4118:65c4 with SMTP id q20-20020a056a20691400b000cc411865c4mr10382951pzj.5.1679089920163; Fri, 17 Mar 2023 14:52:00 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679089920; cv=none; d=google.com; s=arc-20160816; b=NZ7QGH2OinULXhplBSOQMQEYMmUjB+h36Ge+dTsLi7UYLCpVFAL/KhFxLcd4KARXpM Lw+4jj6cs8n/xmzEVa0LO+UwxEjYgqjlFVMY5OxzXvdFHlGagesyPVQXENypm9YsORy0 aBXkOGnCG5fqB4VUksGqvet0uXMLu4M9fCjE8r9O12YisDsA4EhcpDNyf7+SpqCOHuVb fDIqzXTiNkifr/kqBxvyc64/L1ishtPnc2jMhMQnp5qATTGKQuXARsTIRQgfvyG9oHPi FEgFyn0uLekgYySsmfbx5GWlbbldmH/bFDFllsVHCFeWIpEKtdAE+WpwrVEdhc1byp7J BVKg== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=/eoGGlzywStMiYbVMcBjMDTNFvxz4K+Jg2JHrfcCjW0=; b=hB4TeBVm9wiVmPNBOX7zsdojYx43EVNCwQm4lru9RhAdIOZph+h+VDw+yKK4DCRmxP a+llJHZp8EbCTCwOe63+y3HwCWEqO6YNhFXWLbBwVMHnd9U8VZYeKz3ZBQyYGF1YSZ+3 vAYtd7MvNIMeWFVmAUxHX5OzhUt7ii8es/A1A8rVKGJjXoPenPwXgpR5XniF7DS/qzaM HLXOJLyctXzris5ZUk13j3+vAxXV1qmnqN4Ty1vewDMSM69yczhMYXDJcXQISvRxJYp3 OQUEg14ZXlbUrkvElJ59GBYGiineZw3OrWae1T8AB5yHds3Garo+e+Pkz/2C1mqqZ+z5 gW9Q== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b="UjNKU/f2"; spf=pass (google.com: domain of 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id 6-20020a170902c20600b0019b089bc8d7sm1997428pll.78.2023.03.17.14.34.28 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:34:28 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 25/32] sched_ext: Implement SCX_KICK_WAIT Date: Fri, 17 Mar 2023 11:33:26 -1000 Message-Id: <20230317213333.2174969-26-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE, SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760653392244349242?= X-GMAIL-MSGID: =?utf-8?q?1760653392244349242?= From: David Vernet If set when calling scx_bpf_kick_cpu(), the invoking CPU will busy wait for the kicked cpu to enter the scheduler. This will be used to improve the exclusion guarantees in scx_example_pair. Signed-off-by: David Vernet Reviewed-by: Tejun Heo Signed-off-by: Tejun Heo Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- kernel/sched/core.c | 4 +++- kernel/sched/ext.c | 33 ++++++++++++++++++++++++++++++++- kernel/sched/ext.h | 20 ++++++++++++++++++++ kernel/sched/sched.h | 2 ++ 4 files changed, 57 insertions(+), 2 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0780414f3c15..ff51977968fb 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6019,8 +6019,10 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) for_each_active_class(class) { p = class->pick_next_task(rq); - if (p) + if (p) { + scx_notify_pick_next_task(rq, p, class); return p; + } } BUG(); /* The idle class should always have a runnable task. */ diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 736f764d0f62..fbdbe4603853 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -126,6 +126,9 @@ static struct { static bool __cacheline_aligned_in_smp scx_has_idle_cpus; #endif /* CONFIG_SMP */ +/* for %SCX_KICK_WAIT */ +static u64 __percpu *scx_kick_cpus_pnt_seqs; + /* * Direct dispatch marker. 
* @@ -3215,6 +3218,7 @@ static const struct sysrq_key_op sysrq_sched_ext_reset_op = { static void kick_cpus_irq_workfn(struct irq_work *irq_work) { struct rq *this_rq = this_rq(); + u64 *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs); int this_cpu = cpu_of(this_rq); int cpu; @@ -3228,14 +3232,32 @@ static void kick_cpus_irq_workfn(struct irq_work *irq_work) if (cpumask_test_cpu(cpu, this_rq->scx.cpus_to_preempt) && rq->curr->sched_class == &ext_sched_class) rq->curr->scx.slice = 0; + pseqs[cpu] = rq->scx.pnt_seq; resched_curr(rq); + } else { + cpumask_clear_cpu(cpu, this_rq->scx.cpus_to_wait); } raw_spin_rq_unlock_irqrestore(rq, flags); } + for_each_cpu_andnot(cpu, this_rq->scx.cpus_to_wait, + cpumask_of(this_cpu)) { + /* + * Pairs with smp_store_release() issued by this CPU in + * scx_notify_pick_next_task() on the resched path. + * + * We busy-wait here to guarantee that no other task can be + * scheduled on our core before the target CPU has entered the + * resched path. + */ + while (smp_load_acquire(&cpu_rq(cpu)->scx.pnt_seq) == pseqs[cpu]) + cpu_relax(); + } + cpumask_clear(this_rq->scx.cpus_to_kick); cpumask_clear(this_rq->scx.cpus_to_preempt); + cpumask_clear(this_rq->scx.cpus_to_wait); } void __init init_sched_ext_class(void) @@ -3249,7 +3271,7 @@ void __init init_sched_ext_class(void) * through the generated vmlinux.h. */ WRITE_ONCE(v, SCX_WAKE_EXEC | SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | - SCX_TG_ONLINE); + SCX_TG_ONLINE | SCX_KICK_PREEMPT); BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params)); init_dsq(&scx_dsq_global, SCX_DSQ_GLOBAL); @@ -3257,6 +3279,12 @@ void __init init_sched_ext_class(void) BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL)); BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL)); #endif + scx_kick_cpus_pnt_seqs = + __alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * + num_possible_cpus(), + __alignof__(scx_kick_cpus_pnt_seqs[0])); + BUG_ON(!scx_kick_cpus_pnt_seqs); + for_each_possible_cpu(cpu) { struct rq *rq = cpu_rq(cpu); @@ -3265,6 +3293,7 @@ void __init init_sched_ext_class(void) BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL)); BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL)); + BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_wait, GFP_KERNEL)); init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn); } @@ -3533,6 +3562,8 @@ void scx_bpf_kick_cpu(s32 cpu, u64 flags) cpumask_set_cpu(cpu, rq->scx.cpus_to_kick); if (flags & SCX_KICK_PREEMPT) cpumask_set_cpu(cpu, rq->scx.cpus_to_preempt); + if (flags & SCX_KICK_WAIT) + cpumask_set_cpu(cpu, rq->scx.cpus_to_wait); irq_work_queue(&rq->scx.kick_cpus_irq_work); preempt_enable(); diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index 0c5a109e7e6d..fc27b28acfde 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -66,6 +66,7 @@ enum scx_tg_flags { enum scx_kick_flags { SCX_KICK_PREEMPT = 1LLU << 0, /* force scheduling on the CPU */ + SCX_KICK_WAIT = 1LLU << 1, /* wait for the CPU to be rescheduled */ }; #ifdef CONFIG_SCHED_CLASS_EXT @@ -95,6 +96,22 @@ __printf(2, 3) void scx_ops_error_type(enum scx_exit_type type, #define scx_ops_error(fmt, args...) \ scx_ops_error_type(SCX_EXIT_ERROR, fmt, ##args) +static inline void scx_notify_pick_next_task(struct rq *rq, + const struct task_struct *p, + const struct sched_class *active) +{ +#ifdef CONFIG_SMP + if (!scx_enabled()) + return; + /* + * Pairs with the smp_load_acquire() issued by a CPU in + * kick_cpus_irq_workfn() who is waiting for this CPU to perform a + * resched. 
+ */ + smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1); +#endif +} + static inline void scx_notify_sched_tick(void) { unsigned long last_check; @@ -149,6 +166,9 @@ static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; } static inline bool scx_can_stop_tick(struct rq *rq) { return true; } static inline void init_sched_ext_class(void) {} +static inline void scx_notify_pick_next_task(struct rq *rq, + const struct task_struct *p, + const struct sched_class *active) {} static inline void scx_notify_sched_tick(void) {} #define for_each_active_class for_each_class diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 3571cfda62a3..4b7f48239248 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -710,6 +710,8 @@ struct scx_rq { u32 flags; cpumask_var_t cpus_to_kick; cpumask_var_t cpus_to_preempt; + cpumask_var_t cpus_to_wait; + u64 pnt_seq; struct irq_work kick_cpus_irq_work; }; #endif /* CONFIG_SCHED_CLASS_EXT */ From patchwork Fri Mar 17 21:33:27 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71487 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp10529wrt; Fri, 17 Mar 2023 15:05:11 -0700 (PDT) X-Google-Smtp-Source: AK7set80RIA/inzDwCl1RBt2Cg/mlNaN16XEMLPAH3iluzx0mQroqQCdWXUCj9RYE+dtG4LzHHW/ X-Received: by 2002:a17:90b:358b:b0:237:40a5:77cb with SMTP id mm11-20020a17090b358b00b0023740a577cbmr7323662pjb.1.1679090711440; Fri, 17 Mar 2023 15:05:11 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679090711; cv=none; d=google.com; s=arc-20160816; b=GTUxo3HJeSfuv/SgRHYh+19TFo9Z+JofhJM3wb3rLYJ2R5yI62L5mswlY92FC9viY7 LLOjj3RC33M8jqZvXNVM7g7YLr4vFZ1EUD3QMbccdTQgPe02+yZKcfSGTRS7fvapNsPD 32cSDHrIl3G55FJCaacTi11jt/vnb8cnTcj67mnIypsl0lBbtyTUjlsdLcyLgwBmHYb+ Us6FvwDloAT/zCWQq3DyqhdjP3pJs2pOJO2wYh9zt6yR9dEECqDPKOST+Mh98jUWrQbo RHtYSkSQXuo+jsZU0yMVpJSLZjUKUe+ZLtEHBHAW7AV9wexcIGB/Vl9vWL/bVR23Jmn7 IeSQ== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=dhJQNNl9FqjR0n5o05XFS3TKjbMKnftZWZab6D6+g+0=; b=kg68ehzvvBqfAdGtTf15XhYQkgqhvzUsjvAqSfEiLFT3wEJDBw/vR1iCvslNLdxerJ bipvndtuLs15P2Jzfkt28bHKn98g/CmZ3DAoBEP3UzDEb98zFbWj6XUWDHmY3T8/u5zn vDMQ0wCfW3lZ+1h+xc1bvYitM4+9SqpgXZs2pXpMGCB6Qo+sqFLgUjyHO4K5tU/pX7D0 Bv73hkb/G1J7SZQiX4GaIc8JF991JoK94ktGOdE4/rI5IouUlniXpFo2itQILcNgyfcJ Yfq8zN+53i+xAnlU5kASyWGF+lBV5jWx+ajA3vR32e+y3dRfWknAussN405EGH3ivGa3 F5VA== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b="T/Ag+cK0"; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: from out1.vger.email (out1.vger.email. 
From: Tejun Heo
To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com
Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo
Subject: [PATCH 26/32] sched_ext: Implement sched_ext_ops.cpu_acquire/release()
Date: Fri, 17 Mar 2023 11:33:27 -1000
Message-Id: <20230317213333.2174969-27-tj@kernel.org>
In-Reply-To: <20230317213333.2174969-1-tj@kernel.org>
References: <20230317213333.2174969-1-tj@kernel.org>

From: David Vernet

Scheduler classes are strictly ordered and when a higher priority class has tasks to run, the lower priority ones lose access to the CPU. Being able to monitor and act on these events is necessary for use cases including strict core-scheduling and latency management.

This patch adds two operations, ops.cpu_acquire() and ops.cpu_release(). The former is invoked when a CPU becomes available to the BPF scheduler, the latter when the CPU is taken away from it. This patch also implements scx_bpf_reenqueue_local(), which can be called from ops.cpu_release() to trigger requeueing of all tasks in the local dsq of the CPU so that the tasks can be reassigned to other available CPUs.

scx_example_pair is updated to use ops.cpu_acquire/release() along with %SCX_KICK_WAIT to keep the pair scheduling guarantee strict even when a CPU is preempted by a higher priority scheduler class.

scx_example_qmap is updated to use ops.cpu_acquire/release() to empty the local dsq of a preempted CPU. A similar approach can be adopted by BPF schedulers that want tight control over latency.

v3: * Drop the const qualifier from scx_cpu_release_args.task. BPF enforces access control through the verifier, so the qualifier isn't actually operative and only gets in the way when interacting with various helpers.

v2: * Add p->scx.kf_mask annotation to allow calling scx_bpf_reenqueue_local() from ops.cpu_release() nested inside ops.init() and other sleepable operations.
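To make the callback pair concrete, below is a minimal, illustrative sketch that condenses the pattern the scx_example_qmap hunks in this patch add. The sketch_* names are hypothetical, it assumes the scx_common.bpf.h helpers used by the example schedulers, and it leaves out ops.dispatch(), relying on tasks queued on SCX_DSQ_GLOBAL being consumed by the default dispatch path:

/* Sketch: hand back the local DSQ of a CPU that gets preempted. */
#include "scx_common.bpf.h"

char _license[] SEC("license") = "GPL";

u64 nr_reenqueued;	/* statistic, readable from user space */

void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
{
	/*
	 * Tasks pushed off a preempted CPU come back through here with
	 * SCX_ENQ_REENQ set. Queue them on the global DSQ and kick an
	 * idle CPU so they don't wait for the preempted CPU to return.
	 */
	if (enq_flags & SCX_ENQ_REENQ) {
		s32 cpu;

		scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
		cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr);
		if (cpu >= 0)
			scx_bpf_kick_cpu(cpu, 0);
		return;
	}

	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}

void BPF_STRUCT_OPS(sketch_cpu_release, s32 cpu,
		    struct scx_cpu_release_args *args)
{
	/*
	 * A higher priority sched_class took @cpu away. Re-enqueue
	 * whatever is still sitting on its local DSQ; each task goes
	 * back through sketch_enqueue() with SCX_ENQ_REENQ set.
	 */
	__sync_fetch_and_add(&nr_reenqueued, scx_bpf_reenqueue_local());
}

SEC(".struct_ops")
struct sched_ext_ops sketch_ops = {
	.enqueue	= (void *)sketch_enqueue,
	.cpu_release	= (void *)sketch_cpu_release,
	.name		= "reenq_sketch",
};

Kicking an idle CPU after re-enqueueing matters because the preempted CPU may not run sched_ext tasks again for a while; without the kick, the re-enqueued work could sit on the global DSQ until some other CPU happens to go through dispatch.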
Signed-off-by: David Vernet Reviewed-by: Tejun Heo Signed-off-by: Tejun Heo Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- include/linux/sched/ext.h | 53 +++++++++- kernel/sched/ext.c | 131 ++++++++++++++++++++++++- kernel/sched/ext.h | 24 ++++- kernel/sched/sched.h | 1 + tools/sched_ext/scx_common.bpf.h | 1 + tools/sched_ext/scx_example_pair.bpf.c | 101 ++++++++++++++++++- tools/sched_ext/scx_example_qmap.bpf.c | 37 ++++++- tools/sched_ext/scx_example_qmap.c | 4 +- 8 files changed, 340 insertions(+), 12 deletions(-) diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 9e47e320369d..826da32e29ba 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -135,6 +135,32 @@ struct scx_cgroup_init_args { u32 weight; }; +enum scx_cpu_preempt_reason { + /* next task is being scheduled by &sched_class_rt */ + SCX_CPU_PREEMPT_RT, + /* next task is being scheduled by &sched_class_dl */ + SCX_CPU_PREEMPT_DL, + /* next task is being scheduled by &sched_class_stop */ + SCX_CPU_PREEMPT_STOP, + /* unknown reason for SCX being preempted */ + SCX_CPU_PREEMPT_UNKNOWN, +}; + +/* + * Argument container for ops->cpu_acquire(). Currently empty, but may be + * expanded in the future. + */ +struct scx_cpu_acquire_args {}; + +/* argument container for ops->cpu_release() */ +struct scx_cpu_release_args { + /* the reason the CPU was preempted */ + enum scx_cpu_preempt_reason reason; + + /* the task that's going to be scheduled on the CPU */ + struct task_struct *task; +}; + /** * struct sched_ext_ops - Operation table for BPF scheduler implementation * @@ -330,6 +356,28 @@ struct sched_ext_ops { */ void (*update_idle)(s32 cpu, bool idle); + /** + * cpu_acquire - A CPU is becoming available to the BPF scheduler + * @cpu: The CPU being acquired by the BPF scheduler. + * @args: Acquire arguments, see the struct definition. + * + * A CPU that was previously released from the BPF scheduler is now once + * again under its control. + */ + void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args); + + /** + * cpu_release - A CPU is taken away from the BPF scheduler + * @cpu: The CPU being released by the BPF scheduler. + * @args: Release arguments, see the struct definition. + * + * The specified CPU is no longer under the control of the BPF + * scheduler. This could be because it was preempted by a higher + * priority sched_class, though there may be other reasons as well. The + * caller should consult @args->reason to determine the cause. 
+ */ + void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args); + /** * prep_enable - Prepare to enable BPF scheduling for a task * @p: task to prepare BPF scheduling for @@ -534,12 +582,15 @@ enum scx_kf_mask { /* all non-sleepables may be nested inside INIT and SLEEPABLE */ SCX_KF_INIT = 1 << 0, /* running ops.init() */ SCX_KF_SLEEPABLE = 1 << 1, /* other sleepable init operations */ + /* ENQUEUE and DISPATCH may be nested inside CPU_RELEASE */ + SCX_KF_CPU_RELEASE = 1 << 2, /* ops.cpu_release() */ /* ops.dequeue (in REST) may be nested inside DISPATCH */ SCX_KF_DISPATCH = 1 << 3, /* ops.dispatch() */ SCX_KF_ENQUEUE = 1 << 4, /* ops.enqueue() */ SCX_KF_REST = 1 << 5, /* other rq-locked operations */ - __SCX_KF_RQ_LOCKED = SCX_KF_DISPATCH | SCX_KF_ENQUEUE | SCX_KF_REST, + __SCX_KF_RQ_LOCKED = SCX_KF_CPU_RELEASE | SCX_KF_DISPATCH | + SCX_KF_ENQUEUE | SCX_KF_REST, __SCX_KF_TERMINAL = SCX_KF_ENQUEUE | SCX_KF_REST, }; diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index fbdbe4603853..4f342b7a6f45 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -83,6 +83,7 @@ static bool scx_warned_zero_slice; static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last); static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting); +DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt); static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled); struct static_key_false scx_has_op[SCX_NR_ONLINE_OPS] = @@ -304,6 +305,12 @@ static __always_inline bool scx_kf_allowed(u32 mask) * inside ops.dispatch(). We don't need to check the SCX_KF_SLEEPABLE * boundary thanks to the above in_interrupt() check. */ + if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE && + (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) { + scx_ops_error("cpu_release kfunc called from a nested operation"); + return false; + } + if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH && (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) { scx_ops_error("dispatch kfunc called from a nested operation"); @@ -1377,6 +1384,19 @@ static int balance_scx(struct rq *rq, struct task_struct *prev, lockdep_assert_rq_held(rq); + if (static_branch_unlikely(&scx_ops_cpu_preempt) && + unlikely(rq->scx.cpu_released)) { + /* + * If the previous sched_class for the current CPU was not SCX, + * notify the BPF scheduler that it again has control of the + * core. This callback complements ->cpu_release(), which is + * emitted in scx_notify_pick_next_task(). + */ + if (SCX_HAS_OP(cpu_acquire)) + SCX_CALL_OP(0, cpu_acquire, cpu_of(rq), NULL); + rq->scx.cpu_released = false; + } + if (prev_on_scx) { WARN_ON_ONCE(prev->scx.flags & SCX_TASK_BAL_KEEP); update_curr_scx(rq); @@ -1384,7 +1404,9 @@ static int balance_scx(struct rq *rq, struct task_struct *prev, /* * If @prev is runnable & has slice left, it has priority and * fetching more just increases latency for the fetched tasks. - * Tell put_prev_task_scx() to put @prev on local_dsq. + * Tell put_prev_task_scx() to put @prev on local_dsq. If the + * BPF scheduler wants to handle this explicitly, it should + * implement ->cpu_released(). * * See scx_ops_disable_workfn() for the explanation on the * disabling() test. 
@@ -1590,6 +1612,58 @@ static struct task_struct *pick_next_task_scx(struct rq *rq) return p; } +static enum scx_cpu_preempt_reason +preempt_reason_from_class(const struct sched_class *class) +{ +#ifdef CONFIG_SMP + if (class == &stop_sched_class) + return SCX_CPU_PREEMPT_STOP; +#endif + if (class == &dl_sched_class) + return SCX_CPU_PREEMPT_DL; + if (class == &rt_sched_class) + return SCX_CPU_PREEMPT_RT; + return SCX_CPU_PREEMPT_UNKNOWN; +} + +void __scx_notify_pick_next_task(struct rq *rq, struct task_struct *task, + const struct sched_class *active) +{ + lockdep_assert_rq_held(rq); + + /* + * The callback is conceptually meant to convey that the CPU is no + * longer under the control of SCX. Therefore, don't invoke the + * callback if the CPU is is staying on SCX, or going idle (in which + * case the SCX scheduler has actively decided not to schedule any + * tasks on the CPU). + */ + if (likely(active >= &ext_sched_class)) + return; + + /* + * At this point we know that SCX was preempted by a higher priority + * sched_class, so invoke the ->cpu_release() callback if we have not + * done so already. We only send the callback once between SCX being + * preempted, and it regaining control of the CPU. + * + * ->cpu_release() complements ->cpu_acquire(), which is emitted the + * next time that balance_scx() is invoked. + */ + if (!rq->scx.cpu_released) { + if (SCX_HAS_OP(cpu_release)) { + struct scx_cpu_release_args args = { + .reason = preempt_reason_from_class(active), + .task = task, + }; + + SCX_CALL_OP(SCX_KF_CPU_RELEASE, + cpu_release, cpu_of(rq), &args); + } + rq->scx.cpu_released = true; + } +} + #ifdef CONFIG_SMP static bool test_and_clear_cpu_idle(int cpu) @@ -2657,6 +2731,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work) static_branch_disable_cpuslocked(&scx_has_op[i]); static_branch_disable_cpuslocked(&scx_ops_enq_last); static_branch_disable_cpuslocked(&scx_ops_enq_exiting); + static_branch_disable_cpuslocked(&scx_ops_cpu_preempt); static_branch_disable_cpuslocked(&scx_builtin_idle_enabled); synchronize_rcu(); @@ -2863,6 +2938,8 @@ static int scx_ops_enable(struct sched_ext_ops *ops) if (ops->flags & SCX_OPS_ENQ_EXITING) static_branch_enable_cpuslocked(&scx_ops_enq_exiting); + if (scx_ops.cpu_acquire || scx_ops.cpu_release) + static_branch_enable_cpuslocked(&scx_ops_cpu_preempt); if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) { reset_idle_masks(); @@ -3532,6 +3609,56 @@ static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = { .set = &scx_kfunc_ids_dispatch, }; +/** + * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ + * + * Iterate over all of the tasks currently enqueued on the local DSQ of the + * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of + * processed tasks. Can only be called from ops.cpu_release(). + */ +u32 scx_bpf_reenqueue_local(void) +{ + u32 nr_enqueued, i; + struct rq *rq; + struct scx_rq *scx_rq; + + if (!scx_kf_allowed(SCX_KF_CPU_RELEASE)) + return 0; + + rq = cpu_rq(smp_processor_id()); + lockdep_assert_rq_held(rq); + scx_rq = &rq->scx; + + /* + * Get the number of tasks on the local DSQ before iterating over it to + * pull off tasks. The enqueue callback below can signal that it wants + * the task to stay on the local DSQ, and we want to prevent the BPF + * scheduler from causing us to loop indefinitely. 
+ */ + nr_enqueued = scx_rq->local_dsq.nr; + for (i = 0; i < nr_enqueued; i++) { + struct task_struct *p; + + p = first_local_task(rq); + WARN_ON_ONCE(atomic64_read(&p->scx.ops_state) != SCX_OPSS_NONE); + WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED)); + WARN_ON_ONCE(p->scx.holding_cpu != -1); + dispatch_dequeue(scx_rq, p); + do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1); + } + + return nr_enqueued; +} + +BTF_SET8_START(scx_kfunc_ids_cpu_release) +BTF_ID_FLAGS(func, scx_bpf_reenqueue_local) +BTF_SET8_END(scx_kfunc_ids_cpu_release) + +static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = { + .owner = THIS_MODULE, + .set = &scx_kfunc_ids_cpu_release, +}; + /** * scx_bpf_kick_cpu - Trigger reschedule on a CPU * @cpu: cpu to kick @@ -3868,6 +3995,8 @@ static int __init register_ext_kfuncs(void) &scx_kfunc_set_enqueue_dispatch)) || (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_dispatch)) || + (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, + &scx_kfunc_set_cpu_release)) || (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_any))) { pr_err("sched_ext: failed to register kfunc sets (%d)\n", ret); diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index fc27b28acfde..4b22219c0dd9 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -27,6 +27,17 @@ enum scx_enq_flags { */ SCX_ENQ_PREEMPT = 1LLU << 32, + /* + * The task being enqueued was previously enqueued on the current CPU's + * %SCX_DSQ_LOCAL, but was removed from it in a call to the + * bpf_scx_reenqueue_local() kfunc. If bpf_scx_reenqueue_local() was + * invoked in a ->cpu_release() callback, and the task is again + * dispatched back to %SCX_LOCAL_DSQ by this current ->enqueue(), the + * task will not be scheduled on the CPU until at least the next invocation + * of the ->cpu_acquire() callback. + */ + SCX_ENQ_REENQ = 1LLU << 40, + /* * The task being enqueued is the only task available for the cpu. By * default, ext core keeps executing such tasks but when @@ -82,6 +93,8 @@ DECLARE_STATIC_KEY_FALSE(__scx_switched_all); #define scx_enabled() static_branch_unlikely(&__scx_ops_enabled) #define scx_switched_all() static_branch_unlikely(&__scx_switched_all) +DECLARE_STATIC_KEY_FALSE(scx_ops_cpu_preempt); + bool task_on_scx(struct task_struct *p); void scx_pre_fork(struct task_struct *p); int scx_fork(struct task_struct *p); @@ -96,13 +109,17 @@ __printf(2, 3) void scx_ops_error_type(enum scx_exit_type type, #define scx_ops_error(fmt, args...) 
\ scx_ops_error_type(SCX_EXIT_ERROR, fmt, ##args) +void __scx_notify_pick_next_task(struct rq *rq, + struct task_struct *p, + const struct sched_class *active); + static inline void scx_notify_pick_next_task(struct rq *rq, - const struct task_struct *p, + struct task_struct *p, const struct sched_class *active) { -#ifdef CONFIG_SMP if (!scx_enabled()) return; +#ifdef CONFIG_SMP /* * Pairs with the smp_load_acquire() issued by a CPU in * kick_cpus_irq_workfn() who is waiting for this CPU to perform a @@ -110,6 +127,9 @@ static inline void scx_notify_pick_next_task(struct rq *rq, */ smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1); #endif + if (!static_branch_unlikely(&scx_ops_cpu_preempt)) + return; + __scx_notify_pick_next_task(rq, p, active); } static inline void scx_notify_sched_tick(void) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 4b7f48239248..5dabe6cff2b9 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -708,6 +708,7 @@ struct scx_rq { u64 extra_enq_flags; /* see move_task_to_local_dsq() */ u32 nr_running; u32 flags; + bool cpu_released; cpumask_var_t cpus_to_kick; cpumask_var_t cpus_to_preempt; cpumask_var_t cpus_to_wait; diff --git a/tools/sched_ext/scx_common.bpf.h b/tools/sched_ext/scx_common.bpf.h index 3f58737d80b1..95f30dc9d625 100644 --- a/tools/sched_ext/scx_common.bpf.h +++ b/tools/sched_ext/scx_common.bpf.h @@ -68,6 +68,7 @@ void scx_bpf_destroy_dsq(u64 dsq_id) __ksym; bool scx_bpf_task_running(const struct task_struct *p) __ksym; s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym; struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym; +u32 scx_bpf_reenqueue_local(void) __ksym; #define BPF_STRUCT_OPS(name, args...) \ SEC("struct_ops/"#name) \ diff --git a/tools/sched_ext/scx_example_pair.bpf.c b/tools/sched_ext/scx_example_pair.bpf.c index e5ff39083181..279efe58b777 100644 --- a/tools/sched_ext/scx_example_pair.bpf.c +++ b/tools/sched_ext/scx_example_pair.bpf.c @@ -89,6 +89,28 @@ * be resolved in the near future which should allow greatly simplifying this * scheduler. * + * Dealing with preemption + * ----------------------- + * + * SCX is the lowest priority sched_class, and could be preempted by them at + * any time. To address this, the scheduler implements pair_cpu_release() and + * pair_cpu_acquire() callbacks which are invoked by the core scheduler when + * the scheduler loses and gains control of the CPU respectively. + * + * In pair_cpu_release(), we mark the pair_ctx as having been preempted, and + * then invoke: + * + * scx_bpf_kick_cpu(pair_cpu, SCX_KICK_PREEMPT | SCX_KICK_WAIT); + * + * This preempts the pair CPU, and waits until it has re-entered the scheduler + * before returning. This is necessary to ensure that the higher priority + * sched_class that preempted our scheduler does not schedule a task + * concurrently with our pair CPU. + * + * When the CPU is re-acquired in pair_cpu_acquire(), we unmark the preemption + * in the pair_ctx, and send another resched IPI to the pair CPU to re-enable + * pair scheduling. + * * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. * Copyright (c) 2022 Tejun Heo * Copyright (c) 2022 David Vernet @@ -129,6 +151,12 @@ struct pair_ctx { /* the CPUs that are currently active on the cgroup */ u32 active_mask; + + /* + * the CPUs that are currently preempted and running tasks in a + * different scheduler. 
+ */ + u32 preempted_mask; }; struct { @@ -295,7 +323,7 @@ static int try_dispatch(s32 cpu) struct task_struct *p; u64 now = bpf_ktime_get_ns(); bool kick_pair = false; - bool expired; + bool expired, pair_preempted; u32 *vptr, in_pair_mask; s32 pid, q_idx; u64 cgid; @@ -324,10 +352,14 @@ static int try_dispatch(s32 cpu) */ pairc->draining = true; - if (pairc->active_mask) { + pair_preempted = pairc->preempted_mask; + if (pairc->active_mask || pair_preempted) { /* - * The other CPU is still active We want to wait until - * this cgroup expires. + * The other CPU is still active, or is no longer under + * our control due to e.g. being preempted by a higher + * priority sched_class. We want to wait until this + * cgroup expires, or until control of our pair CPU has + * been returned to us. * * If the pair controls its CPU, and the time already * expired, kick. When the other CPU arrives at @@ -336,7 +368,7 @@ static int try_dispatch(s32 cpu) */ __sync_fetch_and_add(&nr_exp_waits, 1); bpf_spin_unlock(&pairc->lock); - if (expired) + if (expired && !pair_preempted) kick_pair = true; goto out_maybe_kick; } @@ -475,6 +507,63 @@ void BPF_STRUCT_OPS(pair_dispatch, s32 cpu, struct task_struct *prev) } } +void BPF_STRUCT_OPS(pair_cpu_acquire, s32 cpu, struct scx_cpu_acquire_args *args) +{ + int ret; + u32 in_pair_mask; + struct pair_ctx *pairc; + bool kick_pair; + + ret = lookup_pairc_and_mask(cpu, &pairc, &in_pair_mask); + if (ret) + return; + + bpf_spin_lock(&pairc->lock); + pairc->preempted_mask &= ~in_pair_mask; + /* Kick the pair CPU, unless it was also preempted. */ + kick_pair = !pairc->preempted_mask; + bpf_spin_unlock(&pairc->lock); + + if (kick_pair) { + s32 *pair = (s32 *)MEMBER_VPTR(pair_cpu, [cpu]); + + if (pair) { + __sync_fetch_and_add(&nr_kicks, 1); + scx_bpf_kick_cpu(*pair, SCX_KICK_PREEMPT); + } + } +} + +void BPF_STRUCT_OPS(pair_cpu_release, s32 cpu, struct scx_cpu_release_args *args) +{ + int ret; + u32 in_pair_mask; + struct pair_ctx *pairc; + bool kick_pair; + + ret = lookup_pairc_and_mask(cpu, &pairc, &in_pair_mask); + if (ret) + return; + + bpf_spin_lock(&pairc->lock); + pairc->preempted_mask |= in_pair_mask; + pairc->active_mask &= ~in_pair_mask; + /* Kick the pair CPU if it's still running. */ + kick_pair = pairc->active_mask; + pairc->draining = true; + bpf_spin_unlock(&pairc->lock); + + if (kick_pair) { + s32 *pair = (s32 *)MEMBER_VPTR(pair_cpu, [cpu]); + + if (pair) { + __sync_fetch_and_add(&nr_kicks, 1); + scx_bpf_kick_cpu(*pair, SCX_KICK_PREEMPT | SCX_KICK_WAIT); + } + } + __sync_fetch_and_add(&nr_preemptions, 1); +} + s32 BPF_STRUCT_OPS(pair_cgroup_init, struct cgroup *cgrp) { u64 cgid = cgrp->kn->id; @@ -528,6 +617,8 @@ SEC(".struct_ops") struct sched_ext_ops pair_ops = { .enqueue = (void *)pair_enqueue, .dispatch = (void *)pair_dispatch, + .cpu_acquire = (void *)pair_cpu_acquire, + .cpu_release = (void *)pair_cpu_release, .cgroup_init = (void *)pair_cgroup_init, .cgroup_exit = (void *)pair_cgroup_exit, .init = (void *)pair_init, diff --git a/tools/sched_ext/scx_example_qmap.bpf.c b/tools/sched_ext/scx_example_qmap.bpf.c index ed704a4024c0..88e69b967004 100644 --- a/tools/sched_ext/scx_example_qmap.bpf.c +++ b/tools/sched_ext/scx_example_qmap.bpf.c @@ -11,6 +11,8 @@ * * - BPF-side queueing using PIDs. * - Sleepable per-task storage allocation using ops.prep_enable(). + * - Using ops.cpu_release() to handle a higher priority scheduling class taking + * the CPU away. 
* * This scheduler is primarily for demonstration and testing of sched_ext * features and unlikely to be useful for actual workloads. @@ -81,7 +83,7 @@ struct { } dispatch_idx_cnt SEC(".maps"); /* Statistics */ -unsigned long nr_enqueued, nr_dispatched, nr_dequeued; +unsigned long nr_enqueued, nr_dispatched, nr_reenqueued, nr_dequeued; s32 BPF_STRUCT_OPS(qmap_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags) @@ -155,6 +157,22 @@ void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags) return; } + /* + * If the task was re-enqueued due to the CPU being preempted by a + * higher priority scheduling class, just re-enqueue the task directly + * on the global DSQ. As we want another CPU to pick it up, find and + * kick an idle CPU. + */ + if (enq_flags & SCX_ENQ_REENQ) { + s32 cpu; + + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, 0, enq_flags); + cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr); + if (cpu >= 0) + scx_bpf_kick_cpu(cpu, 0); + return; + } + ring = bpf_map_lookup_elem(&queue_arr, &idx); if (!ring) { scx_bpf_error("failed to find ring %d", idx); @@ -240,6 +258,22 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) } } +void BPF_STRUCT_OPS(qmap_cpu_release, s32 cpu, struct scx_cpu_release_args *args) +{ + u32 cnt; + + /* + * Called when @cpu is taken by a higher priority scheduling class. This + * makes @cpu no longer available for executing sched_ext tasks. As we + * don't want the tasks in @cpu's local dsq to sit there until @cpu + * becomes available again, re-enqueue them into the global dsq. See + * %SCX_ENQ_REENQ handling in qmap_enqueue(). + */ + cnt = scx_bpf_reenqueue_local(); + if (cnt) + __sync_fetch_and_add(&nr_reenqueued, cnt); +} + s32 BPF_STRUCT_OPS(qmap_prep_enable, struct task_struct *p, struct scx_enable_args *args) { @@ -275,6 +309,7 @@ struct sched_ext_ops qmap_ops = { .enqueue = (void *)qmap_enqueue, .dequeue = (void *)qmap_dequeue, .dispatch = (void *)qmap_dispatch, + .cpu_release = (void *)qmap_cpu_release, .prep_enable = (void *)qmap_prep_enable, .init = (void *)qmap_init, .exit = (void *)qmap_exit, diff --git a/tools/sched_ext/scx_example_qmap.c b/tools/sched_ext/scx_example_qmap.c index 3f68dae47bd0..2ae3794c9ea8 100644 --- a/tools/sched_ext/scx_example_qmap.c +++ b/tools/sched_ext/scx_example_qmap.c @@ -92,9 +92,9 @@ int main(int argc, char **argv) long nr_enqueued = skel->bss->nr_enqueued; long nr_dispatched = skel->bss->nr_dispatched; - printf("enq=%lu, dsp=%lu, delta=%ld, deq=%lu\n", + printf("enq=%lu, dsp=%lu, delta=%ld, reenq=%lu, deq=%lu\n", nr_enqueued, nr_dispatched, nr_enqueued - nr_dispatched, - skel->bss->nr_dequeued); + skel->bss->nr_reenqueued, skel->bss->nr_dequeued); fflush(stdout); sleep(1); } From patchwork Fri Mar 17 21:33:28 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71472 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp3531wrt; Fri, 17 Mar 2023 14:42:16 -0700 (PDT) X-Google-Smtp-Source: AK7set+DQhWY+qA+pfq9xbJh/OiUTqI49fZiHdAn/IAzH/BEEBlzyP2NkoX/wFT/oHsFkC/VXUlr X-Received: by 2002:a17:903:2292:b0:197:8e8e:f15 with SMTP id b18-20020a170903229200b001978e8e0f15mr11342882plh.6.1679089336251; Fri, 17 Mar 2023 14:42:16 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679089336; cv=none; d=google.com; s=arc-20160816; b=tG87B6iEuRxyf2Fm1yhEVb+2ay77UxBchH7/94AzqNFAcXzg4kSVE8GCvdIzq89MMJ Kq8Q2JT4Y9jDW+SLvxR2wvlRrjng6Woj6iWO9g8bg6I3SKYz9iKZ9AZynwU1RwrxeOVy 
yZEt/Hj+P0Oti/hP79CAB0NU5i2nbMMTSPw6OGQj9sNBPS3jE8p6Dty+jwfOSgzqUCRV 0t8dTD1CS3WIq+y/OPaEcv9SYFrQENbRz/untwM7WiZ0CYdExbFH47JP3hm7eSDLjYCM pye29TCKtPhGxIFdbBiWaSVYv3IvvpuYrRRB5VEnRvdXPuxyZueLfQwZ3f4wR6IFnsku UyLA== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=mFCkEOmw90Jjcera7igv6WX4z3OoBiQTwIKlnmq8L8s=; b=rlYBCqD8o4jJbln/eXqxp7N9lRoZ26z+r2gkvme9RJh1yG88tcdfF0lJBi/ZymSOom IrTddWb29H5N72+SfQEZQj6OYraCrsIhoz2ewVdRbcskdIxvnUzI3XX9hQC/3lZhIysf T52ou5gMGdVHl57gBVckj/L+IV+FOJys2SNxuuxQU/ddbl5VBUiYCMa4RRvJFB5nFDWm WfczZ8OxB6li6h1p8ANLZ32AkiUx2NSyHoeen5EtnNf4H9sw9g8o0zVnOWozNzTkcXo2 0b3hVZgVHWKacm/rn9RyOjdT4XzG3zU/K5iBh66Z+PRIlRM4G1O7Rv4fH8nPF8WESERb 6NCw== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=EQjZrfDd; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: from out1.vger.email (out1.vger.email. [2620:137:e000::1:20]) by mx.google.com with ESMTP id j11-20020a170902da8b00b001a179347c13si3915437plx.552.2023.03.17.14.41.43; Fri, 17 Mar 2023 14:42:16 -0700 (PDT) Received-SPF: pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) client-ip=2620:137:e000::1:20; Authentication-Results: mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=EQjZrfDd; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231224AbjCQVhW (ORCPT + 99 others); Fri, 17 Mar 2023 17:37:22 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:52120 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S230456AbjCQVg3 (ORCPT ); Fri, 17 Mar 2023 17:36:29 -0400 Received: from mail-pg1-x52a.google.com (mail-pg1-x52a.google.com [IPv6:2607:f8b0:4864:20::52a]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 527954BE95; Fri, 17 Mar 2023 14:35:44 -0700 (PDT) Received: by mail-pg1-x52a.google.com with SMTP id x37so3693643pga.1; Fri, 17 Mar 2023 14:35:44 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; t=1679088872; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=mFCkEOmw90Jjcera7igv6WX4z3OoBiQTwIKlnmq8L8s=; b=EQjZrfDdctZarieGDQuSWN9o8QXycWg6RDZ8NtZ35BpwmjqQXpoBYsTmKlZHb3hIZt wn2wDlkUSrAdeK2JZQdzDdsXvqU6QZG9hPm2zJhkJOtgbukIgIzxG+wC7GOBPLN4Fz6I L0lEK4Iz5DYKOXksnTFd+ubMJX6AplWMvcyEBbkQcPUCzbidTesCkWdYy9MTSlk/Dmea 7CS8Owf1OA17Y+dLJ0EgFWnhMkpBT4FY18kC1fnnHIvKI0LIWnDyUpBs1vqYfddQO+/I u5mWoVvwKEBuMBwoYI9W8d+HVV9RFtcl7jYY7bFbHjh5uLhaU3WBCgZ7nfqyGWWA7zYe v4iw== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; t=1679088872; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=mFCkEOmw90Jjcera7igv6WX4z3OoBiQTwIKlnmq8L8s=; 
b=azyJAYKMtk/oKDY1Z0GNOBMBYqxsNkgT+EUqYVcVG/jEptbtVABrSTR5Vgn7p5zSo0 Gu8VNcs/ZmO/H8qiI3kq0LnCe12TVgyoKyGxttyDsaFuf0IL1Ni5JkxRkqE95Qq2QA4f 2ITXU/F3Mp9NO9ePee0IZ2/3B9IgXAxnf7WPW5XUB9KmnmFzo3cuFaD9nu40pYsdl4z5 xWzV9Ggace/BGv2eavm5y5hBFJhJZszMhCnD/MkNK+Tz2wbxaMCxDCLXtg55K9XckkuG jUW/IfvIfMtiitGVhpOTH6+iUDMncakVvnutk3GqwH5tq/fsBKy6iqscb71gz6csNLMN Yfhw== X-Gm-Message-State: AO0yUKVc0meSwjN2FxBHfmzsP61xtXm5ncsgd4SmLeibfFtqOOAzXqDT 6OVzPilapqOr7t9sVgGR5Z0= X-Received: by 2002:a62:1dc3:0:b0:622:749a:b9de with SMTP id d186-20020a621dc3000000b00622749ab9demr7318031pfd.27.1679088872276; Fri, 17 Mar 2023 14:34:32 -0700 (PDT) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id 3-20020aa79143000000b0059261bd5bacsm1944724pfi.202.2023.03.17.14.34.31 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:34:31 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 27/32] sched_ext: Implement sched_ext_ops.cpu_online/offline() Date: Fri, 17 Mar 2023 11:33:28 -1000 Message-Id: <20230317213333.2174969-28-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE, SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760652779412276377?= X-GMAIL-MSGID: =?utf-8?q?1760652779412276377?= Add ops.cpu_online/offline() which are invoked when CPUs come online and offline respectively. As the enqueue path already automatically bypasses tasks to the local dsq on a deactivated CPU, BPF schedulers are guaranteed to see tasks only on CPUs which are between online() and offline(). Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- include/linux/sched/ext.h | 18 ++++++++++++++++++ kernel/sched/ext.c | 18 +++++++++++++++++- 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 826da32e29ba..63a011860f59 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -378,6 +378,24 @@ struct sched_ext_ops { */ void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args); + /** + * cpu_online - A CPU became online + * @cpu: CPU which just came up + * + * @cpu just came online. @cpu doesn't call ops.enqueue() or run tasks + * associated with other CPUs beforehand. 
+ */ + void (*cpu_online)(s32 cpu); + + /** + * cpu_offline - A CPU is going offline + * @cpu: CPU which is going offline + * + * @cpu is going offline. @cpu doesn't call ops.enqueue() or run tasks + * associated with other CPUs afterwards. + */ + void (*cpu_offline)(s32 cpu); + /** * prep_enable - Prepare to enable BPF scheduling for a task * @p: task to prepare BPF scheduling for diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 4f342b7a6f45..dbeec22bee73 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1393,7 +1393,8 @@ static int balance_scx(struct rq *rq, struct task_struct *prev, * emitted in scx_notify_pick_next_task(). */ if (SCX_HAS_OP(cpu_acquire)) - SCX_CALL_OP(0, cpu_acquire, cpu_of(rq), NULL); + SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_acquire, cpu_of(rq), + NULL); rq->scx.cpu_released = false; } @@ -1824,6 +1825,18 @@ void __scx_update_idle(struct rq *rq, bool idle) } } +static void rq_online_scx(struct rq *rq, enum rq_onoff_reason reason) +{ + if (SCX_HAS_OP(cpu_online) && reason == RQ_ONOFF_HOTPLUG) + SCX_CALL_OP(SCX_KF_REST, cpu_online, cpu_of(rq)); +} + +static void rq_offline_scx(struct rq *rq, enum rq_onoff_reason reason) +{ + if (SCX_HAS_OP(cpu_offline) && reason == RQ_ONOFF_HOTPLUG) + SCX_CALL_OP(SCX_KF_REST, cpu_offline, cpu_of(rq)); +} + #else /* !CONFIG_SMP */ static bool test_and_clear_cpu_idle(int cpu) { return false; } @@ -2329,6 +2342,9 @@ DEFINE_SCHED_CLASS(ext) = { .balance = balance_scx, .select_task_rq = select_task_rq_scx, .set_cpus_allowed = set_cpus_allowed_scx, + + .rq_online = rq_online_scx, + .rq_offline = rq_offline_scx, #endif .task_tick = task_tick_scx, From patchwork Fri Mar 17 21:33:29 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71477 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp5770wrt; Fri, 17 Mar 2023 14:51:05 -0700 (PDT) X-Google-Smtp-Source: AK7set8Pdv7t9UWQSTB+P+7eL/Xr81IPGldhU5cIvlkbxn8zO6sMA68N7Odr6ydkxozW7SRFGkl1 X-Received: by 2002:a17:903:3291:b0:1a1:956d:2281 with SMTP id jh17-20020a170903329100b001a1956d2281mr6676667plb.3.1679089865692; Fri, 17 Mar 2023 14:51:05 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679089865; cv=none; d=google.com; s=arc-20160816; b=x1YaJ7f+qrCU7I39bPYyPV9HPRVlrFWToGt69QZTF4xeo/qz4A+Jtpty3LL5x2TuLa eL0dYNWs9nAgc1+cJ/2QdGnUAcad0UIU/kWP/WsFmQ1QK386rZm6h6kzrFB6vsGbbkzL MkULSPM1gKHwD1CU1lX6Ub64k8Mr2joUlvdx5H663HeYByKx5tWT7hTXUjhEfjBRPnkk zRCpP4yZbvvM+B1ZKDbOKlICd6NUNDcOVyHe7wZ11zbwLgkusch4a24f3MLsTM9mBQT4 TLS39dNBGnf9d4/XPWnut4RK9jnD/tHr9cPR+xuZFCxKLYulvyEBnolAciY2Vov3nlpn zLWQ== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=zcAq54KD+rWTpmvPHGmfB14ufok5F164YgicPOJDfJM=; b=fHZI0H33cAWDqHjpYSxETCOFkPwkc1AHUdNq36yJBLQJvUjyiHaxWbdYgoRejHuvLC HYdZavUW5KgREAWDSB6BZuoC6zprTs4Zr24hnZA7xjPqyVisKQCrNeVDAnBIB8Ooa9CR MY7pULlQI9FFd0YEUtCvoXyf4gZmkRa5yCQ1S+m13p+Y59yGDGYhVL0sqcG99qBYG+w+ kfzG/Ug1cBOzJTNot3Y0tbhBLbDaZClGbhI/aoXQfRbyJUiWCQZZH1pOSNaUwDDlQ3Kj YwtjLS+zMB6OkyP0eg7aPRFA1vK1HtlIRRg8EZdebzciMNby+rTbybu7Ownk3J9IYIlY G9vA== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=OCg64XYy; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) 
smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: from out1.vger.email (out1.vger.email. [2620:137:e000::1:20]) by mx.google.com with ESMTP id j11-20020a170902da8b00b001a179347c13si3915437plx.552.2023.03.17.14.50.22; Fri, 17 Mar 2023 14:51:05 -0700 (PDT) Received-SPF: pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) client-ip=2620:137:e000::1:20; Authentication-Results: mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=OCg64XYy; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231368AbjCQViF (ORCPT + 99 others); Fri, 17 Mar 2023 17:38:05 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:52300 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S229494AbjCQVhS (ORCPT ); Fri, 17 Mar 2023 17:37:18 -0400 Received: from mail-pj1-x1036.google.com (mail-pj1-x1036.google.com [IPv6:2607:f8b0:4864:20::1036]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 104FD5B41B; Fri, 17 Mar 2023 14:36:03 -0700 (PDT) Received: by mail-pj1-x1036.google.com with SMTP id p13-20020a17090a284d00b0023d2e945aebso8954266pjf.0; Fri, 17 Mar 2023 14:36:03 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; t=1679088874; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=zcAq54KD+rWTpmvPHGmfB14ufok5F164YgicPOJDfJM=; b=OCg64XYySmuP+BoMBml8+fc0FN+P4woAcE09yZqeUg6Aopvg0dLK+1YRmW91h2DNuF Tiu8Zz3AAMgPaBu+wt2eXASWgcTjYHvxvu5T0PNc5M85gV7bJz8U5g+WL05Pxofxuh14 Cimc8PO2I3Ip1XXj4nkDK7tZJGFv0xWuXc7SnmWk57rx5fdPump+lof91pmiNDE1QqYq v7CYFcGOGddBB0r9VkQ0mlHe7LHSixKEWfXDgJSohr+8ckCbWWTgS0lyPwL/FoJVwbB0 zClTUd+3G8/It7HWeOnOIYVg09l3lIITEVs9BbQue9VMFWfvOu1g2IFxRiy7gkvF6WVz bvng== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; t=1679088874; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=zcAq54KD+rWTpmvPHGmfB14ufok5F164YgicPOJDfJM=; b=VGPlkks39w947qPX0AIy59COKx8hi/hwV5b5QMegwSIkQk1j3nQlkGc69r5KYS1Dxz HG3UUY+rjRt0iqRo7ZhSQHE+GRhaxIWvlCizs7X9G2r2GLhYQV59bFHuGI/BJeaDPIzb CUVBjeQobplWgM5P//qOWLZlxVz0096o+95iaPNqv2xhaBtdMv3KN58gQLJqrJ4A7iav i7XY3of0stte2FuSrSIx6DnxWcbyx+6eKp4p/zLLcvWzWMqLqEz9VW8N3nWIPTcjlL1n 6y9RjqniGzWWkP0qDfUrJE62wVbKsWyBNY9l0+CAivBCyOAiWBABzrQJQE/Veifn98OH swtQ== X-Gm-Message-State: AO0yUKXzXnOtQ6DNPDgGJpz0KJ8mK/UJAv0mePfY3hkILlufhAgYmGqN Pq6CFfWFIMUy0jfbZd4hC0Q= X-Received: by 2002:a05:6a21:3398:b0:d5:909a:7955 with SMTP id yy24-20020a056a21339800b000d5909a7955mr10601946pzb.41.1679088874023; Fri, 17 Mar 2023 14:34:34 -0700 (PDT) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id p9-20020a63f449000000b0050bf1d1cdc8sm1891659pgk.21.2023.03.17.14.34.33 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:34:33 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 28/32] sched_ext: Implement core-sched support Date: Fri, 17 Mar 2023 11:33:29 -1000 Message-Id: <20230317213333.2174969-29-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE, SPF_PASS autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760653335007389800?= X-GMAIL-MSGID: =?utf-8?q?1760653335007389800?= The core-sched support is composed of the following parts: * task_struct->scx.core_sched_at is added. This is a timestamp which can be used to order tasks. Depending on whether the BPF scheduler implements custom ordering, it tracks either global FIFO ordering of all tasks or local-DSQ ordering within the dispatched tasks on a CPU. * prio_less() is updated to call scx_prio_less() when comparing SCX tasks. scx_prio_less() calls ops.core_sched_before() if available or uses the core_sched_at timestamp. For global FIFO ordering, the BPF scheduler doesn't need to do anything. Otherwise, it should implement ops.core_sched_before() which reflects the ordering. * When core-sched is enabled, balance_scx() balances all SMT siblings so that they all have tasks dispatched if necessary before pick_task_scx() is called. pick_task_scx() picks between the current task and the first dispatched task on the local DSQ based on availability and the core_sched_at timestamps. Note that FIFO ordering is expected among the already dispatched tasks whether running or on the local DSQ, so this path always compares core_sched_at instead of calling into ops.core_sched_before(). qmap_core_sched_before() is added to scx_example_qmap. It scales the distances from the heads of the queues to compare the tasks across different priority queues and seems to behave as expected. v2: * Sched core added the const qualifiers to prio_less task arguments. Explicitly drop them for ops.core_sched_before() task arguments. BPF enforces access control through the verifier, so the qualifier isn't actually operative and only gets in the way when interacting with various helpers. 
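For illustration only (not part of this patch): a BPF scheduler which tracks its own per-task vruntime could wire up ops.core_sched_before() roughly as sketched below. The map and helper names (task_vtime_ctx, task_vtime_stor, task_vtime()) are made up for this sketch and the usual scx_common.bpf.h boilerplate is assumed; only the callback signature and the BPF_STRUCT_OPS() macro come from the series.

struct task_vtime_ctx {
	u64 vruntime;			/* maintained elsewhere by the scheduler */
};

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct task_vtime_ctx);
} task_vtime_stor SEC(".maps");

static u64 task_vtime(struct task_struct *p)
{
	struct task_vtime_ctx *vctx;

	vctx = bpf_task_storage_get(&task_vtime_stor, p, 0, 0);
	/* missing context shouldn't happen; treat as the oldest possible task */
	return vctx ? vctx->vruntime : 0;
}

/*
 * Return true if @a should run before @b when core-sched picks tasks for the
 * SMT siblings of a core. The subtraction keeps the comparison wrap-safe,
 * mirroring the kernel-side time_before64() default.
 */
bool BPF_STRUCT_OPS(sketch_core_sched_before,
		    struct task_struct *a, struct task_struct *b)
{
	return (s64)(task_vtime(a) - task_vtime(b)) < 0;
}

The callback would then be hooked up as .core_sched_before = (void *)sketch_core_sched_before in the scheduler's sched_ext_ops table, as scx_example_qmap does further below.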
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Reviewed-by: Josh Don --- include/linux/sched/ext.h | 21 +++ kernel/Kconfig.preempt | 2 +- kernel/sched/core.c | 12 +- kernel/sched/ext.c | 219 +++++++++++++++++++++++-- kernel/sched/ext.h | 13 ++ tools/sched_ext/scx_example_qmap.bpf.c | 87 +++++++++- tools/sched_ext/scx_example_qmap.c | 5 +- 7 files changed, 342 insertions(+), 17 deletions(-) diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 63a011860f59..210b8516d197 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -316,6 +316,24 @@ struct sched_ext_ops { */ bool (*yield)(struct task_struct *from, struct task_struct *to); + /** + * core_sched_before - Task ordering for core-sched + * @a: task A + * @b: task B + * + * Used by core-sched to determine the ordering between two tasks. See + * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on + * core-sched. + * + * Both @a and @b are runnable and may or may not currently be queued on + * the BPF scheduler. Should return %true if @a should run before @b. + * %false if there's no required ordering or @b should run before @a. + * + * If not specified, the default is ordering them according to when they + * became runnable. + */ + bool (*core_sched_before)(struct task_struct *a,struct task_struct *b); + /** * set_weight - Set task weight * @p: task to set weight for @@ -628,6 +646,9 @@ struct sched_ext_entity { struct task_struct *kf_tasks[2]; /* see SCX_CALL_OP_TASK() */ atomic64_t ops_state; unsigned long runnable_at; +#ifdef CONFIG_SCHED_CORE + u64 core_sched_at; /* see scx_prio_less() */ +#endif /* BPF scheduler modifiable fields */ diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index 0afcda19bc50..e12a057ead7b 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -135,7 +135,7 @@ config SCHED_CORE config SCHED_CLASS_EXT bool "Extensible Scheduling Class" - depends on BPF_SYSCALL && BPF_JIT && !SCHED_CORE + depends on BPF_SYSCALL && BPF_JIT help This option enables a new scheduler class sched_ext (SCX), which allows scheduling policies to be implemented as BPF programs to diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ff51977968fb..146d736fe73b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -163,7 +163,12 @@ static inline int __task_prio(const struct task_struct *p) if (p->sched_class == &idle_sched_class) return MAX_RT_PRIO + NICE_WIDTH; /* 140 */ - return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */ +#ifdef CONFIG_SCHED_CLASS_EXT + if (p->sched_class == &ext_sched_class) + return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */ +#endif + + return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */ } /* @@ -192,6 +197,11 @@ static inline bool prio_less(const struct task_struct *a, if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */ return cfs_prio_less(a, b, in_fi); +#ifdef CONFIG_SCHED_CLASS_EXT + if (pa == MAX_RT_PRIO + MAX_NICE + 1) /* ext */ + return scx_prio_less(a, b, in_fi); +#endif + return false; } diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index dbeec22bee73..7f593f2012c2 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -530,6 +530,49 @@ static int ops_sanitize_err(const char *ops_name, s32 err) return -EPROTO; } +/** + * touch_core_sched - Update timestamp used for core-sched task ordering + * @rq: rq to read clock from, must be locked + * @p: task to update the timestamp for + * + * Update @p->scx.core_sched_at timestamp. 
This is used by scx_prio_less() to + * implement global or local-DSQ FIFO ordering for core-sched. Should be called + * when a task becomes runnable and its turn on the CPU ends (e.g. slice + * exhaustion). + */ +static void touch_core_sched(struct rq *rq, struct task_struct *p) +{ +#ifdef CONFIG_SCHED_CORE + /* + * It's okay to update the timestamp spuriously. Use + * sched_core_disabled() which is cheaper than enabled(). + */ + if (!sched_core_disabled()) + p->scx.core_sched_at = rq_clock_task(rq); +#endif +} + +/** + * touch_core_sched_dispatch - Update core-sched timestamp on dispatch + * @rq: rq to read clock from, must be locked + * @p: task being dispatched + * + * If the BPF scheduler implements custom core-sched ordering via + * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO + * ordering within each local DSQ. This function is called from dispatch paths + * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect. + */ +static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p) +{ + lockdep_assert_rq_held(rq); + assert_clock_updated(rq); + +#ifdef CONFIG_SCHED_CORE + if (SCX_HAS_OP(core_sched_before)) + touch_core_sched(rq, p); +#endif +} + static void update_curr_scx(struct rq *rq) { struct task_struct *curr = rq->curr; @@ -545,8 +588,11 @@ static void update_curr_scx(struct rq *rq) account_group_exec_runtime(curr, delta_exec); cgroup_account_cputime(curr, delta_exec); - if (curr->scx.slice != SCX_SLICE_INF) + if (curr->scx.slice != SCX_SLICE_INF) { curr->scx.slice -= min(curr->scx.slice, delta_exec); + if (!curr->scx.slice) + touch_core_sched(rq, curr); + } } static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p, @@ -702,6 +748,8 @@ static void direct_dispatch(struct task_struct *ddsp_task, struct task_struct *p return; } + touch_core_sched_dispatch(task_rq(p), p); + dsq = find_dsq_for_dispatch(task_rq(p), dsq_id, p); dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS); @@ -785,12 +833,19 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags, return; local: + /* + * For task-ordering, slice refill must be treated as implying the end + * of the current slice. Otherwise, the longer @p stays on the CPU, the + * higher priority it becomes from scx_prio_less()'s POV. + */ + touch_core_sched(rq, p); p->scx.slice = SCX_SLICE_DFL; local_norefill: dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags); return; global: + touch_core_sched(rq, p); /* see the comment in local: */ p->scx.slice = SCX_SLICE_DFL; dispatch_enqueue(&scx_dsq_global, p, enq_flags); } @@ -847,6 +902,9 @@ static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags if (SCX_HAS_OP(runnable)) SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags); + if (enq_flags & SCX_ENQ_WAKEUP) + touch_core_sched(rq, p); + do_enqueue_task(rq, p, enq_flags, sticky_cpu); } @@ -1297,6 +1355,7 @@ static void finish_dispatch(struct rq *rq, struct rq_flags *rf, struct scx_dispatch_q *dsq; u64 opss; + touch_core_sched_dispatch(rq, p); retry: /* * No need for _acquire here. 
@p is accessed only after a successful @@ -1374,8 +1433,8 @@ static void flush_dispatch_buf(struct rq *rq, struct rq_flags *rf) dspc->buf_cursor = 0; } -static int balance_scx(struct rq *rq, struct task_struct *prev, - struct rq_flags *rf) +static int balance_one(struct rq *rq, struct task_struct *prev, + struct rq_flags *rf, bool local) { struct scx_rq *scx_rq = &rq->scx; struct scx_dsp_ctx *dspc = this_cpu_ptr(&scx_dsp_ctx); @@ -1399,7 +1458,7 @@ static int balance_scx(struct rq *rq, struct task_struct *prev, } if (prev_on_scx) { - WARN_ON_ONCE(prev->scx.flags & SCX_TASK_BAL_KEEP); + WARN_ON_ONCE(local && (prev->scx.flags & SCX_TASK_BAL_KEEP)); update_curr_scx(rq); /* @@ -1411,10 +1470,16 @@ static int balance_scx(struct rq *rq, struct task_struct *prev, * * See scx_ops_disable_workfn() for the explanation on the * disabling() test. + * + * When balancing a remote CPU for core-sched, there won't be a + * following put_prev_task_scx() call and we don't own + * %SCX_TASK_BAL_KEEP. Instead, pick_task_scx() will test the + * same conditions later and pick @rq->curr accordingly. */ if ((prev->scx.flags & SCX_TASK_QUEUED) && prev->scx.slice && !scx_ops_disabling()) { - prev->scx.flags |= SCX_TASK_BAL_KEEP; + if (local) + prev->scx.flags |= SCX_TASK_BAL_KEEP; return 1; } } @@ -1470,10 +1535,55 @@ static int balance_scx(struct rq *rq, struct task_struct *prev, return 0; } +static int balance_scx(struct rq *rq, struct task_struct *prev, + struct rq_flags *rf) +{ + int ret; + + ret = balance_one(rq, prev, rf, true); + + /* + * When core-sched is enabled, this ops.balance() call will be followed + * by put_prev_scx() and pick_task_scx() on this CPU and pick_task_scx() + * on the SMT siblings. Balance the siblings too. + */ + if (sched_core_enabled(rq)) { + const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq)); + int scpu; + + for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) { + struct rq *srq = cpu_rq(scpu); + struct rq_flags srf; + struct task_struct *sprev = srq->curr; + + /* + * While core-scheduling, rq lock is shared among + * siblings but the debug annotations and rq clock + * aren't. Do pinning dance to transfer the ownership. + */ + WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq)); + rq_unpin_lock(rq, rf); + rq_pin_lock(srq, &srf); + + update_rq_clock(srq); + balance_one(srq, sprev, &srf, false); + + rq_unpin_lock(srq, &srf); + rq_repin_lock(rq, rf); + } + } + + return ret; +} + static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first) { if (p->scx.flags & SCX_TASK_QUEUED) { - WARN_ON_ONCE(atomic64_read(&p->scx.ops_state) != SCX_OPSS_NONE); + /* + * Core-sched might decide to execute @p before it is + * dispatched. Call ops_dequeue() to notify the BPF scheduler. + */ + ops_dequeue(p, SCX_DEQ_CORE_SCHED_EXEC); dispatch_dequeue(&rq->scx, p); } @@ -1556,7 +1666,8 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p) /* * If @p has slice left and balance_scx() didn't tag it for * keeping, @p is getting preempted by a higher priority - * scheduler class. Leave it at the head of the local DSQ. + * scheduler class or core-sched forcing a different task. Leave + * it at the head of the local DSQ. 
*/ if (p->scx.slice && !scx_ops_disabling()) { dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD); @@ -1613,6 +1724,84 @@ static struct task_struct *pick_next_task_scx(struct rq *rq) return p; } +#ifdef CONFIG_SCHED_CORE +/** + * scx_prio_less - Task ordering for core-sched + * @a: task A + * @b: task B + * + * Core-sched is implemented as an additional scheduling layer on top of the + * usual sched_class'es and needs to find out the expected task ordering. For + * SCX, core-sched calls this function to interrogate the task ordering. + * + * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used + * to implement the default task ordering. The older the timestamp, the higher + * prority the task - the global FIFO ordering matching the default scheduling + * behavior. + * + * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to + * implement FIFO ordering within each local DSQ. See pick_task_scx(). + */ +bool scx_prio_less(const struct task_struct *a, const struct task_struct *b, + bool in_fi) +{ + /* + * The const qualifiers are dropped from task_struct pointers when + * calling ops.core_sched_before(). Accesses are controlled by the + * verifier. + */ + if (SCX_HAS_OP(core_sched_before) && !scx_ops_disabling()) + return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, core_sched_before, + (struct task_struct *)a, + (struct task_struct *)b); + else + return time_after64(a->scx.core_sched_at, b->scx.core_sched_at); +} + +/** + * pick_task_scx - Pick a candidate task for core-sched + * @rq: rq to pick the candidate task from + * + * Core-sched calls this function on each SMT sibling to determine the next + * tasks to run on the SMT siblings. balance_one() has been called on all + * siblings and put_prev_task_scx() has been called only for the current CPU. + * + * As put_prev_task_scx() hasn't been called on remote CPUs, we can't just look + * at the first task in the local dsq. @rq->curr has to be considered explicitly + * to mimic %SCX_TASK_BAL_KEEP. + */ +static struct task_struct *pick_task_scx(struct rq *rq) +{ + struct task_struct *curr = rq->curr; + struct task_struct *first = first_local_task(rq); + + if (curr->scx.flags & SCX_TASK_QUEUED) { + /* is curr the only runnable task? */ + if (!first) + return curr; + + /* + * Does curr trump first? We can always go by core_sched_at for + * this comparison as it represents global FIFO ordering when + * the default core-sched ordering is used and local-DSQ FIFO + * ordering otherwise. + * + * We can have a task with an earlier timestamp on the DSQ. For + * example, when a current task is preempted by a sibling + * picking a different cookie, the task would be requeued at the + * head of the local DSQ with an earlier timestamp than the + * core-sched picked next task. Besides, the BPF scheduler may + * dispatch any tasks to the local DSQ anytime. + */ + if (curr->scx.slice && time_before64(curr->scx.core_sched_at, + first->scx.core_sched_at)) + return curr; + } + + return first; /* this may be %NULL */ +} +#endif /* CONFIG_SCHED_CORE */ + static enum scx_cpu_preempt_reason preempt_reason_from_class(const struct sched_class *class) { @@ -1893,11 +2082,13 @@ static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued) update_curr_scx(rq); /* - * While disabling, always resched as we can't trust the slice - * management. + * While disabling, always resched and refresh core-sched timestamp as + * we can't trust the slice management or ops.core_sched_before(). 
*/ - if (scx_ops_disabling()) + if (scx_ops_disabling()) { curr->scx.slice = 0; + touch_core_sched(rq, curr); + } if (!curr->scx.slice) resched_curr(rq); @@ -2347,6 +2538,10 @@ DEFINE_SCHED_CLASS(ext) = { .rq_offline = rq_offline_scx, #endif +#ifdef CONFIG_SCHED_CORE + .pick_task = pick_task_scx, +#endif + .task_tick = task_tick_scx, .switching_to = switching_to_scx, @@ -2675,9 +2870,11 @@ static void scx_ops_disable_workfn(struct kthread_work *work) * * b. balance_scx() never sets %SCX_TASK_BAL_KEEP as the slice value * can't be trusted. Whenever a tick triggers, the running task is - * rotated to the tail of the queue. + * rotated to the tail of the queue with core_sched_at touched. * * c. pick_next_task() suppresses zero slice warning. + * + * d. scx_prio_less() reverts to the default core_sched_at order. */ scx_ops.enqueue = scx_ops_fallback_enqueue; scx_ops.dispatch = scx_ops_fallback_dispatch; diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index 4b22219c0dd9..7b7973e6d8c0 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -68,6 +68,14 @@ enum scx_enq_flags { enum scx_deq_flags { /* expose select DEQUEUE_* flags as enums */ SCX_DEQ_SLEEP = DEQUEUE_SLEEP, + + /* high 32bits are SCX specific */ + + /* + * The generic core-sched layer decided to execute the task even though + * it hasn't been dispatched yet. Dequeue from the BPF side. + */ + SCX_DEQ_CORE_SCHED_EXEC = 1LLU << 32, }; enum scx_tg_flags { @@ -173,6 +181,11 @@ static inline const struct sched_class *next_active_class(const struct sched_cla for_active_class_range(class, (prev_class) > &ext_sched_class ? \ &ext_sched_class : (prev_class), (end_class)) +#ifdef CONFIG_SCHED_CORE +bool scx_prio_less(const struct task_struct *a, const struct task_struct *b, + bool in_fi); +#endif + #else /* CONFIG_SCHED_CLASS_EXT */ #define scx_enabled() false diff --git a/tools/sched_ext/scx_example_qmap.bpf.c b/tools/sched_ext/scx_example_qmap.bpf.c index 88e69b967004..579ab21ae403 100644 --- a/tools/sched_ext/scx_example_qmap.bpf.c +++ b/tools/sched_ext/scx_example_qmap.bpf.c @@ -13,6 +13,7 @@ * - Sleepable per-task storage allocation using ops.prep_enable(). * - Using ops.cpu_release() to handle a higher priority scheduling class taking * the CPU away. + * - Core-sched support. * * This scheduler is primarily for demonstration and testing of sched_ext * features and unlikely to be useful for actual workloads. @@ -62,9 +63,21 @@ struct { }, }; +/* + * Per-queue sequence numbers to implement core-sched ordering. + * + * Tail seq is assigned to each queued task and incremented. Head seq tracks the + * sequence number of the latest dispatched task. The distance between the a + * task's seq and the associated queue's head seq is called the queue distance + * and used when comparing two tasks for ordering. See qmap_core_sched_before(). + */ +static u64 core_sched_head_seqs[5]; +static u64 core_sched_tail_seqs[5]; + /* Per-task scheduling context */ struct task_ctx { bool force_local; /* Dispatch directly to local_dsq */ + u64 core_sched_seq; }; struct { @@ -84,6 +97,7 @@ struct { /* Statistics */ unsigned long nr_enqueued, nr_dispatched, nr_reenqueued, nr_dequeued; +unsigned long nr_core_sched_execed; s32 BPF_STRUCT_OPS(qmap_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags) @@ -150,8 +164,18 @@ void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags) return; } - /* Is select_cpu() is telling us to enqueue locally? 
*/ - if (tctx->force_local) { + /* + * All enqueued tasks must have their core_sched_seq updated for correct + * core-sched ordering, which is why %SCX_OPS_ENQ_LAST is specified in + * qmap_ops.flags. + */ + tctx->core_sched_seq = core_sched_tail_seqs[idx]++; + + /* + * If qmap_select_cpu() is telling us to or this is the last runnable + * task on the CPU, enqueue locally. + */ + if (tctx->force_local || (enq_flags & SCX_ENQ_LAST)) { tctx->force_local = false; scx_bpf_dispatch(p, SCX_DSQ_LOCAL, slice_ns, enq_flags); return; @@ -195,6 +219,19 @@ void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags) void BPF_STRUCT_OPS(qmap_dequeue, struct task_struct *p, u64 deq_flags) { __sync_fetch_and_add(&nr_dequeued, 1); + if (deq_flags & SCX_DEQ_CORE_SCHED_EXEC) + __sync_fetch_and_add(&nr_core_sched_execed, 1); +} + +static void update_core_sched_head_seq(struct task_struct *p) +{ + struct task_ctx *tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0); + int idx = weight_to_idx(p->scx.weight); + + if (tctx) + core_sched_head_seqs[idx] = tctx->core_sched_seq; + else + scx_bpf_error("task_ctx lookup failed"); } void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) @@ -247,6 +284,7 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) p = bpf_task_from_pid(pid); if (p) { + update_core_sched_head_seq(p); __sync_fetch_and_add(&nr_dispatched, 1); scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, slice_ns, 0); bpf_task_release(p); @@ -258,6 +296,49 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) } } +/* + * The distance from the head of the queue scaled by the weight of the queue. + * The lower the number, the older the task and the higher the priority. + */ +static s64 task_qdist(struct task_struct *p) +{ + int idx = weight_to_idx(p->scx.weight); + struct task_ctx *tctx; + s64 qdist; + + tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0); + if (!tctx) { + scx_bpf_error("task_ctx lookup failed"); + return 0; + } + + qdist = tctx->core_sched_seq - core_sched_head_seqs[idx]; + + /* + * As queue index increments, the priority doubles. The queue w/ index 3 + * is dispatched twice more frequently than 2. Reflect the difference by + * scaling qdists accordingly. Note that the shift amount needs to be + * flipped depending on the sign to avoid flipping priority direction. + */ + if (qdist >= 0) + return qdist << (4 - idx); + else + return qdist << idx; +} + +/* + * This is called to determine the task ordering when core-sched is picking + * tasks to execute on SMT siblings and should encode about the same ordering as + * the regular scheduling path. Use the priority-scaled distances from the head + * of the queues to compare the two tasks which should be consistent with the + * dispatch path behavior. 
+ */ +bool BPF_STRUCT_OPS(qmap_core_sched_before, + struct task_struct *a, struct task_struct *b) +{ + return task_qdist(a) > task_qdist(b); +} + void BPF_STRUCT_OPS(qmap_cpu_release, s32 cpu, struct scx_cpu_release_args *args) { u32 cnt; @@ -309,10 +390,12 @@ struct sched_ext_ops qmap_ops = { .enqueue = (void *)qmap_enqueue, .dequeue = (void *)qmap_dequeue, .dispatch = (void *)qmap_dispatch, + .core_sched_before = (void *)qmap_core_sched_before, .cpu_release = (void *)qmap_cpu_release, .prep_enable = (void *)qmap_prep_enable, .init = (void *)qmap_init, .exit = (void *)qmap_exit, + .flags = SCX_OPS_ENQ_LAST, .timeout_ms = 5000U, .name = "qmap", }; diff --git a/tools/sched_ext/scx_example_qmap.c b/tools/sched_ext/scx_example_qmap.c index 2ae3794c9ea8..ccb4814ee61b 100644 --- a/tools/sched_ext/scx_example_qmap.c +++ b/tools/sched_ext/scx_example_qmap.c @@ -92,9 +92,10 @@ int main(int argc, char **argv) long nr_enqueued = skel->bss->nr_enqueued; long nr_dispatched = skel->bss->nr_dispatched; - printf("enq=%lu, dsp=%lu, delta=%ld, reenq=%lu, deq=%lu\n", + printf("enq=%lu, dsp=%lu, delta=%ld, reenq=%lu, deq=%lu, core=%lu\n", nr_enqueued, nr_dispatched, nr_enqueued - nr_dispatched, - skel->bss->nr_reenqueued, skel->bss->nr_dequeued); + skel->bss->nr_reenqueued, skel->bss->nr_dequeued, + skel->bss->nr_core_sched_execed); fflush(stdout); sleep(1); } From patchwork Fri Mar 17 21:33:30 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71496 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp16631wrt; Fri, 17 Mar 2023 15:22:42 -0700 (PDT) X-Google-Smtp-Source: AK7set+t8LIMe00aK53zsofe/X3VhTL2IpXYaPokX/cvhiHY0VKGqukAPi3r+wxG96LunGDYl7UN X-Received: by 2002:a17:903:3291:b0:1a1:956d:2281 with SMTP id jh17-20020a170903329100b001a1956d2281mr6746792plb.3.1679091762342; Fri, 17 Mar 2023 15:22:42 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679091762; cv=none; d=google.com; s=arc-20160816; b=s/uteHNFl3U/v2+dRCp2yvFDexPzaLgq1Vq2re9yhA2riHfA+6heBYsAIGMaN2z4mV 6lLo7ndDyYPqDMe6k7Y9vS6KyenNPCbPQCjq6lu13yENajBU2pl40xdNHZS/muplD/EG GKya7WOOGNqw1nbWcYSBHopILppENZspdZUySl9RS5f+pemejJzRioxYRCfAdE1GDU21 nf+zh4Eyo2GbsXUXl/+dohEfDWK4B9nNitalRcYEvtD3isa/ga0G2Sy6UocwNH6RJ3VT eTqt9JOGL9dRMPtu5LU7blmcBga/SQOtKdcuWa0OywNzYtUOFcH3iWve6yrLAT4K31PF l7OA== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=e3v5a/QF4Ank0ha8voxvEMIZF0kN7Rw8IJi7UglpkSY=; b=cblMxZDvOuxVH5SjgSeSL2Wvs5o+IqLpM1w95QzmQL4KmdSBTUTWxNS5H4ucNR5zAt RyMWVHoGZuaztKXFAqQ8kRMJQfJD9LQ33IvW30bugxvOwwhb7nAu0KSQY417twOXSO7y nH2NrjvnttK6TjAyeqq9XrcdEEy9xWJQHk28yprJQ66wAATQLm7Z63pObnT5eXNGMMOx B/kzb35KOy4p9O5tK+87mZn4Lnyjti3djCFYILnuL14RqDtwYKCmoaQp5plW2oLuEIB+ yNju3XGAKq/52skYjH8OLck2d2efPMlYeNMFxhSCyy81LhXDG/6lDDJP/DuAbCt9hNfI 7xxw== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=hWyzA4W8; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: from out1.vger.email (out1.vger.email. 
[2620:137:e000::1:20]) by mx.google.com with ESMTP id c7-20020a170903234700b001a182f5b847si3621666plh.495.2023.03.17.15.22.27; Fri, 17 Mar 2023 15:22:42 -0700 (PDT) Received-SPF: pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) client-ip=2620:137:e000::1:20; Authentication-Results: mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=hWyzA4W8; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231347AbjCQVh0 (ORCPT + 99 others); Fri, 17 Mar 2023 17:37:26 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:52806 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231238AbjCQVgg (ORCPT ); Fri, 17 Mar 2023 17:36:36 -0400 Received: from mail-pl1-x62c.google.com (mail-pl1-x62c.google.com [IPv6:2607:f8b0:4864:20::62c]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 32B204FCC6; Fri, 17 Mar 2023 14:35:48 -0700 (PDT) Received: by mail-pl1-x62c.google.com with SMTP id ja10so6686555plb.5; Fri, 17 Mar 2023 14:35:48 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; t=1679088876; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=e3v5a/QF4Ank0ha8voxvEMIZF0kN7Rw8IJi7UglpkSY=; b=hWyzA4W8Qkx9VaxO+OzO2zUJLyCKNVJhppjr1lYSmEEUwm7ODC7lDVxeHAYHPyfZPT h0UkI1/7VYTUYR1oxIFY6Osv4Qma3Ng3VhwfKPOhsepUufMddiBPKEma/q0mRkUkQDBc uUoZXR4ritnbC367k8dMQT9eUduIJrJyCd3KoWoNkuHRsa83hWbDmnTqckeyD8jdNHPw 5HXVtAyKqYXV2EIXulNXjc7Qx8il0sfE5DD2y4QVifKdFLSvw41hsUr5AJCvhDlXmtig u08LqUGGP7mSXyN7twbVo2fwM8/DQCWtmQHT1qq31ZwY3ylL+8DnkxMIdERluNYg5MCC RIaA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; t=1679088876; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=e3v5a/QF4Ank0ha8voxvEMIZF0kN7Rw8IJi7UglpkSY=; b=Jb/TWKtRYaVNvQGUmOfAFhOMLK8pSMAOGI5UzQAY1Ch4OtT/3QfxfIcRlJSy2f8xqf rOPrl+0Zi/dRfQmTU0Dev+DtPOs7jO/G7TrsqDWn5uRXmMkr7uLFdeB9i76kwNbYr4Ul WGMuDh+cprYfrfpDL0JQyqnqDvD4AV8CP3o5mHEyK5izmT4WbDF2S4awYw7lCfzvyWEt HNeV2cbIWBTKFIVob7Bue1lKmam1YCm8N4PCo8yfLS1tTseUUceA4GRuxF+PtWdfoZV5 HhKO3dLrDHYKHbZg5b9Z3TdQ3nLbADJ2S0LcMtV7FFZL8MzveVxRqKSG6CaVsBUd55+I HEFQ== X-Gm-Message-State: AO0yUKWLUXYRwdmQnKrA3kQeawYe0OLX62wRrof3Bjuf0hlIynL/mAZy JmvOYQ92G/LLQUd5seQ2zI4= X-Received: by 2002:a17:903:11d2:b0:19c:d5c7:e3d5 with SMTP id q18-20020a17090311d200b0019cd5c7e3d5mr10272923plh.65.1679088875900; Fri, 17 Mar 2023 14:34:35 -0700 (PDT) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id b2-20020a63d802000000b004f2c088328bsm1883612pgh.43.2023.03.17.14.34.35 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:34:35 -0700 (PDT)
Sender: Tejun Heo
From: Tejun Heo
To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com
Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo
Subject: [PATCH 29/32] sched_ext: Add vtime-ordered priority queue to dispatch_q's
Date: Fri, 17 Mar 2023 11:33:30 -1000
Message-Id: <20230317213333.2174969-30-tj@kernel.org>
X-Mailer: git-send-email 2.39.2
In-Reply-To: <20230317213333.2174969-1-tj@kernel.org>
References: <20230317213333.2174969-1-tj@kernel.org>
MIME-Version: 1.0
X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE, SPF_PASS autolearn=no autolearn_force=no version=3.4.6
X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net
Precedence: bulk
List-ID: X-Mailing-List: linux-kernel@vger.kernel.org
X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?=
X-GMAIL-THRID: =?utf-8?q?1760655323763689769?=
X-GMAIL-MSGID: =?utf-8?q?1760655323763689769?=

Currently, a dsq is always a FIFO. A task which is dispatched earlier gets consumed or executed earlier. While this is sufficient when dsq's are used as simple staging areas for tasks which are ready to execute, it'd make dsq's a lot more useful if they could implement custom ordering.

This patch adds a vtime-ordered priority queue to dsq's. When the BPF scheduler dispatches a task with the new scx_bpf_dispatch_vtime() helper, it can specify the vtime that the task should be inserted at, and the task is inserted into the priority queue of the dsq, which is ordered according to time_before64() comparison of the vtime values. When executing or consuming the dsq, the FIFO is always processed first and the priority queue is processed iff the FIFO is empty.

The design decision was made to allow both the FIFO and the priority queue to be available at the same time for all dsq's for three reasons. First, the new priority queue is useful for the local dsq's too but they also need the FIFO when consuming tasks from other dsq's as the vtimes may not be comparable across them. Second, the interface surface is smaller this way - the only additional interface necessary is scx_bpf_dispatch_vtime(). Third, the overhead isn't meaningfully different whether they're available at the same time or not.

This makes it easy and efficient for BPF schedulers to implement proper vtime-based scheduling within each dsq, at a negligible cost in terms of code complexity and overhead.

scx_example_simple and scx_example_flatcg are updated to default to weighted vtime scheduling (the latter within each cgroup). FIFO scheduling can be selected with the -f option.
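As an illustrative sketch only (not taken from the patch itself): weighted vtime scheduling on a single shared DSQ could look roughly like the following, in the spirit of what the updated scx_example_simple is described to do above. SHARED_DSQ, slice_ns, vtime_now and the vtime_before() helper are assumptions made up for this sketch, and it assumes the enqueue/running/stopping operations defined earlier in the series plus the scx_common.bpf.h boilerplate; scx_bpf_dispatch_vtime() and the p->scx.dsq_vtime, p->scx.slice and p->scx.weight fields come from this patch.

#define SHARED_DSQ	0		/* custom DSQ, assumed to be created in ops.init() */

const u64 slice_ns = 20 * 1000 * 1000;	/* 20ms default slice */
static u64 vtime_now;			/* highest vtime handed out so far */

static bool vtime_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}

void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
{
	u64 vtime = p->scx.dsq_vtime;

	/*
	 * Cap how much budget a task can accumulate while sleeping to one
	 * full slice so that a long sleeper can't starve everyone else.
	 */
	if (vtime_before(vtime, vtime_now - slice_ns))
		vtime = vtime_now - slice_ns;

	scx_bpf_dispatch_vtime(p, SHARED_DSQ, slice_ns, vtime, enq_flags);
}

void BPF_STRUCT_OPS(sketch_running, struct task_struct *p)
{
	/* keep the global vtime clock in sync with the tasks being run */
	if (vtime_before(vtime_now, p->scx.dsq_vtime))
		vtime_now = p->scx.dsq_vtime;
}

void BPF_STRUCT_OPS(sketch_stopping, struct task_struct *p, bool runnable)
{
	/*
	 * Charge the consumed part of the slice, scaled inversely by weight
	 * so that heavier tasks accumulate vtime more slowly.
	 */
	p->scx.dsq_vtime += (slice_ns - p->scx.slice) * 100 / p->scx.weight;
}

A complete scheduler would pair this with an ops.dispatch() that moves tasks from SHARED_DSQ to the local DSQ; that part is omitted here.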
Signed-off-by: Tejun Heo Reviewed-by: David Vernet --- include/linux/sched/ext.h | 16 ++- init/init_task.c | 2 +- kernel/sched/core.c | 3 +- kernel/sched/ext.c | 137 ++++++++++++++++++++--- kernel/sched/ext.h | 1 + tools/sched_ext/scx_common.bpf.h | 1 + tools/sched_ext/scx_example_flatcg.bpf.c | 50 ++++++++- tools/sched_ext/scx_example_flatcg.c | 4 + tools/sched_ext/scx_example_simple.bpf.c | 66 ++++++++++- tools/sched_ext/scx_example_simple.c | 6 +- 10 files changed, 262 insertions(+), 24 deletions(-) diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 210b8516d197..fe2b051230b2 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -583,6 +583,7 @@ struct sched_ext_ops { struct scx_dispatch_q { raw_spinlock_t lock; struct list_head fifo; /* processed in dispatching order */ + struct rb_root_cached priq; /* processed in p->scx.dsq_vtime order */ u32 nr; u64 id; struct rhash_head hash_node; @@ -595,6 +596,7 @@ enum scx_ent_flags { SCX_TASK_QUEUED = 1 << 0, /* on ext runqueue */ SCX_TASK_BAL_KEEP = 1 << 1, /* balance decided to keep current */ SCX_TASK_ENQ_LOCAL = 1 << 2, /* used by scx_select_cpu_dfl() to set SCX_ENQ_LOCAL */ + SCX_TASK_ON_DSQ_PRIQ = 1 << 3, /* task is queued on the priority queue of a dsq */ SCX_TASK_OPS_PREPPED = 1 << 8, /* prepared for BPF scheduler enable */ SCX_TASK_OPS_ENABLED = 1 << 9, /* task has BPF scheduler enabled */ @@ -636,7 +638,10 @@ enum scx_kf_mask { */ struct sched_ext_entity { struct scx_dispatch_q *dsq; - struct list_head dsq_node; + struct { + struct list_head fifo; /* dispatch order */ + struct rb_node priq; /* p->scx.dsq_vtime order */ + } dsq_node; struct list_head watchdog_node; u32 flags; /* protected by rq lock */ u32 weight; @@ -664,6 +669,15 @@ struct sched_ext_entity { */ u64 slice; + /* + * Used to order tasks when dispatching to the vtime-ordered priority + * queue of a dsq. This is usually set through scx_bpf_dispatch_vtime() + * but can also be modified directly by the BPF scheduler. Modifying it + * while a task is queued on a dsq may mangle the ordering and is not + * recommended. + */ + u64 dsq_vtime; + /* * If set, reject future sched_setscheduler(2) calls updating the policy * to %SCHED_EXT with -%EACCES. 
diff --git a/init/init_task.c b/init/init_task.c index 913194aab623..7ea89ccd0cf1 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -105,7 +105,7 @@ struct task_struct init_task #endif #ifdef CONFIG_SCHED_CLASS_EXT .scx = { - .dsq_node = LIST_HEAD_INIT(init_task.scx.dsq_node), + .dsq_node.fifo = LIST_HEAD_INIT(init_task.scx.dsq_node.fifo), .watchdog_node = LIST_HEAD_INIT(init_task.scx.watchdog_node), .sticky_cpu = -1, .holding_cpu = -1, diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 146d736fe73b..b33389e17765 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4495,7 +4495,8 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) #ifdef CONFIG_SCHED_CLASS_EXT p->scx.dsq = NULL; - INIT_LIST_HEAD(&p->scx.dsq_node); + INIT_LIST_HEAD(&p->scx.dsq_node.fifo); + RB_CLEAR_NODE(&p->scx.dsq_node.priq); INIT_LIST_HEAD(&p->scx.watchdog_node); p->scx.flags = 0; p->scx.weight = 0; diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 7f593f2012c2..869d11e738cd 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -595,12 +595,25 @@ static void update_curr_scx(struct rq *rq) } } +static bool scx_dsq_priq_less(struct rb_node *node_a, + const struct rb_node *node_b) +{ + const struct task_struct *a = + container_of(node_a, struct task_struct, scx.dsq_node.priq); + const struct task_struct *b = + container_of(node_b, struct task_struct, scx.dsq_node.priq); + + return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime); +} + static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p, u64 enq_flags) { bool is_local = dsq->id == SCX_DSQ_LOCAL; - WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_node)); + WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_node.fifo)); + WARN_ON_ONCE((p->scx.flags & SCX_TASK_ON_DSQ_PRIQ) || + !RB_EMPTY_NODE(&p->scx.dsq_node.priq)); if (!is_local) { raw_spin_lock(&dsq->lock); @@ -613,10 +626,16 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p, } } - if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) - list_add(&p->scx.dsq_node, &dsq->fifo); - else - list_add_tail(&p->scx.dsq_node, &dsq->fifo); + if (enq_flags & SCX_ENQ_DSQ_PRIQ) { + p->scx.flags |= SCX_TASK_ON_DSQ_PRIQ; + rb_add_cached(&p->scx.dsq_node.priq, &dsq->priq, + scx_dsq_priq_less); + } else { + if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) + list_add(&p->scx.dsq_node.fifo, &dsq->fifo); + else + list_add_tail(&p->scx.dsq_node.fifo, &dsq->fifo); + } dsq->nr++; p->scx.dsq = dsq; @@ -645,13 +664,31 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p, } } +static void task_unlink_from_dsq(struct task_struct *p, + struct scx_dispatch_q *dsq) +{ + if (p->scx.flags & SCX_TASK_ON_DSQ_PRIQ) { + rb_erase_cached(&p->scx.dsq_node.priq, &dsq->priq); + RB_CLEAR_NODE(&p->scx.dsq_node.priq); + p->scx.flags &= ~SCX_TASK_ON_DSQ_PRIQ; + } else { + list_del_init(&p->scx.dsq_node.fifo); + } +} + +static bool task_linked_on_dsq(struct task_struct *p) +{ + return !list_empty(&p->scx.dsq_node.fifo) || + !RB_EMPTY_NODE(&p->scx.dsq_node.priq); +} + static void dispatch_dequeue(struct scx_rq *scx_rq, struct task_struct *p) { struct scx_dispatch_q *dsq = p->scx.dsq; bool is_local = dsq == &scx_rq->local_dsq; if (!dsq) { - WARN_ON_ONCE(!list_empty(&p->scx.dsq_node)); + WARN_ON_ONCE(task_linked_on_dsq(p)); /* * When dispatching directly from the BPF scheduler to a local * DSQ, the task isn't associated with any DSQ but @@ -672,8 +709,8 @@ static void dispatch_dequeue(struct scx_rq *scx_rq, struct 
task_struct *p) */ if (p->scx.holding_cpu < 0) { /* @p must still be on @dsq, dequeue */ - WARN_ON_ONCE(list_empty(&p->scx.dsq_node)); - list_del_init(&p->scx.dsq_node); + WARN_ON_ONCE(!task_linked_on_dsq(p)); + task_unlink_from_dsq(p, dsq); dsq->nr--; } else { /* @@ -682,7 +719,7 @@ static void dispatch_dequeue(struct scx_rq *scx_rq, struct task_struct *p) * holding_cpu which tells dispatch_to_local_dsq() that it lost * the race. */ - WARN_ON_ONCE(!list_empty(&p->scx.dsq_node)); + WARN_ON_ONCE(task_linked_on_dsq(p)); p->scx.holding_cpu = -1; } p->scx.dsq = NULL; @@ -1146,33 +1183,52 @@ static void dispatch_to_local_dsq_unlock(struct rq *rq, struct rq_flags *rf, #endif /* CONFIG_SMP */ +static bool task_can_run_on_rq(struct task_struct *p, struct rq *rq) +{ + return likely(test_rq_online(rq)) && !is_migration_disabled(p) && + cpumask_test_cpu(cpu_of(rq), p->cpus_ptr); +} + static bool consume_dispatch_q(struct rq *rq, struct rq_flags *rf, struct scx_dispatch_q *dsq) { struct scx_rq *scx_rq = &rq->scx; struct task_struct *p; + struct rb_node *rb_node; struct rq *task_rq; bool moved = false; retry: - if (list_empty(&dsq->fifo)) + if (list_empty(&dsq->fifo) && !rb_first_cached(&dsq->priq)) return false; raw_spin_lock(&dsq->lock); - list_for_each_entry(p, &dsq->fifo, scx.dsq_node) { + + list_for_each_entry(p, &dsq->fifo, scx.dsq_node.fifo) { + task_rq = task_rq(p); + if (rq == task_rq) + goto this_rq; + if (task_can_run_on_rq(p, rq)) + goto remote_rq; + } + + for (rb_node = rb_first_cached(&dsq->priq); rb_node; + rb_node = rb_next(rb_node)) { + p = container_of(rb_node, struct task_struct, scx.dsq_node.priq); task_rq = task_rq(p); if (rq == task_rq) goto this_rq; - if (likely(test_rq_online(rq)) && !is_migration_disabled(p) && - cpumask_test_cpu(cpu_of(rq), p->cpus_ptr)) + if (task_can_run_on_rq(p, rq)) goto remote_rq; } + raw_spin_unlock(&dsq->lock); return false; this_rq: /* @dsq is locked and @p is on this rq */ WARN_ON_ONCE(p->scx.holding_cpu >= 0); - list_move_tail(&p->scx.dsq_node, &scx_rq->local_dsq.fifo); + task_unlink_from_dsq(p, dsq); + list_add_tail(&p->scx.dsq_node.fifo, &scx_rq->local_dsq.fifo); dsq->nr--; scx_rq->local_dsq.nr++; p->scx.dsq = &scx_rq->local_dsq; @@ -1189,7 +1245,7 @@ static bool consume_dispatch_q(struct rq *rq, struct rq_flags *rf, * move_task_to_local_dsq(). 
*/ WARN_ON_ONCE(p->scx.holding_cpu >= 0); - list_del_init(&p->scx.dsq_node); + task_unlink_from_dsq(p, dsq); dsq->nr--; p->scx.holding_cpu = raw_smp_processor_id(); raw_spin_unlock(&dsq->lock); @@ -1692,8 +1748,18 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p) static struct task_struct *first_local_task(struct rq *rq) { - return list_first_entry_or_null(&rq->scx.local_dsq.fifo, - struct task_struct, scx.dsq_node); + struct rb_node *rb_node; + + if (!list_empty(&rq->scx.local_dsq.fifo)) + return list_first_entry(&rq->scx.local_dsq.fifo, + struct task_struct, scx.dsq_node.fifo); + + rb_node = rb_first_cached(&rq->scx.local_dsq.priq); + if (rb_node) + return container_of(rb_node, + struct task_struct, scx.dsq_node.priq); + + return NULL; } static struct task_struct *pick_next_task_scx(struct rq *rq) @@ -3360,6 +3426,9 @@ static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log, if (off >= offsetof(struct task_struct, scx.slice) && off + size <= offsetofend(struct task_struct, scx.slice)) return SCALAR_VALUE; + if (off >= offsetof(struct task_struct, scx.dsq_vtime) && + off + size <= offsetofend(struct task_struct, scx.dsq_vtime)) + return SCALAR_VALUE; if (off >= offsetof(struct task_struct, scx.disallow) && off + size <= offsetofend(struct task_struct, scx.disallow)) return SCALAR_VALUE; @@ -3745,8 +3814,42 @@ void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, scx_dispatch_commit(p, dsq_id, enq_flags); } +/** + * scx_bpf_dispatch_vtime - Dispatch a task into the vtime priority queue of a DSQ + * @p: task_struct to dispatch + * @dsq_id: DSQ to dispatch to + * @slice: duration @p can run for in nsecs + * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ + * @enq_flags: SCX_ENQ_* + * + * Dispatch @p into the vtime priority queue of the DSQ identified by @dsq_id. + * Tasks queued into the priority queue are ordered by @vtime and always + * consumed after the tasks in the FIFO queue. All other aspects are identical + * to scx_bpf_dispatch(). + * + * @vtime ordering is according to time_before64() which considers wrapping. A + * numerically larger vtime may indicate an earlier position in the ordering and + * vice-versa. 
+ */ +void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id, u64 slice, + u64 vtime, u64 enq_flags) +{ + if (!scx_dispatch_preamble(p, enq_flags)) + return; + + if (slice) + p->scx.slice = slice; + else + p->scx.slice = p->scx.slice ?: 1; + + p->scx.dsq_vtime = vtime; + + scx_dispatch_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); +} + BTF_SET8_START(scx_kfunc_ids_enqueue_dispatch) BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU) +BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU) BTF_SET8_END(scx_kfunc_ids_enqueue_dispatch) static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = { diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index 7b7973e6d8c0..b5a31fae2168 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -63,6 +63,7 @@ enum scx_enq_flags { __SCX_ENQ_INTERNAL_MASK = 0xffLLU << 56, SCX_ENQ_CLEAR_OPSS = 1LLU << 56, + SCX_ENQ_DSQ_PRIQ = 1LLU << 57, }; enum scx_deq_flags { diff --git a/tools/sched_ext/scx_common.bpf.h b/tools/sched_ext/scx_common.bpf.h index 95f30dc9d625..06d98556ff19 100644 --- a/tools/sched_ext/scx_common.bpf.h +++ b/tools/sched_ext/scx_common.bpf.h @@ -57,6 +57,7 @@ s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym; bool scx_bpf_consume(u64 dsq_id) __ksym; u32 scx_bpf_dispatch_nr_slots(void) __ksym; void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym; +void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym; void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym; s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym; bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym; diff --git a/tools/sched_ext/scx_example_flatcg.bpf.c b/tools/sched_ext/scx_example_flatcg.bpf.c index 9632bab7f164..f6078b9a681f 100644 --- a/tools/sched_ext/scx_example_flatcg.bpf.c +++ b/tools/sched_ext/scx_example_flatcg.bpf.c @@ -38,6 +38,10 @@ * this isn't a real concern especially given the performance gain. Also, there * are ways to mitigate the problem further by e.g. introducing an extra * scheduling layer on cgroup delegation boundaries. + * + * The scheduler first picks the cgroup to run and then schedule the tasks + * within by using nested weighted vtime scheduling by default. The + * cgroup-internal scheduling can be switched to FIFO with the -f option. */ #include "scx_common.bpf.h" #include "user_exit_info.h" @@ -47,6 +51,7 @@ char _license[] SEC("license") = "GPL"; const volatile u32 nr_cpus = 32; /* !0 for veristat, set during init */ const volatile u64 cgrp_slice_ns = SCX_SLICE_DFL; +const volatile bool fifo_sched; const volatile bool switch_partial; u64 cvtime_now; @@ -350,7 +355,21 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags) if (!cgc) goto out_release; - scx_bpf_dispatch(p, cgrp->kn->id, SCX_SLICE_DFL, enq_flags); + if (fifo_sched) { + scx_bpf_dispatch(p, cgrp->kn->id, SCX_SLICE_DFL, enq_flags); + } else { + u64 tvtime = p->scx.dsq_vtime; + + /* + * Limit the amount of budget that an idling task can accumulate + * to one slice. 
+ */ + if (vtime_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL)) + tvtime = cgc->tvtime_now - SCX_SLICE_DFL; + + scx_bpf_dispatch_vtime(p, cgrp->kn->id, SCX_SLICE_DFL, + tvtime, enq_flags); + } cgrp_enqueued(cgrp, cgc); out_release: @@ -462,12 +481,40 @@ void BPF_STRUCT_OPS(fcg_runnable, struct task_struct *p, u64 enq_flags) bpf_cgroup_release(cgrp); } +void BPF_STRUCT_OPS(fcg_running, struct task_struct *p) +{ + struct cgroup *cgrp; + struct fcg_cgrp_ctx *cgc; + + if (fifo_sched) + return; + + cgrp = scx_bpf_task_cgroup(p); + cgc = find_cgrp_ctx(cgrp); + if (cgc) { + /* + * @cgc->tvtime_now always progresses forward as tasks start + * executing. The test and update can be performed concurrently + * from multiple CPUs and thus racy. Any error should be + * contained and temporary. Let's just live with it. + */ + if (vtime_before(cgc->tvtime_now, p->scx.dsq_vtime)) + cgc->tvtime_now = p->scx.dsq_vtime; + } + bpf_cgroup_release(cgrp); +} + void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable) { struct fcg_task_ctx *taskc; struct cgroup *cgrp; struct fcg_cgrp_ctx *cgc; + /* scale the execution time by the inverse of the weight and charge */ + if (!fifo_sched) + p->scx.dsq_vtime += + (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight; + taskc = bpf_task_storage_get(&task_ctx, p, 0, 0); if (!taskc) { scx_bpf_error("task_ctx lookup failed"); @@ -811,6 +858,7 @@ struct sched_ext_ops flatcg_ops = { .enqueue = (void *)fcg_enqueue, .dispatch = (void *)fcg_dispatch, .runnable = (void *)fcg_runnable, + .running = (void *)fcg_running, .stopping = (void *)fcg_stopping, .quiescent = (void *)fcg_quiescent, .prep_enable = (void *)fcg_prep_enable, diff --git a/tools/sched_ext/scx_example_flatcg.c b/tools/sched_ext/scx_example_flatcg.c index 150f7e16996e..a78555cbe80d 100644 --- a/tools/sched_ext/scx_example_flatcg.c +++ b/tools/sched_ext/scx_example_flatcg.c @@ -30,6 +30,7 @@ const char help_fmt[] = "\n" " -s SLICE_US Override slice duration\n" " -i INTERVAL Report interval\n" +" -f Use FIFO scheduling instead of weighted vtime scheduling\n" " -p Switch only tasks on SCHED_EXT policy intead of all\n" " -h Display this help and exit\n"; @@ -149,6 +150,9 @@ int main(int argc, char **argv) case 'd': dump_cgrps = true; break; + case 'f': + skel->rodata->fifo_sched = true; + break; case 'p': skel->rodata->switch_partial = true; break; diff --git a/tools/sched_ext/scx_example_simple.bpf.c b/tools/sched_ext/scx_example_simple.bpf.c index fa5ae683ace1..4bccca3e2047 100644 --- a/tools/sched_ext/scx_example_simple.bpf.c +++ b/tools/sched_ext/scx_example_simple.bpf.c @@ -2,11 +2,20 @@ /* * A simple scheduler. * - * A simple global FIFO scheduler. It also demonstrates the following niceties. + * By default, it operates as a simple global weighted vtime scheduler and can + * be switched to FIFO scheduling. It also demonstrates the following niceties. * * - Statistics tracking how many tasks are queued to local and global dsq's. * - Termination notification for userspace. * + * While very simple, this scheduler should work reasonably well on CPUs with a + * uniform L3 cache topology. While preemption is not implemented, the fact that + * the scheduling queue is shared across all CPUs means that whatever is at the + * front of the queue is likely to be executed fairly quickly given enough + * number of CPUs. The FIFO scheduling mode may be beneficial to some workloads + * but comes with the usual problems with FIFO scheduling where saturating + * threads can easily drown out interactive ones. 
+ * * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. * Copyright (c) 2022 Tejun Heo * Copyright (c) 2022 David Vernet @@ -15,8 +24,10 @@ char _license[] SEC("license") = "GPL"; +const volatile bool fifo_sched; const volatile bool switch_partial; +static u64 vtime_now; struct user_exit_info uei; struct { @@ -33,8 +44,18 @@ static void stat_inc(u32 idx) (*cnt_p)++; } +static inline bool vtime_before(u64 a, u64 b) +{ + return (s64)(a - b) < 0; +} + void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags) { + /* + * If scx_select_cpu_dfl() is setting %SCX_ENQ_LOCAL, it indicates that + * running @p on its CPU directly shouldn't affect fairness. Just queue + * it on the local FIFO. + */ if (enq_flags & SCX_ENQ_LOCAL) { stat_inc(0); /* count local queueing */ scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags); @@ -42,7 +63,46 @@ void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags) } stat_inc(1); /* count global queueing */ - scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags); + + if (fifo_sched) { + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags); + } else { + u64 vtime = p->scx.dsq_vtime; + + /* + * Limit the amount of budget that an idling task can accumulate + * to one slice. + */ + if (vtime_before(vtime, vtime_now - SCX_SLICE_DFL)) + vtime = vtime_now - SCX_SLICE_DFL; + + scx_bpf_dispatch_vtime(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, vtime, + enq_flags); + } +} + +void BPF_STRUCT_OPS(simple_running, struct task_struct *p) +{ + if (fifo_sched) + return; + + /* + * Global vtime always progresses forward as tasks start executing. The + * test and update can be performed concurrently from multiple CPUs and + * thus racy. Any error should be contained and temporary. Let's just + * live with it. 
+ */ + if (vtime_before(vtime_now, p->scx.dsq_vtime)) + vtime_now = p->scx.dsq_vtime; +} + +void BPF_STRUCT_OPS(simple_stopping, struct task_struct *p, bool runnable) +{ + if (fifo_sched) + return; + + /* scale the execution time by the inverse of the weight and charge */ + p->scx.dsq_vtime += (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight; } s32 BPF_STRUCT_OPS(simple_init) @@ -60,6 +120,8 @@ void BPF_STRUCT_OPS(simple_exit, struct scx_exit_info *ei) SEC(".struct_ops") struct sched_ext_ops simple_ops = { .enqueue = (void *)simple_enqueue, + .running = (void *)simple_running, + .stopping = (void *)simple_stopping, .init = (void *)simple_init, .exit = (void *)simple_exit, .name = "simple", diff --git a/tools/sched_ext/scx_example_simple.c b/tools/sched_ext/scx_example_simple.c index 868fd39e45c7..23741daa7bf7 100644 --- a/tools/sched_ext/scx_example_simple.c +++ b/tools/sched_ext/scx_example_simple.c @@ -21,6 +21,7 @@ const char help_fmt[] = "\n" "Usage: %s [-p]\n" "\n" +" -f Use FIFO scheduling instead of weighted vtime scheduling\n" " -p Switch only tasks on SCHED_EXT policy intead of all\n" " -h Display this help and exit\n"; @@ -65,8 +66,11 @@ int main(int argc, char **argv) skel = scx_example_simple__open(); assert(skel); - while ((opt = getopt(argc, argv, "ph")) != -1) { + while ((opt = getopt(argc, argv, "fph")) != -1) { switch (opt) { + case 'f': + skel->rodata->fifo_sched = true; + break; case 'p': skel->rodata->switch_partial = true; break; From patchwork Fri Mar 17 21:33:31 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71475 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp4722wrt; Fri, 17 Mar 2023 14:46:44 -0700 (PDT) X-Google-Smtp-Source: AK7set/5T5TQgRomVsPU1O7kdLMwIxsIXPZEfaAsa5oGmSr66KD+gBV1YURj/j5hwvf9nYPPcuNd X-Received: by 2002:a17:902:7296:b0:1a0:463d:fd09 with SMTP id d22-20020a170902729600b001a0463dfd09mr8157495pll.1.1679089604545; Fri, 17 Mar 2023 14:46:44 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679089604; cv=none; d=google.com; s=arc-20160816; b=TXgPyKslGqkL+koTKBVU9kBPtXx4mhEDrixsrQ2dJ9/J1hxFixoW9Ibes/Ue5N1vx1 3eWBHG4DHBtk+o17FQoBxNlRTvEVS+p1HfK5jotIFdX9V7WuGTB9gSrcy3imzBX8inS5 fz7ATKNOhkRfyjBwoptpuUzAipRhzMEAcL3vpZTQc6teMnxzhMFiStlLYucRBYIJzC90 To1H5R3IBM+9WA5MLjq8qpyRcXNfVoZ359qYRomTLcUKNQzWfbu2COurSg0+/R/2fv/w 2MpsrnSGhti2kQLwHE3n/zfvuN3aqU5NpgBYHBMEc0yTT2CbENDI2SkO2YwNZnxRo81S 6KJg== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=X7Hqrh9uN2SM85IaShnwVlwcijKtM51E3ZRSdB4nYhM=; b=Rb+FIwsnDanIGgq4oKrnrud4rw9WfPkqbwF/Vw0yiVIBfuVJNsNPi2WmVSmdZ8blIk 2KPxz3AyVmTfxoBPmvyg2ji8ZhGcYeCgJMemf2mQFqMykQz+HStNaticuKIORUkpYQ4f ymqmM8FLY9N0Tg4vl8rIqun1waAZHOYvWFGvliTRG/qqF9NGbwKFgD1R+afOT0Sh+5SY 65rB0AlcLnR5asKi/RZNXfTYDQrYNIvjrajkFQlQQGODGjmA84UBom+vUcjwn1YERSxZ QUdaZWIPwc/fBfnGK05rLOrd4ZewMt1rrMuCqLIjVMpwvy6wC4zgiPlDluOiA1Az2qoJ wBmA== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=Cb2TShSH; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: from out1.vger.email (out1.vger.email. 

[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id s19-20020a170902989300b00186cf82717fsm1970146plp.165.2023.03.17.14.34.37 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:34:37 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo , Bagas Sanjaya Subject: [PATCH 30/32] sched_ext: Documentation: scheduler: Document extensible scheduler class Date: Fri, 17 Mar 2023 11:33:31 -1000 Message-Id: <20230317213333.2174969-31-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE, SPF_PASS,URIBL_BLOCKED autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760653060809678327?= X-GMAIL-MSGID: =?utf-8?q?1760653060809678327?= Add Documentation/scheduler/sched-ext.rst which gives a high-level overview and pointers to the examples. v2: Apply minor edits suggested by Bagas. Caveats section dropped as all of them are addressed. Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden Cc: Bagas Sanjaya Reviewed-by: Bagas Sanjaya --- Documentation/scheduler/index.rst | 1 + Documentation/scheduler/sched-ext.rst | 230 ++++++++++++++++++++++++++ include/linux/sched/ext.h | 2 + kernel/Kconfig.preempt | 2 + kernel/sched/ext.c | 2 + kernel/sched/ext.h | 2 + 6 files changed, 239 insertions(+) create mode 100644 Documentation/scheduler/sched-ext.rst diff --git a/Documentation/scheduler/index.rst b/Documentation/scheduler/index.rst index 3170747226f6..0b650bb550e6 100644 --- a/Documentation/scheduler/index.rst +++ b/Documentation/scheduler/index.rst @@ -19,6 +19,7 @@ Scheduler sched-nice-design sched-rt-group sched-stats + sched-ext sched-debug text_files diff --git a/Documentation/scheduler/sched-ext.rst b/Documentation/scheduler/sched-ext.rst new file mode 100644 index 000000000000..84c30b44f104 --- /dev/null +++ b/Documentation/scheduler/sched-ext.rst @@ -0,0 +1,230 @@ +========================== +Extensible Scheduler Class +========================== + +sched_ext is a scheduler class whose behavior can be defined by a set of BPF +programs - the BPF scheduler. + +* sched_ext exports a full scheduling interface so that any scheduling + algorithm can be implemented on top. + +* The BPF scheduler can group CPUs however it sees fit and schedule them + together, as tasks aren't tied to specific CPUs at the time of wakeup. + +* The BPF scheduler can be turned on and off dynamically anytime. 
+ +* The system integrity is maintained no matter what the BPF scheduler does. + The default scheduling behavior is restored anytime an error is detected, + a runnable task stalls, or on invoking the SysRq key sequence + :kbd:`SysRq-S`. + +Switching to and from sched_ext +=============================== + +``CONFIG_SCHED_CLASS_EXT`` is the config option to enable sched_ext and +``tools/sched_ext`` contains the example schedulers. + +sched_ext is used only when the BPF scheduler is loaded and running. + +If a task explicitly sets its scheduling policy to ``SCHED_EXT``, it will be +treated as ``SCHED_NORMAL`` and scheduled by CFS until the BPF scheduler is +loaded. On load, such tasks will be switched to and scheduled by sched_ext. + +The BPF scheduler can choose to schedule all normal and lower class tasks by +calling ``scx_bpf_switch_all()`` from its ``init()`` operation. In this +case, all ``SCHED_NORMAL``, ``SCHED_BATCH``, ``SCHED_IDLE`` and +``SCHED_EXT`` tasks are scheduled by sched_ext. In the example schedulers, +this mode can be selected with the ``-a`` option. + +Terminating the sched_ext scheduler program, triggering :kbd:`SysRq-S`, or +detection of any internal error including stalled runnable tasks aborts the +BPF scheduler and reverts all tasks back to CFS. + +.. code-block:: none + + # make -j16 -C tools/sched_ext + # tools/sched_ext/scx_example_simple + local=0 global=3 + local=5 global=24 + local=9 global=44 + local=13 global=56 + local=17 global=72 + ^CEXIT: BPF scheduler unregistered + +If ``CONFIG_SCHED_DEBUG`` is set, the current status of the BPF scheduler +and whether a given task is on sched_ext can be determined as follows: + +.. code-block:: none + + # cat /sys/kernel/debug/sched/ext + ops : simple + enabled : 1 + switching_all : 1 + switched_all : 1 + enable_state : enabled + + # grep ext /proc/self/sched + ext.enabled : 1 + +The Basics +========== + +Userspace can implement an arbitrary BPF scheduler by loading a set of BPF +programs that implement ``struct sched_ext_ops``. The only mandatory field +is ``ops.name`` which must be a valid BPF object name. All operations are +optional. The following modified excerpt is from +``tools/sched/scx_example_simple.bpf.c`` showing a minimal global FIFO +scheduler. + +.. code-block:: c + + s32 BPF_STRUCT_OPS(simple_init) + { + if (!switch_partial) + scx_bpf_switch_all(); + return 0; + } + + void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags) + { + if (enq_flags & SCX_ENQ_LOCAL) + scx_bpf_dispatch(p, SCX_DSQ_LOCAL, enq_flags); + else + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, enq_flags); + } + + void BPF_STRUCT_OPS(simple_exit, struct scx_exit_info *ei) + { + exit_type = ei->type; + } + + SEC(".struct_ops") + struct sched_ext_ops simple_ops = { + .enqueue = (void *)simple_enqueue, + .init = (void *)simple_init, + .exit = (void *)simple_exit, + .name = "simple", + }; + +Dispatch Queues +--------------- + +To match the impedance between the scheduler core and the BPF scheduler, +sched_ext uses DSQs (dispatch queues) which can operate as both a FIFO and a +priority queue. By default, there is one global FIFO (``SCX_DSQ_GLOBAL``), +and one local dsq per CPU (``SCX_DSQ_LOCAL``). The BPF scheduler can manage +an arbitrary number of dsq's using ``scx_bpf_create_dsq()`` and +``scx_bpf_destroy_dsq()``. + +A CPU always executes a task from its local DSQ. A task is "dispatched" to a +DSQ. A non-local DSQ is "consumed" to transfer a task to the consuming CPU's +local DSQ. 
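As a rough sketch (not one of the shipped examples), a scheduler built around a single
custom DSQ could create it from ``ops.init()``, dispatch to it from ``ops.enqueue()``
and consume it from ``ops.dispatch()``. The DSQ ID and the ``-1`` NUMA node argument
below are illustrative choices:

.. code-block:: c

   #define MY_DSQ_ID 0	/* arbitrary custom DSQ ID, must be below 2^63 */

   s32 BPF_STRUCT_OPS(mine_init)
   {
           /* -1 is assumed here to mean no NUMA node preference */
           return scx_bpf_create_dsq(MY_DSQ_ID, -1);
   }

   void BPF_STRUCT_OPS(mine_enqueue, struct task_struct *p, u64 enq_flags)
   {
           scx_bpf_dispatch(p, MY_DSQ_ID, SCX_SLICE_DFL, enq_flags);
   }

   void BPF_STRUCT_OPS(mine_dispatch, s32 cpu, struct task_struct *prev)
   {
           scx_bpf_consume(MY_DSQ_ID);
   }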
+ +When a CPU is looking for the next task to run, if the local DSQ is not +empty, the first task is picked. Otherwise, the CPU tries to consume the +global DSQ. If that doesn't yield a runnable task either, ``ops.dispatch()`` +is invoked. + +Scheduling Cycle +---------------- + +The following briefly shows how a waking task is scheduled and executed. + +1. When a task is waking up, ``ops.select_cpu()`` is the first operation + invoked. This serves two purposes. First, CPU selection optimization + hint. Second, waking up the selected CPU if idle. + + The CPU selected by ``ops.select_cpu()`` is an optimization hint and not + binding. The actual decision is made at the last step of scheduling. + However, there is a small performance gain if the CPU + ``ops.select_cpu()`` returns matches the CPU the task eventually runs on. + + A side-effect of selecting a CPU is waking it up from idle. While a BPF + scheduler can wake up any cpu using the ``scx_bpf_kick_cpu()`` helper, + using ``ops.select_cpu()`` judiciously can be simpler and more efficient. + + Note that the scheduler core will ignore an invalid CPU selection, for + example, if it's outside the allowed cpumask of the task. + +2. Once the target CPU is selected, ``ops.enqueue()`` is invoked. It can + make one of the following decisions: + + * Immediately dispatch the task to either the global or local DSQ by + calling ``scx_bpf_dispatch()`` with ``SCX_DSQ_GLOBAL`` or + ``SCX_DSQ_LOCAL``, respectively. + + * Immediately dispatch the task to a custom DSQ by calling + ``scx_bpf_dispatch()`` with a DSQ ID which is smaller than 2^63. + + * Queue the task on the BPF side. + +3. When a CPU is ready to schedule, it first looks at its local DSQ. If + empty, it then looks at the global DSQ. If there still isn't a task to + run, ``ops.dispatch()`` is invoked which can use the following two + functions to populate the local DSQ. + + * ``scx_bpf_dispatch()`` dispatches a task to a DSQ. Any target DSQ can + be used - ``SCX_DSQ_LOCAL``, ``SCX_DSQ_LOCAL_ON | cpu``, + ``SCX_DSQ_GLOBAL`` or a custom DSQ. While ``scx_bpf_dispatch()`` + currently can't be called with BPF locks held, this is being worked on + and will be supported. ``scx_bpf_dispatch()`` schedules dispatching + rather than performing them immediately. There can be up to + ``ops.dispatch_max_batch`` pending tasks. + + * ``scx_bpf_consume()`` tranfers a task from the specified non-local DSQ + to the dispatching DSQ. This function cannot be called with any BPF + locks held. ``scx_bpf_consume()`` flushes the pending dispatched tasks + before trying to consume the specified DSQ. + +4. After ``ops.dispatch()`` returns, if there are tasks in the local DSQ, + the CPU runs the first one. If empty, the following steps are taken: + + * Try to consume the global DSQ. If successful, run the task. + + * If ``ops.dispatch()`` has dispatched any tasks, retry #3. + + * If the previous task is an SCX task and still runnable, keep executing + it (see ``SCX_OPS_ENQ_LAST``). + + * Go idle. + +Note that the BPF scheduler can always choose to dispatch tasks immediately +in ``ops.enqueue()`` as illustrated in the above simple example. If only the +built-in DSQs are used, there is no need to implement ``ops.dispatch()`` as +a task is never queued on the BPF scheduler and both the local and global +DSQs are consumed automatically. + +``scx_bpf_dispatch()`` queues the task on the FIFO of the target DSQ. Use +``scx_bpf_dispatch_vtime()`` for the priority queue. 
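For illustration only (this mirrors the simple example scheduler rather than defining
core API), a weighted vtime enqueue path could look roughly like:

.. code-block:: c

   /*
    * vtime_now and vtime_before() are per-scheduler helpers as in
    * scx_example_simple.bpf.c; vtime_now is advanced from ops.running().
    */
   void BPF_STRUCT_OPS(myvtime_enqueue, struct task_struct *p, u64 enq_flags)
   {
           u64 vtime = p->scx.dsq_vtime;

           /* cap the budget an idle task can accumulate to one slice */
           if (vtime_before(vtime, vtime_now - SCX_SLICE_DFL))
                   vtime = vtime_now - SCX_SLICE_DFL;

           scx_bpf_dispatch_vtime(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, vtime,
                                  enq_flags);
   }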
See the function +documentation and usage in ``tools/sched_ext/scx_example_simple.bpf.c`` for +more information. + +Where to Look +============= + +* ``include/linux/sched/ext.h`` defines the core data structures, ops table + and constants. + +* ``kernel/sched/ext.c`` contains sched_ext core implementation and helpers. + The functions prefixed with ``scx_bpf_`` can be called from the BPF + scheduler. + +* ``tools/sched_ext/`` hosts example BPF scheduler implementations. + + * ``scx_example_simple[.bpf].c``: Minimal global FIFO scheduler example + using a custom DSQ. + + * ``scx_example_qmap[.bpf].c``: A multi-level FIFO scheduler supporting + five levels of priority implemented with ``BPF_MAP_TYPE_QUEUE``. + +ABI Instability +=============== + +The APIs provided by sched_ext to BPF schedulers programs have no stability +guarantees. This includes the ops table callbacks and constants defined in +``include/linux/sched/ext.h``, as well as the ``scx_bpf_`` kfuncs defined in +``kernel/sched/ext.c``. + +While we will attempt to provide a relatively stable API surface when +possible, they are subject to change without warning between kernel +versions. diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index fe2b051230b2..61837aac8ab3 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -1,5 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* + * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst + * * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. * Copyright (c) 2022 Tejun Heo * Copyright (c) 2022 David Vernet diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index e12a057ead7b..bae49b743834 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -154,3 +154,5 @@ config SCHED_CLASS_EXT wish to implement scheduling policies. The struct_ops structure exported by sched_ext is struct sched_ext_ops, and is conceptually similar to struct sched_class. + + See Documentation/scheduler/sched-ext.rst for more details. diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 869d11e738cd..f4a2b1d1374a 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1,5 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* + * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst + * * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. * Copyright (c) 2022 Tejun Heo * Copyright (c) 2022 David Vernet diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index b5a31fae2168..998b790b3928 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -1,5 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* + * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst + * * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
* Copyright (c) 2022 Tejun Heo * Copyright (c) 2022 David Vernet From patchwork Fri Mar 17 21:33:32 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 71474 Return-Path: Delivered-To: ouuuleilei@gmail.com Received: by 2002:a5d:604a:0:0:0:0:0 with SMTP id j10csp4717wrt; Fri, 17 Mar 2023 14:46:44 -0700 (PDT) X-Google-Smtp-Source: AK7set/+yq+8a3i75I1L0sXquypGoIjPNhAyuT1Ukw9TWruGBzwZHTbiiU4KHeybMTcLqPsFalE4 X-Received: by 2002:a17:902:7296:b0:1a0:463d:fd09 with SMTP id d22-20020a170902729600b001a0463dfd09mr8157477pll.1.1679089603994; Fri, 17 Mar 2023 14:46:43 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1679089603; cv=none; d=google.com; s=arc-20160816; b=UZI/jW9tdc5AIrrbk9BfaLfki5/iQRhJQIrmtfpi8zANXqpbOPmXf1CtzEYWASajr7 BhYDlYJICSibWeP0w32242JMQ1UtaHqKbJvT1ExG2NVgANM5/z7kkj6oyr0fy9nH+q9e z1JrtPk653m75ot4EF5EGDRy4vLG1e6GphZGsqjxUvGkm6rLzlnPnzKaWmoEZ/m7rQcf 66EJD4F8LK7qXbpUH9O19O1upmS8s4o5jPS9MMwtSfDMpihjpn1FRo+Nvhvnik6/T78p NpVwKg5Ow4C+rtqt3q6hCWE1ZPuPFi6aGtcKsz46ghzK+TnhOI6S2dS7MPdhuPdcLXcP ZboQ== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from:sender :dkim-signature; bh=T8do32JSr7oejqGQ30BWjBKzAduphb25BMep91IGTCM=; b=J8Ab1Dh/FuVhbo59ZNWAtU+0HQ1iEgAqW9WXWGhIvEv25tIcFunv7qz0LHNCu0SXds zTXGmywTtRKdg1BaPINVaRahVrfZoL1jiqsa6mR1gLaL44QBEvxegaw1IcJqbKUYxFNZ wR6SJZU8bI5Vnx7NKJjcTbeew8kr3bS7yr6rwidtkXTqFpbyibmCs9rSoBn0mvltO6R9 xLIgIYjEB8igauraNVC7x7+8NbTJdBofImsHX+oTjlFdU1dVIVcyFRNXG+YSGqWie0CR fwdY9Jh6Dc5Q+WIHAd/PA5XtQ85Vgh+I+eu+Y4aGAYIv/wKXqmW+dpOEh3gSmYYVOXpp hScw== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@gmail.com header.s=20210112 header.b=qkiFbpSX; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 2620:137:e000::1:20 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=pass (p=NONE sp=NONE dis=NONE) header.from=kernel.org Received: from out1.vger.email (out1.vger.email. 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id q10-20020a17090a304a00b00233acae2ce6sm5355186pjl.23.2023.03.17.14.34.38 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 17 Mar 2023 14:34:39 -0700 (PDT) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 31/32] sched_ext: Add a basic, userland vruntime scheduler Date: Fri, 17 Mar 2023 11:33:32 -1000 Message-Id: <20230317213333.2174969-32-tj@kernel.org> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20230317213333.2174969-1-tj@kernel.org> References: <20230317213333.2174969-1-tj@kernel.org> MIME-Version: 1.0 X-Spam-Status: No, score=-1.5 required=5.0 tests=BAYES_00,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_EF,FREEMAIL_FORGED_FROMDOMAIN,FREEMAIL_FROM, HEADER_FROM_DIFFERENT_DOMAINS,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE, SPF_PASS,URIBL_BLOCKED autolearn=no autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org X-getmail-retrieved-from-mailbox: =?utf-8?q?INBOX?= X-GMAIL-THRID: =?utf-8?q?1760653060372737879?= X-GMAIL-MSGID: =?utf-8?q?1760653060372737879?= From: David Vernet This patch adds a new scx_example_userland BPF scheduler that implements a fairly unsophisticated sorted-list vruntime scheduler in userland to demonstrate how most scheduling decisions can be delegated to userland. The scheduler doesn't implement load balancing, and treats all tasks as part of a single domain. v2: * Converted to BPF inline iterators. 
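For orientation, the core bookkeeping the userland scheduler performs is weight-scaled
vruntime: execution time is divided by (weight / 100) and the lowest-vruntime task is
picked next. The following standalone sketch of that arithmetic is illustrative only;
the real accounting lives in calc_vruntime_delta() in the patch below.

#include <stdint.h>
#include <stdio.h>

struct fake_task {
	const char *name;
	uint64_t weight;	/* 100 is the default task weight */
	double vruntime;
};

/* charge weight-scaled runtime, as calc_vruntime_delta() does in the patch */
static void charge(struct fake_task *t, uint64_t ran_ns)
{
	t->vruntime += (double)ran_ns / ((double)t->weight / 100.0);
}

int main(void)
{
	struct fake_task heavy = { "heavy", 200, 0.0 };
	struct fake_task light = { "light", 100, 0.0 };

	charge(&heavy, 1000000);	/* both run 1ms ... */
	charge(&light, 1000000);

	/* ... the heavier task accrues less vruntime, so it is picked again sooner */
	printf("heavy=%.0f light=%.0f\n", heavy.vruntime, light.vruntime);	/* 500000 1000000 */
	return 0;
}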
Signed-off-by: David Vernet Reviewed-by: Tejun Heo Signed-off-by: Tejun Heo --- tools/sched_ext/.gitignore | 1 + tools/sched_ext/Makefile | 9 +- tools/sched_ext/scx_example_userland.bpf.c | 269 ++++++++++++ tools/sched_ext/scx_example_userland.c | 403 ++++++++++++++++++ tools/sched_ext/scx_example_userland_common.h | 19 + 5 files changed, 699 insertions(+), 2 deletions(-) create mode 100644 tools/sched_ext/scx_example_userland.bpf.c create mode 100644 tools/sched_ext/scx_example_userland.c create mode 100644 tools/sched_ext/scx_example_userland_common.h diff --git a/tools/sched_ext/.gitignore b/tools/sched_ext/.gitignore index 769bc6f35cc6..a3240f9f7eba 100644 --- a/tools/sched_ext/.gitignore +++ b/tools/sched_ext/.gitignore @@ -3,6 +3,7 @@ scx_example_qmap scx_example_central scx_example_pair scx_example_flatcg +scx_example_userland *.skel.h *.subskel.h /tools/ diff --git a/tools/sched_ext/Makefile b/tools/sched_ext/Makefile index 8c7543bbff8d..71b5809243e3 100644 --- a/tools/sched_ext/Makefile +++ b/tools/sched_ext/Makefile @@ -116,7 +116,7 @@ BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) \ -O2 -mcpu=v3 all: scx_example_simple scx_example_qmap scx_example_central scx_example_pair \ - scx_example_flatcg + scx_example_flatcg scx_example_userland # sort removes libbpf duplicates when not cross-building MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \ @@ -187,11 +187,16 @@ scx_example_flatcg: scx_example_flatcg.c scx_example_flatcg.skel.h user_exit_inf $(CC) $(CFLAGS) -c $< -o $@.o $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) +scx_example_userland: scx_example_userland.c scx_example_userland.skel.h \ + scx_example_userland_common.h user_exit_info.h + $(CC) $(CFLAGS) -c $< -o $@.o + $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) + clean: rm -rf $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) rm -f *.o *.bpf.o *.skel.h *.subskel.h rm -f scx_example_simple scx_example_qmap scx_example_central \ - scx_example_pair scx_example_flatcg + scx_example_pair scx_example_flatcg scx_example_userland .PHONY: all clean diff --git a/tools/sched_ext/scx_example_userland.bpf.c b/tools/sched_ext/scx_example_userland.bpf.c new file mode 100644 index 000000000000..a089bc6bbe86 --- /dev/null +++ b/tools/sched_ext/scx_example_userland.bpf.c @@ -0,0 +1,269 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * A minimal userland scheduler. + * + * In terms of scheduling, this provides two different types of behaviors: + * 1. A global FIFO scheduling order for _any_ tasks that have CPU affinity. + * All such tasks are direct-dispatched from the kernel, and are never + * enqueued in user space. + * 2. A primitive vruntime scheduler that is implemented in user space, for all + * other tasks. + * + * Some parts of this example user space scheduler could be implemented more + * efficiently using more complex and sophisticated data structures. For + * example, rather than using BPF_MAP_TYPE_QUEUE's, + * BPF_MAP_TYPE_{USER_}RINGBUF's could be used for exchanging messages between + * user space and kernel space. Similarly, we use a simple vruntime-sorted list + * in user space, but an rbtree could be used instead. + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
+ * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#include +#include "scx_common.bpf.h" +#include "scx_example_userland_common.h" + +char _license[] SEC("license") = "GPL"; + +const volatile bool switch_partial; +const volatile s32 usersched_pid; + +/* !0 for veristat, set during init */ +const volatile u32 num_possible_cpus = 64; + +/* Stats that are printed by user space. */ +u64 nr_failed_enqueues, nr_kernel_enqueues, nr_user_enqueues; + +struct user_exit_info uei; + +/* + * Whether the user space scheduler needs to be scheduled due to a task being + * enqueued in user space. + */ +static bool usersched_needed; + +/* + * The map containing tasks that are enqueued in user space from the kernel. + * + * This map is drained by the user space scheduler. + */ +struct { + __uint(type, BPF_MAP_TYPE_QUEUE); + __uint(max_entries, USERLAND_MAX_TASKS); + __type(value, struct scx_userland_enqueued_task); +} enqueued SEC(".maps"); + +/* + * The map containing tasks that are dispatched to the kernel from user space. + * + * Drained by the kernel in userland_dispatch(). + */ +struct { + __uint(type, BPF_MAP_TYPE_QUEUE); + __uint(max_entries, USERLAND_MAX_TASKS); + __type(value, s32); +} dispatched SEC(".maps"); + +/* Per-task scheduling context */ +struct task_ctx { + bool force_local; /* Dispatch directly to local DSQ */ +}; + +/* Map that contains task-local storage. */ +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct task_ctx); +} task_ctx_stor SEC(".maps"); + +static bool is_usersched_task(const struct task_struct *p) +{ + return p->pid == usersched_pid; +} + +static bool keep_in_kernel(const struct task_struct *p) +{ + return p->nr_cpus_allowed < num_possible_cpus; +} + +static struct task_struct *usersched_task(void) +{ + struct task_struct *p; + + p = bpf_task_from_pid(usersched_pid); + /* + * Should never happen -- the usersched task should always be managed + * by sched_ext. + */ + if (!p) { + scx_bpf_error("Failed to find usersched task %d", usersched_pid); + /* + * We should never hit this path, and we error out of the + * scheduler above just in case, so the scheduler will soon be + * be evicted regardless. So as to simplify the logic in the + * caller to not have to check for NULL, return an acquired + * reference to the current task here rather than NULL. 
+ */ + return bpf_task_acquire(bpf_get_current_task_btf()); + } + + return p; +} + +s32 BPF_STRUCT_OPS(userland_select_cpu, struct task_struct *p, + s32 prev_cpu, u64 wake_flags) +{ + if (keep_in_kernel(p)) { + s32 cpu; + struct task_ctx *tctx; + + tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0); + if (!tctx) { + scx_bpf_error("Failed to look up task-local storage for %s", p->comm); + return -ESRCH; + } + + if (p->nr_cpus_allowed == 1 || + scx_bpf_test_and_clear_cpu_idle(prev_cpu)) { + tctx->force_local = true; + return prev_cpu; + } + + cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr); + if (cpu >= 0) { + tctx->force_local = true; + return cpu; + } + } + + return prev_cpu; +} + +static void dispatch_user_scheduler(void) +{ + struct task_struct *p; + + usersched_needed = false; + p = usersched_task(); + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0); + bpf_task_release(p); +} + +static void enqueue_task_in_user_space(struct task_struct *p, u64 enq_flags) +{ + struct scx_userland_enqueued_task task; + + memset(&task, 0, sizeof(task)); + task.pid = p->pid; + task.sum_exec_runtime = p->se.sum_exec_runtime; + task.weight = p->scx.weight; + + if (bpf_map_push_elem(&enqueued, &task, 0)) { + /* + * If we fail to enqueue the task in user space, put it + * directly on the global DSQ. + */ + __sync_fetch_and_add(&nr_failed_enqueues, 1); + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags); + } else { + __sync_fetch_and_add(&nr_user_enqueues, 1); + usersched_needed = true; + } +} + +void BPF_STRUCT_OPS(userland_enqueue, struct task_struct *p, u64 enq_flags) +{ + if (keep_in_kernel(p)) { + u64 dsq_id = SCX_DSQ_GLOBAL; + struct task_ctx *tctx; + + tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0); + if (!tctx) { + scx_bpf_error("Failed to lookup task ctx for %s", p->comm); + return; + } + + if (tctx->force_local) + dsq_id = SCX_DSQ_LOCAL; + tctx->force_local = false; + scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, enq_flags); + __sync_fetch_and_add(&nr_kernel_enqueues, 1); + return; + } else if (!is_usersched_task(p)) { + enqueue_task_in_user_space(p, enq_flags); + } +} + +void BPF_STRUCT_OPS(userland_dispatch, s32 cpu, struct task_struct *prev) +{ + if (usersched_needed) + dispatch_user_scheduler(); + + bpf_repeat(4096) { + s32 pid; + struct task_struct *p; + + if (bpf_map_pop_elem(&dispatched, &pid)) + break; + + /* + * The task could have exited by the time we get around to + * dispatching it. Treat this as a normal occurrence, and simply + * move onto the next iteration. 
+ */ + p = bpf_task_from_pid(pid); + if (!p) + continue; + + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0); + bpf_task_release(p); + } +} + +s32 BPF_STRUCT_OPS(userland_prep_enable, struct task_struct *p, + struct scx_enable_args *args) +{ + if (bpf_task_storage_get(&task_ctx_stor, p, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE)) + return 0; + else + return -ENOMEM; +} + +s32 BPF_STRUCT_OPS(userland_init) +{ + if (num_possible_cpus == 0) { + scx_bpf_error("User scheduler # CPUs uninitialized (%d)", + num_possible_cpus); + return -EINVAL; + } + + if (usersched_pid <= 0) { + scx_bpf_error("User scheduler pid uninitialized (%d)", + usersched_pid); + return -EINVAL; + } + + if (!switch_partial) + scx_bpf_switch_all(); + return 0; +} + +void BPF_STRUCT_OPS(userland_exit, struct scx_exit_info *ei) +{ + uei_record(&uei, ei); +} + +SEC(".struct_ops") +struct sched_ext_ops userland_ops = { + .select_cpu = (void *)userland_select_cpu, + .enqueue = (void *)userland_enqueue, + .dispatch = (void *)userland_dispatch, + .prep_enable = (void *)userland_prep_enable, + .init = (void *)userland_init, + .exit = (void *)userland_exit, + .timeout_ms = 3000, + .name = "userland", +}; diff --git a/tools/sched_ext/scx_example_userland.c b/tools/sched_ext/scx_example_userland.c new file mode 100644 index 000000000000..cb20e91c2d1f --- /dev/null +++ b/tools/sched_ext/scx_example_userland.c @@ -0,0 +1,403 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * A demo sched_ext user space scheduler which provides vruntime semantics + * using a simple ordered-list implementation. + * + * Each CPU in the system resides in a single, global domain. This precludes + * the need to do any load balancing between domains. The scheduler could + * easily be extended to support multiple domains, with load balancing + * happening in user space. + * + * Any task which has any CPU affinity is scheduled entirely in BPF. This + * program only schedules tasks which may run on any CPU. + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "user_exit_info.h" +#include "scx_example_userland_common.h" +#include "scx_example_userland.skel.h" + +const char help_fmt[] = +"A minimal userland sched_ext scheduler.\n" +"\n" +"See the top-level comment in .bpf.c for more details.\n" +"\n" +"Usage: %s [-a]\n" +"\n" +" -b The number of tasks to batch when dispatching.\n" +" Defaults to 8\n" +" -p Don't switch all, switch only tasks on SCHED_EXT policy\n" +" -h Display this help and exit\n"; + +/* Defined in UAPI */ +#define SCHED_EXT 7 + +/* Number of tasks to batch when dispatching to user space. */ +static __u32 batch_size = 8; + +static volatile int exit_req; +static int enqueued_fd, dispatched_fd; + +static struct scx_example_userland *skel; +static struct bpf_link *ops_link; + +/* Stats collected in user space. */ +static __u64 nr_vruntime_enqueues, nr_vruntime_dispatches; + +/* The data structure containing tasks that are enqueued in user space. */ +struct enqueued_task { + LIST_ENTRY(enqueued_task) entries; + __u64 sum_exec_runtime; + double vruntime; +}; + +/* + * Use a vruntime-sorted list to store tasks. This could easily be extended to + * a more optimal data structure, such as an rbtree as is done in CFS. We + * currently elect to use a sorted list to simplify the example for + * illustrative purposes. 
+ */ +LIST_HEAD(listhead, enqueued_task); + +/* + * A vruntime-sorted list of tasks. The head of the list contains the task with + * the lowest vruntime. That is, the task that has the "highest" claim to be + * scheduled. + */ +static struct listhead vruntime_head = LIST_HEAD_INITIALIZER(vruntime_head); + +/* + * The statically allocated array of tasks. We use a statically allocated list + * here to avoid having to allocate on the enqueue path, which could cause a + * deadlock. A more substantive user space scheduler could e.g. provide a hook + * for newly enabled tasks that are passed to the scheduler from the + * .prep_enable() callback to allows the scheduler to allocate on safe paths. + */ +struct enqueued_task tasks[USERLAND_MAX_TASKS]; + +static double min_vruntime; + +static void sigint_handler(int userland) +{ + exit_req = 1; +} + +static __u32 task_pid(const struct enqueued_task *task) +{ + return ((uintptr_t)task - (uintptr_t)tasks) / sizeof(*task); +} + +static int dispatch_task(s32 pid) +{ + int err; + + err = bpf_map_update_elem(dispatched_fd, NULL, &pid, 0); + if (err) { + fprintf(stderr, "Failed to dispatch task %d\n", pid); + exit_req = 1; + } else { + nr_vruntime_dispatches++; + } + + return err; +} + +static struct enqueued_task *get_enqueued_task(__s32 pid) +{ + if (pid >= USERLAND_MAX_TASKS) + return NULL; + + return &tasks[pid]; +} + +static double calc_vruntime_delta(__u64 weight, __u64 delta) +{ + double weight_f = (double)weight / 100.0; + double delta_f = (double)delta; + + return delta_f / weight_f; +} + +static void update_enqueued(struct enqueued_task *enqueued, const struct scx_userland_enqueued_task *bpf_task) +{ + __u64 delta; + + delta = bpf_task->sum_exec_runtime - enqueued->sum_exec_runtime; + + enqueued->vruntime += calc_vruntime_delta(bpf_task->weight, delta); + if (min_vruntime > enqueued->vruntime) + enqueued->vruntime = min_vruntime; + enqueued->sum_exec_runtime = bpf_task->sum_exec_runtime; +} + +static int vruntime_enqueue(const struct scx_userland_enqueued_task *bpf_task) +{ + struct enqueued_task *curr, *enqueued, *prev; + + curr = get_enqueued_task(bpf_task->pid); + if (!curr) + return ENOENT; + + update_enqueued(curr, bpf_task); + nr_vruntime_enqueues++; + + /* + * Enqueue the task in a vruntime-sorted list. A more optimal data + * structure such as an rbtree could easily be used as well. We elect + * to use a list here simply because it's less code, and thus the + * example is less convoluted and better serves to illustrate what a + * user space scheduler could look like. 
+ */ + + if (LIST_EMPTY(&vruntime_head)) { + LIST_INSERT_HEAD(&vruntime_head, curr, entries); + return 0; + } + + LIST_FOREACH(enqueued, &vruntime_head, entries) { + if (curr->vruntime <= enqueued->vruntime) { + LIST_INSERT_BEFORE(enqueued, curr, entries); + return 0; + } + prev = enqueued; + } + + LIST_INSERT_AFTER(prev, curr, entries); + + return 0; +} + +static void drain_enqueued_map(void) +{ + while (1) { + struct scx_userland_enqueued_task task; + int err; + + if (bpf_map_lookup_and_delete_elem(enqueued_fd, NULL, &task)) + return; + + err = vruntime_enqueue(&task); + if (err) { + fprintf(stderr, "Failed to enqueue task %d: %s\n", + task.pid, strerror(err)); + exit_req = 1; + return; + } + } +} + +static void dispatch_batch(void) +{ + __u32 i; + + for (i = 0; i < batch_size; i++) { + struct enqueued_task *task; + int err; + __s32 pid; + + task = LIST_FIRST(&vruntime_head); + if (!task) + return; + + min_vruntime = task->vruntime; + pid = task_pid(task); + LIST_REMOVE(task, entries); + err = dispatch_task(pid); + if (err) { + fprintf(stderr, "Failed to dispatch task %d in %u\n", + pid, i); + return; + } + } +} + +static void *run_stats_printer(void *arg) +{ + while (!exit_req) { + __u64 nr_failed_enqueues, nr_kernel_enqueues, nr_user_enqueues, total; + + nr_failed_enqueues = skel->bss->nr_failed_enqueues; + nr_kernel_enqueues = skel->bss->nr_kernel_enqueues; + nr_user_enqueues = skel->bss->nr_user_enqueues; + total = nr_failed_enqueues + nr_kernel_enqueues + nr_user_enqueues; + + printf("o-----------------------o\n"); + printf("| BPF ENQUEUES |\n"); + printf("|-----------------------|\n"); + printf("| kern: %10llu |\n", nr_kernel_enqueues); + printf("| user: %10llu |\n", nr_user_enqueues); + printf("| failed: %10llu |\n", nr_failed_enqueues); + printf("| -------------------- |\n"); + printf("| total: %10llu |\n", total); + printf("| |\n"); + printf("|-----------------------|\n"); + printf("| VRUNTIME / USER |\n"); + printf("|-----------------------|\n"); + printf("| enq: %10llu |\n", nr_vruntime_enqueues); + printf("| disp: %10llu |\n", nr_vruntime_dispatches); + printf("o-----------------------o\n"); + printf("\n\n"); + sleep(1); + } + + return NULL; +} + +static int spawn_stats_thread(void) +{ + pthread_t stats_printer; + + return pthread_create(&stats_printer, NULL, run_stats_printer, NULL); +} + +static int bootstrap(int argc, char **argv) +{ + int err; + __u32 opt; + struct sched_param sched_param = { + .sched_priority = sched_get_priority_max(SCHED_EXT), + }; + bool switch_partial = false; + + signal(SIGINT, sigint_handler); + signal(SIGTERM, sigint_handler); + libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + + /* + * Enforce that the user scheduler task is managed by sched_ext. The + * task eagerly drains the list of enqueued tasks in its main work + * loop, and then yields the CPU. The BPF scheduler only schedules the + * user space scheduler task when at least one other task in the system + * needs to be scheduled. 
+ */ + err = syscall(__NR_sched_setscheduler, getpid(), SCHED_EXT, &sched_param); + if (err) { + fprintf(stderr, "Failed to set scheduler to SCHED_EXT: %s\n", strerror(err)); + return err; + } + + while ((opt = getopt(argc, argv, "b:ph")) != -1) { + switch (opt) { + case 'b': + batch_size = strtoul(optarg, NULL, 0); + break; + case 'p': + switch_partial = true; + break; + default: + fprintf(stderr, help_fmt, basename(argv[0])); + exit(opt != 'h'); + } + } + + /* + * It's not always safe to allocate in a user space scheduler, as an + * enqueued task could hold a lock that we require in order to be able + * to allocate. + */ + err = mlockall(MCL_CURRENT | MCL_FUTURE); + if (err) { + fprintf(stderr, "Failed to prefault and lock address space: %s\n", + strerror(err)); + return err; + } + + skel = scx_example_userland__open(); + if (!skel) { + fprintf(stderr, "Failed to open scheduler: %s\n", strerror(errno)); + return errno; + } + skel->rodata->num_possible_cpus = libbpf_num_possible_cpus(); + assert(skel->rodata->num_possible_cpus > 0); + skel->rodata->usersched_pid = getpid(); + assert(skel->rodata->usersched_pid > 0); + skel->rodata->switch_partial = switch_partial; + + err = scx_example_userland__load(skel); + if (err) { + fprintf(stderr, "Failed to load scheduler: %s\n", strerror(err)); + goto destroy_skel; + } + + enqueued_fd = bpf_map__fd(skel->maps.enqueued); + dispatched_fd = bpf_map__fd(skel->maps.dispatched); + assert(enqueued_fd > 0); + assert(dispatched_fd > 0); + + err = spawn_stats_thread(); + if (err) { + fprintf(stderr, "Failed to spawn stats thread: %s\n", strerror(err)); + goto destroy_skel; + } + + ops_link = bpf_map__attach_struct_ops(skel->maps.userland_ops); + if (!ops_link) { + fprintf(stderr, "Failed to attach struct ops: %s\n", strerror(errno)); + err = errno; + goto destroy_skel; + } + + return 0; + +destroy_skel: + scx_example_userland__destroy(skel); + exit_req = 1; + return err; +} + +static void sched_main_loop(void) +{ + while (!exit_req) { + /* + * Perform the following work in the main user space scheduler + * loop: + * + * 1. Drain all tasks from the enqueued map, and enqueue them + * to the vruntime sorted list. + * + * 2. Dispatch a batch of tasks from the vruntime sorted list + * down to the kernel. + * + * 3. Yield the CPU back to the system. The BPF scheduler will + * reschedule the user space scheduler once another task has + * been enqueued to user space. + */ + drain_enqueued_map(); + dispatch_batch(); + sched_yield(); + } +} + +int main(int argc, char **argv) +{ + int err; + + err = bootstrap(argc, argv); + if (err) { + fprintf(stderr, "Failed to bootstrap scheduler: %s\n", strerror(err)); + return err; + } + + sched_main_loop(); + + exit_req = 1; + bpf_link__destroy(ops_link); + uei_print(&skel->bss->uei); + scx_example_userland__destroy(skel); + return 0; +} diff --git a/tools/sched_ext/scx_example_userland_common.h b/tools/sched_ext/scx_example_userland_common.h new file mode 100644 index 000000000000..639c6809c5ff --- /dev/null +++ b/tools/sched_ext/scx_example_userland_common.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta, Inc */ + +#ifndef __SCX_USERLAND_COMMON_H +#define __SCX_USERLAND_COMMON_H + +#define USERLAND_MAX_TASKS 8192 + +/* + * An instance of a task that has been enqueued by the kernel for consumption + * by a user space global scheduler thread. 
+ */ +struct scx_userland_enqueued_task { + __s32 pid; + u64 sum_exec_runtime; + u64 weight; +}; + +#endif // __SCX_USERLAND_COMMON_H
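To make the weighting in calc_vruntime_delta() above concrete, here is a small standalone C sketch (not part of the patch) of the same arithmetic: tasks are charged vruntime in inverse proportion to their weight, so a task with twice the default weight of 100 is charged half as much for the same runtime and therefore reaches the head of the vruntime-sorted list more often.

#include <assert.h>

/* Same arithmetic as calc_vruntime_delta() above: charge delta / (weight / 100). */
double vruntime_delta(unsigned long long weight, unsigned long long delta_ns)
{
	return (double)delta_ns / ((double)weight / 100.0);
}

int main(void)
{
	/* 10ms of execution at the default weight of 100 charges 10ms of vruntime... */
	assert(vruntime_delta(100, 10000000ULL) == 10000000.0);
	/* ...while a weight-200 task is only charged 5ms, so it is picked again sooner. */
	assert(vruntime_delta(200, 10000000ULL) == 5000000.0);
	return 0;
}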
From patchwork Fri Mar 17 21:33:33 2023
X-Patchwork-Submitter: Tejun Heo
X-Patchwork-Id: 71493
From: Tejun Heo
To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com
Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo
Subject: [PATCH 32/32] sched_ext: Add a rust userspace hybrid example scheduler
Date: Fri, 17 Mar 2023 11:33:33 -1000
Message-Id: <20230317213333.2174969-33-tj@kernel.org>
In-Reply-To: <20230317213333.2174969-1-tj@kernel.org>
References: <20230317213333.2174969-1-tj@kernel.org>

From: Dan Schatzberg

Atropos is a multi-domain BPF / userspace hybrid scheduler where the BPF part does simple round robin in each domain and the userspace part calculates the load factor of each domain and tells the BPF part how to load balance the domains.

This scheduler demonstrates dividing scheduling logic between BPF and userspace and using rust to build the userspace part. An earlier variant of this scheduler was used to balance across six domains, each representing a chiplet in a six-chiplet AMD processor, and could match the performance of the production setup using CFS.

v3: * The userspace code is substantially restructured and rewritten. The binary is renamed to scx_atropos and can now figure out the domain topology automatically based on L3 cache configuration. The LB logic, which was rather broken in the previous postings, is revamped and should behave better.
* Updated to support weighted vtime scheduling (can be turned off with --fifo-sched). Added a couple of options (--slice_us, --kthreads-local) to modify scheduling behaviors.
* Converted to use BPF inline iterators.

v2: * Updated to use generic BPF cpumask helpers.
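The handoff between the userspace load balancer and the BPF side described above amounts to writing pid -> target-domain entries into the lb_data BPF map, which the BPF enqueue path consults the next time that task is enqueued. As a rough illustration of that protocol (not part of the patch), here is a minimal C sketch using libbpf's low-level map API; the actual scheduler below does the equivalent from Rust via libbpf-rs, and the function name and map-fd plumbing here are made up for the example.

#include <bpf/bpf.h>		/* bpf_map_update_elem() */
#include <linux/bpf.h>		/* BPF_NOEXIST */
#include <linux/types.h>
#include <stdio.h>
#include <sys/types.h>

/*
 * Illustrative only: ask the BPF scheduler to move @pid to @new_dom by
 * inserting an entry into the lb_data map (keyed by pid, value is the
 * target domain id). @lb_data_fd is assumed to be the fd of that map.
 */
int request_migration(int lb_data_fd, pid_t pid, __u32 new_dom)
{
	/* BPF_NOEXIST: don't clobber a migration the BPF side hasn't consumed yet. */
	int err = bpf_map_update_elem(lb_data_fd, &pid, &new_dom, BPF_NOEXIST);

	if (err)
		fprintf(stderr, "lb_data update failed for pid %d: %d\n", pid, err);
	return err;
}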
Signed-off-by: Dan Schatzberg Signed-off-by: Tejun Heo --- tools/sched_ext/Makefile | 13 +- tools/sched_ext/atropos/.gitignore | 3 + tools/sched_ext/atropos/Cargo.toml | 28 + tools/sched_ext/atropos/build.rs | 70 ++ tools/sched_ext/atropos/rustfmt.toml | 8 + tools/sched_ext/atropos/src/atropos_sys.rs | 10 + tools/sched_ext/atropos/src/bpf/atropos.bpf.c | 751 ++++++++++++++ tools/sched_ext/atropos/src/bpf/atropos.h | 44 + tools/sched_ext/atropos/src/main.rs | 942 ++++++++++++++++++ 9 files changed, 1867 insertions(+), 2 deletions(-) create mode 100644 tools/sched_ext/atropos/.gitignore create mode 100644 tools/sched_ext/atropos/Cargo.toml create mode 100644 tools/sched_ext/atropos/build.rs create mode 100644 tools/sched_ext/atropos/rustfmt.toml create mode 100644 tools/sched_ext/atropos/src/atropos_sys.rs create mode 100644 tools/sched_ext/atropos/src/bpf/atropos.bpf.c create mode 100644 tools/sched_ext/atropos/src/bpf/atropos.h create mode 100644 tools/sched_ext/atropos/src/main.rs diff --git a/tools/sched_ext/Makefile b/tools/sched_ext/Makefile index 71b5809243e3..73c43782837d 100644 --- a/tools/sched_ext/Makefile +++ b/tools/sched_ext/Makefile @@ -85,6 +85,8 @@ CFLAGS += -g -O2 -rdynamic -pthread -Wall -Werror $(GENFLAGS) \ -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \ -I$(TOOLSINCDIR) -I$(APIDIR) +CARGOFLAGS := --release + # Silence some warnings when compiled with clang ifneq ($(LLVM),) CFLAGS += -Wno-unused-command-line-argument @@ -116,7 +118,7 @@ BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) \ -O2 -mcpu=v3 all: scx_example_simple scx_example_qmap scx_example_central scx_example_pair \ - scx_example_flatcg scx_example_userland + scx_example_flatcg scx_example_userland atropos # sort removes libbpf duplicates when not cross-building MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \ @@ -192,13 +194,20 @@ scx_example_userland: scx_example_userland.c scx_example_userland.skel.h \ $(CC) $(CFLAGS) -c $< -o $@.o $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) +atropos: export RUSTFLAGS = -C link-args=-lzstd -C link-args=-lz -C link-args=-lelf -L $(BPFOBJ_DIR) +atropos: export ATROPOS_CLANG = $(CLANG) +atropos: export ATROPOS_BPF_CFLAGS = $(BPF_CFLAGS) +atropos: $(INCLUDE_DIR)/vmlinux.h + cargo build --manifest-path=atropos/Cargo.toml $(CARGOFLAGS) + clean: + cargo clean --manifest-path=atropos/Cargo.toml rm -rf $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) rm -f *.o *.bpf.o *.skel.h *.subskel.h rm -f scx_example_simple scx_example_qmap scx_example_central \ scx_example_pair scx_example_flatcg scx_example_userland -.PHONY: all clean +.PHONY: all atropos clean # delete failed targets .DELETE_ON_ERROR: diff --git a/tools/sched_ext/atropos/.gitignore b/tools/sched_ext/atropos/.gitignore new file mode 100644 index 000000000000..186dba259ec2 --- /dev/null +++ b/tools/sched_ext/atropos/.gitignore @@ -0,0 +1,3 @@ +src/bpf/.output +Cargo.lock +target diff --git a/tools/sched_ext/atropos/Cargo.toml b/tools/sched_ext/atropos/Cargo.toml new file mode 100644 index 000000000000..7462a836d53d --- /dev/null +++ b/tools/sched_ext/atropos/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "scx_atropos" +version = "0.5.0" +authors = ["Dan Schatzberg ", "Meta"] +edition = "2021" +description = "Userspace scheduling with BPF" +license = "GPL-2.0-only" + +[dependencies] +anyhow = "1.0.65" +bitvec = { version = "1.0", features = ["serde"] } +clap = { version = "4.1", features = ["derive", "env", "unicode", "wrap_help"] } +ctrlc = { version = "3.1", features = ["termination"] } +fb_procfs = { git = 
"https://github.com/facebookincubator/below.git", rev = "f305730"} +hex = "0.4.3" +libbpf-rs = "0.19.1" +libbpf-sys = { version = "1.0.4", features = ["novendor", "static"] } +libc = "0.2.137" +log = "0.4.17" +ordered-float = "3.4.0" +simplelog = "0.12.0" + +[build-dependencies] +bindgen = { version = "0.61.0", features = ["logging", "static"], default-features = false } +libbpf-cargo = "0.13.0" + +[features] +enable_backtrace = [] diff --git a/tools/sched_ext/atropos/build.rs b/tools/sched_ext/atropos/build.rs new file mode 100644 index 000000000000..26e792c5e17e --- /dev/null +++ b/tools/sched_ext/atropos/build.rs @@ -0,0 +1,70 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. + +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2. +extern crate bindgen; + +use std::env; +use std::fs::create_dir_all; +use std::path::Path; +use std::path::PathBuf; + +use libbpf_cargo::SkeletonBuilder; + +const HEADER_PATH: &str = "src/bpf/atropos.h"; + +fn bindgen_atropos() { + // Tell cargo to invalidate the built crate whenever the wrapper changes + println!("cargo:rerun-if-changed={}", HEADER_PATH); + + // The bindgen::Builder is the main entry point + // to bindgen, and lets you build up options for + // the resulting bindings. + let bindings = bindgen::Builder::default() + // The input header we would like to generate + // bindings for. + .header(HEADER_PATH) + // Tell cargo to invalidate the built crate whenever any of the + // included header files changed. + .parse_callbacks(Box::new(bindgen::CargoCallbacks)) + // Finish the builder and generate the bindings. + .generate() + // Unwrap the Result and panic on failure. + .expect("Unable to generate bindings"); + + // Write the bindings to the $OUT_DIR/bindings.rs file. + let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); + bindings + .write_to_file(out_path.join("atropos-sys.rs")) + .expect("Couldn't write bindings!"); +} + +fn gen_bpf_sched(name: &str) { + let bpf_cflags = env::var("ATROPOS_BPF_CFLAGS").unwrap(); + let clang = env::var("ATROPOS_CLANG").unwrap(); + eprintln!("{}", clang); + let outpath = format!("./src/bpf/.output/{}.skel.rs", name); + let skel = Path::new(&outpath); + let src = format!("./src/bpf/{}.bpf.c", name); + SkeletonBuilder::new() + .source(src.clone()) + .clang(clang) + .clang_args(bpf_cflags) + .build_and_generate(&skel) + .unwrap(); + println!("cargo:rerun-if-changed={}", src); +} + +fn main() { + bindgen_atropos(); + // It's unfortunate we cannot use `OUT_DIR` to store the generated skeleton. + // Reasons are because the generated skeleton contains compiler attributes + // that cannot be `include!()`ed via macro. And we cannot use the `#[path = "..."]` + // trick either because you cannot yet `concat!(env!("OUT_DIR"), "/skel.rs")` inside + // the path attribute either (see https://github.com/rust-lang/rust/pull/83366). + // + // However, there is hope! When the above feature stabilizes we can clean this + // all up. + create_dir_all("./src/bpf/.output").unwrap(); + gen_bpf_sched("atropos"); +} diff --git a/tools/sched_ext/atropos/rustfmt.toml b/tools/sched_ext/atropos/rustfmt.toml new file mode 100644 index 000000000000..b7258ed0a8d8 --- /dev/null +++ b/tools/sched_ext/atropos/rustfmt.toml @@ -0,0 +1,8 @@ +# Get help on options with `rustfmt --help=config` +# Please keep these in alphabetical order. 
+edition = "2021" +group_imports = "StdExternalCrate" +imports_granularity = "Item" +merge_derives = false +use_field_init_shorthand = true +version = "Two" diff --git a/tools/sched_ext/atropos/src/atropos_sys.rs b/tools/sched_ext/atropos/src/atropos_sys.rs new file mode 100644 index 000000000000..bbeaf856d40e --- /dev/null +++ b/tools/sched_ext/atropos/src/atropos_sys.rs @@ -0,0 +1,10 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. + +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2. +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(non_snake_case)] +#![allow(dead_code)] + +include!(concat!(env!("OUT_DIR"), "/atropos-sys.rs")); diff --git a/tools/sched_ext/atropos/src/bpf/atropos.bpf.c b/tools/sched_ext/atropos/src/bpf/atropos.bpf.c new file mode 100644 index 000000000000..c26ecf0e77a8 --- /dev/null +++ b/tools/sched_ext/atropos/src/bpf/atropos.bpf.c @@ -0,0 +1,751 @@ +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ +/* + * This software may be used and distributed according to the terms of the + * GNU General Public License version 2. + * + * Atropos is a multi-domain BPF / userspace hybrid scheduler where the BPF + * part does simple round robin in each domain and the userspace part + * calculates the load factor of each domain and tells the BPF part how to load + * balance the domains. + * + * Every task has an entry in the task_data map which lists which domain the + * task belongs to. When a task first enters the system (atropos_prep_enable), + * they are round-robined to a domain. + * + * atropos_select_cpu is the primary scheduling logic, invoked when a task + * becomes runnable. The lb_data map is populated by userspace to inform the BPF + * scheduler that a task should be migrated to a new domain. Otherwise, the task + * is scheduled in priority order as follows: + * * The current core if the task was woken up synchronously and there are idle + * cpus in the system + * * The previous core, if idle + * * The pinned-to core if the task is pinned to a specific core + * * Any idle cpu in the domain + * + * If none of the above conditions are met, then the task is enqueued to a + * dispatch queue corresponding to the domain (atropos_enqueue). + * + * atropos_dispatch will attempt to consume a task from its domain's + * corresponding dispatch queue (this occurs after scheduling any tasks directly + * assigned to it due to the logic in atropos_select_cpu). If no task is found, + * then greedy load stealing will attempt to find a task on another dispatch + * queue to run. + * + * Load balancing is almost entirely handled by userspace. BPF populates the + * task weight, dom mask and current dom in the task_data map and executes the + * load balance based on userspace populating the lb_data map. + */ +#include "../../../scx_common.bpf.h" +#include "atropos.h" + +#include +#include +#include +#include +#include +#include + +char _license[] SEC("license") = "GPL"; + +/* + * const volatiles are set during initialization and treated as consts by the + * jit compiler. 
+ */ + +/* + * Domains and cpus + */ +const volatile __u32 nr_doms = 32; /* !0 for veristat, set during init */ +const volatile __u32 nr_cpus = 64; /* !0 for veristat, set during init */ +const volatile __u32 cpu_dom_id_map[MAX_CPUS]; +const volatile __u64 dom_cpumasks[MAX_DOMS][MAX_CPUS / 64]; + +const volatile bool kthreads_local; +const volatile bool fifo_sched; +const volatile bool switch_partial; +const volatile __u32 greedy_threshold; + +/* base slice duration */ +const volatile __u64 slice_us = 20000; + +/* + * Exit info + */ +int exit_type = SCX_EXIT_NONE; +char exit_msg[SCX_EXIT_MSG_LEN]; + +struct pcpu_ctx { + __u32 dom_rr_cur; /* used when scanning other doms */ + + /* libbpf-rs does not respect the alignment, so pad out the struct explicitly */ + __u8 _padding[CACHELINE_SIZE - sizeof(u64)]; +} __attribute__((aligned(CACHELINE_SIZE))); + +struct pcpu_ctx pcpu_ctx[MAX_CPUS]; + +/* + * Domain context + */ +struct dom_ctx { + struct bpf_cpumask __kptr *cpumask; + u64 vtime_now; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, struct dom_ctx); + __uint(max_entries, MAX_DOMS); + __uint(map_flags, 0); +} dom_ctx SEC(".maps"); + +/* + * Statistics + */ +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(u64)); + __uint(max_entries, ATROPOS_NR_STATS); +} stats SEC(".maps"); + +static inline void stat_add(enum stat_idx idx, u64 addend) +{ + u32 idx_v = idx; + + u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx_v); + if (cnt_p) + (*cnt_p) += addend; +} + +/* Map pid -> task_ctx */ +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, pid_t); + __type(value, struct task_ctx); + __uint(max_entries, 1000000); + __uint(map_flags, 0); +} task_data SEC(".maps"); + +/* + * This is populated from userspace to indicate which pids should be reassigned + * to new doms. + */ +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, pid_t); + __type(value, u32); + __uint(max_entries, 1000); + __uint(map_flags, 0); +} lb_data SEC(".maps"); + +static inline bool vtime_before(u64 a, u64 b) +{ + return (s64)(a - b) < 0; +} + +static bool task_set_dsq(struct task_ctx *task_ctx, struct task_struct *p, + u32 new_dom_id) +{ + struct dom_ctx *old_domc, *new_domc; + struct bpf_cpumask *d_cpumask, *t_cpumask; + u32 old_dom_id = task_ctx->dom_id; + s64 vtime_delta; + + old_domc = bpf_map_lookup_elem(&dom_ctx, &old_dom_id); + if (!old_domc) { + scx_bpf_error("No dom%u", old_dom_id); + return false; + } + + vtime_delta = p->scx.dsq_vtime - old_domc->vtime_now; + + new_domc = bpf_map_lookup_elem(&dom_ctx, &new_dom_id); + if (!new_domc) { + scx_bpf_error("No dom%u", new_dom_id); + return false; + } + + d_cpumask = bpf_cpumask_kptr_get(&new_domc->cpumask); + if (!d_cpumask) { + scx_bpf_error("Failed to get domain %u cpumask kptr", + new_dom_id); + return false; + } + + t_cpumask = bpf_cpumask_kptr_get(&task_ctx->cpumask); + if (!t_cpumask) { + scx_bpf_error("Failed to look up task cpumask"); + bpf_cpumask_release(d_cpumask); + return false; + } + + /* + * set_cpumask might have happened between userspace requesting LB and + * here and @p might not be able to run in @dom_id anymore. Verify. 
+ */ + if (bpf_cpumask_intersects((const struct cpumask *)d_cpumask, + p->cpus_ptr)) { + p->scx.dsq_vtime = new_domc->vtime_now + vtime_delta; + task_ctx->dom_id = new_dom_id; + bpf_cpumask_and(t_cpumask, (const struct cpumask *)d_cpumask, + p->cpus_ptr); + } + + bpf_cpumask_release(d_cpumask); + bpf_cpumask_release(t_cpumask); + + return task_ctx->dom_id == new_dom_id; +} + +s32 BPF_STRUCT_OPS(atropos_select_cpu, struct task_struct *p, int prev_cpu, + u32 wake_flags) +{ + s32 cpu; + pid_t pid = p->pid; + struct task_ctx *task_ctx = bpf_map_lookup_elem(&task_data, &pid); + struct bpf_cpumask *p_cpumask; + + if (!task_ctx) + return -ENOENT; + + if (kthreads_local && + (p->flags & PF_KTHREAD) && p->nr_cpus_allowed == 1) { + cpu = prev_cpu; + stat_add(ATROPOS_STAT_DIRECT_DISPATCH, 1); + goto local; + } + + /* + * If WAKE_SYNC and the machine isn't fully saturated, wake up @p to the + * local dsq of the waker. + */ + if (p->nr_cpus_allowed > 1 && (wake_flags & SCX_WAKE_SYNC)) { + struct task_struct *current = (void *)bpf_get_current_task(); + + if (!(BPF_CORE_READ(current, flags) & PF_EXITING) && + task_ctx->dom_id < MAX_DOMS) { + struct dom_ctx *domc; + struct bpf_cpumask *d_cpumask; + const struct cpumask *idle_cpumask; + bool has_idle; + + domc = bpf_map_lookup_elem(&dom_ctx, &task_ctx->dom_id); + if (!domc) { + scx_bpf_error("Failed to find dom%u", + task_ctx->dom_id); + return prev_cpu; + } + d_cpumask = bpf_cpumask_kptr_get(&domc->cpumask); + if (!d_cpumask) { + scx_bpf_error("Failed to acquire domain %u cpumask kptr", + task_ctx->dom_id); + return prev_cpu; + } + + idle_cpumask = scx_bpf_get_idle_cpumask(); + + has_idle = bpf_cpumask_intersects((const struct cpumask *)d_cpumask, + idle_cpumask); + + bpf_cpumask_release(d_cpumask); + scx_bpf_put_idle_cpumask(idle_cpumask); + + if (has_idle) { + cpu = bpf_get_smp_processor_id(); + if (bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) { + stat_add(ATROPOS_STAT_WAKE_SYNC, 1); + goto local; + } + } + } + } + + /* if the previous CPU is idle, dispatch directly to it */ + if (scx_bpf_test_and_clear_cpu_idle(prev_cpu)) { + stat_add(ATROPOS_STAT_PREV_IDLE, 1); + cpu = prev_cpu; + goto local; + } + + /* If only one core is allowed, dispatch */ + if (p->nr_cpus_allowed == 1) { + stat_add(ATROPOS_STAT_PINNED, 1); + cpu = prev_cpu; + goto local; + } + + p_cpumask = bpf_cpumask_kptr_get(&task_ctx->cpumask); + if (!p_cpumask) + return -ENOENT; + + /* If there is an eligible idle CPU, dispatch directly */ + cpu = scx_bpf_pick_idle_cpu((const struct cpumask *)p_cpumask); + if (cpu >= 0) { + bpf_cpumask_release(p_cpumask); + stat_add(ATROPOS_STAT_DIRECT_DISPATCH, 1); + goto local; + } + + /* + * @prev_cpu may be in a different domain. Returning an out-of-domain + * CPU can lead to stalls as all in-domain CPUs may be idle by the time + * @p gets enqueued. 
+ */ + if (bpf_cpumask_test_cpu(prev_cpu, (const struct cpumask *)p_cpumask)) + cpu = prev_cpu; + else + cpu = bpf_cpumask_any((const struct cpumask *)p_cpumask); + + bpf_cpumask_release(p_cpumask); + return cpu; + +local: + task_ctx->dispatch_local = true; + return cpu; +} + +void BPF_STRUCT_OPS(atropos_enqueue, struct task_struct *p, u32 enq_flags) +{ + pid_t pid = p->pid; + struct task_ctx *task_ctx = bpf_map_lookup_elem(&task_data, &pid); + u32 *new_dom; + + if (!task_ctx) { + scx_bpf_error("No task_ctx[%d]", pid); + return; + } + + new_dom = bpf_map_lookup_elem(&lb_data, &pid); + if (new_dom && *new_dom != task_ctx->dom_id && + task_set_dsq(task_ctx, p, *new_dom)) { + struct bpf_cpumask *p_cpumask; + s32 cpu; + + stat_add(ATROPOS_STAT_LOAD_BALANCE, 1); + + /* + * If dispatch_local is set, We own @p's idle state but we are + * not gonna put the task in the associated local dsq which can + * cause the CPU to stall. Kick it. + */ + if (task_ctx->dispatch_local) { + task_ctx->dispatch_local = false; + scx_bpf_kick_cpu(scx_bpf_task_cpu(p), 0); + } + + p_cpumask = bpf_cpumask_kptr_get(&task_ctx->cpumask); + if (!p_cpumask) { + scx_bpf_error("Failed to get task_ctx->cpumask"); + return; + } + cpu = scx_bpf_pick_idle_cpu((const struct cpumask *)p_cpumask); + bpf_cpumask_release(p_cpumask); + + if (cpu >= 0) + scx_bpf_kick_cpu(cpu, 0); + } + + if (task_ctx->dispatch_local) { + task_ctx->dispatch_local = false; + scx_bpf_dispatch(p, SCX_DSQ_LOCAL, slice_us * 1000, enq_flags); + return; + } + + if (fifo_sched) { + scx_bpf_dispatch(p, task_ctx->dom_id, slice_us * 1000, + enq_flags); + } else { + u64 vtime = p->scx.dsq_vtime; + u32 dom_id = task_ctx->dom_id; + struct dom_ctx *domc; + + domc = bpf_map_lookup_elem(&dom_ctx, &dom_id); + if (!domc) { + scx_bpf_error("No dom[%u]", dom_id); + return; + } + + /* + * Limit the amount of budget that an idling task can accumulate + * to one slice. 
+ */ + if (vtime_before(vtime, domc->vtime_now - slice_us * 1000)) + vtime = domc->vtime_now - slice_us * 1000; + + scx_bpf_dispatch_vtime(p, task_ctx->dom_id, SCX_SLICE_DFL, vtime, + enq_flags); + } +} + +static u32 cpu_to_dom_id(s32 cpu) +{ + const volatile u32 *dom_idp; + + if (nr_doms <= 1) + return 0; + + dom_idp = MEMBER_VPTR(cpu_dom_id_map, [cpu]); + if (!dom_idp) + return MAX_DOMS; + + return *dom_idp; +} + +static bool cpumask_intersects_domain(const struct cpumask *cpumask, u32 dom_id) +{ + s32 cpu; + + if (dom_id >= MAX_DOMS) + return false; + + bpf_for(cpu, 0, nr_cpus) { + if (bpf_cpumask_test_cpu(cpu, cpumask) && + (dom_cpumasks[dom_id][cpu / 64] & (1LLU << (cpu % 64)))) + return true; + } + return false; +} + +static u32 dom_rr_next(s32 cpu) +{ + struct pcpu_ctx *pcpuc; + u32 dom_id; + + pcpuc = MEMBER_VPTR(pcpu_ctx, [cpu]); + if (!pcpuc) + return 0; + + dom_id = (pcpuc->dom_rr_cur + 1) % nr_doms; + + if (dom_id == cpu_to_dom_id(cpu)) + dom_id = (dom_id + 1) % nr_doms; + + pcpuc->dom_rr_cur = dom_id; + return dom_id; +} + +void BPF_STRUCT_OPS(atropos_dispatch, s32 cpu, struct task_struct *prev) +{ + u32 dom = cpu_to_dom_id(cpu); + + if (scx_bpf_consume(dom)) { + stat_add(ATROPOS_STAT_DSQ_DISPATCH, 1); + return; + } + + if (!greedy_threshold) + return; + + bpf_repeat(nr_doms - 1) { + u32 dom_id = dom_rr_next(cpu); + + if (scx_bpf_dsq_nr_queued(dom_id) >= greedy_threshold && + scx_bpf_consume(dom_id)) { + stat_add(ATROPOS_STAT_GREEDY, 1); + break; + } + } +} + +void BPF_STRUCT_OPS(atropos_runnable, struct task_struct *p, u64 enq_flags) +{ + pid_t pid = p->pid; + struct task_ctx *task_ctx = bpf_map_lookup_elem(&task_data, &pid); + + if (!task_ctx) { + scx_bpf_error("No task_ctx[%d]", pid); + return; + } + + task_ctx->runnable_at = bpf_ktime_get_ns(); +} + +void BPF_STRUCT_OPS(atropos_running, struct task_struct *p) +{ + struct task_ctx *taskc; + struct dom_ctx *domc; + pid_t pid = p->pid; + u32 dom_id; + + if (fifo_sched) + return; + + taskc = bpf_map_lookup_elem(&task_data, &pid); + if (!taskc) { + scx_bpf_error("No task_ctx[%d]", pid); + return; + } + dom_id = taskc->dom_id; + + domc = bpf_map_lookup_elem(&dom_ctx, &dom_id); + if (!domc) { + scx_bpf_error("No dom[%u]", dom_id); + return; + } + + /* + * Global vtime always progresses forward as tasks start executing. The + * test and update can be performed concurrently from multiple CPUs and + * thus racy. Any error should be contained and temporary. Let's just + * live with it. 
+ */ + if (vtime_before(domc->vtime_now, p->scx.dsq_vtime)) + domc->vtime_now = p->scx.dsq_vtime; +} + +void BPF_STRUCT_OPS(atropos_stopping, struct task_struct *p, bool runnable) +{ + if (fifo_sched) + return; + + /* scale the execution time by the inverse of the weight and charge */ + p->scx.dsq_vtime += (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight; +} + +void BPF_STRUCT_OPS(atropos_quiescent, struct task_struct *p, u64 deq_flags) +{ + pid_t pid = p->pid; + struct task_ctx *task_ctx = bpf_map_lookup_elem(&task_data, &pid); + + if (!task_ctx) { + scx_bpf_error("No task_ctx[%d]", pid); + return; + } + + task_ctx->runnable_for += bpf_ktime_get_ns() - task_ctx->runnable_at; + task_ctx->runnable_at = 0; +} + +void BPF_STRUCT_OPS(atropos_set_weight, struct task_struct *p, u32 weight) +{ + pid_t pid = p->pid; + struct task_ctx *task_ctx = bpf_map_lookup_elem(&task_data, &pid); + + if (!task_ctx) { + scx_bpf_error("No task_ctx[%d]", pid); + return; + } + + task_ctx->weight = weight; +} + +struct pick_task_domain_loop_ctx { + struct task_struct *p; + const struct cpumask *cpumask; + u64 dom_mask; + u32 dom_rr_base; + u32 dom_id; +}; + +static int pick_task_domain_loopfn(u32 idx, void *data) +{ + struct pick_task_domain_loop_ctx *lctx = data; + u32 dom_id = (lctx->dom_rr_base + idx) % nr_doms; + + if (dom_id >= MAX_DOMS) + return 1; + + if (cpumask_intersects_domain(lctx->cpumask, dom_id)) { + lctx->dom_mask |= 1LLU << dom_id; + if (lctx->dom_id == MAX_DOMS) + lctx->dom_id = dom_id; + } + return 0; +} + +static u32 pick_task_domain(struct task_ctx *task_ctx, struct task_struct *p, + const struct cpumask *cpumask) +{ + struct pick_task_domain_loop_ctx lctx = { + .p = p, + .cpumask = cpumask, + .dom_id = MAX_DOMS, + }; + s32 cpu = bpf_get_smp_processor_id(); + + if (cpu < 0 || cpu >= MAX_CPUS) + return MAX_DOMS; + + lctx.dom_rr_base = ++(pcpu_ctx[cpu].dom_rr_cur); + + bpf_loop(nr_doms, pick_task_domain_loopfn, &lctx, 0); + task_ctx->dom_mask = lctx.dom_mask; + + return lctx.dom_id; +} + +static void task_set_domain(struct task_ctx *task_ctx, struct task_struct *p, + const struct cpumask *cpumask) +{ + u32 dom_id = 0; + + if (nr_doms > 1) + dom_id = pick_task_domain(task_ctx, p, cpumask); + + if (!task_set_dsq(task_ctx, p, dom_id)) + scx_bpf_error("Failed to set domain %d for %s[%d]", + dom_id, p->comm, p->pid); +} + +void BPF_STRUCT_OPS(atropos_set_cpumask, struct task_struct *p, + const struct cpumask *cpumask) +{ + pid_t pid = p->pid; + struct task_ctx *task_ctx = bpf_map_lookup_elem(&task_data, &pid); + if (!task_ctx) { + scx_bpf_error("No task_ctx[%d]", pid); + return; + } + + task_set_domain(task_ctx, p, cpumask); +} + +s32 BPF_STRUCT_OPS(atropos_prep_enable, struct task_struct *p, + struct scx_enable_args *args) +{ + struct bpf_cpumask *cpumask; + struct task_ctx task_ctx, *map_value; + long ret; + pid_t pid; + + memset(&task_ctx, 0, sizeof(task_ctx)); + + pid = p->pid; + ret = bpf_map_update_elem(&task_data, &pid, &task_ctx, BPF_NOEXIST); + if (ret) { + stat_add(ATROPOS_STAT_TASK_GET_ERR, 1); + return ret; + } + + /* + * Read the entry from the map immediately so we can add the cpumask + * with bpf_kptr_xchg(). + */ + map_value = bpf_map_lookup_elem(&task_data, &pid); + if (!map_value) + /* Should never happen -- it was just inserted above. 
*/ + return -EINVAL; + + cpumask = bpf_cpumask_create(); + if (!cpumask) { + bpf_map_delete_elem(&task_data, &pid); + return -ENOMEM; + } + + cpumask = bpf_kptr_xchg(&map_value->cpumask, cpumask); + if (cpumask) { + /* Should never happen as we just inserted it above. */ + bpf_cpumask_release(cpumask); + bpf_map_delete_elem(&task_data, &pid); + return -EINVAL; + } + + task_set_domain(map_value, p, p->cpus_ptr); + + return 0; +} + +void BPF_STRUCT_OPS(atropos_disable, struct task_struct *p) +{ + pid_t pid = p->pid; + long ret = bpf_map_delete_elem(&task_data, &pid); + if (ret) { + stat_add(ATROPOS_STAT_TASK_GET_ERR, 1); + return; + } +} + +static int create_dom_dsq(u32 idx, void *data) +{ + struct dom_ctx domc_init = {}, *domc; + struct bpf_cpumask *cpumask; + u32 cpu, dom_id = idx; + s32 ret; + + ret = scx_bpf_create_dsq(dom_id, -1); + if (ret < 0) { + scx_bpf_error("Failed to create dsq %u (%d)", dom_id, ret); + return 1; + } + + ret = bpf_map_update_elem(&dom_ctx, &dom_id, &domc_init, 0); + if (ret) { + scx_bpf_error("Failed to add dom_ctx entry %u (%d)", dom_id, ret); + return 1; + } + + domc = bpf_map_lookup_elem(&dom_ctx, &dom_id); + if (!domc) { + /* Should never happen, we just inserted it above. */ + scx_bpf_error("No dom%u", dom_id); + return 1; + } + + cpumask = bpf_cpumask_create(); + if (!cpumask) { + scx_bpf_error("Failed to create BPF cpumask for domain %u", dom_id); + return 1; + } + + for (cpu = 0; cpu < MAX_CPUS; cpu++) { + const volatile __u64 *dmask; + + dmask = MEMBER_VPTR(dom_cpumasks, [dom_id][cpu / 64]); + if (!dmask) { + scx_bpf_error("array index error"); + bpf_cpumask_release(cpumask); + return 1; + } + + if (*dmask & (1LLU << (cpu % 64))) + bpf_cpumask_set_cpu(cpu, cpumask); + } + + cpumask = bpf_kptr_xchg(&domc->cpumask, cpumask); + if (cpumask) { + scx_bpf_error("Domain %u was already present", dom_id); + bpf_cpumask_release(cpumask); + return 1; + } + + return 0; +} + +int BPF_STRUCT_OPS_SLEEPABLE(atropos_init) +{ + if (!switch_partial) + scx_bpf_switch_all(); + + bpf_loop(nr_doms, create_dom_dsq, NULL, 0); + + for (u32 i = 0; i < nr_cpus; i++) + pcpu_ctx[i].dom_rr_cur = i; + + return 0; +} + +void BPF_STRUCT_OPS(atropos_exit, struct scx_exit_info *ei) +{ + bpf_probe_read_kernel_str(exit_msg, sizeof(exit_msg), ei->msg); + exit_type = ei->type; +} + +SEC(".struct_ops") +struct sched_ext_ops atropos = { + .select_cpu = (void *)atropos_select_cpu, + .enqueue = (void *)atropos_enqueue, + .dispatch = (void *)atropos_dispatch, + .runnable = (void *)atropos_runnable, + .running = (void *)atropos_running, + .stopping = (void *)atropos_stopping, + .quiescent = (void *)atropos_quiescent, + .set_weight = (void *)atropos_set_weight, + .set_cpumask = (void *)atropos_set_cpumask, + .prep_enable = (void *)atropos_prep_enable, + .disable = (void *)atropos_disable, + .init = (void *)atropos_init, + .exit = (void *)atropos_exit, + .flags = 0, + .name = "atropos", +}; diff --git a/tools/sched_ext/atropos/src/bpf/atropos.h b/tools/sched_ext/atropos/src/bpf/atropos.h new file mode 100644 index 000000000000..addf29ca104a --- /dev/null +++ b/tools/sched_ext/atropos/src/bpf/atropos.h @@ -0,0 +1,44 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. + +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2. 
+#ifndef __ATROPOS_H +#define __ATROPOS_H + +#include +#ifndef __kptr +#ifdef __KERNEL__ +#error "__kptr_ref not defined in the kernel" +#endif +#define __kptr +#endif + +#define MAX_CPUS 512 +#define MAX_DOMS 64 /* limited to avoid complex bitmask ops */ +#define CACHELINE_SIZE 64 + +/* Statistics */ +enum stat_idx { + ATROPOS_STAT_TASK_GET_ERR, + ATROPOS_STAT_WAKE_SYNC, + ATROPOS_STAT_PREV_IDLE, + ATROPOS_STAT_PINNED, + ATROPOS_STAT_DIRECT_DISPATCH, + ATROPOS_STAT_DSQ_DISPATCH, + ATROPOS_STAT_GREEDY, + ATROPOS_STAT_LOAD_BALANCE, + ATROPOS_STAT_LAST_TASK, + ATROPOS_NR_STATS, +}; + +struct task_ctx { + unsigned long long dom_mask; /* the domains this task can run on */ + struct bpf_cpumask __kptr *cpumask; + unsigned int dom_id; + unsigned int weight; + unsigned long long runnable_at; + unsigned long long runnable_for; + bool dispatch_local; +}; + +#endif /* __ATROPOS_H */ diff --git a/tools/sched_ext/atropos/src/main.rs b/tools/sched_ext/atropos/src/main.rs new file mode 100644 index 000000000000..0d313662f713 --- /dev/null +++ b/tools/sched_ext/atropos/src/main.rs @@ -0,0 +1,942 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. + +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2. +#[path = "bpf/.output/atropos.skel.rs"] +mod atropos; +pub use atropos::*; +pub mod atropos_sys; + +use std::cell::Cell; +use std::collections::{BTreeMap, BTreeSet}; +use std::ffi::CStr; +use std::ops::Bound::{Included, Unbounded}; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; +use std::time::{Duration, SystemTime}; + +use ::fb_procfs as procfs; +use anyhow::{anyhow, bail, Context, Result}; +use bitvec::prelude::*; +use clap::Parser; +use log::{info, trace, warn}; +use ordered_float::OrderedFloat; + +/// Atropos is a multi-domain BPF / userspace hybrid scheduler where the BPF +/// part does simple round robin in each domain and the userspace part +/// calculates the load factor of each domain and tells the BPF part how to load +/// balance the domains. + +/// This scheduler demonstrates dividing scheduling logic between BPF and +/// userspace and using rust to build the userspace part. An earlier variant of +/// this scheduler was used to balance across six domains, each representing a +/// chiplet in a six-chiplet AMD processor, and could match the performance of +/// production setup using CFS. +#[derive(Debug, Parser)] +struct Opts { + /// Scheduling slice duration in microseconds. + #[clap(short, long, default_value = "20000")] + slice_us: u64, + + /// Monitoring and load balance interval in seconds. + #[clap(short, long, default_value = "2.0")] + interval: f64, + + /// Build domains according to how CPUs are grouped at this cache level + /// as determined by /sys/devices/system/cpu/cpuX/cache/indexI/id. + #[clap(short = 'c', long, default_value = "3")] + cache_level: u32, + + /// Instead of using cache locality, set the cpumask for each domain + /// manually, provide multiple --cpumasks, one for each domain. E.g. + /// --cpumasks 0xff_00ff --cpumasks 0xff00 will create two domains with + /// the corresponding CPUs belonging to each domain. Each CPU must + /// belong to precisely one domain. + #[clap(short = 'C', long, num_args = 1.., conflicts_with = "cache_level")] + cpumasks: Vec, + + /// When non-zero, enable greedy task stealing. When a domain is idle, a + /// cpu will attempt to steal tasks from a domain with at least + /// greedy_threshold tasks enqueued. 
These tasks aren't permanently + /// stolen from the domain. + #[clap(short, long, default_value = "4")] + greedy_threshold: u32, + + /// The load decay factor. Every interval, the existing load is decayed + /// by this factor and new load is added. Must be in the range [0.0, + /// 0.99]. The smaller the value, the more sensitive load calculation + /// is to recent changes. When 0.0, history is ignored and the load + /// value from the latest period is used directly. + #[clap(short, long, default_value = "0.5")] + load_decay_factor: f64, + + /// Disable load balancing. Unless disabled, periodically userspace will + /// calculate the load factor of each domain and instruct BPF which + /// processes to move. + #[clap(short, long, action = clap::ArgAction::SetTrue)] + no_load_balance: bool, + + /// Put per-cpu kthreads directly into local dsq's. + #[clap(short, long, action = clap::ArgAction::SetTrue)] + kthreads_local: bool, + + /// Use FIFO scheduling instead of weighted vtime scheduling. + #[clap(short, long, action = clap::ArgAction::SetTrue)] + fifo_sched: bool, + + /// If specified, only tasks which have their scheduling policy set to + /// SCHED_EXT using sched_setscheduler(2) are switched. Otherwise, all + /// tasks are switched. + #[clap(short, long, action = clap::ArgAction::SetTrue)] + partial: bool, + + /// Enable verbose output including libbpf details. Specify multiple + /// times to increase verbosity. + #[clap(short, long, action = clap::ArgAction::Count)] + verbose: u8, +} + +fn read_total_cpu(reader: &mut procfs::ProcReader) -> Result { + Ok(reader + .read_stat() + .context("Failed to read procfs")? + .total_cpu + .ok_or_else(|| anyhow!("Could not read total cpu stat in proc"))?) +} + +fn now_monotonic() -> u64 { + let mut time = libc::timespec { + tv_sec: 0, + tv_nsec: 0, + }; + let ret = unsafe { libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut time) }; + assert!(ret == 0); + time.tv_sec as u64 * 1_000_000_000 + time.tv_nsec as u64 +} + +fn clear_map(map: &mut libbpf_rs::Map) { + // XXX: libbpf_rs has some design flaw that make it impossible to + // delete while iterating despite it being safe so we alias it here + let deleter: &mut libbpf_rs::Map = unsafe { &mut *(map as *mut _) }; + for key in map.keys() { + let _ = deleter.delete(&key); + } +} + +#[derive(Debug)] +struct TaskLoad { + runnable_for: u64, + load: f64, +} + +#[derive(Debug)] +struct TaskInfo { + pid: i32, + dom_mask: u64, + migrated: Cell, +} + +struct LoadBalancer<'a, 'b, 'c> { + maps: AtroposMapsMut<'a>, + task_loads: &'b mut BTreeMap, + nr_doms: usize, + load_decay_factor: f64, + + tasks_by_load: Vec, TaskInfo>>, + load_avg: f64, + dom_loads: Vec, + + imbal: Vec, + doms_to_push: BTreeMap, u32>, + doms_to_pull: BTreeMap, u32>, + + nr_lb_data_errors: &'c mut u64, +} + +impl<'a, 'b, 'c> LoadBalancer<'a, 'b, 'c> { + const LOAD_IMBAL_HIGH_RATIO: f64 = 0.10; + const LOAD_IMBAL_REDUCTION_MIN_RATIO: f64 = 0.1; + const LOAD_IMBAL_PUSH_MAX_RATIO: f64 = 0.50; + + fn new( + maps: AtroposMapsMut<'a>, + task_loads: &'b mut BTreeMap, + nr_doms: usize, + load_decay_factor: f64, + nr_lb_data_errors: &'c mut u64, + ) -> Self { + Self { + maps, + task_loads, + nr_doms, + load_decay_factor, + + tasks_by_load: (0..nr_doms).map(|_| BTreeMap::<_, _>::new()).collect(), + load_avg: 0f64, + dom_loads: vec![0.0; nr_doms], + + imbal: vec![0.0; nr_doms], + doms_to_pull: BTreeMap::new(), + doms_to_push: BTreeMap::new(), + + nr_lb_data_errors, + } + } + + fn read_task_loads(&mut self, period: Duration) -> Result<()> { + let now_mono = 
now_monotonic(); + let task_data = self.maps.task_data(); + let mut this_task_loads = BTreeMap::::new(); + let mut load_sum = 0.0f64; + self.dom_loads = vec![0f64; self.nr_doms]; + + for key in task_data.keys() { + if let Some(task_ctx_vec) = task_data + .lookup(&key, libbpf_rs::MapFlags::ANY) + .context("Failed to lookup task_data")? + { + let task_ctx = + unsafe { &*(task_ctx_vec.as_slice().as_ptr() as *const atropos_sys::task_ctx) }; + let pid = i32::from_ne_bytes( + key.as_slice() + .try_into() + .context("Invalid key length in task_data map")?, + ); + + let (this_at, this_for, weight) = unsafe { + ( + std::ptr::read_volatile(&task_ctx.runnable_at as *const u64), + std::ptr::read_volatile(&task_ctx.runnable_for as *const u64), + std::ptr::read_volatile(&task_ctx.weight as *const u32), + ) + }; + + let (mut delta, prev_load) = match self.task_loads.get(&pid) { + Some(prev) => (this_for - prev.runnable_for, Some(prev.load)), + None => (this_for, None), + }; + + // Non-zero this_at indicates that the task is currently + // runnable. Note that we read runnable_at and runnable_for + // without any synchronization and there is a small window + // where we end up misaccounting. While this can cause + // temporary error, it's unlikely to cause any noticeable + // misbehavior especially given the load value clamping. + if this_at > 0 && this_at < now_mono { + delta += now_mono - this_at; + } + + delta = delta.min(period.as_nanos() as u64); + let this_load = (weight as f64 * delta as f64 / period.as_nanos() as f64) + .clamp(0.0, weight as f64); + + let this_load = match prev_load { + Some(prev_load) => { + prev_load * self.load_decay_factor + + this_load * (1.0 - self.load_decay_factor) + } + None => this_load, + }; + + this_task_loads.insert( + pid, + TaskLoad { + runnable_for: this_for, + load: this_load, + }, + ); + + load_sum += this_load; + self.dom_loads[task_ctx.dom_id as usize] += this_load; + // Only record pids that are eligible for load balancing + if task_ctx.dom_mask == (1u64 << task_ctx.dom_id) { + continue; + } + self.tasks_by_load[task_ctx.dom_id as usize].insert( + OrderedFloat(this_load), + TaskInfo { + pid, + dom_mask: task_ctx.dom_mask, + migrated: Cell::new(false), + }, + ); + } + } + + self.load_avg = load_sum / self.nr_doms as f64; + *self.task_loads = this_task_loads; + Ok(()) + } + + // To balance dom loads we identify doms with lower and higher load than average + fn calculate_dom_load_balance(&mut self) -> Result<()> { + for (dom, dom_load) in self.dom_loads.iter().enumerate() { + let imbal = dom_load - self.load_avg; + if imbal.abs() >= self.load_avg * Self::LOAD_IMBAL_HIGH_RATIO { + if imbal > 0f64 { + self.doms_to_push.insert(OrderedFloat(imbal), dom as u32); + } else { + self.doms_to_pull.insert(OrderedFloat(-imbal), dom as u32); + } + self.imbal[dom] = imbal; + } + } + Ok(()) + } + + // Find the first candidate pid which hasn't already been migrated and + // can run in @pull_dom. 
+ fn find_first_candidate<'d, I>(tasks_by_load: I, pull_dom: u32) -> Option<(f64, &'d TaskInfo)> + where + I: IntoIterator, &'d TaskInfo)>, + { + match tasks_by_load + .into_iter() + .skip_while(|(_, task)| task.migrated.get() || task.dom_mask & (1 << pull_dom) == 0) + .next() + { + Some((OrderedFloat(load), task)) => Some((*load, task)), + None => None, + } + } + + fn pick_victim( + &self, + (push_dom, to_push): (u32, f64), + (pull_dom, to_pull): (u32, f64), + ) -> Option<(&TaskInfo, f64)> { + let to_xfer = to_pull.min(to_push); + + trace!( + "considering dom {}@{:.2} -> {}@{:.2}", + push_dom, + to_push, + pull_dom, + to_pull + ); + + let calc_new_imbal = |xfer: f64| (to_push - xfer).abs() + (to_pull - xfer).abs(); + + trace!( + "to_xfer={:.2} tasks_by_load={:?}", + to_xfer, + &self.tasks_by_load[push_dom as usize] + ); + + // We want to pick a task to transfer from push_dom to pull_dom to + // maximize the reduction of load imbalance between the two. IOW, + // pick a task which has the closest load value to $to_xfer that can + // be migrated. Find such task by locating the first migratable task + // while scanning left from $to_xfer and the counterpart while + // scanning right and picking the better of the two. + let (load, task, new_imbal) = match ( + Self::find_first_candidate( + self.tasks_by_load[push_dom as usize] + .range((Unbounded, Included(&OrderedFloat(to_xfer)))) + .rev(), + pull_dom, + ), + Self::find_first_candidate( + self.tasks_by_load[push_dom as usize] + .range((Included(&OrderedFloat(to_xfer)), Unbounded)), + pull_dom, + ), + ) { + (None, None) => return None, + (Some((load, task)), None) | (None, Some((load, task))) => { + (load, task, calc_new_imbal(load)) + } + (Some((load0, task0)), Some((load1, task1))) => { + let (new_imbal0, new_imbal1) = (calc_new_imbal(load0), calc_new_imbal(load1)); + if new_imbal0 <= new_imbal1 { + (load0, task0, new_imbal0) + } else { + (load1, task1, new_imbal1) + } + } + }; + + // If the best candidate can't reduce the imbalance, there's nothing + // to do for this pair. + let old_imbal = to_push + to_pull; + if old_imbal * (1.0 - Self::LOAD_IMBAL_REDUCTION_MIN_RATIO) < new_imbal { + trace!( + "skipping pid {}, dom {} -> {} won't improve imbal {:.2} -> {:.2}", + task.pid, + push_dom, + pull_dom, + old_imbal, + new_imbal + ); + return None; + } + + trace!( + "migrating pid {}, dom {} -> {}, imbal={:.2} -> {:.2}", + task.pid, + push_dom, + pull_dom, + old_imbal, + new_imbal, + ); + + Some((task, load)) + } + + // Actually execute the load balancing. Concretely this writes pid -> dom + // entries into the lb_data map for bpf side to consume. + fn load_balance(&mut self) -> Result<()> { + clear_map(self.maps.lb_data()); + + trace!("imbal={:?}", &self.imbal); + trace!("doms_to_push={:?}", &self.doms_to_push); + trace!("doms_to_pull={:?}", &self.doms_to_pull); + + // Push from the most imbalanced to least. + while let Some((OrderedFloat(mut to_push), push_dom)) = self.doms_to_push.pop_last() { + let push_max = self.dom_loads[push_dom as usize] * Self::LOAD_IMBAL_PUSH_MAX_RATIO; + let mut pushed = 0f64; + + // Transfer tasks from push_dom to reduce imbalance. + loop { + let last_pushed = pushed; + + // Pull from the most imbalaned to least. 
+ let mut doms_to_pull = BTreeMap::<_, _>::new(); + std::mem::swap(&mut self.doms_to_pull, &mut doms_to_pull); + let mut pull_doms = doms_to_pull.into_iter().rev().collect::>(); + + for (to_pull, pull_dom) in pull_doms.iter_mut() { + if let Some((task, load)) = + self.pick_victim((push_dom, to_push), (*pull_dom, f64::from(*to_pull))) + { + // Execute migration. + task.migrated.set(true); + to_push -= load; + *to_pull -= load; + pushed += load; + + // Ask BPF code to execute the migration. + let pid = task.pid; + let cpid = (pid as libc::pid_t).to_ne_bytes(); + if let Err(e) = self.maps.lb_data().update( + &cpid, + &pull_dom.to_ne_bytes(), + libbpf_rs::MapFlags::NO_EXIST, + ) { + warn!( + "Failed to update lb_data map for pid={} error={:?}", + pid, &e + ); + *self.nr_lb_data_errors += 1; + } + + // Always break after a successful migration so that + // the pulling domains are always considered in the + // descending imbalance order. + break; + } + } + + pull_doms + .into_iter() + .map(|(k, v)| self.doms_to_pull.insert(k, v)) + .count(); + + // Stop repeating if nothing got transferred or pushed enough. + if pushed == last_pushed || pushed >= push_max { + break; + } + } + } + Ok(()) + } +} + +struct Scheduler<'a> { + skel: AtroposSkel<'a>, + struct_ops: Option, + + nr_cpus: usize, + nr_doms: usize, + load_decay_factor: f64, + balance_load: bool, + + proc_reader: procfs::ProcReader, + + prev_at: SystemTime, + prev_total_cpu: procfs::CpuStat, + task_loads: BTreeMap, + + nr_lb_data_errors: u64, +} + +impl<'a> Scheduler<'a> { + // Returns Vec of cpuset for each dq and a vec of dq for each cpu + fn parse_cpusets( + cpumasks: &[String], + nr_cpus: usize, + ) -> Result<(Vec>, Vec)> { + if cpumasks.len() > atropos_sys::MAX_DOMS as usize { + bail!( + "Number of requested DSQs ({}) is greater than MAX_DOMS ({})", + cpumasks.len(), + atropos_sys::MAX_DOMS + ); + } + let mut cpus = vec![-1i32; nr_cpus]; + let mut cpusets = + vec![bitvec![u64, Lsb0; 0; atropos_sys::MAX_CPUS as usize]; cpumasks.len()]; + for (dq, cpumask) in cpumasks.iter().enumerate() { + let hex_str = { + let mut tmp_str = cpumask + .strip_prefix("0x") + .unwrap_or(cpumask) + .replace('_', ""); + if tmp_str.len() % 2 != 0 { + tmp_str = "0".to_string() + &tmp_str; + } + tmp_str + }; + let byte_vec = hex::decode(&hex_str) + .with_context(|| format!("Failed to parse cpumask: {}", cpumask))?; + + for (index, &val) in byte_vec.iter().rev().enumerate() { + let mut v = val; + while v != 0 { + let lsb = v.trailing_zeros() as usize; + v &= !(1 << lsb); + let cpu = index * 8 + lsb; + if cpu > nr_cpus { + bail!( + concat!( + "Found cpu ({}) in cpumask ({}) which is larger", + " than the number of cpus on the machine ({})" + ), + cpu, + cpumask, + nr_cpus + ); + } + if cpus[cpu] != -1 { + bail!( + "Found cpu ({}) with dq ({}) but also in cpumask ({})", + cpu, + cpus[cpu], + cpumask + ); + } + cpus[cpu] = dq as i32; + cpusets[dq].set(cpu, true); + } + } + cpusets[dq].set_uninitialized(false); + } + + for (cpu, &dq) in cpus.iter().enumerate() { + if dq < 0 { + bail!( + "Cpu {} not assigned to any dq. Make sure it is covered by some --cpumasks argument.", + cpu + ); + } + } + + Ok((cpusets, cpus)) + } + + // Returns Vec of cpuset for each dq and a vec of dq for each cpu + fn cpusets_from_cache( + level: u32, + nr_cpus: usize, + ) -> Result<(Vec>, Vec)> { + let mut cpu_to_cache = vec![]; // (cpu_id, cache_id) + let mut cache_ids = BTreeSet::::new(); + let mut nr_not_found = 0; + + // Build cpu -> cache ID mapping. 
+ for cpu in 0..nr_cpus { + let path = format!("/sys/devices/system/cpu/cpu{}/cache/index{}/id", cpu, level); + let id = match std::fs::read_to_string(&path) { + Ok(val) => val + .trim() + .parse::() + .with_context(|| format!("Failed to parse {:?}'s content {:?}", &path, &val))?, + Err(e) if e.kind() == std::io::ErrorKind::NotFound => { + nr_not_found += 1; + 0 + } + Err(e) => return Err(e).with_context(|| format!("Failed to open {:?}", &path)), + }; + + cpu_to_cache.push(id); + cache_ids.insert(id); + } + + if nr_not_found > 1 { + warn!( + "Couldn't determine level {} cache IDs for {} CPUs out of {}, assigned to cache ID 0", + level, nr_not_found, nr_cpus + ); + } + + // Cache IDs may have holes. Assign consecutive domain IDs to + // existing cache IDs. + let mut cache_to_dom = BTreeMap::::new(); + let mut nr_doms = 0; + for cache_id in cache_ids.iter() { + cache_to_dom.insert(*cache_id, nr_doms); + nr_doms += 1; + } + + if nr_doms > atropos_sys::MAX_DOMS { + bail!( + "Total number of doms {} is greater than MAX_DOMS ({})", + nr_doms, + atropos_sys::MAX_DOMS + ); + } + + // Build and return dom -> cpumask and cpu -> dom mappings. + let mut cpusets = + vec![bitvec![u64, Lsb0; 0; atropos_sys::MAX_CPUS as usize]; nr_doms as usize]; + let mut cpu_to_dom = vec![]; + + for cpu in 0..nr_cpus { + let dom_id = cache_to_dom[&cpu_to_cache[cpu]]; + cpusets[dom_id as usize].set(cpu, true); + cpu_to_dom.push(dom_id as i32); + } + + Ok((cpusets, cpu_to_dom)) + } + + fn init(opts: &Opts) -> Result { + // Open the BPF prog first for verification. + let mut skel_builder = AtroposSkelBuilder::default(); + skel_builder.obj_builder.debug(opts.verbose > 0); + let mut skel = skel_builder.open().context("Failed to open BPF program")?; + + let nr_cpus = libbpf_rs::num_possible_cpus().unwrap(); + if nr_cpus > atropos_sys::MAX_CPUS as usize { + bail!( + "nr_cpus ({}) is greater than MAX_CPUS ({})", + nr_cpus, + atropos_sys::MAX_CPUS + ); + } + + // Initialize skel according to @opts. + let (cpusets, cpus) = if opts.cpumasks.len() > 0 { + Self::parse_cpusets(&opts.cpumasks, nr_cpus)? + } else { + Self::cpusets_from_cache(opts.cache_level, nr_cpus)? + }; + let nr_doms = cpusets.len(); + skel.rodata().nr_doms = nr_doms as u32; + skel.rodata().nr_cpus = nr_cpus as u32; + + for (cpu, dom) in cpus.iter().enumerate() { + skel.rodata().cpu_dom_id_map[cpu] = *dom as u32; + } + + for (dom, cpuset) in cpusets.iter().enumerate() { + let raw_cpuset_slice = cpuset.as_raw_slice(); + let dom_cpumask_slice = &mut skel.rodata().dom_cpumasks[dom]; + let (left, _) = dom_cpumask_slice.split_at_mut(raw_cpuset_slice.len()); + left.clone_from_slice(cpuset.as_raw_slice()); + let cpumask_str = dom_cpumask_slice + .iter() + .take((nr_cpus + 63) / 64) + .rev() + .fold(String::new(), |acc, x| format!("{} {:016X}", acc, x)); + info!( + "DOM[{:02}] cpumask{} ({} cpus)", + dom, + &cpumask_str, + cpuset.count_ones() + ); + } + + skel.rodata().slice_us = opts.slice_us; + skel.rodata().kthreads_local = opts.kthreads_local; + skel.rodata().fifo_sched = opts.fifo_sched; + skel.rodata().switch_partial = opts.partial; + skel.rodata().greedy_threshold = opts.greedy_threshold; + + // Attach. + let mut skel = skel.load().context("Failed to load BPF program")?; + skel.attach().context("Failed to attach BPF program")?; + let struct_ops = Some( + skel.maps_mut() + .atropos() + .attach_struct_ops() + .context("Failed to attach atropos struct ops")?, + ); + info!("Atropos Scheduler Attached"); + + // Other stuff. 
+ let mut proc_reader = procfs::ProcReader::new();
+ let prev_total_cpu = read_total_cpu(&mut proc_reader)?;
+
+ Ok(Self {
+ skel,
+ struct_ops, // should be held to keep it attached
+
+ nr_cpus,
+ nr_doms,
+ load_decay_factor: opts.load_decay_factor.clamp(0.0, 0.99),
+ balance_load: !opts.no_load_balance,
+
+ proc_reader,
+
+ prev_at: SystemTime::now(),
+ prev_total_cpu,
+ task_loads: BTreeMap::new(),
+
+ nr_lb_data_errors: 0,
+ })
+ }
+
+ fn get_cpu_busy(&mut self) -> Result<f64> {
+ let total_cpu = read_total_cpu(&mut self.proc_reader)?;
+ let busy = match (&self.prev_total_cpu, &total_cpu) {
+ (
+ procfs::CpuStat {
+ user_usec: Some(prev_user),
+ nice_usec: Some(prev_nice),
+ system_usec: Some(prev_system),
+ idle_usec: Some(prev_idle),
+ iowait_usec: Some(prev_iowait),
+ irq_usec: Some(prev_irq),
+ softirq_usec: Some(prev_softirq),
+ stolen_usec: Some(prev_stolen),
+ guest_usec: _,
+ guest_nice_usec: _,
+ },
+ procfs::CpuStat {
+ user_usec: Some(curr_user),
+ nice_usec: Some(curr_nice),
+ system_usec: Some(curr_system),
+ idle_usec: Some(curr_idle),
+ iowait_usec: Some(curr_iowait),
+ irq_usec: Some(curr_irq),
+ softirq_usec: Some(curr_softirq),
+ stolen_usec: Some(curr_stolen),
+ guest_usec: _,
+ guest_nice_usec: _,
+ },
+ ) => {
+ let idle_usec = curr_idle - prev_idle;
+ let iowait_usec = curr_iowait - prev_iowait;
+ let user_usec = curr_user - prev_user;
+ let system_usec = curr_system - prev_system;
+ let nice_usec = curr_nice - prev_nice;
+ let irq_usec = curr_irq - prev_irq;
+ let softirq_usec = curr_softirq - prev_softirq;
+ let stolen_usec = curr_stolen - prev_stolen;
+
+ let busy_usec =
+ user_usec + system_usec + nice_usec + irq_usec + softirq_usec + stolen_usec;
+ let total_usec = idle_usec + busy_usec + iowait_usec;
+ busy_usec as f64 / total_usec as f64
+ }
+ _ => {
+ bail!("Some procfs stats are not populated!");
+ }
+ };
+
+ self.prev_total_cpu = total_cpu;
+ Ok(busy)
+ }
+
+ fn read_bpf_stats(&mut self) -> Result<Vec<u64>> {
+ let mut maps = self.skel.maps_mut();
+ let stats_map = maps.stats();
+ let mut stats: Vec<u64> = Vec::new();
+ let zero_vec = vec![vec![0u8; stats_map.value_size() as usize]; self.nr_cpus];
+
+ for stat in 0..atropos_sys::stat_idx_ATROPOS_NR_STATS {
+ let cpu_stat_vec = stats_map
+ .lookup_percpu(&(stat as u32).to_ne_bytes(), libbpf_rs::MapFlags::ANY)
+ .with_context(|| format!("Failed to lookup stat {}", stat))?
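+ // lookup_percpu() yields one value per possible CPU; the stats
+ // map is expected to be pre-populated for every stat index, hence
+ // the expect() below. The per-CPU values are summed, then the
+ // slots are reset to zero for the next interval.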
+ .expect("per-cpu stat should exist"); + let sum = cpu_stat_vec + .iter() + .map(|val| { + u64::from_ne_bytes( + val.as_slice() + .try_into() + .expect("Invalid value length in stat map"), + ) + }) + .sum(); + stats_map + .update_percpu( + &(stat as u32).to_ne_bytes(), + &zero_vec, + libbpf_rs::MapFlags::ANY, + ) + .context("Failed to zero stat")?; + stats.push(sum); + } + Ok(stats) + } + + fn report( + &self, + stats: &Vec, + cpu_busy: f64, + processing_dur: Duration, + load_avg: f64, + dom_loads: &Vec, + imbal: &Vec, + ) { + let stat = |idx| stats[idx as usize]; + let total = stat(atropos_sys::stat_idx_ATROPOS_STAT_WAKE_SYNC) + + stat(atropos_sys::stat_idx_ATROPOS_STAT_PREV_IDLE) + + stat(atropos_sys::stat_idx_ATROPOS_STAT_PINNED) + + stat(atropos_sys::stat_idx_ATROPOS_STAT_DIRECT_DISPATCH) + + stat(atropos_sys::stat_idx_ATROPOS_STAT_DSQ_DISPATCH) + + stat(atropos_sys::stat_idx_ATROPOS_STAT_GREEDY) + + stat(atropos_sys::stat_idx_ATROPOS_STAT_LAST_TASK); + + info!( + "cpu={:6.1} load_avg={:7.1} bal={} task_err={} lb_data_err={} proc={:?}ms", + cpu_busy * 100.0, + load_avg, + stats[atropos_sys::stat_idx_ATROPOS_STAT_LOAD_BALANCE as usize], + stats[atropos_sys::stat_idx_ATROPOS_STAT_TASK_GET_ERR as usize], + self.nr_lb_data_errors, + processing_dur.as_millis(), + ); + + let stat_pct = |idx| stat(idx) as f64 / total as f64 * 100.0; + + info!( + "tot={:6} wsync={:4.1} prev_idle={:4.1} pin={:4.1} dir={:4.1} dq={:4.1} greedy={:4.1}", + total, + stat_pct(atropos_sys::stat_idx_ATROPOS_STAT_WAKE_SYNC), + stat_pct(atropos_sys::stat_idx_ATROPOS_STAT_PREV_IDLE), + stat_pct(atropos_sys::stat_idx_ATROPOS_STAT_PINNED), + stat_pct(atropos_sys::stat_idx_ATROPOS_STAT_DIRECT_DISPATCH), + stat_pct(atropos_sys::stat_idx_ATROPOS_STAT_DSQ_DISPATCH), + stat_pct(atropos_sys::stat_idx_ATROPOS_STAT_GREEDY), + ); + + for i in 0..self.nr_doms { + info!( + "DOM[{:02}] load={:7.1} to_pull={:7.1} to_push={:7.1}", + i, + dom_loads[i], + if imbal[i] < 0.0 { -imbal[i] } else { 0.0 }, + if imbal[i] > 0.0 { imbal[i] } else { 0.0 }, + ); + } + } + + fn step(&mut self) -> Result<()> { + let started_at = std::time::SystemTime::now(); + let bpf_stats = self.read_bpf_stats()?; + let cpu_busy = self.get_cpu_busy()?; + + let mut lb = LoadBalancer::new( + self.skel.maps_mut(), + &mut self.task_loads, + self.nr_doms, + self.load_decay_factor, + &mut self.nr_lb_data_errors, + ); + + lb.read_task_loads(started_at.duration_since(self.prev_at)?)?; + lb.calculate_dom_load_balance()?; + + if self.balance_load { + lb.load_balance()?; + } + + // Extract fields needed for reporting and drop lb to release + // mutable borrows. + let (load_avg, dom_loads, imbal) = (lb.load_avg, lb.dom_loads, lb.imbal); + + self.report( + &bpf_stats, + cpu_busy, + std::time::SystemTime::now().duration_since(started_at)?, + load_avg, + &dom_loads, + &imbal, + ); + + self.prev_at = started_at; + Ok(()) + } + + fn read_bpf_exit_type(&mut self) -> i32 { + unsafe { std::ptr::read_volatile(&self.skel.bss().exit_type as *const _) } + } + + fn report_bpf_exit_type(&mut self) -> Result<()> { + // Report msg if EXT_OPS_EXIT_ERROR. 
+ match self.read_bpf_exit_type() { + 0 => Ok(()), + etype if etype == 2 => { + let cstr = unsafe { CStr::from_ptr(self.skel.bss().exit_msg.as_ptr() as *const _) }; + let msg = cstr + .to_str() + .context("Failed to convert exit msg to string") + .unwrap(); + bail!("BPF exit_type={} msg={}", etype, msg); + } + etype => { + info!("BPF exit_type={}", etype); + Ok(()) + } + } + } +} + +impl<'a> Drop for Scheduler<'a> { + fn drop(&mut self) { + if let Some(struct_ops) = self.struct_ops.take() { + drop(struct_ops); + } + } +} + +fn main() -> Result<()> { + let opts = Opts::parse(); + + let llv = match opts.verbose { + 0 => simplelog::LevelFilter::Info, + 1 => simplelog::LevelFilter::Debug, + _ => simplelog::LevelFilter::Trace, + }; + let mut lcfg = simplelog::ConfigBuilder::new(); + lcfg.set_time_level(simplelog::LevelFilter::Error) + .set_location_level(simplelog::LevelFilter::Off) + .set_target_level(simplelog::LevelFilter::Off) + .set_thread_level(simplelog::LevelFilter::Off); + simplelog::TermLogger::init( + llv, + lcfg.build(), + simplelog::TerminalMode::Stderr, + simplelog::ColorChoice::Auto, + )?; + + let shutdown = Arc::new(AtomicBool::new(false)); + let shutdown_clone = shutdown.clone(); + ctrlc::set_handler(move || { + shutdown_clone.store(true, Ordering::Relaxed); + }) + .context("Error setting Ctrl-C handler")?; + + let mut sched = Scheduler::init(&opts)?; + + while !shutdown.load(Ordering::Relaxed) && sched.read_bpf_exit_type() == 0 { + std::thread::sleep(Duration::from_secs_f64(opts.interval)); + sched.step()?; + } + + sched.report_bpf_exit_type() +}