Hi Christoph,
kernel test robot noticed the following build warnings:
[auto build test WARNING on aa511ff8218b3fb328181fbaac48aa5e9c5c6d93]
url: https://github.com/intel-lab-lkp/linux/commits/Christoph-B-hmwalder/drbd-Rename-per-connection-worker-thread-to-sender/20230928-174054
base: aa511ff8218b3fb328181fbaac48aa5e9c5c6d93
patch link: https://lore.kernel.org/r/20230928093852.676786-2-christoph.boehmwalder%40linbit.com
patch subject: [PATCH 1/5] drbd: Rename per-connection "worker" thread to "sender"
config: i386-randconfig-061-20230929 (https://download.01.org/0day-ci/archive/20230929/202309291915.sFSScoxj-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20230929/202309291915.sFSScoxj-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags:
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202309291915.sFSScoxj-lkp@intel.com/
sparse warnings: (new ones prefixed by >>)
>> drivers/block/drbd/drbd_sender.c:618:39: sparse: sparse: incompatible types in comparison expression (different address spaces):
>> drivers/block/drbd/drbd_sender.c:618:39: sparse: struct disk_conf [noderef] __rcu *
>> drivers/block/drbd/drbd_sender.c:618:39: sparse: struct disk_conf *
drivers/block/drbd/drbd_sender.c:509:14: sparse: sparse: incompatible types in comparison expression (different address spaces):
drivers/block/drbd/drbd_sender.c:509:14: sparse: struct disk_conf [noderef] __rcu *
drivers/block/drbd/drbd_sender.c:509:14: sparse: struct disk_conf *
drivers/block/drbd/drbd_sender.c:510:16: sparse: sparse: incompatible types in comparison expression (different address spaces):
>> drivers/block/drbd/drbd_sender.c:510:16: sparse: struct fifo_buffer [noderef] __rcu *
>> drivers/block/drbd/drbd_sender.c:510:16: sparse: struct fifo_buffer *
drivers/block/drbd/drbd_sender.c:560:13: sparse: sparse: incompatible types in comparison expression (different address spaces):
drivers/block/drbd/drbd_sender.c:560:13: sparse: struct fifo_buffer [noderef] __rcu *
drivers/block/drbd/drbd_sender.c:560:13: sparse: struct fifo_buffer *
drivers/block/drbd/drbd_sender.c:564:39: sparse: sparse: incompatible types in comparison expression (different address spaces):
drivers/block/drbd/drbd_sender.c:564:39: sparse: struct disk_conf [noderef] __rcu *
drivers/block/drbd/drbd_sender.c:564:39: sparse: struct disk_conf *
drivers/block/drbd/drbd_sender.c:999:22: sparse: sparse: incompatible types in comparison expression (different address spaces):
drivers/block/drbd/drbd_sender.c:999:22: sparse: struct disk_conf [noderef] __rcu *
drivers/block/drbd/drbd_sender.c:999:22: sparse: struct disk_conf *
drivers/block/drbd/drbd_sender.c:1545:32: sparse: sparse: incompatible types in comparison expression (different address spaces):
drivers/block/drbd/drbd_sender.c:1545:32: sparse: struct disk_conf [noderef] __rcu *
drivers/block/drbd/drbd_sender.c:1545:32: sparse: struct disk_conf *
drivers/block/drbd/drbd_sender.c:1654:32: sparse: sparse: incompatible types in comparison expression (different address spaces):
drivers/block/drbd/drbd_sender.c:1654:32: sparse: struct disk_conf [noderef] __rcu *
drivers/block/drbd/drbd_sender.c:1654:32: sparse: struct disk_conf *
drivers/block/drbd/drbd_sender.c:1693:16: sparse: sparse: incompatible types in comparison expression (different address spaces):
drivers/block/drbd/drbd_sender.c:1693:16: sparse: struct fifo_buffer [noderef] __rcu *
drivers/block/drbd/drbd_sender.c:1693:16: sparse: struct fifo_buffer *
drivers/block/drbd/drbd_sender.c:1722:34: sparse: sparse: incompatible types in comparison expression (different address spaces):
>> drivers/block/drbd/drbd_sender.c:1722:34: sparse: struct net_conf [noderef] __rcu *
>> drivers/block/drbd/drbd_sender.c:1722:34: sparse: struct net_conf *
drivers/block/drbd/drbd_sender.c:1895:38: sparse: sparse: incompatible types in comparison expression (different address spaces):
drivers/block/drbd/drbd_sender.c:1895:38: sparse: struct net_conf [noderef] __rcu *
drivers/block/drbd/drbd_sender.c:1895:38: sparse: struct net_conf *
drivers/block/drbd/drbd_sender.c:2102:14: sparse: sparse: incompatible types in comparison expression (different address spaces):
drivers/block/drbd/drbd_sender.c:2102:14: sparse: struct net_conf [noderef] __rcu *
drivers/block/drbd/drbd_sender.c:2102:14: sparse: struct net_conf *
drivers/block/drbd/drbd_sender.c:2157:14: sparse: sparse: incompatible types in comparison expression (different address spaces):
drivers/block/drbd/drbd_sender.c:2157:14: sparse: struct net_conf [noderef] __rcu *
drivers/block/drbd/drbd_sender.c:2157:14: sparse: struct net_conf *
drivers/block/drbd/drbd_sender.c:59:25: sparse: sparse: context imbalance in 'drbd_md_endio' - unexpected unlock
drivers/block/drbd/drbd_sender.c: note: in included file:
drivers/block/drbd/drbd_int.h:1661:14: sparse: sparse: incompatible types in comparison expression (different address spaces):
drivers/block/drbd/drbd_int.h:1661:14: sparse: struct disk_conf [noderef] __rcu *
drivers/block/drbd/drbd_int.h:1661:14: sparse: struct disk_conf *
drivers/block/drbd/drbd_int.h:1661:14: sparse: sparse: incompatible types in comparison expression (different address spaces):
drivers/block/drbd/drbd_int.h:1661:14: sparse: struct disk_conf [noderef] __rcu *
drivers/block/drbd/drbd_int.h:1661:14: sparse: struct disk_conf *
drivers/block/drbd/drbd_int.h:2073:14: sparse: sparse: incompatible types in comparison expression (different address spaces):
drivers/block/drbd/drbd_int.h:2073:14: sparse: struct net_conf [noderef] __rcu *
drivers/block/drbd/drbd_int.h:2073:14: sparse: struct net_conf *
vim +618 drivers/block/drbd/drbd_sender.c
9958c857c760eec drivers/block/drbd/drbd_worker.c Philipp Reisner 2011-05-03 495
0d11f3cf279c5ad drivers/block/drbd/drbd_worker.c Christoph Böhmwalder 2023-03-30 496 static int drbd_rs_controller(struct drbd_peer_device *peer_device, unsigned int sect_in)
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 497 {
0d11f3cf279c5ad drivers/block/drbd/drbd_worker.c Christoph Böhmwalder 2023-03-30 498 struct drbd_device *device = peer_device->device;
daeda1cca91d58b drivers/block/drbd/drbd_worker.c Philipp Reisner 2011-05-03 499 struct disk_conf *dc;
7f34f61490ee87a drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-22 500 unsigned int want; /* The number of sectors we want in-flight */
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 501 int req_sect; /* Number of sectors to request in this turn */
7f34f61490ee87a drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-22 502 int correction; /* Number of sectors more we need in-flight */
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 503 int cps; /* correction per invocation of drbd_rs_controller() */
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 504 int steps; /* Number of time steps to plan ahead */
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 505 int curr_corr;
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 506 int max_sect;
813472ced7fac73 drivers/block/drbd/drbd_worker.c Philipp Reisner 2011-05-03 507 struct fifo_buffer *plan;
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 508
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 509 dc = rcu_dereference(device->ldev->disk_conf);
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 @510 plan = rcu_dereference(device->rs_plan_s);
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 511
813472ced7fac73 drivers/block/drbd/drbd_worker.c Philipp Reisner 2011-05-03 512 steps = plan->size; /* (dc->c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 513
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 514 if (device->rs_in_flight + sect_in == 0) { /* At start of resync */
daeda1cca91d58b drivers/block/drbd/drbd_worker.c Philipp Reisner 2011-05-03 515 want = ((dc->resync_rate * 2 * SLEEP_TIME) / HZ) * steps;
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 516 } else { /* normal path */
daeda1cca91d58b drivers/block/drbd/drbd_worker.c Philipp Reisner 2011-05-03 517 want = dc->c_fill_target ? dc->c_fill_target :
daeda1cca91d58b drivers/block/drbd/drbd_worker.c Philipp Reisner 2011-05-03 518 sect_in * dc->c_delay_target * HZ / (SLEEP_TIME * 10);
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 519 }
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 520
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 521 correction = want - device->rs_in_flight - plan->total;
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 522
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 523 /* Plan ahead */
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 524 cps = correction / steps;
813472ced7fac73 drivers/block/drbd/drbd_worker.c Philipp Reisner 2011-05-03 525 fifo_add_val(plan, cps);
813472ced7fac73 drivers/block/drbd/drbd_worker.c Philipp Reisner 2011-05-03 526 plan->total += cps * steps;
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 527
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 528 /* What we do in this step */
813472ced7fac73 drivers/block/drbd/drbd_worker.c Philipp Reisner 2011-05-03 529 curr_corr = fifo_push(plan, 0);
813472ced7fac73 drivers/block/drbd/drbd_worker.c Philipp Reisner 2011-05-03 530 plan->total -= curr_corr;
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 531
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 532 req_sect = sect_in + curr_corr;
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 533 if (req_sect < 0)
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 534 req_sect = 0;
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 535
daeda1cca91d58b drivers/block/drbd/drbd_worker.c Philipp Reisner 2011-05-03 536 max_sect = (dc->c_max_rate * 2 * SLEEP_TIME) / HZ;
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 537 if (req_sect > max_sect)
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 538 req_sect = max_sect;
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 539
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 540 /*
d01801710265cfb drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 541 drbd_warn(device, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 542 sect_in, device->rs_in_flight, want, correction,
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 543 steps, cps, device->rs_planed, curr_corr, req_sect);
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 544 */
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 545
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 546 return req_sect;
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 547 }
778f271dfe7a717 drivers/block/drbd/drbd_worker.c Philipp Reisner 2010-07-06 548
0d11f3cf279c5ad drivers/block/drbd/drbd_worker.c Christoph Böhmwalder 2023-03-30 549 static int drbd_rs_number_requests(struct drbd_peer_device *peer_device)
e65f440d474d7d6 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2010-11-05 550 {
0d11f3cf279c5ad drivers/block/drbd/drbd_worker.c Christoph Böhmwalder 2023-03-30 551 struct drbd_device *device = peer_device->device;
0e49d7b014c5d59 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-28 552 unsigned int sect_in; /* Number of sectors that came in since the last turn */
0e49d7b014c5d59 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-28 553 int number, mxb;
0e49d7b014c5d59 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-28 554
0e49d7b014c5d59 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-28 555 sect_in = atomic_xchg(&device->rs_sect_in, 0);
0e49d7b014c5d59 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-28 556 device->rs_in_flight -= sect_in;
813472ced7fac73 drivers/block/drbd/drbd_worker.c Philipp Reisner 2011-05-03 557
813472ced7fac73 drivers/block/drbd/drbd_worker.c Philipp Reisner 2011-05-03 558 rcu_read_lock();
0e49d7b014c5d59 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-28 559 mxb = drbd_get_max_buffers(device) / 2;
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 560 if (rcu_dereference(device->rs_plan_s)->size) {
0d11f3cf279c5ad drivers/block/drbd/drbd_worker.c Christoph Böhmwalder 2023-03-30 561 number = drbd_rs_controller(peer_device, sect_in) >> (BM_BLOCK_SHIFT - 9);
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 562 device->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
e65f440d474d7d6 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2010-11-05 563 } else {
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 564 device->c_sync_rate = rcu_dereference(device->ldev->disk_conf)->resync_rate;
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 565 number = SLEEP_TIME * device->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
e65f440d474d7d6 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2010-11-05 566 }
813472ced7fac73 drivers/block/drbd/drbd_worker.c Philipp Reisner 2011-05-03 567 rcu_read_unlock();
e65f440d474d7d6 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2010-11-05 568
0e49d7b014c5d59 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-28 569 /* Don't have more than "max-buffers"/2 in-flight.
0e49d7b014c5d59 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-28 570 * Otherwise we may cause the remote site to stall on drbd_alloc_pages(),
0e49d7b014c5d59 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-28 571 * potentially causing a distributed deadlock on congestion during
0e49d7b014c5d59 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-28 572 * online-verify or (checksum-based) resync, if max-buffers,
0e49d7b014c5d59 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-28 573 * socket buffer sizes and resync rate settings are mis-configured. */
7f34f61490ee87a drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-22 574
7f34f61490ee87a drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-22 575 /* note that "number" is in units of "BM_BLOCK_SIZE" (which is 4k),
7f34f61490ee87a drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-22 576 * mxb (as used here, and in drbd_alloc_pages on the peer) is
7f34f61490ee87a drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-22 577 * "number of pages" (typically also 4k),
7f34f61490ee87a drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-22 578 * but "rs_in_flight" is in "sectors" (512 Byte). */
7f34f61490ee87a drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-22 579 if (mxb - device->rs_in_flight/8 < number)
7f34f61490ee87a drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-22 580 number = mxb - device->rs_in_flight/8;
0e49d7b014c5d59 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-28 581
e65f440d474d7d6 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2010-11-05 582 return number;
e65f440d474d7d6 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2010-11-05 583 }
e65f440d474d7d6 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2010-11-05 584
0d11f3cf279c5ad drivers/block/drbd/drbd_worker.c Christoph Böhmwalder 2023-03-30 585 static int make_resync_request(struct drbd_peer_device *const peer_device, int cancel)
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 586 {
0d11f3cf279c5ad drivers/block/drbd/drbd_worker.c Christoph Böhmwalder 2023-03-30 587 struct drbd_device *const device = peer_device->device;
44a4d551846b8c6 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2013-11-22 588 struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 589 unsigned long bit;
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 590 sector_t sector;
155bd9d1abd6049 drivers/block/drbd/drbd_worker.c Christoph Hellwig 2020-09-25 591 const sector_t capacity = get_capacity(device->vdisk);
1816a2b47afae83 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2010-11-11 592 int max_bio_size;
e65f440d474d7d6 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2010-11-05 593 int number, rollback_i, size;
506afb6248af577 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-01-31 594 int align, requeue = 0;
0f0601f4ea2f53c drivers/block/drbd/drbd_worker.c Lars Ellenberg 2010-08-11 595 int i = 0;
92d94ae66aebda5 drivers/block/drbd/drbd_worker.c Philipp Reisner 2016-06-14 596 int discard_granularity = 0;
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 597
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 598 if (unlikely(cancel))
99920dc5c5fe521 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-03-16 599 return 0;
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 600
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 601 if (device->rs_total == 0) {
af85e8e83d160f7 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2010-10-07 602 /* empty resync? */
0d11f3cf279c5ad drivers/block/drbd/drbd_worker.c Christoph Böhmwalder 2023-03-30 603 drbd_resync_finished(peer_device);
99920dc5c5fe521 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-03-16 604 return 0;
af85e8e83d160f7 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2010-10-07 605 }
af85e8e83d160f7 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2010-10-07 606
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 607 if (!get_ldev(device)) {
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 608 /* Since we only need to access device->rsync a
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 609 get_ldev_if_state(device,D_FAILED) would be sufficient, but
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 610 to continue resync with a broken disk makes no sense at
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 611 all */
d01801710265cfb drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 612 drbd_err(device, "Disk broke down during resync!\n");
99920dc5c5fe521 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-03-16 613 return 0;
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 614 }
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 615
9104d31a759fbad drivers/block/drbd/drbd_worker.c Lars Ellenberg 2016-06-14 616 if (connection->agreed_features & DRBD_FF_THIN_RESYNC) {
92d94ae66aebda5 drivers/block/drbd/drbd_worker.c Philipp Reisner 2016-06-14 617 rcu_read_lock();
92d94ae66aebda5 drivers/block/drbd/drbd_worker.c Philipp Reisner 2016-06-14 @618 discard_granularity = rcu_dereference(device->ldev->disk_conf)->rs_discard_granularity;
92d94ae66aebda5 drivers/block/drbd/drbd_worker.c Philipp Reisner 2016-06-14 619 rcu_read_unlock();
92d94ae66aebda5 drivers/block/drbd/drbd_worker.c Philipp Reisner 2016-06-14 620 }
92d94ae66aebda5 drivers/block/drbd/drbd_worker.c Philipp Reisner 2016-06-14 621
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 622 max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9;
0d11f3cf279c5ad drivers/block/drbd/drbd_worker.c Christoph Böhmwalder 2023-03-30 623 number = drbd_rs_number_requests(peer_device);
0e49d7b014c5d59 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-04-28 624 if (number <= 0)
0f0601f4ea2f53c drivers/block/drbd/drbd_worker.c Lars Ellenberg 2010-08-11 625 goto requeue;
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 626
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 627 for (i = 0; i < number; i++) {
506afb6248af577 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-01-31 628 /* Stop generating RS requests when half of the send buffer is filled,
506afb6248af577 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-01-31 629 * but notify TCP that we'd like to have more space. */
44a4d551846b8c6 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2013-11-22 630 mutex_lock(&connection->data.mutex);
44a4d551846b8c6 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2013-11-22 631 if (connection->data.socket) {
506afb6248af577 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-01-31 632 struct sock *sk = connection->data.socket->sk;
506afb6248af577 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-01-31 633 int queued = sk->sk_wmem_queued;
506afb6248af577 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-01-31 634 int sndbuf = sk->sk_sndbuf;
506afb6248af577 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-01-31 635 if (queued > sndbuf / 2) {
506afb6248af577 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-01-31 636 requeue = 1;
506afb6248af577 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-01-31 637 if (sk->sk_socket)
506afb6248af577 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-01-31 638 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 639 }
506afb6248af577 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-01-31 640 } else
506afb6248af577 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-01-31 641 requeue = 1;
44a4d551846b8c6 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2013-11-22 642 mutex_unlock(&connection->data.mutex);
506afb6248af577 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2014-01-31 643 if (requeue)
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 644 goto requeue;
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 645
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 646 next_sector:
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 647 size = BM_BLOCK_SIZE;
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 648 bit = drbd_bm_find_next(device, device->bm_resync_fo);
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 649
4b0715f09655e76 drivers/block/drbd/drbd_worker.c Lars Ellenberg 2010-12-14 650 if (bit == DRBD_END_OF_BITMAP) {
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 651 device->bm_resync_fo = drbd_bm_bits(device);
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 652 put_ldev(device);
99920dc5c5fe521 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-03-16 653 return 0;
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 654 }
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 655
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 656 sector = BM_BIT_TO_SECT(bit);
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 657
0d11f3cf279c5ad drivers/block/drbd/drbd_worker.c Christoph Böhmwalder 2023-03-30 658 if (drbd_try_rs_begin_io(peer_device, sector)) {
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 659 device->bm_resync_fo = bit;
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 660 goto requeue;
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 661 }
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 662 device->bm_resync_fo = bit + 1;
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 663
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 664 if (unlikely(drbd_bm_test_bit(device, bit) == 0)) {
b30ab7913b0a7b1 drivers/block/drbd/drbd_worker.c Andreas Gruenbacher 2011-07-03 665 drbd_rs_complete_io(device, sector);
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 666 goto next_sector;
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 667 }
b411b3637fa71fc drivers/block/drbd/drbd_worker.c Philipp Reisner 2009-09-25 668
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
drbd-y := drbd_buildtag.o drbd_bitmap.o drbd_proc.o
-drbd-y += drbd_worker.o drbd_receiver.o drbd_req.o drbd_actlog.o
+drbd-y += drbd_sender.o drbd_receiver.o drbd_req.o drbd_actlog.o
drbd-y += drbd_main.o drbd_strings.o drbd_nl.o
drbd-y += drbd_interval.o drbd_state.o
drbd-y += drbd_nla.o
@@ -665,7 +665,7 @@ struct drbd_connection {
/* empty member on older kernels without blk_start_plug() */
struct blk_plug receiver_plug;
struct drbd_thread receiver;
- struct drbd_thread worker;
+ struct drbd_thread sender;
struct drbd_thread ack_receiver;
struct workqueue_struct *ack_sender;
@@ -1075,7 +1075,7 @@ extern int drbd_bitmap_io(struct drbd_device *device,
int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
char *why, enum bm_flag flags,
struct drbd_peer_device *peer_device);
-extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
+extern int drbd_bitmap_io_from_sender(struct drbd_device *device,
int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
char *why, enum bm_flag flags,
struct drbd_peer_device *peer_device);
@@ -1422,12 +1422,12 @@ extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
extern int drbd_khelper(struct drbd_device *device, char *cmd);
-/* drbd_worker.c */
+/* drbd_sender.c */
/* bi_end_io handlers */
extern void drbd_md_endio(struct bio *bio);
extern void drbd_peer_request_endio(struct bio *bio);
extern void drbd_request_endio(struct bio *bio);
-extern int drbd_worker(struct drbd_thread *thi);
+extern int drbd_sender(struct drbd_thread *thi);
enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
void drbd_resync_after_changed(struct drbd_device *device);
extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
@@ -1912,7 +1912,7 @@ static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
* w_send_barrier
* _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
* it is much easier and equally valid to count what we queue for the
- * worker, even before it actually was queued or send.
+ * sender, even before it actually was queued or send.
* (drbd_make_request_common; recovery path on read io-error)
* decreased:
* got_BarrierAck (respective tl_clear, tl_clear_barrier)
@@ -279,7 +279,7 @@ void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
*
* This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer gets marked as out of sync. Called from the
- * receiver thread and the worker thread.
+ * receiver thread and the sender thread.
*/
void tl_clear(struct drbd_connection *connection)
{
@@ -2533,7 +2533,7 @@ int set_resource_options(struct drbd_resource *resource, struct res_opts *res_op
for_each_connection_rcu(connection, resource) {
connection->receiver.reset_cpu_mask = 1;
connection->ack_receiver.reset_cpu_mask = 1;
- connection->worker.reset_cpu_mask = 1;
+ connection->sender.reset_cpu_mask = 1;
}
}
err = 0;
@@ -2619,8 +2619,8 @@ struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
connection->receiver.connection = connection;
- drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
- connection->worker.connection = connection;
+ drbd_thread_init(resource, &connection->sender, drbd_sender, "sender");
+ connection->sender.connection = connection;
drbd_thread_init(resource, &connection->ack_receiver, drbd_ack_receiver, "ack_recv");
connection->ack_receiver.connection = connection;
@@ -3497,7 +3497,7 @@ static int w_bitmap_io(struct drbd_work *w, int unused)
*
* While IO on the bitmap happens we freeze application IO thus we ensure
* that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
- * called from worker context. It MUST NOT be used while a previous such
+ * called from sender context. It MUST NOT be used while a previous such
* work is still pending!
*
* Its worker function encloses the call of io_fn() by get_ldev() and
@@ -3509,7 +3509,7 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
char *why, enum bm_flag flags,
struct drbd_peer_device *peer_device)
{
- D_ASSERT(device, current == peer_device->connection->worker.task);
+ D_ASSERT(device, current == peer_device->connection->sender.task);
D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
@@ -3544,7 +3544,7 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
* @flags: Bitmap flags
*
 * freezes application IO while the actual IO operations run. This
- * functions MAY NOT be called from worker context.
+ * functions MAY NOT be called from sender context.
*/
int drbd_bitmap_io(struct drbd_device *device,
int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
@@ -3555,7 +3555,7 @@ int drbd_bitmap_io(struct drbd_device *device,
const bool do_suspend_io = flags & (BM_DONT_CLEAR|BM_DONT_SET|BM_DONT_TEST);
int rv;
- D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
+ D_ASSERT(device, current != first_peer_device(device)->connection->sender.task);
if (do_suspend_io)
drbd_suspend_io(device);
@@ -365,7 +365,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
struct sib_info sib;
int ret;
- if (current == connection->worker.task)
+ if (current == connection->sender.task)
set_bit(CALLBACK_PENDING, &connection->flags);
snprintf(mb, 14, "minor-%d", device_to_minor(device));
@@ -394,7 +394,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
drbd_bcast_event(device, &sib);
notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);
- if (current == connection->worker.task)
+ if (current == connection->sender.task)
clear_bit(CALLBACK_PENDING, &connection->flags);
if (ret < 0) /* Ignore any ERRNOs we got. */
@@ -1349,14 +1349,14 @@ void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_ba
drbd_setup_queue_param(device, bdev, new, o);
}
-/* Starts the worker thread */
+/* Starts the sender thread */
static void conn_reconfig_start(struct drbd_connection *connection)
{
- drbd_thread_start(&connection->worker);
+ drbd_thread_start(&connection->sender);
drbd_flush_workqueue(&connection->sender_work);
}
-/* if still unconfigured, stops worker again. */
+/* if still unconfigured, stops sender again. */
static void conn_reconfig_done(struct drbd_connection *connection)
{
bool stop_threads;
@@ -1368,7 +1368,7 @@ static void conn_reconfig_done(struct drbd_connection *connection)
/* ack_receiver thread and ack_sender workqueue are implicitly
* stopped by receiver in conn_disconnect() */
drbd_thread_stop(&connection->receiver);
- drbd_thread_stop(&connection->worker);
+ drbd_thread_stop(&connection->sender);
}
}
@@ -4362,7 +4362,7 @@ static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
/* If the state engine hasn't stopped the sender thread yet, we
* need to flush the sender work queue before generating the
* DESTROY events here. */
- if (get_t_state(&connection->worker) == RUNNING)
+ if (get_t_state(&connection->sender) == RUNNING)
drbd_flush_workqueue(&connection->sender_work);
	mutex_lock(&notification_mutex);
@@ -4424,7 +4424,7 @@ static int adm_del_resource(struct drbd_resource *resource)
/* Make sure all threads have actually stopped: state handling only
* does drbd_thread_stop_nowait(). */
list_for_each_entry(connection, &resource->connections, connections)
- drbd_thread_stop(&connection->worker);
+ drbd_thread_stop(&connection->sender);
synchronize_rcu();
drbd_free_resource(resource);
return NO_ERROR;
@@ -202,6 +202,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
* not yet completed by the local io subsystem
* these flags may get cleared in any order by
* the worker,
+ * the sender,
* the receiver,
* the bio_endio completion callbacks.
*/
@@ -717,7 +718,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
case SEND_CANCELED:
case SEND_FAILED:
/* real cleanup will be done from tl_clear. just update flags
- * so it is no longer marked as on the worker queue */
+ * so it is no longer marked as on the sender queue
+ */
mod_rq_state(req, m, RQ_NET_QUEUED, 0);
break;
similarity index 99%
rename from drivers/block/drbd/drbd_worker.c
rename to drivers/block/drbd/drbd_sender.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- drbd_worker.c
+ drbd_sender.c
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
@@ -865,7 +865,7 @@ int drbd_resync_finished(struct drbd_peer_device *peer_device)
* resync LRU would be wrong. */
if (drbd_rs_del_all(device)) {
/* In case this is not possible now, most probably because
- * there are P_RS_DATA_REPLY Packets lingering on the worker's
+ * there are P_RS_DATA_REPLY Packets lingering on the sender's
* queue (or even the read operations for those packets
* is not finished by now). Retry in 100ms. */
@@ -1587,7 +1587,7 @@ static bool drbd_pause_after(struct drbd_device *device)
* drbd_resume_next() - Resume resync on all devices that may resync now
* @device: DRBD device.
*
- * Called from process context only (admin command and worker).
+ * Called from process context only (admin command and sender).
*/
static bool drbd_resume_next(struct drbd_device *device)
{
@@ -1783,8 +1783,8 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
}
}
- if (current == connection->worker.task) {
- /* The worker should not sleep waiting for state_mutex,
+ if (current == connection->sender.task) {
+ /* The sender should not sleep waiting for state_mutex,
that can take long */
if (!mutex_trylock(device->state_mutex)) {
set_bit(B_RS_H_DONE, &device->flags);
@@ -1977,7 +1977,7 @@ static void go_diskless(struct drbd_device *device)
* while we detach.
* Any modifications would not be expected anymore, though.
*/
- if (drbd_bitmap_io_from_worker(device, drbd_bm_write,
+ if (drbd_bitmap_io_from_sender(device, drbd_bm_write,
"detach", BM_LOCKED_TEST_ALLOWED, peer_device)) {
if (test_bit(WAS_READ_ERROR, &device->flags)) {
drbd_md_set_flag(device, MDF_FULL_SYNC);
@@ -2142,7 +2142,7 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head *
break;
/* drbd_send() may have called flush_signals() */
- if (get_t_state(&connection->worker) != RUNNING)
+ if (get_t_state(&connection->sender) != RUNNING)
break;
schedule();
@@ -2167,7 +2167,7 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head *
mutex_unlock(&connection->data.mutex);
}
-int drbd_worker(struct drbd_thread *thi)
+int drbd_sender(struct drbd_thread *thi)
{
struct drbd_connection *connection = thi->connection;
struct drbd_work *w = NULL;
@@ -2191,7 +2191,7 @@ int drbd_worker(struct drbd_thread *thi)
if (signal_pending(current)) {
flush_signals(current);
if (get_t_state(thi) == RUNNING) {
- drbd_warn(connection, "Worker got an unexpected signal\n");
+ drbd_warn(connection, "Sender got an unexpected signal\n");
continue;
}
break;
@@ -622,7 +622,7 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
spin_unlock_irqrestore(&device->resource->req_lock, flags);
if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
- D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
+ D_ASSERT(device, current != first_peer_device(device)->connection->sender.task);
wait_for_completion(&done);
}
@@ -1519,14 +1519,14 @@ static void abw_start_sync(struct drbd_device *device, int rv)
}
}
-int drbd_bitmap_io_from_worker(struct drbd_device *device,
+int drbd_bitmap_io_from_sender(struct drbd_device *device,
int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
char *why, enum bm_flag flags,
struct drbd_peer_device *peer_device)
{
int rv;
- D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
+ D_ASSERT(device, current == first_peer_device(device)->connection->sender.task);
/* open coded non-blocking drbd_suspend_io(device); */
atomic_inc(&device->suspend_cnt);
@@ -1841,7 +1841,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
/* We may still be Primary ourselves.
* No harm done if the bitmap still changes,
* redirtied pages will follow later. */
- drbd_bitmap_io_from_worker(device, &drbd_bm_write,
+ drbd_bitmap_io_from_sender(device, &drbd_bm_write,
"demote diskless peer", BM_LOCKED_SET_ALLOWED, peer_device);
put_ldev(device);
}
@@ -1853,7 +1853,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
device->state.conn <= C_CONNECTED && get_ldev(device)) {
/* No changes to the bitmap expected this time, so assert that,
* even though no harm was done if it did change. */
- drbd_bitmap_io_from_worker(device, &drbd_bm_write,
+ drbd_bitmap_io_from_sender(device, &drbd_bm_write,
"demote", BM_LOCKED_TEST_ALLOWED, peer_device);
put_ldev(device);
}