
Commit

Merge pull request #1085 from sched-ext/htejun/layered-updates-more
cpumask, scx_layered: Clean up Cpumask iterator
htejun authored Dec 9, 2024
2 parents 27365f7 + 8d67a62 commit 7e3d61f
Showing 6 changed files with 53 additions and 60 deletions.
93 changes: 43 additions & 50 deletions rust/scx_utils/src/cpumask.rs
@@ -244,9 +244,28 @@ impl Cpumask {
new.mask ^= other.mask.clone();
new
}
}

impl Cpumask {
/// Iterate over each element of a Cpumask, and return the indices with bits
/// set.
///
/// # Examples
///
/// ```rust
/// use log::info;
/// use scx_utils::Cpumask;
/// let str = String::from("all");
/// let mask = Cpumask::from_str(&str).unwrap();
/// for cpu in mask.iter() {
/// info!("cpu {} was set", cpu);
/// }
/// ```
pub fn iter(&self) -> CpumaskIterator {
CpumaskIterator {
mask: self,
index: 0,
}
}

fn fmt_with(&self, f: &mut fmt::Formatter<'_>, case: char) -> fmt::Result {
let mut masks: Vec<u32> = self
.as_raw_slice()
@@ -281,6 +300,28 @@ impl Cpumask {
}
}

pub struct CpumaskIterator<'a> {
mask: &'a Cpumask,
index: usize,
}

impl<'a> Iterator for CpumaskIterator<'a> {
type Item = usize;

fn next(&mut self) -> Option<Self::Item> {
while self.index < *NR_CPU_IDS {
let index = self.index;
self.index += 1;
let bit_val = self.mask.test_cpu(index);
if bit_val {
return Some(index);
}
}

None
}
}

impl fmt::Display for Cpumask {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.fmt_with(f, 'x')
@@ -316,51 +357,3 @@ impl BitXorAssign<&Self> for Cpumask {
self.mask ^= &rhs.mask;
}
}

pub struct CpumaskIntoIterator {
mask: Cpumask,
index: usize,
}

/// Iterate over each element of a Cpumask, and return the indices with bits
/// set.
///
/// # Examples
///
/// ```rust
/// use log::info;
/// use scx_utils::Cpumask;
/// let str = String::from("all");
/// let mask = Cpumask::from_str(&str).unwrap();
/// for cpu in mask.clone().into_iter() {
/// info!("cpu {} was set", cpu);
/// }
/// ```
impl IntoIterator for Cpumask {
type Item = usize;
type IntoIter = CpumaskIntoIterator;

fn into_iter(self) -> CpumaskIntoIterator {
CpumaskIntoIterator {
mask: self,
index: 0,
}
}
}

impl Iterator for CpumaskIntoIterator {
type Item = usize;

fn next(&mut self) -> Option<Self::Item> {
while self.index < *NR_CPU_IDS {
let index = self.index;
self.index += 1;
let bit_val = self.mask.test_cpu(index);
if bit_val {
return Some(index);
}
}

None
}
}
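
For context, the net effect of this file's change is that callers can now walk a Cpumask without consuming or cloning it. A minimal usage sketch, assuming a mask built from the "all" string accepted by from_str as in the doc-comment example above (the FromStr import is included in case from_str comes from the trait rather than an inherent method):

```rust
use std::str::FromStr;
use scx_utils::Cpumask;

fn main() {
    // Build a mask covering all CPUs, as in the doc-comment example above.
    let mask = Cpumask::from_str("all").unwrap();

    // iter() borrows the mask, so it stays usable after iteration and
    // no clone() is needed just to walk the set bits.
    let set_cpus: Vec<usize> = mask.iter().collect();
    println!("{} CPUs set", set_cpus.len());

    // The removed consuming IntoIterator required a clone to keep the mask:
    //     for cpu in mask.clone().into_iter() { ... }
    for cpu in mask.iter() {
        println!("cpu {} is set", cpu);
    }
}
```
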
2 changes: 1 addition & 1 deletion scheds/rust/scx_layered/src/lib.rs
@@ -132,7 +132,7 @@ impl CpuPool {
let mut cpus = cpus_to_match.clone();
let mut cores = bitvec![0; topo.all_cores.len()];

while let Some(cpu) = cpus.as_raw_bitvec().first_one() {
while let Some(cpu) = cpus.iter().next() {
let core = &topo.all_cores[&topo.all_cpus[&cpu].core_id];

if core.span.and(&cpus_to_match.not()).weight() != 0 {
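
For reference, the replacement above relies on iter() visiting set bits in ascending index order, so iter().next() yields the same lowest set CPU that as_raw_bitvec().first_one() did. A hypothetical helper spelling out that equivalence:

```rust
use scx_utils::Cpumask;

// Hypothetical helper: returns the lowest set CPU index, or None if the
// mask is empty. Equivalent to the pre-change
// mask.as_raw_bitvec().first_one().
fn lowest_cpu(mask: &Cpumask) -> Option<usize> {
    mask.iter().next()
}
```
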
12 changes: 6 additions & 6 deletions scheds/rust/scx_layered/src/main.rs
@@ -1247,10 +1247,10 @@ impl<'a> Scheduler<'a> {
core_span.clear_cpu(cpu_id).unwrap();

// Convert them into arrays.
let mut sys_order: Vec<usize> = sys_span.into_iter().collect();
let mut node_order: Vec<usize> = node_span.into_iter().collect();
let mut llc_order: Vec<usize> = llc_span.into_iter().collect();
let mut core_order: Vec<usize> = core_span.into_iter().collect();
let mut sys_order: Vec<usize> = sys_span.iter().collect();
let mut node_order: Vec<usize> = node_span.iter().collect();
let mut llc_order: Vec<usize> = llc_span.iter().collect();
let mut core_order: Vec<usize> = core_span.iter().collect();

// Shuffle them so that different CPUs follow different orders.
// This isn't ideal as random shuffling won't give us complete
@@ -1680,15 +1680,15 @@ impl<'a> Scheduler<'a> {
let node_cpus = node.span.clone();
for (irq, irqmask) in netdev.irqs.iter_mut() {
irqmask.clear_all();
for cpu in available_cpus.as_raw_bitvec().iter_ones() {
for cpu in available_cpus.iter() {
if !node_cpus.test_cpu(cpu) {
continue;
}
let _ = irqmask.set_cpu(cpu);
}
// If no CPUs are available in the node then spread the load across the node
if irqmask.weight() == 0 {
for cpu in node_cpus.as_raw_bitvec().iter_ones() {
for cpu in node_cpus.iter() {
let _ = irqmask.set_cpu(cpu);
}
}
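
The IRQ-mask loop above intersects the available CPUs with the node's CPUs one bit at a time. A hypothetical combinator-style equivalent, shown only to illustrate that iter() yields plain usize indices that can be tested against another mask:

```rust
use scx_utils::Cpumask;

// Hypothetical helper: collect the CPUs of `available` that also belong
// to `node`, mirroring the set_cpu loop above.
fn cpus_in_node(available: &Cpumask, node: &Cpumask) -> Vec<usize> {
    available
        .iter()
        .filter(|&cpu| node.test_cpu(cpu))
        .collect()
}
```
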
2 changes: 1 addition & 1 deletion scheds/rust/scx_rusty/src/domain.rs
@@ -81,7 +81,7 @@ impl DomainGroup {

let mut cpu_dom_map = BTreeMap::new();
for (id, dom) in doms.iter() {
for cpu in dom.mask.clone().into_iter() {
for cpu in dom.mask.iter() {
cpu_dom_map.insert(cpu, *id);
}
}
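
The change above drops the clone() that the old consuming into_iter() forced on every domain mask. A simplified sketch of the same mapping, assuming a plain BTreeMap of domain id to Cpumask rather than scx_rusty's Domain type:

```rust
use std::collections::BTreeMap;
use scx_utils::Cpumask;

// Simplified sketch: map each set CPU to the id of the domain whose mask
// contains it. iter() only borrows each mask, so no per-domain clone is
// needed.
fn cpu_to_dom(doms: &BTreeMap<usize, Cpumask>) -> BTreeMap<usize, usize> {
    let mut cpu_dom_map = BTreeMap::new();
    for (id, mask) in doms.iter() {
        for cpu in mask.iter() {
            cpu_dom_map.insert(cpu, *id);
        }
    }
    cpu_dom_map
}
```
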
2 changes: 1 addition & 1 deletion scheds/rust/scx_rusty/src/main.rs
@@ -399,7 +399,7 @@ impl<'a> Scheduler<'a> {
}

for (id, dom) in domains.doms().iter() {
for cpu in dom.mask().into_iter() {
for cpu in dom.mask().iter() {
skel.maps.rodata_data.cpu_dom_id_map[cpu] = id
.clone()
.try_into()
2 changes: 1 addition & 1 deletion scheds/rust/scx_rusty/src/tuner.rs
@@ -124,7 +124,7 @@ impl Tuner {

let mut avg_util = 0.0f64;
for (dom_id, dom) in self.dom_group.doms().iter() {
for cpu in dom.mask().into_iter() {
for cpu in dom.mask().iter() {
let cpu32 = cpu as u32;
if let (Some(curr), Some(prev)) =
(curr_cpu_stats.get(&cpu32), self.prev_cpu_stats.get(&cpu32))
