Allow spawning of an arbitrary number of tasks. #16

Merged: 4 commits, Apr 8, 2021
4 changes: 0 additions & 4 deletions src/lib.rs
@@ -194,9 +194,6 @@ pub use runtime::runner::{PortfolioRunner, Runner};
 #[derive(Clone, Debug)]
 #[non_exhaustive]
 pub struct Config {
-    /// Maximum number of supported tasks (includes threads and async tasks)
-    pub max_tasks: usize,
-
     /// Stack size allocated for each thread
     pub stack_size: usize,

@@ -212,7 +209,6 @@ impl Config {
     /// Create a new default configuration
     pub fn new() -> Self {
         Self {
-            max_tasks: 16usize,
             stack_size: 0x8000,
             failure_persistence: FailurePersistence::Print,
             max_steps: MaxSteps::FailAfter(1_000_000),
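Note: the user-visible effect of the `src/lib.rs` change is that `Config` no longer exposes a task limit. A minimal sketch of the old-style usage this removes (the `max_tasks` assignment is hypothetical pre-PR code; the remaining field is unchanged):

```rust
use shuttle::Config;

fn main() {
    let mut config = Config::new();
    // config.max_tasks = 32; // field removed by this PR; task storage now grows on demand
    config.stack_size = 0x8000; // other knobs, like stack size, are unchanged
}
```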
13 changes: 6 additions & 7 deletions src/runtime/execution.rs
@@ -1,5 +1,5 @@
 use crate::runtime::failure::persist_failure;
-use crate::runtime::task::{Task, TaskId, TaskState, TaskType, MAX_INLINE_TASKS};
+use crate::runtime::task::{Task, TaskId, TaskState, TaskType, DEFAULT_INLINE_TASKS};
 use crate::runtime::thread::future::ThreadFuture;
 use crate::scheduler::{Schedule, Scheduler};
 use crate::{Config, MaxSteps};
@@ -87,7 +87,7 @@ impl Execution {
             .tasks
             .iter()
             .map(|t| (t.id, t.state))
-            .collect::<SmallVec<[_; MAX_INLINE_TASKS]>>();
+            .collect::<SmallVec<[_; DEFAULT_INLINE_TASKS]>>();
         if task_states.iter().any(|(_, s)| *s == TaskState::Blocked) {
             panic!(
                 "{}",
@@ -160,7 +160,7 @@ impl Execution {
 pub(crate) struct ExecutionState {
     pub config: Config,
     // invariant: tasks are never removed from this list
-    tasks: Vec<Task>,
+    tasks: SmallVec<[Task; DEFAULT_INLINE_TASKS]>,
     // invariant: if this transitions to Stopped or Finished, it can never change again
     current_task: ScheduledTask,
     // the task the scheduler has chosen to run next
@@ -201,10 +201,9 @@ impl ScheduledTask {

 impl ExecutionState {
     fn new(config: Config, scheduler: Rc<RefCell<dyn Scheduler>>, initial_schedule: Schedule) -> Self {
-        let max_tasks = config.max_tasks;
         Self {
             config,
-            tasks: Vec::with_capacity(max_tasks),
+            tasks: SmallVec::new(),
             current_task: ScheduledTask::None,
             next_task: ScheduledTask::None,
             scheduler,
@@ -256,7 +255,7 @@ impl ExecutionState {
         // invalid state, but no one should still be accessing the tasks anyway.
         let (mut tasks, final_state) = Self::with(|state| {
             assert!(state.current_task == ScheduledTask::Stopped || state.current_task == ScheduledTask::Finished);
-            (std::mem::replace(&mut state.tasks, Vec::new()), state.current_task)
+            (std::mem::replace(&mut state.tasks, SmallVec::new()), state.current_task)
         });

         for task in tasks.drain(..) {
@@ -379,7 +378,7 @@ impl ExecutionState {
             .iter()
             .filter(|t| t.runnable())
             .map(|t| t.id)
-            .collect::<SmallVec<[_; MAX_INLINE_TASKS]>>();
+            .collect::<SmallVec<[_; DEFAULT_INLINE_TASKS]>>();

         if runnable.is_empty() {
             self.next_task = ScheduledTask::Finished;
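Note: replacing `Vec::with_capacity(max_tasks)` with `SmallVec<[Task; DEFAULT_INLINE_TASKS]>` keeps small executions allocation-free while letting large ones grow without bound. A standalone sketch of that behavior using the `smallvec` crate directly (element type simplified to `u32`; not shuttle code):

```rust
use smallvec::SmallVec;

const DEFAULT_INLINE_TASKS: usize = 16;

fn main() {
    let mut tasks: SmallVec<[u32; DEFAULT_INLINE_TASKS]> = SmallVec::new();

    // The first 16 elements are stored inline, with no heap allocation.
    for i in 0..DEFAULT_INLINE_TASKS as u32 {
        tasks.push(i);
    }
    assert!(!tasks.spilled());

    // The 17th element spills to the heap instead of hitting a fixed cap,
    // which is what allows an arbitrary number of tasks.
    tasks.push(16);
    assert!(tasks.spilled());
}
```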
22 changes: 13 additions & 9 deletions src/runtime/task/mod.rs
@@ -1,6 +1,7 @@
+use bitvec::prelude::*;
+use bitvec::vec::BitVec;
 use futures::future::BoxFuture;
 use futures::task::Waker;
-use smallvec::SmallVec;
 use std::cell::RefCell;
 use std::fmt::Debug;
 use std::rc::Rc;
@@ -28,7 +29,7 @@ use waker::make_waker;
 // continuation and runs it until that continuation yields, which happens when its thread decides
 // it might want to context switch (e.g., because it's blocked on a lock).

-pub(crate) const MAX_INLINE_TASKS: usize = 16;
+pub(crate) const DEFAULT_INLINE_TASKS: usize = 16;

 /// A `Task` represents a user-level unit of concurrency. Each task has an `id` that is unique within
 /// the execution, and a `state` reflecting whether the task is runnable (enabled) or not.
@@ -175,31 +176,34 @@ impl From<TaskId> for usize {
 // TODO this probably won't work well with large numbers of tasks -- maybe a BitVec?
 #[derive(PartialEq, Eq)]
 pub(crate) struct TaskSet {
-    tasks: SmallVec<[bool; MAX_INLINE_TASKS]>,
+    tasks: BitVec,
 }

 impl TaskSet {
-    pub fn new(max_tasks: usize) -> Self {
+    pub fn new() -> Self {
         Self {
-            // Need to create the inner vec! to ensure elements are initialized
-            tasks: SmallVec::from_vec(vec![false; max_tasks]),
+            tasks: BitVec::from_bitslice(bits![0; DEFAULT_INLINE_TASKS]),
         }
     }

     pub fn contains(&self, tid: TaskId) -> bool {
-        self.tasks[tid.0]
+        // Return false if tid is outside the TaskSet
+        (tid.0 < self.tasks.len()) && self.tasks[tid.0]
     }

     pub fn is_empty(&self) -> bool {
         self.tasks.iter().all(|b| !*b)
     }

     pub fn insert(&mut self, tid: TaskId) {
-        self.tasks[tid.0] = true;
+        if tid.0 >= self.tasks.len() {
+            self.tasks.resize(1 + tid.0, false);
+        }
+        *self.tasks.get_mut(tid.0).unwrap() = true;
     }

     pub fn remove(&mut self, tid: TaskId) -> bool {
-        std::mem::replace(&mut self.tasks[tid.0], false)
+        std::mem::replace(&mut self.tasks.get_mut(tid.0).unwrap(), false)
     }

     pub fn iter(&self) -> impl Iterator<Item = TaskId> + '_ {
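Note: the new `TaskSet` methods guard and grow the underlying `BitVec` rather than indexing blindly. A standalone sketch of those grow-on-demand semantics using the `bitvec` crate directly (not shuttle code):

```rust
use bitvec::prelude::*;

const DEFAULT_INLINE_TASKS: usize = 16;

fn main() {
    // TaskSet::new starts with DEFAULT_INLINE_TASKS cleared bits.
    let mut tasks: BitVec = BitVec::from_bitslice(bits![0; DEFAULT_INLINE_TASKS]);

    // contains: an id past the end means "not a member" instead of a panic.
    let tid = 40;
    assert!(!(tid < tasks.len() && tasks[tid]));

    // insert: extend the vector first when the id is out of range.
    if tid >= tasks.len() {
        tasks.resize(tid + 1, false);
    }
    tasks.set(tid, true);
    assert!(tid < tasks.len() && tasks[tid]);
}
```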
8 changes: 4 additions & 4 deletions src/scheduler/pct.rs
@@ -1,8 +1,7 @@
-use crate::runtime::task::TaskId;
+use crate::runtime::task::{TaskId, DEFAULT_INLINE_TASKS};
 use crate::scheduler::data::random::RandomDataSource;
 use crate::scheduler::data::DataSource;
 use crate::scheduler::{Schedule, Scheduler};
-use crate::Config;
 use rand::rngs::OsRng;
 use rand::seq::SliceRandom;
 use rand::{Rng, RngCore, SeedableRng};
@@ -41,13 +40,14 @@ impl PctScheduler {
         assert!(max_depth > 0);

         let rng = Pcg64Mcg::seed_from_u64(seed);
-        let config: Config = Default::default();

+        // TODO This implementation crashes if we have an application that spawns more than `DEFAULT_INLINE_TASKS`
+        // TODO Fix the code so that we can handle an arbitrary number of tasks
         Self {
             max_iterations,
             max_depth,
             iterations: 0,
-            priority_queue: (0..config.max_tasks).map(TaskId::from).collect::<Vec<_>>(),
+            priority_queue: (0..DEFAULT_INLINE_TASKS).map(TaskId::from).collect::<Vec<_>>(),
             change_points: vec![],
             max_steps: 0,
             steps: 0,
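Note: the TODO exists because the priority queue is seeded with exactly `DEFAULT_INLINE_TASKS` priorities, so any later `TaskId` has none. One possible shape of the fix (a hypothetical sketch only, not part of this PR; `ensure_priority` and its `usize` task ids are illustrative) is to grow the queue lazily, giving each newly observed task a random priority:

```rust
use rand::Rng;

// Hypothetical helper, called whenever the scheduler observes a task id:
// unseen tasks are spliced in at a random position, which corresponds to
// assigning them a random PCT priority.
fn ensure_priority(priority_queue: &mut Vec<usize>, tid: usize, rng: &mut impl Rng) {
    if !priority_queue.contains(&tid) {
        let pos = rng.gen_range(0..=priority_queue.len());
        priority_queue.insert(pos, tid);
    }
}
```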
6 changes: 3 additions & 3 deletions src/sync/mpsc.rs
@@ -1,5 +1,5 @@
 use crate::runtime::execution::ExecutionState;
-use crate::runtime::task::{TaskId, MAX_INLINE_TASKS};
+use crate::runtime::task::{TaskId, DEFAULT_INLINE_TASKS};
 use crate::runtime::thread;
 use smallvec::SmallVec;
 use std::cell::RefCell;
@@ -54,8 +54,8 @@ struct ChannelState<T> {
     messages: SmallVec<[T; MAX_INLINE_MESSAGES]>, // messages in the channel
     known_senders: usize,                         // number of senders referencing this channel
     known_receivers: usize,                       // number of receivers referencing this channel
-    waiting_senders: SmallVec<[TaskId; MAX_INLINE_TASKS]>, // list of currently blocked senders
-    waiting_receivers: SmallVec<[TaskId; MAX_INLINE_TASKS]>, // list of currently blocked receivers
+    waiting_senders: SmallVec<[TaskId; DEFAULT_INLINE_TASKS]>, // list of currently blocked senders
+    waiting_receivers: SmallVec<[TaskId; DEFAULT_INLINE_TASKS]>, // list of currently blocked receivers
 }

 impl<T> Debug for ChannelState<T> {
2 changes: 1 addition & 1 deletion src/sync/mutex.rs
@@ -32,7 +32,7 @@ impl<T> Mutex<T> {
     pub fn new(value: T) -> Self {
         let state = MutexState {
             holder: None,
-            waiters: TaskSet::new(ExecutionState::with(|s| s.config.max_tasks)),
+            waiters: TaskSet::new(),
         };

         Self {
7 changes: 3 additions & 4 deletions src/sync/rwlock.rs
@@ -38,11 +38,10 @@ enum RwLockType {
 impl<T> RwLock<T> {
     /// Create a new instance of an `RwLock<T>` which is unlocked.
     pub fn new(value: T) -> Self {
-        let max_tasks = ExecutionState::with(|s| s.config.max_tasks);
         let state = RwLockState {
             holder: RwLockHolder::None,
-            waiting_readers: TaskSet::new(max_tasks),
-            waiting_writers: TaskSet::new(max_tasks),
+            waiting_readers: TaskSet::new(),
+            waiting_writers: TaskSet::new(),
         };

         Self {
@@ -146,7 +145,7 @@ impl<T> RwLock<T> {
                 state.holder = RwLockHolder::Write(me);
             }
             (RwLockType::Read, RwLockHolder::None) => {
-                let mut readers = TaskSet::new(ExecutionState::with(|s| s.config.max_tasks));
+                let mut readers = TaskSet::new();
                 readers.insert(me);
                 state.holder = RwLockHolder::Read(readers);
             }
41 changes: 19 additions & 22 deletions tests/basic/config.rs
@@ -3,32 +3,29 @@ use shuttle::sync::Mutex;
 use shuttle::{thread, Config, Runner};
 use std::sync::Arc;

-fn check_max_tasks(max_tasks: usize, num_spawn: usize) {
-    let mut config = Config::new();
-    config.max_tasks = max_tasks;
-
-    let scheduler = RandomScheduler::new(100);
+#[test]
+fn many_tasks_with_mutex() {
> Review comment (Member): Once we fix PCT, we should duplicate this test and run it under PCT as well?
>
> Reply (Member, author): 👍

+    let num_spawn = 1000;
+    let config = Config::new();
+    let scheduler = RandomScheduler::new(10);
     let runner = Runner::new(scheduler, config);
     runner.run(move || {
-        let lock = Arc::new(Mutex::new(()));
-        for _ in 0..num_spawn {
-            let mlock = Arc::clone(&lock);
-            thread::spawn(move || {
-                mlock.lock().unwrap();
-            });
-        }
+        let count = Arc::new(Mutex::new(0));
+        let handles = (0..num_spawn)
+            .map(|_| {
+                let count = Arc::clone(&count);
+                thread::spawn(move || {
+                    *count.lock().unwrap() += 1;
+                })
+            })
+            .collect::<Vec<_>>();
+
+        for handle in handles {
+            handle.join().unwrap();
+        }
+
+        assert_eq!(*count.lock().unwrap(), num_spawn);
     });
 }
-
-#[test]
-#[should_panic(expected = "index out of bounds")]
-fn max_task_fail() {
-    check_max_tasks(5, 5); // initial thread adds 1
-}
-
-#[test]
-fn max_task_ok() {
-    check_max_tasks(5, 4);
-}

 #[test]
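Note: for the review thread above, a PCT twin of `many_tasks_with_mutex` might look like the sketch below once the PCT task limit is fixed. This is hypothetical: the `PctScheduler` import path and constructor arguments are assumptions, not confirmed by this diff:

```rust
use shuttle::scheduler::PctScheduler; // assumed path, by analogy with RandomScheduler
use shuttle::sync::Mutex;
use shuttle::{thread, Config, Runner};
use std::sync::Arc;

#[test]
fn many_tasks_with_mutex_pct() {
    let config = Config::new();
    let scheduler = PctScheduler::new(3, 10); // assumed (max_depth, max_iterations)
    let runner = Runner::new(scheduler, config);
    runner.run(move || {
        let count = Arc::new(Mutex::new(0));
        let handles = (0..1000)
            .map(|_| {
                let count = Arc::clone(&count);
                thread::spawn(move || {
                    *count.lock().unwrap() += 1;
                })
            })
            .collect::<Vec<_>>();
        for handle in handles {
            handle.join().unwrap();
        }
        assert_eq!(*count.lock().unwrap(), 1000);
    });
}
```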
55 changes: 29 additions & 26 deletions tests/basic/mpsc.rs
@@ -1,5 +1,5 @@
 use shuttle::sync::mpsc::{channel, sync_channel, RecvError};
-use shuttle::{check_dfs, thread};
+use shuttle::{check_dfs, check_random, thread};
 use test_env_log::test;

 // The following tests (prefixed with mpsc_loom) are from the
@@ -423,33 +423,36 @@ fn mpsc_oneshot_single_thread_recv_chan_close() {
 );
 }

+fn mpsc_senders_with_blocking_inner(num_senders: usize, channel_size: usize) {
+    assert!(num_senders >= channel_size);
+    let num_receives = num_senders - channel_size;
+    let (tx, rx) = sync_channel::<usize>(channel_size);
+    let senders = (0..num_senders)
+        .map(move |i| {
+            let tx = tx.clone();
+            thread::spawn(move || {
+                tx.send(i).unwrap();
+            })
+        })
+        .collect::<Vec<_>>();
+
+    // Receive enough messages to ensure no sender will block
+    for _ in 0..num_receives {
+        rx.recv().unwrap();
+    }
+    for sender in senders {
+        sender.join().unwrap();
+    }
+}
+
 #[test]
-fn mpsc_many_senders_with_blocking() {
-    const NUM_SENDERS: usize = 4;
-    const CHANNEL_SIZE: usize = 2;
-    const NUM_RECEIVES: usize = NUM_SENDERS - CHANNEL_SIZE;
-    check_dfs(
-        || {
-            let (tx, rx) = sync_channel::<usize>(CHANNEL_SIZE);
-            let senders = (0..NUM_SENDERS)
-                .map(move |i| {
-                    let tx = tx.clone();
-                    thread::spawn(move || {
-                        tx.send(i).unwrap();
-                    })
-                })
-                .collect::<Vec<_>>();
-
-            // Receive enough messages to ensure no sender will block
-            for _ in 0..NUM_RECEIVES {
-                rx.recv().unwrap();
-            }
-            for sender in senders {
-                sender.join().unwrap();
-            }
-        },
-        None,
-    );
+fn mpsc_some_senders_with_blocking() {
+    check_dfs(|| mpsc_senders_with_blocking_inner(4, 2), None);
 }

+#[test]
+fn mpsc_many_senders_with_blocking() {
+    check_random(|| mpsc_senders_with_blocking_inner(1000, 500), 10);
+}
+
 #[test]