From 14c00f73e1f3baf5a72fc6be10d2044c5b327696 Mon Sep 17 00:00:00 2001 From: midnattsol Date: Tue, 3 Dec 2024 12:56:11 +0100 Subject: [PATCH 1/5] feat: add functions for concurrent downloads and file writing - Implemented `download_chunk_stream` and `write_multi_chunks` to handle concurrent downloads and efficient file writing. - Added the main `download` function to coordinate multi-chunk downloads and writes. - Introduced support for cancellation using `tokio::sync::watch`, enabling early termination of both downloads and writes. - Added `DownloadTransferConfig` to allow customization of concurrency, buffer size, and retry limits. These functions provide an efficient way to handle downloads from an `ObjectStore` while ensuring robust error handling and cancellation support. --- object_store/src/local.rs | 282 +++++++++++++++++++++++++++++++++++++- 1 file changed, 280 insertions(+), 2 deletions(-) diff --git a/object_store/src/local.rs b/object_store/src/local.rs index 78fce9c26224..79511b6c8b06 100644 --- a/object_store/src/local.rs +++ b/object_store/src/local.rs @@ -38,8 +38,9 @@ use crate::{ maybe_spawn_blocking, path::{absolute_path_to_url, Path}, util::InvalidGetRange, - Attributes, GetOptions, GetResult, GetResultPayload, ListResult, MultipartUpload, ObjectMeta, - ObjectStore, PutMode, PutMultipartOpts, PutOptions, PutPayload, PutResult, Result, UploadPart, + Attributes, GetOptions, GetRange, GetResult, GetResultPayload, ListResult, MultipartUpload, + ObjectMeta, ObjectStore, PutMode, PutMultipartOpts, PutOptions, PutPayload, PutResult, Result, + UploadPart, }; /// A specialized `Error` for filesystem object store-related errors @@ -155,6 +156,31 @@ pub(crate) enum Error { #[snafu(display("Upload aborted"))] Aborted, + + #[snafu(display("Failed to seek to position in file: {}", source))] + FileSeekError { + source: io::Error, + }, + + #[snafu(display("Failed to write to file: {}", source))] + FileWriteError { + source: io::Error, + }, + + #[snafu(display("Failed to send data to writer for file at '{}': {}", path, source))] + DataSendError { + path: String, + source: 
tokio::sync::mpsc::error::SendError<(usize, Bytes)>, + }, + + #[snafu(display("Failed to download file from '{}': {}", path, source))] + FileDownloadError { + source: io::Error, + path: String, + }, + + #[snafu(display("Download was aborted by user or system"))] + DownloadAborted, } impl From for super::Error { @@ -995,6 +1021,258 @@ fn convert_metadata(metadata: Metadata, location: Path) -> Result { }) } +/// Configuration for download transfer settings. +#[derive(Debug, Clone, Copy)] +pub struct DownloadTransferConfig { + /// The maximum number of concurrent chunks to download. + pub max_concurrent_chunks: usize, + /// The maximum number of bytes to buffer in memory for the download. + /// If `None`, defaults to the value of `max_concurrent_chunks`. + pub chunk_queue_size: Option, + /// The maximum number of retries for downloading a chunk. + pub max_retries: Option, +} + +/// Default implementation for `DownloadTransferConfig`. +impl Default for DownloadTransferConfig { + fn default() -> Self { + Self { + max_concurrent_chunks: 1, + chunk_queue_size: Some(2), + max_retries: None, + } + } +} + +/// Downloads a single chunk from the object store. +/// +/// # Arguments +/// - `store`: A reference-counted `ObjectStore` instance used for downloading. +/// - `location`: The path in the object store to download from. +/// - `opts`: Options for the download request, such as range and metadata preferences. +/// - `sender`: A sender for transmitting downloaded data chunks to the processing pipeline. +/// - `cancellation_alert`: A watch receiver to monitor for cancellation signals, allowing +/// the download to abort gracefully if needed. +/// - `max_retries`: The maximum number of retry attempts for a failed download. +/// +/// # Returns +/// A `Result` indicating success or failure. On success, it returns `()`; on failure, it returns +/// an `Error`. 
+/// +/// # Details +/// The function listens for cancellation signals using `cancellation_alert` and exits early +/// if a cancellation is detected. It also retries download attempts up to `max_retries` in case +/// of transient errors. +async fn download_chunk_stream( + store: Arc, + location: Path, + opts: GetOptions, + sender: tokio::sync::mpsc::Sender<(usize, Bytes)>, + cancellation_alert: tokio::sync::watch::Receiver, + max_retries: usize, +) -> Result<(), Error> { + let mut attempt = 0; + let request = store + .get_opts(&location, opts) + .await + .map_err(|e| Error::Metadata { + source: e.into(), + path: location.to_string(), + })?; + + if let GetResultPayload::Stream(mut stream) = request.payload { + let mut offset = request.range.start; + 'download_chunk: while !*cancellation_alert.borrow() { + let buffer = match stream.try_next().await { + Ok(Some(buffer)) => buffer, + Ok(None) => break 'download_chunk, + Err(_) if attempt < max_retries => { + attempt += 1; + continue; + } + Err(e) => { + return Err(Error::FileDownloadError { + source: e.into(), + path: location.to_string(), + }); + } + }; + + let bytes_readed = buffer.len(); + sender + .send((offset, buffer)) + .await + .map_err(|e| Error::DataSendError { + path: location.to_string(), + source: e, + })?; + offset += bytes_readed; + } + + if *cancellation_alert.borrow() { + return Err(Error::DownloadAborted); + } + } + Ok(()) +} + +/// Writes multiple chunks of downloaded data into a local file. +/// +/// # Arguments +/// - `file`: A mutable reference to the target local file for writing. +/// - `receiver`: A mutable receiver for fetching data chunks from the download pipeline. +/// - `cancellation_alert`: A watch receiver to monitor for cancellation signals, allowing +/// the writing process to abort gracefully if needed. +/// +/// # Returns +/// A `Result` containing the total number of bytes written or an error if the write operation fails. 
+/// +/// # Details +/// This function listens for cancellation signals via `cancellation_alert` and stops processing +/// further chunks if cancellation is detected. It ensures that no unnecessary writes are performed +/// once a cancellation request is received. + +async fn write_multi_chunks( + file: &mut File, + receiver: &mut tokio::sync::mpsc::Receiver<(usize, Bytes)>, + cancellation_alert: tokio::sync::watch::Receiver, +) -> Result { + let mut data = 0; + + while let Some((offset, buffer)) = receiver.recv().await { + // Verificar si se ha solicitado una cancelación + if *cancellation_alert.borrow() { + return Err(Error::DownloadAborted); + } + + file.seek(SeekFrom::Start(offset as u64)) + .map_err(|e| Error::FileSeekError { source: e })?; + file.write_all(&buffer) + .map_err(|e| Error::FileWriteError { source: e })?; + + data += buffer.len() as u64; + } + + Ok(data) +} + +/// Downloads a file from the object store to a local file. +/// +/// # Arguments +/// - `store`: A reference-counted `ObjectStore` instance used for the download. +/// - `location`: The path in the object store to download from. +/// - `opts`: Options for the download request. +/// - `file`: A mutable reference to the local file where the downloaded data will be written. +/// - `transfer_opts`: Optional transfer configuration for managing concurrent downloads and memory usage. +/// +/// # Details +/// The function determines the number of concurrent tasks (`concurrent_tasks`) and the size of the +/// channel buffer (`channel_size`). If `chunk_queue_size` is not provided, it defaults to the value of +/// `max_concurrent_chunks`. The total size of the file is divided into chunks, and each chunk is +/// processed by a concurrent task. +/// +/// The download process listens for cancellation signals via a shared `cancellation_alert`. If a +/// cancellation is requested, the process terminates early, and no further data is downloaded or written. 
+/// +/// # Returns +/// A `Result` containing the total number of bytes written to the local file or an error if the download fails. +/// +/// # Notes +/// This function spawns concurrent tasks for downloading and writing data chunks. The `write_multi_chunks` +/// task ensures that cancellation signals are respected, stopping all operations promptly. + +pub async fn download( + store: Arc, + location: &Path, + opts: GetOptions, + mut file: std::fs::File, + transfer_opts: Option<&DownloadTransferConfig>, +) -> Result { + let req = store.get_opts(&location, opts.clone()).await?; + let transfer_opts = *transfer_opts.unwrap_or(&DownloadTransferConfig::default()); + let concurrent_tasks = transfer_opts.max_concurrent_chunks; + let channel_size = transfer_opts.chunk_queue_size.unwrap_or(concurrent_tasks); + let mut written_bytes: u64 = 0; + match req.payload { + GetResultPayload::Stream(_) => { + let obj_size = req.meta.size; + let chunk_size = (obj_size as f64 / concurrent_tasks as f64).ceil() as usize; + let (sender, mut receiver) = tokio::sync::mpsc::channel(channel_size); + let (notify_cancellation, cancellation_alert) = tokio::sync::watch::channel(false); + + #[derive(Debug)] + enum TaskResult { + Download(Result<(), Error>), + Write(Result), + } + + let mut tasks = tokio::task::JoinSet::new(); + for i in 0..transfer_opts.max_concurrent_chunks { + let chunk_start = i * chunk_size; + let chunk_end = std::cmp::min((i + 1) * chunk_size - 1, obj_size - 1); + let ranged_opts = GetOptions { + range: Some(GetRange::Bounded(chunk_start..(chunk_end + 1)).into()), + ..opts.clone() + }; + let location_clone = location.clone(); + let sender_clone = sender.clone(); + let store_clone = Arc::clone(&store); + let max_retries = transfer_opts.max_retries.unwrap_or(0); + let cancellation_alert_clone = cancellation_alert.clone(); + tasks.spawn(async move { + TaskResult::Download( + download_chunk_stream( + store_clone, + location_clone, + ranged_opts, + sender_clone, + 
cancellation_alert_clone, max_retries, ) .await, ) }); } drop(sender); tasks.spawn(async move { TaskResult::Write( write_multi_chunks(&mut file, &mut receiver, cancellation_alert).await, ) }); while let Some(result) = tasks.join_next().await { match result { Ok(TaskResult::Download(Ok(()))) => {} Ok(TaskResult::Download(Err(e))) => { eprintln!("Error en descarga: {:?}", e); let _ = notify_cancellation.send(true); return Err(e.into()); } Ok(TaskResult::Write(Ok(bytes))) => { written_bytes = bytes; } Ok(TaskResult::Write(Err(e))) => { eprintln!("Error en escritura: {:?}", e); let _ = notify_cancellation.send(true); return Err(e.into()); } Err(e) => { eprintln!("Error crítico al ejecutar una tarea: {:?}", e); let _ = notify_cancellation.send(true); return Err(e.into()); } } } } GetResultPayload::File(mut source_file, _path) => { let mut file = file.try_clone().unwrap(); written_bytes = std::io::copy(&mut source_file, &mut file) .map_err(|e| Error::FileWriteError { source: e })?; } } Ok(written_bytes) } + #[cfg(unix)] /// We include the inode when available to yield an ETag more resistant to collisions and as used by popular web servers such as [Apache](https://httpd.apache.org/docs/2.2/mod/core.html#fileetag) From ed2155d096ce8d85bd74458cb3bb138305927abf Mon Sep 17 00:00:00 2001 From: midnattsol Date: Wed, 4 Dec 2024 19:01:10 +0100 Subject: [PATCH 2/5] Renamed `DownloadTransferConfig` to `TransferConfig` to prepare the system for future extensions, including compatibility with uploads. This change enhances clarity by using a more generic and versatile name for transfer-related operations. Changes made: - Updated the struct name in the source code. - Adjusted related documentation and comments. No functional changes were introduced beyond the refactor. 
--- object_store/src/lib.rs | 42 +++++++++++++++++++++++++++++++++++++++ object_store/src/local.rs | 29 +++------------------------ 2 files changed, 45 insertions(+), 26 deletions(-) diff --git a/object_store/src/lib.rs b/object_store/src/lib.rs index 4d8d8f02a0bc..b66a5f7ce7dd 100644 --- a/object_store/src/lib.rs +++ b/object_store/src/lib.rs @@ -1099,6 +1099,48 @@ impl GetResult { } } +/// Configuration for controlling transfer behavior. +#[derive(Debug, Clone, Copy)] +pub struct TransferConfig { + /// Maximum number of concurrent chunks to transfer. + pub max_concurrent_chunks: usize, + /// Maximum number of chunks to buffer in memory during the transfer. + /// Defaults to `max_concurrent_chunks` if `None`. + pub chunk_queue_size: Option, + /// Maximum number of retries for a chunk transfer. + pub max_retries: Option, +} + +impl TransferConfig { + /// Creates a new `TransferConfig` with the specified parameters. + pub fn new( + max_concurrent_chunks: usize, + chunk_queue_size: Option, + max_retries: Option, + ) -> Self { + Self { + max_concurrent_chunks, + chunk_queue_size, + max_retries, + } + } +} + +/// Default implementation for `TransferConfig`. 
+/// +/// - `max_concurrent_chunks`: 1 +/// - `chunk_queue_size`: Some(2) +/// - `max_retries`: None +impl Default for TransferConfig { + fn default() -> Self { + Self { + max_concurrent_chunks: 1, + chunk_queue_size: Some(2), + max_retries: None, + } + } +} + /// Configure preconditions for the put operation #[derive(Debug, Clone, PartialEq, Eq, Default)] pub enum PutMode { diff --git a/object_store/src/local.rs b/object_store/src/local.rs index 79511b6c8b06..ccb392b72a9d 100644 --- a/object_store/src/local.rs +++ b/object_store/src/local.rs @@ -40,7 +40,7 @@ use crate::{ util::InvalidGetRange, Attributes, GetOptions, GetRange, GetResult, GetResultPayload, ListResult, MultipartUpload, ObjectMeta, ObjectStore, PutMode, PutMultipartOpts, PutOptions, PutPayload, PutResult, Result, - UploadPart, + TransferConfig, UploadPart, }; /// A specialized `Error` for filesystem object store-related errors @@ -1021,29 +1021,6 @@ fn convert_metadata(metadata: Metadata, location: Path) -> Result { }) } -/// Configuration for download transfer settings. -#[derive(Debug, Clone, Copy)] -pub struct DownloadTransferConfig { - /// The maximum number of concurrent chunks to download. - pub max_concurrent_chunks: usize, - /// The maximum number of bytes to buffer in memory for the download. - /// If `None`, defaults to the value of `max_concurrent_chunks`. - pub chunk_queue_size: Option, - /// The maximum number of retries for downloading a chunk. - pub max_retries: Option, -} - -/// Default implementation for `DownloadTransferConfig`. -impl Default for DownloadTransferConfig { - fn default() -> Self { - Self { - max_concurrent_chunks: 1, - chunk_queue_size: Some(2), - max_retries: None, - } - } -} - /// Downloads a single chunk from the object store. 
/// /// # Arguments @@ -1186,10 +1163,10 @@ pub async fn download( location: &Path, opts: GetOptions, mut file: std::fs::File, - transfer_opts: Option<&DownloadTransferConfig>, + transfer_opts: Option<&TransferConfig>, ) -> Result { let req = store.get_opts(&location, opts.clone()).await?; - let transfer_opts = *transfer_opts.unwrap_or(&DownloadTransferConfig::default()); + let transfer_opts = *transfer_opts.unwrap_or(&TransferConfig::default()); let concurrent_tasks = transfer_opts.max_concurrent_chunks; let channel_size = transfer_opts.chunk_queue_size.unwrap_or(concurrent_tasks); let mut written_bytes: u64 = 0; From f34166b99677f6bf41f8e01882379a98e338c52b Mon Sep 17 00:00:00 2001 From: midnattsol Date: Sat, 7 Dec 2024 00:58:12 +0100 Subject: [PATCH 3/5] Refactor: rename variables for better readability - Renamed struct TransferConfig to TransferOptions to improve code clarity and consistency. - Renamed fields in `TransferConfig`: `max_concurrent_chunks` to `concurrent_tasks`, and `chunk_queue_size` to `buffer_capacity` for clarity. - Updated references to old variables throughout the codebase. Rationale: - Improved readability and maintainability. - `TransferConfig` was ambiguous or inconsistent with naming conventions. No functional changes were made; this is purely a refactor for naming consistency. --- object_store/src/lib.rs | 34 ++++++++++++++++++---------------- object_store/src/local.rs | 21 ++++++++++----------- 2 files changed, 28 insertions(+), 27 deletions(-) diff --git a/object_store/src/lib.rs b/object_store/src/lib.rs index b66a5f7ce7dd..bf79eda2907f 100644 --- a/object_store/src/lib.rs +++ b/object_store/src/lib.rs @@ -1101,41 +1101,43 @@ impl GetResult { /// Configuration for controlling transfer behavior. #[derive(Debug, Clone, Copy)] -pub struct TransferConfig { +pub struct TransferOptions { /// Maximum number of concurrent chunks to transfer. 
- pub max_concurrent_chunks: usize, + pub concurrent_tasks: usize, /// Maximum number of chunks to buffer in memory during the transfer. - /// Defaults to `max_concurrent_chunks` if `None`. - pub chunk_queue_size: Option, + /// Defaults to `concurrent_tasks` if `None`. + pub buffer_capacity: Option, /// Maximum number of retries for a chunk transfer. pub max_retries: Option, } -impl TransferConfig { - /// Creates a new `TransferConfig` with the specified parameters. +impl TransferOptions { + /// Creates a new `TransferOptions` with the specified parameters. pub fn new( - max_concurrent_chunks: usize, - chunk_queue_size: Option, + concurrent_tasks: usize, + buffer_capacity: Option, max_retries: Option, ) -> Self { + let buffer_capacity = buffer_capacity.or(Some(concurrent_tasks)); + let max_retries = max_retries.or(Some(3)); Self { - max_concurrent_chunks, - chunk_queue_size, + concurrent_tasks, + buffer_capacity, max_retries, } } } -/// Default implementation for `TransferConfig`. +/// Default implementation for `TransferOptions`. 
/// -/// - `max_concurrent_chunks`: 1 -/// - `chunk_queue_size`: Some(2) +/// - `concurrent_tasks`: 1 +/// - `buffer_capacity`: Some(2) /// - `max_retries`: None -impl Default for TransferConfig { +impl Default for TransferOptions { fn default() -> Self { Self { - max_concurrent_chunks: 1, - chunk_queue_size: Some(2), + concurrent_tasks: 1, + buffer_capacity: Some(1), max_retries: None, } } diff --git a/object_store/src/local.rs b/object_store/src/local.rs index ccb392b72a9d..428aafc1c9e0 100644 --- a/object_store/src/local.rs +++ b/object_store/src/local.rs @@ -40,7 +40,7 @@ use crate::{ util::InvalidGetRange, Attributes, GetOptions, GetRange, GetResult, GetResultPayload, ListResult, MultipartUpload, ObjectMeta, ObjectStore, PutMode, PutMultipartOpts, PutOptions, PutPayload, PutResult, Result, - TransferConfig, UploadPart, + TransferOptions, UploadPart, }; /// A specialized `Error` for filesystem object store-related errors @@ -1040,7 +1040,7 @@ fn convert_metadata(metadata: Metadata, location: Path) -> Result { /// The function listens for cancellation signals using `cancellation_alert` and exits early /// if a cancellation is detected. It also retries download attempts up to `max_retries` in case /// of transient errors. -async fn download_chunk_stream( +async fn download_chunk( store: Arc, location: Path, opts: GetOptions, @@ -1117,7 +1117,6 @@ async fn write_multi_chunks( let mut data = 0; while let Some((offset, buffer)) = receiver.recv().await { - // Verificar si se ha solicitado una cancelación if *cancellation_alert.borrow() { return Err(Error::DownloadAborted); } @@ -1144,8 +1143,8 @@ async fn write_multi_chunks( /// /// # Details /// The function determines the number of concurrent tasks (`concurrent_tasks`) and the size of the -/// channel buffer (`channel_size`). If `chunk_queue_size` is not provided, it defaults to the value of -/// `max_concurrent_chunks`. 
The total size of the file is divided into chunks, and each chunk is +/// channel buffer (`channel_size`). If `buffer_capacity` is not provided, it defaults to the value of +/// `concurrent_tasks`. The total size of the file is divided into chunks, and each chunk is /// processed by a concurrent task. /// /// The download process listens for cancellation signals via a shared `cancellation_alert`. If a @@ -1163,12 +1162,12 @@ pub async fn download( location: &Path, opts: GetOptions, mut file: std::fs::File, - transfer_opts: Option<&TransferConfig>, + transfer_opts: Option<&TransferOptions>, ) -> Result { let req = store.get_opts(&location, opts.clone()).await?; - let transfer_opts = *transfer_opts.unwrap_or(&TransferConfig::default()); - let concurrent_tasks = transfer_opts.max_concurrent_chunks; - let channel_size = transfer_opts.chunk_queue_size.unwrap_or(concurrent_tasks); + let transfer_opts = *transfer_opts.unwrap_or(&TransferOptions::default()); + let concurrent_tasks = transfer_opts.concurrent_tasks; + let channel_size = transfer_opts.buffer_capacity.unwrap_or(concurrent_tasks); let mut written_bytes: u64 = 0; match req.payload { GetResultPayload::Stream(_) => { @@ -1184,7 +1183,7 @@ pub async fn download( } let mut tasks = tokio::task::JoinSet::new(); - for i in 0..transfer_opts.max_concurrent_chunks { + for i in 0..transfer_opts.concurrent_tasks { let chunk_start = i * chunk_size; let chunk_end = std::cmp::min((i + 1) * chunk_size - 1, obj_size - 1); let ranged_opts = GetOptions { @@ -1198,7 +1197,7 @@ pub async fn download( let cancellation_alert_clone = cancellation_alert.clone(); tasks.spawn(async move { TaskResult::Download( - download_chunk_stream( + download_chunk( store_clone, location_clone, ranged_opts, From f3198a68db58e3ef3979e3c2e73e6a06cd2e9517 Mon Sep 17 00:00:00 2001 From: midnattsol Date: Sat, 7 Dec 2024 12:45:29 +0100 Subject: [PATCH 4/5] refactor(errors): rename some errors and update the source of `UnableSendToChannel` formerly 
`DataSendError` - Renamed several error variants for better clarity and consistency. - Updated the `source` of one error to properly propagate the original cause. --- object_store/src/local.rs | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/object_store/src/local.rs b/object_store/src/local.rs index 428aafc1c9e0..aa1339ef7722 100644 --- a/object_store/src/local.rs +++ b/object_store/src/local.rs @@ -158,23 +158,22 @@ pub(crate) enum Error { Aborted, #[snafu(display("Failed to seek to position in file: {}", source))] - FileSeekError { + SeekFile { source: io::Error, }, #[snafu(display("Failed to write to file: {}", source))] - FileWriteError { + WriteFile { source: io::Error, }, - #[snafu(display("Failed to send data to writer for file at '{}': {}", path, source))] - DataSendError { - path: String, - source: tokio::sync::mpsc::error::SendError<(usize, Bytes)>, + #[snafu(display("Failed to send to channel: {}", source))] + UnableSendToChannel { + source: Box, }, #[snafu(display("Failed to download file from '{}': {}", path, source))] - FileDownloadError { + DownloadFile { source: io::Error, path: String, }, @@ -1068,7 +1067,7 @@ async fn download_chunk( continue; } Err(e) => { - return Err(Error::FileDownloadError { + return Err(Error::DownloadFile { source: e.into(), path: location.to_string(), }); @@ -1079,10 +1078,7 @@ async fn download_chunk( sender .send((offset, buffer)) .await - .map_err(|e| Error::DataSendError { - path: location.to_string(), - source: e, - })?; + .map_err(|e| Error::UnableSendToChannel { source: e.into() })?; offset += bytes_readed; } @@ -1122,9 +1118,9 @@ async fn write_multi_chunks( } file.seek(SeekFrom::Start(offset as u64)) - .map_err(|e| Error::FileSeekError { source: e })?; + .map_err(|e| Error::SeekFile { source: e })?; file.write_all(&buffer) - .map_err(|e| Error::FileWriteError { source: e })?; + .map_err(|e| Error::WriteFile { source: e })?; data += buffer.len() as u64; } @@ -1243,7 
+1239,7 @@ pub async fn download( GetResultPayload::File(mut source_file, _path) => { let mut file = file.try_clone().unwrap(); written_bytes = std::io::copy(&mut source_file, &mut file) - .map_err(|e| Error::FileWriteError { source: e })?; + .map_err(|e| Error::WriteFile { source: e })?; } } Ok(written_bytes) From 9e86e8ccba22c0eed1fa91e22fa00bf84dfce639 Mon Sep 17 00:00:00 2001 From: midnattsol Date: Sun, 8 Dec 2024 13:06:49 +0100 Subject: [PATCH 5/5] docs(lib.rs): Update docs for `impl Default for TransferOptions` - Removed redundant documentation as it's already implicit in the code. --- object_store/src/lib.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/object_store/src/lib.rs b/object_store/src/lib.rs index bf79eda2907f..5d8cda68dbd9 100644 --- a/object_store/src/lib.rs +++ b/object_store/src/lib.rs @@ -1129,10 +1129,6 @@ impl TransferOptions { } /// Default implementation for `TransferOptions`. -/// -/// - `concurrent_tasks`: 1 -/// - `buffer_capacity`: Some(2) -/// - `max_retries`: None impl Default for TransferOptions { fn default() -> Self { Self {