summaryrefslogtreecommitdiff
path: root/policy/_policies/src/stream_read.rs
blob: 5cf77911457c4f014e6f414cd05fb07269477633 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
use crate::error::ChunkFailed;
use std::{collections::HashMap, path::Path};

/// Streams the file at `path` through `chunk_func` in `size`-byte reads,
/// collecting the absolute byte offsets of any splits the callback reports.
///
/// For every non-empty read, `chunk_func` receives the bytes just read, their
/// length, the caller's mutable per-stream state `stream_data`, and `params`.
/// When it returns `Some(offset)`, the split is recorded at
/// `bytes_consumed_before_this_chunk + offset`.
///
/// Offsets are `u32`, so results are only meaningful for files no larger than
/// `u32::MAX` bytes; beyond that `total_read` overflows (panics in debug
/// builds, wraps in release builds).
///
/// A `size` of 0 yields an empty buffer, so the first read returns 0 bytes and
/// the function returns `Ok(vec![])` without invoking `chunk_func`.
///
/// # Errors
/// * `ChunkFailed::FileOpenFailed` if the file cannot be opened.
/// * `ChunkFailed::FileReadFailed` if any read fails mid-stream.
pub async fn chunk_stream_process<T, F>(
    path: &Path,
    stream_data: &mut T,
    size: u32,
    params: &HashMap<&str, &str>,
    chunk_func: F,
) -> Result<Vec<u32>, ChunkFailed>
where
    // NOTE: the previous `T: Default` bound was unused by this body and has
    // been dropped; this only relaxes the requirements on callers.
    F: AsyncFn(&[u8], u32, &mut T, &HashMap<&str, &str>) -> Option<u32>,
{
    let mut file = tokio::fs::File::open(path)
        .await
        .map_err(|_| ChunkFailed::FileOpenFailed(path.to_path_buf()))?;
    let mut buffer = vec![0u8; size as usize];
    let mut splits = Vec::new();
    // Running count of bytes consumed before the current chunk; used to turn
    // chunk-relative split offsets into absolute file offsets.
    let mut total_read: u32 = 0;

    loop {
        // NOTE(review): `read` may return fewer than `size` bytes even before
        // EOF (short read), so `chunk_func` can observe partial chunks mid-
        // file — confirm the callback tolerates variable chunk lengths.
        let bytes_read = tokio::io::AsyncReadExt::read(&mut file, &mut buffer)
            .await
            .map_err(|_| ChunkFailed::FileReadFailed(path.to_path_buf()))?;

        // A zero-byte read signals EOF; return everything collected so far.
        if bytes_read == 0 {
            break Ok(splits);
        }

        // Run the caller's chunking logic on exactly the bytes read.
        let chunk_result = chunk_func(
            &buffer[..bytes_read],
            bytes_read as u32,
            stream_data,
            params,
        )
        .await;

        // Translate the chunk-relative offset into an absolute file offset.
        if let Some(offset) = chunk_result {
            splits.push(total_read + offset);
        }

        total_read += bytes_read as u32;
    }
}