diff --git a/README.md b/README.md index 042b5ee..f41475f 100644 --- a/README.md +++ b/README.md @@ -1,82 +1,107 @@ -# Introduction To DIG and the DIG Network +## Upload Protocol - README + +### Overview + +This upload protocol manages file uploads securely and efficiently in a distributed storage network. Each upload session is tied to a unique session ID and is managed via a temporary folder with a customizable Time-to-Live (TTL). The protocol validates key ownership using signatures and supports flexible session handling, including file uploads, session commits, and aborts. + +### Key Features + +- **Session-Based File Uploads**: Each upload occurs within a session that has a unique session ID. Files are uploaded to a temporary directory that has a defined TTL. +- **Key Ownership Validation**: File uploads require nonce-based validation with a key ownership signature to ensure the uploader has write permissions. +- **Session TTL Management**: Sessions are cleaned up automatically after inactivity, or the session TTL can be reset during active file uploads. +- **Commit and Abort Support**: Once files are uploaded, they can either be committed to the store's permanent directory or aborted, deleting all files associated with the session. + +### API Endpoints + +#### 1. **HEAD /stores/{storeId}** + **Description**: Check if a store exists and optionally verify if a specific root hash exists in that store. + - **Query Param**: `hasRootHash` (optional) — Use this query parameter to check if the root hash exists in the store. + - **Headers Returned**: + - `x-store-exists`: Indicates whether the store exists (`true` or `false`). + - `x-has-root-hash`: If the `hasRootHash` query is provided, this header will indicate whether the specific root hash exists (`true` or `false`). + + **Example**: + ```bash + HEAD /stores/myStoreId?hasRootHash=12345abcdef + ``` + The server will return the following headers: + - `x-store-exists: true` + - `x-has-root-hash: true` (if the root hash exists) + +#### 2. **POST /upload/{storeId}** + **Description**: Starts an upload session for a store. If the store does not exist, the user will be required to authenticate. + - **Response**: + - `sessionId`: A unique identifier for the upload session. + + **Example**: + ```bash + POST /upload/myStoreId + ``` + Response: + ```json + { + "message": "Upload session started for DataStore myStoreId.", + "sessionId": "12345-abcdef-67890" + } + ``` + +#### 3. **PUT /upload/{storeId}/{sessionId}/{filename}** + **Description**: Uploads a file to the store within an active session. Each file must be validated with a nonce, key ownership signature, and the uploader's public key. + - **Headers Required**: + - `x-key-ownership-sig`: A signature proving key ownership. + - `x-public-key`: The uploader's public key. + - `x-nonce`: A unique nonce used to generate the signature. + + **Example**: + ```bash + PUT /upload/myStoreId/12345-abcdef-67890/myfile.txt + ``` + Headers: + - `x-key-ownership-sig: ` + - `x-public-key: ` + - `x-nonce: ` + +#### 4. **POST /commit/{storeId}/{sessionId}** + **Description**: Finalizes the upload by moving files from the session's temporary folder to the store's permanent directory. + + **Example**: + ```bash + POST /commit/myStoreId/12345-abcdef-67890 + ``` + Response: + ```json + { + "message": "Upload for DataStore myStoreId under session 12345-abcdef-67890 committed successfully." + } + ``` + +#### 5. 
**POST /abort/{storeId}/{sessionId}** + **Description**: Aborts the upload session, deletes the temporary session folder, and removes the session from the cache. + + **Example**: + ```bash + POST /abort/myStoreId/12345-abcdef-67890 + ``` + Response: + ```json + { + "message": "Upload session 12345-abcdef-67890 for DataStore myStoreId aborted and cleaned up." + } + ``` + +### Example Workflow + +1. **Start an Upload Session**: + - Call the `POST /upload/{storeId}` endpoint to start an upload session. + - The server responds with a `sessionId` to track the session. + +2. **Upload a File**: + - For each file, send a `PUT /upload/{storeId}/{sessionId}/{filename}` request. + - Include the required headers (`x-key-ownership-sig`, `x-public-key`, and `x-nonce`). + +3. **Commit the Session**: + - After all files are uploaded, call the `POST /commit/{storeId}/{sessionId}` endpoint to commit the session. + +4. **Abort the Session (Optional)**: + - If you need to abort the session and discard the uploaded files, use the `POST /abort/{storeId}/{sessionId}` endpoint. -The DIG Network offers a robust solution for ensuring data integrity and censorship resistance by leveraging decentralized technology. When you add your data to DIG, it is encoded and served from a Merkle tree, with the Merkle root securely stored on the blockchain by the data owner. This structure guarantees that the data can be verified by anyone, ensuring that what they consume is exactly what was intended, free from manipulation or tampering. - -This capability is particularly valuable for decentralized applications (dApps). With DIG, a dApp developer can have their application backed up and served globally by a network of peers. The decentralized nature of the DIG Network means that even if a dApp is served by a random peer, users can trust that it remains unaltered. - -The network's ability to stitch together all the peers serving your content creates a unified Decentralized Content Delivery Network (D-CDN), making it easier for end-users to access the data without needing to manually locate the peers. The philosophy behind DIG suggests that if there are enough peers distributed across various legal jurisdictions worldwide, the dApp achieves a significant level of censorship resistance. This is because the chance of every peer being simultaneously shut down is extremely low. - -Moreover, DIG opens up possibilities for creating dApps where write access is controlled by a cryptographic key, potentially owned by a Decentralized Autonomous Organization (DAO). This could lead to the development of dApps that are not owned by any single entity but are maintained by anonymous DAO members, further enhancing the censorship resistance and resilience of the application. - -### Using DIG: A Step-by-Step Guide - -To effectively use the DIG Network, it's essential to become familiar with the DIG CLI commands. While it is recommended to explore all available commands, this guide focuses on the core workflow that developers will frequently use. - -**Before You Begin:** Ensure your DIG environment is properly set up by following the [SETUP.md](./SETUP.md) guide. - ---- - -#### Step 1: Prepare Your Project - -1. **Add `.dig` to `.gitignore`:** - - Open your dApp project. - - Add `.dig` to your `.gitignore` file to ensure that DIG-related files are not tracked by Git. - -2. **Build Your Project:** - - Compile your project, directing the output to the `./dist` folder (or any build folder of your choice). 
By default, the DIG CLI looks for the `./dist` folder. - ---- - -#### Step 2: Initialize DIG for Your Project - -1. **Initialize DIG:** - - Run the following command in your project directory: - ```bash - dig init - ``` - - This will create a `.dig` folder in your project directory. An empty data store will also be created and committed to the blockchain. Wait for the blockchain transaction to confirm before proceeding to the next step. - ---- - -#### Step 3: Commit Your Build to the Data Store - -1. **Commit the `dist` Folder:** - - Use the following command to commit your `dist` folder to the DIG data store: - ```bash - dig commit - ``` - - This command inserts all files from the `./dist` folder into the Merkle root of your new data store and updates the blockchain with the resulting Merkle root. This process involves another blockchain transaction, which you must wait to confirm. - ---- - -#### Step 4: Push Your Data to the DIG Node - -1. **Push to the DIG Node:** - - To make your data available on a DIG Node, run the following command: - ```bash - dig push - ``` - - This command uploads your files to the DIG Node, verifying integrity and permissions along the way. Ensure your DIG Node is set up according to the [SETUP.md](#) guide. - ---- - -#### Step 5: Verify Your dApp on the DIG Network - -1. **Check Availability:** - - After a few moments, your DIG Node will detect the new store and register it with the DIG Network. You can verify this by visiting: - ```http - http://your.ip.address - ``` - - You should be able to find and access your dApp. Congratulations, your dApp is now live on the DIG Network! - ---- - -#### Step 6: Accessing Your dApp via the Network - -Once your dApp is on the network, it can be discovered and accessed by any client, browser, or domain acting as a cache service using a unified identifier called the **Universal DataLayer Identifier (UDI)**. This feature is still under development and will be available soon. Updates will be provided as the UDI and associated technologies come online. - -**In the Meantime:** -- You can use **nginx** or a **reverse proxy** to map your store to a domain on your local machine and serve it like a traditional website. -- In the future, a custom browser will automatically load your app from the network using the UDI, potentially integrated with a decentralized name service. - -By following this workflow, you can securely deploy your dApp to the DIG Network, ensuring that it is backed up, served globally, and resistant to censorship. 
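To tie the endpoints documented above together, here is a minimal client-side sketch of the full session lifecycle in TypeScript (Node 18+ for the built-in `fetch`). The base URL, port, and file name are assumptions for illustration, and the `x-nonce`, `x-public-key`, and `x-key-ownership-sig` values are placeholders — producing a real key-ownership signature depends on the uploader's wallet tooling and is not shown here.

```typescript
// Hypothetical end-to-end client for the upload protocol described above.
// Assumes the server exposes the README paths at http://localhost:4159.
import { readFile } from "fs/promises";

const BASE_URL = "http://localhost:4159"; // assumed server address
const storeId = "myStoreId";

async function runUploadWorkflow(): Promise<void> {
  // 1. Start an upload session and capture the sessionId.
  const startRes = await fetch(`${BASE_URL}/upload/${storeId}`, { method: "POST" });
  const { sessionId } = (await startRes.json()) as { sessionId: string };

  // 2. Upload a file into the session, passing the key-ownership headers.
  const fileBytes = await readFile("./myfile.txt");
  await fetch(`${BASE_URL}/upload/${storeId}/${sessionId}/myfile.txt`, {
    method: "PUT",
    headers: {
      "x-nonce": "<nonce>",                 // placeholder
      "x-public-key": "<public key hex>",   // placeholder
      "x-key-ownership-sig": "<signature>", // placeholder
    },
    body: fileBytes,
  });

  // 3. Commit the session so the files move to the store's permanent directory.
  await fetch(`${BASE_URL}/commit/${storeId}/${sessionId}`, { method: "POST" });

  // Or, to discard the uploaded files instead of committing:
  // await fetch(`${BASE_URL}/abort/${storeId}/${sessionId}`, { method: "POST" });
}

runUploadWorkflow().catch(console.error);
```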
diff --git a/package-lock.json b/package-lock.json index e37000b..e3f9172 100644 --- a/package-lock.json +++ b/package-lock.json @@ -14,7 +14,9 @@ "chia-server-coin": "^0.0.5", "express": "^4.19.2", "node-cache": "^5.1.2", - "toad-scheduler": "^3.0.1" + "tmp": "^0.2.3", + "toad-scheduler": "^3.0.1", + "uuidv4": "^6.2.13" }, "bin": { "dig": "dist/index.js" @@ -23,6 +25,7 @@ "@types/express": "^4.17.21", "@types/mocha": "^10.0.7", "@types/node": "^22.1.0", + "@types/tmp": "^0.2.6", "copyfiles": "^2.4.1", "mocha": "^10.7.0", "standard-version": "^9.5.0", @@ -844,6 +847,17 @@ "@types/send": "*" } }, + "node_modules/@types/tmp": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/@types/tmp/-/tmp-0.2.6.tgz", + "integrity": "sha512-chhaNf2oKHlRkDGt+tiKE2Z5aJ6qalm7Z9rlLdBwmOiAAf09YQvvoLXjWK4HWPF1xU/fqvMgfNfpVoBscA/tKA==", + "dev": true + }, + "node_modules/@types/uuid": { + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-8.3.4.tgz", + "integrity": "sha512-c/I8ZRb51j+pYGAu5CrFMRxqZ2ke4y2grEBO5AUjgSkSk+qT2Ea+OdWElz/OiMf5MNpn2b17kuVBwZLQJXzihw==" + }, "node_modules/@types/wrap-ansi": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/@types/wrap-ansi/-/wrap-ansi-3.0.0.tgz", @@ -2768,6 +2782,17 @@ "node": ">=4" } }, + "node_modules/external-editor/node_modules/tmp": { + "version": "0.0.33", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", + "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", + "dependencies": { + "os-tmpdir": "~1.0.2" + }, + "engines": { + "node": ">=0.6.0" + } + }, "node_modules/fast-fifo": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/fast-fifo/-/fast-fifo-1.3.2.tgz", @@ -5344,14 +5369,11 @@ } }, "node_modules/tmp": { - "version": "0.0.33", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", - "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", - "dependencies": { - "os-tmpdir": "~1.0.2" - }, + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.3.tgz", + "integrity": "sha512-nZD7m9iCPC5g0pYmcaxogYKggSfLsdxl8of3Q/oIbqCqLLIO9IAF0GWjX1z9NZRHPiXv8Wex4yDCaZsgEw0Y8w==", "engines": { - "node": ">=0.6.0" + "node": ">=14.14" } }, "node_modules/to-regex-range": { @@ -5579,6 +5601,23 @@ "node": ">= 0.4.0" } }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/uuidv4": { + "version": "6.2.13", + "resolved": "https://registry.npmjs.org/uuidv4/-/uuidv4-6.2.13.tgz", + "integrity": "sha512-AXyzMjazYB3ovL3q051VLH06Ixj//Knx7QnUSi1T//Ie3io6CpsPu9nVMOx5MoLWh6xV0B9J0hIaxungxXUbPQ==", + "dependencies": { + "@types/uuid": "8.3.4", + "uuid": "8.3.2" + } + }, "node_modules/v8-compile-cache-lib": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", diff --git a/package.json b/package.json index 1a59917..138438d 100644 --- a/package.json +++ b/package.json @@ -30,12 +30,15 @@ "chia-server-coin": "^0.0.5", "express": "^4.19.2", "node-cache": "^5.1.2", - "toad-scheduler": "^3.0.1" + "tmp": "^0.2.3", + "toad-scheduler": "^3.0.1", + "uuidv4": "^6.2.13" }, "devDependencies": { "@types/express": "^4.17.21", "@types/mocha": "^10.0.7", "@types/node": "^22.1.0", + 
"@types/tmp": "^0.2.6", "copyfiles": "^2.4.1", "mocha": "^10.7.0", "standard-version": "^9.5.0", diff --git a/src/controllers/merkleTreeController.ts b/src/controllers/merkleTreeController.ts index f4b0090..eef72a6 100644 --- a/src/controllers/merkleTreeController.ts +++ b/src/controllers/merkleTreeController.ts @@ -1,243 +1,197 @@ import fs from "fs"; import path from "path"; import { Request, Response } from "express"; -import NodeCache from "node-cache"; +import { v4 as uuidv4 } from "uuid"; import { getCredentials } from "../utils/authUtils"; import { HttpError } from "../utils/HttpError"; -import { generateNonce, validateNonce } from "../utils/nonce"; - -// @ts-ignore -import { DataStore, Wallet, getStoresList } from "@dignetwork/dig-sdk"; -import { pipeline } from "stream"; +import { DataStore, Wallet } from "@dignetwork/dig-sdk"; import { promisify } from "util"; import { getStorageLocation } from "../utils/storage"; - -const streamPipeline = promisify(pipeline); - -// Create a cache instance with 1 minute TTL (time-to-live) -const storeOwnerCache = new NodeCache({ stdTTL: 60 }); -const generateCacheKey = (publicKey: string, storeId: string): string => { - return `${publicKey}_${storeId}`; -}; +import tmp from "tmp"; +import { PassThrough } from "stream"; +import NodeCache from "node-cache"; const digFolderPath = getStorageLocation(); - -export const storeStatus = async ( - req: Request, - res: Response -): Promise => { - try { - const { storeId } = req.params; - - if (!storeId) { - throw new HttpError(400, "Missing storeId in path parameters."); - } - - const dataStore = DataStore.from(storeId); - const synced = await dataStore.isSynced(); - - res.status(200).json({ synced }); - } catch (error: any) { - console.error("Error in storeStatus controller:", error); - - const statusCode = error instanceof HttpError ? error.statusCode : 500; - const errorMessage = error.message || "Failed to process request"; - - res.status(statusCode).json({ error: errorMessage }); +const streamPipeline = promisify(require("stream").pipeline); + +// Set the TTL for 5 minutes (in milliseconds) +const sessionTTL = 5 * 60 * 1000; // 5 minutes in milliseconds +const ownerCacheTTL = 3 * 60 * 1000; // Cache expiry for isOwner (3 minutes) + +// Cache for tracking session folders and owner permissions +const sessionCache: { + [key: string]: { + tmpDir: string; + cleanup: () => void; + timer: NodeJS.Timeout; + resetTtl: () => void; + }; +} = {}; +const ownerCache = new NodeCache({ stdTTL: ownerCacheTTL }); + +/** + * Creates a session directory with custom TTL logic. Each session has a TTL that can be reset + * when new files are uploaded to prevent early cleanup. + * + * @param {number} ttl - The TTL (Time-to-Live) in milliseconds before the session is cleaned up. + * @returns {string} sessionId - A unique session identifier. + */ +function createSessionWithTTL(ttl: number): string { + const tmpDirInfo = tmp.dirSync({ unsafeCleanup: true }); + const sessionId = uuidv4(); + + const resetTtl = () => { + clearTimeout(sessionCache[sessionId]?.timer); + sessionCache[sessionId].timer = setTimeout(() => { + cleanupSession(sessionId); + }, ttl); + }; + + sessionCache[sessionId] = { + tmpDir: tmpDirInfo.name, + cleanup: tmpDirInfo.removeCallback, + timer: setTimeout(() => cleanupSession(sessionId), ttl), + resetTtl, + }; + + return sessionId; +} + +/** + * Cleans up the session directory after the TTL expires or the upload is complete. + * + * @param {string} sessionId - The unique session ID to clean up. 
+ */ +function cleanupSession(sessionId: string): void { + const session = sessionCache[sessionId]; + if (session) { + session.cleanup(); // Remove the temporary directory + clearTimeout(session.timer); // Clear the timeout + delete sessionCache[sessionId]; // Remove the session from the cache + console.log(`Session ${sessionId} cleaned up.`); } -}; - -// Controller to handle HEAD requests for /stores/:storeId +} + +/** + * Check if a store exists and optionally check if a root hash exists (HEAD /stores/{storeId}) + * If the store exists, set headers indicating that the store and optionally the root hash exist. + * @param {Request} req - The request object. + * @param {Response} res - The response object. + */ export const headStore = async (req: Request, res: Response): Promise => { try { - console.log("Received request in headStore controller."); - - // Extract the publicKey from query params if present (optional) - const publicKey = req.query.publicKey as string | undefined; - - let userNonce: string | null = null; - if (publicKey) { - // Generate a nonce if a valid public key is provided - userNonce = await generateNonce(publicKey); - console.log("Generated User Nonce for publicKey:", userNonce); - } - const { storeId } = req.params; - console.log("Store ID:", storeId); - - const hasRootHash = req.query.hasRootHash as string | undefined; - console.log("Has Root Hash Query:", hasRootHash); + const { hasRootHash } = req.query; // Extract optional query param hasRootHash if (!storeId) { - console.log("Missing path parameters."); - throw new HttpError(400, "Missing path parameters"); + throw new HttpError(400, "Missing storeId in the request."); } - const dataStore = DataStore.from(storeId); - console.log("Data Store initialized for Store ID:", storeId); - - // Check if the store exists on this machine - const storeList = getStoresList(); - const storeExists = storeList.includes(storeId); - console.log(`Store exists on this machine: ${storeExists}`); - - // Fetch root history and calculate the latest sync status - const rootHistory = await dataStore.getRootHistory(); - console.log("Root History:", rootHistory); - - const latestRootHash = rootHistory.length > 0 ? rootHistory[rootHistory.length - 1].root_hash : null; - let isSynced = false; - - if (latestRootHash) { - // If hasRootHash is provided, compare it to the latest root hash - if (hasRootHash) { - const hasRootHashInHistory = rootHistory.some( - (history) => history.root_hash === hasRootHash && history.synced - ); - console.log(`Root hash ${hasRootHash} in history and synced:`, hasRootHashInHistory); - res.setHeader("X-Has-RootHash", hasRootHashInHistory ? "true" : "false"); - } + const storePath = path.join(digFolderPath, "stores", storeId); - // Always check if the store is synced with the latest root hash - isSynced = rootHistory.some((history) => history.root_hash === latestRootHash && history.synced); - console.log(`Store is synced with latest root hash: ${isSynced}`); + // Check if the store exists + const storeExists = fs.existsSync(storePath); + res.setHeader("x-store-exists", storeExists ? "true" : "false"); + + // If the store exists and hasRootHash is provided, check for the root hash + if (storeExists && hasRootHash) { + const rootHashPath = path.join(storePath, `${hasRootHash}.dat`); + + // Check if the file for the root hash exists in the store's directory + const rootHashExists = fs.existsSync(rootHashPath); + res.setHeader("x-has-root-hash", rootHashExists ? 
"true" : "false"); } - // Response headers - res - .set({ - "x-store-id": storeId, - "x-store-exists": storeExists ? "true" : "false", - "x-synced": isSynced ? "true" : "false", - ...(userNonce && { "x-nonce": userNonce }), - }) - .status(200) - .end(); + // End the response (since HEAD requests shouldn't have a body) + res.status(200).end(); } catch (error: any) { - console.error("Error in headStore controller:", error); - + console.error("Error checking store existence:", error); const statusCode = error instanceof HttpError ? error.statusCode : 500; - const errorMessage = error.message || "Failed to process request"; - - res.status(statusCode).json({ error: errorMessage }); + res.status(statusCode).end(); } }; -// Controller to handle GET requests for /stores/:storeId -export const getStore = async (req: Request, res: Response) => { +/** + * Start an upload session for a DataStore (POST /upload/{storeId}) + * Creates a unique session folder under the DataStore for each upload session. + * Authentication is only required if the DataStore does not already exist. + * @param {Request} req - The request object. + * @param {Response} res - The response object. + */ +export const startUploadSession = async ( + req: Request, + res: Response +): Promise => { try { const { storeId } = req.params; - const relativeFilePath = req.params[0]; // This will capture the rest of the path after the storeId + const uploadPath = path.join(digFolderPath, "stores", storeId); - if (!storeId || !relativeFilePath) { - res.status(400).send("Missing storeId or file path."); - return; - } + // Check if the DataStore directory already exists + const storeExists = fs.existsSync(uploadPath); - // Construct the full file path - const fullPath = path.join( - digFolderPath, - "stores", - storeId, - relativeFilePath - ); + // If the store does not exist, require authentication + if (!storeExists) { + const authHeader = req.headers.authorization || ""; + if (!authHeader.startsWith("Basic ")) { + throw new HttpError(401, "Unauthorized"); + } - // Check if the file exists - if (!fs.existsSync(fullPath)) { - res.status(404).send("File not found."); - return; - } + const [providedUsername, providedPassword] = Buffer.from( + authHeader.split(" ")[1], + "base64" + ) + .toString("utf-8") + .split(":"); - // Stream the file to the response - const fileStream = fs.createReadStream(fullPath); + const { username, password } = await getCredentials(); - // Handle errors during streaming - fileStream.on("error", (err) => { - console.error("Error streaming file:", err); - res.status(500).send("Error streaming file."); - }); + if (providedUsername !== username || providedPassword !== password) { + throw new HttpError(401, "Unauthorized"); + } + } - // Set content type to application/octet-stream for all files - res.setHeader("Content-Type", "application/octet-stream"); + // Create a unique subdirectory for this upload session with custom TTL + const sessionId = createSessionWithTTL(sessionTTL); - // Stream the file to the response - fileStream.pipe(res); - } catch (error) { - console.error("Error in getStore controller:", error); - res.status(500).send("Failed to process the request."); + res.status(200).json({ + message: `Upload session started for DataStore ${storeId}.`, + sessionId, + }); + } catch (error: any) { + console.error("Error starting upload session:", error); + const statusCode = error instanceof HttpError ? 
error.statusCode : 500; + res.status(statusCode).json({ error: error.message }); } }; -export const putStore = async (req: Request, res: Response): Promise => { +/** + * Upload a file to a DataStore (PUT /upload/{storeId}/{sessionId}/{filename}) + * Each session has a unique session folder under the DataStore. + * Each file has a nonce, key ownership signature, and public key that must be validated before upload. + * Uploading a file continuously resets the session TTL during the upload. + * @param {Request} req - The request object. + * @param {Response} res - The response object. + */ +export const uploadFile = async ( + req: Request, + res: Response +): Promise => { try { - console.log("Received file upload request."); + const { storeId, sessionId, filename } = req.params; - const authHeader = req.headers.authorization || ""; - if (!authHeader.startsWith("Basic ")) { - console.log("Authorization header missing or incorrect format."); - throw new HttpError(401, "Unauthorized"); - } - - const [providedUsername, providedPassword] = Buffer.from( - authHeader.split(" ")[1], - "base64" - ) - .toString("utf-8") - .split(":"); - - console.log("Authorization credentials extracted."); - - const { storeId } = req.params; - if (!storeId) { - console.log("storeId is missing in the path parameters."); - throw new HttpError(400, "Missing storeId in path parameters."); - } - - const { username, password } = await getCredentials(); - - const storeList = getStoresList(); - - // If the store is already tracked by this peer, anyone that has write - // access to the store (checked further down) can upload updates without authorization since its - // essentially the same as if an upate was pull from another peer. - // You only need credentials to track new stores. - - if ( - !storeList.includes(storeId) && - (providedUsername !== username || providedPassword !== password) - ) { - console.log("Provided credentials do not match stored credentials."); - throw new HttpError(401, "Unauthorized"); - } - - console.log(`storeId received: ${storeId}`); - - // These parameters are expected to be in the query or headers, not the body for a file upload + // Get nonce, publicKey, and keyOwnershipSig from the headers const keyOwnershipSig = req.headers["x-key-ownership-sig"] as string; const publicKey = req.headers["x-public-key"] as string; const nonce = req.headers["x-nonce"] as string; - const filename = decodeURIComponent(req.path); - if (!keyOwnershipSig || !publicKey || !nonce || !filename) { - console.log("One or more required headers are missing."); - throw new HttpError(400, "Missing required headers."); - } - - console.log( - `Received headers: keyOwnershipSig=${keyOwnershipSig}, publicKey=${publicKey}, nonce=${nonce}, filename=${filename}` - ); - - let fileKey = path.join(filename); - - // Verify key ownership signature - console.log("Verifying key ownership signature..."); - const validNonce = validateNonce(publicKey, nonce); - - if (!validNonce) { - console.log("Invalid nonce."); - throw new HttpError(401, "Invalid nonce."); + if (!keyOwnershipSig || !publicKey || !nonce) { + throw new HttpError( + 400, + "Missing required headers: nonce, publicKey, or keyOwnershipSig." 
+ ); } + // Validate the key ownership signature using the nonce const wallet = await Wallet.load("default"); const isSignatureValid = await wallet.verifyKeyOwnershipSignature( nonce, @@ -252,56 +206,118 @@ export const putStore = async (req: Request, res: Response): Promise => { console.log("Key ownership signature verified successfully."); - // Check store ownership - console.log("Checking store ownership..."); - - const cacheKey = generateCacheKey(publicKey, storeId); - let isOwner = storeOwnerCache.get(cacheKey); + // Check if the user has write permissions to the store + const cacheKey = `${publicKey}_${storeId}`; + let isOwner = ownerCache.get(cacheKey); if (isOwner === undefined) { - // If not in cache, check ownership and cache the result + // If the value isn't in the cache, check the actual permissions const dataStore = DataStore.from(storeId); isOwner = await dataStore.hasMetaWritePermissions( Buffer.from(publicKey, "hex") ); - - // Cache the result for 1 minute (60 seconds) - storeOwnerCache.set(cacheKey, isOwner); - } else { - console.log("Using cached isOwner value for publicKey and storeId:", cacheKey); - storeOwnerCache.ttl(cacheKey, 60); + ownerCache.set(cacheKey, isOwner); // Cache the result for future requests } if (!isOwner) { - console.log("User does not have write access to this store."); throw new HttpError(403, "You do not have write access to this store."); } - console.log("User has write access to the store."); + // Check if the session exists in the cache and reset the TTL if found + const session = sessionCache[sessionId]; + if (!session) { + throw new HttpError(404, "Session not found or expired."); + } - // Construct the full path where the file should be stored - const fullPath = path.join(digFolderPath, "stores", fileKey); - console.log("Saving file to:", fullPath); + // Reset the TTL periodically while streaming the file + const passThrough = new PassThrough(); + passThrough.on("data", () => { + session.resetTtl(); // Reset the TTL on each chunk of data + ownerCache.ttl(cacheKey, ownerCacheTTL); // Extend cache TTL + }); - // Ensure the directory exists - const directory = path.dirname(fullPath); - if (!fs.existsSync(directory)) { - console.log("Directory does not exist, creating:", directory); - fs.mkdirSync(directory, { recursive: true }); - } + // Proceed with file upload + const filePath = path.join(session.tmpDir, filename); + const fileStream = fs.createWriteStream(filePath); - // Stream the file to the destination - console.log("Streaming file to destination..."); - await streamPipeline(req, fs.createWriteStream(fullPath)); + await streamPipeline(req.pipe(passThrough), fileStream); - console.log("File uploaded successfully."); - res.status(200).json({ message: "File uploaded successfully." }); + res.status(200).json({ + message: `File ${filename} uploaded to DataStore ${storeId} under session ${sessionId}.`, + }); } catch (error: any) { - console.error("Error in putStore controller:", error); + console.error("Error uploading file:", error); + const statusCode = error instanceof HttpError ? error.statusCode : 500; + res.status(statusCode).json({ error: error.message }); + } +}; + +/** + * Commit the upload for a DataStore (POST /commit/{storeId}/{sessionId}) + * Moves files from the session's temporary upload folder to the store directory. + * @param {Request} req - The request object. + * @param {Response} res - The response object. 
+ */ +export const commitUpload = async (req: Request, res: Response): Promise => { + try { + const { storeId, sessionId } = req.params; + const finalDir = path.join(digFolderPath, "stores", storeId); + + // Retrieve the session information from the cache + const session = sessionCache[sessionId]; + if (!session) { + throw new HttpError(404, "No upload session found or session expired."); + } + + const sessionUploadDir = session.tmpDir; + + // Ensure that the final store directory exists + if (!fs.existsSync(finalDir)) { + fs.mkdirSync(finalDir, { recursive: true }); + } + + // Move all files from the temporary session directory to the final store directory + fs.readdirSync(sessionUploadDir).forEach(file => { + const sourcePath = path.join(sessionUploadDir, file); + const destinationPath = path.join(finalDir, file); + fs.renameSync(sourcePath, destinationPath); + }); + + // Clean up the session folder after committing + cleanupSession(sessionId); + res.status(200).json({ message: `Upload for DataStore ${storeId} under session ${sessionId} committed successfully.` }); + } catch (error: any) { + console.error("Error committing upload:", error); const statusCode = error instanceof HttpError ? error.statusCode : 500; - const errorMessage = error.message || "Failed to upload the file."; + res.status(statusCode).json({ error: error.message }); + } +}; + +/** + * Abort the upload session for a DataStore (POST /abort/{storeId}/{sessionId}) + * Deletes the session's temporary upload folder and its contents. + * @param {Request} req - The request object. + * @param {Response} res - The response object. + */ +export const abortUpload = async (req: Request, res: Response): Promise => { + try { + const { storeId, sessionId } = req.params; + + // Retrieve the session information from the cache + const session = sessionCache[sessionId]; + if (!session) { + throw new HttpError(404, "No upload session found or session expired."); + } - res.status(statusCode).json({ error: errorMessage }); + // Clean up the session folder and remove it from the cache + fs.rmSync(session.tmpDir, { recursive: true, force: true }); + cleanupSession(sessionId); + + res.status(200).json({ message: `Upload session ${sessionId} for DataStore ${storeId} aborted and cleaned up.` }); + } catch (error: any) { + console.error("Error aborting upload session:", error); + const statusCode = error instanceof HttpError ? 
error.statusCode : 500; + res.status(statusCode).json({ error: error.message }); } }; diff --git a/src/routes/storeRoutes.ts b/src/routes/storeRoutes.ts index 96bc5e7..d57e179 100644 --- a/src/routes/storeRoutes.ts +++ b/src/routes/storeRoutes.ts @@ -1,9 +1,10 @@ import express from "express"; import { headStore, - getStore, - putStore, - storeStatus + startUploadSession, + uploadFile, + commitUpload, + abortUpload } from "../controllers/merkleTreeController"; import { setMnemonic } from "../controllers/configController"; @@ -16,12 +17,21 @@ const router = express.Router(); router.post("/unsubscribe", express.json(), unsubscribeToStore); router.post("/subscribe", express.json(), subscribeToStore); router.post("/mnemonic", express.json(), setMnemonic); -router.get("/status/:storeId", storeStatus); -// Route to handle HEAD, GET, and PUT requests for /stores/:storeId +// Head request to check if a store exists router.head("/:storeId", verifyMnemonic, headStore); -router.get("/:storeId/*", getStore); -router.put("/:storeId/*", putStore); + +// Start an upload session for a store +router.post("/upload/:storeId", verifyMnemonic, startUploadSession); + +// Upload a file to a store's session (PUT /upload/{storeId}/{sessionId}/{filename}) +router.put("/upload/:storeId/:sessionId/:filename", verifyMnemonic, uploadFile); + +// Commit an upload (POST /commit/{storeId}/{sessionId}) +router.post("/commit/:storeId/:sessionId", verifyMnemonic, commitUpload); + +// Abort an upload session (POST /abort/{storeId}/{sessionId}) +router.post("/abort/:storeId/:sessionId", verifyMnemonic, abortUpload); export { router as storeRoutes };
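For completeness, a minimal sketch of how these routes might be wired into an Express application follows. The entry-point file name, the `/` mount prefix, and the port are assumptions for illustration; the repository's actual server bootstrap is not part of this diff and may mount `storeRoutes` under a different prefix.

```typescript
// src/app.ts (hypothetical entry point) — a minimal wiring sketch only.
// The "/" mount prefix and port 4159 are assumptions, not the project's
// confirmed configuration.
import express from "express";
import { storeRoutes } from "./routes/storeRoutes";

const app = express();

// The upload routes stream request bodies themselves, so no global
// body parser is applied here.
app.use("/", storeRoutes);

app.listen(4159, () => {
  console.log("Upload protocol server listening on port 4159");
});
```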