mirror of
https://github.com/mountain-loop/yaak.git
synced 2026-02-18 21:57:44 +01:00
Compare commits: `cli-improv`...`copilot/cr` — 1 commit (`0cb42dedc9`)
@@ -1,72 +0,0 @@
# Claude Context: Detaching Tauri from Yaak

## Goal

Make Yaak runnable as a standalone CLI without Tauri as a dependency. The core Rust crates in `crates/` should be usable independently, while Tauri-specific code lives in `crates-tauri/`.

## Project Structure

```
crates/         # Core crates - should NOT depend on Tauri
crates-tauri/   # Tauri-specific crates (yaak-app, yaak-tauri-utils, etc.)
crates-cli/     # CLI crate (yaak-cli)
```

## Completed Work

### 1. Folder Restructure

- Moved Tauri-dependent app code to `crates-tauri/yaak-app/`
- Created `crates-tauri/yaak-tauri-utils/` for shared Tauri utilities (window traits, api_client, error handling)
- Created `crates-cli/yaak-cli/` for the standalone CLI

### 2. Decoupled Crates (no longer depend on Tauri)

- **yaak-models**: Uses `init_standalone()` pattern for CLI database access
- **yaak-http**: Removed Tauri plugin, HttpConnectionManager initialized in yaak-app setup
- **yaak-common**: Only contains Tauri-free utilities (serde, platform)
- **yaak-crypto**: Removed Tauri plugin, EncryptionManager initialized in yaak-app setup, commands moved to yaak-app
- **yaak-grpc**: Replaced AppHandle with GrpcConfig struct, uses tokio::process::Command instead of Tauri sidecar

### 3. CLI Implementation

- Basic CLI at `crates-cli/yaak-cli/src/main.rs`
- Commands: workspaces, requests, send (by ID), get (ad-hoc URL), create
- Uses same database as Tauri app via `yaak_models::init_standalone()`
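
For illustration, a minimal sketch of what standalone database access might look like from the CLI. `init_standalone` is the real entry point named above, but its exact signature, return type, and async-ness are not shown in this diff and are assumptions here:

```rust
// Sketch only — the `open_db` wrapper and the shape of the
// `init_standalone` call are illustrative assumptions.
async fn open_db(data_dir: &std::path::Path) -> Result<(), Box<dyn std::error::Error>> {
    // Opens the same SQLite database the Tauri app uses, with no Tauri context
    let _db = yaak_models::init_standalone(data_dir).await?;
    Ok(())
}
```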

## Remaining Work

### Crates Still Depending on Tauri (in `crates/`)

1. **yaak-git** (3 files) - Moderate complexity
2. **yaak-plugins** (13 files) - **Hardest** - deeply integrated with Tauri for plugin-to-window communication
3. **yaak-sync** (4 files) - Moderate complexity
4. **yaak-ws** (5 files) - Moderate complexity

### Pattern for Decoupling

1. Remove the Tauri plugin `init()` function from the crate
2. Move commands to `yaak-app/src/commands.rs` or keep them inline in `lib.rs`
3. Move extension traits (e.g., `SomethingManagerExt`) to yaak-app or yaak-tauri-utils
4. Initialize managers in yaak-app's `.setup()` block
5. Remove `tauri` from Cargo.toml dependencies
6. Update `crates-tauri/yaak-app/capabilities/default.json` to remove the plugin permission
7. Replace `tauri::async_runtime::block_on` with `tokio::runtime::Handle::current().block_on()` (see the sketch below)
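
A minimal sketch of step 7, assuming the call site is synchronous code running on a thread under a tokio runtime (`fetch_value` is a hypothetical stand-in for real async work):

```rust
// Hypothetical async fn standing in for real async work
async fn fetch_value() -> u32 {
    42
}

fn blocking_caller() -> u32 {
    // Before: tauri::async_runtime::block_on(fetch_value())
    // After: use the ambient tokio runtime handle. Note that
    // Handle::block_on panics if called from inside an async task,
    // so this is only valid at synchronous call sites under a runtime
    // (e.g. inside tokio::task::spawn_blocking).
    tokio::runtime::Handle::current().block_on(fetch_value())
}
```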

## Key Files

- `crates-tauri/yaak-app/src/lib.rs` - Main Tauri app, setup block initializes managers
- `crates-tauri/yaak-app/src/commands.rs` - Migrated Tauri commands
- `crates-tauri/yaak-app/src/models_ext.rs` - Database plugin and extension traits
- `crates-tauri/yaak-tauri-utils/src/window.rs` - WorkspaceWindowTrait for window state
- `crates/yaak-models/src/lib.rs` - Contains `init_standalone()` for CLI usage

## Git Branch

Working on `detach-tauri` branch.

## Recent Commits

```
c40cff40 Remove Tauri dependencies from yaak-crypto and yaak-grpc
df495f1d Move Tauri utilities from yaak-common to yaak-tauri-utils
481e0273 Remove Tauri dependencies from yaak-http and yaak-common
10568ac3 Add HTTP request sending to yaak-cli
bcb7d600 Add yaak-cli stub with basic database access
e718a5f1 Refactor models_ext to use init_standalone from yaak-models
```

## Testing

- Run `cargo check -p <crate>` to verify a crate builds without Tauri
- Run `npm run app-dev` to test the Tauri app still works
- Run `cargo run -p yaak-cli -- --help` to test the CLI
@@ -1,62 +0,0 @@
---
description: Review a PR in a new worktree
allowed-tools: Bash(git worktree:*), Bash(gh pr:*), Bash(git branch:*)
---

Check out a GitHub pull request for review.

## Usage

```
/check-out-pr <PR_NUMBER>
```

## What to do

1. If no PR number is provided, list all open pull requests and ask the user to select one
2. Get PR information using `gh pr view <PR_NUMBER> --json number,headRefName`
3. **Ask the user** whether they want to:
   - **A) Check out in current directory** — simple `gh pr checkout <PR_NUMBER>`
   - **B) Create a new worktree** — isolated copy at `../yaak-worktrees/pr-<PR_NUMBER>`
4. Follow the appropriate path below

## Option A: Check out in current directory

1. Run `gh pr checkout <PR_NUMBER>`
2. Inform the user which branch they're now on

## Option B: Create a new worktree

1. Create a new worktree at `../yaak-worktrees/pr-<PR_NUMBER>` using `git worktree add` with a timeout of at least 300000ms (5 minutes), since the post-checkout hook runs a bootstrap script
2. Check out the PR branch in the new worktree using `gh pr checkout <PR_NUMBER>`
3. The post-checkout hook will automatically:
   - Create `.env.local` with unique ports
   - Copy editor config folders
   - Run `npm install && npm run bootstrap`
4. Inform the user:
   - Where the worktree was created
   - What ports were assigned
   - How to access it (cd command)
   - How to run the dev server
   - How to remove the worktree when done

### Example worktree output

```
Created worktree for PR #123 at ../yaak-worktrees/pr-123
Branch: feature-auth
Ports: Vite (1421), MCP (64344)

To start working:
  cd ../yaak-worktrees/pr-123
  npm run app-dev

To remove when done:
  git worktree remove ../yaak-worktrees/pr-123
```

## Error Handling

- If the PR doesn't exist, show a helpful error
- If the worktree already exists, inform the user and ask if they want to remove and recreate it
- If the `gh` CLI is not available, inform the user to install it
@@ -1,49 +0,0 @@
---
description: Generate formatted release notes for Yaak releases
allowed-tools: Bash(git tag:*)
---

Generate formatted release notes for Yaak releases by analyzing git history and pull request descriptions.

## What to do

1. Identify the version tag and the previous version
2. Retrieve all commits between versions
   - If the version is a beta version, retrieve commits between the beta version and the previous beta version
   - If the version is a stable version, retrieve commits between the stable version and the previous stable version
3. Fetch PR descriptions for linked issues to find:
   - Feedback URLs (feedback.yaak.app)
   - Additional context and descriptions
   - Installation links for plugins
4. Format the release notes using the standard Yaak format:
   - Changelog badge at the top
   - Bulleted list of changes with PR links
   - Feedback links where available
   - Full changelog comparison link at the bottom

## Output Format

The skill generates markdown-formatted release notes following this structure:

```markdown
[](https://yaak.app/changelog/VERSION)

- Feature/fix description by @username in [#123](https://github.com/mountain-loop/yaak/pull/123)
- [Linked feedback item](https://feedback.yaak.app/p/item) by @username in [#456](https://github.com/mountain-loop/yaak/pull/456)
- A simple item that doesn't have a feedback or PR link

**Full Changelog**: https://github.com/mountain-loop/yaak/compare/vPREV...vCURRENT
```

**IMPORTANT**: Always add a blank line around the markdown code fence and output the markdown code block last
**IMPORTANT**: PRs by `@gschier` should not mention the @username

## After Generating Release Notes

After outputting the release notes, ask the user if they would like to create a draft GitHub release with these notes. If they confirm, create the release using:

```bash
gh release create <tag> --draft --prerelease --title "Release <version>" --notes '<release notes>'
```

**IMPORTANT**: The release title format is "Release XXXX" where XXXX is the version WITHOUT the `v` prefix. For example, tag `v2026.2.1-beta.1` gets title "Release 2026.2.1-beta.1".
@@ -1,27 +0,0 @@
# Project Rules

## General Development

- **NEVER** commit or push without explicit confirmation

## Build and Lint

- **ALWAYS** run `npm run lint` after modifying TypeScript or JavaScript files
- Run `npm run bootstrap` after changing plugin runtime or MCP server code

## Plugin System

### Backend Constraints

- Always use `UpdateSource::Plugin` when calling database methods from plugin events
- Never send timestamps (`createdAt`, `updatedAt`) from TypeScript - the Rust backend controls these
- The backend uses `NaiveDateTime` (no timezone), so avoid sending ISO timestamp strings

### MCP Server

- The MCP server has **no active window context** - it cannot call `window.workspaceId()`
- Get the workspace ID from `workspaceCtx.yaak.workspace.list()` instead

## Rust Type Generation

- Run `cargo test --package yaak-plugins` (and likewise for other crates) to regenerate TypeScript bindings after modifying Rust event types
@@ -1,35 +0,0 @@
# Worktree Management Skill

## Creating Worktrees

When creating git worktrees for this project, ALWAYS use the path format:

```
../yaak-worktrees/<NAME>
```

For example:

- `git worktree add ../yaak-worktrees/feature-auth`
- `git worktree add ../yaak-worktrees/bugfix-login`
- `git worktree add ../yaak-worktrees/refactor-api`

## What Happens Automatically

The post-checkout hook will automatically:

1. Create `.env.local` with unique ports (YAAK_DEV_PORT and YAAK_PLUGIN_MCP_SERVER_PORT)
2. Copy gitignored editor config folders (.zed, .idea, etc.)
3. Run `npm install && npm run bootstrap`

## Deleting Worktrees

```bash
git worktree remove ../yaak-worktrees/<NAME>
```

## Port Assignments

- Main worktree: 1420 (Vite), 64343 (MCP)
- First worktree: 1421, 64344
- Second worktree: 1422, 64345
- etc.

Each worktree can run `npm run app-dev` simultaneously without conflicts.
.gitattributes (8 changes)

```diff
@@ -1,7 +1,5 @@
-crates-tauri/yaak-app/vendored/**/* linguist-generated=true
-crates-tauri/yaak-app/gen/schemas/**/* linguist-generated=true
-**/bindings/* linguist-generated=true
-crates/yaak-templates/pkg/* linguist-generated=true
+src-tauri/vendored/**/* linguist-generated=true
+src-tauri/gen/schemas/**/* linguist-generated=true
 
 # Ensure consistent line endings for test files that check exact content
-crates/yaak-http/tests/test.txt text eol=lf
+src-tauri/yaak-http/tests/test.txt text eol=lf
```
.github/workflows/ci.yml (3 changes)

```yaml
@@ -18,13 +18,14 @@ jobs:
      - uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2
        with:
          workspaces: 'src-tauri'
          shared-key: ci
          cache-on-failure: true

      - run: npm ci
      - run: npm run bootstrap
      - run: npm run lint
      - name: Run JS Tests
        run: npm test
      - name: Run Rust Tests
        run: cargo test --all
        working-directory: src-tauri
```
.github/workflows/claude.yml (50 lines deleted)

@@ -1,50 +0,0 @@
```yaml
name: Claude Code

on:
  issue_comment:
    types: [created]
  pull_request_review_comment:
    types: [created]
  issues:
    types: [opened, assigned]
  pull_request_review:
    types: [submitted]

jobs:
  claude:
    if: |
      (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
      (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
      (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
      (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
      issues: read
      id-token: write
      actions: read # Required for Claude to read CI results on PRs
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 1

      - name: Run Claude Code
        id: claude
        uses: anthropics/claude-code-action@v1
        with:
          claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}

          # This is an optional setting that allows Claude to read CI results on PRs
          additional_permissions: |
            actions: read

          # Optional: Give a custom prompt to Claude. If this is not specified, Claude will perform the instructions specified in the comment that tagged it.
          # prompt: 'Update the pull request description to include a summary of changes.'

          # Optional: Add claude_args to customize behavior and configuration
          # See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
          # or https://code.claude.com/docs/en/cli-reference for available options
          # claude_args: '--allowed-tools Bash(gh pr:*)'
```
.github/workflows/flathub.yml (52 lines deleted)

@@ -1,52 +0,0 @@
```yaml
name: Update Flathub
on:
  release:
    types: [published]

permissions:
  contents: read

jobs:
  update-flathub:
    name: Update Flathub manifest
    runs-on: ubuntu-latest
    # Only run for stable releases (skip betas/pre-releases)
    if: ${{ !github.event.release.prerelease }}
    steps:
      - name: Checkout app repo
        uses: actions/checkout@v4

      - name: Checkout Flathub repo
        uses: actions/checkout@v4
        with:
          repository: flathub/app.yaak.Yaak
          token: ${{ secrets.FLATHUB_TOKEN }}
          path: flathub-repo

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "22"

      - name: Install source generators
        run: |
          pip install flatpak-node-generator tomlkit aiohttp
          git clone --depth 1 https://github.com/flatpak/flatpak-builder-tools flatpak/flatpak-builder-tools

      - name: Run update-manifest.sh
        run: bash flatpak/update-manifest.sh "${{ github.event.release.tag_name }}" flathub-repo

      - name: Commit and push to Flathub
        working-directory: flathub-repo
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git add -A
          git diff --cached --quiet && echo "No changes to commit" && exit 0
          git commit -m "Update to ${{ github.event.release.tag_name }}"
          git push
```
.github/workflows/release.yml (98 changes)

```diff
@@ -1,7 +1,7 @@
 name: Generate Artifacts
 on:
   push:
-    tags: [v*]
+    tags: [ v* ]

 jobs:
   build-artifacts:
@@ -13,37 +13,37 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - platform: "macos-latest" # for Arm-based Macs (M1 and above).
-            args: "--target aarch64-apple-darwin"
-            yaak_arch: "arm64"
-            os: "macos"
-            targets: "aarch64-apple-darwin"
-          - platform: "macos-latest" # for Intel-based Macs.
-            args: "--target x86_64-apple-darwin"
-            yaak_arch: "x64"
-            os: "macos"
-            targets: "x86_64-apple-darwin"
-          - platform: "ubuntu-22.04"
-            args: ""
-            yaak_arch: "x64"
-            os: "ubuntu"
-            targets: ""
-          - platform: "ubuntu-22.04-arm"
-            args: ""
-            yaak_arch: "arm64"
-            os: "ubuntu"
-            targets: ""
-          - platform: "windows-latest"
-            args: ""
-            yaak_arch: "x64"
-            os: "windows"
-            targets: ""
+          - platform: 'macos-latest' # for Arm-based Macs (M1 and above).
+            args: '--target aarch64-apple-darwin'
+            yaak_arch: 'arm64'
+            os: 'macos'
+            targets: 'aarch64-apple-darwin'
+          - platform: 'macos-latest' # for Intel-based Macs.
+            args: '--target x86_64-apple-darwin'
+            yaak_arch: 'x64'
+            os: 'macos'
+            targets: 'x86_64-apple-darwin'
+          - platform: 'ubuntu-22.04'
+            args: ''
+            yaak_arch: 'x64'
+            os: 'ubuntu'
+            targets: ''
+          - platform: 'ubuntu-22.04-arm'
+            args: ''
+            yaak_arch: 'arm64'
+            os: 'ubuntu'
+            targets: ''
+          - platform: 'windows-latest'
+            args: ''
+            yaak_arch: 'x64'
+            os: 'windows'
+            targets: ''
           # Windows ARM64
-          - platform: "windows-latest"
-            args: "--target aarch64-pc-windows-msvc"
-            yaak_arch: "arm64"
-            os: "windows"
-            targets: "aarch64-pc-windows-msvc"
+          - platform: 'windows-latest'
+            args: '--target aarch64-pc-windows-msvc'
+            yaak_arch: 'arm64'
+            os: 'windows'
+            targets: 'aarch64-pc-windows-msvc'
     runs-on: ${{ matrix.platform }}
     timeout-minutes: 40
     steps:
@@ -60,6 +60,7 @@ jobs:

       - uses: Swatinem/rust-cache@v2
         with:
+          workspaces: 'src-tauri'
           shared-key: ci
           cache-on-failure: true

@@ -88,43 +89,18 @@ jobs:
           & $exe --version

       - run: npm ci
       - run: npm run bootstrap
         env:
           YAAK_TARGET_ARCH: ${{ matrix.yaak_arch }}
-      - run: npm run lint
-      - name: Run JS Tests
-        run: npm test
-      - name: Run Rust Tests
-        run: cargo test --all
-        working-directory: src-tauri

       - name: Set version
         run: npm run replace-version
         env:
           YAAK_VERSION: ${{ github.ref_name }}

-      - name: Sign vendored binaries (macOS only)
-        if: matrix.os == 'macos'
-        env:
-          APPLE_CERTIFICATE: ${{ secrets.APPLE_CERTIFICATE }}
-          APPLE_CERTIFICATE_PASSWORD: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }}
-          APPLE_SIGNING_IDENTITY: ${{ secrets.APPLE_SIGNING_IDENTITY }}
-          KEYCHAIN_PASSWORD: ${{ secrets.KEYCHAIN_PASSWORD }}
-        run: |
-          # Create keychain
-          KEYCHAIN_PATH=$RUNNER_TEMP/app-signing.keychain-db
-          security create-keychain -p "$KEYCHAIN_PASSWORD" $KEYCHAIN_PATH
-          security set-keychain-settings -lut 21600 $KEYCHAIN_PATH
-          security unlock-keychain -p "$KEYCHAIN_PASSWORD" $KEYCHAIN_PATH
-
-          # Import certificate
-          echo "$APPLE_CERTIFICATE" | base64 --decode > certificate.p12
-          security import certificate.p12 -P "$APPLE_CERTIFICATE_PASSWORD" -A -t cert -f pkcs12 -k $KEYCHAIN_PATH
-          security list-keychain -d user -s $KEYCHAIN_PATH
-
-          # Sign vendored binaries with hardened runtime and their specific entitlements
-          codesign --force --options runtime --entitlements crates-tauri/yaak-app/macos/entitlements.yaakprotoc.plist --sign "$APPLE_SIGNING_IDENTITY" crates-tauri/yaak-app/vendored/protoc/yaakprotoc || true
-          codesign --force --options runtime --entitlements crates-tauri/yaak-app/macos/entitlements.yaaknode.plist --sign "$APPLE_SIGNING_IDENTITY" crates-tauri/yaak-app/vendored/node/yaaknode || true
-
       - uses: tauri-apps/tauri-action@v0
         env:
           YAAK_TARGET_ARCH: ${{ matrix.yaak_arch }}
@@ -147,9 +123,9 @@ jobs:
           AZURE_CLIENT_SECRET: ${{ matrix.os == 'windows' && secrets.AZURE_CLIENT_SECRET }}
           AZURE_TENANT_ID: ${{ matrix.os == 'windows' && secrets.AZURE_TENANT_ID }}
         with:
-          tagName: "v__VERSION__"
-          releaseName: "Release __VERSION__"
-          releaseBody: "[Changelog __VERSION__](https://yaak.app/blog/__VERSION__)"
+          tagName: 'v__VERSION__'
+          releaseName: 'Release __VERSION__'
+          releaseBody: '[Changelog __VERSION__](https://yaak.app/blog/__VERSION__)'
           releaseDraft: true
           prerelease: true
-          args: "${{ matrix.args }} --config ./crates-tauri/yaak-app/tauri.release.conf.json"
+          args: '${{ matrix.args }} --config ./src-tauri/tauri.release.conf.json'
```
.gitignore (18 changes)

```diff
@@ -36,21 +36,3 @@ out
 tmp
 .zed
 codebook.toml
-target
-
-# Per-worktree Tauri config (generated by post-checkout hook)
-crates-tauri/yaak-app/tauri.worktree.conf.json
-
-# Tauri auto-generated permission files
-**/permissions/autogenerated
-**/permissions/schemas
-
-# Flatpak build artifacts
-flatpak-repo/
-.flatpak-builder/
-flatpak/flatpak-builder-tools/
-flatpak/cargo-sources.json
-flatpak/node-sources.json
-
-# Local Codex desktop env state
-.codex/environments/environment.toml
```

@@ -1 +0,0 @@
```
node scripts/git-hooks/post-checkout.mjs "$@"
```
Cargo.toml (73 lines deleted)

@@ -1,73 +0,0 @@
```toml
[workspace]
resolver = "2"
members = [
    "crates/yaak",
    # Shared crates (no Tauri dependency)
    "crates/yaak-core",
    "crates/yaak-common",
    "crates/yaak-crypto",
    "crates/yaak-git",
    "crates/yaak-grpc",
    "crates/yaak-http",
    "crates/yaak-models",
    "crates/yaak-plugins",
    "crates/yaak-sse",
    "crates/yaak-sync",
    "crates/yaak-templates",
    "crates/yaak-tls",
    "crates/yaak-ws",
    "crates/yaak-api",
    # CLI crates
    "crates-cli/yaak-cli",
    # Tauri-specific crates
    "crates-tauri/yaak-app",
    "crates-tauri/yaak-fonts",
    "crates-tauri/yaak-license",
    "crates-tauri/yaak-mac-window",
    "crates-tauri/yaak-tauri-utils",
]

[workspace.dependencies]
chrono = "0.4.42"
hex = "0.4.3"
keyring = "3.6.3"
log = "0.4.29"
reqwest = "0.12.20"
rustls = { version = "0.23.34", default-features = false }
rustls-platform-verifier = "0.6.2"
serde = "1.0.228"
serde_json = "1.0.145"
sha2 = "0.10.9"
tauri = "2.9.5"
tauri-plugin = "2.5.2"
tauri-plugin-dialog = "2.4.2"
tauri-plugin-shell = "2.3.3"
thiserror = "2.0.17"
tokio = "1.48.0"
ts-rs = "11.1.0"

# Internal crates - shared
yaak-core = { path = "crates/yaak-core" }
yaak = { path = "crates/yaak" }
yaak-common = { path = "crates/yaak-common" }
yaak-crypto = { path = "crates/yaak-crypto" }
yaak-git = { path = "crates/yaak-git" }
yaak-grpc = { path = "crates/yaak-grpc" }
yaak-http = { path = "crates/yaak-http" }
yaak-models = { path = "crates/yaak-models" }
yaak-plugins = { path = "crates/yaak-plugins" }
yaak-sse = { path = "crates/yaak-sse" }
yaak-sync = { path = "crates/yaak-sync" }
yaak-templates = { path = "crates/yaak-templates" }
yaak-tls = { path = "crates/yaak-tls" }
yaak-ws = { path = "crates/yaak-ws" }
yaak-api = { path = "crates/yaak-api" }

# Internal crates - Tauri-specific
yaak-fonts = { path = "crates-tauri/yaak-fonts" }
yaak-license = { path = "crates-tauri/yaak-license" }
yaak-mac-window = { path = "crates-tauri/yaak-mac-window" }
yaak-tauri-utils = { path = "crates-tauri/yaak-tauri-utils" }

[profile.release]
strip = false
```
README.md (6 changes)

```diff
@@ -1,6 +1,6 @@
 <p align="center">
   <a href="https://github.com/JamesIves/github-sponsors-readme-action">
-    <img width="200px" src="https://github.com/mountain-loop/yaak/raw/main/crates-tauri/yaak-app/icons/icon.png">
+    <img width="200px" src="https://github.com/mountain-loop/yaak/raw/main/src-tauri/icons/icon.png">
   </a>
 </p>

@@ -19,10 +19,10 @@

 <p align="center">
-  <!-- sponsors-premium --><a href="https://github.com/MVST-Solutions"><img src="https://github.com/MVST-Solutions.png" width="80px" alt="User avatar: MVST-Solutions" /></a> <a href="https://github.com/dharsanb"><img src="https://github.com/dharsanb.png" width="80px" alt="User avatar: dharsanb" /></a> <a href="https://github.com/railwayapp"><img src="https://github.com/railwayapp.png" width="80px" alt="User avatar: railwayapp" /></a> <a href="https://github.com/caseyamcl"><img src="https://github.com/caseyamcl.png" width="80px" alt="User avatar: caseyamcl" /></a> <a href="https://github.com/bytebase"><img src="https://github.com/bytebase.png" width="80px" alt="User avatar: bytebase" /></a> <a href="https://github.com/"><img src="https://raw.githubusercontent.com/JamesIves/github-sponsors-readme-action/dev/.github/assets/placeholder.png" width="80px" alt="User avatar: " /></a> <!-- sponsors-premium -->
+  <!-- sponsors-premium --><a href="https://github.com/MVST-Solutions"><img src="https://github.com/MVST-Solutions.png" width="80px" alt="User avatar: MVST-Solutions" /></a> <a href="https://github.com/dharsanb"><img src="https://github.com/dharsanb.png" width="80px" alt="User avatar: dharsanb" /></a> <a href="https://github.com/railwayapp"><img src="https://github.com/railwayapp.png" width="80px" alt="User avatar: railwayapp" /></a> <a href="https://github.com/caseyamcl"><img src="https://github.com/caseyamcl.png" width="80px" alt="User avatar: caseyamcl" /></a> <a href="https://github.com/"><img src="https://raw.githubusercontent.com/JamesIves/github-sponsors-readme-action/dev/.github/assets/placeholder.png" width="80px" alt="User avatar: " /></a> <!-- sponsors-premium -->
 </p>
 <p align="center">
-  <!-- sponsors-base --><a href="https://github.com/seanwash"><img src="https://github.com/seanwash.png" width="50px" alt="User avatar: seanwash" /></a> <a href="https://github.com/jerath"><img src="https://github.com/jerath.png" width="50px" alt="User avatar: jerath" /></a> <a href="https://github.com/itsa-sh"><img src="https://github.com/itsa-sh.png" width="50px" alt="User avatar: itsa-sh" /></a> <a href="https://github.com/dmmulroy"><img src="https://github.com/dmmulroy.png" width="50px" alt="User avatar: dmmulroy" /></a> <a href="https://github.com/timcole"><img src="https://github.com/timcole.png" width="50px" alt="User avatar: timcole" /></a> <a href="https://github.com/VLZH"><img src="https://github.com/VLZH.png" width="50px" alt="User avatar: VLZH" /></a> <a href="https://github.com/terasaka2k"><img src="https://github.com/terasaka2k.png" width="50px" alt="User avatar: terasaka2k" /></a> <a href="https://github.com/andriyor"><img src="https://github.com/andriyor.png" width="50px" alt="User avatar: andriyor" /></a> <a href="https://github.com/majudhu"><img src="https://github.com/majudhu.png" width="50px" alt="User avatar: majudhu" /></a> <a href="https://github.com/axelrindle"><img src="https://github.com/axelrindle.png" width="50px" alt="User avatar: axelrindle" /></a> <a href="https://github.com/jirizverina"><img src="https://github.com/jirizverina.png" width="50px" alt="User avatar: jirizverina" /></a> <a href="https://github.com/chip-well"><img src="https://github.com/chip-well.png" width="50px" alt="User avatar: chip-well" /></a> <a href="https://github.com/GRAYAH"><img src="https://github.com/GRAYAH.png" width="50px" alt="User avatar: GRAYAH" /></a> <a href="https://github.com/flashblaze"><img src="https://github.com/flashblaze.png" width="50px" alt="User avatar: flashblaze" /></a> <!-- sponsors-base -->
+  <!-- sponsors-base --><a href="https://github.com/seanwash"><img src="https://github.com/seanwash.png" width="50px" alt="User avatar: seanwash" /></a> <a href="https://github.com/jerath"><img src="https://github.com/jerath.png" width="50px" alt="User avatar: jerath" /></a> <a href="https://github.com/itsa-sh"><img src="https://github.com/itsa-sh.png" width="50px" alt="User avatar: itsa-sh" /></a> <a href="https://github.com/dmmulroy"><img src="https://github.com/dmmulroy.png" width="50px" alt="User avatar: dmmulroy" /></a> <a href="https://github.com/timcole"><img src="https://github.com/timcole.png" width="50px" alt="User avatar: timcole" /></a> <a href="https://github.com/VLZH"><img src="https://github.com/VLZH.png" width="50px" alt="User avatar: VLZH" /></a> <a href="https://github.com/terasaka2k"><img src="https://github.com/terasaka2k.png" width="50px" alt="User avatar: terasaka2k" /></a> <a href="https://github.com/andriyor"><img src="https://github.com/andriyor.png" width="50px" alt="User avatar: andriyor" /></a> <a href="https://github.com/majudhu"><img src="https://github.com/majudhu.png" width="50px" alt="User avatar: majudhu" /></a> <a href="https://github.com/axelrindle"><img src="https://github.com/axelrindle.png" width="50px" alt="User avatar: axelrindle" /></a> <a href="https://github.com/jirizverina"><img src="https://github.com/jirizverina.png" width="50px" alt="User avatar: jirizverina" /></a> <a href="https://github.com/chip-well"><img src="https://github.com/chip-well.png" width="50px" alt="User avatar: chip-well" /></a> <a href="https://github.com/GRAYAH"><img src="https://github.com/GRAYAH.png" width="50px" alt="User avatar: GRAYAH" /></a> <!-- sponsors-base -->
 </p>

@@ -64,7 +64,7 @@ visit [`DEVELOPMENT.md`](DEVELOPMENT.md) for tips on setting up your environment
 ## Useful Resources

 - [Feedback and Bug Reports](https://feedback.yaak.app)
-- [Documentation](https://yaak.app/docs)
+- [Documentation](https://feedback.yaak.app/help)
 - [Yaak vs Postman](https://yaak.app/alternatives/postman)
 - [Yaak vs Bruno](https://yaak.app/alternatives/bruno)
 - [Yaak vs Insomnia](https://yaak.app/alternatives/insomnia)
```
biome.json (13 changes)

```
@@ -1,5 +1,5 @@
 {
-  "$schema": "https://biomejs.dev/schemas/2.3.11/schema.json",
+  "$schema": "https://biomejs.dev/schemas/2.3.7/schema.json",
   "linter": {
     "enabled": true,
     "rules": {
@@ -38,17 +38,14 @@
     "!**/node_modules",
     "!**/dist",
     "!**/build",
     "!target",
     "!scripts",
     "!crates",
     "!crates-tauri",
     "!packages/plugin-runtime",
     "!packages/plugin-runtime-types",
     "!src-tauri",
     "!src-web/tailwind.config.cjs",
     "!src-web/postcss.config.cjs",
     "!src-web/vite.config.ts",
     "!src-web/routeTree.gen.ts",
     "!packages/plugin-runtime-types/lib",
     "!**/bindings",
     "!flatpak"
     "!src-web/routeTree.gen.ts"
   ]
 }
}
```
@@ -1,31 +0,0 @@
```toml
[package]
name = "yaak-cli"
version = "0.1.0"
edition = "2024"
publish = false

[[bin]]
name = "yaakcli"
path = "src/main.rs"

[dependencies]
clap = { version = "4", features = ["derive"] }
dirs = "6"
env_logger = "0.11"
futures = "0.3"
log = { workspace = true }
schemars = { version = "0.8.22", features = ["chrono"] }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
yaak = { workspace = true }
yaak-crypto = { workspace = true }
yaak-http = { workspace = true }
yaak-models = { workspace = true }
yaak-plugins = { workspace = true }
yaak-templates = { workspace = true }

[dev-dependencies]
assert_cmd = "2"
predicates = "3"
tempfile = "3"
```
@@ -1,341 +0,0 @@
# CLI Command Architecture Plan

## Goal

Redesign the yaak-cli command structure to use a resource-oriented `<resource> <action>`
pattern that scales well, is discoverable, and supports both human and LLM workflows.

## Status Snapshot

Current branch state:

- Modular CLI structure with command modules and shared `CliContext`
- Resource/action hierarchy in place for:
  - `workspace list|show|create|update|delete`
  - `request list|show|create|update|send|delete|schema`
  - `folder list|show|create|update|delete`
  - `environment list|show|create|update|delete`
- Top-level `send` resolves request/folder/workspace IDs and supports `--sequential|--parallel` + `--fail-fast`
- Legacy `get` command removed
- JSON create/update flow implemented (`--json` and positional JSON shorthand)
- Request schema generation implemented via `schemars`, with plugin auth-field merge into `authentication`
- `request send` is polymorphic via `get_any_request`; HTTP is implemented, gRPC/WebSocket return explicit NYI errors

Progress checklist:

- [x] Phase 1 complete
- [x] Phase 2 complete
- [x] Phase 3 complete
- [x] Phase 4 complete
- [x] Phase 5 complete
- [x] Phase 6 complete

## Command Architecture

### Design Principles

- **Resource-oriented**: top-level commands are nouns, subcommands are verbs
- **Polymorphic requests**: `request` covers HTTP, gRPC, and WebSocket — the CLI
  resolves the type via `get_any_request` and adapts behavior accordingly
- **Simple creation, full-fidelity via JSON**: human-friendly flags for basic creation,
  `--json` for full control (targeted at LLM and scripting workflows)
- **Runtime schema introspection**: `request schema` outputs JSON Schema for the request
  models, with dynamic auth fields populated from loaded plugins at runtime
- **Destructive actions require confirmation**: `delete` commands prompt for user
  confirmation before proceeding. Can be bypassed with `--yes` / `-y` for scripting

### Commands

```
# Top-level shortcut
yaakcli send <id> [--sequential|--parallel] [--fail-fast] [-e <env_id>]

# Resource commands
yaakcli workspace list
yaakcli workspace show <id>
yaakcli workspace create --name <name>
yaakcli workspace create --json '{"name": "My Workspace"}'
yaakcli workspace create '{"name": "My Workspace"}'  # positional JSON shorthand
yaakcli workspace update --json '{"id": "wk_abc", "name": "New Name"}'
yaakcli workspace delete <id>

yaakcli request list <workspace_id>
yaakcli request show <id>
yaakcli request create <workspace_id> --name <name> --url <url> [--method GET]
yaakcli request create --json '{"workspaceId": "wk_abc", "url": "..."}'
yaakcli request update --json '{"id": "rq_abc", "url": "https://new.com"}'
yaakcli request send <id> [-e <env_id>]
yaakcli request delete <id>
yaakcli request schema <http|grpc|websocket>

yaakcli folder list <workspace_id>
yaakcli folder show <id>
yaakcli folder create <workspace_id> --name <name>
yaakcli folder create --json '{"workspaceId": "wk_abc", "name": "Auth"}'
yaakcli folder update --json '{"id": "fl_abc", "name": "New Name"}'
yaakcli folder delete <id>

yaakcli environment list <workspace_id>
yaakcli environment show <id>
yaakcli environment create <workspace_id> --name <name>
yaakcli environment create --json '{"workspaceId": "wk_abc", "name": "Production"}'
yaakcli environment update --json '{"id": "ev_abc", ...}'
yaakcli environment delete <id>
```

### `send` — Top-Level Shortcut

`yaakcli send <id>` is a convenience alias that accepts any sendable ID. It tries
each type in order via DB lookups (short-circuiting on the first match):

1. Request (HTTP, gRPC, or WebSocket via `get_any_request`)
2. Folder (sends all requests in the folder)
3. Workspace (sends all requests in the workspace)

ID prefixes exist (e.g. `rq_`, `fl_`, `wk_`) but are not relied upon — resolution
is purely by DB lookup, as sketched below.

`request send <id>` is the same but restricted to request IDs only.
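
For illustration, a minimal sketch of that dispatch order. The lookup functions and types are hypothetical stand-ins for the real yaak-models queries:

```rust
struct AnyRequest;
struct Folder;
struct Workspace;

enum SendTarget {
    Request(AnyRequest),
    Folder(Folder),
    Workspace(Workspace),
}

// Stubs standing in for the real yaak-models DB lookups
fn get_any_request(_id: &str) -> Option<AnyRequest> { None }
fn get_folder(_id: &str) -> Option<Folder> { None }
fn get_workspace(_id: &str) -> Option<Workspace> { None }

fn resolve_send_target(id: &str) -> Result<SendTarget, String> {
    // Try each type in priority order, short-circuiting on the first match
    if let Some(r) = get_any_request(id) {
        return Ok(SendTarget::Request(r));
    }
    if let Some(f) = get_folder(id) {
        return Ok(SendTarget::Folder(f));
    }
    if let Some(w) = get_workspace(id) {
        return Ok(SendTarget::Workspace(w));
    }
    Err(format!("no request, folder, or workspace with ID '{id}'"))
}
```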

### Request Send — Polymorphic Behavior

`send` means "execute this request" regardless of protocol:

- **HTTP**: send request, print response, exit
- **gRPC**: currently returns an explicit "not implemented yet in yaak-cli" error
- **WebSocket**: currently returns an explicit "not implemented yet in yaak-cli" error

### `request schema` — Runtime JSON Schema

Outputs a JSON Schema describing the full request shape, including dynamic fields:

1. Generate the base schema from the `schemars::JsonSchema` derive on the Rust model structs
2. Load plugins, collect auth strategy definitions and their form inputs
3. Merge plugin-defined auth fields into the `authentication` property as a `oneOf`
4. Output the combined schema as JSON

This lets an LLM call `schema`, read the shape, and construct valid JSON for
`create --json` or `update --json`. A minimal sketch of the flow follows.
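
A rough sketch of steps 1-4 using `schemars` 0.8 (the version in yaak-cli's dependencies). The struct and the source of the plugin subschemas are illustrative stand-ins, not the real yaak-models types:

```rust
use schemars::{schema_for, JsonSchema};
use serde::{Deserialize, Serialize};

// Stand-in for the real HttpRequest model in yaak-models
#[derive(Serialize, Deserialize, JsonSchema)]
struct HttpRequestSketch {
    url: String,
    method: String,
    authentication: serde_json::Value,
}

fn print_request_schema(plugin_auth_schemas: Vec<serde_json::Value>) -> serde_json::Result<()> {
    // 1. Base schema from the schemars derive
    let root = schema_for!(HttpRequestSketch);
    let mut schema = serde_json::to_value(&root)?;

    // 2-3. Merge plugin-defined auth variants into `authentication` as a oneOf
    schema["properties"]["authentication"] =
        serde_json::json!({ "oneOf": plugin_auth_schemas });

    // 4. Output the combined schema
    println!("{}", serde_json::to_string_pretty(&schema)?);
    Ok(())
}
```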

## Implementation Steps

### Phase 1: Restructure commands (no new functionality)

Refactor `main.rs` into the new resource/action pattern using clap subcommand nesting.
Existing behavior stays the same, just reorganized. Remove the `get` command.

1. Create module structure: `commands/workspace.rs`, `commands/request.rs`, etc.
2. Define nested clap enums:
   ```rust
   enum Commands {
       Send(SendArgs),
       Workspace(WorkspaceArgs),
       Request(RequestArgs),
       Folder(FolderArgs),
       Environment(EnvironmentArgs),
   }
   ```
3. Move existing `Workspaces` logic into `workspace list`
4. Move existing `Requests` logic into `request list`
5. Move existing `Send` logic into `request send`
6. Move existing `Create` logic into `request create`
7. Delete the `Get` command entirely
8. Extract shared setup (DB init, plugin init, encryption) into a reusable context struct (one possible shape is sketched below)
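
One possible shape for the step-8 context struct — the fields shown are illustrative; the real `CliContext` on this branch also carries DB, plugin, and encryption handles whose types aren't visible in this diff:

```rust
use std::path::PathBuf;

// Illustrative sketch; real handle types omitted
pub struct CliContext {
    pub data_dir: PathBuf,
    pub environment: Option<String>,
    pub verbose: bool,
    // plus handles for the database, plugin runtime, and encryption manager
}

impl CliContext {
    pub fn new(data_dir: PathBuf, environment: Option<String>, verbose: bool) -> Self {
        Self { data_dir, environment, verbose }
    }
}
```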

### Phase 2: Add missing CRUD commands

Status: complete

1. `workspace show <id>`
2. `workspace create --name <name>` (and `--json`)
3. `workspace update --json`
4. `workspace delete <id>`
5. `request show <id>` (JSON output of the full request model)
6. `request delete <id>`
7. `folder list <workspace_id>`
8. `folder show <id>`
9. `folder create <workspace_id> --name <name>` (and `--json`)
10. `folder update --json`
11. `folder delete <id>`
12. `environment list <workspace_id>`
13. `environment show <id>`
14. `environment create <workspace_id> --name <name>` (and `--json`)
15. `environment update --json`
16. `environment delete <id>`

### Phase 3: JSON input for create/update

Both commands accept JSON via `--json <string>` or as a positional argument (detected
by a leading `{`). They follow the same upsert pattern as the plugin API.

- **`create --json`**: JSON must include `workspaceId`. Must NOT include `id` (or
  use the empty string `""`). Deserializes into the model with defaults for missing fields,
  then upserts (insert).
- **`update --json`**: JSON must include `id`. Performs a fetch-merge-upsert:
  1. Fetch the existing model from the DB
  2. Serialize it to `serde_json::Value`
  3. Deep-merge the user's partial JSON on top (JSON Merge Patch / RFC 7386 semantics)
  4. Deserialize back into the typed model
  5. Upsert (update)

This matches how the MCP server plugin already does it (fetch existing, spread, override),
but the CLI handles the merge server-side so callers don't have to.

Setting a field to `null` removes it (for `Option<T>` fields), per RFC 7386.

Implementation:

1. Add `--json` flag and positional JSON detection to `create` commands
2. Add `update` commands with a required `--json` flag
3. Implement a JSON merge utility (or use the `json-patch` crate) — a sketch follows
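
A self-contained sketch of the step-3 merge utility, implementing RFC 7386 semantics directly on `serde_json::Value` (the `json-patch` crate's `merge` function does the same thing):

```rust
use serde_json::{Map, Value};

/// JSON Merge Patch (RFC 7386): objects merge recursively, `null`
/// removes a key, and any other value replaces the target outright.
fn merge_patch(target: &mut Value, patch: &Value) {
    if let Value::Object(patch_obj) = patch {
        if !target.is_object() {
            *target = Value::Object(Map::new());
        }
        let target_obj = target.as_object_mut().unwrap();
        for (key, value) in patch_obj {
            if value.is_null() {
                target_obj.remove(key); // null clears the field
            } else {
                merge_patch(
                    target_obj.entry(key.clone()).or_insert(Value::Null),
                    value,
                );
            }
        }
    } else {
        *target = patch.clone();
    }
}
```

In the fetch-merge-upsert flow, `target` is the existing model serialized to a `Value` and `patch` is the user's partial JSON; the merged result is deserialized back into the typed model before upserting.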

### Phase 4: Runtime schema generation

1. Add a `schemars` dependency to `yaak-models`
2. Derive `JsonSchema` on `HttpRequest`, `GrpcRequest`, `WebsocketRequest`, and their
   nested types (`HttpRequestHeader`, `HttpUrlParameter`, etc.)
3. Implement the `request schema` command:
   - Generate the base schema from schemars
   - Query plugins for auth strategy form inputs
   - Convert plugin form inputs into JSON Schema properties
   - Merge into the `authentication` field
   - Print to stdout

### Phase 5: Polymorphic send

1. Update `request send` to use `get_any_request` to resolve the request type
2. Match on the `AnyRequest` variant and dispatch to the appropriate sender:
   - `AnyRequest::HttpRequest` — existing HTTP send logic
   - `AnyRequest::GrpcRequest` — gRPC invoke (future implementation)
   - `AnyRequest::WebsocketRequest` — WebSocket connect (future implementation)
3. gRPC and WebSocket send can initially return "not yet implemented" errors

### Phase 6: Top-level `send` and folder/workspace send

1. Add a top-level `yaakcli send <id>` command
2. Resolve the ID by trying DB lookups in order: any_request → folder → workspace
3. For a folder: list all requests in the folder, send each
4. For a workspace: list all requests in the workspace, send each
5. Add execution options: `--sequential` (default), `--parallel`, `--fail-fast`

## Execution Plan (PR Slices)

### PR 1: Command tree refactor + compatibility aliases

Scope:

1. Introduce `commands/` modules and a `CliContext` for shared setup
2. Add the new clap hierarchy (`workspace`, `request`, `folder`, `environment`)
3. Route existing behavior into:
   - `workspace list`
   - `request list <workspace_id>`
   - `request send <id>`
   - `request create <workspace_id> ...`
4. Keep compatibility aliases temporarily:
   - `workspaces` -> `workspace list`
   - `requests <workspace_id>` -> `request list <workspace_id>`
   - `create ...` -> `request create ...`
5. Remove `get` and update help text

Acceptance criteria:

- `yaakcli --help` shows the noun/verb structure
- Existing list/send/create workflows still work
- No behavior change in HTTP send output format

### PR 2: CRUD surface area

Scope:

1. Implement `show/create/update/delete` for `workspace`, `request`, `folder`, `environment`
2. Ensure delete commands require confirmation by default (`--yes` bypass; sketched below)
3. Normalize the output format for list/show/create/update/delete responses

Acceptance criteria:

- Every command listed in the "Commands" section parses and executes
- Delete commands are safe by default in interactive terminals
- `--yes` supports non-interactive scripts
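
A minimal sketch of that delete guard, using only the standard library. The branch's real helper lives at `crate::utils::confirm::confirm_delete` (visible in the CLI sources later in this diff); this version only illustrates the behavior described above:

```rust
use std::io::{self, BufRead, IsTerminal, Write};

fn confirm_delete(what: &str, yes: bool) -> io::Result<bool> {
    if yes {
        return Ok(true); // --yes / -y bypass for scripting
    }
    if !io::stdin().is_terminal() {
        // Non-interactive: refuse instead of hanging on a prompt
        return Ok(false);
    }
    print!("Delete {what}? [y/N] ");
    io::stdout().flush()?;
    let mut answer = String::new();
    io::stdin().lock().read_line(&mut answer)?;
    Ok(matches!(answer.trim(), "y" | "Y" | "yes"))
}
```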

### PR 3: JSON input + merge patch semantics

Scope:

1. Add a shared parser for `--json` and the positional JSON shorthand
2. Add `create --json` and `update --json` for all mutable resources
3. Implement server-side RFC 7386 merge patch behavior
4. Add guardrails:
   - `create --json`: reject a non-empty `id`
   - `update --json`: require `id`

Acceptance criteria:

- A partial `update --json` only modifies the provided keys
- `null` clears optional values
- Invalid JSON and missing required fields return actionable errors

### PR 4: `request schema` and plugin auth integration

Scope:

1. Add `schemars` to `yaak-models` and derive `JsonSchema` for the request models
2. Implement `request schema <http|grpc|websocket>`
3. Merge plugin auth form inputs into the `authentication` schema at runtime

Acceptance criteria:

- The command prints a valid JSON schema
- The schema reflects installed auth providers at runtime
- No panic when plugins fail to initialize (degrade gracefully)

### PR 5: Polymorphic request send

Scope:

1. Replace request resolution in `request send` with `get_any_request`
2. Dispatch by request type
3. Keep HTTP fully functional
4. Return explicit NYI errors for gRPC/WebSocket until implemented

Acceptance criteria:

- HTTP behavior remains unchanged
- gRPC/WebSocket IDs are recognized and return an explicit status

### PR 6: Top-level `send` + bulk execution

Scope:

1. Add top-level `send <id>` for request/folder/workspace IDs
2. Implement folder/workspace fan-out execution
3. Add execution controls: `--sequential`, `--parallel`, `--fail-fast`

Acceptance criteria:

- Correct ID dispatch order: request -> folder -> workspace
- Deterministic summary output (success/failure counts)
- Non-zero exit code when any request fails (unless explicitly configured otherwise)

## Validation Matrix

1. CLI parsing tests for every command path, including aliases while retained (example below)
2. Integration tests against a temp SQLite DB for CRUD flows
3. Snapshot tests for output text where scripting compatibility matters
4. Manual smoke tests:
   - Send an HTTP request with template/rendered vars
   - JSON create/update for each resource
   - Delete confirmation and `--yes`
   - Top-level `send` on request/folder/workspace
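
For item 1, a parsing test might look like the following, using the `assert_cmd` and `predicates` dev-dependencies already declared in yaak-cli's Cargo.toml (the asserted output string is an assumption about the help text):

```rust
use assert_cmd::Command;
use predicates::prelude::*;

#[test]
fn help_shows_resource_commands() {
    // Builds and runs the yaakcli binary, then checks its --help output
    Command::cargo_bin("yaakcli")
        .unwrap()
        .arg("--help")
        .assert()
        .success()
        .stdout(predicate::str::contains("workspace"));
}
```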

## Open Questions

1. Should compatibility aliases (`workspaces`, `requests`, `create`) be removed immediately or after one release cycle?
2. For bulk `send`, should the default behavior stop on the first failure or continue and summarize?
3. Should command output default to human-readable text with an optional `--format json`, or return JSON by default for `show`/`list`?
4. For `request schema`, should plugin-derived auth fields be namespaced by plugin ID to avoid collisions?

## Crate Changes

- **yaak-cli**: restructure into modules, new clap hierarchy
- **yaak-models**: add a `schemars` dependency, derive `JsonSchema` on the model structs
  (current derives: `Debug, Clone, PartialEq, Serialize, Deserialize, Default, TS`)
@@ -1,87 +0,0 @@
# yaak-cli

Command-line interface for Yaak.

## Command Overview

Current top-level commands:

```text
yaakcli send <request_id>
yaakcli workspace list
yaakcli workspace show <workspace_id>
yaakcli workspace create --name <name>
yaakcli workspace create --json '{"name":"My Workspace"}'
yaakcli workspace create '{"name":"My Workspace"}'
yaakcli workspace update --json '{"id":"wk_abc","description":"Updated"}'
yaakcli workspace delete <workspace_id> [--yes]
yaakcli request list <workspace_id>
yaakcli request show <request_id>
yaakcli request send <request_id>
yaakcli request create <workspace_id> --name <name> --url <url> [--method GET]
yaakcli request create --json '{"workspaceId":"wk_abc","name":"Users","url":"https://api.example.com/users"}'
yaakcli request create '{"workspaceId":"wk_abc","name":"Users","url":"https://api.example.com/users"}'
yaakcli request update --json '{"id":"rq_abc","name":"Users v2"}'
yaakcli request delete <request_id> [--yes]
yaakcli folder list <workspace_id>
yaakcli folder show <folder_id>
yaakcli folder create <workspace_id> --name <name>
yaakcli folder create --json '{"workspaceId":"wk_abc","name":"Auth"}'
yaakcli folder create '{"workspaceId":"wk_abc","name":"Auth"}'
yaakcli folder update --json '{"id":"fl_abc","name":"Auth v2"}'
yaakcli folder delete <folder_id> [--yes]
yaakcli environment list <workspace_id>
yaakcli environment show <environment_id>
yaakcli environment create <workspace_id> --name <name>
yaakcli environment create --json '{"workspaceId":"wk_abc","name":"Production"}'
yaakcli environment create '{"workspaceId":"wk_abc","name":"Production"}'
yaakcli environment update --json '{"id":"ev_abc","color":"#00ff00"}'
yaakcli environment delete <environment_id> [--yes]
```

Global options:

- `--data-dir <path>`: use a custom data directory
- `-e, --environment <id>`: environment to use during request rendering/sending
- `-v, --verbose`: verbose logging and send output

Notes:

- `send` is currently a shortcut for sending an HTTP request ID.
- `delete` commands prompt for confirmation unless `--yes` is provided.
- In non-interactive mode, `delete` commands require `--yes`.
- `create` and `update` commands support `--json` and positional JSON shorthand.
- `update` uses JSON Merge Patch semantics (RFC 7386) for partial updates.

## Examples

```bash
yaakcli workspace list
yaakcli workspace create --name "My Workspace"
yaakcli workspace show wk_abc
yaakcli workspace update --json '{"id":"wk_abc","description":"Team workspace"}'
yaakcli request list wk_abc
yaakcli request show rq_abc
yaakcli request create wk_abc --name "Users" --url "https://api.example.com/users"
yaakcli request update --json '{"id":"rq_abc","name":"Users v2"}'
yaakcli request send rq_abc -e ev_abc
yaakcli request delete rq_abc --yes
yaakcli folder create wk_abc --name "Auth"
yaakcli folder update --json '{"id":"fl_abc","name":"Auth v2"}'
yaakcli environment create wk_abc --name "Production"
yaakcli environment update --json '{"id":"ev_abc","color":"#00ff00"}'
```

## Roadmap

Planned command expansion (request schema and polymorphic send) is tracked in `PLAN.md`.

When command behavior changes, update this README and verify with:

```bash
cargo run -q -p yaak-cli -- --help
cargo run -q -p yaak-cli -- request --help
cargo run -q -p yaak-cli -- workspace --help
cargo run -q -p yaak-cli -- folder --help
cargo run -q -p yaak-cli -- environment --help
```
@@ -1,307 +0,0 @@
|
||||
use clap::{Args, Parser, Subcommand, ValueEnum};
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(name = "yaakcli")]
|
||||
#[command(about = "Yaak CLI - API client from the command line")]
|
||||
pub struct Cli {
|
||||
/// Use a custom data directory
|
||||
#[arg(long, global = true)]
|
||||
pub data_dir: Option<PathBuf>,
|
||||
|
||||
/// Environment ID to use for variable substitution
|
||||
#[arg(long, short, global = true)]
|
||||
pub environment: Option<String>,
|
||||
|
||||
/// Enable verbose logging
|
||||
#[arg(long, short, global = true)]
|
||||
pub verbose: bool,
|
||||
|
||||
#[command(subcommand)]
|
||||
pub command: Commands,
|
||||
}
|
||||
|
||||
#[derive(Subcommand)]
|
||||
pub enum Commands {
|
||||
/// Send a request, folder, or workspace by ID
|
||||
Send(SendArgs),
|
||||
|
||||
/// Workspace commands
|
||||
Workspace(WorkspaceArgs),
|
||||
|
||||
/// Request commands
|
||||
Request(RequestArgs),
|
||||
|
||||
/// Folder commands
|
||||
Folder(FolderArgs),
|
||||
|
||||
/// Environment commands
|
||||
Environment(EnvironmentArgs),
|
||||
}
|
||||
|
||||
#[derive(Args)]
|
||||
pub struct SendArgs {
|
||||
/// Request, folder, or workspace ID
|
||||
pub id: String,
|
||||
|
||||
/// Execute requests sequentially (default)
|
||||
#[arg(long, conflicts_with = "parallel")]
|
||||
pub sequential: bool,
|
||||
|
||||
/// Execute requests in parallel
|
||||
    #[arg(long, conflicts_with = "sequential")]
    pub parallel: bool,

    /// Stop on first request failure when sending folders/workspaces
    #[arg(long, conflicts_with = "parallel")]
    pub fail_fast: bool,
}

#[derive(Args)]
pub struct WorkspaceArgs {
    #[command(subcommand)]
    pub command: WorkspaceCommands,
}

#[derive(Subcommand)]
pub enum WorkspaceCommands {
    /// List all workspaces
    List,

    /// Show a workspace as JSON
    Show {
        /// Workspace ID
        workspace_id: String,
    },

    /// Create a workspace
    Create {
        /// Workspace name
        #[arg(short, long)]
        name: Option<String>,

        /// JSON payload
        #[arg(long, conflicts_with = "json_input")]
        json: Option<String>,

        /// JSON payload shorthand
        #[arg(value_name = "JSON", conflicts_with = "json")]
        json_input: Option<String>,
    },

    /// Update a workspace
    Update {
        /// JSON payload
        #[arg(long, conflicts_with = "json_input")]
        json: Option<String>,

        /// JSON payload shorthand
        #[arg(value_name = "JSON", conflicts_with = "json")]
        json_input: Option<String>,
    },

    /// Delete a workspace
    Delete {
        /// Workspace ID
        workspace_id: String,

        /// Skip confirmation prompt
        #[arg(short, long)]
        yes: bool,
    },
}

#[derive(Args)]
pub struct RequestArgs {
    #[command(subcommand)]
    pub command: RequestCommands,
}

#[derive(Subcommand)]
pub enum RequestCommands {
    /// List requests in a workspace
    List {
        /// Workspace ID
        workspace_id: String,
    },

    /// Show a request as JSON
    Show {
        /// Request ID
        request_id: String,
    },

    /// Send a request by ID
    Send {
        /// Request ID
        request_id: String,
    },

    /// Output JSON schema for request create/update payloads
    Schema {
        #[arg(value_enum)]
        request_type: RequestSchemaType,
    },

    /// Create a new HTTP request
    Create {
        /// Workspace ID (or positional JSON payload shorthand)
        workspace_id: Option<String>,

        /// Request name
        #[arg(short, long)]
        name: Option<String>,

        /// HTTP method
        #[arg(short, long)]
        method: Option<String>,

        /// URL
        #[arg(short, long)]
        url: Option<String>,

        /// JSON payload
        #[arg(long)]
        json: Option<String>,
    },

    /// Update an HTTP request
    Update {
        /// JSON payload
        #[arg(long, conflicts_with = "json_input")]
        json: Option<String>,

        /// JSON payload shorthand
        #[arg(value_name = "JSON", conflicts_with = "json")]
        json_input: Option<String>,
    },

    /// Delete a request
    Delete {
        /// Request ID
        request_id: String,

        /// Skip confirmation prompt
        #[arg(short, long)]
        yes: bool,
    },
}

#[derive(Clone, Copy, Debug, ValueEnum)]
pub enum RequestSchemaType {
    Http,
    Grpc,
    Websocket,
}

#[derive(Args)]
pub struct FolderArgs {
    #[command(subcommand)]
    pub command: FolderCommands,
}

#[derive(Subcommand)]
pub enum FolderCommands {
    /// List folders in a workspace
    List {
        /// Workspace ID
        workspace_id: String,
    },

    /// Show a folder as JSON
    Show {
        /// Folder ID
        folder_id: String,
    },

    /// Create a folder
    Create {
        /// Workspace ID (or positional JSON payload shorthand)
        workspace_id: Option<String>,

        /// Folder name
        #[arg(short, long)]
        name: Option<String>,

        /// JSON payload
        #[arg(long)]
        json: Option<String>,
    },

    /// Update a folder
    Update {
        /// JSON payload
        #[arg(long, conflicts_with = "json_input")]
        json: Option<String>,

        /// JSON payload shorthand
        #[arg(value_name = "JSON", conflicts_with = "json")]
        json_input: Option<String>,
    },

    /// Delete a folder
    Delete {
        /// Folder ID
        folder_id: String,

        /// Skip confirmation prompt
        #[arg(short, long)]
        yes: bool,
    },
}

#[derive(Args)]
pub struct EnvironmentArgs {
    #[command(subcommand)]
    pub command: EnvironmentCommands,
}

#[derive(Subcommand)]
pub enum EnvironmentCommands {
    /// List environments in a workspace
    List {
        /// Workspace ID
        workspace_id: String,
    },

    /// Show an environment as JSON
    Show {
        /// Environment ID
        environment_id: String,
    },

    /// Create an environment
    Create {
        /// Workspace ID (or positional JSON payload shorthand)
        workspace_id: Option<String>,

        /// Environment name
        #[arg(short, long)]
        name: Option<String>,

        /// JSON payload
        #[arg(long)]
        json: Option<String>,
    },

    /// Update an environment
    Update {
        /// JSON payload
        #[arg(long, conflicts_with = "json_input")]
        json: Option<String>,

        /// JSON payload shorthand
        #[arg(value_name = "JSON", conflicts_with = "json")]
        json_input: Option<String>,
    },

    /// Delete an environment
    Delete {
        /// Environment ID
        environment_id: String,

        /// Skip confirmation prompt
        #[arg(short, long)]
        yes: bool,
    },
}
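// Illustrative invocations of the subcommands defined above (the binary name
// `yaakcli` comes from the integration-test helper further down; clap derives
// the kebab-case command and flag names from these types, and the IDs shown
// are made up):
//
//   yaakcli workspace list
//   yaakcli workspace create --name "My Workspace"
//   yaakcli workspace create '{"name":"My Workspace"}'
//   yaakcli request schema http
//   yaakcli folder delete fl_123 --yes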
@@ -1,159 +0,0 @@
use crate::cli::{EnvironmentArgs, EnvironmentCommands};
use crate::context::CliContext;
use crate::utils::confirm::confirm_delete;
use crate::utils::json::{
    apply_merge_patch, is_json_shorthand, parse_optional_json, parse_required_json, require_id,
    validate_create_id,
};
use yaak_models::models::Environment;
use yaak_models::util::UpdateSource;

type CommandResult<T = ()> = std::result::Result<T, String>;

pub fn run(ctx: &CliContext, args: EnvironmentArgs) -> i32 {
    let result = match args.command {
        EnvironmentCommands::List { workspace_id } => list(ctx, &workspace_id),
        EnvironmentCommands::Show { environment_id } => show(ctx, &environment_id),
        EnvironmentCommands::Create { workspace_id, name, json } => {
            create(ctx, workspace_id, name, json)
        }
        EnvironmentCommands::Update { json, json_input } => update(ctx, json, json_input),
        EnvironmentCommands::Delete { environment_id, yes } => delete(ctx, &environment_id, yes),
    };

    match result {
        Ok(()) => 0,
        Err(error) => {
            eprintln!("Error: {error}");
            1
        }
    }
}

fn list(ctx: &CliContext, workspace_id: &str) -> CommandResult {
    let environments = ctx
        .db()
        .list_environments_ensure_base(workspace_id)
        .map_err(|e| format!("Failed to list environments: {e}"))?;

    if environments.is_empty() {
        println!("No environments found in workspace {}", workspace_id);
    } else {
        for environment in environments {
            println!("{} - {} ({})", environment.id, environment.name, environment.parent_model);
        }
    }
    Ok(())
}

fn show(ctx: &CliContext, environment_id: &str) -> CommandResult {
    let environment = ctx
        .db()
        .get_environment(environment_id)
        .map_err(|e| format!("Failed to get environment: {e}"))?;
    let output = serde_json::to_string_pretty(&environment)
        .map_err(|e| format!("Failed to serialize environment: {e}"))?;
    println!("{output}");
    Ok(())
}

fn create(
    ctx: &CliContext,
    workspace_id: Option<String>,
    name: Option<String>,
    json: Option<String>,
) -> CommandResult {
    if json.is_some() && workspace_id.as_deref().is_some_and(|v| !is_json_shorthand(v)) {
        return Err(
            "environment create cannot combine workspace_id with --json payload".to_string()
        );
    }

    let payload = parse_optional_json(
        json,
        workspace_id.clone().filter(|v| is_json_shorthand(v)),
        "environment create",
    )?;

    if let Some(payload) = payload {
        if name.is_some() {
            return Err("environment create cannot combine --name with JSON payload".to_string());
        }

        validate_create_id(&payload, "environment")?;
        let mut environment: Environment = serde_json::from_value(payload)
            .map_err(|e| format!("Failed to parse environment create JSON: {e}"))?;

        if environment.workspace_id.is_empty() {
            return Err("environment create JSON requires non-empty \"workspaceId\"".to_string());
        }

        if environment.parent_model.is_empty() {
            environment.parent_model = "environment".to_string();
        }

        let created = ctx
            .db()
            .upsert_environment(&environment, &UpdateSource::Sync)
            .map_err(|e| format!("Failed to create environment: {e}"))?;

        println!("Created environment: {}", created.id);
        return Ok(());
    }

    let workspace_id = workspace_id.ok_or_else(|| {
        "environment create requires workspace_id unless JSON payload is provided".to_string()
    })?;
    let name = name.ok_or_else(|| {
        "environment create requires --name unless JSON payload is provided".to_string()
    })?;

    let environment = Environment {
        workspace_id,
        name,
        parent_model: "environment".to_string(),
        ..Default::default()
    };

    let created = ctx
        .db()
        .upsert_environment(&environment, &UpdateSource::Sync)
        .map_err(|e| format!("Failed to create environment: {e}"))?;

    println!("Created environment: {}", created.id);
    Ok(())
}

fn update(ctx: &CliContext, json: Option<String>, json_input: Option<String>) -> CommandResult {
    let patch = parse_required_json(json, json_input, "environment update")?;
    let id = require_id(&patch, "environment update")?;

    let existing = ctx
        .db()
        .get_environment(&id)
        .map_err(|e| format!("Failed to get environment for update: {e}"))?;
    let updated = apply_merge_patch(&existing, &patch, &id, "environment update")?;

    let saved = ctx
        .db()
        .upsert_environment(&updated, &UpdateSource::Sync)
        .map_err(|e| format!("Failed to update environment: {e}"))?;

    println!("Updated environment: {}", saved.id);
    Ok(())
}

fn delete(ctx: &CliContext, environment_id: &str, yes: bool) -> CommandResult {
    if !yes && !confirm_delete("environment", environment_id) {
        println!("Aborted");
        return Ok(());
    }

    let deleted = ctx
        .db()
        .delete_environment_by_id(environment_id, &UpdateSource::Sync)
        .map_err(|e| format!("Failed to delete environment: {e}"))?;

    println!("Deleted environment: {}", deleted.id);
    Ok(())
}
@@ -1,141 +0,0 @@
use crate::cli::{FolderArgs, FolderCommands};
use crate::context::CliContext;
use crate::utils::confirm::confirm_delete;
use crate::utils::json::{
    apply_merge_patch, is_json_shorthand, parse_optional_json, parse_required_json, require_id,
    validate_create_id,
};
use yaak_models::models::Folder;
use yaak_models::util::UpdateSource;

type CommandResult<T = ()> = std::result::Result<T, String>;

pub fn run(ctx: &CliContext, args: FolderArgs) -> i32 {
    let result = match args.command {
        FolderCommands::List { workspace_id } => list(ctx, &workspace_id),
        FolderCommands::Show { folder_id } => show(ctx, &folder_id),
        FolderCommands::Create { workspace_id, name, json } => {
            create(ctx, workspace_id, name, json)
        }
        FolderCommands::Update { json, json_input } => update(ctx, json, json_input),
        FolderCommands::Delete { folder_id, yes } => delete(ctx, &folder_id, yes),
    };

    match result {
        Ok(()) => 0,
        Err(error) => {
            eprintln!("Error: {error}");
            1
        }
    }
}

fn list(ctx: &CliContext, workspace_id: &str) -> CommandResult {
    let folders =
        ctx.db().list_folders(workspace_id).map_err(|e| format!("Failed to list folders: {e}"))?;
    if folders.is_empty() {
        println!("No folders found in workspace {}", workspace_id);
    } else {
        for folder in folders {
            println!("{} - {}", folder.id, folder.name);
        }
    }
    Ok(())
}

fn show(ctx: &CliContext, folder_id: &str) -> CommandResult {
    let folder =
        ctx.db().get_folder(folder_id).map_err(|e| format!("Failed to get folder: {e}"))?;
    let output = serde_json::to_string_pretty(&folder)
        .map_err(|e| format!("Failed to serialize folder: {e}"))?;
    println!("{output}");
    Ok(())
}

fn create(
    ctx: &CliContext,
    workspace_id: Option<String>,
    name: Option<String>,
    json: Option<String>,
) -> CommandResult {
    if json.is_some() && workspace_id.as_deref().is_some_and(|v| !is_json_shorthand(v)) {
        return Err("folder create cannot combine workspace_id with --json payload".to_string());
    }

    let payload = parse_optional_json(
        json,
        workspace_id.clone().filter(|v| is_json_shorthand(v)),
        "folder create",
    )?;

    if let Some(payload) = payload {
        if name.is_some() {
            return Err("folder create cannot combine --name with JSON payload".to_string());
        }

        validate_create_id(&payload, "folder")?;
        let folder: Folder = serde_json::from_value(payload)
            .map_err(|e| format!("Failed to parse folder create JSON: {e}"))?;

        if folder.workspace_id.is_empty() {
            return Err("folder create JSON requires non-empty \"workspaceId\"".to_string());
        }

        let created = ctx
            .db()
            .upsert_folder(&folder, &UpdateSource::Sync)
            .map_err(|e| format!("Failed to create folder: {e}"))?;

        println!("Created folder: {}", created.id);
        return Ok(());
    }

    let workspace_id = workspace_id.ok_or_else(|| {
        "folder create requires workspace_id unless JSON payload is provided".to_string()
    })?;
    let name = name.ok_or_else(|| {
        "folder create requires --name unless JSON payload is provided".to_string()
    })?;

    let folder = Folder { workspace_id, name, ..Default::default() };

    let created = ctx
        .db()
        .upsert_folder(&folder, &UpdateSource::Sync)
        .map_err(|e| format!("Failed to create folder: {e}"))?;

    println!("Created folder: {}", created.id);
    Ok(())
}

fn update(ctx: &CliContext, json: Option<String>, json_input: Option<String>) -> CommandResult {
    let patch = parse_required_json(json, json_input, "folder update")?;
    let id = require_id(&patch, "folder update")?;

    let existing =
        ctx.db().get_folder(&id).map_err(|e| format!("Failed to get folder for update: {e}"))?;
    let updated = apply_merge_patch(&existing, &patch, &id, "folder update")?;

    let saved = ctx
        .db()
        .upsert_folder(&updated, &UpdateSource::Sync)
        .map_err(|e| format!("Failed to update folder: {e}"))?;

    println!("Updated folder: {}", saved.id);
    Ok(())
}

fn delete(ctx: &CliContext, folder_id: &str, yes: bool) -> CommandResult {
    if !yes && !confirm_delete("folder", folder_id) {
        println!("Aborted");
        return Ok(());
    }

    let deleted = ctx
        .db()
        .delete_folder_by_id(folder_id, &UpdateSource::Sync)
        .map_err(|e| format!("Failed to delete folder: {e}"))?;

    println!("Deleted folder: {}", deleted.id);
    Ok(())
}
@@ -1,5 +0,0 @@
pub mod environment;
pub mod folder;
pub mod request;
pub mod send;
pub mod workspace;
@@ -1,485 +0,0 @@
use crate::cli::{RequestArgs, RequestCommands, RequestSchemaType};
use crate::context::CliContext;
use crate::utils::confirm::confirm_delete;
use crate::utils::json::{
    apply_merge_patch, is_json_shorthand, parse_optional_json, parse_required_json, require_id,
    validate_create_id,
};
use schemars::schema_for;
use serde_json::{Map, Value, json};
use std::collections::HashMap;
use tokio::sync::mpsc;
use yaak::send::{SendHttpRequestByIdWithPluginsParams, send_http_request_by_id_with_plugins};
use yaak_models::models::{GrpcRequest, HttpRequest, WebsocketRequest};
use yaak_models::queries::any_request::AnyRequest;
use yaak_models::util::UpdateSource;
use yaak_plugins::events::{FormInput, FormInputBase, JsonPrimitive, PluginContext};

type CommandResult<T = ()> = std::result::Result<T, String>;

pub async fn run(
    ctx: &CliContext,
    args: RequestArgs,
    environment: Option<&str>,
    verbose: bool,
) -> i32 {
    let result = match args.command {
        RequestCommands::List { workspace_id } => list(ctx, &workspace_id),
        RequestCommands::Show { request_id } => show(ctx, &request_id),
        RequestCommands::Send { request_id } => {
            return match send_request_by_id(ctx, &request_id, environment, verbose).await {
                Ok(()) => 0,
                Err(error) => {
                    eprintln!("Error: {error}");
                    1
                }
            };
        }
        RequestCommands::Schema { request_type } => {
            return match schema(ctx, request_type).await {
                Ok(()) => 0,
                Err(error) => {
                    eprintln!("Error: {error}");
                    1
                }
            };
        }
        RequestCommands::Create { workspace_id, name, method, url, json } => {
            create(ctx, workspace_id, name, method, url, json)
        }
        RequestCommands::Update { json, json_input } => update(ctx, json, json_input),
        RequestCommands::Delete { request_id, yes } => delete(ctx, &request_id, yes),
    };

    match result {
        Ok(()) => 0,
        Err(error) => {
            eprintln!("Error: {error}");
            1
        }
    }
}

fn list(ctx: &CliContext, workspace_id: &str) -> CommandResult {
    let requests = ctx
        .db()
        .list_http_requests(workspace_id)
        .map_err(|e| format!("Failed to list requests: {e}"))?;
    if requests.is_empty() {
        println!("No requests found in workspace {}", workspace_id);
    } else {
        for request in requests {
            println!("{} - {} {}", request.id, request.method, request.name);
        }
    }
    Ok(())
}

async fn schema(ctx: &CliContext, request_type: RequestSchemaType) -> CommandResult {
    let mut schema = match request_type {
        RequestSchemaType::Http => serde_json::to_value(schema_for!(HttpRequest))
            .map_err(|e| format!("Failed to serialize HTTP request schema: {e}"))?,
        RequestSchemaType::Grpc => serde_json::to_value(schema_for!(GrpcRequest))
            .map_err(|e| format!("Failed to serialize gRPC request schema: {e}"))?,
        RequestSchemaType::Websocket => serde_json::to_value(schema_for!(WebsocketRequest))
            .map_err(|e| format!("Failed to serialize WebSocket request schema: {e}"))?,
    };

    if let Err(error) = merge_auth_schema_from_plugins(ctx, &mut schema).await {
        eprintln!("Warning: Failed to enrich authentication schema from plugins: {error}");
    }

    let output = serde_json::to_string_pretty(&schema)
        .map_err(|e| format!("Failed to format schema JSON: {e}"))?;
    println!("{output}");
    Ok(())
}

async fn merge_auth_schema_from_plugins(
    ctx: &CliContext,
    schema: &mut Value,
) -> Result<(), String> {
    let plugin_context = PluginContext::new_empty();
    let plugin_manager = ctx.plugin_manager();
    let summaries = plugin_manager
        .get_http_authentication_summaries(&plugin_context)
        .await
        .map_err(|e| e.to_string())?;

    let mut auth_variants = Vec::new();
    for (_, summary) in summaries {
        let config = match plugin_manager
            .get_http_authentication_config(
                &plugin_context,
                &summary.name,
                HashMap::<String, JsonPrimitive>::new(),
                "yaakcli_request_schema",
            )
            .await
        {
            Ok(config) => config,
            Err(error) => {
                eprintln!(
                    "Warning: Failed to load auth config for strategy '{}': {}",
                    summary.name, error
                );
                continue;
            }
        };

        auth_variants.push(auth_variant_schema(&summary.name, &summary.label, &config.args));
    }

    let Some(properties) = schema.get_mut("properties").and_then(Value::as_object_mut) else {
        return Ok(());
    };

    let Some(auth_schema) = properties.get_mut("authentication") else {
        return Ok(());
    };

    if !auth_variants.is_empty() {
        let mut one_of = vec![auth_schema.clone()];
        one_of.extend(auth_variants);
        *auth_schema = json!({ "oneOf": one_of });
    }

    Ok(())
}

fn auth_variant_schema(auth_name: &str, auth_label: &str, args: &[FormInput]) -> Value {
    let mut properties = Map::new();
    let mut required = Vec::new();
    for input in args {
        add_input_schema(input, &mut properties, &mut required);
    }

    let mut schema = json!({
        "title": auth_label,
        "description": format!("Authentication values for strategy '{}'", auth_name),
        "type": "object",
        "properties": properties,
        "additionalProperties": true
    });

    if !required.is_empty() {
        schema["required"] = json!(required);
    }

    schema
}

fn add_input_schema(
    input: &FormInput,
    properties: &mut Map<String, Value>,
    required: &mut Vec<String>,
) {
    match input {
        FormInput::Text(v) => add_base_schema(
            &v.base,
            json!({
                "type": "string",
                "writeOnly": v.password.unwrap_or(false),
            }),
            properties,
            required,
        ),
        FormInput::Editor(v) => add_base_schema(
            &v.base,
            json!({
                "type": "string",
                "x-editorLanguage": v.language.clone(),
            }),
            properties,
            required,
        ),
        FormInput::Select(v) => {
            let options: Vec<Value> =
                v.options.iter().map(|o| Value::String(o.value.clone())).collect();
            add_base_schema(
                &v.base,
                json!({
                    "type": "string",
                    "enum": options,
                }),
                properties,
                required,
            );
        }
        FormInput::Checkbox(v) => {
            add_base_schema(&v.base, json!({ "type": "boolean" }), properties, required);
        }
        FormInput::File(v) => {
            if v.multiple.unwrap_or(false) {
                add_base_schema(
                    &v.base,
                    json!({
                        "type": "array",
                        "items": { "type": "string" },
                    }),
                    properties,
                    required,
                );
            } else {
                add_base_schema(&v.base, json!({ "type": "string" }), properties, required);
            }
        }
        FormInput::HttpRequest(v) => {
            add_base_schema(&v.base, json!({ "type": "string" }), properties, required);
        }
        FormInput::KeyValue(v) => {
            add_base_schema(
                &v.base,
                json!({
                    "type": "object",
                    "additionalProperties": true,
                }),
                properties,
                required,
            );
        }
        FormInput::Accordion(v) => {
            if let Some(children) = &v.inputs {
                for child in children {
                    add_input_schema(child, properties, required);
                }
            }
        }
        FormInput::HStack(v) => {
            if let Some(children) = &v.inputs {
                for child in children {
                    add_input_schema(child, properties, required);
                }
            }
        }
        FormInput::Banner(v) => {
            if let Some(children) = &v.inputs {
                for child in children {
                    add_input_schema(child, properties, required);
                }
            }
        }
        FormInput::Markdown(_) => {}
    }
}

fn add_base_schema(
    base: &FormInputBase,
    mut schema: Value,
    properties: &mut Map<String, Value>,
    required: &mut Vec<String>,
) {
    if base.hidden.unwrap_or(false) || base.name.trim().is_empty() {
        return;
    }

    if let Some(description) = &base.description {
        schema["description"] = Value::String(description.clone());
    }
    if let Some(label) = &base.label {
        schema["title"] = Value::String(label.clone());
    }
    if let Some(default_value) = &base.default_value {
        schema["default"] = Value::String(default_value.clone());
    }

    let name = base.name.clone();
    properties.insert(name.clone(), schema);
    if !base.optional.unwrap_or(false) {
        required.push(name);
    }
}

fn create(
    ctx: &CliContext,
    workspace_id: Option<String>,
    name: Option<String>,
    method: Option<String>,
    url: Option<String>,
    json: Option<String>,
) -> CommandResult {
    if json.is_some() && workspace_id.as_deref().is_some_and(|v| !is_json_shorthand(v)) {
        return Err("request create cannot combine workspace_id with --json payload".to_string());
    }

    let payload = parse_optional_json(
        json,
        workspace_id.clone().filter(|v| is_json_shorthand(v)),
        "request create",
    )?;

    if let Some(payload) = payload {
        if name.is_some() || method.is_some() || url.is_some() {
            return Err("request create cannot combine simple flags with JSON payload".to_string());
        }

        validate_create_id(&payload, "request")?;
        let request: HttpRequest = serde_json::from_value(payload)
            .map_err(|e| format!("Failed to parse request create JSON: {e}"))?;

        if request.workspace_id.is_empty() {
            return Err("request create JSON requires non-empty \"workspaceId\"".to_string());
        }

        let created = ctx
            .db()
            .upsert_http_request(&request, &UpdateSource::Sync)
            .map_err(|e| format!("Failed to create request: {e}"))?;

        println!("Created request: {}", created.id);
        return Ok(());
    }

    let workspace_id = workspace_id.ok_or_else(|| {
        "request create requires workspace_id unless JSON payload is provided".to_string()
    })?;
    let name = name.unwrap_or_default();
    let url = url.unwrap_or_default();
    let method = method.unwrap_or_else(|| "GET".to_string());

    let request = HttpRequest {
        workspace_id,
        name,
        method: method.to_uppercase(),
        url,
        ..Default::default()
    };

    let created = ctx
        .db()
        .upsert_http_request(&request, &UpdateSource::Sync)
        .map_err(|e| format!("Failed to create request: {e}"))?;

    println!("Created request: {}", created.id);
    Ok(())
}

fn update(ctx: &CliContext, json: Option<String>, json_input: Option<String>) -> CommandResult {
    let patch = parse_required_json(json, json_input, "request update")?;
    let id = require_id(&patch, "request update")?;

    let existing = ctx
        .db()
        .get_http_request(&id)
        .map_err(|e| format!("Failed to get request for update: {e}"))?;
    let updated = apply_merge_patch(&existing, &patch, &id, "request update")?;

    let saved = ctx
        .db()
        .upsert_http_request(&updated, &UpdateSource::Sync)
        .map_err(|e| format!("Failed to update request: {e}"))?;

    println!("Updated request: {}", saved.id);
    Ok(())
}

fn show(ctx: &CliContext, request_id: &str) -> CommandResult {
    let request =
        ctx.db().get_http_request(request_id).map_err(|e| format!("Failed to get request: {e}"))?;
    let output = serde_json::to_string_pretty(&request)
        .map_err(|e| format!("Failed to serialize request: {e}"))?;
    println!("{output}");
    Ok(())
}

fn delete(ctx: &CliContext, request_id: &str, yes: bool) -> CommandResult {
    if !yes && !confirm_delete("request", request_id) {
        println!("Aborted");
        return Ok(());
    }

    let deleted = ctx
        .db()
        .delete_http_request_by_id(request_id, &UpdateSource::Sync)
        .map_err(|e| format!("Failed to delete request: {e}"))?;
    println!("Deleted request: {}", deleted.id);
    Ok(())
}

/// Send a request by ID and print response in the same format as legacy `send`.
pub async fn send_request_by_id(
    ctx: &CliContext,
    request_id: &str,
    environment: Option<&str>,
    verbose: bool,
) -> Result<(), String> {
    let request =
        ctx.db().get_any_request(request_id).map_err(|e| format!("Failed to get request: {e}"))?;
    match request {
        AnyRequest::HttpRequest(http_request) => {
            send_http_request_by_id(
                ctx,
                &http_request.id,
                &http_request.workspace_id,
                environment,
                verbose,
            )
            .await
        }
        AnyRequest::GrpcRequest(_) => {
            Err("gRPC request send is not implemented yet in yaak-cli".to_string())
        }
        AnyRequest::WebsocketRequest(_) => {
            Err("WebSocket request send is not implemented yet in yaak-cli".to_string())
        }
    }
}

async fn send_http_request_by_id(
    ctx: &CliContext,
    request_id: &str,
    workspace_id: &str,
    environment: Option<&str>,
    verbose: bool,
) -> Result<(), String> {
    let plugin_context = PluginContext::new(None, Some(workspace_id.to_string()));

    let (event_tx, mut event_rx) = mpsc::channel(100);
    let event_handle = tokio::spawn(async move {
        while let Some(event) = event_rx.recv().await {
            if verbose {
                println!("{}", event);
            }
        }
    });
    let response_dir = ctx.data_dir().join("responses");

    let result = send_http_request_by_id_with_plugins(SendHttpRequestByIdWithPluginsParams {
        query_manager: ctx.query_manager(),
        blob_manager: ctx.blob_manager(),
        request_id,
        environment_id: environment,
        update_source: UpdateSource::Sync,
        cookie_jar_id: None,
        response_dir: &response_dir,
        emit_events_to: Some(event_tx),
        plugin_manager: ctx.plugin_manager(),
        encryption_manager: ctx.encryption_manager.clone(),
        plugin_context: &plugin_context,
        cancelled_rx: None,
        connection_manager: None,
    })
    .await;

    let _ = event_handle.await;
    let result = result.map_err(|e| e.to_string())?;

    if verbose {
        println!();
    }
    println!(
        "HTTP {} {}",
        result.response.status,
        result.response.status_reason.as_deref().unwrap_or("")
    );
    if verbose {
        for header in &result.response.headers {
            println!("{}: {}", header.name, header.value);
        }
        println!();
    }
    let body = String::from_utf8(result.response_body)
        .map_err(|e| format!("Failed to read response body: {e}"))?;
    println!("{}", body);
    Ok(())
}
@@ -1,184 +0,0 @@
use crate::cli::SendArgs;
use crate::commands::request;
use crate::context::CliContext;
use futures::future::join_all;

enum ExecutionMode {
    Sequential,
    Parallel,
}

pub async fn run(
    ctx: &CliContext,
    args: SendArgs,
    environment: Option<&str>,
    verbose: bool,
) -> i32 {
    match send_target(ctx, args, environment, verbose).await {
        Ok(()) => 0,
        Err(error) => {
            eprintln!("Error: {error}");
            1
        }
    }
}

async fn send_target(
    ctx: &CliContext,
    args: SendArgs,
    environment: Option<&str>,
    verbose: bool,
) -> Result<(), String> {
    let mode = if args.parallel { ExecutionMode::Parallel } else { ExecutionMode::Sequential };

    if ctx.db().get_any_request(&args.id).is_ok() {
        return request::send_request_by_id(ctx, &args.id, environment, verbose).await;
    }

    if ctx.db().get_folder(&args.id).is_ok() {
        let request_ids = collect_folder_request_ids(ctx, &args.id)?;
        if request_ids.is_empty() {
            println!("No requests found in folder {}", args.id);
            return Ok(());
        }
        return send_many(ctx, request_ids, mode, args.fail_fast, environment, verbose).await;
    }

    if ctx.db().get_workspace(&args.id).is_ok() {
        let request_ids = collect_workspace_request_ids(ctx, &args.id)?;
        if request_ids.is_empty() {
            println!("No requests found in workspace {}", args.id);
            return Ok(());
        }
        return send_many(ctx, request_ids, mode, args.fail_fast, environment, verbose).await;
    }

    Err(format!("Could not resolve ID '{}' as request, folder, or workspace", args.id))
}

fn collect_folder_request_ids(ctx: &CliContext, folder_id: &str) -> Result<Vec<String>, String> {
    let mut ids = Vec::new();

    let mut http_ids = ctx
        .db()
        .list_http_requests_for_folder_recursive(folder_id)
        .map_err(|e| format!("Failed to list HTTP requests in folder: {e}"))?
        .into_iter()
        .map(|r| r.id)
        .collect::<Vec<_>>();
    ids.append(&mut http_ids);

    let mut grpc_ids = ctx
        .db()
        .list_grpc_requests_for_folder_recursive(folder_id)
        .map_err(|e| format!("Failed to list gRPC requests in folder: {e}"))?
        .into_iter()
        .map(|r| r.id)
        .collect::<Vec<_>>();
    ids.append(&mut grpc_ids);

    let mut websocket_ids = ctx
        .db()
        .list_websocket_requests_for_folder_recursive(folder_id)
        .map_err(|e| format!("Failed to list WebSocket requests in folder: {e}"))?
        .into_iter()
        .map(|r| r.id)
        .collect::<Vec<_>>();
    ids.append(&mut websocket_ids);

    Ok(ids)
}

fn collect_workspace_request_ids(
    ctx: &CliContext,
    workspace_id: &str,
) -> Result<Vec<String>, String> {
    let mut ids = Vec::new();

    let mut http_ids = ctx
        .db()
        .list_http_requests(workspace_id)
        .map_err(|e| format!("Failed to list HTTP requests in workspace: {e}"))?
        .into_iter()
        .map(|r| r.id)
        .collect::<Vec<_>>();
    ids.append(&mut http_ids);

    let mut grpc_ids = ctx
        .db()
        .list_grpc_requests(workspace_id)
        .map_err(|e| format!("Failed to list gRPC requests in workspace: {e}"))?
        .into_iter()
        .map(|r| r.id)
        .collect::<Vec<_>>();
    ids.append(&mut grpc_ids);

    let mut websocket_ids = ctx
        .db()
        .list_websocket_requests(workspace_id)
        .map_err(|e| format!("Failed to list WebSocket requests in workspace: {e}"))?
        .into_iter()
        .map(|r| r.id)
        .collect::<Vec<_>>();
    ids.append(&mut websocket_ids);

    Ok(ids)
}

async fn send_many(
    ctx: &CliContext,
    request_ids: Vec<String>,
    mode: ExecutionMode,
    fail_fast: bool,
    environment: Option<&str>,
    verbose: bool,
) -> Result<(), String> {
    let mut success_count = 0usize;
    let mut failures: Vec<(String, String)> = Vec::new();

    match mode {
        ExecutionMode::Sequential => {
            for request_id in request_ids {
                match request::send_request_by_id(ctx, &request_id, environment, verbose).await {
                    Ok(()) => success_count += 1,
                    Err(error) => {
                        failures.push((request_id, error));
                        if fail_fast {
                            break;
                        }
                    }
                }
            }
        }
        ExecutionMode::Parallel => {
            let tasks = request_ids
                .iter()
                .map(|request_id| async move {
                    (
                        request_id.clone(),
                        request::send_request_by_id(ctx, request_id, environment, verbose).await,
                    )
                })
                .collect::<Vec<_>>();

            for (request_id, result) in join_all(tasks).await {
                match result {
                    Ok(()) => success_count += 1,
                    Err(error) => failures.push((request_id, error)),
                }
            }
        }
    }

    let failure_count = failures.len();
    println!("Send summary: {success_count} succeeded, {failure_count} failed");

    if failure_count == 0 {
        return Ok(());
    }

    for (request_id, error) in failures {
        eprintln!("  {}: {}", request_id, error);
    }
    Err("One or more requests failed".to_string())
}
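// Note on the execution modes above: `--fail-fast` only short-circuits in
// sequential mode. In parallel mode every send is started up-front and awaited
// together via `join_all`, so failures are only collected and reported after
// all requests have finished.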
@@ -1,123 +0,0 @@
use crate::cli::{WorkspaceArgs, WorkspaceCommands};
use crate::context::CliContext;
use crate::utils::confirm::confirm_delete;
use crate::utils::json::{
    apply_merge_patch, parse_optional_json, parse_required_json, require_id, validate_create_id,
};
use yaak_models::models::Workspace;
use yaak_models::util::UpdateSource;

type CommandResult<T = ()> = std::result::Result<T, String>;

pub fn run(ctx: &CliContext, args: WorkspaceArgs) -> i32 {
    let result = match args.command {
        WorkspaceCommands::List => list(ctx),
        WorkspaceCommands::Show { workspace_id } => show(ctx, &workspace_id),
        WorkspaceCommands::Create { name, json, json_input } => create(ctx, name, json, json_input),
        WorkspaceCommands::Update { json, json_input } => update(ctx, json, json_input),
        WorkspaceCommands::Delete { workspace_id, yes } => delete(ctx, &workspace_id, yes),
    };

    match result {
        Ok(()) => 0,
        Err(error) => {
            eprintln!("Error: {error}");
            1
        }
    }
}

fn list(ctx: &CliContext) -> CommandResult {
    let workspaces =
        ctx.db().list_workspaces().map_err(|e| format!("Failed to list workspaces: {e}"))?;
    if workspaces.is_empty() {
        println!("No workspaces found");
    } else {
        for workspace in workspaces {
            println!("{} - {}", workspace.id, workspace.name);
        }
    }
    Ok(())
}

fn show(ctx: &CliContext, workspace_id: &str) -> CommandResult {
    let workspace = ctx
        .db()
        .get_workspace(workspace_id)
        .map_err(|e| format!("Failed to get workspace: {e}"))?;
    let output = serde_json::to_string_pretty(&workspace)
        .map_err(|e| format!("Failed to serialize workspace: {e}"))?;
    println!("{output}");
    Ok(())
}

fn create(
    ctx: &CliContext,
    name: Option<String>,
    json: Option<String>,
    json_input: Option<String>,
) -> CommandResult {
    let payload = parse_optional_json(json, json_input, "workspace create")?;

    if let Some(payload) = payload {
        if name.is_some() {
            return Err("workspace create cannot combine --name with JSON payload".to_string());
        }

        validate_create_id(&payload, "workspace")?;
        let workspace: Workspace = serde_json::from_value(payload)
            .map_err(|e| format!("Failed to parse workspace create JSON: {e}"))?;

        let created = ctx
            .db()
            .upsert_workspace(&workspace, &UpdateSource::Sync)
            .map_err(|e| format!("Failed to create workspace: {e}"))?;
        println!("Created workspace: {}", created.id);
        return Ok(());
    }

    let name = name.ok_or_else(|| {
        "workspace create requires --name unless JSON payload is provided".to_string()
    })?;

    let workspace = Workspace { name, ..Default::default() };
    let created = ctx
        .db()
        .upsert_workspace(&workspace, &UpdateSource::Sync)
        .map_err(|e| format!("Failed to create workspace: {e}"))?;
    println!("Created workspace: {}", created.id);
    Ok(())
}

fn update(ctx: &CliContext, json: Option<String>, json_input: Option<String>) -> CommandResult {
    let patch = parse_required_json(json, json_input, "workspace update")?;
    let id = require_id(&patch, "workspace update")?;

    let existing = ctx
        .db()
        .get_workspace(&id)
        .map_err(|e| format!("Failed to get workspace for update: {e}"))?;
    let updated = apply_merge_patch(&existing, &patch, &id, "workspace update")?;

    let saved = ctx
        .db()
        .upsert_workspace(&updated, &UpdateSource::Sync)
        .map_err(|e| format!("Failed to update workspace: {e}"))?;

    println!("Updated workspace: {}", saved.id);
    Ok(())
}

fn delete(ctx: &CliContext, workspace_id: &str, yes: bool) -> CommandResult {
    if !yes && !confirm_delete("workspace", workspace_id) {
        println!("Aborted");
        return Ok(());
    }

    let deleted = ctx
        .db()
        .delete_workspace_by_id(workspace_id, &UpdateSource::Sync)
        .map_err(|e| format!("Failed to delete workspace: {e}"))?;
    println!("Deleted workspace: {}", deleted.id);
    Ok(())
}
@@ -1,115 +0,0 @@
use crate::plugin_events::CliPluginEventBridge;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use tokio::sync::Mutex;
use yaak_crypto::manager::EncryptionManager;
use yaak_models::blob_manager::BlobManager;
use yaak_models::db_context::DbContext;
use yaak_models::query_manager::QueryManager;
use yaak_plugins::events::PluginContext;
use yaak_plugins::manager::PluginManager;

pub struct CliContext {
    data_dir: PathBuf,
    query_manager: QueryManager,
    blob_manager: BlobManager,
    pub encryption_manager: Arc<EncryptionManager>,
    plugin_manager: Option<Arc<PluginManager>>,
    plugin_event_bridge: Mutex<Option<CliPluginEventBridge>>,
}

impl CliContext {
    pub async fn initialize(data_dir: PathBuf, app_id: &str, with_plugins: bool) -> Self {
        let db_path = data_dir.join("db.sqlite");
        let blob_path = data_dir.join("blobs.sqlite");

        let (query_manager, blob_manager, _rx) = yaak_models::init_standalone(&db_path, &blob_path)
            .expect("Failed to initialize database");

        let encryption_manager = Arc::new(EncryptionManager::new(query_manager.clone(), app_id));

        let plugin_manager = if with_plugins {
            let vendored_plugin_dir = data_dir.join("vendored-plugins");
            let installed_plugin_dir = data_dir.join("installed-plugins");
            let node_bin_path = PathBuf::from("node");

            let plugin_runtime_main =
                std::env::var("YAAK_PLUGIN_RUNTIME").map(PathBuf::from).unwrap_or_else(|_| {
                    PathBuf::from(env!("CARGO_MANIFEST_DIR"))
                        .join("../../crates-tauri/yaak-app/vendored/plugin-runtime/index.cjs")
                });

            let plugin_manager = Arc::new(
                PluginManager::new(
                    vendored_plugin_dir,
                    installed_plugin_dir,
                    node_bin_path,
                    plugin_runtime_main,
                    false,
                )
                .await,
            );

            let plugins = query_manager.connect().list_plugins().unwrap_or_default();
            if !plugins.is_empty() {
                let errors = plugin_manager
                    .initialize_all_plugins(plugins, &PluginContext::new_empty())
                    .await;
                for (plugin_dir, error_msg) in errors {
                    eprintln!(
                        "Warning: Failed to initialize plugin '{}': {}",
                        plugin_dir, error_msg
                    );
                }
            }

            Some(plugin_manager)
        } else {
            None
        };

        let plugin_event_bridge = if let Some(plugin_manager) = &plugin_manager {
            Some(CliPluginEventBridge::start(plugin_manager.clone(), query_manager.clone()).await)
        } else {
            None
        };

        Self {
            data_dir,
            query_manager,
            blob_manager,
            encryption_manager,
            plugin_manager,
            plugin_event_bridge: Mutex::new(plugin_event_bridge),
        }
    }

    pub fn data_dir(&self) -> &Path {
        &self.data_dir
    }

    pub fn db(&self) -> DbContext<'_> {
        self.query_manager.connect()
    }

    pub fn query_manager(&self) -> &QueryManager {
        &self.query_manager
    }

    pub fn blob_manager(&self) -> &BlobManager {
        &self.blob_manager
    }

    pub fn plugin_manager(&self) -> Arc<PluginManager> {
        self.plugin_manager.clone().expect("Plugin manager was not initialized for this command")
    }

    pub async fn shutdown(&self) {
        if let Some(plugin_manager) = &self.plugin_manager {
            if let Some(plugin_event_bridge) = self.plugin_event_bridge.lock().await.take() {
                plugin_event_bridge.shutdown(plugin_manager).await;
            }
            plugin_manager.terminate().await;
        }
    }
}
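// A minimal usage sketch of the context above (the directory path is
// hypothetical; the method names match the impl block exactly):
//
//   let ctx = CliContext::initialize(
//       PathBuf::from("/tmp/yaak-cli-data"),
//       "app.yaak.desktop.dev",
//       /* with_plugins */ false,
//   )
//   .await;
//   let workspaces = ctx.db().list_workspaces().unwrap_or_default();
//   ctx.shutdown().await;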
@@ -1,52 +0,0 @@
mod cli;
mod commands;
mod context;
mod plugin_events;
mod utils;

use clap::Parser;
use cli::{Cli, Commands, RequestCommands};
use context::CliContext;

#[tokio::main]
async fn main() {
    let Cli { data_dir, environment, verbose, command } = Cli::parse();

    if verbose {
        env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
    }

    let app_id = if cfg!(debug_assertions) { "app.yaak.desktop.dev" } else { "app.yaak.desktop" };

    let data_dir = data_dir.unwrap_or_else(|| {
        dirs::data_dir().expect("Could not determine data directory").join(app_id)
    });

    let needs_plugins = matches!(
        &command,
        Commands::Send(_)
            | Commands::Request(cli::RequestArgs {
                command: RequestCommands::Send { .. } | RequestCommands::Schema { .. },
            })
    );

    let context = CliContext::initialize(data_dir, app_id, needs_plugins).await;

    let exit_code = match command {
        Commands::Send(args) => {
            commands::send::run(&context, args, environment.as_deref(), verbose).await
        }
        Commands::Workspace(args) => commands::workspace::run(&context, args),
        Commands::Request(args) => {
            commands::request::run(&context, args, environment.as_deref(), verbose).await
        }
        Commands::Folder(args) => commands::folder::run(&context, args),
        Commands::Environment(args) => commands::environment::run(&context, args),
    };

    context.shutdown().await;

    if exit_code != 0 {
        std::process::exit(exit_code);
    }
}
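// Example invocation (the `--data-dir` flag name matches the test helper in
// the integration-test module below; other global flags are defined earlier
// in cli.rs):
//
//   yaakcli --data-dir /tmp/yaak-cli-data workspace list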
@@ -1,212 +0,0 @@
use std::sync::Arc;
use tokio::task::JoinHandle;
use yaak::plugin_events::{
    GroupedPluginEvent, HostRequest, SharedEvent, SharedPluginEventContext,
    handle_shared_plugin_event,
};
use yaak_models::query_manager::QueryManager;
use yaak_plugins::events::{
    EmptyPayload, ErrorResponse, InternalEvent, InternalEventPayload, ListOpenWorkspacesResponse,
    WorkspaceInfo,
};
use yaak_plugins::manager::PluginManager;

pub struct CliPluginEventBridge {
    rx_id: String,
    task: JoinHandle<()>,
}

impl CliPluginEventBridge {
    pub async fn start(plugin_manager: Arc<PluginManager>, query_manager: QueryManager) -> Self {
        let (rx_id, mut rx) = plugin_manager.subscribe("cli").await;
        let rx_id_for_task = rx_id.clone();
        let pm = plugin_manager.clone();

        let task = tokio::spawn(async move {
            while let Some(event) = rx.recv().await {
                // Events with reply IDs are replies to app-originated requests.
                if event.reply_id.is_some() {
                    continue;
                }

                let Some(plugin_handle) = pm.get_plugin_by_ref_id(&event.plugin_ref_id).await
                else {
                    eprintln!(
                        "Warning: Ignoring plugin event with unknown plugin ref '{}'",
                        event.plugin_ref_id
                    );
                    continue;
                };

                let plugin_name = plugin_handle.info().name;
                let Some(reply_payload) = build_plugin_reply(&query_manager, &event, &plugin_name)
                else {
                    continue;
                };

                if let Err(err) = pm.reply(&event, &reply_payload).await {
                    eprintln!("Warning: Failed replying to plugin event: {err}");
                }
            }

            pm.unsubscribe(&rx_id_for_task).await;
        });

        Self { rx_id, task }
    }

    pub async fn shutdown(self, plugin_manager: &PluginManager) {
        plugin_manager.unsubscribe(&self.rx_id).await;
        self.task.abort();
        let _ = self.task.await;
    }
}

fn build_plugin_reply(
    query_manager: &QueryManager,
    event: &InternalEvent,
    plugin_name: &str,
) -> Option<InternalEventPayload> {
    match handle_shared_plugin_event(
        query_manager,
        &event.payload,
        SharedPluginEventContext {
            plugin_name,
            workspace_id: event.context.workspace_id.as_deref(),
        },
    ) {
        GroupedPluginEvent::Shared(SharedEvent::Reply(payload)) => Some(payload),
        GroupedPluginEvent::Shared(SharedEvent::ErrorResponse(resp)) => {
            eprintln!("[plugin:{}] error: {}", plugin_name, resp.error);
            None
        }
        GroupedPluginEvent::Shared(SharedEvent::ReloadResponse(_)) => None,
        GroupedPluginEvent::Host(HostRequest::ShowToast(req)) => {
            eprintln!("[plugin:{}] {}", plugin_name, req.message);
            Some(InternalEventPayload::ShowToastResponse(EmptyPayload {}))
        }
        GroupedPluginEvent::Host(HostRequest::ListOpenWorkspaces(_)) => {
            let workspaces = match query_manager.connect().list_workspaces() {
                Ok(workspaces) => workspaces
                    .into_iter()
                    .map(|w| WorkspaceInfo { id: w.id.clone(), name: w.name, label: w.id })
                    .collect(),
                Err(err) => {
                    return Some(InternalEventPayload::ErrorResponse(ErrorResponse {
                        error: format!("Failed to list workspaces in CLI: {err}"),
                    }));
                }
            };
            Some(InternalEventPayload::ListOpenWorkspacesResponse(ListOpenWorkspacesResponse {
                workspaces,
            }))
        }
        GroupedPluginEvent::Host(req) => Some(InternalEventPayload::ErrorResponse(ErrorResponse {
            error: format!("Unsupported plugin request in CLI: {}", req.type_name()),
        })),
        GroupedPluginEvent::Ignore => None,
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;
    use yaak_plugins::events::{GetKeyValueRequest, PluginContext, WindowInfoRequest};

    fn query_manager_for_test() -> (QueryManager, TempDir) {
        let temp_dir = TempDir::new().expect("Failed to create temp dir");
        let db_path = temp_dir.path().join("db.sqlite");
        let blob_path = temp_dir.path().join("blobs.sqlite");
        let (query_manager, _blob_manager, _rx) =
            yaak_models::init_standalone(&db_path, &blob_path).expect("Failed to initialize DB");
        (query_manager, temp_dir)
    }

    fn event(payload: InternalEventPayload) -> InternalEvent {
        InternalEvent {
            id: "evt_1".to_string(),
            plugin_ref_id: "plugin_ref_1".to_string(),
            plugin_name: "@yaak/test-plugin".to_string(),
            reply_id: None,
            context: PluginContext::new_empty(),
            payload,
        }
    }

    #[test]
    fn key_value_requests_round_trip() {
        let (query_manager, _temp_dir) = query_manager_for_test();
        let plugin_name = "@yaak/test-plugin";

        let get_missing = build_plugin_reply(
            &query_manager,
            &event(InternalEventPayload::GetKeyValueRequest(GetKeyValueRequest {
                key: "missing".to_string(),
            })),
            plugin_name,
        );
        match get_missing {
            Some(InternalEventPayload::GetKeyValueResponse(r)) => assert_eq!(r.value, None),
            other => panic!("unexpected payload for missing get: {other:?}"),
        }

        let set = build_plugin_reply(
            &query_manager,
            &event(InternalEventPayload::SetKeyValueRequest(
                yaak_plugins::events::SetKeyValueRequest {
                    key: "token".to_string(),
                    value: "{\"access_token\":\"abc\"}".to_string(),
                },
            )),
            plugin_name,
        );
        assert!(matches!(set, Some(InternalEventPayload::SetKeyValueResponse(_))));

        let get_present = build_plugin_reply(
            &query_manager,
            &event(InternalEventPayload::GetKeyValueRequest(GetKeyValueRequest {
                key: "token".to_string(),
            })),
            plugin_name,
        );
        match get_present {
            Some(InternalEventPayload::GetKeyValueResponse(r)) => {
                assert_eq!(r.value, Some("{\"access_token\":\"abc\"}".to_string()))
            }
            other => panic!("unexpected payload for present get: {other:?}"),
        }

        let delete = build_plugin_reply(
            &query_manager,
            &event(InternalEventPayload::DeleteKeyValueRequest(
                yaak_plugins::events::DeleteKeyValueRequest { key: "token".to_string() },
            )),
            plugin_name,
        );
        match delete {
            Some(InternalEventPayload::DeleteKeyValueResponse(r)) => assert!(r.deleted),
            other => panic!("unexpected payload for delete: {other:?}"),
        }
    }

    #[test]
    fn unsupported_request_gets_error_reply() {
        let (query_manager, _temp_dir) = query_manager_for_test();
        let payload = build_plugin_reply(
            &query_manager,
            &event(InternalEventPayload::WindowInfoRequest(WindowInfoRequest {
                label: "main".to_string(),
            })),
            "@yaak/test-plugin",
        );

        match payload {
            Some(InternalEventPayload::ErrorResponse(err)) => {
                assert!(err.error.contains("Unsupported plugin request in CLI"));
                assert!(err.error.contains("window_info_request"));
            }
            other => panic!("unexpected payload for unsupported request: {other:?}"),
        }
    }
}
@@ -1,16 +0,0 @@
use std::io::{self, IsTerminal, Write};

pub fn confirm_delete(resource_name: &str, resource_id: &str) -> bool {
    if !io::stdin().is_terminal() {
        eprintln!("Refusing to delete in non-interactive mode without --yes");
        std::process::exit(1);
    }

    print!("Delete {resource_name} {resource_id}? [y/N]: ");
    io::stdout().flush().expect("Failed to flush stdout");

    let mut input = String::new();
    io::stdin().read_line(&mut input).expect("Failed to read confirmation");

    matches!(input.trim().to_lowercase().as_str(), "y" | "yes")
}
@@ -1,107 +0,0 @@
use serde::Serialize;
use serde::de::DeserializeOwned;
use serde_json::{Map, Value};

type JsonResult<T> = std::result::Result<T, String>;

pub fn is_json_shorthand(input: &str) -> bool {
    input.trim_start().starts_with('{')
}

pub fn parse_json_object(raw: &str, context: &str) -> JsonResult<Value> {
    let value: Value = serde_json::from_str(raw)
        .map_err(|error| format!("Invalid JSON for {context}: {error}"))?;

    if !value.is_object() {
        return Err(format!("JSON payload for {context} must be an object"));
    }

    Ok(value)
}

pub fn parse_optional_json(
    json_flag: Option<String>,
    json_shorthand: Option<String>,
    context: &str,
) -> JsonResult<Option<Value>> {
    match (json_flag, json_shorthand) {
        (Some(_), Some(_)) => {
            Err(format!("Cannot provide both --json and positional JSON for {context}"))
        }
        (Some(raw), None) => parse_json_object(&raw, context).map(Some),
        (None, Some(raw)) => parse_json_object(&raw, context).map(Some),
        (None, None) => Ok(None),
    }
}

pub fn parse_required_json(
    json_flag: Option<String>,
    json_shorthand: Option<String>,
    context: &str,
) -> JsonResult<Value> {
    parse_optional_json(json_flag, json_shorthand, context)?
        .ok_or_else(|| format!("Missing JSON payload for {context}. Use --json or positional JSON"))
}

pub fn require_id(payload: &Value, context: &str) -> JsonResult<String> {
    payload
        .get("id")
        .and_then(|value| value.as_str())
        .filter(|value| !value.is_empty())
        .map(|value| value.to_string())
        .ok_or_else(|| format!("{context} requires a non-empty \"id\" field"))
}

pub fn validate_create_id(payload: &Value, context: &str) -> JsonResult<()> {
    let Some(id_value) = payload.get("id") else {
        return Ok(());
    };

    match id_value {
        Value::String(id) if id.is_empty() => Ok(()),
        _ => Err(format!("{context} create JSON must omit \"id\" or set it to an empty string")),
    }
}

pub fn apply_merge_patch<T>(existing: &T, patch: &Value, id: &str, context: &str) -> JsonResult<T>
where
    T: Serialize + DeserializeOwned,
{
    let mut base = serde_json::to_value(existing)
        .map_err(|error| format!("Failed to serialize existing model for {context}: {error}"))?;
    merge_patch(&mut base, patch);

    let Some(base_object) = base.as_object_mut() else {
        return Err(format!("Merged payload for {context} must be an object"));
    };
    base_object.insert("id".to_string(), Value::String(id.to_string()));

    serde_json::from_value(base)
        .map_err(|error| format!("Failed to deserialize merged payload for {context}: {error}"))
}

fn merge_patch(target: &mut Value, patch: &Value) {
    match patch {
        Value::Object(patch_map) => {
            if !target.is_object() {
                *target = Value::Object(Map::new());
            }

            let target_map =
                target.as_object_mut().expect("merge_patch target expected to be object");

            for (key, patch_value) in patch_map {
                if patch_value.is_null() {
                    target_map.remove(key);
                    continue;
                }

                let target_entry = target_map.entry(key.clone()).or_insert(Value::Null);
                merge_patch(target_entry, patch_value);
            }
        }
        _ => {
            *target = patch.clone();
        }
    }
}
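// The merge semantics above follow RFC 7386-style JSON Merge Patch: a null in
// the patch removes the key, nested objects merge recursively, and any other
// value replaces the target. Worked example (illustrative values):
//
//   existing: {"name": "Old", "headers": {"a": "1", "b": "2"}}
//   patch:    {"name": "New", "headers": {"b": null}}
//   merged:   {"name": "New", "headers": {"a": "1"}}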
@@ -1,2 +0,0 @@
pub mod confirm;
pub mod json;
@@ -1,42 +0,0 @@
use std::io::{Read, Write};
use std::net::TcpListener;
use std::thread;

pub struct TestHttpServer {
    pub url: String,
    handle: Option<thread::JoinHandle<()>>,
}

impl TestHttpServer {
    pub fn spawn_ok(body: &'static str) -> Self {
        let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind test HTTP server");
        let addr = listener.local_addr().expect("Failed to get local addr");
        let url = format!("http://{addr}/test");
        let body_bytes = body.as_bytes().to_vec();

        let handle = thread::spawn(move || {
            if let Ok((mut stream, _)) = listener.accept() {
                let mut request_buf = [0u8; 4096];
                let _ = stream.read(&mut request_buf);

                let response = format!(
                    "HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: {}\r\nConnection: close\r\n\r\n",
                    body_bytes.len()
                );
                let _ = stream.write_all(response.as_bytes());
                let _ = stream.write_all(&body_bytes);
                let _ = stream.flush();
            }
        });

        Self { url, handle: Some(handle) }
    }
}

impl Drop for TestHttpServer {
    fn drop(&mut self) {
        if let Some(handle) = self.handle.take() {
            let _ = handle.join();
        }
    }
}
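// Minimal usage sketch (added for illustration; in the real tests the request
// is made by the yaakcli binary instead): spawn the one-shot server, issue a
// raw GET over a std TcpStream, and read the canned body back.
#[cfg(test)]
mod test_http_server_example {
    use super::TestHttpServer;
    use std::io::{Read, Write};
    use std::net::TcpStream;

    #[test]
    fn serves_one_request() {
        let server = TestHttpServer::spawn_ok("hello");
        // The URL is http://127.0.0.1:<port>/test; strip scheme and path for connect().
        let host = server.url.trim_start_matches("http://").trim_end_matches("/test");
        let mut stream = TcpStream::connect(host).expect("connect");
        stream
            .write_all(b"GET /test HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")
            .expect("write request");
        let mut response = String::new();
        stream.read_to_string(&mut response).expect("read response");
        // Headers come first; the canned body is the last thing on the wire.
        assert!(response.ends_with("hello"));
    }
}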
@@ -1,106 +0,0 @@
#![allow(dead_code)]

pub mod http_server;

use assert_cmd::Command;
use assert_cmd::cargo::cargo_bin_cmd;
use std::path::Path;
use yaak_models::models::{Folder, GrpcRequest, HttpRequest, WebsocketRequest, Workspace};
use yaak_models::query_manager::QueryManager;
use yaak_models::util::UpdateSource;

pub fn cli_cmd(data_dir: &Path) -> Command {
    let mut cmd = cargo_bin_cmd!("yaakcli");
    cmd.arg("--data-dir").arg(data_dir);
    cmd
}

pub fn parse_created_id(stdout: &[u8], label: &str) -> String {
    String::from_utf8_lossy(stdout)
        .trim()
        .split_once(": ")
        .map(|(_, id)| id.to_string())
        .unwrap_or_else(|| panic!("Expected id in '{label}' output"))
}

pub fn query_manager(data_dir: &Path) -> QueryManager {
    let db_path = data_dir.join("db.sqlite");
    let blob_path = data_dir.join("blobs.sqlite");
    let (query_manager, _blob_manager, _rx) =
        yaak_models::init_standalone(&db_path, &blob_path).expect("Failed to initialize DB");
    query_manager
}

pub fn seed_workspace(data_dir: &Path, workspace_id: &str) {
    let workspace = Workspace {
        id: workspace_id.to_string(),
        name: "Seed Workspace".to_string(),
        description: "Seeded for integration tests".to_string(),
        ..Default::default()
    };

    query_manager(data_dir)
        .connect()
        .upsert_workspace(&workspace, &UpdateSource::Sync)
        .expect("Failed to seed workspace");
}

pub fn seed_request(data_dir: &Path, workspace_id: &str, request_id: &str) {
    let request = HttpRequest {
        id: request_id.to_string(),
        workspace_id: workspace_id.to_string(),
        name: "Seeded Request".to_string(),
        method: "GET".to_string(),
        url: "https://example.com".to_string(),
        ..Default::default()
    };

    query_manager(data_dir)
        .connect()
        .upsert_http_request(&request, &UpdateSource::Sync)
        .expect("Failed to seed request");
}

pub fn seed_folder(data_dir: &Path, workspace_id: &str, folder_id: &str) {
    let folder = Folder {
        id: folder_id.to_string(),
        workspace_id: workspace_id.to_string(),
        name: "Seed Folder".to_string(),
        ..Default::default()
    };

    query_manager(data_dir)
        .connect()
        .upsert_folder(&folder, &UpdateSource::Sync)
        .expect("Failed to seed folder");
}

pub fn seed_grpc_request(data_dir: &Path, workspace_id: &str, request_id: &str) {
    let request = GrpcRequest {
        id: request_id.to_string(),
        workspace_id: workspace_id.to_string(),
        name: "Seeded gRPC Request".to_string(),
        url: "https://example.com".to_string(),
        ..Default::default()
    };

    query_manager(data_dir)
        .connect()
        .upsert_grpc_request(&request, &UpdateSource::Sync)
        .expect("Failed to seed gRPC request");
}

pub fn seed_websocket_request(data_dir: &Path, workspace_id: &str, request_id: &str) {
    let request = WebsocketRequest {
        id: request_id.to_string(),
        workspace_id: workspace_id.to_string(),
        name: "Seeded WebSocket Request".to_string(),
        url: "wss://example.com/socket".to_string(),
        ..Default::default()
    };

    query_manager(data_dir)
        .connect()
        .upsert_websocket_request(&request, &UpdateSource::Sync)
        .expect("Failed to seed WebSocket request");
}
@@ -1,80 +0,0 @@
mod common;

use common::{cli_cmd, parse_created_id, query_manager, seed_workspace};
use predicates::str::contains;
use tempfile::TempDir;

#[test]
fn create_list_show_delete_round_trip() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();
    seed_workspace(data_dir, "wk_test");

    cli_cmd(data_dir)
        .args(["environment", "list", "wk_test"])
        .assert()
        .success()
        .stdout(contains("Global Variables"));

    let create_assert = cli_cmd(data_dir)
        .args(["environment", "create", "wk_test", "--name", "Production"])
        .assert()
        .success();
    let environment_id = parse_created_id(&create_assert.get_output().stdout, "environment create");

    cli_cmd(data_dir)
        .args(["environment", "list", "wk_test"])
        .assert()
        .success()
        .stdout(contains(&environment_id))
        .stdout(contains("Production"));

    cli_cmd(data_dir)
        .args(["environment", "show", &environment_id])
        .assert()
        .success()
        .stdout(contains(format!("\"id\": \"{environment_id}\"")))
        .stdout(contains("\"parentModel\": \"environment\""));

    cli_cmd(data_dir)
        .args(["environment", "delete", &environment_id, "--yes"])
        .assert()
        .success()
        .stdout(contains(format!("Deleted environment: {environment_id}")));

    assert!(query_manager(data_dir).connect().get_environment(&environment_id).is_err());
}

#[test]
fn json_create_and_update_merge_patch_round_trip() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();
    seed_workspace(data_dir, "wk_test");

    let create_assert = cli_cmd(data_dir)
        .args([
            "environment",
            "create",
            r#"{"workspaceId":"wk_test","name":"Json Environment"}"#,
        ])
        .assert()
        .success();
    let environment_id = parse_created_id(&create_assert.get_output().stdout, "environment create");

    cli_cmd(data_dir)
        .args([
            "environment",
            "update",
            &format!(r##"{{"id":"{}","color":"#00ff00"}}"##, environment_id),
        ])
        .assert()
        .success()
        .stdout(contains(format!("Updated environment: {environment_id}")));

    cli_cmd(data_dir)
        .args(["environment", "show", &environment_id])
        .assert()
        .success()
        .stdout(contains("\"name\": \"Json Environment\""))
        .stdout(contains("\"color\": \"#00ff00\""));
}
@@ -1,74 +0,0 @@
mod common;

use common::{cli_cmd, parse_created_id, query_manager, seed_workspace};
use predicates::str::contains;
use tempfile::TempDir;

#[test]
fn create_list_show_delete_round_trip() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();
    seed_workspace(data_dir, "wk_test");

    let create_assert = cli_cmd(data_dir)
        .args(["folder", "create", "wk_test", "--name", "Auth"])
        .assert()
        .success();
    let folder_id = parse_created_id(&create_assert.get_output().stdout, "folder create");

    cli_cmd(data_dir)
        .args(["folder", "list", "wk_test"])
        .assert()
        .success()
        .stdout(contains(&folder_id))
        .stdout(contains("Auth"));

    cli_cmd(data_dir)
        .args(["folder", "show", &folder_id])
        .assert()
        .success()
        .stdout(contains(format!("\"id\": \"{folder_id}\"")))
        .stdout(contains("\"workspaceId\": \"wk_test\""));

    cli_cmd(data_dir)
        .args(["folder", "delete", &folder_id, "--yes"])
        .assert()
        .success()
        .stdout(contains(format!("Deleted folder: {folder_id}")));

    assert!(query_manager(data_dir).connect().get_folder(&folder_id).is_err());
}

#[test]
fn json_create_and_update_merge_patch_round_trip() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();
    seed_workspace(data_dir, "wk_test");

    let create_assert = cli_cmd(data_dir)
        .args([
            "folder",
            "create",
            r#"{"workspaceId":"wk_test","name":"Json Folder"}"#,
        ])
        .assert()
        .success();
    let folder_id = parse_created_id(&create_assert.get_output().stdout, "folder create");

    cli_cmd(data_dir)
        .args([
            "folder",
            "update",
            &format!(r#"{{"id":"{}","description":"Folder Description"}}"#, folder_id),
        ])
        .assert()
        .success()
        .stdout(contains(format!("Updated folder: {folder_id}")));

    cli_cmd(data_dir)
        .args(["folder", "show", &folder_id])
        .assert()
        .success()
        .stdout(contains("\"name\": \"Json Folder\""))
        .stdout(contains("\"description\": \"Folder Description\""));
}
@@ -1,224 +0,0 @@
mod common;

use common::http_server::TestHttpServer;
use common::{
    cli_cmd, parse_created_id, query_manager, seed_grpc_request, seed_request,
    seed_websocket_request, seed_workspace,
};
use predicates::str::contains;
use tempfile::TempDir;
use yaak_models::models::HttpResponseState;

#[test]
fn show_and_delete_yes_round_trip() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();
    seed_workspace(data_dir, "wk_test");

    let create_assert = cli_cmd(data_dir)
        .args([
            "request",
            "create",
            "wk_test",
            "--name",
            "Smoke Test",
            "--url",
            "https://example.com",
        ])
        .assert()
        .success();

    let request_id = parse_created_id(&create_assert.get_output().stdout, "request create");

    cli_cmd(data_dir)
        .args(["request", "show", &request_id])
        .assert()
        .success()
        .stdout(contains(format!("\"id\": \"{request_id}\"")))
        .stdout(contains("\"workspaceId\": \"wk_test\""));

    cli_cmd(data_dir)
        .args(["request", "delete", &request_id, "--yes"])
        .assert()
        .success()
        .stdout(contains(format!("Deleted request: {request_id}")));

    assert!(query_manager(data_dir).connect().get_http_request(&request_id).is_err());
}

#[test]
fn delete_without_yes_fails_in_non_interactive_mode() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();
    seed_workspace(data_dir, "wk_test");
    seed_request(data_dir, "wk_test", "rq_seed_delete_noninteractive");

    cli_cmd(data_dir)
        .args(["request", "delete", "rq_seed_delete_noninteractive"])
        .assert()
        .failure()
        .code(1)
        .stderr(contains("Refusing to delete in non-interactive mode without --yes"));

    assert!(
        query_manager(data_dir).connect().get_http_request("rq_seed_delete_noninteractive").is_ok()
    );
}

#[test]
fn json_create_and_update_merge_patch_round_trip() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();
    seed_workspace(data_dir, "wk_test");

    let create_assert = cli_cmd(data_dir)
        .args([
            "request",
            "create",
            r#"{"workspaceId":"wk_test","name":"Json Request","url":"https://example.com"}"#,
        ])
        .assert()
        .success();
    let request_id = parse_created_id(&create_assert.get_output().stdout, "request create");

    cli_cmd(data_dir)
        .args([
            "request",
            "update",
            &format!(r#"{{"id":"{}","name":"Renamed Request"}}"#, request_id),
        ])
        .assert()
        .success()
        .stdout(contains(format!("Updated request: {request_id}")));

    cli_cmd(data_dir)
        .args(["request", "show", &request_id])
        .assert()
        .success()
        .stdout(contains("\"name\": \"Renamed Request\""))
        .stdout(contains("\"url\": \"https://example.com\""));
}

#[test]
fn update_requires_id_in_json_payload() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();

    cli_cmd(data_dir)
        .args(["request", "update", r#"{"name":"No ID"}"#])
        .assert()
        .failure()
        .stderr(contains("request update requires a non-empty \"id\" field"));
}

#[test]
fn create_allows_workspace_only_with_empty_defaults() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();
    seed_workspace(data_dir, "wk_test");

    let create_assert = cli_cmd(data_dir).args(["request", "create", "wk_test"]).assert().success();
    let request_id = parse_created_id(&create_assert.get_output().stdout, "request create");

    let request = query_manager(data_dir)
        .connect()
        .get_http_request(&request_id)
        .expect("Failed to load created request");
    assert_eq!(request.workspace_id, "wk_test");
    assert_eq!(request.method, "GET");
    assert_eq!(request.name, "");
    assert_eq!(request.url, "");
}

#[test]
fn request_send_persists_response_body_and_events() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();
    seed_workspace(data_dir, "wk_test");

    let server = TestHttpServer::spawn_ok("hello from integration test");

    let create_assert = cli_cmd(data_dir)
        .args([
            "request",
            "create",
            "wk_test",
            "--name",
            "Send Test",
            "--url",
            &server.url,
        ])
        .assert()
        .success();
    let request_id = parse_created_id(&create_assert.get_output().stdout, "request create");

    cli_cmd(data_dir)
        .args(["request", "send", &request_id])
        .assert()
        .success()
        .stdout(contains("HTTP 200 OK"))
        .stdout(contains("hello from integration test"));

    let qm = query_manager(data_dir);
    let db = qm.connect();
    let responses =
        db.list_http_responses_for_request(&request_id, None).expect("Failed to load responses");
    assert_eq!(responses.len(), 1, "expected exactly one persisted response");

    let response = &responses[0];
    assert_eq!(response.status, 200);
    assert!(matches!(response.state, HttpResponseState::Closed));
    assert!(response.error.is_none());

    let body_path =
        response.body_path.as_ref().expect("expected persisted response body path").to_string();
    let body = std::fs::read_to_string(&body_path).expect("Failed to read response body file");
    assert_eq!(body, "hello from integration test");

    let events =
        db.list_http_response_events(&response.id).expect("Failed to load response events");
    assert!(!events.is_empty(), "expected at least one persisted response event");
}

#[test]
fn request_schema_http_outputs_json_schema() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();

    cli_cmd(data_dir)
        .args(["request", "schema", "http"])
        .assert()
        .success()
        .stdout(contains("\"type\": \"object\""))
        .stdout(contains("\"authentication\""));
}

#[test]
fn request_send_grpc_returns_explicit_nyi_error() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();
    seed_workspace(data_dir, "wk_test");
    seed_grpc_request(data_dir, "wk_test", "gr_seed_nyi");

    cli_cmd(data_dir)
        .args(["request", "send", "gr_seed_nyi"])
        .assert()
        .failure()
        .code(1)
        .stderr(contains("gRPC request send is not implemented yet in yaak-cli"));
}

#[test]
fn request_send_websocket_returns_explicit_nyi_error() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();
    seed_workspace(data_dir, "wk_test");
    seed_websocket_request(data_dir, "wk_test", "wr_seed_nyi");

    cli_cmd(data_dir)
        .args(["request", "send", "wr_seed_nyi"])
        .assert()
        .failure()
        .code(1)
        .stderr(contains("WebSocket request send is not implemented yet in yaak-cli"));
}
@@ -1,81 +0,0 @@
mod common;

use common::http_server::TestHttpServer;
use common::{cli_cmd, query_manager, seed_folder, seed_workspace};
use predicates::str::contains;
use tempfile::TempDir;
use yaak_models::models::HttpRequest;
use yaak_models::util::UpdateSource;

#[test]
fn top_level_send_workspace_sends_http_requests_and_prints_summary() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();
    seed_workspace(data_dir, "wk_test");

    let server = TestHttpServer::spawn_ok("workspace bulk send");
    let request = HttpRequest {
        id: "rq_workspace_send".to_string(),
        workspace_id: "wk_test".to_string(),
        name: "Workspace Send".to_string(),
        method: "GET".to_string(),
        url: server.url.clone(),
        ..Default::default()
    };
    query_manager(data_dir)
        .connect()
        .upsert_http_request(&request, &UpdateSource::Sync)
        .expect("Failed to seed workspace request");

    cli_cmd(data_dir)
        .args(["send", "wk_test"])
        .assert()
        .success()
        .stdout(contains("HTTP 200 OK"))
        .stdout(contains("workspace bulk send"))
        .stdout(contains("Send summary: 1 succeeded, 0 failed"));
}

#[test]
fn top_level_send_folder_sends_http_requests_and_prints_summary() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();
    seed_workspace(data_dir, "wk_test");
    seed_folder(data_dir, "wk_test", "fl_test");

    let server = TestHttpServer::spawn_ok("folder bulk send");
    let request = HttpRequest {
        id: "rq_folder_send".to_string(),
        workspace_id: "wk_test".to_string(),
        folder_id: Some("fl_test".to_string()),
        name: "Folder Send".to_string(),
        method: "GET".to_string(),
        url: server.url.clone(),
        ..Default::default()
    };
    query_manager(data_dir)
        .connect()
        .upsert_http_request(&request, &UpdateSource::Sync)
        .expect("Failed to seed folder request");

    cli_cmd(data_dir)
        .args(["send", "fl_test"])
        .assert()
        .success()
        .stdout(contains("HTTP 200 OK"))
        .stdout(contains("folder bulk send"))
        .stdout(contains("Send summary: 1 succeeded, 0 failed"));
}

#[test]
fn top_level_send_unknown_id_fails_with_clear_error() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();

    cli_cmd(data_dir)
        .args(["send", "does_not_exist"])
        .assert()
        .failure()
        .code(1)
        .stderr(contains("Could not resolve ID 'does_not_exist' as request, folder, or workspace"));
}
@@ -1,59 +0,0 @@
mod common;

use common::{cli_cmd, parse_created_id, query_manager};
use predicates::str::contains;
use tempfile::TempDir;

#[test]
fn create_show_delete_round_trip() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();

    let create_assert =
        cli_cmd(data_dir).args(["workspace", "create", "--name", "WS One"]).assert().success();
    let workspace_id = parse_created_id(&create_assert.get_output().stdout, "workspace create");

    cli_cmd(data_dir)
        .args(["workspace", "show", &workspace_id])
        .assert()
        .success()
        .stdout(contains(format!("\"id\": \"{workspace_id}\"")))
        .stdout(contains("\"name\": \"WS One\""));

    cli_cmd(data_dir)
        .args(["workspace", "delete", &workspace_id, "--yes"])
        .assert()
        .success()
        .stdout(contains(format!("Deleted workspace: {workspace_id}")));

    assert!(query_manager(data_dir).connect().get_workspace(&workspace_id).is_err());
}

#[test]
fn json_create_and_update_merge_patch_round_trip() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let data_dir = temp_dir.path();

    let create_assert = cli_cmd(data_dir)
        .args(["workspace", "create", r#"{"name":"Json Workspace"}"#])
        .assert()
        .success();
    let workspace_id = parse_created_id(&create_assert.get_output().stdout, "workspace create");

    cli_cmd(data_dir)
        .args([
            "workspace",
            "update",
            &format!(r#"{{"id":"{}","description":"Updated via JSON"}}"#, workspace_id),
        ])
        .assert()
        .success()
        .stdout(contains(format!("Updated workspace: {workspace_id}")));

    cli_cmd(data_dir)
        .args(["workspace", "show", &workspace_id])
        .assert()
        .success()
        .stdout(contains("\"name\": \"Json Workspace\""))
        .stdout(contains("\"description\": \"Updated via JSON\""));
}
@@ -1,3 +0,0 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.

export type WatchResult = { unlistenEvent: string, };
@@ -1,5 +0,0 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.

export type PluginUpdateInfo = { name: string, currentVersion: string, latestVersion: string, };

export type PluginUpdateNotification = { updateCount: number, plugins: Array<PluginUpdateInfo>, };
@@ -1,13 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <!-- Enable for NodeJS/V8 JIT compiler -->
    <key>com.apple.security.cs.allow-unsigned-executable-memory</key>
    <true/>

    <!-- Allow loading plugins signed with different Team IDs (e.g., 1Password) -->
    <key>com.apple.security.cs.disable-library-validation</key>
    <true/>
</dict>
</plist>
@@ -1,6 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
</dict>
</plist>
@@ -1,100 +0,0 @@
use crate::PluginContextExt;
use crate::error::Result;
use std::sync::Arc;
use tauri::{AppHandle, Manager, Runtime, State, WebviewWindow, command};
use yaak_crypto::manager::EncryptionManager;
use yaak_models::models::HttpRequestHeader;
use yaak_models::queries::workspaces::default_headers;
use yaak_plugins::events::GetThemesResponse;
use yaak_plugins::manager::PluginManager;
use yaak_plugins::native_template_functions::{
    decrypt_secure_template_function, encrypt_secure_template_function,
};

/// Extension trait for accessing the EncryptionManager from Tauri Manager types.
pub trait EncryptionManagerExt<'a, R> {
    fn crypto(&'a self) -> State<'a, EncryptionManager>;
}

impl<'a, R: Runtime, M: Manager<R>> EncryptionManagerExt<'a, R> for M {
    fn crypto(&'a self) -> State<'a, EncryptionManager> {
        self.state::<EncryptionManager>()
    }
}

#[command]
pub(crate) async fn cmd_decrypt_template<R: Runtime>(
    window: WebviewWindow<R>,
    template: &str,
) -> Result<String> {
    let encryption_manager = window.app_handle().state::<EncryptionManager>();
    let plugin_context = window.plugin_context();
    Ok(decrypt_secure_template_function(&encryption_manager, &plugin_context, template)?)
}

#[command]
pub(crate) async fn cmd_secure_template<R: Runtime>(
    app_handle: AppHandle<R>,
    window: WebviewWindow<R>,
    template: &str,
) -> Result<String> {
    let plugin_manager = Arc::new((*app_handle.state::<PluginManager>()).clone());
    let encryption_manager = Arc::new((*app_handle.state::<EncryptionManager>()).clone());
    let plugin_context = window.plugin_context();
    Ok(encrypt_secure_template_function(
        plugin_manager,
        encryption_manager,
        &plugin_context,
        template,
    )?)
}

#[command]
pub(crate) async fn cmd_get_themes<R: Runtime>(
    window: WebviewWindow<R>,
    plugin_manager: State<'_, PluginManager>,
) -> Result<Vec<GetThemesResponse>> {
    Ok(plugin_manager.get_themes(&window.plugin_context()).await?)
}

#[command]
pub(crate) async fn cmd_enable_encryption<R: Runtime>(
    window: WebviewWindow<R>,
    workspace_id: &str,
) -> Result<()> {
    window.crypto().ensure_workspace_key(workspace_id)?;
    window.crypto().reveal_workspace_key(workspace_id)?;
    Ok(())
}

#[command]
pub(crate) async fn cmd_reveal_workspace_key<R: Runtime>(
    window: WebviewWindow<R>,
    workspace_id: &str,
) -> Result<String> {
    Ok(window.crypto().reveal_workspace_key(workspace_id)?)
}

#[command]
pub(crate) async fn cmd_set_workspace_key<R: Runtime>(
    window: WebviewWindow<R>,
    workspace_id: &str,
    key: &str,
) -> Result<()> {
    window.crypto().set_human_key(workspace_id, key)?;
    Ok(())
}

#[command]
pub(crate) async fn cmd_disable_encryption<R: Runtime>(
    window: WebviewWindow<R>,
    workspace_id: &str,
) -> Result<()> {
    window.crypto().disable_encryption(workspace_id)?;
    Ok(())
}

#[command]
pub(crate) fn cmd_default_headers() -> Vec<HttpRequestHeader> {
    default_headers()
}
@@ -1,149 +0,0 @@
//! Tauri-specific extensions for yaak-git.
//!
//! This module provides the Tauri commands for git functionality.

use crate::error::Result;
use std::path::{Path, PathBuf};
use tauri::command;
use yaak_git::{
    BranchDeleteResult, CloneResult, GitCommit, GitRemote, GitStatusSummary, PullResult,
    PushResult, git_add, git_add_credential, git_add_remote, git_checkout_branch, git_clone,
    git_commit, git_create_branch, git_delete_branch, git_delete_remote_branch, git_fetch_all,
    git_init, git_log, git_merge_branch, git_pull, git_pull_force_reset, git_pull_merge, git_push,
    git_remotes, git_rename_branch, git_reset_changes, git_rm_remote, git_status, git_unstage,
};

// NOTE: All of these commands are async to prevent blocking work from locking up the UI

#[command]
pub async fn cmd_git_checkout(dir: &Path, branch: &str, force: bool) -> Result<String> {
    Ok(git_checkout_branch(dir, branch, force).await?)
}

#[command]
pub async fn cmd_git_branch(dir: &Path, branch: &str, base: Option<&str>) -> Result<()> {
    Ok(git_create_branch(dir, branch, base).await?)
}

#[command]
pub async fn cmd_git_delete_branch(
    dir: &Path,
    branch: &str,
    force: Option<bool>,
) -> Result<BranchDeleteResult> {
    Ok(git_delete_branch(dir, branch, force.unwrap_or(false)).await?)
}

#[command]
pub async fn cmd_git_delete_remote_branch(dir: &Path, branch: &str) -> Result<()> {
    Ok(git_delete_remote_branch(dir, branch).await?)
}

#[command]
pub async fn cmd_git_merge_branch(dir: &Path, branch: &str) -> Result<()> {
    Ok(git_merge_branch(dir, branch).await?)
}

#[command]
pub async fn cmd_git_rename_branch(dir: &Path, old_name: &str, new_name: &str) -> Result<()> {
    Ok(git_rename_branch(dir, old_name, new_name).await?)
}

#[command]
pub async fn cmd_git_status(dir: &Path) -> Result<GitStatusSummary> {
    Ok(git_status(dir)?)
}

#[command]
pub async fn cmd_git_log(dir: &Path) -> Result<Vec<GitCommit>> {
    Ok(git_log(dir)?)
}

#[command]
pub async fn cmd_git_initialize(dir: &Path) -> Result<()> {
    Ok(git_init(dir)?)
}

#[command]
pub async fn cmd_git_clone(url: &str, dir: &Path) -> Result<CloneResult> {
    Ok(git_clone(url, dir).await?)
}

#[command]
pub async fn cmd_git_commit(dir: &Path, message: &str) -> Result<()> {
    Ok(git_commit(dir, message).await?)
}

#[command]
pub async fn cmd_git_fetch_all(dir: &Path) -> Result<()> {
    Ok(git_fetch_all(dir).await?)
}

#[command]
pub async fn cmd_git_push(dir: &Path) -> Result<PushResult> {
    Ok(git_push(dir).await?)
}

#[command]
pub async fn cmd_git_pull(dir: &Path) -> Result<PullResult> {
    Ok(git_pull(dir).await?)
}

#[command]
pub async fn cmd_git_pull_force_reset(
    dir: &Path,
    remote: &str,
    branch: &str,
) -> Result<PullResult> {
    Ok(git_pull_force_reset(dir, remote, branch).await?)
}

#[command]
pub async fn cmd_git_pull_merge(dir: &Path, remote: &str, branch: &str) -> Result<PullResult> {
    Ok(git_pull_merge(dir, remote, branch).await?)
}

#[command]
pub async fn cmd_git_add(dir: &Path, rela_paths: Vec<PathBuf>) -> Result<()> {
    for path in rela_paths {
        git_add(dir, &path)?;
    }
    Ok(())
}

#[command]
pub async fn cmd_git_unstage(dir: &Path, rela_paths: Vec<PathBuf>) -> Result<()> {
    for path in rela_paths {
        git_unstage(dir, &path)?;
    }
    Ok(())
}

#[command]
pub async fn cmd_git_reset_changes(dir: &Path) -> Result<()> {
    Ok(git_reset_changes(dir).await?)
}

#[command]
pub async fn cmd_git_add_credential(
    remote_url: &str,
    username: &str,
    password: &str,
) -> Result<()> {
    Ok(git_add_credential(remote_url, username, password).await?)
}

#[command]
pub async fn cmd_git_remotes(dir: &Path) -> Result<Vec<GitRemote>> {
    Ok(git_remotes(dir)?)
}

#[command]
pub async fn cmd_git_add_remote(dir: &Path, name: &str, url: &str) -> Result<GitRemote> {
    Ok(git_add_remote(dir, name, url)?)
}

#[command]
pub async fn cmd_git_rm_remote(dir: &Path, name: &str) -> Result<()> {
    Ok(git_rm_remote(dir, name)?)
}
@@ -1,185 +0,0 @@
use crate::PluginContextExt;
use crate::error::Error::GenericError;
use crate::error::Result;
use crate::models_ext::BlobManagerExt;
use crate::models_ext::QueryManagerExt;
use log::warn;
use std::sync::Arc;
use std::time::Instant;
use tauri::{AppHandle, Manager, Runtime, WebviewWindow};
use tokio::sync::watch::Receiver;
use yaak::send::{SendHttpRequestWithPluginsParams, send_http_request_with_plugins};
use yaak_crypto::manager::EncryptionManager;
use yaak_http::manager::HttpConnectionManager;
use yaak_models::models::{CookieJar, Environment, HttpRequest, HttpResponse, HttpResponseState};
use yaak_models::util::UpdateSource;
use yaak_plugins::events::PluginContext;
use yaak_plugins::manager::PluginManager;

/// Context for managing response state during HTTP transactions.
/// Handles both persisted responses (stored in DB) and ephemeral responses (in-memory only).
struct ResponseContext<R: Runtime> {
    app_handle: AppHandle<R>,
    response: HttpResponse,
    update_source: UpdateSource,
}

impl<R: Runtime> ResponseContext<R> {
    fn new(app_handle: AppHandle<R>, response: HttpResponse, update_source: UpdateSource) -> Self {
        Self { app_handle, response, update_source }
    }

    /// Whether this response is persisted (has a non-empty ID)
    fn is_persisted(&self) -> bool {
        !self.response.id.is_empty()
    }

    /// Update the response state. For persisted responses, fetches from DB, applies the
    /// closure, and updates the DB. For ephemeral responses, just applies the closure
    /// to the in-memory response.
    fn update<F>(&mut self, func: F) -> Result<()>
    where
        F: FnOnce(&mut HttpResponse),
    {
        if self.is_persisted() {
            let r = self.app_handle.with_tx(|tx| {
                let mut r = tx.get_http_response(&self.response.id)?;
                func(&mut r);
                tx.update_http_response_if_id(&r, &self.update_source)?;
                Ok(r)
            })?;
            self.response = r;
            Ok(())
        } else {
            func(&mut self.response);
            Ok(())
        }
    }

    /// Get the current response state
    fn response(&self) -> &HttpResponse {
        &self.response
    }
}
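// The persisted-vs-ephemeral pattern above, in miniature (a sketch with a
// HashMap standing in for the database; none of these names exist in the real
// crates): an empty ID means the value lives only in memory, while a
// non-empty ID round-trips every update through the store so other readers
// observe it.
#[cfg(test)]
mod response_context_pattern_example {
    use std::collections::HashMap;

    struct MiniContext {
        store: HashMap<String, String>, // stand-in for the DB
        value: (String, String),        // (id, body)
    }

    impl MiniContext {
        fn update(&mut self, func: impl FnOnce(&mut String)) {
            let (id, body) = &mut self.value;
            if id.is_empty() {
                // Ephemeral: mutate in memory only.
                func(body);
            } else {
                // Persisted: read-modify-write through the store.
                let stored = self.store.entry(id.clone()).or_default();
                func(stored);
                *body = stored.clone();
            }
        }
    }

    #[test]
    fn persisted_updates_hit_the_store() {
        let mut ctx = MiniContext {
            store: HashMap::new(),
            value: ("rs_1".to_string(), String::new()),
        };
        ctx.update(|body| body.push_str("closed"));
        assert_eq!(ctx.store.get("rs_1").map(String::as_str), Some("closed"));
    }
}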

pub async fn send_http_request<R: Runtime>(
    window: &WebviewWindow<R>,
    unrendered_request: &HttpRequest,
    og_response: &HttpResponse,
    environment: Option<Environment>,
    cookie_jar: Option<CookieJar>,
    cancelled_rx: &mut Receiver<bool>,
) -> Result<HttpResponse> {
    send_http_request_with_context(
        window,
        unrendered_request,
        og_response,
        environment,
        cookie_jar,
        cancelled_rx,
        &window.plugin_context(),
    )
    .await
}

pub async fn send_http_request_with_context<R: Runtime>(
    window: &WebviewWindow<R>,
    unrendered_request: &HttpRequest,
    og_response: &HttpResponse,
    environment: Option<Environment>,
    cookie_jar: Option<CookieJar>,
    cancelled_rx: &Receiver<bool>,
    plugin_context: &PluginContext,
) -> Result<HttpResponse> {
    let app_handle = window.app_handle().clone();
    let update_source = UpdateSource::from_window_label(window.label());
    let mut response_ctx =
        ResponseContext::new(app_handle.clone(), og_response.clone(), update_source);

    // Execute the inner send logic and handle errors consistently
    let start = Instant::now();
    let result = send_http_request_inner(
        window,
        unrendered_request,
        environment,
        cookie_jar,
        cancelled_rx,
        plugin_context,
        &mut response_ctx,
    )
    .await;

    match result {
        Ok(response) => Ok(response),
        Err(e) => {
            let error = e.to_string();
            let elapsed = start.elapsed().as_millis() as i32;
            warn!("Failed to send request: {error:?}");
            let _ = response_ctx.update(|r| {
                r.state = HttpResponseState::Closed;
                r.elapsed = elapsed;
                if r.elapsed_headers == 0 {
                    r.elapsed_headers = elapsed;
                }
                r.error = Some(error);
            });
            Ok(response_ctx.response().clone())
        }
    }
}

async fn send_http_request_inner<R: Runtime>(
    window: &WebviewWindow<R>,
    unrendered_request: &HttpRequest,
    environment: Option<Environment>,
    cookie_jar: Option<CookieJar>,
    cancelled_rx: &Receiver<bool>,
    plugin_context: &PluginContext,
    response_ctx: &mut ResponseContext<R>,
) -> Result<HttpResponse> {
    let app_handle = window.app_handle().clone();
    let plugin_manager = Arc::new((*app_handle.state::<PluginManager>()).clone());
    let encryption_manager = Arc::new((*app_handle.state::<EncryptionManager>()).clone());
    let connection_manager = app_handle.state::<HttpConnectionManager>();
    let environment_id = environment.map(|e| e.id);
    let cookie_jar_id = cookie_jar.as_ref().map(|jar| jar.id.clone());

    let response_dir = app_handle.path().app_data_dir()?.join("responses");
    let result = send_http_request_with_plugins(SendHttpRequestWithPluginsParams {
        query_manager: app_handle.db_manager().inner(),
        blob_manager: app_handle.blob_manager().inner(),
        request: unrendered_request.clone(),
        environment_id: environment_id.as_deref(),
        update_source: response_ctx.update_source.clone(),
        cookie_jar_id,
        response_dir: &response_dir,
        emit_events_to: None,
        existing_response: Some(response_ctx.response().clone()),
        plugin_manager,
        encryption_manager,
        plugin_context,
        cancelled_rx: Some(cancelled_rx.clone()),
        connection_manager: Some(connection_manager.inner()),
    })
    .await
    .map_err(|e| GenericError(e.to_string()))?;

    Ok(result.response)
}

pub fn resolve_http_request<R: Runtime>(
    window: &WebviewWindow<R>,
    request: &HttpRequest,
) -> Result<(HttpRequest, String)> {
    let mut new_request = request.clone();

    let (authentication_type, authentication, authentication_context_id) =
        window.db().resolve_auth_for_http_request(request)?;
    new_request.authentication_type = authentication_type;
    new_request.authentication = authentication;

    let headers = window.db().resolve_headers_for_http_request(request)?;
    new_request.headers = headers;

    Ok((new_request, authentication_context_id))
}
@@ -1,373 +0,0 @@
//! Tauri-specific extensions for yaak-models.
//!
//! This module provides the Tauri plugin initialization and extension traits
//! that allow accessing QueryManager and BlobManager from Tauri's Manager types.

use chrono::Utc;
use log::error;
use std::time::Duration;
use tauri::plugin::TauriPlugin;
use tauri::{Emitter, Manager, Runtime, State};
use tauri_plugin_dialog::{DialogExt, MessageDialogKind};
use yaak_models::blob_manager::BlobManager;
use yaak_models::db_context::DbContext;
use yaak_models::error::Result;
use yaak_models::models::{AnyModel, GraphQlIntrospection, GrpcEvent, Settings, WebsocketEvent};
use yaak_models::query_manager::QueryManager;
use yaak_models::util::UpdateSource;

const MODEL_CHANGES_RETENTION_HOURS: i64 = 1;
const MODEL_CHANGES_POLL_INTERVAL_MS: u64 = 1000;
const MODEL_CHANGES_POLL_BATCH_SIZE: usize = 200;

struct ModelChangeCursor {
    created_at: String,
    id: i64,
}

impl ModelChangeCursor {
    fn from_launch_time() -> Self {
        Self {
            created_at: Utc::now().naive_utc().format("%Y-%m-%d %H:%M:%S%.3f").to_string(),
            id: 0,
        }
    }
}

fn drain_model_changes_batch<R: Runtime>(
    query_manager: &QueryManager,
    app_handle: &tauri::AppHandle<R>,
    cursor: &mut ModelChangeCursor,
) -> bool {
    let changes = match query_manager.connect().list_model_changes_since(
        &cursor.created_at,
        cursor.id,
        MODEL_CHANGES_POLL_BATCH_SIZE,
    ) {
        Ok(changes) => changes,
        Err(err) => {
            error!("Failed to poll model_changes rows: {err:?}");
            return false;
        }
    };

    if changes.is_empty() {
        return false;
    }

    let fetched_count = changes.len();
    for change in changes {
        cursor.created_at = change.created_at;
        cursor.id = change.id;

        // Local window-originated writes are forwarded immediately from the
        // in-memory model event channel.
        if matches!(change.payload.update_source, UpdateSource::Window { .. }) {
            continue;
        }
        if let Err(err) = app_handle.emit("model_write", change.payload) {
            error!("Failed to emit model_write event: {err:?}");
        }
    }

    fetched_count == MODEL_CHANGES_POLL_BATCH_SIZE
}

async fn run_model_change_poller<R: Runtime>(
    query_manager: QueryManager,
    app_handle: tauri::AppHandle<R>,
    mut cursor: ModelChangeCursor,
) {
    loop {
        while drain_model_changes_batch(&query_manager, &app_handle, &mut cursor) {}
        tokio::time::sleep(Duration::from_millis(MODEL_CHANGES_POLL_INTERVAL_MS)).await;
    }
}
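// A sketch of the drain loop's contract above (hypothetical row counts, no
// database involved): a full batch signals "there may be more rows", so the
// inner loop keeps draining and only sleeps once a partial batch comes back.
#[cfg(test)]
mod poller_contract_example {
    #[test]
    fn drains_until_a_partial_batch() {
        const BATCH: usize = 200; // mirrors MODEL_CHANGES_POLL_BATCH_SIZE
        let mut remaining = 450usize; // pretend 450 unseen model_changes rows
        let mut drains = 0;
        let mut drain = || {
            let fetched = remaining.min(BATCH);
            remaining -= fetched;
            drains += 1;
            fetched == BATCH // same condition drain_model_changes_batch returns
        };
        while drain() {}
        assert_eq!(drains, 3); // 200 + 200 + 50
        assert_eq!(remaining, 0);
    }
}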

/// Extension trait for accessing the QueryManager from Tauri Manager types.
pub trait QueryManagerExt<'a, R> {
    fn db_manager(&'a self) -> State<'a, QueryManager>;
    fn db(&'a self) -> DbContext<'a>;
    fn with_tx<F, T>(&'a self, func: F) -> Result<T>
    where
        F: FnOnce(&DbContext) -> Result<T>;
}

impl<'a, R: Runtime, M: Manager<R>> QueryManagerExt<'a, R> for M {
    fn db_manager(&'a self) -> State<'a, QueryManager> {
        self.state::<QueryManager>()
    }

    fn db(&'a self) -> DbContext<'a> {
        let qm = self.state::<QueryManager>();
        qm.inner().connect()
    }

    fn with_tx<F, T>(&'a self, func: F) -> Result<T>
    where
        F: FnOnce(&DbContext) -> Result<T>,
    {
        let qm = self.state::<QueryManager>();
        qm.inner().with_tx(func)
    }
}

/// Extension trait for accessing the BlobManager from Tauri Manager types.
pub trait BlobManagerExt<'a, R> {
    fn blob_manager(&'a self) -> State<'a, BlobManager>;
    fn blobs(&'a self) -> yaak_models::blob_manager::BlobContext;
}

impl<'a, R: Runtime, M: Manager<R>> BlobManagerExt<'a, R> for M {
    fn blob_manager(&'a self) -> State<'a, BlobManager> {
        self.state::<BlobManager>()
    }

    fn blobs(&'a self) -> yaak_models::blob_manager::BlobContext {
        let manager = self.state::<BlobManager>();
        manager.inner().connect()
    }
}

// Commands for yaak-models
use tauri::WebviewWindow;

#[tauri::command]
pub(crate) fn models_upsert<R: Runtime>(
    window: WebviewWindow<R>,
    model: AnyModel,
) -> Result<String> {
    use yaak_models::error::Error::GenericError;

    let db = window.db();
    let blobs = window.blob_manager();
    let source = &UpdateSource::from_window_label(window.label());
    let id = match model {
        AnyModel::CookieJar(m) => db.upsert_cookie_jar(&m, source)?.id,
        AnyModel::Environment(m) => db.upsert_environment(&m, source)?.id,
        AnyModel::Folder(m) => db.upsert_folder(&m, source)?.id,
        AnyModel::GrpcRequest(m) => db.upsert_grpc_request(&m, source)?.id,
        AnyModel::HttpRequest(m) => db.upsert_http_request(&m, source)?.id,
        AnyModel::HttpResponse(m) => db.upsert_http_response(&m, source, &blobs)?.id,
        AnyModel::KeyValue(m) => db.upsert_key_value(&m, source)?.id,
        AnyModel::Plugin(m) => db.upsert_plugin(&m, source)?.id,
        AnyModel::Settings(m) => db.upsert_settings(&m, source)?.id,
        AnyModel::WebsocketRequest(m) => db.upsert_websocket_request(&m, source)?.id,
        AnyModel::Workspace(m) => db.upsert_workspace(&m, source)?.id,
        AnyModel::WorkspaceMeta(m) => db.upsert_workspace_meta(&m, source)?.id,
        a => return Err(GenericError(format!("Cannot upsert AnyModel {a:?}"))),
    };

    Ok(id)
}

#[tauri::command]
pub(crate) fn models_delete<R: Runtime>(
    window: WebviewWindow<R>,
    model: AnyModel,
) -> Result<String> {
    use yaak_models::error::Error::GenericError;

    let blobs = window.blob_manager();
    // Use a transaction for deletions because they might recurse
    window.with_tx(|tx| {
        let source = &UpdateSource::from_window_label(window.label());
        let id = match model {
            AnyModel::CookieJar(m) => tx.delete_cookie_jar(&m, source)?.id,
            AnyModel::Environment(m) => tx.delete_environment(&m, source)?.id,
            AnyModel::Folder(m) => tx.delete_folder(&m, source)?.id,
            AnyModel::GrpcConnection(m) => tx.delete_grpc_connection(&m, source)?.id,
            AnyModel::GrpcRequest(m) => tx.delete_grpc_request(&m, source)?.id,
            AnyModel::HttpRequest(m) => tx.delete_http_request(&m, source)?.id,
            AnyModel::HttpResponse(m) => tx.delete_http_response(&m, source, &blobs)?.id,
            AnyModel::Plugin(m) => tx.delete_plugin(&m, source)?.id,
            AnyModel::WebsocketConnection(m) => tx.delete_websocket_connection(&m, source)?.id,
            AnyModel::WebsocketRequest(m) => tx.delete_websocket_request(&m, source)?.id,
            AnyModel::Workspace(m) => tx.delete_workspace(&m, source)?.id,
            a => return Err(GenericError(format!("Cannot delete AnyModel {a:?}"))),
        };
        Ok(id)
    })
}

#[tauri::command]
pub(crate) fn models_duplicate<R: Runtime>(
    window: WebviewWindow<R>,
    model: AnyModel,
) -> Result<String> {
    use yaak_models::error::Error::GenericError;

    // Use a transaction for duplications because they might recurse
    window.with_tx(|tx| {
        let source = &UpdateSource::from_window_label(window.label());
        let id = match model {
            AnyModel::Environment(m) => tx.duplicate_environment(&m, source)?.id,
            AnyModel::Folder(m) => tx.duplicate_folder(&m, source)?.id,
            AnyModel::GrpcRequest(m) => tx.duplicate_grpc_request(&m, source)?.id,
            AnyModel::HttpRequest(m) => tx.duplicate_http_request(&m, source)?.id,
            AnyModel::WebsocketRequest(m) => tx.duplicate_websocket_request(&m, source)?.id,
            a => return Err(GenericError(format!("Cannot duplicate AnyModel {a:?}"))),
        };

        Ok(id)
    })
}

#[tauri::command]
pub(crate) fn models_websocket_events<R: Runtime>(
    app_handle: tauri::AppHandle<R>,
    connection_id: &str,
) -> Result<Vec<WebsocketEvent>> {
    Ok(app_handle.db().list_websocket_events(connection_id)?)
}

#[tauri::command]
pub(crate) fn models_grpc_events<R: Runtime>(
    app_handle: tauri::AppHandle<R>,
    connection_id: &str,
) -> Result<Vec<GrpcEvent>> {
    Ok(app_handle.db().list_grpc_events(connection_id)?)
}

#[tauri::command]
pub(crate) fn models_get_settings<R: Runtime>(app_handle: tauri::AppHandle<R>) -> Result<Settings> {
    Ok(app_handle.db().get_settings())
}

#[tauri::command]
pub(crate) fn models_get_graphql_introspection<R: Runtime>(
    app_handle: tauri::AppHandle<R>,
    request_id: &str,
) -> Result<Option<GraphQlIntrospection>> {
    Ok(app_handle.db().get_graphql_introspection(request_id))
}

#[tauri::command]
pub(crate) fn models_upsert_graphql_introspection<R: Runtime>(
    app_handle: tauri::AppHandle<R>,
    request_id: &str,
    workspace_id: &str,
    content: Option<String>,
    window: WebviewWindow<R>,
) -> Result<GraphQlIntrospection> {
    let source = UpdateSource::from_window_label(window.label());
    Ok(app_handle.db().upsert_graphql_introspection(workspace_id, request_id, content, &source)?)
}

#[tauri::command]
pub(crate) fn models_workspace_models<R: Runtime>(
    window: WebviewWindow<R>,
    workspace_id: Option<&str>,
) -> Result<String> {
    let db = window.db();
    let mut l: Vec<AnyModel> = Vec::new();

    // Add the settings
    l.push(db.get_settings().into());

    // Add global models
    l.append(&mut db.list_workspaces()?.into_iter().map(Into::into).collect());
    l.append(&mut db.list_key_values()?.into_iter().map(Into::into).collect());
    l.append(&mut db.list_plugins()?.into_iter().map(Into::into).collect());

    // Add the workspace children
    if let Some(wid) = workspace_id {
        l.append(&mut db.list_cookie_jars(wid)?.into_iter().map(Into::into).collect());
        l.append(&mut db.list_environments_ensure_base(wid)?.into_iter().map(Into::into).collect());
        l.append(&mut db.list_folders(wid)?.into_iter().map(Into::into).collect());
        l.append(&mut db.list_grpc_connections(wid)?.into_iter().map(Into::into).collect());
        l.append(&mut db.list_grpc_requests(wid)?.into_iter().map(Into::into).collect());
        l.append(&mut db.list_http_requests(wid)?.into_iter().map(Into::into).collect());
        l.append(&mut db.list_http_responses(wid, None)?.into_iter().map(Into::into).collect());
        l.append(&mut db.list_websocket_connections(wid)?.into_iter().map(Into::into).collect());
        l.append(&mut db.list_websocket_requests(wid)?.into_iter().map(Into::into).collect());
        l.append(&mut db.list_workspace_metas(wid)?.into_iter().map(Into::into).collect());
    }

    let j = serde_json::to_string(&l)?;

    Ok(escape_str_for_webview(&j))
}

fn escape_str_for_webview(input: &str) -> String {
    input
        .chars()
        .map(|c| {
            let code = c as u32;
            // ASCII passes through unchanged
            if code <= 0x7F {
                c.to_string()
            // Other BMP characters become a single \uXXXX escape
            } else if code <= 0xFFFF {
                format!("\\u{:04X}", code)
            // Characters beyond the BMP are encoded as a UTF-16 surrogate pair
            } else {
                let high = ((code - 0x10000) >> 10) + 0xD800;
                let low = ((code - 0x10000) & 0x3FF) + 0xDC00;
                format!("\\u{:04X}\\u{:04X}", high, low)
            }
        })
        .collect()
}
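// Worked examples for the escaping above (values computed from the Unicode
// scalar definitions, not taken from the diff): ASCII passes through, a BMP
// character becomes one \uXXXX escape, and an astral character becomes a
// UTF-16 surrogate pair.
#[cfg(test)]
mod escape_example {
    #[test]
    fn escapes_bmp_and_astral_characters() {
        assert_eq!(super::escape_str_for_webview("hi"), "hi");
        // U+20AC EURO SIGN is in the BMP.
        assert_eq!(super::escape_str_for_webview("€"), "\\u20AC");
        // U+1F600 GRINNING FACE: (0x1F600 - 0x10000) = 0xF600;
        // high = 0xD800 + (0xF600 >> 10) = 0xD83D, low = 0xDC00 + (0xF600 & 0x3FF) = 0xDE00.
        assert_eq!(super::escape_str_for_webview("😀"), "\\uD83D\\uDE00");
    }
}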

/// Initialize database managers as a plugin (for initialization order).
/// Commands are in the main invoke_handler.
/// This must be registered before other plugins that depend on the database.
pub fn init<R: Runtime>() -> TauriPlugin<R> {
    tauri::plugin::Builder::new("yaak-models-db")
        .setup(|app_handle, _api| {
            let app_path = app_handle.path().app_data_dir().unwrap();
            let db_path = app_path.join("db.sqlite");
            let blob_path = app_path.join("blobs.sqlite");

            let (query_manager, blob_manager, rx) =
                match yaak_models::init_standalone(&db_path, &blob_path) {
                    Ok(result) => result,
                    Err(e) => {
                        app_handle
                            .dialog()
                            .message(e.to_string())
                            .kind(MessageDialogKind::Error)
                            .blocking_show();
                        return Err(Box::from(e.to_string()));
                    }
                };

            let db = query_manager.connect();
            if let Err(err) = db.prune_model_changes_older_than_hours(MODEL_CHANGES_RETENTION_HOURS)
            {
                error!("Failed to prune model_changes rows on startup: {err:?}");
            }
            // Only stream writes that happen after this app launch.
            let cursor = ModelChangeCursor::from_launch_time();

            let poll_query_manager = query_manager.clone();

            app_handle.manage(query_manager);
            app_handle.manage(blob_manager);

            // Poll model_changes so all writers (including external CLI processes) update the UI.
            let app_handle_poll = app_handle.clone();
            let query_manager = poll_query_manager;
            tauri::async_runtime::spawn(async move {
                run_model_change_poller(query_manager, app_handle_poll, cursor).await;
            });

            // Fast path for local app writes initiated by frontend windows. This keeps the
            // current sync-model UX snappy, while DB polling handles external writers (CLI).
            let app_handle_local = app_handle.clone();
            tauri::async_runtime::spawn(async move {
                for payload in rx {
                    if !matches!(payload.update_source, UpdateSource::Window { .. }) {
                        continue;
                    }
                    if let Err(err) = app_handle_local.emit("model_write", payload) {
                        error!("Failed to emit local model_write event: {err:?}");
                    }
                }
            });

            Ok(())
        })
        .build()
}
@@ -1,364 +0,0 @@
//! Tauri-specific plugin management code.
//!
//! This module contains all Tauri integration for the plugin system:
//! - Plugin initialization and lifecycle management
//! - Tauri commands for plugin search/install/uninstall
//! - Plugin update checking

use crate::PluginContextExt;
use crate::error::Result;
use crate::models_ext::QueryManagerExt;
use log::{error, info, warn};
use serde::Serialize;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::{Duration, Instant};
use tauri::path::BaseDirectory;
use tauri::plugin::{Builder, TauriPlugin};
use tauri::{
    AppHandle, Emitter, Manager, RunEvent, Runtime, State, WebviewWindow, WindowEvent, command,
    is_dev,
};
use tokio::sync::Mutex;
use ts_rs::TS;
use yaak_api::yaak_api_client;
use yaak_models::models::Plugin;
use yaak_models::util::UpdateSource;
use yaak_plugins::api::{
    PluginNameVersion, PluginSearchResponse, PluginUpdatesResponse, check_plugin_updates,
    search_plugins,
};
use yaak_plugins::events::{Color, Icon, PluginContext, ShowToastRequest};
use yaak_plugins::install::{delete_and_uninstall, download_and_install};
use yaak_plugins::manager::PluginManager;
use yaak_plugins::plugin_meta::get_plugin_meta;

static EXITING: AtomicBool = AtomicBool::new(false);

// ============================================================================
// Plugin Updater
// ============================================================================

const MAX_UPDATE_CHECK_HOURS: u64 = 12;

pub struct PluginUpdater {
    last_check: Option<Instant>,
}

#[derive(Debug, Clone, PartialEq, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export, export_to = "index.ts")]
pub struct PluginUpdateNotification {
    pub update_count: usize,
    pub plugins: Vec<PluginUpdateInfo>,
}

#[derive(Debug, Clone, PartialEq, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export, export_to = "index.ts")]
pub struct PluginUpdateInfo {
    pub name: String,
    pub current_version: String,
    pub latest_version: String,
}

impl PluginUpdater {
    pub fn new() -> Self {
        Self { last_check: None }
    }

    pub async fn check_now<R: Runtime>(&mut self, window: &WebviewWindow<R>) -> Result<bool> {
        self.last_check = Some(Instant::now());

        info!("Checking for plugin updates");

        let app_version = window.app_handle().package_info().version.to_string();
        let http_client = yaak_api_client(&app_version)?;
        let plugins = window.app_handle().db().list_plugins()?;
        let updates = check_plugin_updates(&http_client, plugins.clone()).await?;

        if updates.plugins.is_empty() {
            info!("No plugin updates available");
            return Ok(false);
        }

        // Get current plugin versions to build notification
        let mut update_infos = Vec::new();

        for update in &updates.plugins {
            if let Some(plugin) = plugins.iter().find(|p| {
                if let Ok(meta) = get_plugin_meta(&std::path::Path::new(&p.directory)) {
                    meta.name == update.name
                } else {
                    false
                }
            }) {
                if let Ok(meta) = get_plugin_meta(&std::path::Path::new(&plugin.directory)) {
                    update_infos.push(PluginUpdateInfo {
                        name: update.name.clone(),
                        current_version: meta.version,
                        latest_version: update.version.clone(),
                    });
                }
            }
        }

        let notification =
            PluginUpdateNotification { update_count: update_infos.len(), plugins: update_infos };

        info!("Found {} plugin update(s)", notification.update_count);

        if let Err(e) = window.emit_to(window.label(), "plugin_updates_available", &notification) {
            error!("Failed to emit plugin_updates_available event: {}", e);
        }

        Ok(true)
    }

    pub async fn maybe_check<R: Runtime>(&mut self, window: &WebviewWindow<R>) -> Result<bool> {
        let update_period_seconds = MAX_UPDATE_CHECK_HOURS * 60 * 60;

        if let Some(i) = self.last_check
            && i.elapsed().as_secs() < update_period_seconds
        {
            return Ok(false);
        }

        self.check_now(window).await
    }
}

// ============================================================================
// Tauri Commands
// ============================================================================

#[command]
pub async fn cmd_plugins_search<R: Runtime>(
    app_handle: AppHandle<R>,
    query: &str,
) -> Result<PluginSearchResponse> {
    let app_version = app_handle.package_info().version.to_string();
    let http_client = yaak_api_client(&app_version)?;
    Ok(search_plugins(&http_client, query).await?)
}

#[command]
pub async fn cmd_plugins_install<R: Runtime>(
    window: WebviewWindow<R>,
    name: &str,
    version: Option<String>,
) -> Result<()> {
    let plugin_manager = Arc::new((*window.state::<PluginManager>()).clone());
    let app_version = window.app_handle().package_info().version.to_string();
    let http_client = yaak_api_client(&app_version)?;
    let query_manager = window.state::<yaak_models::query_manager::QueryManager>();
    let plugin_context = window.plugin_context();
    download_and_install(
        plugin_manager,
        &query_manager,
        &http_client,
        &plugin_context,
        name,
        version,
    )
    .await?;
    Ok(())
}

#[command]
pub async fn cmd_plugins_uninstall<R: Runtime>(
    plugin_id: &str,
    window: WebviewWindow<R>,
) -> Result<Plugin> {
    let plugin_manager = Arc::new((*window.state::<PluginManager>()).clone());
    let query_manager = window.state::<yaak_models::query_manager::QueryManager>();
    let plugin_context = window.plugin_context();
    Ok(delete_and_uninstall(plugin_manager, &query_manager, &plugin_context, plugin_id).await?)
}

#[command]
pub async fn cmd_plugins_updates<R: Runtime>(
    app_handle: AppHandle<R>,
) -> Result<PluginUpdatesResponse> {
    let app_version = app_handle.package_info().version.to_string();
    let http_client = yaak_api_client(&app_version)?;
    let plugins = app_handle.db().list_plugins()?;
    Ok(check_plugin_updates(&http_client, plugins).await?)
}

#[command]
pub async fn cmd_plugins_update_all<R: Runtime>(
    window: WebviewWindow<R>,
) -> Result<Vec<PluginNameVersion>> {
    let app_version = window.app_handle().package_info().version.to_string();
    let http_client = yaak_api_client(&app_version)?;
    let plugins = window.db().list_plugins()?;

    // Get list of available updates (already filtered to only registry plugins)
    let updates = check_plugin_updates(&http_client, plugins).await?;

    if updates.plugins.is_empty() {
        return Ok(Vec::new());
    }

    let plugin_manager = Arc::new((*window.state::<PluginManager>()).clone());
    let query_manager = window.state::<yaak_models::query_manager::QueryManager>();
    let plugin_context = window.plugin_context();

    let mut updated = Vec::new();

    for update in updates.plugins {
        info!("Updating plugin: {} to version {}", update.name, update.version);
        match download_and_install(
            plugin_manager.clone(),
            &query_manager,
            &http_client,
            &plugin_context,
            &update.name,
            Some(update.version.clone()),
        )
        .await
        {
            Ok(_) => {
                info!("Successfully updated plugin: {}", update.name);
                updated.push(update.clone());
            }
            Err(e) => {
                log::error!("Failed to update plugin {}: {:?}", update.name, e);
            }
        }
    }

    Ok(updated)
}

// ============================================================================
// Tauri Plugin Initialization
// ============================================================================

pub fn init<R: Runtime>() -> TauriPlugin<R> {
    Builder::new("yaak-plugins")
        .setup(|app_handle, _| {
            // Resolve paths for plugin manager
            let vendored_plugin_dir = app_handle
                .path()
                .resolve("vendored/plugins", BaseDirectory::Resource)
                .expect("failed to resolve plugin directory resource");

            let installed_plugin_dir = app_handle
                .path()
                .app_data_dir()
                .expect("failed to get app data dir")
                .join("installed-plugins");

            #[cfg(target_os = "windows")]
            let node_bin_name = "yaaknode.exe";
            #[cfg(not(target_os = "windows"))]
            let node_bin_name = "yaaknode";

            let node_bin_path = app_handle
                .path()
                .resolve(format!("vendored/node/{}", node_bin_name), BaseDirectory::Resource)
                .expect("failed to resolve yaaknode binary");

            let plugin_runtime_main = app_handle
                .path()
                .resolve("vendored/plugin-runtime", BaseDirectory::Resource)
                .expect("failed to resolve plugin runtime")
                .join("index.cjs");

            let dev_mode = is_dev();

            // Create plugin manager asynchronously
            let app_handle_clone = app_handle.clone();
            tauri::async_runtime::block_on(async move {
                let manager = PluginManager::new(
                    vendored_plugin_dir,
                    installed_plugin_dir,
                    node_bin_path,
                    plugin_runtime_main,
                    dev_mode,
                )
                .await;

                // Initialize all plugins after manager is created
                let bundled_dirs = manager
                    .list_bundled_plugin_dirs()
                    .await
                    .expect("Failed to list bundled plugins");

                // Ensure all bundled plugins make it into the database
                let db = app_handle_clone.db();
                for dir in &bundled_dirs {
                    if db.get_plugin_by_directory(dir).is_none() {
                        db.upsert_plugin(
                            &Plugin {
                                directory: dir.clone(),
                                enabled: true,
                                url: None,
                                ..Default::default()
                            },
                            &UpdateSource::Background,
                        )
                        .expect("Failed to upsert bundled plugin");
                    }
                }

                // Get all plugins from database and initialize
                let plugins = db.list_plugins().expect("Failed to list plugins from database");
                drop(db); // Explicitly drop the connection before await

                let errors =
                    manager.initialize_all_plugins(plugins, &PluginContext::new_empty()).await;

                // Show toast for any failed plugins
                for (plugin_dir, error_msg) in errors {
                    let plugin_name = plugin_dir.split('/').last().unwrap_or(&plugin_dir);
                    let toast = ShowToastRequest {
                        message: format!("Failed to start plugin '{}': {}", plugin_name, error_msg),
                        color: Some(Color::Danger),
                        icon: Some(Icon::AlertTriangle),
                        timeout: Some(10000),
                    };
                    if let Err(emit_err) = app_handle_clone.emit("show_toast", toast) {
                        error!("Failed to emit toast for plugin error: {emit_err:?}");
                    }
                }

                app_handle_clone.manage(manager);
            });

            let plugin_updater = PluginUpdater::new();
            app_handle.manage(Mutex::new(plugin_updater));

            Ok(())
        })
        .on_event(|app, e| match e {
            RunEvent::ExitRequested { api, .. } => {
                if EXITING.swap(true, Ordering::SeqCst) {
                    return; // Only exit once to prevent infinite recursion
                }
                api.prevent_exit();
                tauri::async_runtime::block_on(async move {
                    info!("Exiting plugin runtime due to app exit");
                    let manager: State<PluginManager> = app.state();
                    manager.terminate().await;
                    app.exit(0);
                });
            }
            RunEvent::WindowEvent { event: WindowEvent::Focused(true), label, .. } => {
                // Check for plugin updates on window focus
                let w = app.get_webview_window(&label).unwrap();
                let h = app.clone();
                tauri::async_runtime::spawn(async move {
                    tokio::time::sleep(Duration::from_secs(3)).await;
                    let val: State<'_, Mutex<PluginUpdater>> = h.state();
                    if let Err(e) = val.lock().await.maybe_check(&w).await {
                        warn!("Failed to check for plugin updates {e:?}");
                    }
                });
            }
            _ => {}
        })
        .build()
}
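The updater above throttles registry calls to at most once per `MAX_UPDATE_CHECK_HOURS` by stamping an `Instant` in `check_now` and short-circuiting in `maybe_check`. A minimal standalone sketch of that pattern, using only the standard library (the `Throttle` type and its names are illustrative, not from the crate):

```rust
use std::time::{Duration, Instant};

/// Runs an expensive check at most once per `period`.
struct Throttle {
    last: Option<Instant>,
    period: Duration,
}

impl Throttle {
    fn new(period: Duration) -> Self {
        Self { last: None, period }
    }

    /// Returns true if the caller should run the check now.
    fn should_run(&mut self) -> bool {
        match self.last {
            // Still inside the window: skip the check entirely
            Some(t) if t.elapsed() < self.period => false,
            // First run, or the window elapsed: stamp the time and run
            _ => {
                self.last = Some(Instant::now());
                true
            }
        }
    }
}

fn main() {
    let mut throttle = Throttle::new(Duration::from_secs(12 * 60 * 60));
    assert!(throttle.should_run()); // first call always runs
    assert!(!throttle.should_run()); // second call within the window is skipped
}
```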
@@ -1,85 +0,0 @@
use log::info;
use serde_json::Value;
use std::collections::BTreeMap;
pub use yaak::render::render_http_request;
use yaak_models::models::{Environment, GrpcRequest, HttpRequestHeader};
use yaak_models::render::make_vars_hashmap;
use yaak_templates::{RenderOptions, TemplateCallback, parse_and_render, render_json_value_raw};

pub async fn render_template<T: TemplateCallback>(
    template: &str,
    environment_chain: Vec<Environment>,
    cb: &T,
    opt: &RenderOptions,
) -> yaak_templates::error::Result<String> {
    let vars = &make_vars_hashmap(environment_chain);
    parse_and_render(template, vars, cb, &opt).await
}

pub async fn render_json_value<T: TemplateCallback>(
    value: Value,
    environment_chain: Vec<Environment>,
    cb: &T,
    opt: &RenderOptions,
) -> yaak_templates::error::Result<Value> {
    let vars = &make_vars_hashmap(environment_chain);
    render_json_value_raw(value, vars, cb, opt).await
}

pub async fn render_grpc_request<T: TemplateCallback>(
    r: &GrpcRequest,
    environment_chain: Vec<Environment>,
    cb: &T,
    opt: &RenderOptions,
) -> yaak_templates::error::Result<GrpcRequest> {
    let vars = &make_vars_hashmap(environment_chain);

    let mut metadata = Vec::new();
    for p in r.metadata.clone() {
        if !p.enabled {
            continue;
        }
        metadata.push(HttpRequestHeader {
            enabled: p.enabled,
            name: parse_and_render(p.name.as_str(), vars, cb, &opt).await?,
            value: parse_and_render(p.value.as_str(), vars, cb, &opt).await?,
            id: p.id,
        })
    }

    let authentication = {
        let mut disabled = false;
        let mut auth = BTreeMap::new();
        match r.authentication.get("disabled") {
            Some(Value::Bool(true)) => {
                disabled = true;
            }
            Some(Value::String(tmpl)) => {
                disabled = parse_and_render(tmpl.as_str(), vars, cb, &opt)
                    .await
                    .unwrap_or_default()
                    .is_empty();
                info!(
                    "Rendering authentication.disabled as a template: {disabled} from \"{tmpl}\""
                );
            }
            _ => {}
        }
        if disabled {
            auth.insert("disabled".to_string(), Value::Bool(true));
        } else {
            for (k, v) in r.authentication.clone() {
                if k == "disabled" {
                    auth.insert(k, Value::Bool(false));
                } else {
                    auth.insert(k, render_json_value_raw(v, vars, cb, &opt).await?);
                }
            }
        }
        auth
    };

    let url = parse_and_render(r.url.as_str(), vars, cb, &opt).await?;

    Ok(GrpcRequest { url, metadata, authentication, ..r.to_owned() })
}
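The `disabled` handling above treats a literal `true` as disabled and a string as a template whose empty rendering disables auth. A hedged sketch of just that decision, with a plain closure standing in for the async `parse_and_render` (the map type and template syntax shown are illustrative):

```rust
use serde_json::{Value, json};

/// Mirrors the `disabled` resolution in `render_grpc_request`: a literal
/// `true` disables auth, and a string is rendered as a template that
/// disables auth when it renders to an empty string.
fn auth_disabled(auth: &serde_json::Map<String, Value>, render: impl Fn(&str) -> String) -> bool {
    match auth.get("disabled") {
        Some(Value::Bool(true)) => true,
        Some(Value::String(tmpl)) => render(tmpl).is_empty(),
        _ => false,
    }
}

fn main() {
    let auth = json!({ "disabled": "${[ enabled ]}" });
    let auth = auth.as_object().unwrap();
    // A renderer that resolves the template to an empty string means "disabled"
    assert!(auth_disabled(auth, |_| String::new()));
    // A non-empty rendering leaves authentication enabled
    assert!(!auth_disabled(auth, |_| "true".to_string()));
}
```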
@@ -1,9 +0,0 @@
[package]
name = "yaak-tauri-utils"
version = "0.1.0"
edition = "2024"
publish = false

[dependencies]
tauri = { workspace = true }
regex = "1.11.0"
@@ -1 +0,0 @@
pub mod window;
@@ -1,12 +0,0 @@
[package]
name = "yaak-api"
version = "0.1.0"
edition = "2024"
publish = false

[dependencies]
log = { workspace = true }
reqwest = { workspace = true, features = ["gzip"] }
sysproxy = "0.3"
thiserror = { workspace = true }
yaak-common = { workspace = true }
@@ -1,9 +0,0 @@
use thiserror::Error;

#[derive(Error, Debug)]
pub enum Error {
    #[error(transparent)]
    ReqwestError(#[from] reqwest::Error),
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -1,70 +0,0 @@
mod error;

pub use error::{Error, Result};

use log::{debug, warn};
use reqwest::Client;
use reqwest::header::{HeaderMap, HeaderValue};
use std::time::Duration;
use yaak_common::platform::{get_ua_arch, get_ua_platform};

/// Build a reqwest Client configured for Yaak's own API calls.
///
/// Includes a custom User-Agent, JSON accept header, 20s timeout, gzip,
/// and automatic OS-level proxy detection via sysproxy.
pub fn yaak_api_client(version: &str) -> Result<Client> {
    let platform = get_ua_platform();
    let arch = get_ua_arch();
    let ua = format!("Yaak/{version} ({platform}; {arch})");

    let mut default_headers = HeaderMap::new();
    default_headers.insert("Accept", HeaderValue::from_str("application/json").unwrap());

    let mut builder = reqwest::ClientBuilder::new()
        .timeout(Duration::from_secs(20))
        .default_headers(default_headers)
        .gzip(true)
        .user_agent(ua);

    if let Some(sys) = get_enabled_system_proxy() {
        let proxy_url = format!("http://{}:{}", sys.host, sys.port);
        match reqwest::Proxy::all(&proxy_url) {
            Ok(p) => {
                let p = if !sys.bypass.is_empty() {
                    p.no_proxy(reqwest::NoProxy::from_string(&sys.bypass))
                } else {
                    p
                };
                builder = builder.proxy(p);
            }
            Err(e) => {
                warn!("Failed to configure system proxy: {e}");
            }
        }
    }

    Ok(builder.build()?)
}

/// Returns the system proxy URL if one is enabled, e.g. `http://host:port`.
pub fn get_system_proxy_url() -> Option<String> {
    let sys = get_enabled_system_proxy()?;
    Some(format!("http://{}:{}", sys.host, sys.port))
}

fn get_enabled_system_proxy() -> Option<sysproxy::Sysproxy> {
    match sysproxy::Sysproxy::get_system_proxy() {
        Ok(sys) if sys.enable => {
            debug!("Detected system proxy: http://{}:{}", sys.host, sys.port);
            Some(sys)
        }
        Ok(_) => {
            debug!("System proxy detected but not enabled");
            None
        }
        Err(e) => {
            debug!("Could not detect system proxy: {e}");
            None
        }
    }
}
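Usage is a one-liner from either the Tauri app or the CLI. A sketch, assuming a `tokio` runtime is available; the version string and endpoint URL are illustrative, not a documented API route:

```rust
use yaak_api::yaak_api_client;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // The client already carries the Yaak User-Agent, JSON Accept header,
    // 20s timeout, gzip, and any detected system proxy.
    let client = yaak_api_client("0.1.0")?;
    let resp = client.get("https://api.yaak.app/plugins/search?q=xml").send().await?;
    println!("status: {}", resp.status());
    Ok(())
}
```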
@@ -1,9 +0,0 @@
[package]
name = "yaak-common"
version = "0.1.0"
edition = "2024"
publish = false

[dependencies]
serde_json = { workspace = true }
tokio = { workspace = true, features = ["process"] }
@@ -1,16 +0,0 @@
use std::ffi::OsStr;

#[cfg(target_os = "windows")]
const CREATE_NO_WINDOW: u32 = 0x0800_0000;

/// Creates a new `tokio::process::Command` that won't spawn a console window on Windows.
pub fn new_xplatform_command<S: AsRef<OsStr>>(program: S) -> tokio::process::Command {
    #[allow(unused_mut)]
    let mut cmd = tokio::process::Command::new(program);
    #[cfg(target_os = "windows")]
    {
        use std::os::windows::process::CommandExt;
        cmd.creation_flags(CREATE_NO_WINDOW);
    }
    cmd
}
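A short usage sketch: probing for `git` the same way `yaak-git` does, without flashing a console window on Windows (assumes a `tokio` runtime):

```rust
use yaak_common::command::new_xplatform_command;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // On Windows this carries CREATE_NO_WINDOW; elsewhere it is a plain Command
    let mut cmd = new_xplatform_command("git");
    let output = cmd.arg("--version").output().await?;
    println!("{}", String::from_utf8_lossy(&output.stdout).trim());
    Ok(())
}
```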
@@ -1,3 +0,0 @@
pub mod command;
pub mod platform;
pub mod serde;
@@ -1,9 +0,0 @@
[package]
name = "yaak-core"
version = "0.0.0"
edition = "2024"
authors = ["Gregory Schier"]
publish = false

[dependencies]
thiserror = { workspace = true }
@@ -1,56 +0,0 @@
use std::path::PathBuf;

/// Context for a workspace operation.
///
/// In Tauri, this is extracted from the WebviewWindow URL.
/// In CLI, this is constructed from command arguments or config.
#[derive(Debug, Clone, Default)]
pub struct WorkspaceContext {
    pub workspace_id: Option<String>,
    pub environment_id: Option<String>,
    pub cookie_jar_id: Option<String>,
    pub request_id: Option<String>,
}

impl WorkspaceContext {
    pub fn new() -> Self {
        Self::default()
    }

    pub fn with_workspace(mut self, workspace_id: impl Into<String>) -> Self {
        self.workspace_id = Some(workspace_id.into());
        self
    }

    pub fn with_environment(mut self, environment_id: impl Into<String>) -> Self {
        self.environment_id = Some(environment_id.into());
        self
    }

    pub fn with_cookie_jar(mut self, cookie_jar_id: impl Into<String>) -> Self {
        self.cookie_jar_id = Some(cookie_jar_id.into());
        self
    }

    pub fn with_request(mut self, request_id: impl Into<String>) -> Self {
        self.request_id = Some(request_id.into());
        self
    }
}

/// Application context trait for accessing app-level resources.
///
/// This abstracts over Tauri's `AppHandle` for path resolution and app identity.
/// Implemented by Tauri's AppHandle and by CLI's own context struct.
pub trait AppContext: Send + Sync + Clone {
    /// Returns the path to the application data directory.
    /// This is where the database and other persistent data are stored.
    fn app_data_dir(&self) -> PathBuf;

    /// Returns the application identifier (e.g., "app.yaak.desktop").
    /// Used for keyring access and other platform-specific features.
    fn app_identifier(&self) -> &str;

    /// Returns true if running in development mode.
    fn is_dev(&self) -> bool;
}
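A sketch of how a CLI invocation might map onto the builder above (the flag names and IDs are made up for illustration):

```rust
use yaak_core::WorkspaceContext;

fn main() {
    // Something like `yaak send --workspace wk_123 --environment env_dev req_456`
    // maps onto the builder directly:
    let ctx = WorkspaceContext::new()
        .with_workspace("wk_123")
        .with_environment("env_dev")
        .with_request("req_456");
    assert_eq!(ctx.workspace_id.as_deref(), Some("wk_123"));
    assert!(ctx.cookie_jar_id.is_none()); // unset fields stay None
}
```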
@@ -1,15 +0,0 @@
use thiserror::Error;

pub type Result<T> = std::result::Result<T, Error>;

#[derive(Error, Debug)]
pub enum Error {
    #[error("Missing required context: {0}")]
    MissingContext(String),

    #[error("Configuration error: {0}")]
    Config(String),

    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),
}
@@ -1,10 +0,0 @@
//! Core abstractions for Yaak that work without Tauri.
//!
//! This crate provides foundational types and traits that allow Yaak's
//! business logic to run in both Tauri (desktop app) and CLI contexts.

mod context;
mod error;

pub use context::{AppContext, WorkspaceContext};
pub use error::{Error, Result};
@@ -1,17 +0,0 @@
import { invoke } from '@tauri-apps/api/core';

export function enableEncryption(workspaceId: string) {
  return invoke<void>('cmd_enable_encryption', { workspaceId });
}

export function revealWorkspaceKey(workspaceId: string) {
  return invoke<string>('cmd_reveal_workspace_key', { workspaceId });
}

export function setWorkspaceKey(args: { workspaceId: string; key: string }) {
  return invoke<void>('cmd_set_workspace_key', args);
}

export function disableEncryption(workspaceId: string) {
  return invoke<void>('cmd_disable_encryption', { workspaceId });
}
@@ -1,7 +0,0 @@
extern crate core;

pub mod encryption;
pub mod error;
pub mod manager;
mod master_key;
mod workspace_key;
@@ -1,261 +0,0 @@
import { useQuery } from '@tanstack/react-query';
import { invoke } from '@tauri-apps/api/core';
import { createFastMutation } from '@yaakapp/app/hooks/useFastMutation';
import { queryClient } from '@yaakapp/app/lib/queryClient';
import { useMemo } from 'react';
import { BranchDeleteResult, CloneResult, GitCommit, GitRemote, GitStatusSummary, PullResult, PushResult } from './bindings/gen_git';
import { showToast } from '@yaakapp/app/lib/toast';

export * from './bindings/gen_git';
export * from './bindings/gen_models';

export interface GitCredentials {
  username: string;
  password: string;
}

export type DivergedStrategy = 'force_reset' | 'merge' | 'cancel';

export type UncommittedChangesStrategy = 'reset' | 'cancel';

export interface GitCallbacks {
  addRemote: () => Promise<GitRemote | null>;
  promptCredentials: (
    result: Extract<PushResult, { type: 'needs_credentials' }>,
  ) => Promise<GitCredentials | null>;
  promptDiverged: (
    result: Extract<PullResult, { type: 'diverged' }>,
  ) => Promise<DivergedStrategy>;
  promptUncommittedChanges: () => Promise<UncommittedChangesStrategy>;
  forceSync: () => Promise<void>;
}

const onSuccess = () => queryClient.invalidateQueries({ queryKey: ['git'] });

export function useGit(dir: string, callbacks: GitCallbacks, refreshKey?: string) {
  const mutations = useMemo(() => gitMutations(dir, callbacks), [dir, callbacks]);
  const fetchAll = useQuery<void, string>({
    queryKey: ['git', 'fetch_all', dir, refreshKey],
    queryFn: () => invoke('cmd_git_fetch_all', { dir }),
    refetchInterval: 10 * 60_000,
  });
  return [
    {
      remotes: useQuery<GitRemote[], string>({
        queryKey: ['git', 'remotes', dir, refreshKey],
        queryFn: () => getRemotes(dir),
        placeholderData: (prev) => prev,
      }),
      log: useQuery<GitCommit[], string>({
        queryKey: ['git', 'log', dir, refreshKey],
        queryFn: () => invoke('cmd_git_log', { dir }),
        placeholderData: (prev) => prev,
      }),
      status: useQuery<GitStatusSummary, string>({
        refetchOnMount: true,
        queryKey: ['git', 'status', dir, refreshKey, fetchAll.dataUpdatedAt],
        queryFn: () => invoke('cmd_git_status', { dir }),
        placeholderData: (prev) => prev,
      }),
    },
    mutations,
  ] as const;
}

export const gitMutations = (dir: string, callbacks: GitCallbacks) => {
  const push = async () => {
    const remotes = await getRemotes(dir);
    if (remotes.length === 0) {
      const remote = await callbacks.addRemote();
      if (remote == null) throw new Error('No remote found');
    }

    const result = await invoke<PushResult>('cmd_git_push', { dir });
    if (result.type !== 'needs_credentials') return result;

    // Needs credentials, prompt for them
    const creds = await callbacks.promptCredentials(result);
    if (creds == null) throw new Error('Canceled');

    await invoke('cmd_git_add_credential', {
      remoteUrl: result.url,
      username: creds.username,
      password: creds.password,
    });

    // Push again
    return invoke<PushResult>('cmd_git_push', { dir });
  };

  const handleError = (err: unknown) => {
    showToast({
      id: `${err}`,
      message: `${err}`,
      color: 'danger',
      timeout: 5000,
    });
  };

  return {
    init: createFastMutation<void, string, void>({
      mutationKey: ['git', 'init'],
      mutationFn: () => invoke('cmd_git_initialize', { dir }),
      onSuccess,
    }),
    add: createFastMutation<void, string, { relaPaths: string[] }>({
      mutationKey: ['git', 'add', dir],
      mutationFn: (args) => invoke('cmd_git_add', { dir, ...args }),
      onSuccess,
    }),
    addRemote: createFastMutation<GitRemote, string, GitRemote>({
      mutationKey: ['git', 'add-remote'],
      mutationFn: (args) => invoke('cmd_git_add_remote', { dir, ...args }),
      onSuccess,
    }),
    rmRemote: createFastMutation<void, string, { name: string }>({
      mutationKey: ['git', 'rm-remote', dir],
      mutationFn: (args) => invoke('cmd_git_rm_remote', { dir, ...args }),
      onSuccess,
    }),
    createBranch: createFastMutation<void, string, { branch: string; base?: string }>({
      mutationKey: ['git', 'branch', dir],
      mutationFn: (args) => invoke('cmd_git_branch', { dir, ...args }),
      onSuccess,
    }),
    mergeBranch: createFastMutation<void, string, { branch: string }>({
      mutationKey: ['git', 'merge', dir],
      mutationFn: (args) => invoke('cmd_git_merge_branch', { dir, ...args }),
      onSuccess,
    }),
    deleteBranch: createFastMutation<BranchDeleteResult, string, { branch: string, force?: boolean }>({
      mutationKey: ['git', 'delete-branch', dir],
      mutationFn: (args) => invoke('cmd_git_delete_branch', { dir, ...args }),
      onSuccess,
    }),
    deleteRemoteBranch: createFastMutation<void, string, { branch: string }>({
      mutationKey: ['git', 'delete-remote-branch', dir],
      mutationFn: (args) => invoke('cmd_git_delete_remote_branch', { dir, ...args }),
      onSuccess,
    }),
    renameBranch: createFastMutation<void, string, { oldName: string, newName: string }>({
      mutationKey: ['git', 'rename-branch', dir],
      mutationFn: (args) => invoke('cmd_git_rename_branch', { dir, ...args }),
      onSuccess,
    }),
    checkout: createFastMutation<string, string, { branch: string; force: boolean }>({
      mutationKey: ['git', 'checkout', dir],
      mutationFn: (args) => invoke('cmd_git_checkout', { dir, ...args }),
      onSuccess,
    }),
    commit: createFastMutation<void, string, { message: string }>({
      mutationKey: ['git', 'commit', dir],
      mutationFn: (args) => invoke('cmd_git_commit', { dir, ...args }),
      onSuccess,
    }),
    commitAndPush: createFastMutation<PushResult, string, { message: string }>({
      mutationKey: ['git', 'commit_push', dir],
      mutationFn: async (args) => {
        await invoke('cmd_git_commit', { dir, ...args });
        return push();
      },
      onSuccess,
    }),

    push: createFastMutation<PushResult, string, void>({
      mutationKey: ['git', 'push', dir],
      mutationFn: push,
      onSuccess,
    }),
    pull: createFastMutation<PullResult, string, void>({
      mutationKey: ['git', 'pull', dir],
      async mutationFn() {
        const result = await invoke<PullResult>('cmd_git_pull', { dir });

        if (result.type === 'needs_credentials') {
          const creds = await callbacks.promptCredentials(result);
          if (creds == null) throw new Error('Canceled');

          await invoke('cmd_git_add_credential', {
            remoteUrl: result.url,
            username: creds.username,
            password: creds.password,
          });

          // Pull again after credentials
          return invoke<PullResult>('cmd_git_pull', { dir });
        }

        if (result.type === 'uncommitted_changes') {
          callbacks.promptUncommittedChanges().then(async (strategy) => {
            if (strategy === 'cancel') return;

            await invoke('cmd_git_reset_changes', { dir });
            return invoke<PullResult>('cmd_git_pull', { dir });
          }).then(async () => { onSuccess(); await callbacks.forceSync(); }, handleError);
        }

        if (result.type === 'diverged') {
          callbacks.promptDiverged(result).then((strategy) => {
            if (strategy === 'cancel') return;

            if (strategy === 'force_reset') {
              return invoke<PullResult>('cmd_git_pull_force_reset', {
                dir,
                remote: result.remote,
                branch: result.branch,
              });
            }

            return invoke<PullResult>('cmd_git_pull_merge', {
              dir,
              remote: result.remote,
              branch: result.branch,
            });
          }).then(async () => { onSuccess(); await callbacks.forceSync(); }, handleError);
        }

        return result;
      },
      onSuccess,
    }),
    unstage: createFastMutation<void, string, { relaPaths: string[] }>({
      mutationKey: ['git', 'unstage', dir],
      mutationFn: (args) => invoke('cmd_git_unstage', { dir, ...args }),
      onSuccess,
    }),
    resetChanges: createFastMutation<void, string, void>({
      mutationKey: ['git', 'reset-changes', dir],
      mutationFn: () => invoke('cmd_git_reset_changes', { dir }),
      onSuccess,
    }),
  } as const;
};

async function getRemotes(dir: string) {
  return invoke<GitRemote[]>('cmd_git_remotes', { dir });
}

/**
 * Clone a git repository, prompting for credentials if needed.
 */
export async function gitClone(
  url: string,
  dir: string,
  promptCredentials: (args: { url: string; error: string | null }) => Promise<GitCredentials | null>,
): Promise<CloneResult> {
  const result = await invoke<CloneResult>('cmd_git_clone', { url, dir });
  if (result.type !== 'needs_credentials') return result;

  // Prompt for credentials
  const creds = await promptCredentials({ url: result.url, error: result.error });
  if (creds == null) return { type: 'cancelled' };

  // Store credentials and retry
  await invoke('cmd_git_add_credential', {
    remoteUrl: result.url,
    username: creds.username,
    password: creds.password,
  });

  return invoke<CloneResult>('cmd_git_clone', { url, dir });
}
@@ -1,30 +0,0 @@
use crate::error::Error::GitNotFound;
use crate::error::Result;
use std::path::Path;
use std::process::Stdio;
use tokio::process::Command;
use yaak_common::command::new_xplatform_command;

/// Create a git command that runs in the specified directory
pub(crate) async fn new_binary_command(dir: &Path) -> Result<Command> {
    let mut cmd = new_binary_command_global().await?;
    cmd.arg("-C").arg(dir);
    Ok(cmd)
}

/// Create a git command without a specific directory (for global operations)
pub(crate) async fn new_binary_command_global() -> Result<Command> {
    // 1. Probe that `git` exists and is runnable
    let mut probe = new_xplatform_command("git");
    probe.arg("--version").stdin(Stdio::null()).stdout(Stdio::null()).stderr(Stdio::null());

    let status = probe.status().await.map_err(|_| GitNotFound)?;

    if !status.success() {
        return Err(GitNotFound);
    }

    // 2. Build the reusable git command
    let cmd = new_xplatform_command("git");
    Ok(cmd)
}
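Callers inside `yaak-git` compose these helpers as probe-once, then run repo-scoped subcommands. A hedged sketch (the `current_branch` helper is hypothetical, and it assumes the crate's `Error` converts from `std::io::Error`, as the other call sites in this diff suggest):

```rust
use std::path::Path;

/// Hypothetical helper: ask git for the current branch of `dir`.
async fn current_branch(dir: &Path) -> crate::error::Result<String> {
    // new_binary_command already probed `git --version` and added `-C <dir>`
    let mut cmd = crate::binary::new_binary_command(dir).await?;
    let out = cmd.args(["rev-parse", "--abbrev-ref", "HEAD"]).output().await?;
    Ok(String::from_utf8_lossy(&out.stdout).trim().to_string())
}
```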
@@ -1,153 +0,0 @@
use serde::{Deserialize, Serialize};
use ts_rs::TS;

use crate::binary::new_binary_command;
use crate::error::Error::GenericError;
use crate::error::Result;
use std::path::Path;

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, TS)]
#[serde(rename_all = "snake_case", tag = "type")]
#[ts(export, export_to = "gen_git.ts")]
pub enum BranchDeleteResult {
    Success { message: String },
    NotFullyMerged,
}

pub async fn git_checkout_branch(dir: &Path, branch_name: &str, force: bool) -> Result<String> {
    let branch_name = branch_name.trim_start_matches("origin/");

    let mut args = vec!["checkout"];
    if force {
        args.push("--force");
    }
    args.push(branch_name);

    let out = new_binary_command(dir)
        .await?
        .args(&args)
        .output()
        .await
        .map_err(|e| GenericError(format!("failed to run git checkout: {e}")))?;

    let stdout = String::from_utf8_lossy(&out.stdout);
    let stderr = String::from_utf8_lossy(&out.stderr);
    let combined = format!("{}{}", stdout, stderr);

    if !out.status.success() {
        return Err(GenericError(format!("Failed to checkout: {}", combined.trim())));
    }

    Ok(branch_name.to_string())
}

pub async fn git_create_branch(dir: &Path, name: &str, base: Option<&str>) -> Result<()> {
    let mut cmd = new_binary_command(dir).await?;
    cmd.arg("branch").arg(name);
    if let Some(base_branch) = base {
        cmd.arg(base_branch);
    }

    let out =
        cmd.output().await.map_err(|e| GenericError(format!("failed to run git branch: {e}")))?;

    let stdout = String::from_utf8_lossy(&out.stdout);
    let stderr = String::from_utf8_lossy(&out.stderr);
    let combined = format!("{}{}", stdout, stderr);

    if !out.status.success() {
        return Err(GenericError(format!("Failed to create branch: {}", combined.trim())));
    }

    Ok(())
}

pub async fn git_delete_branch(dir: &Path, name: &str, force: bool) -> Result<BranchDeleteResult> {
    let mut cmd = new_binary_command(dir).await?;

    let out =
        if force { cmd.args(["branch", "-D", name]) } else { cmd.args(["branch", "-d", name]) }
            .output()
            .await
            .map_err(|e| GenericError(format!("failed to run git branch -d: {e}")))?;

    let stdout = String::from_utf8_lossy(&out.stdout);
    let stderr = String::from_utf8_lossy(&out.stderr);
    let combined = format!("{}{}", stdout, stderr);

    if !out.status.success() && stderr.to_lowercase().contains("not fully merged") {
        return Ok(BranchDeleteResult::NotFullyMerged);
    }

    if !out.status.success() {
        return Err(GenericError(format!("Failed to delete branch: {}", combined.trim())));
    }

    Ok(BranchDeleteResult::Success { message: combined })
}

pub async fn git_merge_branch(dir: &Path, name: &str) -> Result<()> {
    let out = new_binary_command(dir)
        .await?
        .args(["merge", name])
        .output()
        .await
        .map_err(|e| GenericError(format!("failed to run git merge: {e}")))?;

    let stdout = String::from_utf8_lossy(&out.stdout);
    let stderr = String::from_utf8_lossy(&out.stderr);
    let combined = format!("{}{}", stdout, stderr);

    if !out.status.success() {
        // Check for merge conflicts
        if combined.to_lowercase().contains("conflict") {
            return Err(GenericError(
                "Merge conflicts detected. Please resolve them manually.".to_string(),
            ));
        }
        return Err(GenericError(format!("Failed to merge: {}", combined.trim())));
    }

    Ok(())
}

pub async fn git_delete_remote_branch(dir: &Path, name: &str) -> Result<()> {
    // Remote branch names come in as "origin/branch-name", extract the branch name
    let branch_name = name.trim_start_matches("origin/");

    let out = new_binary_command(dir)
        .await?
        .args(["push", "origin", "--delete", branch_name])
        .output()
        .await
        .map_err(|e| GenericError(format!("failed to run git push --delete: {e}")))?;

    let stdout = String::from_utf8_lossy(&out.stdout);
    let stderr = String::from_utf8_lossy(&out.stderr);
    let combined = format!("{}{}", stdout, stderr);

    if !out.status.success() {
        return Err(GenericError(format!("Failed to delete remote branch: {}", combined.trim())));
    }

    Ok(())
}

pub async fn git_rename_branch(dir: &Path, old_name: &str, new_name: &str) -> Result<()> {
    let out = new_binary_command(dir)
        .await?
        .args(["branch", "-m", old_name, new_name])
        .output()
        .await
        .map_err(|e| GenericError(format!("failed to run git branch -m: {e}")))?;

    let stdout = String::from_utf8_lossy(&out.stdout);
    let stderr = String::from_utf8_lossy(&out.stderr);
    let combined = format!("{}{}", stdout, stderr);

    if !out.status.success() {
        return Err(GenericError(format!("Failed to rename branch: {}", combined.trim())));
    }

    Ok(())
}
@@ -1,53 +0,0 @@
use crate::binary::new_binary_command;
use crate::error::Error::GenericError;
use crate::error::Result;
use log::info;
use serde::{Deserialize, Serialize};
use std::fs;
use std::path::Path;
use ts_rs::TS;

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, TS)]
#[serde(rename_all = "snake_case", tag = "type")]
#[ts(export, export_to = "gen_git.ts")]
pub enum CloneResult {
    Success,
    Cancelled,
    NeedsCredentials { url: String, error: Option<String> },
}

pub async fn git_clone(url: &str, dir: &Path) -> Result<CloneResult> {
    let parent = dir.parent().ok_or_else(|| GenericError("Invalid clone directory".to_string()))?;
    fs::create_dir_all(parent)
        .map_err(|e| GenericError(format!("Failed to create directory: {e}")))?;
    let mut cmd = new_binary_command(parent).await?;
    cmd.args(["clone", url]).arg(dir).env("GIT_TERMINAL_PROMPT", "0");

    let out =
        cmd.output().await.map_err(|e| GenericError(format!("failed to run git clone: {e}")))?;

    let stdout = String::from_utf8_lossy(&out.stdout);
    let stderr = String::from_utf8_lossy(&out.stderr);
    let combined = format!("{}{}", stdout, stderr);
    let combined_lower = combined.to_lowercase();

    info!("Cloned status={}: {combined}", out.status);

    if !out.status.success() {
        // Check for credentials error
        if combined_lower.contains("could not read") {
            return Ok(CloneResult::NeedsCredentials { url: url.to_string(), error: None });
        }
        if combined_lower.contains("unable to access")
            || combined_lower.contains("authentication failed")
        {
            return Ok(CloneResult::NeedsCredentials {
                url: url.to_string(),
                error: Some(combined.to_string()),
            });
        }
        return Err(GenericError(format!("Failed to clone: {}", combined.trim())));
    }

    Ok(CloneResult::Success)
}
@@ -1,44 +0,0 @@
use crate::binary::new_binary_command_global;
use crate::error::Error::GenericError;
use crate::error::Result;
use std::process::Stdio;
use tokio::io::AsyncWriteExt;
use url::Url;

pub async fn git_add_credential(remote_url: &str, username: &str, password: &str) -> Result<()> {
    let url = Url::parse(remote_url)
        .map_err(|e| GenericError(format!("Failed to parse remote url {remote_url}: {e:?}")))?;
    let protocol = url.scheme();
    let host = url.host_str().unwrap();
    let path = Some(url.path());

    let mut child = new_binary_command_global()
        .await?
        .args(["credential", "approve"])
        .stdin(Stdio::piped())
        .stdout(Stdio::null())
        .spawn()?;

    {
        let stdin = child.stdin.as_mut().unwrap();
        stdin.write_all(format!("protocol={}\n", protocol).as_bytes()).await?;
        stdin.write_all(format!("host={}\n", host).as_bytes()).await?;
        if let Some(path) = path {
            if !path.is_empty() {
                stdin
                    .write_all(format!("path={}\n", path.trim_start_matches('/')).as_bytes())
                    .await?;
            }
        }
        stdin.write_all(format!("username={}\n", username).as_bytes()).await?;
        stdin.write_all(format!("password={}\n", password).as_bytes()).await?;
        stdin.write_all(b"\n").await?; // blank line terminator
    }

    let status = child.wait().await?;
    if !status.success() {
        return Err(GenericError("Failed to approve git credential".to_string()));
    }

    Ok(())
}
@@ -1,38 +0,0 @@
mod add;
mod binary;
mod branch;
mod clone;
mod commit;
mod credential;
pub mod error;
mod fetch;
mod init;
mod log;

mod pull;
mod push;
mod remotes;
mod repository;
mod reset;
mod status;
mod unstage;
mod util;

// Re-export all git functions for external use
pub use add::git_add;
pub use branch::{
    BranchDeleteResult, git_checkout_branch, git_create_branch, git_delete_branch,
    git_delete_remote_branch, git_merge_branch, git_rename_branch,
};
pub use clone::{CloneResult, git_clone};
pub use commit::git_commit;
pub use credential::git_add_credential;
pub use fetch::git_fetch_all;
pub use init::git_init;
pub use log::{GitCommit, git_log};
pub use pull::{PullResult, git_pull, git_pull_force_reset, git_pull_merge};
pub use push::{PushResult, git_push};
pub use remotes::{GitRemote, git_add_remote, git_remotes, git_rm_remote};
pub use reset::git_reset_changes;
pub use status::{GitStatusSummary, git_status};
pub use unstage::git_unstage;
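With these re-exports the CLI can drive git without Tauri. A sketch of the clone flow using only the types shown above, assuming a `tokio` runtime; the URL and path are illustrative:

```rust
use std::path::Path;
use yaak_git::{CloneResult, git_clone};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    match git_clone("https://example.com/repo.git", Path::new("/tmp/repo")).await? {
        CloneResult::Success => println!("cloned"),
        CloneResult::NeedsCredentials { url, .. } => {
            // A real caller would prompt, store via git_add_credential, and retry
            eprintln!("credentials required for {url}");
        }
        CloneResult::Cancelled => {}
    }
    Ok(())
}
```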
@@ -1,205 +0,0 @@
use crate::binary::new_binary_command;
use crate::error::Error::GenericError;
use crate::error::Result;
use crate::repository::open_repo;
use crate::util::{get_current_branch_name, get_default_remote_in_repo};
use log::info;
use serde::{Deserialize, Serialize};
use std::path::Path;
use ts_rs::TS;

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, TS)]
#[serde(rename_all = "snake_case", tag = "type")]
#[ts(export, export_to = "gen_git.ts")]
pub enum PullResult {
    Success { message: String },
    UpToDate,
    NeedsCredentials { url: String, error: Option<String> },
    Diverged { remote: String, branch: String },
    UncommittedChanges,
}

fn has_uncommitted_changes(dir: &Path) -> Result<bool> {
    let repo = open_repo(dir)?;
    let mut opts = git2::StatusOptions::new();
    opts.include_ignored(false).include_untracked(false);
    let statuses = repo.statuses(Some(&mut opts))?;
    Ok(statuses.iter().any(|e| e.status() != git2::Status::CURRENT))
}

pub async fn git_pull(dir: &Path) -> Result<PullResult> {
    if has_uncommitted_changes(dir)? {
        return Ok(PullResult::UncommittedChanges);
    }

    // Extract all git2 data before any await points (git2 types are not Send)
    let (branch_name, remote_name, remote_url) = {
        let repo = open_repo(dir)?;
        let branch_name = get_current_branch_name(&repo)?;
        let remote = get_default_remote_in_repo(&repo)?;
        let remote_name =
            remote.name().ok_or(GenericError("Failed to get remote name".to_string()))?.to_string();
        let remote_url =
            remote.url().ok_or(GenericError("Failed to get remote url".to_string()))?.to_string();
        (branch_name, remote_name, remote_url)
    };

    // Step 1: fetch the specific branch
    // NOTE: We use fetch + merge instead of `git pull` to avoid conflicts with
    // global git config (e.g. pull.ff=only) and the background fetch --all.
    let fetch_out = new_binary_command(dir)
        .await?
        .args(["fetch", &remote_name, &branch_name])
        .env("GIT_TERMINAL_PROMPT", "0")
        .output()
        .await
        .map_err(|e| GenericError(format!("failed to run git fetch: {e}")))?;

    let fetch_stdout = String::from_utf8_lossy(&fetch_out.stdout);
    let fetch_stderr = String::from_utf8_lossy(&fetch_out.stderr);
    let fetch_combined = format!("{fetch_stdout}{fetch_stderr}");

    info!("Fetched status={} {fetch_combined}", fetch_out.status);

    if fetch_combined.to_lowercase().contains("could not read") {
        return Ok(PullResult::NeedsCredentials { url: remote_url.to_string(), error: None });
    }

    if fetch_combined.to_lowercase().contains("unable to access") {
        return Ok(PullResult::NeedsCredentials {
            url: remote_url.to_string(),
            error: Some(fetch_combined.to_string()),
        });
    }

    if !fetch_out.status.success() {
        return Err(GenericError(format!("Failed to fetch: {fetch_combined}")));
    }

    // Step 2: merge the fetched branch
    let ref_name = format!("{}/{}", remote_name, branch_name);
    let merge_out = new_binary_command(dir)
        .await?
        .args(["merge", "--ff-only", &ref_name])
        .output()
        .await
        .map_err(|e| GenericError(format!("failed to run git merge: {e}")))?;

    let merge_stdout = String::from_utf8_lossy(&merge_out.stdout);
    let merge_stderr = String::from_utf8_lossy(&merge_out.stderr);
    let merge_combined = format!("{merge_stdout}{merge_stderr}");

    info!("Merged status={} {merge_combined}", merge_out.status);

    if !merge_out.status.success() {
        let merge_lower = merge_combined.to_lowercase();
        if merge_lower.contains("cannot fast-forward")
            || merge_lower.contains("not possible to fast-forward")
            || merge_lower.contains("diverged")
        {
            return Ok(PullResult::Diverged { remote: remote_name, branch: branch_name });
        }
        return Err(GenericError(format!("Failed to merge: {merge_combined}")));
    }

    if merge_combined.to_lowercase().contains("up to date") {
        return Ok(PullResult::UpToDate);
    }

    Ok(PullResult::Success { message: format!("Pulled from {}/{}", remote_name, branch_name) })
}

pub async fn git_pull_force_reset(dir: &Path, remote: &str, branch: &str) -> Result<PullResult> {
    // Step 1: fetch the remote
    let fetch_out = new_binary_command(dir)
        .await?
        .args(["fetch", remote])
        .env("GIT_TERMINAL_PROMPT", "0")
        .output()
        .await
        .map_err(|e| GenericError(format!("failed to run git fetch: {e}")))?;

    if !fetch_out.status.success() {
        let stderr = String::from_utf8_lossy(&fetch_out.stderr);
        return Err(GenericError(format!("Failed to fetch: {stderr}")));
    }

    // Step 2: reset --hard to remote/branch
    let ref_name = format!("{}/{}", remote, branch);
    let reset_out = new_binary_command(dir)
        .await?
        .args(["reset", "--hard", &ref_name])
        .output()
        .await
        .map_err(|e| GenericError(format!("failed to run git reset: {e}")))?;

    if !reset_out.status.success() {
        let stderr = String::from_utf8_lossy(&reset_out.stderr);
        return Err(GenericError(format!("Failed to reset: {}", stderr.trim())));
    }

    Ok(PullResult::Success { message: format!("Reset to {}/{}", remote, branch) })
}

pub async fn git_pull_merge(dir: &Path, remote: &str, branch: &str) -> Result<PullResult> {
    let out = new_binary_command(dir)
        .await?
        .args(["pull", "--no-rebase", remote, branch])
        .env("GIT_TERMINAL_PROMPT", "0")
        .output()
        .await
        .map_err(|e| GenericError(format!("failed to run git pull --no-rebase: {e}")))?;

    let stdout = String::from_utf8_lossy(&out.stdout);
    let stderr = String::from_utf8_lossy(&out.stderr);
    let combined = format!("{}{}", stdout, stderr);

    info!("Pull merge status={} {combined}", out.status);

    if !out.status.success() {
        if combined.to_lowercase().contains("conflict") {
            return Err(GenericError(
                "Merge conflicts detected. Please resolve them manually.".to_string(),
            ));
        }
        return Err(GenericError(format!("Failed to merge pull: {}", combined.trim())));
    }

    Ok(PullResult::Success { message: format!("Merged from {}/{}", remote, branch) })
}

// pub(crate) fn git_pull_old(dir: &Path) -> Result<PullResult> {
//     let repo = open_repo(dir)?;
//
//     let branch = get_current_branch(&repo)?.ok_or(NoActiveBranch)?;
//     let branch_ref = branch.get();
//     let branch_ref = bytes_to_string(branch_ref.name_bytes())?;
//
//     let remote_name = repo.branch_upstream_remote(&branch_ref)?;
//     let remote_name = bytes_to_string(&remote_name)?;
//     debug!("Pulling from {remote_name}");
//
//     let mut remote = repo.find_remote(&remote_name)?;
//
//     let mut options = FetchOptions::new();
//     let callbacks = default_callbacks();
//     options.remote_callbacks(callbacks);
//
//     let mut proxy = ProxyOptions::new();
//     proxy.auto();
//     options.proxy_options(proxy);
//
//     remote.fetch(&[&branch_ref], Some(&mut options), None)?;
//
//     let stats = remote.stats();
//
//     let fetch_head = repo.find_reference("FETCH_HEAD")?;
//     let fetch_commit = repo.reference_to_annotated_commit(&fetch_head)?;
//     do_merge(&repo, &branch, &fetch_commit)?;
//
//     Ok(PullResult::Success {
//         message: "Hello".to_string(),
//         // received_bytes: stats.received_bytes(),
//         // received_objects: stats.received_objects(),
//     })
// }
@@ -1,89 +0,0 @@
use crate::binary::new_binary_command;
use crate::error::Error::GenericError;
use crate::error::Result;
use crate::repository::open_repo;
use crate::util::{get_current_branch_name, get_default_remote_for_push_in_repo};
use log::info;
use serde::{Deserialize, Serialize};
use std::path::Path;
use ts_rs::TS;

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, TS)]
#[serde(rename_all = "snake_case", tag = "type")]
#[ts(export, export_to = "gen_git.ts")]
pub enum PushResult {
    Success { message: String },
    UpToDate,
    NeedsCredentials { url: String, error: Option<String> },
}

pub async fn git_push(dir: &Path) -> Result<PushResult> {
    // Extract all git2 data before any await points (git2 types are not Send)
    let (branch_name, remote_name, remote_url) = {
        let repo = open_repo(dir)?;
        let branch_name = get_current_branch_name(&repo)?;
        let remote = get_default_remote_for_push_in_repo(&repo)?;
        let remote_name =
            remote.name().ok_or(GenericError("Failed to get remote name".to_string()))?.to_string();
        let remote_url =
            remote.url().ok_or(GenericError("Failed to get remote url".to_string()))?.to_string();
        (branch_name, remote_name, remote_url)
    };

    let out = new_binary_command(dir)
        .await?
        .args(["push", &remote_name, &branch_name])
        .env("GIT_TERMINAL_PROMPT", "0")
        .output()
        .await
        .map_err(|e| GenericError(format!("failed to run git push: {e}")))?;

    let stdout = String::from_utf8_lossy(&out.stdout);
    let stderr = String::from_utf8_lossy(&out.stderr);
    let combined = stdout + stderr;
    let combined_lower = combined.to_lowercase();

    info!("Pushed to repo status={} {combined}", out.status);

    // Helper to check if this is a credentials error
    let is_credentials_error = || {
        combined_lower.contains("could not read")
            || combined_lower.contains("unable to access")
            || combined_lower.contains("authentication failed")
    };

    // Check for explicit rejection indicators first (e.g., protected branch rejections)
    // These can occur even if some git servers don't properly set exit codes
    if combined_lower.contains("rejected") || combined_lower.contains("failed to push") {
        if is_credentials_error() {
            return Ok(PushResult::NeedsCredentials {
                url: remote_url.to_string(),
                error: Some(combined.to_string()),
            });
        }
        return Err(GenericError(format!("Failed to push: {combined}")));
    }

    // Check exit status for any other failures
    if !out.status.success() {
        if combined_lower.contains("could not read") {
            return Ok(PushResult::NeedsCredentials { url: remote_url.to_string(), error: None });
        }
        if combined_lower.contains("unable to access")
            || combined_lower.contains("authentication failed")
        {
            return Ok(PushResult::NeedsCredentials {
                url: remote_url.to_string(),
                error: Some(combined.to_string()),
            });
        }
        return Err(GenericError(format!("Failed to push: {combined}")));
    }

    // Success cases (exit code 0 and no rejection indicators)
    if combined_lower.contains("up-to-date") {
        return Ok(PushResult::UpToDate);
    }

    Ok(PushResult::Success { message: format!("Pushed to {}/{}", remote_name, branch_name) })
}
@@ -1,20 +0,0 @@
use crate::binary::new_binary_command;
use crate::error::Error::GenericError;
use crate::error::Result;
use std::path::Path;

pub async fn git_reset_changes(dir: &Path) -> Result<()> {
    let out = new_binary_command(dir)
        .await?
        .args(["reset", "--hard", "HEAD"])
        .output()
        .await
        .map_err(|e| GenericError(format!("failed to run git reset: {e}")))?;

    if !out.status.success() {
        let stderr = String::from_utf8_lossy(&out.stderr);
        return Err(GenericError(format!("Failed to reset: {}", stderr.trim())));
    }

    Ok(())
}
@@ -1,484 +0,0 @@
//! Custom cookie handling for HTTP requests
//!
//! This module provides cookie storage and matching functionality that was previously
//! delegated to reqwest. It implements RFC 6265 cookie domain and path matching.

use log::debug;
use std::sync::{Arc, Mutex};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use url::Url;
use yaak_models::models::{Cookie, CookieDomain, CookieExpires};

/// A thread-safe cookie store that can be shared across requests
#[derive(Debug, Clone)]
pub struct CookieStore {
    cookies: Arc<Mutex<Vec<Cookie>>>,
}

impl Default for CookieStore {
    fn default() -> Self {
        Self::new()
    }
}

impl CookieStore {
    /// Create a new empty cookie store
    pub fn new() -> Self {
        Self { cookies: Arc::new(Mutex::new(Vec::new())) }
    }

    /// Create a cookie store from existing cookies
    pub fn from_cookies(cookies: Vec<Cookie>) -> Self {
        Self { cookies: Arc::new(Mutex::new(cookies)) }
    }

    /// Get all cookies (for persistence)
    pub fn get_all_cookies(&self) -> Vec<Cookie> {
        self.cookies.lock().unwrap().clone()
    }

    /// Get the Cookie header value for the given URL
    pub fn get_cookie_header(&self, url: &Url) -> Option<String> {
        let cookies = self.cookies.lock().unwrap();
        let now = SystemTime::now();

        let matching_cookies: Vec<_> = cookies
            .iter()
            .filter(|cookie| self.cookie_matches(cookie, url, &now))
            .filter_map(|cookie| {
                // Parse the raw cookie to get name=value
                parse_cookie_name_value(&cookie.raw_cookie)
            })
            .collect();

        if matching_cookies.is_empty() {
            None
        } else {
            Some(
                matching_cookies
                    .into_iter()
                    .map(|(name, value)| format!("{}={}", name, value))
                    .collect::<Vec<_>>()
                    .join("; "),
            )
        }
    }

    /// Parse Set-Cookie headers and add cookies to the store
    pub fn store_cookies_from_response(&self, url: &Url, set_cookie_headers: &[String]) {
        let mut cookies = self.cookies.lock().unwrap();

        for header_value in set_cookie_headers {
            if let Some(cookie) = parse_set_cookie(header_value, url) {
                // Remove any existing cookie with the same name and domain
                cookies.retain(|existing| !cookies_match(existing, &cookie));
                debug!(
                    "Storing cookie: {} for domain {:?}",
                    parse_cookie_name_value(&cookie.raw_cookie)
                        .map(|(n, _)| n)
                        .unwrap_or_else(|| "unknown".to_string()),
                    cookie.domain
                );
                cookies.push(cookie);
            }
        }
    }

    /// Check if a cookie matches the given URL
    fn cookie_matches(&self, cookie: &Cookie, url: &Url, now: &SystemTime) -> bool {
        // Check expiration
        if let CookieExpires::AtUtc(expiry_str) = &cookie.expires {
            if let Ok(expiry) = parse_cookie_date(expiry_str) {
                if expiry < *now {
                    return false;
                }
            }
        }

        // Check domain
        let url_host = match url.host_str() {
            Some(h) => h.to_lowercase(),
            None => return false,
        };

        let domain_matches = match &cookie.domain {
            CookieDomain::HostOnly(domain) => url_host == domain.to_lowercase(),
            CookieDomain::Suffix(domain) => {
                let domain_lower = domain.to_lowercase();
                url_host == domain_lower || url_host.ends_with(&format!(".{}", domain_lower))
            }
            // NotPresent and Empty should never occur in practice since we always set domain
            // when parsing Set-Cookie headers. Treat as non-matching to be safe.
            CookieDomain::NotPresent | CookieDomain::Empty => false,
        };

        if !domain_matches {
            return false;
        }

        // Check path
        let (cookie_path, _) = &cookie.path;
        let url_path = url.path();

        path_matches(url_path, cookie_path)
    }
}

/// Parse name=value from a cookie string (raw_cookie format)
fn parse_cookie_name_value(raw_cookie: &str) -> Option<(String, String)> {
    // The raw_cookie typically looks like "name=value" or "name=value; attr1; attr2=..."
    let first_part = raw_cookie.split(';').next()?;
    let mut parts = first_part.splitn(2, '=');
    let name = parts.next()?.trim().to_string();
    let value = parts.next().unwrap_or("").trim().to_string();

    if name.is_empty() { None } else { Some((name, value)) }
}

/// Parse a Set-Cookie header into a Cookie
fn parse_set_cookie(header_value: &str, request_url: &Url) -> Option<Cookie> {
    let parsed = cookie::Cookie::parse(header_value).ok()?;

    let raw_cookie = format!("{}={}", parsed.name(), parsed.value());

    // Determine domain
    let domain = if let Some(domain_attr) = parsed.domain() {
        // Domain attribute present - this is a suffix match
        let domain = domain_attr.trim_start_matches('.').to_lowercase();

        // Reject single-component domains (TLDs) except localhost
        if is_single_component_domain(&domain) && !is_localhost(&domain) {
            debug!("Rejecting cookie with single-component domain: {}", domain);
            return None;
        }

        CookieDomain::Suffix(domain)
    } else {
        // No domain attribute - host-only cookie
        CookieDomain::HostOnly(request_url.host_str().unwrap_or("").to_lowercase())
    };

    // Determine expiration
    let expires = if let Some(max_age) = parsed.max_age() {
        let duration = Duration::from_secs(max_age.whole_seconds().max(0) as u64);
        let expiry = SystemTime::now() + duration;
        let expiry_secs = expiry.duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
        CookieExpires::AtUtc(format!("{}", expiry_secs))
    } else if let Some(expires_time) = parsed.expires() {
        match expires_time {
            cookie::Expiration::DateTime(dt) => {
                let timestamp = dt.unix_timestamp();
                CookieExpires::AtUtc(format!("{}", timestamp))
            }
            cookie::Expiration::Session => CookieExpires::SessionEnd,
        }
    } else {
        CookieExpires::SessionEnd
    };

    // Determine path
    let path = if let Some(path_attr) = parsed.path() {
        (path_attr.to_string(), true)
    } else {
        // Default path is the directory of the request URI
        let default_path = default_cookie_path(request_url.path());
        (default_path, false)
    };

    Some(Cookie { raw_cookie, domain, expires, path })
}

/// Get the default cookie path from a request path (RFC 6265 Section 5.1.4)
fn default_cookie_path(request_path: &str) -> String {
    if request_path.is_empty() || !request_path.starts_with('/') {
        return "/".to_string();
    }

    // Find the last slash
    if let Some(last_slash) = request_path.rfind('/') {
        if last_slash == 0 { "/".to_string() } else { request_path[..last_slash].to_string() }
    } else {
        "/".to_string()
    }
}

/// Check if a request path matches a cookie path (RFC 6265 Section 5.1.4)
fn path_matches(request_path: &str, cookie_path: &str) -> bool {
    if request_path == cookie_path {
        return true;
    }

    if request_path.starts_with(cookie_path) {
        // Cookie path must end with / or the char after cookie_path in request_path must be /
        if cookie_path.ends_with('/') {
            return true;
        }
        if request_path.chars().nth(cookie_path.len()) == Some('/') {
            return true;
        }
    }

    false
}

/// Check if two cookies match (same name and domain)
fn cookies_match(a: &Cookie, b: &Cookie) -> bool {
    let name_a = parse_cookie_name_value(&a.raw_cookie).map(|(n, _)| n);
    let name_b = parse_cookie_name_value(&b.raw_cookie).map(|(n, _)| n);

    if name_a != name_b {
        return false;
    }

    // Check domain match
    match (&a.domain, &b.domain) {
        (CookieDomain::HostOnly(d1), CookieDomain::HostOnly(d2)) => {
            d1.to_lowercase() == d2.to_lowercase()
        }
        (CookieDomain::Suffix(d1), CookieDomain::Suffix(d2)) => {
            d1.to_lowercase() == d2.to_lowercase()
        }
        _ => false,
    }
}

/// Parse a cookie date string (Unix timestamp in our format)
fn parse_cookie_date(date_str: &str) -> Result<SystemTime, ()> {
    let timestamp: i64 = date_str.parse().map_err(|_| ())?;
    let duration = Duration::from_secs(timestamp.max(0) as u64);
    Ok(UNIX_EPOCH + duration)
}

/// Check if a domain is a single-component domain (TLD)
/// e.g., "com", "org", "net" - domains without any dots
fn is_single_component_domain(domain: &str) -> bool {
    // Empty or only dots
    let trimmed = domain.trim_matches('.');
    if trimmed.is_empty() {
        return true;
    }
    // IPv6 addresses use colons, not dots - don't consider them single-component
    if domain.contains(':') {
        return false;
    }
    !trimmed.contains('.')
}

/// Check if a domain is localhost or a localhost variant
fn is_localhost(domain: &str) -> bool {
    let lower = domain.to_lowercase();
    lower == "localhost"
        || lower.ends_with(".localhost")
        || lower == "127.0.0.1"
        || lower == "::1"
        || lower == "[::1]"
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parse_cookie_name_value() {
        assert_eq!(
            parse_cookie_name_value("session=abc123"),
            Some(("session".to_string(), "abc123".to_string()))
        );
        assert_eq!(
            parse_cookie_name_value("name=value; Path=/; HttpOnly"),
            Some(("name".to_string(), "value".to_string()))
        );
        assert_eq!(parse_cookie_name_value("empty="), Some(("empty".to_string(), "".to_string())));
        assert_eq!(parse_cookie_name_value(""), None);
    }

    #[test]
    fn test_path_matches() {
        assert!(path_matches("/", "/"));
        assert!(path_matches("/foo", "/"));
        assert!(path_matches("/foo/bar", "/foo"));
        assert!(path_matches("/foo/bar", "/foo/"));
        assert!(!path_matches("/foobar", "/foo"));
        assert!(!path_matches("/foo", "/foo/bar"));
    }

    #[test]
    fn test_default_cookie_path() {
        assert_eq!(default_cookie_path("/"), "/");
        assert_eq!(default_cookie_path("/foo"), "/");
        assert_eq!(default_cookie_path("/foo/bar"), "/foo");
        assert_eq!(default_cookie_path("/foo/bar/baz"), "/foo/bar");
        assert_eq!(default_cookie_path(""), "/");
    }

    #[test]
    fn test_cookie_store_basic() {
        let store = CookieStore::new();
        let url = Url::parse("https://example.com/path").unwrap();

        // Initially empty
        assert!(store.get_cookie_header(&url).is_none());

        // Add a cookie
        store.store_cookies_from_response(&url, &["session=abc123".to_string()]);

        // Should now have the cookie
        let header = store.get_cookie_header(&url);
        assert_eq!(header, Some("session=abc123".to_string()));
    }

    #[test]
    fn test_cookie_domain_matching() {
        let store = CookieStore::new();
        let url = Url::parse("https://example.com/").unwrap();

        // Cookie with domain attribute (suffix match)
        store.store_cookies_from_response(
            &url,
            &["domain_cookie=value; Domain=example.com".to_string()],
        );

        // Should match example.com
        assert!(store.get_cookie_header(&url).is_some());

        // Should match subdomain
        let subdomain_url = Url::parse("https://sub.example.com/").unwrap();
        assert!(store.get_cookie_header(&subdomain_url).is_some());

        // Should not match different domain
        let other_url = Url::parse("https://other.com/").unwrap();
        assert!(store.get_cookie_header(&other_url).is_none());
    }

    #[test]
    fn test_cookie_path_matching() {
        let store = CookieStore::new();
        let url = Url::parse("https://example.com/api/v1").unwrap();

        // Cookie with path
        store.store_cookies_from_response(&url, &["api_cookie=value; Path=/api".to_string()]);

        // Should match /api/v1
        assert!(store.get_cookie_header(&url).is_some());

        // Should match /api
        let api_url = Url::parse("https://example.com/api").unwrap();
        assert!(store.get_cookie_header(&api_url).is_some());

        // Should not match /other
        let other_url = Url::parse("https://example.com/other").unwrap();
        assert!(store.get_cookie_header(&other_url).is_none());
    }

    #[test]
    fn test_cookie_replacement() {
        let store = CookieStore::new();
        let url = Url::parse("https://example.com/").unwrap();

        // Add a cookie
        store.store_cookies_from_response(&url, &["session=old".to_string()]);
        assert_eq!(store.get_cookie_header(&url), Some("session=old".to_string()));

        // Replace with new value
        store.store_cookies_from_response(&url, &["session=new".to_string()]);
        assert_eq!(store.get_cookie_header(&url), Some("session=new".to_string()));

        // Should only have one cookie
        assert_eq!(store.get_all_cookies().len(), 1);
    }

    #[test]
    fn test_is_single_component_domain() {
        // Single-component domains (TLDs)
        assert!(is_single_component_domain("com"));
        assert!(is_single_component_domain("org"));
        assert!(is_single_component_domain("net"));
        assert!(is_single_component_domain("localhost")); // Still single-component, but allowed separately

        // Multi-component domains
        assert!(!is_single_component_domain("example.com"));
        assert!(!is_single_component_domain("sub.example.com"));
        assert!(!is_single_component_domain("co.uk"));

        // Edge cases
        assert!(is_single_component_domain("")); // Empty is treated as single-component
        assert!(is_single_component_domain(".")); // Only dots
        assert!(is_single_component_domain("..")); // Only dots

        // IPv6 addresses (have colons, not dots)
        assert!(!is_single_component_domain("::1")); // IPv6 localhost
        assert!(!is_single_component_domain("[::1]")); // Bracketed IPv6
        assert!(!is_single_component_domain("2001:db8::1")); // IPv6 address
    }

    #[test]
    fn test_is_localhost() {
        // Localhost variants
        assert!(is_localhost("localhost"));
        assert!(is_localhost("LOCALHOST")); // Case-insensitive
        assert!(is_localhost("sub.localhost"));
        assert!(is_localhost("app.sub.localhost"));

        // IP localhost
        assert!(is_localhost("127.0.0.1"));
        assert!(is_localhost("::1"));
        assert!(is_localhost("[::1]"));

        // Not localhost
        assert!(!is_localhost("example.com"));
        assert!(!is_localhost("localhost.com")); // .com domain, not localhost
        assert!(!is_localhost("notlocalhost"));
    }

    #[test]
    fn test_reject_tld_cookies() {
        let store = CookieStore::new();
        let url = Url::parse("https://example.com/").unwrap();

        // Try to set a cookie with Domain=com (TLD)
        store.store_cookies_from_response(&url, &["bad=cookie; Domain=com".to_string()]);

        // Should be rejected - no cookies stored
        assert_eq!(store.get_all_cookies().len(), 0);
        assert!(store.get_cookie_header(&url).is_none());
    }

    #[test]
    fn test_allow_localhost_cookies() {
        let store = CookieStore::new();
        let url = Url::parse("http://localhost:3000/").unwrap();

        // Cookie with Domain=localhost should be allowed
        store.store_cookies_from_response(&url, &["session=abc; Domain=localhost".to_string()]);

        // Should be accepted
        assert_eq!(store.get_all_cookies().len(), 1);
        assert!(store.get_cookie_header(&url).is_some());
    }

    #[test]
    fn test_allow_127_0_0_1_cookies() {
        let store = CookieStore::new();
        let url = Url::parse("http://127.0.0.1:8080/").unwrap();

        // Cookie without Domain attribute (host-only) should work
        store.store_cookies_from_response(&url, &["session=xyz".to_string()]);

        // Should be accepted
        assert_eq!(store.get_all_cookies().len(), 1);
        assert!(store.get_cookie_header(&url).is_some());
    }

    #[test]
    fn test_allow_normal_domain_cookies() {
        let store = CookieStore::new();
        let url = Url::parse("https://example.com/").unwrap();

        // Cookie with valid domain should be allowed
        store.store_cookies_from_response(&url, &["session=abc; Domain=example.com".to_string()]);

        // Should be accepted
        assert_eq!(store.get_all_cookies().len(), 1);
        assert!(store.get_cookie_header(&url).is_some());
    }
}
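// Illustrative sketch (not from the original file): the round trip a sender
// drives between requests - store Set-Cookie values from a response, then ask
// the store for the matching Cookie header on the next request to that URL.
//
// let store = CookieStore::new();
// let url = Url::parse("https://api.example.com/login").unwrap();
// store.store_cookies_from_response(&url, &["token=s3cret; Path=/; Max-Age=3600".to_string()]);
// if let Some(header) = store.get_cookie_header(&url) {
//     println!("Cookie: {header}"); // "Cookie: token=s3cret"
// }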
@@ -1,186 +0,0 @@
use crate::sender::HttpResponseEvent;
use hyper_util::client::legacy::connect::dns::{
    GaiResolver as HyperGaiResolver, Name as HyperName,
};
use log::info;
use reqwest::dns::{Addrs, Name, Resolve, Resolving};
use std::collections::HashMap;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
use std::str::FromStr;
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::{RwLock, mpsc};
use tower_service::Service;
use yaak_models::models::DnsOverride;

/// Stores resolved addresses for a hostname override
#[derive(Clone)]
pub struct ResolvedOverride {
    pub ipv4: Vec<Ipv4Addr>,
    pub ipv6: Vec<Ipv6Addr>,
}

#[derive(Clone)]
pub struct LocalhostResolver {
    fallback: HyperGaiResolver,
    event_tx: Arc<RwLock<Option<mpsc::Sender<HttpResponseEvent>>>>,
    overrides: Arc<HashMap<String, ResolvedOverride>>,
}

impl LocalhostResolver {
    pub fn new(dns_overrides: Vec<DnsOverride>) -> Arc<Self> {
        let resolver = HyperGaiResolver::new();

        // Pre-parse DNS overrides into a lookup map
        let mut overrides = HashMap::new();
        for o in dns_overrides {
            if !o.enabled {
                continue;
            }
            let hostname = o.hostname.to_lowercase();

            let ipv4: Vec<Ipv4Addr> =
                o.ipv4.iter().filter_map(|s| s.parse::<Ipv4Addr>().ok()).collect();

            let ipv6: Vec<Ipv6Addr> =
                o.ipv6.iter().filter_map(|s| s.parse::<Ipv6Addr>().ok()).collect();

            // Only add if at least one address is valid
            if !ipv4.is_empty() || !ipv6.is_empty() {
                overrides.insert(hostname, ResolvedOverride { ipv4, ipv6 });
            }
        }

        Arc::new(Self {
            fallback: resolver,
            event_tx: Arc::new(RwLock::new(None)),
            overrides: Arc::new(overrides),
        })
    }

    /// Set the event sender for the current request.
    /// This should be called before each request to direct DNS events
    /// to the appropriate channel.
    pub async fn set_event_sender(&self, tx: Option<mpsc::Sender<HttpResponseEvent>>) {
        let mut guard = self.event_tx.write().await;
        *guard = tx;
    }
}

impl Resolve for LocalhostResolver {
    fn resolve(&self, name: Name) -> Resolving {
        let host = name.as_str().to_lowercase();
        let event_tx = self.event_tx.clone();
        let overrides = self.overrides.clone();

        info!("DNS resolve called for: {}", host);

        // Check for DNS override first
        if let Some(resolved) = overrides.get(&host) {
            log::debug!("DNS override found for: {}", host);
            let hostname = host.clone();
            let mut addrs: Vec<SocketAddr> = Vec::new();

            // Add IPv4 addresses
            for ip in &resolved.ipv4 {
                addrs.push(SocketAddr::new(IpAddr::V4(*ip), 0));
            }

            // Add IPv6 addresses
            for ip in &resolved.ipv6 {
                addrs.push(SocketAddr::new(IpAddr::V6(*ip), 0));
            }

            let addresses: Vec<String> = addrs.iter().map(|a| a.ip().to_string()).collect();

            return Box::pin(async move {
                // Emit DNS event for override
                let guard = event_tx.read().await;
                if let Some(tx) = guard.as_ref() {
                    let _ = tx
                        .send(HttpResponseEvent::DnsResolved {
                            hostname,
                            addresses,
                            duration: 0,
                            overridden: true,
                        })
                        .await;
                }

                Ok::<Addrs, Box<dyn std::error::Error + Send + Sync>>(Box::new(addrs.into_iter()))
            });
        }

        // Check for .localhost suffix
        let is_localhost = host.ends_with(".localhost");
        if is_localhost {
            let hostname = host.clone();
            // Port 0 is fine; reqwest replaces it with the URL's explicit
            // port or the scheme's default (80/443, etc.).
            let addrs: Vec<SocketAddr> = vec![
                SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0),
                SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 0),
            ];

            let addresses: Vec<String> = addrs.iter().map(|a| a.ip().to_string()).collect();

            return Box::pin(async move {
                // Emit DNS event for localhost resolution
                let guard = event_tx.read().await;
                if let Some(tx) = guard.as_ref() {
                    let _ = tx
                        .send(HttpResponseEvent::DnsResolved {
                            hostname,
                            addresses,
                            duration: 0,
                            overridden: false,
                        })
                        .await;
                }

                Ok::<Addrs, Box<dyn std::error::Error + Send + Sync>>(Box::new(addrs.into_iter()))
            });
        }

        // Fall back to system DNS
        let mut fallback = self.fallback.clone();
        let name_str = name.as_str().to_string();
        let hostname = host.clone();

        Box::pin(async move {
            let start = Instant::now();

            let result = match HyperName::from_str(&name_str) {
                Ok(n) => fallback.call(n).await,
                Err(e) => return Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>),
            };

            let duration = start.elapsed().as_millis() as u64;

            match result {
                Ok(addrs) => {
                    // Collect addresses for event emission
                    let addr_vec: Vec<SocketAddr> = addrs.collect();
                    let addresses: Vec<String> =
                        addr_vec.iter().map(|a| a.ip().to_string()).collect();

                    // Emit DNS event
                    let guard = event_tx.read().await;
                    if let Some(tx) = guard.as_ref() {
                        let _ = tx
                            .send(HttpResponseEvent::DnsResolved {
                                hostname,
                                addresses,
                                duration,
                                overridden: false,
                            })
                            .await;
                    }

                    Ok(Box::new(addr_vec.into_iter()) as Addrs)
                }
                Err(err) => Err(Box::new(err) as Box<dyn std::error::Error + Send + Sync>),
            }
        })
    }
}
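// Illustrative sketch (not from the original file): wiring this resolver into a
// reqwest client. `ClientBuilder::dns_resolver` accepts an `Arc<impl Resolve>`,
// which is why `new()` returns `Arc<Self>`; the empty override list is just for
// brevity here.
//
// let resolver = LocalhostResolver::new(vec![]);
// let client = reqwest::Client::builder()
//     .dns_resolver(resolver)
//     .build()?;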
@@ -1,13 +0,0 @@
mod chained_reader;
pub mod client;
pub mod cookies;
pub mod decompress;
pub mod dns;
pub mod error;
pub mod manager;
pub mod path_placeholders;
mod proto;
pub mod sender;
pub mod tee_reader;
pub mod transaction;
pub mod types;
@@ -1,159 +0,0 @@
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};
use tokio::sync::mpsc;

/// A reader that forwards all read data to a channel while also returning it to the caller.
/// This allows capturing request body data as it's being sent.
/// Uses an unbounded channel to ensure all data is captured without blocking the request.
pub struct TeeReader<R> {
    inner: R,
    tx: mpsc::UnboundedSender<Vec<u8>>,
}

impl<R> TeeReader<R> {
    pub fn new(inner: R, tx: mpsc::UnboundedSender<Vec<u8>>) -> Self {
        Self { inner, tx }
    }
}

impl<R: AsyncRead + Unpin> AsyncRead for TeeReader<R> {
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        let before_len = buf.filled().len();

        match Pin::new(&mut self.inner).poll_read(cx, buf) {
            Poll::Ready(Ok(())) => {
                let after_len = buf.filled().len();
                if after_len > before_len {
                    // Data was read, send a copy to the channel
                    let data = buf.filled()[before_len..after_len].to_vec();
                    // Send to unbounded channel - this never blocks
                    // Ignore error if receiver is closed
                    let _ = self.tx.send(data);
                }
                Poll::Ready(Ok(()))
            }
            Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
            Poll::Pending => Poll::Pending,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Cursor;
    use tokio::io::AsyncReadExt;

    #[tokio::test]
    async fn test_tee_reader_captures_all_data() {
        let data = b"Hello, World!";
        let cursor = Cursor::new(data.to_vec());
        let (tx, mut rx) = mpsc::unbounded_channel();

        let mut tee = TeeReader::new(cursor, tx);
        let mut output = Vec::new();
        tee.read_to_end(&mut output).await.unwrap();

        // Verify the reader returns the correct data
        assert_eq!(output, data);

        // Verify the channel received the data
        let mut captured = Vec::new();
        while let Ok(chunk) = rx.try_recv() {
            captured.extend(chunk);
        }
        assert_eq!(captured, data);
    }

    #[tokio::test]
    async fn test_tee_reader_with_chunked_reads() {
        let data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ";
        let cursor = Cursor::new(data.to_vec());
        let (tx, mut rx) = mpsc::unbounded_channel();

        let mut tee = TeeReader::new(cursor, tx);

        // Read in small chunks
        let mut buf = [0u8; 5];
        let mut output = Vec::new();
        loop {
            let n = tee.read(&mut buf).await.unwrap();
            if n == 0 {
                break;
            }
            output.extend_from_slice(&buf[..n]);
        }

        // Verify the reader returns the correct data
        assert_eq!(output, data);

        // Verify the channel received all chunks
        let mut captured = Vec::new();
        while let Ok(chunk) = rx.try_recv() {
            captured.extend(chunk);
        }
        assert_eq!(captured, data);
    }

    #[tokio::test]
    async fn test_tee_reader_empty_data() {
        let data: Vec<u8> = vec![];
        let cursor = Cursor::new(data.clone());
        let (tx, mut rx) = mpsc::unbounded_channel();

        let mut tee = TeeReader::new(cursor, tx);
        let mut output = Vec::new();
        tee.read_to_end(&mut output).await.unwrap();

        // Verify empty output
        assert!(output.is_empty());

        // Verify no data was sent to channel
        assert!(rx.try_recv().is_err());
    }

    #[tokio::test]
    async fn test_tee_reader_works_when_receiver_dropped() {
        let data = b"Hello, World!";
        let cursor = Cursor::new(data.to_vec());
        let (tx, rx) = mpsc::unbounded_channel();

        // Drop the receiver before reading
        drop(rx);

        let mut tee = TeeReader::new(cursor, tx);
        let mut output = Vec::new();

        // Should still work even though receiver is dropped
        tee.read_to_end(&mut output).await.unwrap();
        assert_eq!(output, data);
    }

    #[tokio::test]
    async fn test_tee_reader_large_data() {
        // Test with 1MB of data
        let data: Vec<u8> = (0..1024 * 1024).map(|i| (i % 256) as u8).collect();
        let cursor = Cursor::new(data.clone());
        let (tx, mut rx) = mpsc::unbounded_channel();

        let mut tee = TeeReader::new(cursor, tx);
        let mut output = Vec::new();
        tee.read_to_end(&mut output).await.unwrap();

        // Verify the reader returns the correct data
        assert_eq!(output, data);

        // Verify the channel received all data
        let mut captured = Vec::new();
        while let Ok(chunk) = rx.try_recv() {
            captured.extend(chunk);
        }
        assert_eq!(captured, data);
    }
}
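// Illustrative sketch (not from the original file): capturing a streamed request
// body as it is uploaded. The file path is hypothetical.
//
// let file = tokio::fs::File::open("/tmp/body.bin").await?;
// let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel();
// let tee = TeeReader::new(file, tx);
// // Hand `tee` to the HTTP sender as the body reader; every chunk it reads
// // is also delivered on `rx` for persistence or display.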
@@ -1,892 +0,0 @@
|
||||
use crate::cookies::CookieStore;
|
||||
use crate::error::Result;
|
||||
use crate::sender::{HttpResponse, HttpResponseEvent, HttpSender, RedirectBehavior};
|
||||
use crate::types::SendableHttpRequest;
|
||||
use log::debug;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::sync::watch::Receiver;
|
||||
use url::Url;
|
||||
|
||||
/// HTTP Transaction that manages the lifecycle of a request, including redirect handling
|
||||
pub struct HttpTransaction<S: HttpSender> {
|
||||
sender: S,
|
||||
max_redirects: usize,
|
||||
cookie_store: Option<CookieStore>,
|
||||
}
|
||||
|
||||
impl<S: HttpSender> HttpTransaction<S> {
|
||||
/// Create a new transaction with default settings
|
||||
pub fn new(sender: S) -> Self {
|
||||
Self { sender, max_redirects: 10, cookie_store: None }
|
||||
}
|
||||
|
||||
/// Create a new transaction with custom max redirects
|
||||
pub fn with_max_redirects(sender: S, max_redirects: usize) -> Self {
|
||||
Self { sender, max_redirects, cookie_store: None }
|
||||
}
|
||||
|
||||
/// Create a new transaction with a cookie store
|
||||
pub fn with_cookie_store(sender: S, cookie_store: CookieStore) -> Self {
|
||||
Self { sender, max_redirects: 10, cookie_store: Some(cookie_store) }
|
||||
}
|
||||
|
||||
/// Create a new transaction with custom max redirects and a cookie store
|
||||
pub fn with_options(
|
||||
sender: S,
|
||||
max_redirects: usize,
|
||||
cookie_store: Option<CookieStore>,
|
||||
) -> Self {
|
||||
Self { sender, max_redirects, cookie_store }
|
||||
}
|
||||
|
||||
/// Execute the request with cancellation support.
|
||||
/// Returns an HttpResponse with unconsumed body - caller decides how to consume it.
|
||||
/// Events are sent through the provided channel.
|
||||
pub async fn execute_with_cancellation(
|
||||
&self,
|
||||
request: SendableHttpRequest,
|
||||
mut cancelled_rx: Receiver<bool>,
|
||||
event_tx: mpsc::Sender<HttpResponseEvent>,
|
||||
) -> Result<HttpResponse> {
|
||||
let mut redirect_count = 0;
|
||||
let mut current_url = request.url;
|
||||
let mut current_method = request.method;
|
||||
let mut current_headers = request.headers;
|
||||
let mut current_body = request.body;
|
||||
|
||||
// Helper to send events (ignores errors if receiver is dropped or channel is full)
|
||||
let send_event = |event: HttpResponseEvent| {
|
||||
let _ = event_tx.try_send(event);
|
||||
};
|
||||
|
||||
loop {
|
||||
// Check for cancellation before each request
|
||||
if *cancelled_rx.borrow() {
|
||||
return Err(crate::error::Error::RequestCanceledError);
|
||||
}
|
||||
|
||||
// Inject cookies into headers if we have a cookie store
|
||||
let headers_with_cookies = if let Some(cookie_store) = &self.cookie_store {
|
||||
let mut headers = current_headers.clone();
|
||||
if let Ok(url) = Url::parse(¤t_url) {
|
||||
if let Some(cookie_header) = cookie_store.get_cookie_header(&url) {
|
||||
debug!("Injecting Cookie header: {}", cookie_header);
|
||||
// Check if there's already a Cookie header and merge if so
|
||||
if let Some(existing) =
|
||||
headers.iter_mut().find(|h| h.0.eq_ignore_ascii_case("cookie"))
|
||||
{
|
||||
existing.1 = format!("{}; {}", existing.1, cookie_header);
|
||||
} else {
|
||||
headers.push(("Cookie".to_string(), cookie_header));
|
||||
}
|
||||
}
|
||||
}
|
||||
headers
|
||||
} else {
|
||||
current_headers.clone()
|
||||
};
|
||||
|
||||
// Build request for this iteration
|
||||
let req = SendableHttpRequest {
|
||||
url: current_url.clone(),
|
||||
method: current_method.clone(),
|
||||
headers: headers_with_cookies,
|
||||
body: current_body,
|
||||
options: request.options.clone(),
|
||||
};
|
||||
|
||||
// Send the request
|
||||
send_event(HttpResponseEvent::Setting(
|
||||
"redirects".to_string(),
|
||||
request.options.follow_redirects.to_string(),
|
||||
));
|
||||
|
||||
// Execute with cancellation support
|
||||
let response = tokio::select! {
|
||||
result = self.sender.send(req, event_tx.clone()) => result?,
|
||||
_ = cancelled_rx.changed() => {
|
||||
return Err(crate::error::Error::RequestCanceledError);
|
||||
}
|
||||
};
|
||||
|
||||
// Parse Set-Cookie headers and store cookies
|
||||
if let Some(cookie_store) = &self.cookie_store {
|
||||
if let Ok(url) = Url::parse(¤t_url) {
|
||||
let set_cookie_headers: Vec<String> = response
|
||||
.headers
|
||||
.iter()
|
||||
.filter(|(k, _)| k.eq_ignore_ascii_case("set-cookie"))
|
||||
.map(|(_, v)| v.clone())
|
||||
.collect();
|
||||
|
||||
if !set_cookie_headers.is_empty() {
|
||||
debug!("Storing {} cookies from response", set_cookie_headers.len());
|
||||
cookie_store.store_cookies_from_response(&url, &set_cookie_headers);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !Self::is_redirect(response.status) {
|
||||
// Not a redirect - return the response for caller to consume body
|
||||
return Ok(response);
|
||||
}
|
||||
|
||||
if !request.options.follow_redirects {
|
||||
// Redirects disabled - return the redirect response as-is
|
||||
return Ok(response);
|
||||
}
|
||||
|
||||
// Check if we've exceeded max redirects
|
||||
if redirect_count >= self.max_redirects {
|
||||
// Drain the response before returning error
|
||||
let _ = response.drain().await;
|
||||
return Err(crate::error::Error::RequestError(format!(
|
||||
"Maximum redirect limit ({}) exceeded",
|
||||
self.max_redirects
|
||||
)));
|
||||
}
|
||||
|
||||
// Extract Location header before draining (headers are available immediately)
|
||||
// HTTP headers are case-insensitive, so we need to search for any casing
|
||||
let location = response
|
||||
.headers
|
||||
.iter()
|
||||
.find(|(k, _)| k.eq_ignore_ascii_case("location"))
|
||||
.map(|(_, v)| v.clone())
|
||||
.ok_or_else(|| {
|
||||
crate::error::Error::RequestError(
|
||||
"Redirect response missing Location header".to_string(),
|
||||
)
|
||||
})?;
|
||||
|
||||
// Also get status before draining
|
||||
let status = response.status;
|
||||
|
||||
send_event(HttpResponseEvent::Info("Ignoring the response body".to_string()));
|
||||
|
||||
// Drain the redirect response body before following
|
||||
response.drain().await?;
|
||||
|
||||
// Update the request URL
|
||||
let previous_url = current_url.clone();
|
||||
current_url = if location.starts_with("http://") || location.starts_with("https://") {
|
||||
// Absolute URL
|
||||
location
|
||||
} else if location.starts_with('/') {
|
||||
// Absolute path - need to extract base URL from current request
|
||||
let base_url = Self::extract_base_url(¤t_url)?;
|
||||
format!("{}{}", base_url, location)
|
||||
} else {
|
||||
// Relative path - need to resolve relative to current path
|
||||
let base_path = Self::extract_base_path(¤t_url)?;
|
||||
format!("{}/{}", base_path, location)
|
||||
};
|
||||
|
||||
Self::remove_sensitive_headers(&mut current_headers, &previous_url, ¤t_url);
|
||||
|
||||
// Determine redirect behavior based on status code and method
|
||||
let behavior = if status == 303 {
|
||||
// 303 See Other always changes to GET
|
||||
RedirectBehavior::DropBody
|
||||
} else if (status == 301 || status == 302) && current_method == "POST" {
|
||||
// For 301/302, change POST to GET (common browser behavior)
|
||||
RedirectBehavior::DropBody
|
||||
} else {
|
||||
// For 307 and 308, the method and body are preserved
|
||||
// Also for 301/302 with non-POST methods
|
||||
RedirectBehavior::Preserve
|
||||
};
|
||||
|
||||
send_event(HttpResponseEvent::Redirect {
|
||||
url: current_url.clone(),
|
||||
status,
|
||||
behavior: behavior.clone(),
|
||||
});
|
||||
|
||||
// Handle method changes for certain redirect codes
|
||||
if matches!(behavior, RedirectBehavior::DropBody) {
|
||||
if current_method != "GET" {
|
||||
current_method = "GET".to_string();
|
||||
}
|
||||
// Remove content-related headers
|
||||
current_headers.retain(|h| {
|
||||
let name_lower = h.0.to_lowercase();
|
||||
!name_lower.starts_with("content-") && name_lower != "transfer-encoding"
|
||||
});
|
||||
}
|
||||
|
||||
// Reset body for next iteration (since it was moved in the send call)
|
||||
// For redirects that change method to GET or for all redirects since body was consumed
|
||||
current_body = None;
|
||||
|
||||
redirect_count += 1;
|
||||
}
|
||||
}
|
||||
|
||||
/// Remove sensitive headers when redirecting to a different host.
|
||||
/// This matches reqwest's `remove_sensitive_headers()` behavior and prevents
|
||||
/// credentials from being forwarded to third-party servers (e.g., an
|
||||
/// Authorization header sent from an API redirect to an S3 bucket).
|
||||
fn remove_sensitive_headers(
|
||||
headers: &mut Vec<(String, String)>,
|
||||
previous_url: &str,
|
||||
next_url: &str,
|
||||
) {
|
||||
let previous_host = Url::parse(previous_url).ok().and_then(|u| {
|
||||
u.host_str().map(|h| format!("{}:{}", h, u.port_or_known_default().unwrap_or(0)))
|
||||
});
|
||||
let next_host = Url::parse(next_url).ok().and_then(|u| {
|
||||
u.host_str().map(|h| format!("{}:{}", h, u.port_or_known_default().unwrap_or(0)))
|
||||
});
|
||||
if previous_host != next_host {
|
||||
headers.retain(|h| {
|
||||
let name_lower = h.0.to_lowercase();
|
||||
name_lower != "authorization"
|
||||
&& name_lower != "cookie"
|
||||
&& name_lower != "cookie2"
|
||||
&& name_lower != "proxy-authorization"
|
||||
&& name_lower != "www-authenticate"
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if a status code indicates a redirect
|
||||
fn is_redirect(status: u16) -> bool {
|
||||
matches!(status, 301 | 302 | 303 | 307 | 308)
|
||||
}
|
||||
|
||||
/// Extract the base URL (scheme + host) from a full URL
|
||||
fn extract_base_url(url: &str) -> Result<String> {
|
||||
// Find the position after "://"
|
||||
let scheme_end = url.find("://").ok_or_else(|| {
|
||||
crate::error::Error::RequestError(format!("Invalid URL format: {}", url))
|
||||
})?;
|
||||
|
||||
// Find the first '/' after the scheme
|
||||
let path_start = url[scheme_end + 3..].find('/');
|
||||
|
||||
if let Some(idx) = path_start {
|
||||
Ok(url[..scheme_end + 3 + idx].to_string())
|
||||
} else {
|
||||
// No path, return entire URL
|
||||
Ok(url.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
/// Extract the base path (everything except the last segment) from a URL
|
||||
fn extract_base_path(url: &str) -> Result<String> {
|
||||
if let Some(last_slash) = url.rfind('/') {
|
||||
// Don't include the trailing slash if it's part of the host
|
||||
if url[..last_slash].ends_with("://") || url[..last_slash].ends_with(':') {
|
||||
Ok(url.to_string())
|
||||
} else {
|
||||
Ok(url[..last_slash].to_string())
|
||||
}
|
||||
} else {
|
||||
Ok(url.to_string())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::decompress::ContentEncoding;
|
||||
use crate::sender::{HttpResponseEvent, HttpSender};
|
||||
use async_trait::async_trait;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use tokio::io::AsyncRead;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
/// Captured request metadata for test assertions
|
||||
#[derive(Debug, Clone)]
|
||||
#[allow(dead_code)]
|
||||
struct CapturedRequest {
|
||||
url: String,
|
||||
method: String,
|
||||
headers: Vec<(String, String)>,
|
||||
}
|
||||
|
||||
/// Mock sender for testing
|
||||
struct MockSender {
|
||||
responses: Arc<Mutex<Vec<MockResponse>>>,
|
||||
/// Captured requests for assertions
|
||||
captured_requests: Arc<Mutex<Vec<CapturedRequest>>>,
|
||||
}
|
||||
|
||||
struct MockResponse {
|
||||
status: u16,
|
||||
headers: Vec<(String, String)>,
|
||||
body: Vec<u8>,
|
||||
}
|
||||
|
||||
impl MockSender {
|
||||
fn new(responses: Vec<MockResponse>) -> Self {
|
||||
Self {
|
||||
responses: Arc::new(Mutex::new(responses)),
|
||||
captured_requests: Arc::new(Mutex::new(Vec::new())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl HttpSender for MockSender {
|
||||
async fn send(
|
||||
&self,
|
||||
request: SendableHttpRequest,
|
||||
_event_tx: mpsc::Sender<HttpResponseEvent>,
|
||||
) -> Result<HttpResponse> {
|
||||
// Capture the request metadata for later assertions
|
||||
self.captured_requests.lock().await.push(CapturedRequest {
|
||||
url: request.url.clone(),
|
||||
method: request.method.clone(),
|
||||
headers: request.headers.clone(),
|
||||
});
|
||||
|
||||
let mut responses = self.responses.lock().await;
|
||||
if responses.is_empty() {
|
||||
Err(crate::error::Error::RequestError("No more mock responses".to_string()))
|
||||
} else {
|
||||
let mock = responses.remove(0);
|
||||
// Create a simple in-memory stream from the body
|
||||
let body_stream: Pin<Box<dyn AsyncRead + Send>> =
|
||||
Box::pin(std::io::Cursor::new(mock.body));
|
||||
Ok(HttpResponse::new(
|
||||
mock.status,
|
||||
None, // status_reason
|
||||
mock.headers,
|
||||
Vec::new(),
|
||||
None, // content_length
|
||||
"https://example.com".to_string(), // url
|
||||
None, // remote_addr
|
||||
Some("HTTP/1.1".to_string()), // version
|
||||
body_stream,
|
||||
ContentEncoding::Identity,
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_transaction_no_redirect() {
|
||||
let response = MockResponse { status: 200, headers: Vec::new(), body: b"OK".to_vec() };
|
||||
let sender = MockSender::new(vec![response]);
|
||||
let transaction = HttpTransaction::new(sender);
|
||||
|
||||
let request = SendableHttpRequest {
|
||||
url: "https://example.com".to_string(),
|
||||
method: "GET".to_string(),
|
||||
headers: vec![],
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let (_tx, rx) = tokio::sync::watch::channel(false);
|
||||
let (event_tx, _event_rx) = mpsc::channel(100);
|
||||
let result = transaction.execute_with_cancellation(request, rx, event_tx).await.unwrap();
|
||||
assert_eq!(result.status, 200);
|
||||
|
||||
// Consume the body to verify it
|
||||
let (body, _) = result.bytes().await.unwrap();
|
||||
assert_eq!(body, b"OK");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_transaction_single_redirect() {
|
||||
let redirect_headers =
|
||||
vec![("Location".to_string(), "https://example.com/new".to_string())];
|
||||
|
||||
let responses = vec![
|
||||
MockResponse { status: 302, headers: redirect_headers, body: vec![] },
|
||||
MockResponse { status: 200, headers: Vec::new(), body: b"Final".to_vec() },
|
||||
];
|
||||
|
||||
let sender = MockSender::new(responses);
|
||||
let transaction = HttpTransaction::new(sender);
|
||||
|
||||
let request = SendableHttpRequest {
|
||||
url: "https://example.com/old".to_string(),
|
||||
method: "GET".to_string(),
|
||||
options: crate::types::SendableHttpRequestOptions {
|
||||
follow_redirects: true,
|
||||
..Default::default()
|
||||
},
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let (_tx, rx) = tokio::sync::watch::channel(false);
|
||||
let (event_tx, _event_rx) = mpsc::channel(100);
|
||||
let result = transaction.execute_with_cancellation(request, rx, event_tx).await.unwrap();
|
||||
assert_eq!(result.status, 200);
|
||||
|
||||
let (body, _) = result.bytes().await.unwrap();
|
||||
assert_eq!(body, b"Final");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_transaction_max_redirects_exceeded() {
|
||||
let redirect_headers =
|
||||
vec![("Location".to_string(), "https://example.com/loop".to_string())];
|
||||
|
||||
// Create more redirects than allowed
|
||||
let responses: Vec<MockResponse> = (0..12)
|
||||
.map(|_| MockResponse { status: 302, headers: redirect_headers.clone(), body: vec![] })
|
||||
.collect();
|
||||
|
||||
let sender = MockSender::new(responses);
|
||||
let transaction = HttpTransaction::with_max_redirects(sender, 10);
|
||||
|
||||
let request = SendableHttpRequest {
|
||||
url: "https://example.com/start".to_string(),
|
||||
method: "GET".to_string(),
|
||||
options: crate::types::SendableHttpRequestOptions {
|
||||
follow_redirects: true,
|
||||
..Default::default()
|
||||
},
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let (_tx, rx) = tokio::sync::watch::channel(false);
|
||||
let (event_tx, _event_rx) = mpsc::channel(100);
|
||||
let result = transaction.execute_with_cancellation(request, rx, event_tx).await;
|
||||
if let Err(crate::error::Error::RequestError(msg)) = result {
|
||||
assert!(msg.contains("Maximum redirect limit"));
|
||||
} else {
|
||||
panic!("Expected RequestError with max redirect message. Got {result:?}");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_redirect() {
|
||||
assert!(HttpTransaction::<MockSender>::is_redirect(301));
|
||||
assert!(HttpTransaction::<MockSender>::is_redirect(302));
|
||||
assert!(HttpTransaction::<MockSender>::is_redirect(303));
|
||||
assert!(HttpTransaction::<MockSender>::is_redirect(307));
|
||||
assert!(HttpTransaction::<MockSender>::is_redirect(308));
|
||||
assert!(!HttpTransaction::<MockSender>::is_redirect(200));
|
||||
assert!(!HttpTransaction::<MockSender>::is_redirect(404));
|
||||
assert!(!HttpTransaction::<MockSender>::is_redirect(500));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_base_url() {
|
||||
let result =
|
||||
HttpTransaction::<MockSender>::extract_base_url("https://example.com/path/to/resource");
|
||||
assert_eq!(result.unwrap(), "https://example.com");
|
||||
|
||||
let result = HttpTransaction::<MockSender>::extract_base_url("http://localhost:8080/api");
|
||||
assert_eq!(result.unwrap(), "http://localhost:8080");
|
||||
|
||||
let result = HttpTransaction::<MockSender>::extract_base_url("invalid-url");
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_base_path() {
|
||||
let result = HttpTransaction::<MockSender>::extract_base_path(
|
||||
"https://example.com/path/to/resource",
|
||||
);
|
||||
assert_eq!(result.unwrap(), "https://example.com/path/to");
|
||||
|
||||
let result = HttpTransaction::<MockSender>::extract_base_path("https://example.com/single");
|
||||
assert_eq!(result.unwrap(), "https://example.com");
|
||||
|
||||
let result = HttpTransaction::<MockSender>::extract_base_path("https://example.com/");
|
||||
assert_eq!(result.unwrap(), "https://example.com");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cookie_injection() {
|
||||
// Create a mock sender that verifies the Cookie header was injected
|
||||
struct CookieVerifyingSender {
|
||||
expected_cookie: String,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl HttpSender for CookieVerifyingSender {
|
||||
async fn send(
|
||||
&self,
|
||||
request: SendableHttpRequest,
|
||||
_event_tx: mpsc::Sender<HttpResponseEvent>,
|
||||
) -> Result<HttpResponse> {
|
||||
// Verify the Cookie header was injected
|
||||
let cookie_header =
|
||||
request.headers.iter().find(|(k, _)| k.eq_ignore_ascii_case("cookie"));
|
||||
|
||||
assert!(cookie_header.is_some(), "Cookie header should be present");
|
||||
assert!(
|
||||
cookie_header.unwrap().1.contains(&self.expected_cookie),
|
||||
"Cookie header should contain expected value"
|
||||
);
|
||||
|
||||
let body_stream: Pin<Box<dyn AsyncRead + Send>> =
|
||||
Box::pin(std::io::Cursor::new(vec![]));
|
||||
Ok(HttpResponse::new(
|
||||
200,
|
||||
None,
|
||||
Vec::new(),
|
||||
Vec::new(),
|
||||
None,
|
||||
"https://example.com".to_string(),
|
||||
None,
|
||||
Some("HTTP/1.1".to_string()),
|
||||
body_stream,
|
||||
ContentEncoding::Identity,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
use yaak_models::models::{Cookie, CookieDomain, CookieExpires};
|
||||
|
||||
// Create a cookie store with a test cookie
|
||||
let cookie = Cookie {
|
||||
raw_cookie: "session=abc123".to_string(),
|
||||
domain: CookieDomain::HostOnly("example.com".to_string()),
|
||||
expires: CookieExpires::SessionEnd,
|
||||
path: ("/".to_string(), false),
|
||||
};
|
||||
let cookie_store = CookieStore::from_cookies(vec![cookie]);
|
||||
|
||||
let sender = CookieVerifyingSender { expected_cookie: "session=abc123".to_string() };
|
||||
let transaction = HttpTransaction::with_cookie_store(sender, cookie_store);
|
||||
|
||||
let request = SendableHttpRequest {
|
||||
url: "https://example.com/api".to_string(),
|
||||
method: "GET".to_string(),
|
||||
headers: vec![],
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let (_tx, rx) = tokio::sync::watch::channel(false);
|
||||
let (event_tx, _event_rx) = mpsc::channel(100);
|
||||
let result = transaction.execute_with_cancellation(request, rx, event_tx).await;
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_set_cookie_parsing() {
|
||||
// Create a cookie store
|
||||
let cookie_store = CookieStore::new();
|
||||
|
||||
// Mock sender that returns a Set-Cookie header
|
||||
struct SetCookieSender;
|
||||
|
||||
#[async_trait]
|
||||
impl HttpSender for SetCookieSender {
|
||||
async fn send(
|
||||
&self,
|
||||
_request: SendableHttpRequest,
|
||||
_event_tx: mpsc::Sender<HttpResponseEvent>,
|
||||
) -> Result<HttpResponse> {
|
||||
let headers =
|
||||
vec![("set-cookie".to_string(), "session=xyz789; Path=/".to_string())];
|
||||
|
||||
let body_stream: Pin<Box<dyn AsyncRead + Send>> =
|
||||
Box::pin(std::io::Cursor::new(vec![]));
|
||||
Ok(HttpResponse::new(
|
||||
200,
|
||||
None,
|
||||
headers,
|
||||
Vec::new(),
|
||||
None,
|
||||
"https://example.com".to_string(),
|
||||
None,
|
||||
Some("HTTP/1.1".to_string()),
|
||||
body_stream,
|
||||
ContentEncoding::Identity,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
let sender = SetCookieSender;
|
||||
let transaction = HttpTransaction::with_cookie_store(sender, cookie_store.clone());
|
||||
|
||||
let request = SendableHttpRequest {
|
||||
url: "https://example.com/login".to_string(),
|
||||
method: "POST".to_string(),
|
||||
headers: vec![],
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let (_tx, rx) = tokio::sync::watch::channel(false);
|
||||
let (event_tx, _event_rx) = mpsc::channel(100);
|
||||
let result = transaction.execute_with_cancellation(request, rx, event_tx).await;
|
||||
assert!(result.is_ok());
|
||||
|
||||
// Verify the cookie was stored
|
||||
let cookies = cookie_store.get_all_cookies();
|
||||
assert_eq!(cookies.len(), 1);
|
||||
assert!(cookies[0].raw_cookie.contains("session=xyz789"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_multiple_set_cookie_headers() {
|
||||
// Create a cookie store
|
||||
let cookie_store = CookieStore::new();
|
||||
|
||||
// Mock sender that returns multiple Set-Cookie headers
|
||||
struct MultiSetCookieSender;
|
||||
|
||||
#[async_trait]
|
||||
impl HttpSender for MultiSetCookieSender {
|
||||
async fn send(
|
||||
&self,
|
||||
_request: SendableHttpRequest,
|
||||
_event_tx: mpsc::Sender<HttpResponseEvent>,
|
||||
) -> Result<HttpResponse> {
|
||||
// Multiple Set-Cookie headers (this is standard HTTP behavior)
|
||||
let headers = vec![
|
||||
("set-cookie".to_string(), "session=abc123; Path=/".to_string()),
|
||||
("set-cookie".to_string(), "user_id=42; Path=/".to_string()),
|
||||
(
|
||||
"set-cookie".to_string(),
|
||||
"preferences=dark; Path=/; Max-Age=86400".to_string(),
|
||||
),
|
||||
];
|
||||
|
||||
let body_stream: Pin<Box<dyn AsyncRead + Send>> =
|
||||
Box::pin(std::io::Cursor::new(vec![]));
|
||||
Ok(HttpResponse::new(
|
||||
200,
|
||||
None,
|
||||
headers,
|
||||
Vec::new(),
|
||||
None,
|
||||
"https://example.com".to_string(),
|
||||
None,
|
||||
Some("HTTP/1.1".to_string()),
|
||||
body_stream,
|
||||
ContentEncoding::Identity,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
let sender = MultiSetCookieSender;
|
||||
let transaction = HttpTransaction::with_cookie_store(sender, cookie_store.clone());
|
||||
|
||||
let request = SendableHttpRequest {
|
||||
url: "https://example.com/login".to_string(),
|
||||
method: "POST".to_string(),
|
||||
headers: vec![],
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let (_tx, rx) = tokio::sync::watch::channel(false);
|
||||
let (event_tx, _event_rx) = mpsc::channel(100);
|
||||
let result = transaction.execute_with_cancellation(request, rx, event_tx).await;
|
||||
assert!(result.is_ok());
|
||||
|
||||
// Verify all three cookies were stored
|
||||
let cookies = cookie_store.get_all_cookies();
|
||||
assert_eq!(cookies.len(), 3, "All three Set-Cookie headers should be parsed and stored");
|
||||
|
||||
let cookie_values: Vec<&str> = cookies.iter().map(|c| c.raw_cookie.as_str()).collect();
|
||||
assert!(
|
||||
cookie_values.iter().any(|c| c.contains("session=abc123")),
|
||||
"session cookie should be stored"
|
||||
);
|
||||
assert!(
|
||||
cookie_values.iter().any(|c| c.contains("user_id=42")),
|
||||
"user_id cookie should be stored"
|
||||
);
|
||||
assert!(
|
||||
cookie_values.iter().any(|c| c.contains("preferences=dark")),
|
||||
"preferences cookie should be stored"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cookies_across_redirects() {
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
// Create a cookie store
|
||||
let cookie_store = CookieStore::new();
|
||||
|
||||
// Track request count
|
||||
let request_count = Arc::new(AtomicUsize::new(0));
|
||||
let request_count_clone = request_count.clone();
|
||||
|
||||
struct RedirectWithCookiesSender {
|
||||
request_count: Arc<AtomicUsize>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl HttpSender for RedirectWithCookiesSender {
|
||||
async fn send(
|
||||
&self,
|
||||
request: SendableHttpRequest,
|
||||
_event_tx: mpsc::Sender<HttpResponseEvent>,
|
||||
) -> Result<HttpResponse> {
|
||||
let count = self.request_count.fetch_add(1, Ordering::SeqCst);
|
||||
|
||||
let (status, headers) = if count == 0 {
|
||||
// First request: return redirect with Set-Cookie
|
||||
let h = vec![
|
||||
("location".to_string(), "https://example.com/final".to_string()),
|
||||
("set-cookie".to_string(), "redirect_cookie=value1".to_string()),
|
||||
];
|
||||
(302, h)
|
||||
} else {
|
||||
// Second request: verify cookie was sent
|
||||
let cookie_header =
|
||||
request.headers.iter().find(|(k, _)| k.eq_ignore_ascii_case("cookie"));
|
||||
|
||||
assert!(cookie_header.is_some(), "Cookie header should be present on redirect");
|
||||
assert!(
|
||||
cookie_header.unwrap().1.contains("redirect_cookie=value1"),
|
||||
"Redirect cookie should be included"
|
||||
);
|
||||
|
||||
(200, Vec::new())
|
||||
};
|
||||
|
||||
let body_stream: Pin<Box<dyn AsyncRead + Send>> =
|
||||
Box::pin(std::io::Cursor::new(vec![]));
|
||||
Ok(HttpResponse::new(
|
||||
status,
|
||||
None,
|
||||
headers,
|
||||
Vec::new(),
|
||||
None,
|
||||
"https://example.com".to_string(),
|
||||
None,
|
||||
Some("HTTP/1.1".to_string()),
|
||||
body_stream,
|
||||
ContentEncoding::Identity,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
let sender = RedirectWithCookiesSender { request_count: request_count_clone };
|
||||
let transaction = HttpTransaction::with_cookie_store(sender, cookie_store);
|
||||
|
||||
let request = SendableHttpRequest {
|
||||
url: "https://example.com/start".to_string(),
|
||||
method: "GET".to_string(),
|
||||
headers: vec![],
|
||||
options: crate::types::SendableHttpRequestOptions {
|
||||
follow_redirects: true,
|
||||
..Default::default()
|
||||
},
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let (_tx, rx) = tokio::sync::watch::channel(false);
|
||||
let (event_tx, _event_rx) = mpsc::channel(100);
|
||||
let result = transaction.execute_with_cancellation(request, rx, event_tx).await;
|
||||
assert!(result.is_ok());
|
||||
assert_eq!(request_count.load(Ordering::SeqCst), 2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cross_origin_redirect_strips_auth_headers() {
|
||||
// Redirect from api.example.com -> s3.amazonaws.com should strip Authorization
|
||||
let responses = vec![
|
||||
MockResponse {
|
||||
status: 302,
|
||||
headers: vec![(
|
||||
"Location".to_string(),
|
||||
"https://s3.amazonaws.com/bucket/file.pdf".to_string(),
|
||||
)],
|
||||
body: vec![],
|
||||
},
|
||||
MockResponse { status: 200, headers: Vec::new(), body: b"PDF content".to_vec() },
|
||||
];
|
||||
|
||||
let sender = MockSender::new(responses);
|
||||
let captured = sender.captured_requests.clone();
|
||||
let transaction = HttpTransaction::new(sender);
|
||||
|
||||
let request = SendableHttpRequest {
|
||||
url: "https://api.example.com/download".to_string(),
|
||||
method: "GET".to_string(),
|
||||
headers: vec![
|
||||
("Authorization".to_string(), "Basic dXNlcjpwYXNz".to_string()),
|
||||
("Accept".to_string(), "application/pdf".to_string()),
|
||||
],
|
||||
options: crate::types::SendableHttpRequestOptions {
|
||||
follow_redirects: true,
|
||||
..Default::default()
|
||||
},
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let (_tx, rx) = tokio::sync::watch::channel(false);
|
||||
let (event_tx, _event_rx) = mpsc::channel(100);
|
||||
let result = transaction.execute_with_cancellation(request, rx, event_tx).await.unwrap();
|
||||
assert_eq!(result.status, 200);
|
||||
|
||||
let requests = captured.lock().await;
|
||||
assert_eq!(requests.len(), 2);
|
||||
|
||||
// First request should have the Authorization header
|
||||
assert!(
|
||||
requests[0].headers.iter().any(|(k, _)| k.eq_ignore_ascii_case("authorization")),
|
||||
"First request should have Authorization header"
|
||||
);
|
||||
|
||||
// Second request (to different host) should NOT have the Authorization header
|
||||
assert!(
|
||||
!requests[1].headers.iter().any(|(k, _)| k.eq_ignore_ascii_case("authorization")),
|
||||
"Redirected request to different host should NOT have Authorization header"
|
||||
);
|
||||
|
||||
// Non-sensitive headers should still be present
|
||||
assert!(
|
||||
requests[1].headers.iter().any(|(k, _)| k.eq_ignore_ascii_case("accept")),
|
||||
"Non-sensitive headers should be preserved across cross-origin redirects"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_same_origin_redirect_preserves_auth_headers() {
|
||||
// Redirect within the same host should keep Authorization
|
||||
let responses = vec![
|
||||
MockResponse {
|
||||
status: 302,
|
||||
headers: vec![(
|
||||
"Location".to_string(),
|
||||
"https://api.example.com/v2/download".to_string(),
|
||||
)],
|
||||
body: vec![],
|
||||
},
|
||||
MockResponse { status: 200, headers: Vec::new(), body: b"OK".to_vec() },
|
||||
];
|
||||
|
||||
let sender = MockSender::new(responses);
|
||||
let captured = sender.captured_requests.clone();
|
||||
let transaction = HttpTransaction::new(sender);
|
||||
|
||||
let request = SendableHttpRequest {
|
||||
url: "https://api.example.com/v1/download".to_string(),
|
||||
method: "GET".to_string(),
|
||||
headers: vec![
|
||||
("Authorization".to_string(), "Bearer token123".to_string()),
|
||||
("Accept".to_string(), "application/json".to_string()),
|
||||
],
|
||||
options: crate::types::SendableHttpRequestOptions {
|
||||
follow_redirects: true,
|
||||
..Default::default()
|
||||
},
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let (_tx, rx) = tokio::sync::watch::channel(false);
|
||||
let (event_tx, _event_rx) = mpsc::channel(100);
|
||||
let result = transaction.execute_with_cancellation(request, rx, event_tx).await.unwrap();
|
||||
assert_eq!(result.status, 200);
|
||||
|
||||
let requests = captured.lock().await;
|
||||
assert_eq!(requests.len(), 2);
|
||||
|
||||
// Both requests should have the Authorization header (same host)
|
||||
assert!(
|
||||
requests[0].headers.iter().any(|(k, _)| k.eq_ignore_ascii_case("authorization")),
|
||||
"First request should have Authorization header"
|
||||
);
|
||||
assert!(
|
||||
requests[1].headers.iter().any(|(k, _)| k.eq_ignore_ascii_case("authorization")),
|
||||
"Redirected request to same host should preserve Authorization header"
|
||||
);
|
||||
}
|
||||
}
|
||||
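The two redirect tests above pin down the auth-stripping rule: credentials survive same-origin redirects and are dropped cross-origin. A minimal sketch of the origin check they imply, assuming the `url` crate for parsing (the crate's actual comparison may differ):

```rust
use url::Url;

/// Hypothetical helper: a redirect is "cross-origin" when the scheme,
/// host, or effective port changes; sensitive headers such as
/// Authorization should then be removed before following it.
fn is_cross_origin(from: &Url, to: &Url) -> bool {
    from.scheme() != to.scheme()
        || from.host_str() != to.host_str()
        || from.port_or_known_default() != to.port_or_known_default()
}

fn main() {
    let start = Url::parse("https://api.example.com/download").unwrap();
    let other = Url::parse("https://s3.amazonaws.com/bucket/file.pdf").unwrap();
    let same = Url::parse("https://api.example.com/v2/download").unwrap();
    assert!(is_cross_origin(&start, &other)); // strip Authorization
    assert!(!is_cross_origin(&start, &same)); // keep Authorization
}
```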
@@ -1,12 +0,0 @@
CREATE TABLE body_chunks
(
    id          TEXT PRIMARY KEY,
    body_id     TEXT NOT NULL,
    chunk_index INTEGER NOT NULL,
    data        BLOB NOT NULL,
    created_at  DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')) NOT NULL,

    UNIQUE (body_id, chunk_index)
);

CREATE INDEX idx_body_chunks_body_id ON body_chunks (body_id, chunk_index);
@@ -1,5 +0,0 @@
fn main() {
    // Migrations are embedded with include_dir!, so trigger rebuilds when SQL files change.
    println!("cargo:rerun-if-changed=migrations");
    println!("cargo:rerun-if-changed=blob_migrations");
}
@@ -1,2 +0,0 @@
ALTER TABLE http_responses
    ADD COLUMN request_content_length INTEGER;
@@ -1 +0,0 @@
ALTER TABLE settings ADD COLUMN hotkeys TEXT DEFAULT '{}' NOT NULL;
@@ -1,2 +0,0 @@
-- Add DNS resolution timing to http_responses
ALTER TABLE http_responses ADD COLUMN elapsed_dns INTEGER DEFAULT 0 NOT NULL;
@@ -1,2 +0,0 @@
-- Add DNS overrides setting to workspaces
ALTER TABLE workspaces ADD COLUMN setting_dns_overrides TEXT DEFAULT '[]' NOT NULL;
@@ -1,12 +0,0 @@
-- Filter out headers that match the hardcoded defaults (User-Agent: yaak, Accept: */*),
-- keeping any other custom headers the user may have added.
UPDATE workspaces
SET headers = (
    SELECT json_group_array(json(value))
    FROM json_each(headers)
    WHERE NOT (
        (LOWER(json_extract(value, '$.name')) = 'user-agent' AND json_extract(value, '$.value') = 'yaak')
        OR (LOWER(json_extract(value, '$.name')) = 'accept' AND json_extract(value, '$.value') = '*/*')
    )
)
WHERE json_array_length(headers) > 0;
@@ -1,12 +0,0 @@
CREATE TABLE model_changes
(
    id            INTEGER PRIMARY KEY AUTOINCREMENT,
    model         TEXT NOT NULL,
    model_id      TEXT NOT NULL,
    change        TEXT NOT NULL,
    update_source TEXT NOT NULL,
    payload       TEXT NOT NULL,
    created_at    DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')) NOT NULL
);

CREATE INDEX idx_model_changes_created_at ON model_changes (created_at);
@@ -1,3 +0,0 @@
-- Remove stale plugin rows left over from the brief period when faker shipped as bundled.
DELETE FROM plugins
WHERE directory LIKE '%template-function-faker';
@@ -1,354 +0,0 @@
use crate::error::Result;
use crate::util::generate_prefixed_id;
use include_dir::{Dir, include_dir};
use log::{debug, info};
use r2d2::Pool;
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::{OptionalExtension, params};
use std::sync::{Arc, Mutex};

static BLOB_MIGRATIONS_DIR: Dir = include_dir!("$CARGO_MANIFEST_DIR/blob_migrations");

/// A chunk of body data stored in the blob database.
#[derive(Debug, Clone)]
pub struct BodyChunk {
    pub id: String,
    pub body_id: String,
    pub chunk_index: i32,
    pub data: Vec<u8>,
}

impl BodyChunk {
    pub fn new(body_id: impl Into<String>, chunk_index: i32, data: Vec<u8>) -> Self {
        Self { id: generate_prefixed_id("bc"), body_id: body_id.into(), chunk_index, data }
    }
}

/// Manages the blob database connection pool.
#[derive(Debug, Clone)]
pub struct BlobManager {
    pool: Arc<Mutex<Pool<SqliteConnectionManager>>>,
}

impl BlobManager {
    pub fn new(pool: Pool<SqliteConnectionManager>) -> Self {
        Self { pool: Arc::new(Mutex::new(pool)) }
    }

    pub fn connect(&self) -> BlobContext {
        let conn = self
            .pool
            .lock()
            .expect("Failed to gain lock on blob DB")
            .get()
            .expect("Failed to get blob DB connection from pool");
        BlobContext { conn }
    }
}

/// Context for blob database operations.
pub struct BlobContext {
    conn: r2d2::PooledConnection<SqliteConnectionManager>,
}

impl BlobContext {
    /// Insert a single chunk.
    pub fn insert_chunk(&self, chunk: &BodyChunk) -> Result<()> {
        self.conn.execute(
            "INSERT INTO body_chunks (id, body_id, chunk_index, data) VALUES (?1, ?2, ?3, ?4)",
            params![chunk.id, chunk.body_id, chunk.chunk_index, chunk.data],
        )?;
        Ok(())
    }

    /// Get all chunks for a body, ordered by chunk_index.
    pub fn get_chunks(&self, body_id: &str) -> Result<Vec<BodyChunk>> {
        let mut stmt = self.conn.prepare(
            "SELECT id, body_id, chunk_index, data FROM body_chunks
             WHERE body_id = ?1 ORDER BY chunk_index ASC",
        )?;

        let chunks = stmt
            .query_map(params![body_id], |row| {
                Ok(BodyChunk {
                    id: row.get(0)?,
                    body_id: row.get(1)?,
                    chunk_index: row.get(2)?,
                    data: row.get(3)?,
                })
            })?
            .collect::<std::result::Result<Vec<_>, _>>()?;

        Ok(chunks)
    }

    /// Delete all chunks for a body.
    pub fn delete_chunks(&self, body_id: &str) -> Result<()> {
        self.conn.execute("DELETE FROM body_chunks WHERE body_id = ?1", params![body_id])?;
        Ok(())
    }

    /// Delete all chunks matching a body_id prefix (e.g., "rs_abc123.%" to delete all bodies for a response).
    pub fn delete_chunks_like(&self, body_id_prefix: &str) -> Result<()> {
        self.conn
            .execute("DELETE FROM body_chunks WHERE body_id LIKE ?1", params![body_id_prefix])?;
        Ok(())
    }
}

impl BlobContext {
    /// Get total size of a body without loading data.
    pub fn get_body_size(&self, body_id: &str) -> Result<usize> {
        let size: i64 = self
            .conn
            .query_row(
                "SELECT COALESCE(SUM(LENGTH(data)), 0) FROM body_chunks WHERE body_id = ?1",
                params![body_id],
                |row| row.get(0),
            )
            .unwrap_or(0);
        Ok(size as usize)
    }

    /// Check if a body exists.
    pub fn body_exists(&self, body_id: &str) -> Result<bool> {
        let count: i64 = self
            .conn
            .query_row(
                "SELECT COUNT(*) FROM body_chunks WHERE body_id = ?1",
                params![body_id],
                |row| row.get(0),
            )
            .unwrap_or(0);
        Ok(count > 0)
    }
}

/// Run migrations for the blob database.
pub fn migrate_blob_db(pool: &Pool<SqliteConnectionManager>) -> Result<()> {
    info!("Running blob database migrations");

    // Create migrations tracking table
    pool.get()?.execute(
        "CREATE TABLE IF NOT EXISTS _blob_migrations (
            version TEXT PRIMARY KEY,
            description TEXT NOT NULL,
            applied_at DATETIME DEFAULT CURRENT_TIMESTAMP NOT NULL
        )",
        [],
    )?;

    // Read and sort all .sql files
    let mut entries: Vec<_> = BLOB_MIGRATIONS_DIR
        .entries()
        .iter()
        .filter(|e| e.path().extension().map(|ext| ext == "sql").unwrap_or(false))
        .collect();

    entries.sort_by_key(|e| e.path());

    let mut ran_migrations = 0;
    for entry in &entries {
        let filename = entry.path().file_name().unwrap().to_str().unwrap();
        let version = filename.split('_').next().unwrap();

        // Check if already applied
        let already_applied: Option<i64> = pool
            .get()?
            .query_row("SELECT 1 FROM _blob_migrations WHERE version = ?", [version], |r| r.get(0))
            .optional()?;

        if already_applied.is_some() {
            debug!("Skipping already applied blob migration: {}", filename);
            continue;
        }

        let sql =
            entry.as_file().unwrap().contents_utf8().expect("Failed to read blob migration file");

        info!("Applying blob migration: {}", filename);
        let conn = pool.get()?;
        conn.execute_batch(sql)?;

        // Record migration
        conn.execute(
            "INSERT INTO _blob_migrations (version, description) VALUES (?, ?)",
            params![version, filename],
        )?;

        ran_migrations += 1;
    }

    if ran_migrations == 0 {
        info!("No blob migrations to run");
    } else {
        info!("Ran {} blob migration(s)", ran_migrations);
    }

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    fn create_test_pool() -> Pool<SqliteConnectionManager> {
        let manager = SqliteConnectionManager::memory();
        let pool = Pool::builder().max_size(1).build(manager).unwrap();
        migrate_blob_db(&pool).unwrap();
        pool
    }

    #[test]
    fn test_insert_and_get_chunks() {
        let pool = create_test_pool();
        let manager = BlobManager::new(pool);
        let ctx = manager.connect();

        let body_id = "rs_test123.request";
        let chunk1 = BodyChunk::new(body_id, 0, b"Hello, ".to_vec());
        let chunk2 = BodyChunk::new(body_id, 1, b"World!".to_vec());

        ctx.insert_chunk(&chunk1).unwrap();
        ctx.insert_chunk(&chunk2).unwrap();

        let chunks = ctx.get_chunks(body_id).unwrap();
        assert_eq!(chunks.len(), 2);
        assert_eq!(chunks[0].chunk_index, 0);
        assert_eq!(chunks[0].data, b"Hello, ");
        assert_eq!(chunks[1].chunk_index, 1);
        assert_eq!(chunks[1].data, b"World!");
    }

    #[test]
    fn test_get_chunks_ordered_by_index() {
        let pool = create_test_pool();
        let manager = BlobManager::new(pool);
        let ctx = manager.connect();

        let body_id = "rs_test123.request";

        // Insert out of order
        ctx.insert_chunk(&BodyChunk::new(body_id, 2, b"C".to_vec())).unwrap();
        ctx.insert_chunk(&BodyChunk::new(body_id, 0, b"A".to_vec())).unwrap();
        ctx.insert_chunk(&BodyChunk::new(body_id, 1, b"B".to_vec())).unwrap();

        let chunks = ctx.get_chunks(body_id).unwrap();
        assert_eq!(chunks.len(), 3);
        assert_eq!(chunks[0].data, b"A");
        assert_eq!(chunks[1].data, b"B");
        assert_eq!(chunks[2].data, b"C");
    }

    #[test]
    fn test_delete_chunks() {
        let pool = create_test_pool();
        let manager = BlobManager::new(pool);
        let ctx = manager.connect();

        let body_id = "rs_test123.request";
        ctx.insert_chunk(&BodyChunk::new(body_id, 0, b"data".to_vec())).unwrap();

        assert!(ctx.body_exists(body_id).unwrap());

        ctx.delete_chunks(body_id).unwrap();

        assert!(!ctx.body_exists(body_id).unwrap());
        assert_eq!(ctx.get_chunks(body_id).unwrap().len(), 0);
    }

    #[test]
    fn test_delete_chunks_like() {
        let pool = create_test_pool();
        let manager = BlobManager::new(pool);
        let ctx = manager.connect();

        // Insert chunks for same response but different body types
        ctx.insert_chunk(&BodyChunk::new("rs_abc.request", 0, b"req".to_vec())).unwrap();
        ctx.insert_chunk(&BodyChunk::new("rs_abc.response", 0, b"resp".to_vec())).unwrap();
        ctx.insert_chunk(&BodyChunk::new("rs_other.request", 0, b"other".to_vec())).unwrap();

        // Delete all bodies for rs_abc
        ctx.delete_chunks_like("rs_abc.%").unwrap();

        // rs_abc bodies should be gone
        assert!(!ctx.body_exists("rs_abc.request").unwrap());
        assert!(!ctx.body_exists("rs_abc.response").unwrap());

        // rs_other should still exist
        assert!(ctx.body_exists("rs_other.request").unwrap());
    }

    #[test]
    fn test_get_body_size() {
        let pool = create_test_pool();
        let manager = BlobManager::new(pool);
        let ctx = manager.connect();

        let body_id = "rs_test123.request";
        ctx.insert_chunk(&BodyChunk::new(body_id, 0, b"Hello".to_vec())).unwrap();
        ctx.insert_chunk(&BodyChunk::new(body_id, 1, b"World".to_vec())).unwrap();

        let size = ctx.get_body_size(body_id).unwrap();
        assert_eq!(size, 10); // "Hello" + "World" = 10 bytes
    }

    #[test]
    fn test_get_body_size_empty() {
        let pool = create_test_pool();
        let manager = BlobManager::new(pool);
        let ctx = manager.connect();

        let size = ctx.get_body_size("nonexistent").unwrap();
        assert_eq!(size, 0);
    }

    #[test]
    fn test_body_exists() {
        let pool = create_test_pool();
        let manager = BlobManager::new(pool);
        let ctx = manager.connect();

        assert!(!ctx.body_exists("rs_test.request").unwrap());

        ctx.insert_chunk(&BodyChunk::new("rs_test.request", 0, b"data".to_vec())).unwrap();

        assert!(ctx.body_exists("rs_test.request").unwrap());
    }

    #[test]
    fn test_multiple_bodies_isolated() {
        let pool = create_test_pool();
        let manager = BlobManager::new(pool);
        let ctx = manager.connect();

        ctx.insert_chunk(&BodyChunk::new("body1", 0, b"data1".to_vec())).unwrap();
        ctx.insert_chunk(&BodyChunk::new("body2", 0, b"data2".to_vec())).unwrap();

        let chunks1 = ctx.get_chunks("body1").unwrap();
        let chunks2 = ctx.get_chunks("body2").unwrap();

        assert_eq!(chunks1.len(), 1);
        assert_eq!(chunks1[0].data, b"data1");
        assert_eq!(chunks2.len(), 1);
        assert_eq!(chunks2[0].data, b"data2");
    }

    #[test]
    fn test_large_chunk() {
        let pool = create_test_pool();
        let manager = BlobManager::new(pool);
        let ctx = manager.connect();

        // 1MB chunk
        let large_data: Vec<u8> = (0..1024 * 1024).map(|i| (i % 256) as u8).collect();
        let body_id = "rs_large.request";

        ctx.insert_chunk(&BodyChunk::new(body_id, 0, large_data.clone())).unwrap();

        let chunks = ctx.get_chunks(body_id).unwrap();
        assert_eq!(chunks.len(), 1);
        assert_eq!(chunks[0].data, large_data);
        assert_eq!(ctx.get_body_size(body_id).unwrap(), 1024 * 1024);
    }
}
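A sketch of how a caller might stream a large body through this API, splitting on write and reassembling on read. It uses only the `BlobManager`/`BlobContext` methods shown above; the 64 KiB chunk size is an arbitrary assumption, not the app's real setting:

```rust
use yaak_models::blob_manager::{BlobManager, BodyChunk};
use yaak_models::error::Result;

// Assumed chunk size, for illustration only
const CHUNK_SIZE: usize = 64 * 1024;

fn write_body(manager: &BlobManager, body_id: &str, body: &[u8]) -> Result<()> {
    let ctx = manager.connect();
    for (i, chunk) in body.chunks(CHUNK_SIZE).enumerate() {
        ctx.insert_chunk(&BodyChunk::new(body_id, i as i32, chunk.to_vec()))?;
    }
    Ok(())
}

fn read_body(manager: &BlobManager, body_id: &str) -> Result<Vec<u8>> {
    let ctx = manager.connect();
    // get_chunks orders rows by chunk_index, so plain concatenation reassembles the body
    Ok(ctx.get_chunks(body_id)?.into_iter().flat_map(|c| c.data).collect())
}
```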
@@ -1,100 +0,0 @@
use crate::blob_manager::{BlobManager, migrate_blob_db};
use crate::error::{Error, Result};
use crate::migrate::migrate_db;
use crate::query_manager::QueryManager;
use crate::util::ModelPayload;
use log::info;
use r2d2::Pool;
use r2d2_sqlite::SqliteConnectionManager;
use std::fs::create_dir_all;
use std::path::Path;
use std::sync::mpsc;
use std::time::Duration;

pub mod blob_manager;
mod connection_or_tx;
pub mod db_context;
pub mod error;
pub mod migrate;
pub mod models;
pub mod queries;
pub mod query_manager;
pub mod render;
pub mod util;

/// Initialize the database managers for standalone (non-Tauri) usage.
///
/// Returns a tuple of (QueryManager, BlobManager, event_receiver).
/// The event_receiver can be used to listen for model change events.
pub fn init_standalone(
    db_path: impl AsRef<Path>,
    blob_path: impl AsRef<Path>,
) -> Result<(QueryManager, BlobManager, mpsc::Receiver<ModelPayload>)> {
    let db_path = db_path.as_ref();
    let blob_path = blob_path.as_ref();

    // Create parent directories if needed
    if let Some(parent) = db_path.parent() {
        create_dir_all(parent)?;
    }
    if let Some(parent) = blob_path.parent() {
        create_dir_all(parent)?;
    }

    // Main database pool
    info!("Initializing app database {db_path:?}");
    let manager = SqliteConnectionManager::file(db_path);
    let pool = Pool::builder()
        .max_size(100)
        .connection_timeout(Duration::from_secs(10))
        .build(manager)
        .map_err(|e| Error::Database(e.to_string()))?;

    migrate_db(&pool)?;

    info!("Initializing blobs database {blob_path:?}");

    // Blob database pool
    let blob_manager = SqliteConnectionManager::file(blob_path);
    let blob_pool = Pool::builder()
        .max_size(50)
        .connection_timeout(Duration::from_secs(10))
        .build(blob_manager)
        .map_err(|e| Error::Database(e.to_string()))?;

    migrate_blob_db(&blob_pool)?;

    let (tx, rx) = mpsc::channel();
    let query_manager = QueryManager::new(pool, tx);
    let blob_manager = BlobManager::new(blob_pool);

    Ok((query_manager, blob_manager, rx))
}

/// Initialize the database managers with in-memory SQLite databases.
/// Useful for testing and CI environments.
pub fn init_in_memory() -> Result<(QueryManager, BlobManager, mpsc::Receiver<ModelPayload>)> {
    // Main database pool
    let manager = SqliteConnectionManager::memory();
    let pool = Pool::builder()
        .max_size(1) // In-memory DB doesn't support multiple connections
        .build(manager)
        .map_err(|e| Error::Database(e.to_string()))?;

    migrate_db(&pool)?;

    // Blob database pool
    let blob_manager = SqliteConnectionManager::memory();
    let blob_pool = Pool::builder()
        .max_size(1)
        .build(blob_manager)
        .map_err(|e| Error::Database(e.to_string()))?;

    migrate_blob_db(&blob_pool)?;

    let (tx, rx) = mpsc::channel();
    let query_manager = QueryManager::new(pool, tx);
    let blob_manager = BlobManager::new(blob_pool);

    Ok((query_manager, blob_manager, rx))
}
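For context, a minimal sketch of how a CLI entry point might consume `init_standalone`. The paths are placeholders, and the debug print assumes `ModelPayload` implements `Debug`:

```rust
fn main() -> yaak_models::error::Result<()> {
    let (query_manager, _blob_manager, events) =
        yaak_models::init_standalone("/tmp/yaak/db.sqlite", "/tmp/yaak/blobs.sqlite")?;

    // Drain model-change events on a background thread so DB writers never block
    std::thread::spawn(move || {
        for payload in events {
            eprintln!("model changed: {payload:?}");
        }
    });

    let _db = query_manager.connect();
    // ... run CLI queries against the same database the desktop app uses
    Ok(())
}
```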
@@ -1,18 +0,0 @@
use crate::db_context::DbContext;
use crate::error::Result;
use crate::models::{HttpResponseEvent, HttpResponseEventIden};
use crate::util::UpdateSource;

impl<'a> DbContext<'a> {
    pub fn list_http_response_events(&self, response_id: &str) -> Result<Vec<HttpResponseEvent>> {
        self.find_many(HttpResponseEventIden::ResponseId, response_id, None)
    }

    pub fn upsert_http_response_event(
        &self,
        http_response_event: &HttpResponseEvent,
        source: &UpdateSource,
    ) -> Result<HttpResponseEvent> {
        self.upsert(http_response_event, source)
    }
}
@@ -1,46 +0,0 @@
pub mod any_request;
mod batch;
mod cookie_jars;
mod environments;
mod folders;
mod graphql_introspections;
mod grpc_connections;
mod grpc_events;
mod grpc_requests;
mod http_requests;
mod http_response_events;
mod http_responses;
mod key_values;
mod model_changes;
mod plugin_key_values;
mod plugins;
mod settings;
mod sync_states;
mod websocket_connections;
mod websocket_events;
mod websocket_requests;
mod workspace_metas;
pub mod workspaces;

pub use model_changes::PersistedModelChange;

const MAX_HISTORY_ITEMS: usize = 20;

use crate::models::HttpRequestHeader;
use std::collections::HashMap;

/// Deduplicate headers by name (case-insensitive), keeping the latest (most specific) value.
/// Preserves the order of first occurrence for each header name.
pub(crate) fn dedupe_headers(headers: Vec<HttpRequestHeader>) -> Vec<HttpRequestHeader> {
    let mut index_by_name: HashMap<String, usize> = HashMap::new();
    let mut deduped: Vec<HttpRequestHeader> = Vec::new();
    for header in headers {
        let key = header.name.to_lowercase();
        if let Some(&idx) = index_by_name.get(&key) {
            deduped[idx] = header;
        } else {
            index_by_name.insert(key, deduped.len());
            deduped.push(header);
        }
    }
    deduped
}
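To make the `dedupe_headers` contract concrete, a sketch assuming `HttpRequestHeader` derives `Default` (its generated bindings elsewhere in this diff show optional `enabled`/`id` fields): a later duplicate overwrites the earlier one in place, so position follows the first occurrence while the value follows the last:

```rust
let headers = vec![
    HttpRequestHeader { name: "Accept".into(), value: "text/html".into(), ..Default::default() },
    HttpRequestHeader { name: "X-Id".into(), value: "1".into(), ..Default::default() },
    HttpRequestHeader { name: "accept".into(), value: "application/json".into(), ..Default::default() },
];
let deduped = dedupe_headers(headers);
assert_eq!(deduped.len(), 2);
assert_eq!(deduped[0].value, "application/json"); // "Accept" slot, updated by the later "accept"
assert_eq!(deduped[1].value, "1");
```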
@@ -1,289 +0,0 @@
use crate::db_context::DbContext;
use crate::error::Result;
use crate::util::ModelPayload;
use rusqlite::params;
use rusqlite::types::Type;

#[derive(Debug, Clone)]
pub struct PersistedModelChange {
    pub id: i64,
    pub created_at: String,
    pub payload: ModelPayload,
}

impl<'a> DbContext<'a> {
    pub fn list_model_changes_after(
        &self,
        after_id: i64,
        limit: usize,
    ) -> Result<Vec<PersistedModelChange>> {
        let mut stmt = self.conn.prepare(
            r#"
            SELECT id, created_at, payload
            FROM model_changes
            WHERE id > ?1
            ORDER BY id ASC
            LIMIT ?2
            "#,
        )?;

        let items = stmt.query_map(params![after_id, limit as i64], |row| {
            let id: i64 = row.get(0)?;
            let created_at: String = row.get(1)?;
            let payload_raw: String = row.get(2)?;
            let payload = serde_json::from_str::<ModelPayload>(&payload_raw).map_err(|e| {
                rusqlite::Error::FromSqlConversionFailure(2, Type::Text, Box::new(e))
            })?;
            Ok(PersistedModelChange { id, created_at, payload })
        })?;

        Ok(items.collect::<std::result::Result<Vec<_>, rusqlite::Error>>()?)
    }

    pub fn list_model_changes_since(
        &self,
        since_created_at: &str,
        since_id: i64,
        limit: usize,
    ) -> Result<Vec<PersistedModelChange>> {
        let mut stmt = self.conn.prepare(
            r#"
            SELECT id, created_at, payload
            FROM model_changes
            WHERE created_at > ?1
               OR (created_at = ?1 AND id > ?2)
            ORDER BY created_at ASC, id ASC
            LIMIT ?3
            "#,
        )?;

        let items = stmt.query_map(params![since_created_at, since_id, limit as i64], |row| {
            let id: i64 = row.get(0)?;
            let created_at: String = row.get(1)?;
            let payload_raw: String = row.get(2)?;
            let payload = serde_json::from_str::<ModelPayload>(&payload_raw).map_err(|e| {
                rusqlite::Error::FromSqlConversionFailure(2, Type::Text, Box::new(e))
            })?;
            Ok(PersistedModelChange { id, created_at, payload })
        })?;

        Ok(items.collect::<std::result::Result<Vec<_>, rusqlite::Error>>()?)
    }

    pub fn prune_model_changes_older_than_days(&self, days: i64) -> Result<usize> {
        let offset = format!("-{days} days");
        Ok(self.conn.resolve().execute(
            r#"
            DELETE FROM model_changes
            WHERE created_at < STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW', ?1)
            "#,
            params![offset],
        )?)
    }

    pub fn prune_model_changes_older_than_hours(&self, hours: i64) -> Result<usize> {
        let offset = format!("-{hours} hours");
        Ok(self.conn.resolve().execute(
            r#"
            DELETE FROM model_changes
            WHERE created_at < STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW', ?1)
            "#,
            params![offset],
        )?)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::init_in_memory;
    use crate::models::Workspace;
    use crate::util::{ModelChangeEvent, UpdateSource};
    use serde_json::json;

    #[test]
    fn records_model_changes_for_upsert_and_delete() {
        let (query_manager, _blob_manager, _rx) = init_in_memory().expect("Failed to init DB");
        let db = query_manager.connect();

        let workspace = db
            .upsert_workspace(
                &Workspace {
                    name: "Changes Test".to_string(),
                    setting_follow_redirects: true,
                    setting_validate_certificates: true,
                    ..Default::default()
                },
                &UpdateSource::Sync,
            )
            .expect("Failed to upsert workspace");

        let created_changes = db.list_model_changes_after(0, 10).expect("Failed to list changes");
        assert_eq!(created_changes.len(), 1);
        assert_eq!(created_changes[0].payload.model.id(), workspace.id);
        assert_eq!(created_changes[0].payload.model.model(), "workspace");
        assert!(matches!(
            created_changes[0].payload.change,
            ModelChangeEvent::Upsert { created: true }
        ));
        assert!(matches!(created_changes[0].payload.update_source, UpdateSource::Sync));

        db.delete_workspace_by_id(&workspace.id, &UpdateSource::Sync)
            .expect("Failed to delete workspace");

        let all_changes = db.list_model_changes_after(0, 10).expect("Failed to list changes");
        assert_eq!(all_changes.len(), 2);
        assert!(matches!(all_changes[1].payload.change, ModelChangeEvent::Delete));
        assert!(all_changes[1].id > all_changes[0].id);

        let changes_after_first = db
            .list_model_changes_after(all_changes[0].id, 10)
            .expect("Failed to list changes after cursor");
        assert_eq!(changes_after_first.len(), 1);
        assert!(matches!(changes_after_first[0].payload.change, ModelChangeEvent::Delete));
    }

    #[test]
    fn prunes_old_model_changes() {
        let (query_manager, _blob_manager, _rx) = init_in_memory().expect("Failed to init DB");
        let db = query_manager.connect();

        db.upsert_workspace(
            &Workspace {
                name: "Prune Test".to_string(),
                setting_follow_redirects: true,
                setting_validate_certificates: true,
                ..Default::default()
            },
            &UpdateSource::Sync,
        )
        .expect("Failed to upsert workspace");

        let changes = db.list_model_changes_after(0, 10).expect("Failed to list changes");
        assert_eq!(changes.len(), 1);

        db.conn
            .resolve()
            .execute(
                "UPDATE model_changes SET created_at = '2000-01-01 00:00:00.000' WHERE id = ?1",
                params![changes[0].id],
            )
            .expect("Failed to age model change row");

        let pruned =
            db.prune_model_changes_older_than_days(30).expect("Failed to prune model changes");
        assert_eq!(pruned, 1);
        assert!(db.list_model_changes_after(0, 10).expect("Failed to list changes").is_empty());
    }

    #[test]
    fn list_model_changes_since_uses_timestamp_with_id_tiebreaker() {
        let (query_manager, _blob_manager, _rx) = init_in_memory().expect("Failed to init DB");
        let db = query_manager.connect();

        let workspace = db
            .upsert_workspace(
                &Workspace {
                    name: "Cursor Test".to_string(),
                    setting_follow_redirects: true,
                    setting_validate_certificates: true,
                    ..Default::default()
                },
                &UpdateSource::Sync,
            )
            .expect("Failed to upsert workspace");
        db.delete_workspace_by_id(&workspace.id, &UpdateSource::Sync)
            .expect("Failed to delete workspace");

        let all = db.list_model_changes_after(0, 10).expect("Failed to list changes");
        assert_eq!(all.len(), 2);

        let fixed_ts = "2026-02-16 00:00:00.000";
        db.conn
            .resolve()
            .execute("UPDATE model_changes SET created_at = ?1", params![fixed_ts])
            .expect("Failed to normalize timestamps");

        let after_first =
            db.list_model_changes_since(fixed_ts, all[0].id, 10).expect("Failed to query cursor");
        assert_eq!(after_first.len(), 1);
        assert_eq!(after_first[0].id, all[1].id);
    }

    #[test]
    fn prunes_old_model_changes_by_hours() {
        let (query_manager, _blob_manager, _rx) = init_in_memory().expect("Failed to init DB");
        let db = query_manager.connect();

        db.upsert_workspace(
            &Workspace {
                name: "Prune Hour Test".to_string(),
                setting_follow_redirects: true,
                setting_validate_certificates: true,
                ..Default::default()
            },
            &UpdateSource::Sync,
        )
        .expect("Failed to upsert workspace");

        let changes = db.list_model_changes_after(0, 10).expect("Failed to list changes");
        assert_eq!(changes.len(), 1);

        db.conn
            .resolve()
            .execute(
                "UPDATE model_changes SET created_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW', '-2 hours') WHERE id = ?1",
                params![changes[0].id],
            )
            .expect("Failed to age model change row");

        let pruned =
            db.prune_model_changes_older_than_hours(1).expect("Failed to prune model changes");
        assert_eq!(pruned, 1);
    }

    #[test]
    fn list_model_changes_deserializes_http_response_event_payload() {
        let (query_manager, _blob_manager, _rx) = init_in_memory().expect("Failed to init DB");
        let db = query_manager.connect();

        let payload = json!({
            "model": {
                "model": "http_response_event",
                "id": "re_test",
                "createdAt": "2026-02-16T21:01:34.809162",
                "updatedAt": "2026-02-16T21:01:34.809163",
                "workspaceId": "wk_test",
                "responseId": "rs_test",
                "event": {
                    "type": "info",
                    "message": "hello"
                }
            },
            "updateSource": { "type": "sync" },
            "change": { "type": "upsert", "created": false }
        });

        db.conn
            .resolve()
            .execute(
                r#"
                INSERT INTO model_changes (model, model_id, change, update_source, payload)
                VALUES (?1, ?2, ?3, ?4, ?5)
                "#,
                params![
                    "http_response_event",
                    "re_test",
                    r#"{"type":"upsert","created":false}"#,
                    r#"{"type":"sync"}"#,
                    payload.to_string(),
                ],
            )
            .expect("Failed to insert model change row");

        let changes = db.list_model_changes_after(0, 10).expect("Failed to list changes");
        assert_eq!(changes.len(), 1);
        assert_eq!(changes[0].payload.model.model(), "http_response_event");
        assert_eq!(changes[0].payload.model.id(), "re_test");
    }
}
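A sketch of the cursor pattern these queries enable: the consumer remembers the last row id it processed and pages forward with `list_model_changes_after`. The wrapper function itself is hypothetical, not part of the crate:

```rust
fn drain_changes(db: &DbContext<'_>, mut cursor: i64) -> Result<i64> {
    loop {
        let batch = db.list_model_changes_after(cursor, 100)?;
        if batch.is_empty() {
            return Ok(cursor); // caller persists the cursor and polls again later
        }
        for change in batch {
            // ... apply or forward `change.payload` here
            cursor = change.id;
        }
    }
}
```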
@@ -1,84 +0,0 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.

export type AnyModel = CookieJar | Environment | Folder | GraphQlIntrospection | GrpcConnection | GrpcEvent | GrpcRequest | HttpRequest | HttpResponse | HttpResponseEvent | KeyValue | Plugin | Settings | SyncState | WebsocketConnection | WebsocketEvent | WebsocketRequest | Workspace | WorkspaceMeta;

export type ClientCertificate = { host: string, port: number | null, crtFile: string | null, keyFile: string | null, pfxFile: string | null, passphrase: string | null, enabled?: boolean, };

export type Cookie = { raw_cookie: string, domain: CookieDomain, expires: CookieExpires, path: [string, boolean], };

export type CookieDomain = { "HostOnly": string } | { "Suffix": string } | "NotPresent" | "Empty";

export type CookieExpires = { "AtUtc": string } | "SessionEnd";

export type CookieJar = { model: "cookie_jar", id: string, createdAt: string, updatedAt: string, workspaceId: string, cookies: Array<Cookie>, name: string, };

export type DnsOverride = { hostname: string, ipv4: Array<string>, ipv6: Array<string>, enabled?: boolean, };

export type EditorKeymap = "default" | "vim" | "vscode" | "emacs";

export type EncryptedKey = { encryptedKey: string, };

export type Environment = { model: "environment", id: string, workspaceId: string, createdAt: string, updatedAt: string, name: string, public: boolean, parentModel: string, parentId: string | null, variables: Array<EnvironmentVariable>, color: string | null, sortPriority: number, };

export type EnvironmentVariable = { enabled?: boolean, name: string, value: string, id?: string, };

export type Folder = { model: "folder", id: string, createdAt: string, updatedAt: string, workspaceId: string, folderId: string | null, authentication: Record<string, any>, authenticationType: string | null, description: string, headers: Array<HttpRequestHeader>, name: string, sortPriority: number, };

export type GraphQlIntrospection = { model: "graphql_introspection", id: string, createdAt: string, updatedAt: string, workspaceId: string, requestId: string, content: string | null, };

export type GrpcConnection = { model: "grpc_connection", id: string, createdAt: string, updatedAt: string, workspaceId: string, requestId: string, elapsed: number, error: string | null, method: string, service: string, status: number, state: GrpcConnectionState, trailers: { [key in string]?: string }, url: string, };

export type GrpcConnectionState = "initialized" | "connected" | "closed";

export type GrpcEvent = { model: "grpc_event", id: string, createdAt: string, updatedAt: string, workspaceId: string, requestId: string, connectionId: string, content: string, error: string | null, eventType: GrpcEventType, metadata: { [key in string]?: string }, status: number | null, };

export type GrpcEventType = "info" | "error" | "client_message" | "server_message" | "connection_start" | "connection_end";

export type GrpcRequest = { model: "grpc_request", id: string, createdAt: string, updatedAt: string, workspaceId: string, folderId: string | null, authenticationType: string | null, authentication: Record<string, any>, description: string, message: string, metadata: Array<HttpRequestHeader>, method: string | null, name: string, service: string | null, sortPriority: number, url: string, };

export type HttpRequest = { model: "http_request", id: string, createdAt: string, updatedAt: string, workspaceId: string, folderId: string | null, authentication: Record<string, any>, authenticationType: string | null, body: Record<string, any>, bodyType: string | null, description: string, headers: Array<HttpRequestHeader>, method: string, name: string, sortPriority: number, url: string, urlParameters: Array<HttpUrlParameter>, };

export type HttpRequestHeader = { enabled?: boolean, name: string, value: string, id?: string, };

export type HttpResponse = { model: "http_response", id: string, createdAt: string, updatedAt: string, workspaceId: string, requestId: string, bodyPath: string | null, contentLength: number | null, contentLengthCompressed: number | null, elapsed: number, elapsedHeaders: number, elapsedDns: number, error: string | null, headers: Array<HttpResponseHeader>, remoteAddr: string | null, requestContentLength: number | null, requestHeaders: Array<HttpResponseHeader>, status: number, statusReason: string | null, state: HttpResponseState, url: string, version: string | null, };

export type HttpResponseEvent = { model: "http_response_event", id: string, createdAt: string, updatedAt: string, workspaceId: string, responseId: string, event: HttpResponseEventData, };

/**
 * Serializable representation of HTTP response events for DB storage.
 * This mirrors `yaak_http::sender::HttpResponseEvent` but with serde support.
 * The `From` impl is in yaak-http to avoid circular dependencies.
 */
export type HttpResponseEventData = { "type": "setting", name: string, value: string, } | { "type": "info", message: string, } | { "type": "redirect", url: string, status: number, behavior: string, } | { "type": "send_url", method: string, scheme: string, username: string, password: string, host: string, port: number, path: string, query: string, fragment: string, } | { "type": "receive_url", version: string, status: string, } | { "type": "header_up", name: string, value: string, } | { "type": "header_down", name: string, value: string, } | { "type": "chunk_sent", bytes: number, } | { "type": "chunk_received", bytes: number, } | { "type": "dns_resolved", hostname: string, addresses: Array<string>, duration: bigint, overridden: boolean, };

export type HttpResponseHeader = { name: string, value: string, };

export type HttpResponseState = "initialized" | "connected" | "closed";

export type HttpUrlParameter = { enabled?: boolean, name: string, value: string, id?: string, };

export type KeyValue = { model: "key_value", id: string, createdAt: string, updatedAt: string, key: string, namespace: string, value: string, };

export type Plugin = { model: "plugin", id: string, createdAt: string, updatedAt: string, checkedAt: string | null, directory: string, enabled: boolean, url: string | null, };

export type ProxySetting = { "type": "enabled", http: string, https: string, auth: ProxySettingAuth | null, bypass: string, disabled: boolean, } | { "type": "disabled" };

export type ProxySettingAuth = { user: string, password: string, };

export type Settings = { model: "settings", id: string, createdAt: string, updatedAt: string, appearance: string, clientCertificates: Array<ClientCertificate>, coloredMethods: boolean, editorFont: string | null, editorFontSize: number, editorKeymap: EditorKeymap, editorSoftWrap: boolean, hideWindowControls: boolean, useNativeTitlebar: boolean, interfaceFont: string | null, interfaceFontSize: number, interfaceScale: number, openWorkspaceNewWindow: boolean | null, proxy: ProxySetting | null, themeDark: string, themeLight: string, updateChannel: string, hideLicenseBadge: boolean, autoupdate: boolean, autoDownloadUpdates: boolean, checkNotifications: boolean, hotkeys: { [key in string]?: Array<string> }, };

export type SyncState = { model: "sync_state", id: string, workspaceId: string, createdAt: string, updatedAt: string, flushedAt: string, modelId: string, checksum: string, relPath: string, syncDir: string, };

export type WebsocketConnection = { model: "websocket_connection", id: string, createdAt: string, updatedAt: string, workspaceId: string, requestId: string, elapsed: number, error: string | null, headers: Array<HttpResponseHeader>, state: WebsocketConnectionState, status: number, url: string, };

export type WebsocketConnectionState = "initialized" | "connected" | "closing" | "closed";

export type WebsocketEvent = { model: "websocket_event", id: string, createdAt: string, updatedAt: string, workspaceId: string, requestId: string, connectionId: string, isServer: boolean, message: Array<number>, messageType: WebsocketEventType, };

export type WebsocketEventType = "binary" | "close" | "frame" | "open" | "ping" | "pong" | "text";

export type WebsocketRequest = { model: "websocket_request", id: string, createdAt: string, updatedAt: string, workspaceId: string, folderId: string | null, authentication: Record<string, any>, authenticationType: string | null, description: string, headers: Array<HttpRequestHeader>, message: string, name: string, sortPriority: number, url: string, urlParameters: Array<HttpUrlParameter>, };

export type Workspace = { model: "workspace", id: string, createdAt: string, updatedAt: string, authentication: Record<string, any>, authenticationType: string | null, description: string, headers: Array<HttpRequestHeader>, name: string, encryptionKeyChallenge: string | null, settingValidateCertificates: boolean, settingFollowRedirects: boolean, settingRequestTimeout: number, settingDnsOverrides: Array<DnsOverride>, };

export type WorkspaceMeta = { model: "workspace_meta", id: string, workspaceId: string, createdAt: string, updatedAt: string, encryptionKey: EncryptedKey | null, settingSyncDir: string | null, };
@@ -1,26 +0,0 @@
import { invoke } from '@tauri-apps/api/core';
import { PluginNameVersion, PluginSearchResponse, PluginUpdatesResponse } from './bindings/gen_api';

export * from './bindings/gen_models';
export * from './bindings/gen_events';
export * from './bindings/gen_search';

export async function searchPlugins(query: string) {
  return invoke<PluginSearchResponse>('cmd_plugins_search', { query });
}

export async function installPlugin(name: string, version: string | null) {
  return invoke<void>('cmd_plugins_install', { name, version });
}

export async function uninstallPlugin(pluginId: string) {
  return invoke<void>('cmd_plugins_uninstall', { pluginId });
}

export async function checkPluginUpdates() {
  return invoke<PluginUpdatesResponse>('cmd_plugins_updates', {});
}

export async function updateAllPlugins() {
  return invoke<PluginNameVersion[]>('cmd_plugins_update_all', {});
}
@@ -1,92 +0,0 @@
use crate::api::{PluginVersion, download_plugin_archive, get_plugin};
use crate::checksum::compute_checksum;
use crate::error::Error::PluginErr;
use crate::error::Result;
use crate::events::PluginContext;
use crate::manager::PluginManager;
use chrono::Utc;
use log::info;
use std::fs::{create_dir_all, remove_dir_all};
use std::io::Cursor;
use std::sync::Arc;
use yaak_models::models::Plugin;
use yaak_models::query_manager::QueryManager;
use yaak_models::util::UpdateSource;

/// Delete a plugin from the database and uninstall it.
pub async fn delete_and_uninstall(
    plugin_manager: Arc<PluginManager>,
    query_manager: &QueryManager,
    plugin_context: &PluginContext,
    plugin_id: &str,
) -> Result<Plugin> {
    let update_source = match plugin_context.label.clone() {
        Some(label) => UpdateSource::from_window_label(label),
        None => UpdateSource::Background,
    };
    // Scope the db connection so it doesn't live across await
    let plugin = {
        let db = query_manager.connect();
        db.delete_plugin_by_id(plugin_id, &update_source)?
    };
    plugin_manager.uninstall(plugin_context, plugin.directory.as_str()).await?;
    Ok(plugin)
}

/// Download and install a plugin.
pub async fn download_and_install(
    plugin_manager: Arc<PluginManager>,
    query_manager: &QueryManager,
    http_client: &reqwest::Client,
    plugin_context: &PluginContext,
    name: &str,
    version: Option<String>,
) -> Result<PluginVersion> {
    info!("Installing plugin {} {}", name, version.clone().unwrap_or_default());
    let plugin_version = get_plugin(http_client, name, version).await?;
    let resp = download_plugin_archive(http_client, &plugin_version).await?;
    let bytes = resp.bytes().await?;

    let checksum = compute_checksum(&bytes);
    if checksum != plugin_version.checksum {
        return Err(PluginErr(format!(
            "Checksum mismatch {}b {checksum} != {}",
            bytes.len(),
            plugin_version.checksum
        )));
    }

    info!("Checksum matched {}", checksum);

    let plugin_dir = plugin_manager.installed_plugin_dir.join(name);
    let plugin_dir_str = plugin_dir.to_str().unwrap().to_string();

    // Re-create the plugin directory
    let _ = remove_dir_all(&plugin_dir);
    create_dir_all(&plugin_dir)?;

    zip_extract::extract(Cursor::new(&bytes), &plugin_dir, true)?;
    info!("Extracted plugin {} to {}", plugin_version.id, plugin_dir_str);

    // Scope the db connection so it doesn't live across await
    let plugin = {
        let db = query_manager.connect();
        db.upsert_plugin(
            &Plugin {
                id: plugin_version.id.clone(),
                checked_at: Some(Utc::now().naive_utc()),
                directory: plugin_dir_str.clone(),
                enabled: true,
                url: Some(plugin_version.url.clone()),
                ..Default::default()
            },
            &UpdateSource::Background,
        )?
    };

    plugin_manager.add_plugin(plugin_context, &plugin).await?;

    info!("Installed plugin {} to {}", plugin_version.id, plugin_dir_str);

    Ok(plugin_version)
}
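A hypothetical caller wiring `download_and_install` together; the plugin name is illustrative, and passing `None` for the version resolves to the latest published release:

```rust
async fn install_latest(
    plugin_manager: Arc<PluginManager>,
    query_manager: &QueryManager,
    ctx: &PluginContext,
) -> Result<()> {
    let http = reqwest::Client::new();
    // "example-plugin" is a placeholder name, not a real registry entry
    let installed =
        download_and_install(plugin_manager, query_manager, &http, ctx, "example-plugin", None)
            .await?;
    info!("Installed plugin version {}", installed.id);
    Ok(())
}
```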
@@ -1,21 +0,0 @@
//! Core plugin system for Yaak.
//!
//! This crate provides the plugin manager and supporting functionality
//! for running JavaScript plugins via a Node.js runtime.
//!
//! Note: This crate is Tauri-independent. Tauri integration is provided
//! by yaak-app's plugins_ext module.

pub mod api;
mod checksum;
pub mod error;
pub mod events;
pub mod install;
pub mod manager;
pub mod native_template_functions;
mod nodejs;
pub mod plugin_handle;
pub mod plugin_meta;
mod server_ws;
pub mod template_callback;
mod util;
@@ -1,83 +0,0 @@
use crate::error::Result;
use log::{info, warn};
use std::net::SocketAddr;
use std::path::Path;
use std::process::Stdio;
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::sync::oneshot;
use tokio::sync::watch::Receiver;
use yaak_common::command::new_xplatform_command;

/// Start the Node.js plugin runtime process.
///
/// # Arguments
/// * `node_bin_path` - Path to the yaaknode binary
/// * `plugin_runtime_main` - Path to the plugin runtime index.cjs
/// * `addr` - Socket address for the plugin runtime to connect to
/// * `kill_rx` - Channel to signal shutdown
/// * `killed_tx` - Resolves once the child process has been killed
pub async fn start_nodejs_plugin_runtime(
    node_bin_path: &Path,
    plugin_runtime_main: &Path,
    addr: SocketAddr,
    kill_rx: &Receiver<bool>,
    killed_tx: oneshot::Sender<()>,
) -> Result<()> {
    // HACK: Remove UNC prefix for Windows paths to pass to sidecar
    let plugin_runtime_main_str =
        dunce::simplified(plugin_runtime_main).to_string_lossy().to_string();

    info!(
        "Starting plugin runtime node={} main={}",
        node_bin_path.display(),
        plugin_runtime_main_str
    );

    let mut cmd = new_xplatform_command(node_bin_path);
    cmd.env("HOST", addr.ip().to_string())
        .env("PORT", addr.port().to_string())
        .arg(&plugin_runtime_main_str)
        .stdout(Stdio::piped())
        .stderr(Stdio::piped());

    let mut child = cmd.spawn()?;

    info!("Spawned plugin runtime");

    // Stream stdout
    if let Some(stdout) = child.stdout.take() {
        tokio::spawn(async move {
            let reader = BufReader::new(stdout);
            let mut lines = reader.lines();
            while let Ok(Some(line)) = lines.next_line().await {
                info!("{}", line);
            }
        });
    }

    // Stream stderr
    if let Some(stderr) = child.stderr.take() {
        tokio::spawn(async move {
            let reader = BufReader::new(stderr);
            let mut lines = reader.lines();
            while let Ok(Some(line)) = lines.next_line().await {
                warn!("{}", line);
            }
        });
    }

    // Handle kill signal
    let mut kill_rx = kill_rx.clone();
    tokio::spawn(async move {
        if kill_rx.wait_for(|b| *b).await.is_err() {
            warn!("Kill channel closed before explicit shutdown; terminating plugin runtime");
        }
        info!("Killing plugin runtime");
        if let Err(e) = child.kill().await {
            warn!("Failed to kill plugin runtime: {e}");
        }
        info!("Killed plugin runtime");
        let _ = killed_tx.send(());
    });

    Ok(())
}
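A sketch of driving this function with its two channels, under the signature above: the watch channel requests shutdown and the oneshot reports that the child process is actually gone:

```rust
async fn run_then_stop(node_bin: &Path, main_js: &Path, addr: SocketAddr) -> Result<()> {
    let (kill_tx, kill_rx) = tokio::sync::watch::channel(false);
    let (killed_tx, killed_rx) = tokio::sync::oneshot::channel();

    start_nodejs_plugin_runtime(node_bin, main_js, addr, &kill_rx, killed_tx).await?;

    // ... later, on app shutdown:
    let _ = kill_tx.send(true); // wakes the wait_for(|b| *b) task above
    let _ = killed_rx.await;    // resolves once the child has been killed
    Ok(())
}
```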
@@ -1,4 +0,0 @@
pub mod error;
pub mod models;
pub mod sync;
pub mod watch;
@@ -1,8 +0,0 @@
const { execSync } = require('node:child_process');

if (process.env.SKIP_WASM_BUILD === '1') {
  console.log('Skipping wasm-pack build (SKIP_WASM_BUILD=1)');
  return;
}

execSync('wasm-pack build --target bundler', { stdio: 'inherit' });
Binary file not shown.
@@ -1,11 +0,0 @@
/* tslint:disable */
/* eslint-disable */
export const memory: WebAssembly.Memory;
export const escape_template: (a: number, b: number) => [number, number, number];
export const parse_template: (a: number, b: number) => [number, number, number];
export const unescape_template: (a: number, b: number) => [number, number, number];
export const __wbindgen_malloc: (a: number, b: number) => number;
export const __wbindgen_realloc: (a: number, b: number, c: number, d: number) => number;
export const __wbindgen_export_2: WebAssembly.Table;
export const __externref_table_dealloc: (a: number) => void;
export const __wbindgen_start: () => void;
@@ -1,43 +0,0 @@
import { invoke } from '@tauri-apps/api/core';
import { WebsocketConnection } from '@yaakapp-internal/models';

export function deleteWebsocketConnections(requestId: string) {
  return invoke('cmd_ws_delete_connections', {
    requestId,
  });
}

export function connectWebsocket({
  requestId,
  environmentId,
  cookieJarId,
}: {
  requestId: string;
  environmentId: string | null;
  cookieJarId: string | null;
}) {
  return invoke('cmd_ws_connect', {
    requestId,
    environmentId,
    cookieJarId,
  }) as Promise<WebsocketConnection>;
}

export function closeWebsocket({ connectionId }: { connectionId: string }) {
  return invoke('cmd_ws_close', {
    connectionId,
  });
}

export function sendWebsocket({
  connectionId,
  environmentId,
}: {
  connectionId: string;
  environmentId: string | null;
}) {
  return invoke('cmd_ws_send', {
    connectionId,
    environmentId,
  });
}
Some files were not shown because too many files have changed in this diff.