mirror of
https://github.com/beshu-tech/deltaglider.git
synced 2026-04-30 12:14:32 +02:00
Compare commits
66 Commits
fix/intell
...
v5.1.1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a9a1396e6e | ||
|
|
52eb5bba21 | ||
|
|
f75db142e8 | ||
|
|
35d34d4862 | ||
|
|
9230cbd762 | ||
|
|
2eba6e8d38 | ||
|
|
656726b57b | ||
|
|
85dd315424 | ||
|
|
dbd2632cae | ||
|
|
3d04a407c0 | ||
|
|
47f022fffe | ||
|
|
7a2ed16ee7 | ||
|
|
5e333254ba | ||
|
|
04cc984d4a | ||
|
|
ac7d4e067f | ||
|
|
e8fb926fd6 | ||
|
|
626e28eaf6 | ||
|
|
90a342dc33 | ||
|
|
f9f2b036e3 | ||
|
|
778d7f0148 | ||
|
|
37ea2f138c | ||
|
|
5e3b76791e | ||
|
|
fb2877bfd3 | ||
|
|
88fd1f51cd | ||
|
|
0857e02edd | ||
|
|
689cf00d02 | ||
|
|
743d52e783 | ||
|
|
8bc0a0eaf3 | ||
|
|
4cf25e4681 | ||
|
|
69ed9056d2 | ||
|
|
38134f28f5 | ||
|
|
fa1f8b85a9 | ||
|
|
a06cc2939c | ||
|
|
5b8477ed61 | ||
|
|
e706ddebdd | ||
|
|
50db9bbb27 | ||
|
|
c25568e315 | ||
|
|
ca1186a3f6 | ||
|
|
4217535e8c | ||
|
|
0064d7e74b | ||
|
|
9c1659a1f1 | ||
|
|
34c871b0d7 | ||
|
|
db0662c175 | ||
|
|
2efa760785 | ||
|
|
74207f4ee4 | ||
|
|
4668b10c3f | ||
|
|
8cea5a3527 | ||
|
|
07f630d855 | ||
|
|
09c0893244 | ||
|
|
ac2e2b5a0a | ||
|
|
b760890a61 | ||
|
|
03106b76a8 | ||
|
|
dd39595c67 | ||
|
|
12c71c1d6e | ||
|
|
cf10a689cc | ||
|
|
b6ea6d734a | ||
|
|
673e87e5b8 | ||
|
|
c9103cfd4b | ||
|
|
23357e240b | ||
|
|
13fcc8738c | ||
|
|
4a633802b7 | ||
|
|
9f839cc8b7 | ||
|
|
4852f373f1 | ||
|
|
a7ec85b064 | ||
|
|
09a5899a56 | ||
|
|
e0b8bac859 |
26
.github/workflows/ci.yml
vendored
26
.github/workflows/ci.yml
vendored
@@ -3,7 +3,6 @@ name: CI
|
||||
on:
|
||||
push:
|
||||
branches: [main, develop]
|
||||
tags: ["v*"]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
@@ -143,28 +142,3 @@ jobs:
|
||||
run: |
|
||||
uv run pytest tests/e2e -v --tb=short
|
||||
|
||||
pypi-publish:
|
||||
needs: [lint, typecheck, test, e2e-test]
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install UV
|
||||
run: |
|
||||
curl -LsSf https://astral.sh/uv/${{ env.UV_VERSION }}/install.sh | sh
|
||||
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
|
||||
- name: Build package
|
||||
run: |
|
||||
uv build
|
||||
|
||||
- name: Publish to PyPI
|
||||
uses: pypa/gh-action-pypi-publish@release/v1
|
||||
with:
|
||||
password: ${{ secrets.PYPI_API_TOKEN }}
|
||||
250
.github/workflows/release-manual.yml
vendored
Normal file
250
.github/workflows/release-manual.yml
vendored
Normal file
@@ -0,0 +1,250 @@
|
||||
name: Manual Release (Simple)
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
version:
|
||||
description: 'Version to release (e.g., 0.3.2) - make sure tag v0.3.2 exists!'
|
||||
required: true
|
||||
type: string
|
||||
pypi_environment:
|
||||
description: 'PyPI environment'
|
||||
required: true
|
||||
type: choice
|
||||
options:
|
||||
- 'pypi'
|
||||
- 'testpypi'
|
||||
default: 'pypi'
|
||||
|
||||
env:
|
||||
UV_VERSION: "0.5.13"
|
||||
PYTHON_VERSION: "3.12"
|
||||
|
||||
jobs:
|
||||
validate:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
tag_name: ${{ steps.validate_tag.outputs.tag }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Validate version format
|
||||
run: |
|
||||
if ! echo "${{ github.event.inputs.version }}" | grep -E '^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9]+)?$'; then
|
||||
echo "Error: Version must be in format X.Y.Z or X.Y.Z-suffix"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Check if tag exists
|
||||
id: validate_tag
|
||||
run: |
|
||||
TAG="v${{ github.event.inputs.version }}"
|
||||
if ! git rev-parse "$TAG" >/dev/null 2>&1; then
|
||||
echo "Error: Tag $TAG does not exist!"
|
||||
echo "Please create it first with:"
|
||||
echo " git tag $TAG"
|
||||
echo " git push origin $TAG"
|
||||
exit 1
|
||||
fi
|
||||
echo "tag=$TAG" >> $GITHUB_OUTPUT
|
||||
|
||||
lint:
|
||||
needs: validate
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ needs.validate.outputs.tag_name }}
|
||||
|
||||
- name: Install UV
|
||||
run: |
|
||||
curl -LsSf https://astral.sh/uv/${{ env.UV_VERSION }}/install.sh | sh
|
||||
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
uv pip install --system -e ".[dev]"
|
||||
|
||||
- name: Run ruff check
|
||||
run: |
|
||||
uv run ruff check src tests
|
||||
|
||||
- name: Run ruff format check
|
||||
run: |
|
||||
uv run ruff format --check src tests
|
||||
|
||||
typecheck:
|
||||
needs: validate
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ needs.validate.outputs.tag_name }}
|
||||
|
||||
- name: Install UV
|
||||
run: |
|
||||
curl -LsSf https://astral.sh/uv/${{ env.UV_VERSION }}/install.sh | sh
|
||||
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
uv pip install --system -e ".[dev]"
|
||||
|
||||
- name: Run mypy
|
||||
run: |
|
||||
uv run mypy src
|
||||
|
||||
test:
|
||||
needs: validate
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ needs.validate.outputs.tag_name }}
|
||||
|
||||
- name: Install UV
|
||||
run: |
|
||||
curl -LsSf https://astral.sh/uv/${{ env.UV_VERSION }}/install.sh | sh
|
||||
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
|
||||
- name: Install xdelta3
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y xdelta3
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
uv pip install --system -e ".[dev]"
|
||||
|
||||
- name: Run unit tests
|
||||
run: |
|
||||
uv run pytest tests/unit -v --tb=short
|
||||
|
||||
- name: Run integration tests
|
||||
run: |
|
||||
uv run pytest tests/integration -v --tb=short
|
||||
|
||||
e2e-test:
|
||||
needs: validate
|
||||
runs-on: ubuntu-latest
|
||||
services:
|
||||
localstack:
|
||||
image: localstack/localstack:latest
|
||||
ports:
|
||||
- 4566:4566
|
||||
env:
|
||||
SERVICES: s3
|
||||
DEBUG: 0
|
||||
DATA_DIR: /tmp/localstack/data
|
||||
options: >-
|
||||
--health-cmd "curl -f http://localhost:4566/_localstack/health"
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ needs.validate.outputs.tag_name }}
|
||||
|
||||
- name: Install UV
|
||||
run: |
|
||||
curl -LsSf https://astral.sh/uv/${{ env.UV_VERSION }}/install.sh | sh
|
||||
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
|
||||
- name: Install xdelta3
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y xdelta3
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
uv pip install --system -e ".[dev]"
|
||||
|
||||
- name: Run E2E tests
|
||||
env:
|
||||
AWS_ACCESS_KEY_ID: test
|
||||
AWS_SECRET_ACCESS_KEY: test
|
||||
AWS_DEFAULT_REGION: us-east-1
|
||||
AWS_ENDPOINT_URL: http://localhost:4566
|
||||
run: |
|
||||
uv run pytest tests/e2e -v --tb=short
|
||||
|
||||
publish:
|
||||
needs: [validate, lint, typecheck, test, e2e-test]
|
||||
runs-on: ubuntu-latest
|
||||
environment: ${{ github.event.inputs.pypi_environment }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ needs.validate.outputs.tag_name }}
|
||||
fetch-depth: 0 # Important for setuptools-scm
|
||||
|
||||
- name: Install UV
|
||||
run: |
|
||||
curl -LsSf https://astral.sh/uv/${{ env.UV_VERSION }}/install.sh | sh
|
||||
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
|
||||
- name: Build package
|
||||
run: |
|
||||
uv build
|
||||
|
||||
- name: Publish to TestPyPI
|
||||
if: github.event.inputs.pypi_environment == 'testpypi'
|
||||
uses: pypa/gh-action-pypi-publish@release/v1
|
||||
with:
|
||||
repository-url: https://test.pypi.org/legacy/
|
||||
password: ${{ secrets.TEST_PYPI_API_TOKEN }}
|
||||
|
||||
- name: Publish to PyPI
|
||||
if: github.event.inputs.pypi_environment == 'pypi'
|
||||
uses: pypa/gh-action-pypi-publish@release/v1
|
||||
with:
|
||||
password: ${{ secrets.PYPI_API_TOKEN }}
|
||||
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@v1
|
||||
continue-on-error: true # Don't fail if GitHub release creation fails
|
||||
with:
|
||||
tag_name: ${{ needs.validate.outputs.tag_name }}
|
||||
name: Release v${{ github.event.inputs.version }}
|
||||
body: |
|
||||
## DeltaGlider v${{ github.event.inputs.version }}
|
||||
|
||||
Published to ${{ github.event.inputs.pypi_environment == 'pypi' && 'PyPI' || 'TestPyPI' }}
|
||||
|
||||
### Installation
|
||||
```bash
|
||||
pip install deltaglider==${{ github.event.inputs.version }}
|
||||
```
|
||||
draft: false
|
||||
prerelease: ${{ contains(github.event.inputs.version, '-') }}
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
254
.github/workflows/release.yml
vendored
Normal file
254
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,254 @@
|
||||
name: Manual Release
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
version:
|
||||
description: 'Version to release (e.g., 0.3.2)'
|
||||
required: true
|
||||
type: string
|
||||
pypi_environment:
|
||||
description: 'PyPI environment'
|
||||
required: true
|
||||
type: choice
|
||||
options:
|
||||
- 'pypi'
|
||||
- 'testpypi'
|
||||
default: 'pypi'
|
||||
|
||||
env:
|
||||
UV_VERSION: "0.5.13"
|
||||
PYTHON_VERSION: "3.12"
|
||||
|
||||
jobs:
|
||||
validate-and-tag:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
tag_name: ${{ steps.create_tag.outputs.tag }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.PAT_TOKEN }}
|
||||
|
||||
- name: Validate version format
|
||||
run: |
|
||||
if ! echo "${{ github.event.inputs.version }}" | grep -E '^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9]+)?$'; then
|
||||
echo "Error: Version must be in format X.Y.Z or X.Y.Z-suffix"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Check if tag already exists
|
||||
run: |
|
||||
if git rev-parse "v${{ github.event.inputs.version }}" >/dev/null 2>&1; then
|
||||
echo "Error: Tag v${{ github.event.inputs.version }} already exists"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Create and push tag
|
||||
id: create_tag
|
||||
run: |
|
||||
git config --global user.name "github-actions[bot]"
|
||||
git config --global user.email "github-actions[bot]@users.noreply.github.com"
|
||||
git tag -a "v${{ github.event.inputs.version }}" -m "Release v${{ github.event.inputs.version }}"
|
||||
git push origin "v${{ github.event.inputs.version }}"
|
||||
echo "tag=v${{ github.event.inputs.version }}" >> $GITHUB_OUTPUT
|
||||
|
||||
lint:
|
||||
needs: validate-and-tag
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ needs.validate-and-tag.outputs.tag_name }}
|
||||
|
||||
- name: Install UV
|
||||
run: |
|
||||
curl -LsSf https://astral.sh/uv/${{ env.UV_VERSION }}/install.sh | sh
|
||||
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
uv pip install --system -e ".[dev]"
|
||||
|
||||
- name: Run ruff check
|
||||
run: |
|
||||
uv run ruff check src tests
|
||||
|
||||
- name: Run ruff format check
|
||||
run: |
|
||||
uv run ruff format --check src tests
|
||||
|
||||
typecheck:
|
||||
needs: validate-and-tag
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ needs.validate-and-tag.outputs.tag_name }}
|
||||
|
||||
- name: Install UV
|
||||
run: |
|
||||
curl -LsSf https://astral.sh/uv/${{ env.UV_VERSION }}/install.sh | sh
|
||||
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
uv pip install --system -e ".[dev]"
|
||||
|
||||
- name: Run mypy
|
||||
run: |
|
||||
uv run mypy src
|
||||
|
||||
test:
|
||||
needs: validate-and-tag
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ needs.validate-and-tag.outputs.tag_name }}
|
||||
|
||||
- name: Install UV
|
||||
run: |
|
||||
curl -LsSf https://astral.sh/uv/${{ env.UV_VERSION }}/install.sh | sh
|
||||
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
|
||||
- name: Install xdelta3
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y xdelta3
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
uv pip install --system -e ".[dev]"
|
||||
|
||||
- name: Run unit tests
|
||||
run: |
|
||||
uv run pytest tests/unit -v --tb=short
|
||||
|
||||
- name: Run integration tests
|
||||
run: |
|
||||
uv run pytest tests/integration -v --tb=short
|
||||
|
||||
e2e-test:
|
||||
needs: validate-and-tag
|
||||
runs-on: ubuntu-latest
|
||||
services:
|
||||
localstack:
|
||||
image: localstack/localstack:latest
|
||||
ports:
|
||||
- 4566:4566
|
||||
env:
|
||||
SERVICES: s3
|
||||
DEBUG: 0
|
||||
DATA_DIR: /tmp/localstack/data
|
||||
options: >-
|
||||
--health-cmd "curl -f http://localhost:4566/_localstack/health"
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ needs.validate-and-tag.outputs.tag_name }}
|
||||
|
||||
- name: Install UV
|
||||
run: |
|
||||
curl -LsSf https://astral.sh/uv/${{ env.UV_VERSION }}/install.sh | sh
|
||||
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
|
||||
- name: Install xdelta3
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y xdelta3
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
uv pip install --system -e ".[dev]"
|
||||
|
||||
- name: Run E2E tests
|
||||
env:
|
||||
AWS_ACCESS_KEY_ID: test
|
||||
AWS_SECRET_ACCESS_KEY: test
|
||||
AWS_DEFAULT_REGION: us-east-1
|
||||
AWS_ENDPOINT_URL: http://localhost:4566
|
||||
run: |
|
||||
uv run pytest tests/e2e -v --tb=short
|
||||
|
||||
publish:
|
||||
needs: [validate-and-tag, lint, typecheck, test, e2e-test]
|
||||
runs-on: ubuntu-latest
|
||||
environment: ${{ github.event.inputs.pypi_environment }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ needs.validate-and-tag.outputs.tag_name }}
|
||||
fetch-depth: 0 # Important for setuptools-scm
|
||||
|
||||
- name: Install UV
|
||||
run: |
|
||||
curl -LsSf https://astral.sh/uv/${{ env.UV_VERSION }}/install.sh | sh
|
||||
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
|
||||
- name: Build package
|
||||
run: |
|
||||
uv build
|
||||
|
||||
- name: Publish to TestPyPI
|
||||
if: github.event.inputs.pypi_environment == 'testpypi'
|
||||
uses: pypa/gh-action-pypi-publish@release/v1
|
||||
with:
|
||||
repository-url: https://test.pypi.org/legacy/
|
||||
password: ${{ secrets.TEST_PYPI_API_TOKEN }}
|
||||
|
||||
- name: Publish to PyPI
|
||||
if: github.event.inputs.pypi_environment == 'pypi'
|
||||
uses: pypa/gh-action-pypi-publish@release/v1
|
||||
with:
|
||||
password: ${{ secrets.PYPI_API_TOKEN }}
|
||||
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@v1
|
||||
continue-on-error: true # Don't fail if GitHub release creation fails
|
||||
with:
|
||||
tag_name: ${{ needs.validate-and-tag.outputs.tag_name }}
|
||||
name: Release v${{ github.event.inputs.version }}
|
||||
body: |
|
||||
## DeltaGlider v${{ github.event.inputs.version }}
|
||||
|
||||
Published to ${{ github.event.inputs.pypi_environment == 'pypi' && 'PyPI' || 'TestPyPI' }}
|
||||
|
||||
### Installation
|
||||
```bash
|
||||
pip install deltaglider==${{ github.event.inputs.version }}
|
||||
```
|
||||
draft: false
|
||||
prerelease: ${{ contains(github.event.inputs.version, '-') }}
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -86,3 +86,4 @@ docs/_templates/
|
||||
|
||||
# Temporary downloads
|
||||
temp_downloads/
|
||||
src/deltaglider/_version.py
|
||||
|
||||
225
BOTO3_COMPATIBILITY.md
Normal file
225
BOTO3_COMPATIBILITY.md
Normal file
@@ -0,0 +1,225 @@
|
||||
# boto3 S3 Client Compatibility
|
||||
|
||||
DeltaGlider implements a **subset** of boto3's S3 client API, focusing on the most commonly used operations. This is **not** a 100% drop-in replacement, but covers the core functionality needed for most use cases.
|
||||
|
||||
## ✅ Implemented Methods (21 core methods)
|
||||
|
||||
### Object Operations
|
||||
- ✅ `put_object()` - Upload objects (with automatic delta compression)
|
||||
- ✅ `get_object()` - Download objects (with automatic delta reconstruction)
|
||||
- ✅ `delete_object()` - Delete single object
|
||||
- ✅ `delete_objects()` - Delete multiple objects
|
||||
- ✅ `head_object()` - Get object metadata
|
||||
- ✅ `list_objects()` - List objects (list_objects_v2 compatible)
|
||||
- ✅ `copy_object()` - Copy objects between locations
|
||||
|
||||
### Bucket Operations
|
||||
- ✅ `create_bucket()` - Create buckets
|
||||
- ✅ `delete_bucket()` - Delete empty buckets
|
||||
- ✅ `list_buckets()` - List all buckets
|
||||
|
||||
### Presigned URLs
|
||||
- ✅ `generate_presigned_url()` - Generate presigned URLs
|
||||
- ✅ `generate_presigned_post()` - Generate presigned POST data
|
||||
|
||||
### DeltaGlider Extensions
|
||||
- ✅ `upload()` - Simple upload with S3 URL
|
||||
- ✅ `download()` - Simple download with S3 URL
|
||||
- ✅ `verify()` - Verify object integrity
|
||||
- ✅ `upload_chunked()` - Upload with progress callback
|
||||
- ✅ `upload_batch()` - Batch upload multiple files
|
||||
- ✅ `download_batch()` - Batch download multiple files
|
||||
- ✅ `estimate_compression()` - Estimate compression ratio
|
||||
- ✅ `find_similar_files()` - Find similar files for delta reference
|
||||
- ✅ `get_object_info()` - Get detailed object info with compression stats
|
||||
- ✅ `get_bucket_stats()` - Get bucket statistics
|
||||
- ✅ `delete_objects_recursive()` - Recursively delete objects
|
||||
|
||||
## ❌ Not Implemented (80+ methods)
|
||||
|
||||
### Multipart Upload
|
||||
- ❌ `create_multipart_upload()`
|
||||
- ❌ `upload_part()`
|
||||
- ❌ `complete_multipart_upload()`
|
||||
- ❌ `abort_multipart_upload()`
|
||||
- ❌ `list_multipart_uploads()`
|
||||
- ❌ `list_parts()`
|
||||
|
||||
### Access Control (ACL)
|
||||
- ❌ `get_bucket_acl()`
|
||||
- ❌ `put_bucket_acl()`
|
||||
- ❌ `get_object_acl()`
|
||||
- ❌ `put_object_acl()`
|
||||
- ❌ `get_public_access_block()`
|
||||
- ❌ `put_public_access_block()`
|
||||
- ❌ `delete_public_access_block()`
|
||||
|
||||
### Bucket Configuration
|
||||
- ❌ `get_bucket_location()`
|
||||
- ❌ `get_bucket_versioning()`
|
||||
- ❌ `put_bucket_versioning()`
|
||||
- ❌ `get_bucket_logging()`
|
||||
- ❌ `put_bucket_logging()`
|
||||
- ❌ `get_bucket_website()`
|
||||
- ❌ `put_bucket_website()`
|
||||
- ❌ `delete_bucket_website()`
|
||||
- ❌ `get_bucket_cors()`
|
||||
- ❌ `put_bucket_cors()`
|
||||
- ❌ `delete_bucket_cors()`
|
||||
- ❌ `get_bucket_lifecycle_configuration()`
|
||||
- ❌ `put_bucket_lifecycle_configuration()`
|
||||
- ❌ `delete_bucket_lifecycle()`
|
||||
- ❌ `get_bucket_policy()`
|
||||
- ❌ `put_bucket_policy()`
|
||||
- ❌ `delete_bucket_policy()`
|
||||
- ❌ `get_bucket_encryption()`
|
||||
- ❌ `put_bucket_encryption()`
|
||||
- ❌ `delete_bucket_encryption()`
|
||||
- ❌ `get_bucket_notification_configuration()`
|
||||
- ❌ `put_bucket_notification_configuration()`
|
||||
- ❌ `get_bucket_accelerate_configuration()`
|
||||
- ❌ `put_bucket_accelerate_configuration()`
|
||||
- ❌ `get_bucket_request_payment()`
|
||||
- ❌ `put_bucket_request_payment()`
|
||||
- ❌ `get_bucket_replication()`
|
||||
- ❌ `put_bucket_replication()`
|
||||
- ❌ `delete_bucket_replication()`
|
||||
|
||||
### Tagging & Metadata
|
||||
- ❌ `get_object_tagging()`
|
||||
- ❌ `put_object_tagging()`
|
||||
- ❌ `delete_object_tagging()`
|
||||
- ❌ `get_bucket_tagging()`
|
||||
- ❌ `put_bucket_tagging()`
|
||||
- ❌ `delete_bucket_tagging()`
|
||||
|
||||
### Advanced Features
|
||||
- ❌ `restore_object()` - Glacier restore
|
||||
- ❌ `select_object_content()` - S3 Select
|
||||
- ❌ `get_object_torrent()` - BitTorrent
|
||||
- ❌ `get_object_legal_hold()` - Object Lock
|
||||
- ❌ `put_object_legal_hold()`
|
||||
- ❌ `get_object_retention()`
|
||||
- ❌ `put_object_retention()`
|
||||
- ❌ `get_bucket_analytics_configuration()`
|
||||
- ❌ `put_bucket_analytics_configuration()`
|
||||
- ❌ `delete_bucket_analytics_configuration()`
|
||||
- ❌ `list_bucket_analytics_configurations()`
|
||||
- ❌ `get_bucket_metrics_configuration()`
|
||||
- ❌ `put_bucket_metrics_configuration()`
|
||||
- ❌ `delete_bucket_metrics_configuration()`
|
||||
- ❌ `list_bucket_metrics_configurations()`
|
||||
- ❌ `get_bucket_inventory_configuration()`
|
||||
- ❌ `put_bucket_inventory_configuration()`
|
||||
- ❌ `delete_bucket_inventory_configuration()`
|
||||
- ❌ `list_bucket_inventory_configurations()`
|
||||
- ❌ `get_bucket_intelligent_tiering_configuration()`
|
||||
- ❌ `put_bucket_intelligent_tiering_configuration()`
|
||||
- ❌ `delete_bucket_intelligent_tiering_configuration()`
|
||||
- ❌ `list_bucket_intelligent_tiering_configurations()`
|
||||
|
||||
### Helper Methods
|
||||
- ❌ `download_file()` - High-level download
|
||||
- ❌ `upload_file()` - High-level upload
|
||||
- ❌ `download_fileobj()` - Download to file object
|
||||
- ❌ `upload_fileobj()` - Upload from file object
|
||||
|
||||
### Other
|
||||
- ❌ `get_bucket_ownership_controls()`
|
||||
- ❌ `put_bucket_ownership_controls()`
|
||||
- ❌ `delete_bucket_ownership_controls()`
|
||||
- ❌ `get_bucket_policy_status()`
|
||||
- ❌ `list_object_versions()`
|
||||
- ❌ `create_session()` - S3 Express
|
||||
- And 20+ more metadata/configuration methods...
|
||||
|
||||
## Coverage Analysis
|
||||
|
||||
**Implemented:** ~21 methods
|
||||
**Total boto3 S3 methods:** ~100+ methods
|
||||
**Coverage:** ~20%
|
||||
|
||||
## What's Covered
|
||||
|
||||
DeltaGlider focuses on:
|
||||
1. ✅ **Core CRUD operations** - put, get, delete, list
|
||||
2. ✅ **Bucket management** - create, delete, list buckets
|
||||
3. ✅ **Basic metadata** - head_object
|
||||
4. ✅ **Presigned URLs** - generate_presigned_url/post
|
||||
5. ✅ **Delta compression** - automatic for archive files
|
||||
6. ✅ **Batch operations** - upload_batch, download_batch
|
||||
7. ✅ **Compression stats** - get_bucket_stats, estimate_compression
|
||||
|
||||
## What's NOT Covered
|
||||
|
||||
❌ **Advanced bucket configuration** (versioning, lifecycle, logging, etc.)
|
||||
❌ **Access control** (ACLs, bucket policies)
|
||||
❌ **Multipart uploads** (for >5GB files)
|
||||
❌ **Advanced features** (S3 Select, Glacier, Object Lock)
|
||||
❌ **Tagging APIs** (object/bucket tags)
|
||||
❌ **High-level transfer utilities** (upload_file, download_file)
|
||||
|
||||
## Use Cases
|
||||
|
||||
### ✅ DeltaGlider is PERFECT for:
|
||||
- Storing versioned releases/builds
|
||||
- Backup storage with deduplication
|
||||
- CI/CD artifact storage
|
||||
- Docker layer storage
|
||||
- Archive file storage (zip, tar, etc.)
|
||||
- Simple S3 storage needs
|
||||
|
||||
### ❌ Use boto3 directly for:
|
||||
- Complex bucket policies
|
||||
- Versioning/lifecycle management
|
||||
- Multipart uploads (>5GB files)
|
||||
- S3 Select queries
|
||||
- Glacier deep archive
|
||||
- Object Lock/Legal Hold
|
||||
- Advanced ACL management
|
||||
|
||||
## Migration Strategy
|
||||
|
||||
If you need both boto3 and DeltaGlider:
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
import boto3
|
||||
|
||||
# Use DeltaGlider for objects (with compression!)
|
||||
dg_client = create_client()
|
||||
dg_client.put_object(Bucket='releases', Key='app.zip', Body=data)
|
||||
|
||||
# Use boto3 for advanced features
|
||||
s3_client = boto3.client('s3')
|
||||
s3_client.put_bucket_versioning(
|
||||
Bucket='releases',
|
||||
VersioningConfiguration={'Status': 'Enabled'}
|
||||
)
|
||||
```
|
||||
|
||||
## Future Additions
|
||||
|
||||
Likely to be added:
|
||||
- `upload_file()` / `download_file()` - High-level helpers
|
||||
- `copy_object()` - Object copying
|
||||
- Basic tagging support
|
||||
- Multipart upload (for large files)
|
||||
|
||||
Unlikely to be added:
|
||||
- Advanced bucket configuration
|
||||
- ACL management
|
||||
- S3 Select
|
||||
- Glacier operations
|
||||
|
||||
## Conclusion
|
||||
|
||||
**DeltaGlider is NOT a 100% drop-in boto3 replacement.**
|
||||
|
||||
It implements the **20% of boto3 methods that cover 80% of use cases**, with a focus on:
|
||||
- Core object operations
|
||||
- Bucket management
|
||||
- Delta compression for storage savings
|
||||
- Simple, clean API
|
||||
|
||||
For advanced S3 features, use boto3 directly or in combination with DeltaGlider.
|
||||
206
CHANGELOG.md
Normal file
206
CHANGELOG.md
Normal file
@@ -0,0 +1,206 @@
|
||||
# Changelog
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [5.1.1] - 2025-01-10
|
||||
|
||||
### Fixed
|
||||
- **Stats Command**: Fixed incorrect compression ratio calculations
|
||||
- Now correctly counts ALL files including reference.bin in compressed size
|
||||
- Fixed handling of orphaned reference.bin files (reference files with no delta files)
|
||||
- Added prominent warnings for orphaned reference files with cleanup commands
|
||||
- Fixed stats for buckets with no compression (now shows 0% instead of negative)
|
||||
- SHA1 checksum files are now properly included in calculations
|
||||
|
||||
### Improved
|
||||
- **Stats Performance**: Optimized metadata fetching with parallel requests
|
||||
- 5-10x faster for buckets with many delta files
|
||||
- Uses ThreadPoolExecutor for concurrent HEAD requests
|
||||
- Single-pass calculation algorithm for better efficiency
|
||||
|
||||
## [5.1.0] - 2025-10-10
|
||||
|
||||
### Added
|
||||
- **New CLI Command**: `deltaglider stats <bucket>` for bucket statistics and compression metrics
|
||||
- Supports `--detailed` flag for comprehensive analysis
|
||||
- Supports `--json` flag for machine-readable output
|
||||
- Accepts multiple formats: `s3://bucket/`, `s3://bucket`, `bucket`
|
||||
- **Session-Level Statistics Caching**: Bucket stats now cached per client instance
|
||||
- Automatic cache invalidation on mutations (put, delete, bucket operations)
|
||||
- Intelligent cache reuse (detailed stats serve quick stat requests)
|
||||
- Enhanced `list_buckets()` includes cached stats when available
|
||||
- **Programmatic Cache Management**: Added cache management APIs for long-running applications
|
||||
- `clear_cache()`: Clear all cached references
|
||||
- `evict_cache()`: Remove specific cached reference
|
||||
- Session-scoped cache lifecycle management
|
||||
|
||||
### Changed
|
||||
- Bucket statistics are now cached within client session for performance
|
||||
- `list_buckets()` response includes `DeltaGliderStats` metadata when cached
|
||||
|
||||
### Documentation
|
||||
- Added comprehensive DG_MAX_RATIO tuning guide in docs/
|
||||
- Updated CLI command reference in CLAUDE.md and README.md
|
||||
- Added detailed cache management documentation
|
||||
|
||||
## [5.0.3] - 2025-10-10
|
||||
|
||||
### Security
|
||||
- **BREAKING**: Removed all legacy shared cache code for security
|
||||
- **BREAKING**: Encryption is now ALWAYS ON (cannot be disabled)
|
||||
- Ephemeral process-isolated cache is now the ONLY mode (no opt-out)
|
||||
- **Content-Addressed Storage (CAS)**: Implemented SHA256-based cache storage
|
||||
- Zero collision risk (SHA256 namespace guarantees uniqueness)
|
||||
- Automatic deduplication (same content = same filename)
|
||||
- Tampering protection (changing content changes SHA, breaks lookup)
|
||||
- Two-level directory structure for filesystem optimization
|
||||
- **Encrypted Cache**: All cache data encrypted at rest using Fernet (AES-128-CBC + HMAC)
|
||||
- Ephemeral encryption keys per process (forward secrecy)
|
||||
- Optional persistent keys via `DG_CACHE_ENCRYPTION_KEY` for shared filesystems
|
||||
- Automatic cleanup of corrupted cache files on decryption failures
|
||||
- Fixed TOCTOU vulnerabilities with atomic SHA validation at use-time
|
||||
- Added `get_validated_ref()` method to prevent cache poisoning
|
||||
- Eliminated multi-user data exposure through mandatory cache isolation
|
||||
|
||||
### Removed
|
||||
- **BREAKING**: Removed `DG_UNSAFE_SHARED_CACHE` environment variable
|
||||
- **BREAKING**: Removed `DG_CACHE_DIR` environment variable
|
||||
- **BREAKING**: Removed `DG_CACHE_ENCRYPTION` environment variable (encryption always on)
|
||||
- **BREAKING**: Removed `cache_dir` parameter from `create_client()`
|
||||
|
||||
### Changed
|
||||
- Cache is now auto-created in `/tmp/deltaglider-*` and cleaned on exit
|
||||
- All cache operations use file locking (Unix) and SHA validation
|
||||
- Added `CacheMissError` and `CacheCorruptionError` exceptions
|
||||
|
||||
### Added
|
||||
- New `ContentAddressedCache` adapter in `adapters/cache_cas.py`
|
||||
- New `EncryptedCache` wrapper in `adapters/cache_encrypted.py`
|
||||
- New `MemoryCache` adapter in `adapters/cache_memory.py` with LRU eviction
|
||||
- Self-describing cache structure with SHA256-based filenames
|
||||
- Configurable cache backends via `DG_CACHE_BACKEND` (filesystem or memory)
|
||||
- Memory cache size limit via `DG_CACHE_MEMORY_SIZE_MB` (default: 100MB)
|
||||
|
||||
### Internal
|
||||
- Updated all tests to use Content-Addressed Storage and encryption
|
||||
- All 119 tests passing with zero errors (99 original + 20 new cache tests)
|
||||
- Type checking: 0 errors (mypy)
|
||||
- Linting: All checks passed (ruff)
|
||||
- Completed Phase 1, 2, and 7 of SECURITY_FIX_ROADMAP.md
|
||||
- Added comprehensive test suites for encryption (13 tests) and memory cache (10 tests)
|
||||
|
||||
## [5.0.1] - 2025-01-10
|
||||
|
||||
### Changed
|
||||
- **Code Organization**: Refactored client.py from 1560 to 1154 lines (26% reduction)
|
||||
- Extracted client operations into modular `client_operations/` package:
|
||||
- `bucket.py` - S3 bucket management operations
|
||||
- `presigned.py` - Presigned URL generation
|
||||
- `batch.py` - Batch upload/download operations
|
||||
- `stats.py` - Analytics and statistics operations
|
||||
- Improved code maintainability with logical separation of concerns
|
||||
- Better developer experience with cleaner module structure
|
||||
|
||||
### Internal
|
||||
- Full type safety maintained with mypy (0 errors)
|
||||
- All 99 tests passing
|
||||
- Code quality checks passing (ruff)
|
||||
- No breaking changes - all public APIs remain unchanged
|
||||
|
||||
## [5.0.0] - 2025-01-10
|
||||
|
||||
### Added
|
||||
- boto3-compatible TypedDict types for S3 responses (no boto3 import needed)
|
||||
- Complete boto3 compatibility vision document
|
||||
- Type-safe response builders using TypedDict patterns
|
||||
|
||||
### Changed
|
||||
- **BREAKING**: `list_objects()` now returns boto3-compatible dict instead of custom dataclass
|
||||
- Use `response['Contents']` instead of `response.contents`
|
||||
- Use `response.get('IsTruncated')` instead of `response.is_truncated`
|
||||
- Use `response.get('NextContinuationToken')` instead of `response.next_continuation_token`
|
||||
- DeltaGlider metadata now in `Metadata` field of each object
|
||||
- Internal response building now uses TypedDict for compile-time type safety
|
||||
- All S3 responses are dicts at runtime (TypedDict is a dict!)
|
||||
|
||||
### Fixed
|
||||
- Updated all documentation examples to use dict-based responses
|
||||
- Fixed pagination examples in README and API docs
|
||||
- Corrected SDK documentation with accurate method signatures
|
||||
|
||||
## [4.2.4] - 2025-01-10
|
||||
|
||||
### Fixed
|
||||
- Show only filename in `ls` output instead of full path for cleaner display
|
||||
- Correct `ls` command path handling and prefix display logic
|
||||
|
||||
## [4.2.3] - 2025-01-07
|
||||
|
||||
### Added
|
||||
- Comprehensive test coverage for `delete_objects_recursive()` method with 19 thorough tests
|
||||
- Tests cover delta suffix handling, error/warning aggregation, statistics tracking, and edge cases
|
||||
- Better code organization with separate `client_models.py` and `client_delete_helpers.py` modules
|
||||
|
||||
### Fixed
|
||||
- Fixed all mypy type errors using proper `cast()` for type safety
|
||||
- Improved type hints for dictionary operations in client code
|
||||
|
||||
### Changed
|
||||
- Refactored client code into logical modules for better maintainability
|
||||
- Enhanced code quality with comprehensive linting and type checking
|
||||
- All 99 integration/unit tests passing with zero type errors
|
||||
|
||||
### Internal
|
||||
- Better separation of concerns in client module
|
||||
- Improved developer experience with clearer code structure
|
||||
|
||||
## [4.2.2] - 2024-10-06
|
||||
|
||||
### Fixed
|
||||
- Add .delta suffix fallback for `delete_object()` method
|
||||
- Handle regular S3 objects without DeltaGlider metadata
|
||||
- Update mypy type ignore comment for compatibility
|
||||
|
||||
## [4.2.1] - 2024-10-06
|
||||
|
||||
### Fixed
|
||||
- Make GitHub release creation non-blocking in workflows
|
||||
|
||||
## [4.2.0] - 2024-10-03
|
||||
|
||||
### Added
|
||||
- AWS credential parameters to `create_client()` function
|
||||
- Support for custom endpoint URLs
|
||||
- Enhanced boto3 compatibility
|
||||
|
||||
## [4.1.0] - 2024-09-29
|
||||
|
||||
### Added
|
||||
- boto3-compatible client API
|
||||
- Bucket management methods
|
||||
- Comprehensive SDK documentation
|
||||
|
||||
## [4.0.0] - 2024-09-21
|
||||
|
||||
### Added
|
||||
- Initial public release
|
||||
- CLI with AWS S3 compatibility
|
||||
- Delta compression for versioned artifacts
|
||||
- 99%+ compression for similar files
|
||||
|
||||
[5.1.0]: https://github.com/beshu-tech/deltaglider/compare/v5.0.3...v5.1.0
|
||||
[5.0.3]: https://github.com/beshu-tech/deltaglider/compare/v5.0.1...v5.0.3
|
||||
[5.0.1]: https://github.com/beshu-tech/deltaglider/compare/v5.0.0...v5.0.1
|
||||
[5.0.0]: https://github.com/beshu-tech/deltaglider/compare/v4.2.4...v5.0.0
|
||||
[4.2.4]: https://github.com/beshu-tech/deltaglider/compare/v4.2.3...v4.2.4
|
||||
[4.2.3]: https://github.com/beshu-tech/deltaglider/compare/v4.2.2...v4.2.3
|
||||
[4.2.2]: https://github.com/beshu-tech/deltaglider/compare/v4.2.1...v4.2.2
|
||||
[4.2.1]: https://github.com/beshu-tech/deltaglider/compare/v4.2.0...v4.2.1
|
||||
[4.2.0]: https://github.com/beshu-tech/deltaglider/compare/v4.1.0...v4.2.0
|
||||
[4.1.0]: https://github.com/beshu-tech/deltaglider/compare/v4.0.0...v4.1.0
|
||||
[4.0.0]: https://github.com/beshu-tech/deltaglider/releases/tag/v4.0.0
|
||||
67
CLAUDE.md
67
CLAUDE.md
@@ -74,6 +74,17 @@ export AWS_SECRET_ACCESS_KEY=minioadmin
|
||||
|
||||
# Now you can use deltaglider commands
|
||||
deltaglider cp test.zip s3://test-bucket/
|
||||
deltaglider stats test-bucket # Get bucket statistics
|
||||
```
|
||||
|
||||
### Available CLI Commands
|
||||
```bash
|
||||
cp # Copy files to/from S3 (AWS S3 compatible)
|
||||
ls # List S3 buckets or objects (AWS S3 compatible)
|
||||
rm # Remove S3 objects (AWS S3 compatible)
|
||||
sync # Synchronize directories with S3 (AWS S3 compatible)
|
||||
stats # Get bucket statistics and compression metrics
|
||||
verify # Verify integrity of delta file
|
||||
```
|
||||
|
||||
## Architecture
|
||||
@@ -97,13 +108,15 @@ src/deltaglider/
|
||||
│ ├── logger.py # LoggerPort protocol for logging
|
||||
│ └── metrics.py # MetricsPort protocol for observability
|
||||
├── adapters/ # Concrete implementations
|
||||
│ ├── storage_s3.py # S3StorageAdapter using boto3
|
||||
│ ├── diff_xdelta.py # XdeltaAdapter using xdelta3 binary
|
||||
│ ├── hash_sha256.py # Sha256Adapter for checksums
|
||||
│ ├── cache_fs.py # FsCacheAdapter for file system cache
|
||||
│ ├── clock_utc.py # UtcClockAdapter for UTC timestamps
|
||||
│ ├── logger_std.py # StdLoggerAdapter for console output
|
||||
│ └── metrics_noop.py # NoopMetricsAdapter (placeholder)
|
||||
│ ├── storage_s3.py # S3StorageAdapter using boto3
|
||||
│ ├── diff_xdelta.py # XdeltaAdapter using xdelta3 binary
|
||||
│ ├── hash_sha256.py # Sha256Adapter for checksums
|
||||
│ ├── cache_cas.py # ContentAddressedCache (SHA256-based storage)
|
||||
│ ├── cache_encrypted.py # EncryptedCache (Fernet encryption wrapper)
|
||||
│ ├── cache_memory.py # MemoryCache (LRU in-memory cache)
|
||||
│ ├── clock_utc.py # UtcClockAdapter for UTC timestamps
|
||||
│ ├── logger_std.py # StdLoggerAdapter for console output
|
||||
│ └── metrics_noop.py # NoopMetricsAdapter (placeholder)
|
||||
└── app/
|
||||
└── cli/ # Click-based CLI application
|
||||
├── main.py # Main CLI entry point with AWS S3 commands
|
||||
@@ -129,7 +142,6 @@ src/deltaglider/
|
||||
4. **AWS S3 CLI Compatibility**:
|
||||
- Commands (`cp`, `ls`, `rm`, `sync`) mirror AWS CLI syntax exactly
|
||||
- Located in `app/cli/main.py` with helpers in `aws_compat.py`
|
||||
- Maintains backward compatibility with original `put`/`get` commands
|
||||
|
||||
### Key Algorithms
|
||||
|
||||
@@ -141,7 +153,13 @@ src/deltaglider/
|
||||
2. **Reference Management** (`core/service.py`):
|
||||
- Reference stored at `{deltaspace.prefix}/reference.bin`
|
||||
- SHA256 verification on every read/write
|
||||
- Local cache in `/tmp/.deltaglider/reference_cache` for performance
|
||||
- **Content-Addressed Storage (CAS)** cache in `/tmp/deltaglider-*` (ephemeral)
|
||||
- Cache uses SHA256 as filename with two-level directory structure (ab/cd/abcdef...)
|
||||
- Automatic deduplication: same content = same SHA = same cache file
|
||||
- Zero collision risk: SHA256 namespace guarantees uniqueness
|
||||
- **Encryption**: Optional Fernet (AES-128-CBC + HMAC) encryption at rest (enabled by default)
|
||||
- Ephemeral encryption keys per process for forward secrecy
|
||||
- **Cache Backends**: Configurable filesystem or in-memory cache with LRU eviction
|
||||
|
||||
3. **Sync Algorithm** (`app/cli/sync.py`):
|
||||
- Compares local vs S3 using size and modification time
|
||||
@@ -182,13 +200,26 @@ Core delta logic is in `src/deltaglider/core/service.py`:
|
||||
## Environment Variables
|
||||
|
||||
- `DG_LOG_LEVEL`: Logging level (default: "INFO")
|
||||
- `DG_CACHE_DIR`: Local reference cache directory (default: "/tmp/.deltaglider/reference_cache")
|
||||
- `DG_MAX_RATIO`: Maximum acceptable delta/file ratio (default: "0.5")
|
||||
- `DG_MAX_RATIO`: Maximum acceptable delta/file ratio (default: "0.5", range: "0.0-1.0")
|
||||
- **See [docs/DG_MAX_RATIO.md](docs/DG_MAX_RATIO.md) for complete tuning guide**
|
||||
- Controls when to use delta vs. direct storage
|
||||
- Lower (0.2-0.3) = conservative, only high-quality compression
|
||||
- Higher (0.6-0.7) = permissive, accept modest savings
|
||||
- `DG_CACHE_BACKEND`: Cache backend type - "filesystem" (default) or "memory"
|
||||
- `DG_CACHE_MEMORY_SIZE_MB`: Memory cache size limit in MB (default: "100")
|
||||
- `DG_CACHE_ENCRYPTION_KEY`: Optional base64-encoded Fernet key for persistent encryption (ephemeral by default)
|
||||
- `AWS_ENDPOINT_URL`: Override S3 endpoint for MinIO/LocalStack
|
||||
- `AWS_ACCESS_KEY_ID`: AWS credentials
|
||||
- `AWS_SECRET_ACCESS_KEY`: AWS credentials
|
||||
- `AWS_DEFAULT_REGION`: AWS region
|
||||
|
||||
**Security Notes**:
|
||||
- **Encryption Always On**: Cache data is ALWAYS encrypted (cannot be disabled)
|
||||
- **Ephemeral Keys**: Encryption keys auto-generated per process for maximum security
|
||||
- **Auto-Cleanup**: Corrupted cache files automatically deleted on decryption failures
|
||||
- **Process Isolation**: Each process gets isolated cache in `/tmp/deltaglider-*`, cleaned up on exit
|
||||
- **Persistent Keys**: Set `DG_CACHE_ENCRYPTION_KEY` only if you need cross-process cache sharing (e.g., shared filesystems)
|
||||
|
||||
## Important Implementation Details
|
||||
|
||||
1. **xdelta3 Binary Dependency**: The system requires xdelta3 binary installed on the system. The `XdeltaAdapter` uses subprocess to call it.
|
||||
@@ -203,7 +234,11 @@ Core delta logic is in `src/deltaglider/core/service.py`:
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
- Local reference caching dramatically improves performance for repeated operations
|
||||
- **Content-Addressed Storage**: SHA256-based deduplication eliminates redundant storage
|
||||
- **Cache Backends**:
|
||||
- Filesystem cache (default): persistent across processes, good for shared workflows
|
||||
- Memory cache: faster, zero I/O, perfect for ephemeral CI/CD pipelines
|
||||
- **Encryption Overhead**: ~10-15% performance impact, provides security at rest
|
||||
- Delta compression is CPU-intensive; consider parallelization for bulk uploads
|
||||
- The default max_ratio of 0.5 prevents storing inefficient deltas
|
||||
- For files <1MB, delta overhead may exceed benefits
|
||||
@@ -213,4 +248,10 @@ Core delta logic is in `src/deltaglider/core/service.py`:
|
||||
- Never store AWS credentials in code
|
||||
- Use IAM roles when possible
|
||||
- All S3 operations respect bucket policies and encryption settings
|
||||
- SHA256 checksums prevent tampering and corruption
|
||||
- SHA256 checksums prevent tampering and corruption
|
||||
- **Encryption Always On**: Cache data is ALWAYS encrypted using Fernet (AES-128-CBC + HMAC) - cannot be disabled
|
||||
- **Ephemeral Keys**: Encryption keys auto-generated per process for forward secrecy and process isolation
|
||||
- **Auto-Cleanup**: Corrupted or tampered cache files automatically deleted on decryption failures
|
||||
- **Persistent Keys**: Set `DG_CACHE_ENCRYPTION_KEY` only for cross-process cache sharing (use secrets management)
|
||||
- **Content-Addressed Storage**: SHA256-based filenames prevent collision attacks
|
||||
- **Zero-Trust Cache**: All cache operations include cryptographic validation
|
||||
38
Dockerfile
38
Dockerfile
@@ -30,7 +30,16 @@ RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
# Runtime stage - minimal image
|
||||
FROM python:${PYTHON_VERSION}
|
||||
|
||||
# Install xdelta3
|
||||
# Skip man pages and docs to speed up builds
|
||||
RUN mkdir -p /etc/dpkg/dpkg.cfg.d && \
|
||||
echo 'path-exclude /usr/share/doc/*' > /etc/dpkg/dpkg.cfg.d/01_nodoc && \
|
||||
echo 'path-exclude /usr/share/man/*' >> /etc/dpkg/dpkg.cfg.d/01_nodoc && \
|
||||
echo 'path-exclude /usr/share/groff/*' >> /etc/dpkg/dpkg.cfg.d/01_nodoc && \
|
||||
echo 'path-exclude /usr/share/info/*' >> /etc/dpkg/dpkg.cfg.d/01_nodoc && \
|
||||
echo 'path-exclude /usr/share/lintian/*' >> /etc/dpkg/dpkg.cfg.d/01_nodoc && \
|
||||
echo 'path-exclude /usr/share/linda/*' >> /etc/dpkg/dpkg.cfg.d/01_nodoc
|
||||
|
||||
# Install xdelta3 (now much faster without man pages)
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends xdelta3 && \
|
||||
apt-get clean && \
|
||||
@@ -57,10 +66,33 @@ USER deltaglider
|
||||
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
|
||||
CMD deltaglider --help || exit 1
|
||||
|
||||
# Environment variables (all optional, can be overridden at runtime)
|
||||
# Logging
|
||||
ENV DG_LOG_LEVEL=INFO
|
||||
|
||||
# Performance & Compression
|
||||
# DG_MAX_RATIO: Maximum delta/file ratio (0.0-1.0)
|
||||
# Default 0.5 means: only use delta if delta_size ≤ 50% of original_size
|
||||
# Lower (0.2-0.3) = more conservative, only high-quality compression
|
||||
# Higher (0.6-0.7) = more permissive, accept modest savings
|
||||
# See docs/DG_MAX_RATIO.md for complete tuning guide
|
||||
ENV DG_MAX_RATIO=0.5
|
||||
|
||||
# Cache Configuration
|
||||
ENV DG_CACHE_BACKEND=filesystem
|
||||
ENV DG_CACHE_MEMORY_SIZE_MB=100
|
||||
# ENV DG_CACHE_ENCRYPTION_KEY=<base64-key> # Optional: Set for cross-process cache sharing
|
||||
|
||||
# AWS Configuration (override at runtime)
|
||||
# ENV AWS_ENDPOINT_URL=https://s3.amazonaws.com
|
||||
# ENV AWS_ACCESS_KEY_ID=<your-key>
|
||||
# ENV AWS_SECRET_ACCESS_KEY=<your-secret>
|
||||
# ENV AWS_DEFAULT_REGION=us-east-1
|
||||
|
||||
# Labels
|
||||
LABEL org.opencontainers.image.title="DeltaGlider" \
|
||||
org.opencontainers.image.description="Delta-aware S3 file storage wrapper" \
|
||||
org.opencontainers.image.version="0.1.0" \
|
||||
org.opencontainers.image.description="Delta-aware S3 file storage wrapper with encryption" \
|
||||
org.opencontainers.image.version="5.0.3" \
|
||||
org.opencontainers.image.authors="Beshu Limited" \
|
||||
org.opencontainers.image.source="https://github.com/beshu-tech/deltaglider"
|
||||
|
||||
|
||||
122
PYPI_RELEASE.md
122
PYPI_RELEASE.md
@@ -1,122 +0,0 @@
|
||||
# Publishing DeltaGlider to PyPI
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. Create PyPI account at https://pypi.org
|
||||
2. Create API token at https://pypi.org/manage/account/token/
|
||||
3. Install build tools:
|
||||
```bash
|
||||
pip install build twine
|
||||
```
|
||||
|
||||
## Build the Package
|
||||
|
||||
```bash
|
||||
# Clean previous builds
|
||||
rm -rf dist/ build/ *.egg-info/
|
||||
|
||||
# Build source distribution and wheel
|
||||
python -m build
|
||||
|
||||
# This creates:
|
||||
# - dist/deltaglider-0.1.0.tar.gz (source distribution)
|
||||
# - dist/deltaglider-0.1.0-py3-none-any.whl (wheel)
|
||||
```
|
||||
|
||||
## Test with TestPyPI (Optional but Recommended)
|
||||
|
||||
1. Upload to TestPyPI:
|
||||
```bash
|
||||
python -m twine upload --repository testpypi dist/*
|
||||
```
|
||||
|
||||
2. Test installation:
|
||||
```bash
|
||||
pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ deltaglider
|
||||
```
|
||||
|
||||
## Upload to PyPI
|
||||
|
||||
```bash
|
||||
# Upload to PyPI
|
||||
python -m twine upload dist/*
|
||||
|
||||
# You'll be prompted for:
|
||||
# - username: __token__
|
||||
# - password: <your-pypi-api-token>
|
||||
```
|
||||
|
||||
## Verify Installation
|
||||
|
||||
```bash
|
||||
# Install from PyPI
|
||||
pip install deltaglider
|
||||
|
||||
# Test it works
|
||||
deltaglider --help
|
||||
```
|
||||
|
||||
## GitHub Release
|
||||
|
||||
After PyPI release, create a GitHub release:
|
||||
|
||||
```bash
|
||||
git tag -a v0.1.0 -m "Release version 0.1.0"
|
||||
git push origin v0.1.0
|
||||
```
|
||||
|
||||
Then create a release on GitHub:
|
||||
1. Go to https://github.com/beshu-tech/deltaglider/releases
|
||||
2. Click "Create a new release"
|
||||
3. Select the tag v0.1.0
|
||||
4. Add release notes from CHANGELOG
|
||||
5. Attach the wheel and source distribution from dist/
|
||||
6. Publish release
|
||||
|
||||
## Version Bumping
|
||||
|
||||
For next release:
|
||||
1. Update version in `pyproject.toml`
|
||||
2. Update CHANGELOG
|
||||
3. Commit changes
|
||||
4. Follow steps above
|
||||
|
||||
## Automated Release (GitHub Actions)
|
||||
|
||||
Consider adding `.github/workflows/publish.yml`:
|
||||
|
||||
```yaml
|
||||
name: Publish to PyPI
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
|
||||
jobs:
|
||||
publish:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.11'
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
pip install build twine
|
||||
- name: Build package
|
||||
run: python -m build
|
||||
- name: Publish to PyPI
|
||||
env:
|
||||
TWINE_USERNAME: __token__
|
||||
TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
|
||||
run: |
|
||||
twine upload dist/*
|
||||
```
|
||||
|
||||
## Marketing After Release
|
||||
|
||||
1. **Hacker News**: Post with compelling title focusing on the 99.9% compression
|
||||
2. **Reddit**: r/Python, r/devops, r/aws
|
||||
3. **Twitter/X**: Tag AWS, Python, and DevOps influencers
|
||||
4. **Dev.to / Medium**: Write technical article about the architecture
|
||||
5. **PyPI Description**: Ensure it's compelling and includes the case study link
|
||||
454
README.md
454
README.md
@@ -12,11 +12,11 @@
|
||||
|
||||
**Store 4TB of similar files in 5GB. No, that's not a typo.**
|
||||
|
||||
DeltaGlider is a drop-in S3 replacement that achieves 99.9% compression for versioned artifacts, backups, and release archives through intelligent binary delta compression.
|
||||
DeltaGlider is a drop-in S3 replacement that may achieve 99.9% size reduction for versioned compressed artifacts, backups, and release archives through intelligent binary delta compression (via xdelta3).
|
||||
|
||||
## The Problem We Solved
|
||||
|
||||
You're storing hundreds of versions of your releases. Each 100MB build differs by <1% from the previous version. You're paying to store 100GB of what's essentially 100MB of unique data.
|
||||
You're storing hundreds of versions of your software releases. Each 100MB build differs by <1% from the previous version. You're paying to store 100GB of what's essentially 100MB of unique data.
|
||||
|
||||
Sound familiar?
|
||||
|
||||
@@ -28,7 +28,101 @@ From our [ReadOnlyREST case study](docs/case-study-readonlyrest.md):
|
||||
- **Compression**: 99.9% (not a typo)
|
||||
- **Integration time**: 5 minutes
|
||||
|
||||
## How It Works
|
||||
## Quick Start
|
||||
|
||||
The quickest way to start is using the GUI
|
||||
* https://github.com/sscarduzio/dg_commander/
|
||||
|
||||
### CLI Installation
|
||||
|
||||
```bash
|
||||
# Via pip (Python 3.11+)
|
||||
pip install deltaglider
|
||||
|
||||
# Via uv (faster)
|
||||
uv pip install deltaglider
|
||||
|
||||
# Via Docker
|
||||
docker run -v ~/.aws:/root/.aws deltaglider/deltaglider --help
|
||||
```
|
||||
|
||||
### Docker Usage
|
||||
|
||||
DeltaGlider provides a secure, production-ready Docker image with encryption always enabled:
|
||||
|
||||
```bash
|
||||
# Basic usage with AWS credentials from environment
|
||||
docker run -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY \
|
||||
deltaglider/deltaglider ls s3://my-bucket/
|
||||
|
||||
# Mount AWS credentials
|
||||
docker run -v ~/.aws:/root/.aws:ro \
|
||||
deltaglider/deltaglider cp file.zip s3://releases/
|
||||
|
||||
# Use memory cache for ephemeral CI/CD pipelines (faster)
|
||||
docker run -e DG_CACHE_BACKEND=memory \
|
||||
-e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY \
|
||||
deltaglider/deltaglider sync ./dist/ s3://releases/v1.0.0/
|
||||
|
||||
# Configure memory cache size (default: 100MB)
|
||||
docker run -e DG_CACHE_BACKEND=memory \
|
||||
-e DG_CACHE_MEMORY_SIZE_MB=500 \
|
||||
-e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY \
|
||||
deltaglider/deltaglider cp large-file.zip s3://releases/
|
||||
|
||||
# Use MinIO or custom S3 endpoint
|
||||
docker run -e AWS_ENDPOINT_URL=http://minio:9000 \
|
||||
-e AWS_ACCESS_KEY_ID=minioadmin \
|
||||
-e AWS_SECRET_ACCESS_KEY=minioadmin \
|
||||
deltaglider/deltaglider ls s3://test-bucket/
|
||||
|
||||
# Persistent encryption key for cross-container cache sharing
|
||||
# (Only needed if sharing cache across containers via volume mount)
|
||||
docker run -v /shared-cache:/tmp/.deltaglider \
|
||||
-e DG_CACHE_ENCRYPTION_KEY=$(openssl rand -base64 32) \
|
||||
deltaglider/deltaglider cp file.zip s3://releases/
|
||||
```
|
||||
|
||||
**Environment Variables**:
|
||||
- `DG_LOG_LEVEL`: Logging level (default: `INFO`, options: `DEBUG`, `INFO`, `WARNING`, `ERROR`)
|
||||
- `DG_MAX_RATIO`: Maximum delta/file ratio (default: `0.5`, range: `0.0-1.0`) - [📖 Complete Guide](docs/DG_MAX_RATIO.md)
|
||||
- `DG_CACHE_BACKEND`: Cache backend (default: `filesystem`, options: `filesystem`, `memory`)
|
||||
- `DG_CACHE_MEMORY_SIZE_MB`: Memory cache size in MB (default: `100`)
|
||||
- `DG_CACHE_ENCRYPTION_KEY`: Optional base64-encoded encryption key for cross-process cache sharing
|
||||
- `AWS_ENDPOINT_URL`: S3 endpoint URL (default: AWS S3)
|
||||
- `AWS_ACCESS_KEY_ID`: AWS access key
|
||||
- `AWS_SECRET_ACCESS_KEY`: AWS secret key
|
||||
- `AWS_DEFAULT_REGION`: AWS region (default: `us-east-1`)
|
||||
|
||||
> **💡 Tip**: `DG_MAX_RATIO` is a powerful tuning parameter. See the [DG_MAX_RATIO guide](docs/DG_MAX_RATIO.md) to learn how to optimize compression for your use case.
|
||||
|
||||
**Security Notes**:
|
||||
- Encryption is **always enabled** (cannot be disabled)
|
||||
- Each container gets ephemeral encryption keys for maximum security
|
||||
- Corrupted cache files are automatically deleted
|
||||
- Use `DG_CACHE_ENCRYPTION_KEY` only for persistent cache sharing (store securely)
|
||||
|
||||
### Basic Usage
|
||||
|
||||
```bash
|
||||
# Upload a file (automatic delta compression)
|
||||
deltaglider cp my-app-v1.0.0.zip s3://releases/
|
||||
|
||||
# Download a file (automatic delta reconstruction)
|
||||
deltaglider cp s3://releases/my-app-v1.0.0.zip ./downloaded.zip
|
||||
|
||||
# List objects
|
||||
deltaglider ls s3://releases/
|
||||
|
||||
# Sync directories
|
||||
deltaglider sync ./dist/ s3://releases/v1.0.0/
|
||||
```
|
||||
|
||||
**That's it!** DeltaGlider automatically detects similar files and applies 99%+ compression. For more commands and options, see [CLI Reference](#cli-reference).
|
||||
|
||||
## Core Concepts
|
||||
|
||||
### How It Works
|
||||
|
||||
```
|
||||
Traditional S3:
|
||||
@@ -42,24 +136,32 @@ With DeltaGlider:
|
||||
v1.0.2.zip (100MB) → S3: 97KB delta (100.3MB total)
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
DeltaGlider stores the first file as a reference and subsequent similar files as tiny deltas (differences). When you download, it reconstructs the original file perfectly using the reference + delta.
|
||||
|
||||
### Installation
|
||||
### Intelligent File Type Detection
|
||||
|
||||
```bash
|
||||
# Via pip (Python 3.11+)
|
||||
pip install deltaglider
|
||||
DeltaGlider automatically detects file types and applies the optimal strategy:
|
||||
|
||||
# Via uv (faster)
|
||||
uv pip install deltaglider
|
||||
| File Type | Strategy | Typical Compression | Why It Works |
|
||||
|-----------|----------|---------------------|--------------|
|
||||
| `.zip`, `.tar`, `.gz` | Binary delta | 99%+ for similar versions | Archive structure remains consistent between versions |
|
||||
| `.dmg`, `.deb`, `.rpm` | Binary delta | 95%+ for similar versions | Package formats with predictable structure |
|
||||
| `.jar`, `.war`, `.ear` | Binary delta | 90%+ for similar builds | Java archives with mostly unchanged classes |
|
||||
| `.exe`, `.dll`, `.so` | Direct upload | 0% (no delta benefit) | Compiled code changes unpredictably |
|
||||
| `.txt`, `.json`, `.xml` | Direct upload | 0% (use gzip instead) | Text files benefit more from standard compression |
|
||||
| `.sha1`, `.sha512`, `.md5` | Direct upload | 0% (already minimal) | Hash files are unique by design |
|
||||
|
||||
# Via Docker
|
||||
docker run -v ~/.aws:/root/.aws deltaglider/deltaglider --help
|
||||
```
|
||||
### Key Features
|
||||
|
||||
### AWS S3 Compatible Commands
|
||||
- **AWS CLI Replacement**: Same commands as `aws s3` with automatic compression
|
||||
- **boto3-Compatible SDK**: Works with existing boto3 code with minimal changes
|
||||
- **Zero Configuration**: No databases, no manifest files, no complex setup
|
||||
- **Data Integrity**: SHA256 verification on every operation
|
||||
- **S3 Compatible**: Works with AWS S3, MinIO, Cloudflare R2, and any S3-compatible storage
|
||||
|
||||
DeltaGlider is a **drop-in replacement** for AWS S3 CLI with automatic delta compression:
|
||||
## CLI Reference
|
||||
|
||||
### All Commands
|
||||
|
||||
```bash
|
||||
# Copy files to/from S3 (automatic delta compression for archives)
|
||||
@@ -87,97 +189,46 @@ deltaglider sync s3://releases/ ./local-backup/ # Sync from S3
|
||||
deltaglider sync --delete ./src/ s3://backup/ # Mirror exactly
|
||||
deltaglider sync --exclude "*.log" ./src/ s3://backup/ # Exclude patterns
|
||||
|
||||
# Get bucket statistics (compression metrics)
|
||||
deltaglider stats my-bucket # Quick stats overview
|
||||
deltaglider stats s3://my-bucket # Also accepts s3:// format
|
||||
deltaglider stats s3://my-bucket/ # With or without trailing slash
|
||||
deltaglider stats my-bucket --detailed # Detailed compression metrics (slower)
|
||||
deltaglider stats my-bucket --json # JSON output for automation
|
||||
|
||||
# Works with MinIO, R2, and S3-compatible storage
|
||||
deltaglider cp file.zip s3://bucket/ --endpoint-url http://localhost:9000
|
||||
```
|
||||
|
||||
### Legacy Commands (still supported)
|
||||
### Command Flags
|
||||
|
||||
```bash
|
||||
# Original DeltaGlider commands
|
||||
deltaglider put my-app-v1.0.0.zip s3://releases/
|
||||
deltaglider get s3://releases/my-app-v1.0.1.zip
|
||||
deltaglider verify s3://releases/my-app-v1.0.1.zip.delta
|
||||
# All standard AWS flags work
|
||||
deltaglider cp file.zip s3://bucket/ \
|
||||
--endpoint-url http://localhost:9000 \
|
||||
--profile production \
|
||||
--region us-west-2
|
||||
|
||||
# DeltaGlider-specific flags
|
||||
deltaglider cp file.zip s3://bucket/ \
|
||||
--no-delta # Disable compression for specific files
|
||||
--max-ratio 0.8 # Only use delta if compression > 20%
|
||||
```
|
||||
|
||||
## Why xdelta3 Excels at Archive Compression
|
||||
### CI/CD Integration
|
||||
|
||||
Traditional diff algorithms (like `diff` or `git diff`) work line-by-line on text files. Binary diff tools like `bsdiff` or `courgette` are optimized for executables. But **xdelta3** is uniquely suited for compressed archives because:
|
||||
|
||||
1. **Block-level matching**: xdelta3 uses a rolling hash algorithm to find matching byte sequences at any offset, not just line boundaries. This is crucial for archives where small file changes can shift all subsequent byte positions.
|
||||
|
||||
2. **Large window support**: xdelta3 can use reference windows up to 2GB, allowing it to find matches even when content has moved significantly within the archive. Other delta algorithms typically use much smaller windows (64KB-1MB).
|
||||
|
||||
3. **Compression-aware**: When you update one file in a ZIP/TAR archive, the archive format itself remains largely identical - same compression dictionary, same structure. xdelta3 preserves these similarities while other algorithms might miss them.
|
||||
|
||||
4. **Format agnostic**: Unlike specialized tools (e.g., `courgette` for Chrome updates), xdelta3 works on raw bytes without understanding the file format, making it perfect for any archive type.
|
||||
|
||||
### Real-World Example
|
||||
When you rebuild a JAR file with one class changed:
|
||||
- **Text diff**: 100% different (it's binary data!)
|
||||
- **bsdiff**: ~30-40% of original size (optimized for executables, not archives)
|
||||
- **xdelta3**: ~0.1-1% of original size (finds the unchanged parts regardless of position)
|
||||
|
||||
This is why DeltaGlider achieves 99%+ compression on versioned archives - xdelta3 can identify that 99% of the archive structure and content remains identical between versions.
|
||||
|
||||
## Intelligent File Type Detection
|
||||
|
||||
DeltaGlider automatically detects file types and applies the optimal strategy:
|
||||
|
||||
| File Type | Strategy | Typical Compression | Why It Works |
|
||||
|-----------|----------|-------------------|--------------|
|
||||
| `.zip`, `.tar`, `.gz` | Binary delta | 99%+ for similar versions | Archive structure remains consistent between versions |
|
||||
| `.dmg`, `.deb`, `.rpm` | Binary delta | 95%+ for similar versions | Package formats with predictable structure |
|
||||
| `.jar`, `.war`, `.ear` | Binary delta | 90%+ for similar builds | Java archives with mostly unchanged classes |
|
||||
| `.exe`, `.dll`, `.so` | Direct upload | 0% (no delta benefit) | Compiled code changes unpredictably |
|
||||
| `.txt`, `.json`, `.xml` | Direct upload | 0% (use gzip instead) | Text files benefit more from standard compression |
|
||||
| `.sha1`, `.sha512`, `.md5` | Direct upload | 0% (already minimal) | Hash files are unique by design |
|
||||
|
||||
## Performance Benchmarks
|
||||
|
||||
Testing with real software releases:
|
||||
|
||||
```python
|
||||
# 513 Elasticsearch plugin releases (82.5MB each)
|
||||
Original size: 42.3 GB
|
||||
DeltaGlider size: 115 MB
|
||||
Compression: 99.7%
|
||||
Upload speed: 3-4 files/second
|
||||
Download speed: <100ms reconstruction
|
||||
```
|
||||
|
||||
## Integration Examples
|
||||
|
||||
### Drop-in AWS CLI Replacement
|
||||
|
||||
```bash
|
||||
# Before (aws-cli)
|
||||
aws s3 cp release-v2.0.0.zip s3://releases/
|
||||
aws s3 cp --recursive ./build/ s3://releases/v2.0.0/
|
||||
aws s3 ls s3://releases/
|
||||
aws s3 rm s3://releases/old-version.zip
|
||||
|
||||
# After (deltaglider) - Same commands, 99% less storage!
|
||||
deltaglider cp release-v2.0.0.zip s3://releases/
|
||||
deltaglider cp -r ./build/ s3://releases/v2.0.0/
|
||||
deltaglider ls s3://releases/
|
||||
deltaglider rm s3://releases/old-version.zip
|
||||
```
|
||||
|
||||
### CI/CD Pipeline (GitHub Actions)
|
||||
#### GitHub Actions
|
||||
|
||||
```yaml
|
||||
- name: Upload Release with 99% compression
|
||||
run: |
|
||||
pip install deltaglider
|
||||
# Use AWS S3 compatible syntax
|
||||
deltaglider cp dist/*.zip s3://releases/${{ github.ref_name }}/
|
||||
|
||||
# Or use recursive for entire directories
|
||||
# Or recursive for entire directories
|
||||
deltaglider cp -r dist/ s3://releases/${{ github.ref_name }}/
|
||||
```
|
||||
|
||||
### Backup Script
|
||||
#### Daily Backup Script
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
@@ -186,20 +237,17 @@ tar -czf backup-$(date +%Y%m%d).tar.gz /data
|
||||
deltaglider cp backup-*.tar.gz s3://backups/
|
||||
# Only changes are stored, not full backup
|
||||
|
||||
# List backups with human-readable sizes
|
||||
deltaglider ls -h s3://backups/
|
||||
|
||||
# Clean up old backups
|
||||
deltaglider rm -r s3://backups/2023/
|
||||
```
|
||||
|
||||
### Python SDK - Drop-in boto3 Replacement
|
||||
## Python SDK
|
||||
|
||||
**[📚 Full SDK Documentation](docs/sdk/README.md)** | **[API Reference](docs/sdk/api.md)** | **[Examples](docs/sdk/examples.md)**
|
||||
**[📚 Full SDK Documentation](docs/sdk/README.md)** | **[API Reference](docs/sdk/api.md)** | **[Examples](docs/sdk/examples.md)** | **[boto3 Compatibility Guide](BOTO3_COMPATIBILITY.md)**
|
||||
|
||||
#### Quick Start - boto3 Compatible API (Recommended)
|
||||
### boto3-Compatible API (Recommended)
|
||||
|
||||
DeltaGlider provides a **100% boto3-compatible API** that works as a drop-in replacement for AWS S3 SDK:
|
||||
DeltaGlider provides a **boto3-compatible API** for core S3 operations (21 methods covering 80% of use cases):
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
@@ -220,13 +268,57 @@ response = client.get_object(Bucket='releases', Key='v2.0.0/my-app.zip')
|
||||
with open('downloaded.zip', 'wb') as f:
|
||||
f.write(response['Body'].read())
|
||||
|
||||
# All boto3 S3 methods supported
|
||||
client.list_objects(Bucket='releases', Prefix='v2.0.0/')
|
||||
# Smart list_objects with optimized performance
|
||||
response = client.list_objects(Bucket='releases', Prefix='v2.0.0/')
|
||||
for obj in response['Contents']:
|
||||
print(f"{obj['Key']}: {obj['Size']} bytes")
|
||||
|
||||
# Paginated listing for large buckets
|
||||
response = client.list_objects(Bucket='releases', MaxKeys=100)
|
||||
while response.get('IsTruncated'):
|
||||
for obj in response['Contents']:
|
||||
print(obj['Key'])
|
||||
response = client.list_objects(
|
||||
Bucket='releases',
|
||||
MaxKeys=100,
|
||||
ContinuationToken=response.get('NextContinuationToken')
|
||||
)
|
||||
|
||||
# Delete and inspect objects
|
||||
client.delete_object(Bucket='releases', Key='old-version.zip')
|
||||
client.head_object(Bucket='releases', Key='v2.0.0/my-app.zip')
|
||||
```
|
||||
|
||||
#### Simple API (Alternative)
|
||||
### Bucket Management
|
||||
|
||||
**No boto3 required!** DeltaGlider provides complete bucket management:
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
|
||||
client = create_client()
|
||||
|
||||
# Create buckets
|
||||
client.create_bucket(Bucket='my-releases')
|
||||
|
||||
# Create bucket in specific region (AWS only)
|
||||
client.create_bucket(
|
||||
Bucket='my-regional-bucket',
|
||||
CreateBucketConfiguration={'LocationConstraint': 'us-west-2'}
|
||||
)
|
||||
|
||||
# List all buckets
|
||||
response = client.list_buckets()
|
||||
for bucket in response['Buckets']:
|
||||
print(f"{bucket['Name']} - {bucket['CreationDate']}")
|
||||
|
||||
# Delete bucket (must be empty)
|
||||
client.delete_bucket(Bucket='my-old-bucket')
|
||||
```
|
||||
|
||||
See [examples/bucket_management.py](examples/bucket_management.py) for complete example.
|
||||
|
||||
### Simple API (Alternative)
|
||||
|
||||
For simpler use cases, DeltaGlider also provides a streamlined API:
|
||||
|
||||
@@ -244,15 +336,16 @@ print(f"Saved {summary.savings_percent:.0f}% storage space")
|
||||
client.download("s3://releases/v2.0.0/my-app-v2.0.0.zip", "local-app.zip")
|
||||
```
|
||||
|
||||
#### Real-World Example: Software Release Storage with boto3 API
|
||||
### Real-World Examples
|
||||
|
||||
#### Software Release Storage
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
|
||||
# Works exactly like boto3, but with 99% compression!
|
||||
client = create_client()
|
||||
|
||||
# Upload multiple versions using boto3-compatible API
|
||||
# Upload multiple versions
|
||||
versions = ["v1.0.0", "v1.0.1", "v1.0.2", "v1.1.0"]
|
||||
for version in versions:
|
||||
with open(f"dist/my-app-{version}.zip", 'rb') as f:
|
||||
@@ -277,27 +370,19 @@ for version in versions:
|
||||
# v1.0.1: Stored as 0.2MB delta (saved 99.8%)
|
||||
# v1.0.2: Stored as 0.3MB delta (saved 99.7%)
|
||||
# v1.1.0: Stored as 5.2MB delta (saved 94.8%)
|
||||
|
||||
# Download using standard boto3 API
|
||||
response = client.get_object(Bucket='releases', Key='v1.1.0/my-app-v1.1.0.zip')
|
||||
with open('my-app-latest.zip', 'wb') as f:
|
||||
f.write(response['Body'].read())
|
||||
```
|
||||
|
||||
#### Advanced Example: Automated Backup with boto3 API
|
||||
#### Automated Database Backup
|
||||
|
||||
```python
|
||||
from datetime import datetime
|
||||
from deltaglider import create_client
|
||||
|
||||
# Works with any S3-compatible storage
|
||||
client = create_client(endpoint_url="http://minio.internal:9000")
|
||||
|
||||
def backup_database():
|
||||
"""Daily database backup with automatic deduplication using boto3 API."""
|
||||
"""Daily database backup with automatic deduplication."""
|
||||
date = datetime.now().strftime("%Y%m%d")
|
||||
|
||||
# Create database dump
|
||||
dump_file = f"backup-{date}.sql.gz"
|
||||
|
||||
# Upload using boto3-compatible API
|
||||
@@ -310,63 +395,80 @@ def backup_database():
|
||||
Metadata={'date': date, 'source': 'production'}
|
||||
)
|
||||
|
||||
# Check compression effectiveness (DeltaGlider extension)
|
||||
# Check compression effectiveness
|
||||
if 'DeltaGliderInfo' in response:
|
||||
info = response['DeltaGliderInfo']
|
||||
if info['DeltaRatio'] > 0.1: # If delta is >10% of original
|
||||
if info['DeltaRatio'] > 0.1:
|
||||
print(f"Warning: Low compression ({info['SavingsPercent']:.0f}%), "
|
||||
"database might have significant changes")
|
||||
print(f"Backup stored: {info['StoredSizeMB']:.1f}MB "
|
||||
f"(compressed from {info['OriginalSizeMB']:.1f}MB)")
|
||||
|
||||
# List recent backups using boto3 API
|
||||
response = client.list_objects(
|
||||
Bucket='backups',
|
||||
Prefix='postgres/',
|
||||
MaxKeys=30
|
||||
)
|
||||
|
||||
# Clean up old backups
|
||||
for obj in response.get('Contents', []):
|
||||
# Parse date from key
|
||||
obj_date = obj['Key'].split('/')[1]
|
||||
if days_old(obj_date) > 30:
|
||||
client.delete_object(Bucket='backups', Key=obj['Key'])
|
||||
|
||||
# Run backup
|
||||
backup_database()
|
||||
```
|
||||
|
||||
For more examples and detailed API documentation, see the [SDK Documentation](docs/sdk/README.md).
|
||||
|
||||
## Migration from AWS CLI
|
||||
## Performance & Benchmarks
|
||||
|
||||
Migrating from `aws s3` to `deltaglider` is as simple as changing the command name:
|
||||
### Real-World Results
|
||||
|
||||
| AWS CLI | DeltaGlider | Compression Benefit |
|
||||
|---------|------------|-------------------|
|
||||
| `aws s3 cp file.zip s3://bucket/` | `deltaglider cp file.zip s3://bucket/` | ✅ 99% for similar files |
|
||||
| `aws s3 cp -r dir/ s3://bucket/` | `deltaglider cp -r dir/ s3://bucket/` | ✅ 99% for archives |
|
||||
| `aws s3 ls s3://bucket/` | `deltaglider ls s3://bucket/` | - |
|
||||
| `aws s3 rm s3://bucket/file` | `deltaglider rm s3://bucket/file` | - |
|
||||
| `aws s3 sync dir/ s3://bucket/` | `deltaglider sync dir/ s3://bucket/` | ✅ 99% incremental |
|
||||
Testing with 513 Elasticsearch plugin releases (82.5MB each):
|
||||
|
||||
### Compatibility Flags
|
||||
|
||||
```bash
|
||||
# All standard AWS flags work
|
||||
deltaglider cp file.zip s3://bucket/ \
|
||||
--endpoint-url http://localhost:9000 \
|
||||
--profile production \
|
||||
--region us-west-2
|
||||
|
||||
# DeltaGlider-specific flags
|
||||
deltaglider cp file.zip s3://bucket/ \
|
||||
--no-delta # Disable compression for specific files
|
||||
--max-ratio 0.8 # Only use delta if compression > 20%
|
||||
```
|
||||
Original size: 42.3 GB
|
||||
DeltaGlider size: 115 MB
|
||||
Compression: 99.7%
|
||||
Upload speed: 3-4 files/second
|
||||
Download speed: <100ms reconstruction
|
||||
```
|
||||
|
||||
## Architecture
|
||||
### The Math
|
||||
|
||||
For `N` versions of a `S` MB file with `D%` difference between versions:
|
||||
|
||||
**Traditional S3**: `N × S` MB
|
||||
**DeltaGlider**: `S + (N-1) × S × D%` MB
|
||||
|
||||
Example: 100 versions of 100MB files with 1% difference:
|
||||
- **Traditional**: 10,000 MB
|
||||
- **DeltaGlider**: 199 MB
|
||||
- **Savings**: 98%
|
||||
|
||||
### Comparison
|
||||
|
||||
| Solution | Compression | Speed | Integration | Cost |
|
||||
|----------|------------|-------|-------------|------|
|
||||
| **DeltaGlider** | 99%+ | Fast | Drop-in | Open source |
|
||||
| S3 Versioning | 0% | Native | Built-in | $$ per version |
|
||||
| Deduplication | 30-50% | Slow | Complex | Enterprise $$$ |
|
||||
| Git LFS | Good | Slow | Git-only | $ per GB |
|
||||
| Restic/Borg | 80-90% | Medium | Backup-only | Open source |
|
||||
|
||||
## Architecture & Technical Deep Dive
|
||||
|
||||
### Why xdelta3 Excels at Archive Compression
|
||||
|
||||
Traditional diff algorithms (like `diff` or `git diff`) work line-by-line on text files. Binary diff tools like `bsdiff` or `courgette` are optimized for executables. But **xdelta3** is uniquely suited for compressed archives because:
|
||||
|
||||
1. **Block-level matching**: xdelta3 uses a rolling hash algorithm to find matching byte sequences at any offset, not just line boundaries. This is crucial for archives where small file changes can shift all subsequent byte positions.
|
||||
|
||||
2. **Large window support**: xdelta3 can use reference windows up to 2GB, allowing it to find matches even when content has moved significantly within the archive. Other delta algorithms typically use much smaller windows (64KB-1MB).
|
||||
|
||||
3. **Compression-aware**: When you update one file in a ZIP/TAR archive, the archive format itself remains largely identical - same compression dictionary, same structure. xdelta3 preserves these similarities while other algorithms might miss them.
|
||||
|
||||
4. **Format agnostic**: Unlike specialized tools (e.g., `courgette` for Chrome updates), xdelta3 works on raw bytes without understanding the file format, making it perfect for any archive type.
|
||||
|
||||
#### Real-World Example
|
||||
|
||||
When you rebuild a JAR file with one class changed:
|
||||
- **Text diff**: 100% different (it's binary data!)
|
||||
- **bsdiff**: ~30-40% of original size (optimized for executables, not archives)
|
||||
- **xdelta3**: ~0.1-1% of original size (finds the unchanged parts regardless of position)
|
||||
|
||||
This is why DeltaGlider achieves 99%+ compression on versioned archives - xdelta3 can identify that 99% of the archive structure and content remains identical between versions.
|
||||
|
||||
### System Architecture
|
||||
|
||||
DeltaGlider uses a clean hexagonal architecture:
|
||||
|
||||
@@ -389,7 +491,7 @@ DeltaGlider uses a clean hexagonal architecture:
|
||||
- **Local caching**: Fast repeated operations
|
||||
- **Zero dependencies**: No database, no manifest files
|
||||
|
||||
## When to Use DeltaGlider
|
||||
### When to Use DeltaGlider
|
||||
|
||||
✅ **Perfect for:**
|
||||
- Software releases and versioned artifacts
|
||||
@@ -400,20 +502,22 @@ DeltaGlider uses a clean hexagonal architecture:
|
||||
- Any versioned binary data
|
||||
|
||||
❌ **Not ideal for:**
|
||||
- Already compressed unique files
|
||||
- Streaming media files
|
||||
- Already compressed **unique** files
|
||||
- Streaming or multimedia files
|
||||
- Frequently changing unstructured data
|
||||
- Files smaller than 1MB
|
||||
|
||||
## Comparison
|
||||
## Migration from AWS CLI
|
||||
|
||||
| Solution | Compression | Speed | Integration | Cost |
|
||||
|----------|------------|-------|-------------|------|
|
||||
| **DeltaGlider** | 99%+ | Fast | Drop-in | Open source |
|
||||
| S3 Versioning | 0% | Native | Built-in | $$ per version |
|
||||
| Deduplication | 30-50% | Slow | Complex | Enterprise $$$ |
|
||||
| Git LFS | Good | Slow | Git-only | $ per GB |
|
||||
| Restic/Borg | 80-90% | Medium | Backup-only | Open source |
|
||||
Migrating from `aws s3` to `deltaglider` is as simple as changing the command name:
|
||||
|
||||
| AWS CLI | DeltaGlider | Compression Benefit |
|
||||
|---------|------------|---------------------|
|
||||
| `aws s3 cp file.zip s3://bucket/` | `deltaglider cp file.zip s3://bucket/` | ✅ 99% for similar files |
|
||||
| `aws s3 cp -r dir/ s3://bucket/` | `deltaglider cp -r dir/ s3://bucket/` | ✅ 99% for archives |
|
||||
| `aws s3 ls s3://bucket/` | `deltaglider ls s3://bucket/` | - |
|
||||
| `aws s3 rm s3://bucket/file` | `deltaglider rm s3://bucket/file` | - |
|
||||
| `aws s3 sync dir/ s3://bucket/` | `deltaglider sync dir/ s3://bucket/` | ✅ 99% incremental |
|
||||
|
||||
## Production Ready
|
||||
|
||||
@@ -422,7 +526,9 @@ DeltaGlider uses a clean hexagonal architecture:
|
||||
- ✅ **S3 compatible**: Works with AWS, MinIO, Cloudflare R2, etc.
|
||||
- ✅ **Atomic operations**: No partial states
|
||||
- ✅ **Concurrent safe**: Multiple clients supported
|
||||
- ✅ **Well tested**: 95%+ code coverage
|
||||
- ✅ **Thoroughly tested**: 99 integration/unit tests, comprehensive test coverage
|
||||
- ✅ **Type safe**: Full mypy type checking, zero type errors
|
||||
- ✅ **Code quality**: Automated linting with ruff, clean codebase
|
||||
|
||||
## Development
|
||||
|
||||
@@ -434,13 +540,17 @@ cd deltaglider
|
||||
# Install with dev dependencies
|
||||
uv pip install -e ".[dev]"
|
||||
|
||||
# Run tests
|
||||
# Run tests (99 integration/unit tests)
|
||||
uv run pytest
|
||||
|
||||
# Run quality checks
|
||||
uv run ruff check src/ # Linting
|
||||
uv run mypy src/ # Type checking
|
||||
|
||||
# Run with local MinIO
|
||||
docker-compose up -d
|
||||
export AWS_ENDPOINT_URL=http://localhost:9000
|
||||
deltaglider put test.zip s3://test/
|
||||
deltaglider cp test.zip s3://test/
|
||||
```
|
||||
|
||||
## FAQ
|
||||
@@ -460,18 +570,6 @@ A: Zero. Files without similarity are uploaded directly.
|
||||
**Q: Is this compatible with S3 encryption?**
|
||||
A: Yes, DeltaGlider respects all S3 settings including SSE, KMS, and bucket policies.
|
||||
|
||||
## The Math
|
||||
|
||||
For `N` versions of a `S` MB file with `D%` difference between versions:
|
||||
|
||||
**Traditional S3**: `N × S` MB
|
||||
**DeltaGlider**: `S + (N-1) × S × D%` MB
|
||||
|
||||
Example: 100 versions of 100MB files with 1% difference:
|
||||
- **Traditional**: 10,000 MB
|
||||
- **DeltaGlider**: 199 MB
|
||||
- **Savings**: 98%
|
||||
|
||||
## Contributing
|
||||
|
||||
We welcome contributions! See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
|
||||
@@ -508,4 +606,4 @@ deltaglider analyze s3://your-bucket/
|
||||
# Output: "Potential savings: 95.2% (4.8TB → 237GB)"
|
||||
```
|
||||
|
||||
Built with ❤️ by engineers who were tired of paying to store the same bytes over and over.
|
||||
Built with ❤️ by engineers who were tired of paying to store the same bytes over and over.
|
||||
|
||||
630
SECURITY_FIX_ROADMAP.md
Normal file
630
SECURITY_FIX_ROADMAP.md
Normal file
@@ -0,0 +1,630 @@
|
||||
# 🛡️ DeltaGlider Security Fix Roadmap
|
||||
|
||||
## Executive Summary
|
||||
Critical security vulnerabilities have been identified in DeltaGlider's cache system that enable multi-user attacks, data exposure, and cache poisoning. This document provides a **chronological, actionable roadmap** to eliminate these threats through bold architectural changes.
|
||||
|
||||
**Key Innovation**: Instead of patching individual issues, we propose a **"Zero-Trust Cache Architecture"** that eliminates entire classes of vulnerabilities.
|
||||
|
||||
---
|
||||
|
||||
## 🚀 The Bold Solution: Ephemeral Signed Cache
|
||||
|
||||
### Core Concept
|
||||
Replace filesystem cache with **ephemeral, cryptographically-signed, user-isolated cache** that eliminates:
|
||||
- TOCTOU vulnerabilities (no shared filesystem)
|
||||
- Multi-user interference (process isolation)
|
||||
- Cache poisoning (cryptographic signatures)
|
||||
- Information disclosure (encrypted metadata)
|
||||
- Cross-endpoint collision (content-addressed storage)
|
||||
|
||||
**Note**: DeltaGlider is designed as a standalone CLI/SDK application. All solutions maintain this architecture without requiring external services.
|
||||
|
||||
---
|
||||
|
||||
## 📋 Implementation Roadmap
|
||||
|
||||
### **DAY 1-2: Emergency Hotfix** (v5.0.3) ✅ COMPLETED
|
||||
*Stop the bleeding - minimal changes for immediate deployment*
|
||||
|
||||
#### 1. **Ephemeral Process-Isolated Cache** (2 hours) ✅ COMPLETED
|
||||
```python
|
||||
# src/deltaglider/app/cli/main.py
|
||||
import tempfile
|
||||
import atexit
|
||||
|
||||
# SECURITY: Always use ephemeral process-isolated cache
|
||||
cache_dir = Path(tempfile.mkdtemp(prefix="deltaglider-", dir="/tmp"))
|
||||
atexit.register(lambda: shutil.rmtree(cache_dir, ignore_errors=True))
|
||||
```
|
||||
|
||||
**Impact**: Each process gets isolated cache, auto-cleaned on exit. Eliminates multi-user attacks.
|
||||
**Implementation**: All legacy shared cache code removed. Ephemeral cache is now the ONLY mode.
|
||||
|
||||
#### 2. **Add SHA Validation at Use-Time** (2 hours) ✅ COMPLETED
|
||||
```python
|
||||
# src/deltaglider/ports/cache.py
|
||||
class CachePort(Protocol):
|
||||
def get_validated_ref(self, bucket: str, prefix: str, expected_sha: str) -> Path:
|
||||
"""Get reference with atomic SHA validation - MUST use this for all operations."""
|
||||
...
|
||||
|
||||
# src/deltaglider/adapters/cache_fs.py
|
||||
def get_validated_ref(self, bucket: str, prefix: str, expected_sha: str) -> Path:
|
||||
path = self.ref_path(bucket, prefix)
|
||||
if not path.exists():
|
||||
raise CacheMissError(f"Cache miss for {bucket}/{prefix}")
|
||||
|
||||
# Lock file for atomic read (Unix only)
|
||||
with open(path, 'rb') as f:
|
||||
if sys.platform != "win32":
|
||||
fcntl.flock(f.fileno(), fcntl.LOCK_SH)
|
||||
content = f.read()
|
||||
actual_sha = hashlib.sha256(content).hexdigest()
|
||||
|
||||
if actual_sha != expected_sha:
|
||||
path.unlink() # Remove corrupted cache
|
||||
raise CacheCorruptionError(f"SHA mismatch: cache corrupted")
|
||||
|
||||
return path
|
||||
```
|
||||
|
||||
#### 3. **Update All Usage Points** (1 hour) ✅ COMPLETED
|
||||
```python
|
||||
# src/deltaglider/core/service.py
|
||||
# Replaced ALL instances in two locations:
|
||||
# - Line 234 (get method for decoding)
|
||||
# - Line 415 (_create_delta method for encoding)
|
||||
|
||||
ref_path = self.cache.get_validated_ref(
|
||||
delta_space.bucket,
|
||||
delta_space.prefix,
|
||||
ref_sha256 # Pass expected SHA
|
||||
)
|
||||
```
|
||||
|
||||
**Test & Deploy**: ✅ All 99 tests passing + ready for release
|
||||
|
||||
---
|
||||
|
||||
### **DAY 3-5: Quick Wins** (v5.0.3) ✅ COMPLETED
|
||||
*Low-risk improvements with high security impact*
|
||||
|
||||
#### 4. **Implement Content-Addressed Storage** (4 hours) ✅ COMPLETED
|
||||
```python
|
||||
# src/deltaglider/adapters/cache_cas.py
|
||||
class ContentAddressedCache(CachePort):
|
||||
"""Cache using SHA as filename - eliminates collisions"""
|
||||
|
||||
def ref_path(self, bucket: str, prefix: str, sha256: str) -> Path:
|
||||
# Use SHA as filename - guaranteed unique
|
||||
return self.base_dir / sha256[:2] / sha256[2:4] / sha256
|
||||
|
||||
def write_ref(self, bucket: str, prefix: str, src: Path, sha256: str) -> Path:
|
||||
path = self.ref_path(bucket, prefix, sha256)
|
||||
|
||||
# If file with this SHA exists, we're done (deduplication!)
|
||||
if path.exists():
|
||||
return path
|
||||
|
||||
# Atomic write
|
||||
path.parent.mkdir(parents=True, mode=0o700, exist_ok=True)
|
||||
tmp = path.with_suffix('.tmp')
|
||||
shutil.copy2(src, tmp)
|
||||
os.chmod(tmp, 0o600)
|
||||
|
||||
# Verify content before committing
|
||||
actual_sha = self.hasher.sha256(tmp)
|
||||
if actual_sha != sha256:
|
||||
tmp.unlink()
|
||||
raise ValueError("File corruption during cache write")
|
||||
|
||||
os.replace(tmp, path) # Atomic
|
||||
return path
|
||||
```
|
||||
|
||||
**Benefits**: ✅ ACHIEVED
|
||||
- Same file cached once regardless of bucket/prefix (automatic deduplication)
|
||||
- No collision possible (SHA256 uniqueness guarantees)
|
||||
- Natural cache validation (filename IS the checksum)
|
||||
- Two-level directory structure (ab/cd/abcdef...) for filesystem optimization
|
||||
|
||||
**Implementation**: Complete in `src/deltaglider/adapters/cache_cas.py` with:
|
||||
- `_cas_path()` method for SHA256-based path computation
|
||||
- `get_validated_ref()` with atomic validation and locking
|
||||
- `write_ref()` with atomic temp-file + rename pattern
|
||||
- Ephemeral deltaspace-to-SHA mapping for compatibility
|
||||
|
||||
#### 5. **Add Secure Directory Creation** (2 hours)
|
||||
```python
|
||||
# src/deltaglider/utils/secure_fs.py
|
||||
import os
|
||||
import stat
|
||||
|
||||
def secure_makedirs(path: Path, mode: int = 0o700) -> None:
|
||||
"""Create directory with secure permissions atomically."""
|
||||
try:
|
||||
path.mkdir(parents=True, mode=mode, exist_ok=False)
|
||||
except FileExistsError:
|
||||
# Verify it's ours and has correct permissions
|
||||
st = path.stat()
|
||||
if st.st_uid != os.getuid():
|
||||
raise SecurityError(f"Directory {path} owned by different user")
|
||||
if stat.S_IMODE(st.st_mode) != mode:
|
||||
os.chmod(path, mode) # Fix permissions
|
||||
```
|
||||
|
||||
#### 6. **Unify Cache Configuration** (1 hour)
|
||||
```python
|
||||
# src/deltaglider/config.py
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
def get_cache_dir() -> Path:
|
||||
"""Single source of truth for cache directory."""
|
||||
if os.environ.get("DG_NO_CACHE") == "true":
|
||||
return None # Feature flag to disable cache
|
||||
|
||||
if os.environ.get("DG_EPHEMERAL_CACHE") == "true":
|
||||
return Path(tempfile.mkdtemp(prefix="dg-cache-"))
|
||||
|
||||
# User-specific cache by default
|
||||
cache_base = os.environ.get("DG_CACHE_DIR",
|
||||
os.path.expanduser("~/.cache/deltaglider"))
|
||||
return Path(cache_base) / "v2" # Version cache format
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### **DAY 6-10: Architecture Redesign** (v5.0.3) ✅ COMPLETED
|
||||
*The bold solution that eliminates entire vulnerability classes*
|
||||
|
||||
#### 7. **Implement Memory Cache with Encryption** (8 hours) ✅ COMPLETED
|
||||
```python
|
||||
# src/deltaglider/adapters/cache_memory.py
|
||||
class MemoryCache(CachePort):
|
||||
"""In-memory cache with LRU eviction and configurable size limits."""
|
||||
|
||||
def __init__(self, hasher: HashPort, max_size_mb: int = 100, temp_dir: Path | None = None):
|
||||
self.hasher = hasher
|
||||
self.max_size_bytes = max_size_mb * 1024 * 1024
|
||||
self._current_size = 0
|
||||
self._cache: dict[tuple[str, str], tuple[bytes, str]] = {} # (bucket, prefix) -> (content, SHA)
|
||||
self._access_order: list[tuple[str, str]] = [] # LRU tracking
|
||||
|
||||
def write_ref(self, bucket: str, prefix: str, src: Path) -> Path:
|
||||
"""Write reference to in-memory cache with LRU eviction."""
|
||||
# Read content and compute SHA
|
||||
content = src.read_bytes()
|
||||
sha256 = self.hasher.sha256_bytes(content)
|
||||
|
||||
# Check if file fits in cache
|
||||
needed_bytes = len(content)
|
||||
if needed_bytes > self.max_size_bytes:
|
||||
raise CacheCorruptionError(f"File too large for cache: {needed_bytes} > {self.max_size_bytes}")
|
||||
|
||||
# Evict LRU if needed
|
||||
self._evict_lru(needed_bytes)
|
||||
|
||||
# Store in memory
|
||||
key = (bucket, prefix)
|
||||
self._cache[key] = (content, sha256)
|
||||
self._current_size += needed_bytes
|
||||
self._access_order.append(key)
|
||||
|
||||
return src # Return original path for compatibility
|
||||
|
||||
def get_validated_ref(self, bucket: str, prefix: str, expected_sha: str) -> Path:
|
||||
"""Get cached reference with validation."""
|
||||
key = (bucket, prefix)
|
||||
if key not in self._cache:
|
||||
raise CacheMissError(f"Cache miss for {bucket}/{prefix}")
|
||||
|
||||
content, stored_sha = self._cache[key]
|
||||
|
||||
# Validate SHA matches
|
||||
if stored_sha != expected_sha:
|
||||
raise CacheCorruptionError(f"SHA mismatch for {bucket}/{prefix}")
|
||||
|
||||
# Update LRU order
|
||||
self._access_order.remove(key)
|
||||
self._access_order.append(key)
|
||||
|
||||
# Write to temp file for compatibility
|
||||
temp_path = self.temp_dir / f"{expected_sha}.bin"
|
||||
temp_path.write_bytes(content)
|
||||
return temp_path
|
||||
```
|
||||
|
||||
# src/deltaglider/adapters/cache_encrypted.py
|
||||
class EncryptedCache(CachePort):
|
||||
"""Encrypted cache wrapper using Fernet symmetric encryption."""
|
||||
|
||||
def __init__(self, backend: CachePort, encryption_key: bytes | None = None):
|
||||
self.backend = backend
|
||||
|
||||
# Key management: ephemeral (default) or provided
|
||||
if encryption_key is None:
|
||||
self._key = Fernet.generate_key() # Ephemeral per process
|
||||
self._ephemeral = True
|
||||
else:
|
||||
self._key = encryption_key
|
||||
self._ephemeral = False
|
||||
|
||||
self._cipher = Fernet(self._key)
|
||||
# Track plaintext SHA since encrypted content has different SHA
|
||||
self._plaintext_sha_map: dict[tuple[str, str], str] = {}
|
||||
|
||||
def write_ref(self, bucket: str, prefix: str, src: Path) -> Path:
|
||||
"""Encrypt and cache reference file."""
|
||||
# Read plaintext and compute SHA
|
||||
plaintext_data = src.read_bytes()
|
||||
plaintext_sha = hashlib.sha256(plaintext_data).hexdigest()
|
||||
|
||||
# Encrypt data
|
||||
encrypted_data = self._cipher.encrypt(plaintext_data)
|
||||
|
||||
# Write encrypted data to temp file
|
||||
temp_encrypted = src.with_suffix(".encrypted.tmp")
|
||||
temp_encrypted.write_bytes(encrypted_data)
|
||||
|
||||
try:
|
||||
# Store encrypted file via backend
|
||||
result_path = self.backend.write_ref(bucket, prefix, temp_encrypted)
|
||||
|
||||
# Store plaintext SHA mapping
|
||||
key = (bucket, prefix)
|
||||
self._plaintext_sha_map[key] = plaintext_sha
|
||||
|
||||
return result_path
|
||||
finally:
|
||||
temp_encrypted.unlink(missing_ok=True)
|
||||
|
||||
def get_validated_ref(self, bucket: str, prefix: str, expected_sha: str) -> Path:
|
||||
"""Get cached reference with decryption and validation."""
|
||||
# Verify we have the plaintext SHA mapped
|
||||
key = (bucket, prefix)
|
||||
if key not in self._plaintext_sha_map:
|
||||
raise CacheMissError(f"Cache miss for {bucket}/{prefix}")
|
||||
|
||||
if self._plaintext_sha_map[key] != expected_sha:
|
||||
raise CacheCorruptionError(f"SHA mismatch for {bucket}/{prefix}")
|
||||
|
||||
# Get encrypted file from backend
|
||||
encrypted_path = self.backend.ref_path(bucket, prefix)
|
||||
if not encrypted_path.exists():
|
||||
raise CacheMissError(f"Encrypted cache file not found")
|
||||
|
||||
# Decrypt content
|
||||
encrypted_data = encrypted_path.read_bytes()
|
||||
try:
|
||||
decrypted_data = self._cipher.decrypt(encrypted_data)
|
||||
except Exception as e:
|
||||
raise CacheCorruptionError(f"Decryption failed: {e}") from e
|
||||
|
||||
# Validate plaintext SHA
|
||||
actual_sha = hashlib.sha256(decrypted_data).hexdigest()
|
||||
if actual_sha != expected_sha:
|
||||
raise CacheCorruptionError(f"Decrypted content SHA mismatch")
|
||||
|
||||
# Write decrypted content to temp file
|
||||
decrypted_path = encrypted_path.with_suffix(".decrypted")
|
||||
decrypted_path.write_bytes(decrypted_data)
|
||||
return decrypted_path
|
||||
```
|
||||
|
||||
**Implementation**: ✅ COMPLETED
|
||||
- **MemoryCache**: In-memory cache with LRU eviction, configurable size limits, zero filesystem I/O
|
||||
- **EncryptedCache**: Fernet (AES-128-CBC + HMAC) encryption wrapper, ephemeral keys by default
|
||||
- **Configuration**: `DG_CACHE_BACKEND` (filesystem/memory), `DG_CACHE_ENCRYPTION` (true/false)
|
||||
- **Environment Variables**: `DG_CACHE_MEMORY_SIZE_MB`, `DG_CACHE_ENCRYPTION_KEY`
|
||||
|
||||
**Benefits**: ✅ ACHIEVED
|
||||
- No filesystem access for memory cache = no permission issues
|
||||
- Encrypted at rest = secure cache storage
|
||||
- Per-process ephemeral keys = forward secrecy and process isolation
|
||||
- LRU eviction = prevents memory exhaustion
|
||||
- Zero TOCTOU window = memory operations are atomic
|
||||
- Configurable backends = flexibility for different use cases
|
||||
|
||||
#### 8. **Implement Signed Cache Entries** (6 hours)
|
||||
```python
|
||||
# src/deltaglider/adapters/cache_signed.py
|
||||
import hmac
|
||||
import json
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
class SignedCache(CachePort):
|
||||
"""Cache with cryptographic signatures and expiry."""
|
||||
|
||||
def __init__(self, base_dir: Path, secret_key: bytes = None):
|
||||
self.base_dir = base_dir
|
||||
# Per-session key if not provided
|
||||
self.secret = secret_key or os.urandom(32)
|
||||
|
||||
def _sign_metadata(self, metadata: dict) -> str:
|
||||
"""Create HMAC signature for metadata."""
|
||||
json_meta = json.dumps(metadata, sort_keys=True)
|
||||
signature = hmac.new(
|
||||
self.secret,
|
||||
json_meta.encode(),
|
||||
hashlib.sha256
|
||||
).hexdigest()
|
||||
return signature
|
||||
|
||||
def write_ref(self, bucket: str, prefix: str, src: Path, sha256: str) -> Path:
|
||||
# Create signed metadata
|
||||
metadata = {
|
||||
"sha256": sha256,
|
||||
"bucket": bucket,
|
||||
"prefix": prefix,
|
||||
"timestamp": datetime.utcnow().isoformat(),
|
||||
"expires": (datetime.utcnow() + timedelta(hours=24)).isoformat(),
|
||||
"pid": os.getpid(),
|
||||
"uid": os.getuid(),
|
||||
}
|
||||
signature = self._sign_metadata(metadata)
|
||||
|
||||
# Store data + metadata
|
||||
cache_dir = self.base_dir / signature[:8] # Use signature prefix as namespace
|
||||
cache_dir.mkdir(parents=True, mode=0o700, exist_ok=True)
|
||||
|
||||
data_path = cache_dir / f"{sha256}.bin"
|
||||
meta_path = cache_dir / f"{sha256}.meta"
|
||||
|
||||
# Atomic writes
|
||||
shutil.copy2(src, data_path)
|
||||
os.chmod(data_path, 0o600)
|
||||
|
||||
with open(meta_path, 'w') as f:
|
||||
json.dump({"metadata": metadata, "signature": signature}, f)
|
||||
os.chmod(meta_path, 0o600)
|
||||
|
||||
return data_path
|
||||
|
||||
def get_validated_ref(self, bucket: str, prefix: str, sha256: str) -> Path:
|
||||
# Find and validate signed entry
|
||||
pattern = self.base_dir / "*" / f"{sha256}.meta"
|
||||
matches = list(Path(self.base_dir).glob(f"*/{sha256}.meta"))
|
||||
|
||||
for meta_path in matches:
|
||||
with open(meta_path) as f:
|
||||
entry = json.load(f)
|
||||
|
||||
# Verify signature
|
||||
expected_sig = self._sign_metadata(entry["metadata"])
|
||||
if not hmac.compare_digest(entry["signature"], expected_sig):
|
||||
meta_path.unlink() # Remove tampered entry
|
||||
continue
|
||||
|
||||
# Check expiry
|
||||
expires = datetime.fromisoformat(entry["metadata"]["expires"])
|
||||
if datetime.utcnow() > expires:
|
||||
meta_path.unlink()
|
||||
continue
|
||||
|
||||
# Validate data integrity
|
||||
data_path = meta_path.with_suffix('.bin')
|
||||
actual_sha = self.hasher.sha256(data_path)
|
||||
if actual_sha != sha256:
|
||||
data_path.unlink()
|
||||
meta_path.unlink()
|
||||
continue
|
||||
|
||||
return data_path
|
||||
|
||||
raise CacheMissError(f"No valid cache entry for {sha256}")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### **DAY 11-15: Advanced Security** (v6.0.0)
|
||||
*Next-generation features for standalone security*
|
||||
|
||||
#### 9. **Add Integrity Monitoring** (4 hours)
|
||||
```python
|
||||
# src/deltaglider/security/monitor.py
|
||||
import inotify
|
||||
import logging
|
||||
|
||||
class CacheIntegrityMonitor:
|
||||
"""Detect and alert on cache tampering attempts."""
|
||||
|
||||
def __init__(self, cache_dir: Path):
|
||||
self.cache_dir = cache_dir
|
||||
self.notifier = inotify.INotify()
|
||||
self.watch_desc = self.notifier.add_watch(
|
||||
str(cache_dir),
|
||||
inotify.IN_MODIFY | inotify.IN_DELETE | inotify.IN_ATTRIB
|
||||
)
|
||||
self.logger = logging.getLogger("security")
|
||||
|
||||
async def monitor(self):
|
||||
"""Monitor for unauthorized cache modifications."""
|
||||
async for event in self.notifier:
|
||||
if event.mask & inotify.IN_MODIFY:
|
||||
# File modified - verify it was by our process
|
||||
if not self._is_our_modification(event):
|
||||
self.logger.critical(
|
||||
f"SECURITY: Unauthorized cache modification detected: {event.path}"
|
||||
)
|
||||
# Immediately invalidate affected cache
|
||||
Path(event.path).unlink(missing_ok=True)
|
||||
|
||||
elif event.mask & inotify.IN_ATTRIB:
|
||||
# Permission change - always suspicious
|
||||
self.logger.warning(
|
||||
f"SECURITY: Cache permission change: {event.path}"
|
||||
)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### **DAY 16-20: Testing & Rollout** (v6.0.0 release)
|
||||
|
||||
#### 10. **Security Test Suite** (8 hours)
|
||||
```python
|
||||
# tests/security/test_cache_attacks.py
|
||||
import pytest
|
||||
import os
|
||||
import threading
|
||||
import time
|
||||
|
||||
class TestCacheSecurity:
|
||||
"""Test all known attack vectors."""
|
||||
|
||||
def test_toctou_attack_prevented(self, cache):
|
||||
"""Verify TOCTOU window is eliminated."""
|
||||
sha = "abc123"
|
||||
cache.write_ref("bucket", "prefix", test_file, sha)
|
||||
|
||||
# Attacker thread tries to replace file during read
|
||||
def attacker():
|
||||
time.sleep(0.0001) # Try to hit the TOCTOU window
|
||||
cache_path = cache.ref_path("bucket", "prefix", sha)
|
||||
cache_path.write_bytes(b"malicious")
|
||||
|
||||
thread = threading.Thread(target=attacker)
|
||||
thread.start()
|
||||
|
||||
# Should detect tampering
|
||||
with pytest.raises(CacheCorruptionError):
|
||||
cache.get_validated_ref("bucket", "prefix", sha)
|
||||
|
||||
def test_multi_user_isolation(self, cache):
|
||||
"""Verify users can't access each other's cache."""
|
||||
# Create cache as user A
|
||||
cache_a = SignedCache(Path("/tmp/cache"), secret=b"key_a")
|
||||
cache_a.write_ref("bucket", "prefix", test_file, "sha_a")
|
||||
|
||||
# Try to read as user B with different key
|
||||
cache_b = SignedCache(Path("/tmp/cache"), secret=b"key_b")
|
||||
|
||||
with pytest.raises(CacheMissError):
|
||||
cache_b.get_validated_ref("bucket", "prefix", "sha_a")
|
||||
|
||||
def test_cache_poisoning_prevented(self, cache):
|
||||
"""Verify corrupted cache is detected."""
|
||||
sha = "abc123"
|
||||
cache.write_ref("bucket", "prefix", test_file, sha)
|
||||
|
||||
# Corrupt the cache file
|
||||
cache_path = cache.ref_path("bucket", "prefix", sha)
|
||||
with open(cache_path, 'ab') as f:
|
||||
f.write(b"corrupted")
|
||||
|
||||
# Should detect corruption
|
||||
with pytest.raises(CacheCorruptionError):
|
||||
cache.get_validated_ref("bucket", "prefix", sha)
|
||||
```
|
||||
|
||||
#### 11. **Migration Guide** (4 hours)
|
||||
```python
|
||||
# src/deltaglider/migration/v5_to_v6.py
|
||||
def migrate_cache():
|
||||
"""Migrate from v5 shared cache to v6 secure cache."""
|
||||
old_cache = Path("/tmp/.deltaglider/cache")
|
||||
|
||||
if old_cache.exists():
|
||||
print("WARNING: Old insecure cache detected at", old_cache)
|
||||
print("This cache had security vulnerabilities and will not be migrated.")
|
||||
|
||||
response = input("Delete old cache? [y/N]: ")
|
||||
if response.lower() == 'y':
|
||||
shutil.rmtree(old_cache)
|
||||
print("Old cache deleted. New secure cache will be created on demand.")
|
||||
else:
|
||||
print("Old cache retained at", old_cache)
|
||||
print("Set DG_CACHE_DIR to use a different location.")
|
||||
```
|
||||
|
||||
#### 12. **Performance Benchmarks** (4 hours)
|
||||
```python
|
||||
# benchmarks/cache_performance.py
|
||||
def benchmark_cache_implementations():
|
||||
"""Compare performance of cache implementations."""
|
||||
|
||||
implementations = [
|
||||
("Filesystem (v5)", FsCacheAdapter),
|
||||
("Content-Addressed", ContentAddressedCache),
|
||||
("Memory", MemoryCache),
|
||||
("Signed", SignedCache),
|
||||
]
|
||||
|
||||
for name, cache_class in implementations:
|
||||
cache = cache_class(test_dir)
|
||||
|
||||
# Measure write performance
|
||||
start = time.perf_counter()
|
||||
for i in range(1000):
|
||||
cache.write_ref("bucket", f"prefix{i}", test_file, f"sha{i}")
|
||||
write_time = time.perf_counter() - start
|
||||
|
||||
# Measure read performance
|
||||
start = time.perf_counter()
|
||||
for i in range(1000):
|
||||
cache.get_validated_ref("bucket", f"prefix{i}", f"sha{i}")
|
||||
read_time = time.perf_counter() - start
|
||||
|
||||
print(f"{name}: Write={write_time:.3f}s Read={read_time:.3f}s")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 📊 Decision Matrix
|
||||
|
||||
| Solution | Security | Performance | Complexity | Breaking Change |
|
||||
|----------|----------|-------------|------------|-----------------|
|
||||
| Hotfix (Day 1-2) | ⭐⭐⭐ | ⭐⭐ | ⭐ | No |
|
||||
| Content-Addressed | ⭐⭐⭐⭐ | ⭐⭐⭐⭐ | ⭐⭐ | No |
|
||||
| Memory Cache | ⭐⭐⭐⭐⭐ | ⭐⭐⭐⭐⭐ | ⭐⭐⭐ | No |
|
||||
| Signed Cache | ⭐⭐⭐⭐⭐ | ⭐⭐⭐ | ⭐⭐⭐ | No |
|
||||
|
||||
---
|
||||
|
||||
## 🎯 Recommended Approach
|
||||
|
||||
### For Immediate Production (Next 48 hours)
|
||||
Deploy **Hotfix v5.0.3** with ephemeral cache + SHA validation
|
||||
|
||||
### For Next Release (1 week)
|
||||
Implement **Content-Addressed Storage** (v5.1.0) - best balance of security and simplicity
|
||||
|
||||
### For Enterprise (1 month)
|
||||
Deploy **Signed Cache** (v6.0.0) for maximum security with built-in TTL and integrity
|
||||
|
||||
---
|
||||
|
||||
## 🚦 Success Metrics
|
||||
|
||||
After implementation, verify:
|
||||
|
||||
1. **Security Tests Pass**: All attack vectors prevented
|
||||
2. **Performance Maintained**: <10% degradation vs v5
|
||||
3. **Zero CVEs**: No security vulnerabilities in cache
|
||||
4. **User Isolation**: Multi-user systems work safely
|
||||
5. **Backward Compatible**: Existing workflows unaffected
|
||||
|
||||
---
|
||||
|
||||
## 📞 Support
|
||||
|
||||
For questions or security concerns:
|
||||
- Security Team: security@deltaglider.io
|
||||
- Lead Developer: @architect
|
||||
- Immediate Issues: Create SECURITY labeled issue
|
||||
|
||||
---
|
||||
|
||||
## ⚠️ Disclosure Timeline
|
||||
|
||||
- **Day 0**: Vulnerabilities discovered
|
||||
- **Day 1**: Hotfix released (v5.0.3)
|
||||
- **Day 7**: Improved version released (v5.1.0)
|
||||
- **Day 30**: Full disclosure published
|
||||
- **Day 45**: v6.0.0 with complete redesign
|
||||
|
||||
---
|
||||
|
||||
*Document Version: 1.0*
|
||||
*Classification: SENSITIVE - INTERNAL USE ONLY*
|
||||
*Last Updated: 2024-10-09*
|
||||
8
command.sh
Executable file
8
command.sh
Executable file
@@ -0,0 +1,8 @@
|
||||
export AWS_ENDPOINT_URL=http://localhost:9000
|
||||
export AWS_ACCESS_KEY_ID=deltadmin
|
||||
export AWS_SECRET_ACCESS_KEY=deltasecret
|
||||
|
||||
ror-data-importer \
|
||||
--source-bucket=dg-demo \
|
||||
--dest-bucket=new-buck \
|
||||
--yes
|
||||
44
commit_message.txt
Normal file
44
commit_message.txt
Normal file
@@ -0,0 +1,44 @@
|
||||
fix: Optimize list_objects performance by eliminating N+1 query problem
|
||||
|
||||
BREAKING CHANGE: list_objects and get_bucket_stats signatures updated
|
||||
|
||||
## Problem
|
||||
The list_objects method was making a separate HEAD request for every object
|
||||
in the bucket to fetch metadata, causing severe performance degradation:
|
||||
- 100 objects = 101 API calls (1 LIST + 100 HEAD)
|
||||
- Response time: ~2.6 seconds for 1000 objects
|
||||
|
||||
## Solution
|
||||
Implemented smart metadata fetching with intelligent defaults:
|
||||
- Added FetchMetadata parameter (default: False) to list_objects
|
||||
- Added detailed_stats parameter (default: False) to get_bucket_stats
|
||||
- NEVER fetch metadata for non-delta files (they don't need it)
|
||||
- Only fetch metadata for delta files when explicitly requested
|
||||
|
||||
## Performance Impact
|
||||
- Before: ~2.6 seconds for 1000 objects (N+1 API calls)
|
||||
- After: ~50ms for 1000 objects (1 API call)
|
||||
- Improvement: ~5x faster for typical operations
|
||||
|
||||
## API Changes
|
||||
- list_objects(..., FetchMetadata=False) - Smart performance default
|
||||
- get_bucket_stats(..., detailed_stats=False) - Quick stats by default
|
||||
- Full pagination support with ContinuationToken
|
||||
- Backwards compatible with existing code
|
||||
|
||||
## Implementation Details
|
||||
- Eliminated unnecessary HEAD requests for metadata
|
||||
- Smart detection: only delta files can benefit from metadata
|
||||
- Preserved boto3 compatibility while adding performance optimizations
|
||||
- Updated documentation with performance notes and examples
|
||||
|
||||
## Testing
|
||||
- All existing tests pass
|
||||
- Added test coverage for new parameters
|
||||
- Linting (ruff) passes
|
||||
- Type checking (mypy) passes
|
||||
- 61 tests passing (18 unit + 43 integration)
|
||||
|
||||
Fixes #[issue-number] - Web UI /buckets/ endpoint 2.6s latency
|
||||
|
||||
Co-authored-by: Claude <noreply@anthropic.com>
|
||||
316
docs/BOTO3_COMPATIBILITY_VISION.md
Normal file
316
docs/BOTO3_COMPATIBILITY_VISION.md
Normal file
@@ -0,0 +1,316 @@
|
||||
# boto3 Compatibility Vision
|
||||
|
||||
## Current State (v4.2.3)
|
||||
|
||||
DeltaGlider currently uses custom dataclasses for responses:
|
||||
|
||||
```python
|
||||
from deltaglider import create_client, ListObjectsResponse, ObjectInfo
|
||||
|
||||
client = create_client()
|
||||
response: ListObjectsResponse = client.list_objects(Bucket='my-bucket')
|
||||
|
||||
for obj in response.contents: # Custom field name
|
||||
print(f"{obj.key}: {obj.size}") # Custom ObjectInfo dataclass
|
||||
```
|
||||
|
||||
**Problems:**
|
||||
- ❌ Not a true drop-in replacement for boto3
|
||||
- ❌ Users need to learn DeltaGlider-specific types
|
||||
- ❌ Can't use with tools expecting boto3 responses
|
||||
- ❌ Different API surface (`.contents` vs `['Contents']`)
|
||||
|
||||
## Target State (v5.0.0)
|
||||
|
||||
DeltaGlider should return native boto3-compatible dicts with TypedDict type hints:
|
||||
|
||||
```python
|
||||
from deltaglider import create_client, ListObjectsV2Response
|
||||
|
||||
client = create_client()
|
||||
response: ListObjectsV2Response = client.list_objects(Bucket='my-bucket')
|
||||
|
||||
for obj in response['Contents']: # boto3-compatible!
|
||||
print(f"{obj['Key']}: {obj['Size']}") # Works exactly like boto3
|
||||
```
|
||||
|
||||
**Benefits:**
|
||||
- ✅ **True drop-in replacement** - swap `boto3.client('s3')` with `create_client()`
|
||||
- ✅ **No learning curve** - if you know boto3, you know DeltaGlider
|
||||
- ✅ **Tool compatibility** - works with any library expecting boto3 types
|
||||
- ✅ **Type safety** - TypedDict provides IDE autocomplete without boto3 import
|
||||
- ✅ **Zero runtime overhead** - TypedDict compiles to plain dict
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Type Definitions ✅ (DONE)
|
||||
|
||||
Created `deltaglider/types.py` with comprehensive TypedDict definitions:
|
||||
|
||||
```python
|
||||
from typing import TypedDict, NotRequired
|
||||
from datetime import datetime
|
||||
|
||||
class S3Object(TypedDict):
|
||||
Key: str
|
||||
Size: int
|
||||
LastModified: datetime
|
||||
ETag: NotRequired[str]
|
||||
StorageClass: NotRequired[str]
|
||||
|
||||
class ListObjectsV2Response(TypedDict):
|
||||
Contents: list[S3Object]
|
||||
CommonPrefixes: NotRequired[list[dict[str, str]]]
|
||||
IsTruncated: NotRequired[bool]
|
||||
NextContinuationToken: NotRequired[str]
|
||||
```
|
||||
|
||||
**Key insight:** TypedDict provides type safety at development time but compiles to plain `dict` at runtime!
|
||||
|
||||
### Phase 2: Refactor Client Methods (TODO)
|
||||
|
||||
Update all client methods to return boto3-compatible dicts:
|
||||
|
||||
#### `list_objects()`
|
||||
|
||||
**Before:**
|
||||
```python
|
||||
def list_objects(...) -> ListObjectsResponse: # Custom dataclass
|
||||
return ListObjectsResponse(
|
||||
name=bucket,
|
||||
contents=[ObjectInfo(...), ...] # Custom dataclass
|
||||
)
|
||||
```
|
||||
|
||||
**After:**
|
||||
```python
|
||||
def list_objects(...) -> ListObjectsV2Response: # TypedDict
|
||||
return {
|
||||
'Contents': [
|
||||
{
|
||||
'Key': 'file.zip', # .delta suffix already stripped
|
||||
'Size': 1024,
|
||||
'LastModified': datetime(...),
|
||||
'ETag': '"abc123"',
|
||||
}
|
||||
],
|
||||
'CommonPrefixes': [{'Prefix': 'dir/'}],
|
||||
'IsTruncated': False,
|
||||
}
|
||||
```
|
||||
|
||||
**Key changes:**
|
||||
1. Return plain dict instead of custom dataclass
|
||||
2. Use boto3 field names: `Contents` not `contents`, `Key` not `key`
|
||||
3. Strip `.delta` suffix transparently (already done)
|
||||
4. Hide `reference.bin` files (already done)
|
||||
|
||||
#### `put_object()`
|
||||
|
||||
**Before:**
|
||||
```python
|
||||
def put_object(...) -> dict[str, Any]:
|
||||
return {
|
||||
"ETag": etag,
|
||||
"VersionId": None,
|
||||
"DeltaGliderInfo": {...} # Custom field
|
||||
}
|
||||
```
|
||||
|
||||
**After:**
|
||||
```python
|
||||
def put_object(...) -> PutObjectResponse: # TypedDict
|
||||
return {
|
||||
'ETag': etag,
|
||||
'ResponseMetadata': {'HTTPStatusCode': 200},
|
||||
# DeltaGlider metadata goes in Metadata field
|
||||
'Metadata': {
|
||||
'deltaglider-is-delta': 'true',
|
||||
'deltaglider-compression-ratio': '0.99'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `get_object()`
|
||||
|
||||
**Before:**
|
||||
```python
|
||||
def get_object(...) -> dict[str, Any]:
|
||||
return {
|
||||
"Body": data,
|
||||
"ContentLength": len(data),
|
||||
"DeltaGliderInfo": {...} # Custom field
|
||||
}
|
||||
```
|
||||
|
||||
**After:**
|
||||
```python
|
||||
def get_object(...) -> GetObjectResponse: # TypedDict
|
||||
return {
|
||||
'Body': data, # bytes, not StreamingBody (simpler!)
|
||||
'ContentLength': len(data),
|
||||
'LastModified': datetime(...),
|
||||
'ETag': '"abc123"',
|
||||
'Metadata': { # DeltaGlider metadata here
|
||||
'deltaglider-is-delta': 'true'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `delete_object()`, `delete_objects()`, `head_object()`, etc.
|
||||
|
||||
All follow the same pattern: return boto3-compatible dicts with TypedDict hints.
|
||||
|
||||
### Phase 3: Backward Compatibility (TODO)
|
||||
|
||||
Keep old dataclasses for 1-2 versions with deprecation warnings:
|
||||
|
||||
```python
|
||||
class ListObjectsResponse:
|
||||
"""DEPRECATED: Use dict responses with ListObjectsV2Response type hint.
|
||||
|
||||
This will be removed in v6.0.0. Update your code:
|
||||
|
||||
Before:
|
||||
response.contents[0].key
|
||||
|
||||
After:
|
||||
response['Contents'][0]['Key']
|
||||
"""
|
||||
def __init__(self, data: dict):
|
||||
warnings.warn(
|
||||
"ListObjectsResponse dataclass is deprecated. "
|
||||
"Use dict responses with ListObjectsV2Response type hint.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2
|
||||
)
|
||||
self._data = data
|
||||
|
||||
@property
|
||||
def contents(self):
|
||||
return [ObjectInfo(obj) for obj in self._data.get('Contents', [])]
|
||||
```
|
||||
|
||||
### Phase 4: Update Documentation (TODO)
|
||||
|
||||
1. Update all examples to use dict responses
|
||||
2. Add migration guide from v4.x to v5.0
|
||||
3. Update BOTO3_COMPATIBILITY.md
|
||||
4. Add "Drop-in Replacement" marketing language
|
||||
|
||||
### Phase 5: Update Tests (TODO)
|
||||
|
||||
Convert all tests from:
|
||||
```python
|
||||
assert response.contents[0].key == "file.zip"
|
||||
```
|
||||
|
||||
To:
|
||||
```python
|
||||
assert response['Contents'][0]['Key'] == "file.zip"
|
||||
```
|
||||
|
||||
## Migration Guide (for users)
|
||||
|
||||
### v4.x → v5.0
|
||||
|
||||
**Old code (v4.x):**
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
|
||||
client = create_client()
|
||||
response = client.list_objects(Bucket='my-bucket')
|
||||
|
||||
for obj in response.contents: # Dataclass attribute
|
||||
print(f"{obj.key}: {obj.size}") # Dataclass attributes
|
||||
```
|
||||
|
||||
**New code (v5.0):**
|
||||
```python
|
||||
from deltaglider import create_client, ListObjectsV2Response
|
||||
|
||||
client = create_client()
|
||||
response: ListObjectsV2Response = client.list_objects(Bucket='my-bucket')
|
||||
|
||||
for obj in response['Contents']: # Dict key (boto3-compatible)
|
||||
print(f"{obj['Key']}: {obj['Size']}") # Dict keys (boto3-compatible)
|
||||
```
|
||||
|
||||
**Or even simpler - no type hint needed:**
|
||||
```python
|
||||
client = create_client()
|
||||
response = client.list_objects(Bucket='my-bucket')
|
||||
|
||||
for obj in response['Contents']:
|
||||
print(f"{obj['Key']}: {obj['Size']}")
|
||||
```
|
||||
|
||||
## Benefits Summary
|
||||
|
||||
### For Users
|
||||
- **Zero learning curve** - if you know boto3, you're done
|
||||
- **Drop-in replacement** - literally change one line (client creation)
|
||||
- **Type safety** - TypedDict provides autocomplete without boto3 dependency
|
||||
- **Tool compatibility** - works with all boto3-compatible libraries
|
||||
|
||||
### For DeltaGlider
|
||||
- **Simpler codebase** - no custom dataclasses to maintain
|
||||
- **Better marketing** - true "drop-in replacement" claim
|
||||
- **Easier testing** - test against boto3 behavior directly
|
||||
- **Future-proof** - if boto3 adds fields, users can access them immediately
|
||||
|
||||
## Technical Details
|
||||
|
||||
### How TypedDict Works
|
||||
|
||||
```python
|
||||
from typing import TypedDict
|
||||
|
||||
class MyResponse(TypedDict):
|
||||
Key: str
|
||||
Size: int
|
||||
|
||||
# At runtime, this is just a dict!
|
||||
response: MyResponse = {'Key': 'file.zip', 'Size': 1024}
|
||||
print(type(response)) # <class 'dict'>
|
||||
|
||||
# But mypy and IDEs understand the structure
|
||||
response['Key'] # ✅ Autocomplete works!
|
||||
response['Nonexistent'] # ❌ Mypy error: Key 'Nonexistent' not found
|
||||
```
|
||||
|
||||
### DeltaGlider-Specific Metadata
|
||||
|
||||
Store in standard boto3 `Metadata` field:
|
||||
|
||||
```python
|
||||
{
|
||||
'Key': 'file.zip',
|
||||
'Size': 1024,
|
||||
'Metadata': {
|
||||
# DeltaGlider-specific fields (prefixed for safety)
|
||||
'deltaglider-is-delta': 'true',
|
||||
'deltaglider-compression-ratio': '0.99',
|
||||
'deltaglider-original-size': '100000',
|
||||
'deltaglider-reference-key': 'releases/v1.0.0/reference.bin',
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This is:
|
||||
- ✅ boto3-compatible (Metadata is a standard field)
|
||||
- ✅ Namespaced (deltaglider- prefix prevents conflicts)
|
||||
- ✅ Optional (tools can ignore it)
|
||||
- ✅ Type-safe (Metadata: NotRequired[dict[str, str]])
|
||||
|
||||
## Status
|
||||
|
||||
- ✅ **Phase 1:** TypedDict definitions created
|
||||
- ✅ **Phase 2:** `list_objects()` refactored to return boto3-compatible dict
|
||||
- ⏳ **Phase 3:** Refactor remaining methods (`put_object`, `get_object`, etc.) (TODO)
|
||||
- ⏳ **Phase 4:** Backward compatibility with deprecation warnings (TODO)
|
||||
- ⏳ **Phase 5:** Documentation updates (TODO)
|
||||
- ⏳ **Phase 6:** Full test coverage updates (PARTIAL - list_objects tests done)
|
||||
|
||||
**Current:** v4.2.3+ (Phase 2 complete - `list_objects()` boto3-compatible)
|
||||
**Target:** v5.0.0 release (all phases complete)
|
||||
684
docs/CACHE_MANAGEMENT.md
Normal file
684
docs/CACHE_MANAGEMENT.md
Normal file
@@ -0,0 +1,684 @@
|
||||
# Cache Management for Long-Running Applications
|
||||
|
||||
## Overview
|
||||
|
||||
DeltaGlider uses caching to store reference files locally for efficient delta compression. However, unlike the CLI which automatically cleans up cache on exit, **programmatic SDK usage requires manual cache management** for long-running applications.
|
||||
|
||||
This guide explains how to manage cache in production applications, including:
|
||||
- When and how to clear cache
|
||||
- Encryption key management strategies
|
||||
- Memory vs. filesystem cache trade-offs
|
||||
- Best practices for different application types
|
||||
|
||||
## The Problem
|
||||
|
||||
**CLI (Automatic Cleanup)**:
|
||||
```bash
|
||||
# Cache created in /tmp/deltaglider-xyz123/
|
||||
deltaglider cp file.zip s3://bucket/
|
||||
|
||||
# Process exits → Cache automatically deleted via atexit handler
|
||||
# ✅ No manual cleanup needed
|
||||
```
|
||||
|
||||
**SDK (Manual Cleanup Required)**:
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
|
||||
client = create_client()
|
||||
|
||||
# Long-running application (runs for hours/days)
|
||||
while True:
|
||||
client.put_object(Bucket='releases', Key='file.zip', Body=data)
|
||||
time.sleep(600) # Upload every 10 minutes
|
||||
|
||||
# ❌ Process never exits → Cache never cleaned
|
||||
# ❌ Cache grows indefinitely
|
||||
# ❌ Memory/disk exhaustion after days/weeks
|
||||
```
|
||||
|
||||
## Solution: Manual Cache Management
|
||||
|
||||
### Basic Cache Clearing
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
|
||||
client = create_client()
|
||||
|
||||
# Do some uploads
|
||||
client.put_object(Bucket='releases', Key='file1.zip', Body=data1)
|
||||
client.put_object(Bucket='releases', Key='file2.zip', Body=data2)
|
||||
|
||||
# Clear cache to free resources
|
||||
client.clear_cache()
|
||||
|
||||
# ✅ All cached references removed
|
||||
# ✅ Memory/disk freed
|
||||
# ✅ Next upload will fetch fresh reference from S3
|
||||
```
|
||||
|
||||
### When to Clear Cache
|
||||
|
||||
| Scenario | Frequency | Reason |
|
||||
|----------|-----------|--------|
|
||||
| **Long-running services** | Every 1-4 hours | Prevent memory/disk growth |
|
||||
| **After config changes** | Immediately | Old cache is invalid |
|
||||
| **High memory pressure** | As needed | Free resources |
|
||||
| **Test cleanup** | After each test | Ensure clean state |
|
||||
| **Scheduled jobs** | After job completes | Clean up before next run |
|
||||
| **Key rotation** | After rotation | Old encrypted cache unusable |
|
||||
|
||||
## Cache Strategies by Application Type
|
||||
|
||||
### 1. Long-Running Background Service
|
||||
|
||||
**Scenario**: Continuous upload service running 24/7
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
import schedule
|
||||
import time
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
client = create_client()
|
||||
|
||||
def upload_task():
|
||||
"""Upload latest build."""
|
||||
try:
|
||||
with open('latest-build.zip', 'rb') as f:
|
||||
response = client.put_object(
|
||||
Bucket='releases',
|
||||
Key=f'builds/{datetime.now().isoformat()}.zip',
|
||||
Body=f
|
||||
)
|
||||
logger.info(f"Uploaded: {response['ETag']}")
|
||||
except Exception as e:
|
||||
logger.error(f"Upload failed: {e}")
|
||||
|
||||
def cleanup_task():
|
||||
"""Clear cache to prevent growth."""
|
||||
client.clear_cache()
|
||||
logger.info("Cache cleared - freed resources")
|
||||
|
||||
# Upload every 10 minutes
|
||||
schedule.every(10).minutes.do(upload_task)
|
||||
|
||||
# Clear cache every 2 hours (balance performance vs. memory)
|
||||
schedule.every(2).hours.do(cleanup_task)
|
||||
|
||||
# Run indefinitely
|
||||
while True:
|
||||
schedule.run_pending()
|
||||
time.sleep(60)
|
||||
```
|
||||
|
||||
**Cache Clearing Frequency Guidelines**:
|
||||
- Every 1 hour: High upload frequency (>100/day), memory-constrained
|
||||
- Every 2-4 hours: Moderate upload frequency (10-100/day), normal memory
|
||||
- Every 6-12 hours: Low upload frequency (<10/day), abundant memory
|
||||
|
||||
### 2. Periodic Batch Job
|
||||
|
||||
**Scenario**: Daily backup script
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
import glob
|
||||
from pathlib import Path
|
||||
|
||||
def daily_backup():
|
||||
"""Upload daily backups and clean up."""
|
||||
client = create_client()
|
||||
|
||||
try:
|
||||
# Upload all backup files
|
||||
for backup in glob.glob('/backups/*.zip'):
|
||||
with open(backup, 'rb') as f:
|
||||
client.put_object(
|
||||
Bucket='backups',
|
||||
Key=f'daily/{Path(backup).name}',
|
||||
Body=f
|
||||
)
|
||||
print(f"Backed up: {backup}")
|
||||
|
||||
finally:
|
||||
# ALWAYS clear cache at end of job
|
||||
client.clear_cache()
|
||||
print("Cache cleared - job complete")
|
||||
|
||||
# Run daily via cron/systemd timer
|
||||
if __name__ == '__main__':
|
||||
daily_backup()
|
||||
```
|
||||
|
||||
**Best Practice**: Always clear cache in `finally` block to ensure cleanup even if job fails.
|
||||
|
||||
### 3. Web Application / API Server
|
||||
|
||||
**Scenario**: Flask/FastAPI app with upload endpoints
|
||||
|
||||
```python
|
||||
from fastapi import FastAPI, UploadFile, BackgroundTasks
|
||||
from deltaglider import create_client
|
||||
import asyncio
|
||||
|
||||
app = FastAPI()
|
||||
|
||||
# Create client once at startup
|
||||
client = create_client()
|
||||
|
||||
async def periodic_cache_cleanup():
|
||||
"""Background task to clear cache periodically."""
|
||||
while True:
|
||||
await asyncio.sleep(3600) # Every hour
|
||||
client.clear_cache()
|
||||
print("Cache cleared in background")
|
||||
|
||||
@app.on_event("startup")
|
||||
async def startup_event():
|
||||
"""Start background cache cleanup task."""
|
||||
asyncio.create_task(periodic_cache_cleanup())
|
||||
|
||||
@app.post("/upload")
|
||||
async def upload_file(file: UploadFile):
|
||||
"""Upload endpoint with automatic cache management."""
|
||||
content = await file.read()
|
||||
|
||||
response = client.put_object(
|
||||
Bucket='uploads',
|
||||
Key=f'files/{file.filename}',
|
||||
Body=content
|
||||
)
|
||||
|
||||
return {"message": "Uploaded", "etag": response['ETag']}
|
||||
|
||||
@app.post("/admin/clear-cache")
|
||||
async def admin_clear_cache():
|
||||
"""Manual cache clear endpoint for admin."""
|
||||
client.clear_cache()
|
||||
return {"message": "Cache cleared"}
|
||||
```
|
||||
|
||||
**Best Practice**: Run periodic cache cleanup in background task, provide manual clear endpoint for emergencies.
|
||||
|
||||
### 4. Testing / CI/CD
|
||||
|
||||
**Scenario**: Test suite using DeltaGlider
|
||||
|
||||
```python
|
||||
import pytest
|
||||
from deltaglider import create_client
|
||||
|
||||
@pytest.fixture
|
||||
def deltaglider_client():
|
||||
"""Provide clean client for each test."""
|
||||
client = create_client()
|
||||
yield client
|
||||
# ALWAYS clear cache after test
|
||||
client.clear_cache()
|
||||
|
||||
def test_upload(deltaglider_client):
|
||||
"""Test upload with automatic cleanup."""
|
||||
response = deltaglider_client.put_object(
|
||||
Bucket='test-bucket',
|
||||
Key='test-file.zip',
|
||||
Body=b'test data'
|
||||
)
|
||||
assert response['ETag'] is not None
|
||||
# Cache automatically cleared by fixture
|
||||
|
||||
def test_download(deltaglider_client):
|
||||
"""Test download with clean cache."""
|
||||
# Cache is clean from previous test
|
||||
deltaglider_client.put_object(Bucket='test', Key='file.zip', Body=b'data')
|
||||
response = deltaglider_client.get_object(Bucket='test', Key='file.zip')
|
||||
assert response['Body'].read() == b'data'
|
||||
# Cache automatically cleared by fixture
|
||||
```
|
||||
|
||||
**Best Practice**: Use pytest fixtures to ensure cache is cleared after each test.
|
||||
|
||||
### 5. AWS Lambda / Serverless
|
||||
|
||||
**Scenario**: Lambda function with warm container reuse
|
||||
|
||||
```python
|
||||
import os
|
||||
from deltaglider import create_client
|
||||
|
||||
# Initialize client outside handler (reused across invocations)
|
||||
client = create_client()
|
||||
|
||||
# Track invocation count for cache clearing
|
||||
invocation_count = 0
|
||||
|
||||
def lambda_handler(event, context):
|
||||
"""Lambda handler with periodic cache clearing."""
|
||||
global invocation_count
|
||||
invocation_count += 1
|
||||
|
||||
try:
|
||||
# Upload file
|
||||
response = client.put_object(
|
||||
Bucket='lambda-uploads',
|
||||
Key=event['filename'],
|
||||
Body=event['data']
|
||||
)
|
||||
|
||||
# Clear cache every 50 invocations (warm container optimization)
|
||||
if invocation_count % 50 == 0:
|
||||
client.clear_cache()
|
||||
print(f"Cache cleared after {invocation_count} invocations")
|
||||
|
||||
return {'statusCode': 200, 'etag': response['ETag']}
|
||||
|
||||
except Exception as e:
|
||||
# Clear cache on error to prevent poisoned state
|
||||
client.clear_cache()
|
||||
return {'statusCode': 500, 'error': str(e)}
|
||||
```
|
||||
|
||||
**Best Practice**: Clear cache periodically (every N invocations) and on errors. Lambda warm containers can reuse cache across invocations for performance.
|
||||
|
||||
## Encryption Key Management
|
||||
|
||||
DeltaGlider always encrypts cache data. Understanding key management is critical for programmatic usage.
|
||||
|
||||
### Ephemeral Keys (Default - Recommended)
|
||||
|
||||
**How It Works**:
|
||||
- New encryption key generated per client instance
|
||||
- Cache encrypted with instance-specific key
|
||||
- Key lost when client is garbage collected
|
||||
- **Maximum security** - keys never persist
|
||||
|
||||
**When to Use**:
|
||||
- Single-process applications
|
||||
- Short-lived scripts
|
||||
- CI/CD pipelines
|
||||
- Testing
|
||||
- Maximum security requirements
|
||||
|
||||
**Example**:
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
|
||||
# Create client (generates ephemeral key automatically)
|
||||
client = create_client()
|
||||
|
||||
# Upload file (encrypted with ephemeral key)
|
||||
client.put_object(Bucket='bucket', Key='file.zip', Body=data)
|
||||
|
||||
# Clear cache
|
||||
client.clear_cache()
|
||||
|
||||
# ✅ Encrypted cache cleared
|
||||
# ✅ Key was never persisted
|
||||
# ✅ Perfect forward secrecy
|
||||
```
|
||||
|
||||
**Characteristics**:
|
||||
- ✅ Maximum security (keys never leave process)
|
||||
- ✅ Perfect forward secrecy
|
||||
- ✅ No key management overhead
|
||||
- ❌ Cache not shareable between processes
|
||||
- ❌ Cache not reusable after client recreation
|
||||
|
||||
### Persistent Keys (Advanced - Shared Cache)
|
||||
|
||||
**How It Works**:
|
||||
- Use same encryption key across multiple processes/clients
|
||||
- Key stored in environment variable or secrets manager
|
||||
- All processes can read each other's encrypted cache
|
||||
- **Trade-off**: Convenience vs. security
|
||||
|
||||
**When to Use**:
|
||||
- Multi-process applications (workers, replicas)
|
||||
- Shared cache across containers
|
||||
- Cache persistence across application restarts
|
||||
- Horizontal scaling scenarios
|
||||
|
||||
**Example - Environment Variable**:
|
||||
```python
|
||||
import os
|
||||
from cryptography.fernet import Fernet
|
||||
import base64
|
||||
|
||||
# Generate persistent key (do this ONCE, securely)
|
||||
key = Fernet.generate_key()
|
||||
key_b64 = base64.b64encode(key).decode('utf-8')
|
||||
print(f"DG_CACHE_ENCRYPTION_KEY={key_b64}") # Store in secrets manager!
|
||||
|
||||
# Set in environment (or use secrets manager)
|
||||
os.environ['DG_CACHE_ENCRYPTION_KEY'] = key_b64
|
||||
|
||||
# All client instances use same key
|
||||
client1 = create_client()
|
||||
client2 = create_client()
|
||||
|
||||
# Client1 writes to cache
|
||||
client1.put_object(Bucket='bucket', Key='file.zip', Body=data)
|
||||
|
||||
# Client2 can read same cached data (same key!)
|
||||
client2.get_object(Bucket='bucket', Key='file.zip')
|
||||
|
||||
# ✅ Cache shared between processes
|
||||
# ⚠️ Key must be securely managed
|
||||
```
|
||||
|
||||
**Example - AWS Secrets Manager**:
|
||||
```python
|
||||
import boto3
|
||||
import json
|
||||
from deltaglider import create_client
|
||||
|
||||
def get_encryption_key_from_secrets_manager():
|
||||
"""Retrieve encryption key from AWS Secrets Manager."""
|
||||
secrets = boto3.client('secretsmanager', region_name='us-west-2')
|
||||
response = secrets.get_secret_value(SecretId='deltaglider/cache-encryption-key')
|
||||
secret = json.loads(response['SecretString'])
|
||||
return secret['encryption_key']
|
||||
|
||||
# Retrieve key securely
|
||||
os.environ['DG_CACHE_ENCRYPTION_KEY'] = get_encryption_key_from_secrets_manager()
|
||||
|
||||
# Create client with persistent key
|
||||
client = create_client()
|
||||
```
|
||||
|
||||
**Security Best Practices for Persistent Keys**:
|
||||
1. **Generate once**: Never regenerate unless rotating
|
||||
2. **Store securely**: AWS Secrets Manager, HashiCorp Vault, etc.
|
||||
3. **Rotate regularly**: Follow your key rotation policy
|
||||
4. **Audit access**: Log who accesses encryption keys
|
||||
5. **Principle of least privilege**: Only processes that need shared cache get the key
|
||||
|
||||
### Key Rotation
|
||||
|
||||
**When to Rotate**:
|
||||
- Regular schedule (every 90 days recommended)
|
||||
- After security incident
|
||||
- When team members leave
|
||||
- After key exposure
|
||||
|
||||
**How to Rotate**:
|
||||
```python
|
||||
import os
|
||||
from deltaglider import create_client
|
||||
|
||||
# OLD KEY (existing cache encrypted with this)
|
||||
old_key = os.environ.get('DG_CACHE_ENCRYPTION_KEY')
|
||||
|
||||
# Generate NEW KEY
|
||||
from cryptography.fernet import Fernet
|
||||
new_key = Fernet.generate_key()
|
||||
new_key_b64 = base64.b64encode(new_key).decode('utf-8')
|
||||
|
||||
# Steps for rotation:
|
||||
# 1. Clear old cache (encrypted with old key)
|
||||
client_old = create_client() # Uses old key from env
|
||||
client_old.clear_cache()
|
||||
|
||||
# 2. Update environment with new key
|
||||
os.environ['DG_CACHE_ENCRYPTION_KEY'] = new_key_b64
|
||||
|
||||
# 3. Create new client with new key
|
||||
client_new = create_client() # Uses new key
|
||||
|
||||
# 4. Continue operations
|
||||
client_new.put_object(Bucket='bucket', Key='file.zip', Body=data)
|
||||
|
||||
# ✅ Cache now encrypted with new key
|
||||
# ✅ Old encrypted cache cleared
|
||||
```
|
||||
|
||||
## Memory vs. Filesystem Cache
|
||||
|
||||
### Filesystem Cache (Default)
|
||||
|
||||
**Characteristics**:
|
||||
- Stored in `/tmp/deltaglider-*/`
|
||||
- Persistent across client recreations (within same process)
|
||||
- Can be shared between processes (with persistent encryption key)
|
||||
- Slower than memory cache (disk I/O)
|
||||
|
||||
**Configuration**:
|
||||
```python
|
||||
import os
|
||||
|
||||
# Explicitly set filesystem cache (this is the default)
|
||||
os.environ['DG_CACHE_BACKEND'] = 'filesystem'
|
||||
|
||||
from deltaglider import create_client
|
||||
client = create_client()
|
||||
```
|
||||
|
||||
**When to Use**:
|
||||
- Default choice for most applications
|
||||
- When cache should persist across client recreations
|
||||
- Multi-process applications (with persistent key)
|
||||
- Memory-constrained environments
|
||||
|
||||
**Cache Clearing**:
|
||||
```python
|
||||
client.clear_cache()
|
||||
# Removes all files from /tmp/deltaglider-*/
|
||||
# Frees disk space
|
||||
```
|
||||
|
||||
### Memory Cache
|
||||
|
||||
**Characteristics**:
|
||||
- Stored in process memory (RAM)
|
||||
- Fast access (no disk I/O)
|
||||
- Automatically freed when process exits
|
||||
- LRU eviction prevents unlimited growth
|
||||
- Not shared between processes
|
||||
|
||||
**Configuration**:
|
||||
```python
|
||||
import os
|
||||
|
||||
# Enable memory cache
|
||||
os.environ['DG_CACHE_BACKEND'] = 'memory'
|
||||
os.environ['DG_CACHE_MEMORY_SIZE_MB'] = '200' # Default: 100MB
|
||||
|
||||
from deltaglider import create_client
|
||||
client = create_client()
|
||||
```
|
||||
|
||||
**When to Use**:
|
||||
- High-performance requirements
|
||||
- Ephemeral environments (containers, Lambda)
|
||||
- Short-lived applications
|
||||
- CI/CD pipelines
|
||||
- When disk I/O is bottleneck
|
||||
|
||||
**Cache Clearing**:
|
||||
```python
|
||||
client.clear_cache()
|
||||
# Frees memory immediately
|
||||
# No disk I/O
|
||||
```
|
||||
|
||||
**LRU Eviction**:
|
||||
Memory cache automatically evicts least recently used entries when size limit is reached. No manual intervention needed.
|
||||
|
||||
## Best Practices Summary
|
||||
|
||||
### ✅ DO
|
||||
|
||||
1. **Clear cache periodically** in long-running applications
|
||||
2. **Clear cache in `finally` blocks** for batch jobs
|
||||
3. **Use fixtures for tests** to ensure clean state
|
||||
4. **Monitor cache size** in production
|
||||
5. **Use ephemeral keys** when possible (maximum security)
|
||||
6. **Store persistent keys securely** (Secrets Manager, Vault)
|
||||
7. **Rotate encryption keys** regularly
|
||||
8. **Use memory cache** for ephemeral environments
|
||||
9. **Clear cache after config changes**
|
||||
10. **Document cache strategy** for your application
|
||||
|
||||
### ❌ DON'T
|
||||
|
||||
1. **Never let cache grow unbounded** in long-running apps
|
||||
2. **Don't share ephemeral encrypted cache** between processes
|
||||
3. **Don't store persistent keys in code** or version control
|
||||
4. **Don't forget to clear cache in tests**
|
||||
5. **Don't assume cache is automatically cleaned** in SDK usage
|
||||
6. **Don't use persistent keys** unless you need cross-process sharing
|
||||
7. **Don't skip key rotation**
|
||||
8. **Don't ignore memory limits** for memory cache
|
||||
|
||||
## Monitoring Cache Health
|
||||
|
||||
### Cache Size Tracking
|
||||
|
||||
```python
|
||||
import os
|
||||
from pathlib import Path
|
||||
from deltaglider import create_client
|
||||
|
||||
def get_cache_size_mb(cache_dir: Path) -> float:
|
||||
"""Calculate total cache size in MB."""
|
||||
total_bytes = sum(f.stat().st_size for f in cache_dir.rglob('*') if f.is_file())
|
||||
return total_bytes / (1024 * 1024)
|
||||
|
||||
# Get cache directory (ephemeral, changes per process)
|
||||
cache_dir = Path(tempfile.gettempdir())
|
||||
deltaglider_caches = list(cache_dir.glob('deltaglider-*'))
|
||||
|
||||
if deltaglider_caches:
|
||||
cache_path = deltaglider_caches[0]
|
||||
cache_size_mb = get_cache_size_mb(cache_path)
|
||||
print(f"Cache size: {cache_size_mb:.2f} MB")
|
||||
|
||||
# Clear if over threshold
|
||||
if cache_size_mb > 500:
|
||||
client = create_client()
|
||||
client.clear_cache()
|
||||
print("Cache cleared - exceeded 500MB")
|
||||
```
|
||||
|
||||
### Memory Cache Monitoring
|
||||
|
||||
```python
|
||||
import os
|
||||
os.environ['DG_CACHE_BACKEND'] = 'memory'
|
||||
|
||||
from deltaglider import create_client
|
||||
|
||||
client = create_client()
|
||||
|
||||
# Memory cache auto-evicts at configured limit
|
||||
# Monitor via application memory usage tools (not direct API)
|
||||
|
||||
# Example with memory_profiler
|
||||
from memory_profiler import profile
|
||||
|
||||
@profile
|
||||
def upload_many_files():
|
||||
for i in range(1000):
|
||||
client.put_object(Bucket='test', Key=f'file{i}.zip', Body=b'data' * 1000)
|
||||
# Check memory profile to see cache memory usage
|
||||
|
||||
upload_many_files()
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Problem: Cache Growing Too Large
|
||||
|
||||
**Symptoms**:
|
||||
- Disk space running out (`/tmp` filling up)
|
||||
- High memory usage
|
||||
|
||||
**Solution**:
|
||||
```python
|
||||
# Implement automatic cache clearing
|
||||
import psutil
|
||||
|
||||
client = create_client()
|
||||
|
||||
def smart_cache_clear():
|
||||
"""Clear cache if memory/disk pressure detected."""
|
||||
# Check disk space
|
||||
disk = psutil.disk_usage('/tmp')
|
||||
if disk.percent > 80:
|
||||
client.clear_cache()
|
||||
print("Cache cleared - disk pressure")
|
||||
return
|
||||
|
||||
# Check memory usage
|
||||
memory = psutil.virtual_memory()
|
||||
if memory.percent > 80:
|
||||
client.clear_cache()
|
||||
print("Cache cleared - memory pressure")
|
||||
|
||||
# Call periodically
|
||||
schedule.every(15).minutes.do(smart_cache_clear)
|
||||
```
|
||||
|
||||
### Problem: Decryption Failures After Key Rotation
|
||||
|
||||
**Symptoms**:
|
||||
- `CacheCorruptionError: Decryption failed`
|
||||
- After rotating encryption keys
|
||||
|
||||
**Solution**:
|
||||
```python
|
||||
# Clear cache before using new key
|
||||
old_client = create_client() # Old key
|
||||
old_client.clear_cache()
|
||||
|
||||
# Update encryption key
|
||||
os.environ['DG_CACHE_ENCRYPTION_KEY'] = new_key
|
||||
|
||||
# Create new client
|
||||
new_client = create_client() # New key
|
||||
```
|
||||
|
||||
### Problem: Tests Failing Due to Cached Data
|
||||
|
||||
**Symptoms**:
|
||||
- Tests pass in isolation, fail when run together
|
||||
- Unexpected data in downloads
|
||||
|
||||
**Solution**:
|
||||
```python
|
||||
# Always clear cache in test teardown
|
||||
@pytest.fixture(autouse=True)
|
||||
def clear_cache_after_test():
|
||||
"""Automatically clear cache after every test."""
|
||||
yield
|
||||
# Teardown
|
||||
client = create_client()
|
||||
client.clear_cache()
|
||||
```
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [docs/sdk/getting-started.md](sdk/getting-started.md) - SDK configuration
|
||||
- [README.md](../README.md) - Docker and environment variables
|
||||
- [CLAUDE.md](../CLAUDE.md) - Development guide
|
||||
|
||||
## Quick Reference
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
|
||||
# Create client
|
||||
client = create_client()
|
||||
|
||||
# Clear all cache
|
||||
client.clear_cache()
|
||||
|
||||
# Use memory cache
|
||||
os.environ['DG_CACHE_BACKEND'] = 'memory'
|
||||
|
||||
# Use persistent encryption key
|
||||
os.environ['DG_CACHE_ENCRYPTION_KEY'] = 'base64-encoded-key'
|
||||
```
|
||||
526
docs/DG_MAX_RATIO.md
Normal file
526
docs/DG_MAX_RATIO.md
Normal file
@@ -0,0 +1,526 @@
|
||||
# DG_MAX_RATIO - Delta Compression Efficiency Guard
|
||||
|
||||
## Overview
|
||||
|
||||
`DG_MAX_RATIO` is a **safety threshold** that prevents inefficient delta compression. It controls the maximum acceptable ratio of `delta_size / original_file_size`.
|
||||
|
||||
**Default**: `0.5` (50%)
|
||||
**Range**: `0.0` to `1.0`
|
||||
**Environment Variable**: `DG_MAX_RATIO`
|
||||
|
||||
## The Problem It Solves
|
||||
|
||||
When files are **too different**, xdelta3 can create a delta file that's **almost as large as the original file** (or even larger!). This defeats the purpose of compression and wastes:
|
||||
- ❌ Storage space (storing a large delta instead of the original)
|
||||
- ❌ CPU time (creating and applying the delta)
|
||||
- ❌ Network bandwidth (downloading delta + reference instead of just the file)
|
||||
|
||||
## How It Works
|
||||
|
||||
```
|
||||
1. Upload file → Create delta using xdelta3
|
||||
2. Calculate ratio = delta_size / original_file_size
|
||||
3. If ratio > DG_MAX_RATIO:
|
||||
❌ Discard delta, store original file directly
|
||||
Else:
|
||||
✅ Keep delta, save storage space
|
||||
```
|
||||
|
||||
### Example Flow
|
||||
|
||||
```
|
||||
Original file: 100MB
|
||||
Reference file: 100MB (uploaded previously)
|
||||
|
||||
xdelta3 creates delta: 60MB
|
||||
Ratio = 60MB / 100MB = 0.6 (60%)
|
||||
|
||||
With DG_MAX_RATIO=0.5:
|
||||
❌ Delta rejected (60% > 50%)
|
||||
Action: Store 100MB original file
|
||||
Reason: Delta not efficient enough
|
||||
|
||||
With DG_MAX_RATIO=0.7:
|
||||
✅ Delta accepted (60% ≤ 70%)
|
||||
Action: Store 60MB delta
|
||||
Savings: 40MB (40%)
|
||||
```
|
||||
|
||||
## Default Value: 0.5 (50%)
|
||||
|
||||
**Meaning**: Only use delta compression if the delta is **≤50% of the original file size**
|
||||
|
||||
This default provides:
|
||||
- ✅ Minimum 50% storage savings when delta compression is used
|
||||
- ✅ Prevents wasting CPU on inefficient compression
|
||||
- ✅ Works well for typical versioned releases (minor updates between versions)
|
||||
- ✅ Balanced approach without manual tuning
|
||||
|
||||
## When to Adjust
|
||||
|
||||
### 🔽 Lower Value (More Conservative)
|
||||
|
||||
**Set `DG_MAX_RATIO=0.2-0.3` when:**
|
||||
- Files change significantly between versions (major updates, refactors)
|
||||
- Storage cost is **very high** (premium S3 tiers, small quotas)
|
||||
- You want to avoid **any** inefficient compression
|
||||
- You need guaranteed high-quality compression (≥70% savings)
|
||||
|
||||
**Example**:
|
||||
```bash
|
||||
export DG_MAX_RATIO=0.3
|
||||
|
||||
# Only accept deltas that are ≤30% of original size
|
||||
# More files stored directly, but guaranteed ≥70% savings when using deltas
|
||||
```
|
||||
|
||||
**Trade-off**:
|
||||
- ✅ Higher quality compression when deltas are used
|
||||
- ❌ Fewer files will use delta compression
|
||||
- ❌ More direct uploads (higher storage for dissimilar files)
|
||||
|
||||
### 🔼 Higher Value (More Permissive)
|
||||
|
||||
**Set `DG_MAX_RATIO=0.6-0.8` when:**
|
||||
- Files are very similar (minor patches, nightly builds, incremental changes)
|
||||
- Storage cost is **cheap** (large S3 buckets, unlimited quotas)
|
||||
- CPU time is **expensive** (you want to save re-uploading even with modest compression)
|
||||
- You want to maximize delta compression usage
|
||||
|
||||
**Example**:
|
||||
```bash
|
||||
export DG_MAX_RATIO=0.7
|
||||
|
||||
# Accept deltas up to 70% of original size
|
||||
# More files use delta compression, even with modest 30% savings
|
||||
```
|
||||
|
||||
**Trade-off**:
|
||||
- ✅ More files use delta compression
|
||||
- ✅ Save bandwidth even with modest compression
|
||||
- ❌ Some deltas may only save 20-30% space
|
||||
- ❌ More CPU time spent on marginal compressions
|
||||
|
||||
## Real-World Scenarios
|
||||
|
||||
### Scenario 1: Nightly Builds (Minimal Changes) ⭐ IDEAL
|
||||
|
||||
```
|
||||
my-app-v1.0.0.zip → 100MB (reference)
|
||||
my-app-v1.0.1.zip → 100MB (0.1% code change)
|
||||
|
||||
Delta: 200KB (0.2% of original)
|
||||
Ratio: 0.002
|
||||
|
||||
With ANY DG_MAX_RATIO: ✅ Use delta (99.8% savings!)
|
||||
Result: Store 200KB instead of 100MB
|
||||
```
|
||||
|
||||
**This is what DeltaGlider excels at!**
|
||||
|
||||
### Scenario 2: Major Version (Significant Changes)
|
||||
|
||||
```
|
||||
my-app-v1.0.0.zip → 100MB (reference)
|
||||
my-app-v2.0.0.zip → 100MB (complete rewrite, 85% different)
|
||||
|
||||
Delta: 85MB (85% of original)
|
||||
Ratio: 0.85
|
||||
|
||||
With DG_MAX_RATIO=0.5: ❌ Store original (85% > 50%)
|
||||
→ Stores 100MB directly
|
||||
→ No compression benefit, but no CPU waste
|
||||
|
||||
With DG_MAX_RATIO=0.9: ✅ Use delta (85% ≤ 90%)
|
||||
→ Stores 85MB delta
|
||||
→ Only 15% savings, questionable benefit
|
||||
```
|
||||
|
||||
**Recommendation**: For major versions, default `0.5` correctly rejects inefficient compression.
|
||||
|
||||
### Scenario 3: Different File Format (Same Content)
|
||||
|
||||
```
|
||||
my-app-v1.0.0.zip → 100MB (ZIP archive)
|
||||
my-app-v1.0.0.tar → 100MB (TAR archive, same content)
|
||||
|
||||
Delta: 70MB (completely different format structure)
|
||||
Ratio: 0.70
|
||||
|
||||
With DG_MAX_RATIO=0.5: ❌ Store original (70% > 50%)
|
||||
→ Stores 100MB directly
|
||||
→ Correct decision: formats too different
|
||||
|
||||
With DG_MAX_RATIO=0.8: ✅ Use delta (70% ≤ 80%)
|
||||
→ Stores 70MB delta
|
||||
→ 30% savings, but CPU-intensive
|
||||
```
|
||||
|
||||
**Recommendation**: Use consistent file formats for better compression. Default `0.5` correctly rejects cross-format compression.
|
||||
|
||||
### Scenario 4: Incremental Updates (Sweet Spot) ⭐
|
||||
|
||||
```
|
||||
my-app-v1.0.0.zip → 100MB (reference)
|
||||
my-app-v1.0.1.zip → 100MB (minor bugfix, 5% code change)
|
||||
|
||||
Delta: 5MB (5% of original)
|
||||
Ratio: 0.05
|
||||
|
||||
With ANY DG_MAX_RATIO: ✅ Use delta (95% savings!)
|
||||
Result: Store 5MB instead of 100MB
|
||||
```
|
||||
|
||||
**This is the target use case for delta compression!**
|
||||
|
||||
## How to Choose the Right Value
|
||||
|
||||
### Decision Tree
|
||||
|
||||
```
|
||||
Do your files have minimal changes between versions? (< 5% different)
|
||||
├─ YES → Use default 0.5 ✅
|
||||
│ Delta compression will work perfectly
|
||||
│
|
||||
└─ NO → Are your files significantly different? (> 50% different)
|
||||
├─ YES → Lower to 0.2-0.3 🔽
|
||||
│ Avoid wasting time on inefficient compression
|
||||
│
|
||||
└─ NO → Are they moderately different? (20-50% different)
|
||||
├─ Storage is expensive → Lower to 0.3 🔽
|
||||
│ Only high-quality compression
|
||||
│
|
||||
└─ Storage is cheap → Raise to 0.6-0.7 🔼
|
||||
Accept modest savings
|
||||
```
|
||||
|
||||
### Quick Reference Table
|
||||
|
||||
| File Similarity | Recommended DG_MAX_RATIO | Expected Behavior |
|
||||
|----------------|--------------------------|-------------------|
|
||||
| Nearly identical (< 5% change) | **0.5 (default)** | 🟢 95%+ savings |
|
||||
| Minor updates (5-20% change) | **0.5 (default)** | 🟢 80-95% savings |
|
||||
| Moderate changes (20-50% change) | **0.4-0.6** | 🟡 50-80% savings |
|
||||
| Major changes (50-80% change) | **0.3 or lower** | 🔴 Store directly |
|
||||
| Complete rewrites (> 80% change) | **0.3 or lower** | 🔴 Store directly |
|
||||
|
||||
### Use Cases by Industry
|
||||
|
||||
**Software Releases (SaaS)**:
|
||||
```bash
|
||||
export DG_MAX_RATIO=0.5 # Default
|
||||
# Nightly builds with minor changes compress perfectly
|
||||
```
|
||||
|
||||
**Mobile App Builds**:
|
||||
```bash
|
||||
export DG_MAX_RATIO=0.4 # Slightly conservative
|
||||
# iOS/Android builds can vary, want quality compression only
|
||||
```
|
||||
|
||||
**Database Backups**:
|
||||
```bash
|
||||
export DG_MAX_RATIO=0.7 # Permissive
|
||||
# Daily backups are very similar, accept modest savings
|
||||
```
|
||||
|
||||
**Document Archives**:
|
||||
```bash
|
||||
export DG_MAX_RATIO=0.6 # Moderate
|
||||
# Documents change incrementally, accept good savings
|
||||
```
|
||||
|
||||
**Video/Media Archives**:
|
||||
```bash
|
||||
export DG_MAX_RATIO=0.2 # Very conservative
|
||||
# Media files are unique, only compress if very similar
|
||||
```
|
||||
|
||||
## Configuration Examples
|
||||
|
||||
### Docker
|
||||
|
||||
**Conservative (Premium Storage)**:
|
||||
```bash
|
||||
docker run -e DG_MAX_RATIO=0.3 \
|
||||
-e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY \
|
||||
deltaglider/deltaglider cp file.zip s3://releases/
|
||||
```
|
||||
|
||||
**Default (Balanced)**:
|
||||
```bash
|
||||
docker run -e DG_MAX_RATIO=0.5 \
|
||||
-e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY \
|
||||
deltaglider/deltaglider cp file.zip s3://releases/
|
||||
```
|
||||
|
||||
**Permissive (Cheap Storage)**:
|
||||
```bash
|
||||
docker run -e DG_MAX_RATIO=0.7 \
|
||||
-e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY \
|
||||
deltaglider/deltaglider cp file.zip s3://releases/
|
||||
```
|
||||
|
||||
### Python SDK
|
||||
|
||||
```python
|
||||
import os
|
||||
|
||||
# Conservative (only high-quality compression)
|
||||
os.environ['DG_MAX_RATIO'] = '0.3'
|
||||
|
||||
# Default (balanced)
|
||||
os.environ['DG_MAX_RATIO'] = '0.5'
|
||||
|
||||
# Permissive (accept modest savings)
|
||||
os.environ['DG_MAX_RATIO'] = '0.7'
|
||||
|
||||
from deltaglider import create_client
|
||||
client = create_client()
|
||||
|
||||
summary = client.upload("file.zip", "s3://bucket/")
|
||||
print(f"Delta ratio: {summary.delta_ratio:.2f}")
|
||||
print(f"Used delta: {summary.is_delta}")
|
||||
```
|
||||
|
||||
### CLI
|
||||
|
||||
```bash
|
||||
# Conservative
|
||||
export DG_MAX_RATIO=0.3
|
||||
deltaglider cp my-app-v2.0.0.zip s3://releases/
|
||||
|
||||
# Default
|
||||
export DG_MAX_RATIO=0.5
|
||||
deltaglider cp my-app-v2.0.0.zip s3://releases/
|
||||
|
||||
# Permissive
|
||||
export DG_MAX_RATIO=0.7
|
||||
deltaglider cp my-app-v2.0.0.zip s3://releases/
|
||||
```
|
||||
|
||||
### Override Per-Upload (CLI)
|
||||
|
||||
```bash
|
||||
# Use custom ratio for specific file
|
||||
deltaglider cp large-file.zip s3://releases/ --max-ratio 0.3
|
||||
```
|
||||
|
||||
## Monitoring and Tuning
|
||||
|
||||
### Check Delta Ratios After Upload
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
|
||||
client = create_client()
|
||||
summary = client.upload("file.zip", "s3://bucket/")
|
||||
|
||||
print(f"Delta ratio: {summary.delta_ratio:.2f}")
|
||||
print(f"Savings: {summary.savings_percent:.0f}%")
|
||||
print(f"Used delta: {summary.is_delta}")
|
||||
|
||||
if summary.is_delta:
|
||||
print(f"✅ Used delta compression (ratio {summary.delta_ratio:.2f})")
|
||||
if summary.delta_ratio > 0.4:
|
||||
print(f"⚠️ Consider lowering DG_MAX_RATIO for better quality")
|
||||
else:
|
||||
print(f"❌ Stored directly (delta would have been ratio ~{summary.delta_ratio:.2f})")
|
||||
if summary.delta_ratio < 0.6:
|
||||
print(f"💡 Consider raising DG_MAX_RATIO to enable compression")
|
||||
```
|
||||
|
||||
### Batch Analysis
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
from pathlib import Path
|
||||
|
||||
client = create_client()
|
||||
ratios = []
|
||||
|
||||
for file in Path("releases").glob("*.zip"):
|
||||
summary = client.upload(str(file), f"s3://bucket/{file.name}")
|
||||
if summary.is_delta:
|
||||
ratios.append(summary.delta_ratio)
|
||||
|
||||
if ratios:
|
||||
avg_ratio = sum(ratios) / len(ratios)
|
||||
max_ratio = max(ratios)
|
||||
|
||||
print(f"Average delta ratio: {avg_ratio:.2f}")
|
||||
print(f"Maximum delta ratio: {max_ratio:.2f}")
|
||||
print(f"Files compressed: {len(ratios)}")
|
||||
|
||||
if avg_ratio < 0.2:
|
||||
print("💡 Consider raising DG_MAX_RATIO - you're getting excellent compression")
|
||||
elif max_ratio > 0.6:
|
||||
print("⚠️ Consider lowering DG_MAX_RATIO - some deltas are inefficient")
|
||||
```
|
||||
|
||||
### Optimization Strategy
|
||||
|
||||
1. **Start with default (0.5)**
|
||||
2. **Monitor delta ratios** for 1 week of uploads
|
||||
3. **Analyze results**:
|
||||
- If most ratios < 0.2: Consider raising to 0.6-0.7
|
||||
- If many ratios > 0.4: Consider lowering to 0.3-0.4
|
||||
- If ratios vary widely: Keep default 0.5
|
||||
4. **Adjust and re-test** for 1 week
|
||||
5. **Repeat until optimal** for your use case
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Dynamic Ratio Based on File Type
|
||||
|
||||
```python
|
||||
import os
|
||||
from pathlib import Path
|
||||
from deltaglider import create_client
|
||||
|
||||
def get_optimal_ratio(file_path: str) -> float:
|
||||
"""Determine optimal ratio based on file type."""
|
||||
suffix = Path(file_path).suffix.lower()
|
||||
|
||||
# Very compressible (source code archives)
|
||||
if suffix in ['.zip', '.tar', '.gz']:
|
||||
return 0.6
|
||||
|
||||
# Moderately compressible (compiled binaries)
|
||||
elif suffix in ['.jar', '.war', '.deb', '.rpm']:
|
||||
return 0.5
|
||||
|
||||
# Rarely compressible (media, already compressed)
|
||||
elif suffix in ['.mp4', '.jpg', '.png']:
|
||||
return 0.2
|
||||
|
||||
# Default
|
||||
return 0.5
|
||||
|
||||
file = "my-app.zip"
|
||||
os.environ['DG_MAX_RATIO'] = str(get_optimal_ratio(file))
|
||||
|
||||
client = create_client()
|
||||
summary = client.upload(file, "s3://bucket/")
|
||||
```
|
||||
|
||||
### A/B Testing Different Ratios
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
import os
|
||||
|
||||
def test_ratios(file_path: str, ratios: list[float]):
|
||||
"""Test different ratios and report results."""
|
||||
results = []
|
||||
|
||||
for ratio in ratios:
|
||||
os.environ['DG_MAX_RATIO'] = str(ratio)
|
||||
client = create_client()
|
||||
|
||||
# Simulate upload (don't actually upload)
|
||||
summary = client.estimate_compression(file_path, "s3://bucket/test/")
|
||||
|
||||
results.append({
|
||||
'ratio_threshold': ratio,
|
||||
'would_use_delta': summary.delta_ratio <= ratio,
|
||||
'delta_ratio': summary.delta_ratio,
|
||||
'savings': summary.savings_percent if summary.delta_ratio <= ratio else 0
|
||||
})
|
||||
|
||||
return results
|
||||
|
||||
# Test different ratios
|
||||
file = "my-app-v2.0.0.zip"
|
||||
test_results = test_ratios(file, [0.3, 0.5, 0.7])
|
||||
|
||||
for result in test_results:
|
||||
print(f"Ratio {result['ratio_threshold']}: "
|
||||
f"Delta={result['would_use_delta']}, "
|
||||
f"Savings={result['savings']:.0f}%")
|
||||
```
|
||||
|
||||
## FAQ
|
||||
|
||||
### Q: What happens if I set DG_MAX_RATIO=1.0?
|
||||
|
||||
**A**: Delta compression will **always** be used, even if the delta is larger than the original file! This is generally a bad idea and defeats the purpose of the threshold.
|
||||
|
||||
**Example**:
|
||||
```bash
|
||||
export DG_MAX_RATIO=1.0
|
||||
|
||||
# File: 100MB, Delta: 120MB
|
||||
# Ratio: 1.2
|
||||
# With DG_MAX_RATIO=1.0: ✅ Use delta (1.2 > 1.0 but we accept anything ≤1.0)
|
||||
# Wait, that's wrong! The delta is LARGER than the original!
|
||||
|
||||
# NEVER set DG_MAX_RATIO to 1.0 or higher
|
||||
```
|
||||
|
||||
### Q: What happens if I set DG_MAX_RATIO=0.0?
|
||||
|
||||
**A**: Delta compression will **never** be used. All files will be stored directly. This is equivalent to disabling DeltaGlider's compression entirely.
|
||||
|
||||
### Q: Can I disable the ratio check?
|
||||
|
||||
**A**: No, and you shouldn't want to. The ratio check is a critical safety feature that prevents wasting storage and CPU on inefficient compression.
|
||||
|
||||
### Q: Does DG_MAX_RATIO affect downloading?
|
||||
|
||||
**A**: No, `DG_MAX_RATIO` only affects **uploads**. During download, DeltaGlider automatically detects whether a file is stored as a delta or directly and handles reconstruction accordingly.
|
||||
|
||||
### Q: Can I set different ratios for different buckets?
|
||||
|
||||
**A**: Not directly via environment variables, but you can change `DG_MAX_RATIO` before each upload in your code:
|
||||
|
||||
```python
|
||||
import os
|
||||
from deltaglider import create_client
|
||||
|
||||
# High-quality compression for production releases
|
||||
os.environ['DG_MAX_RATIO'] = '0.3'
|
||||
client = create_client()
|
||||
client.upload("prod-release.zip", "s3://production/")
|
||||
|
||||
# Permissive compression for dev builds
|
||||
os.environ['DG_MAX_RATIO'] = '0.7'
|
||||
client = create_client()
|
||||
client.upload("dev-build.zip", "s3://development/")
|
||||
```
|
||||
|
||||
### Q: How do I know if my DG_MAX_RATIO is set correctly?
|
||||
|
||||
**A**: Monitor your upload summaries. If most deltas have ratios close to your threshold (e.g., 0.45-0.50 with default 0.5), you might want to lower it. If most deltas have very low ratios (e.g., < 0.2), you could raise it.
|
||||
|
||||
**Ideal scenario**: Most successful delta compressions have ratios < 0.3, and inefficient deltas (> 0.5) are correctly rejected.
|
||||
|
||||
## Summary
|
||||
|
||||
**`DG_MAX_RATIO` prevents wasting time and storage on inefficient delta compression.**
|
||||
|
||||
### Quick Takeaways
|
||||
|
||||
✅ **Default 0.5 works for 90% of use cases**
|
||||
✅ **Lower values (0.2-0.3) for dissimilar files or expensive storage**
|
||||
✅ **Higher values (0.6-0.7) for very similar files or cheap storage**
|
||||
✅ **Monitor delta ratios to tune for your use case**
|
||||
✅ **Never set to 1.0 or higher (defeats the purpose)**
|
||||
✅ **Never set to 0.0 (disables delta compression entirely)**
|
||||
|
||||
### Golden Rule
|
||||
|
||||
**If you're not sure, keep the default `0.5`.**
|
||||
|
||||
It's a sensible balance that:
|
||||
- Prevents inefficient compression (no deltas > 50% of original size)
|
||||
- Allows excellent savings on similar files (most deltas are < 20%)
|
||||
- Works well for typical versioned releases
|
||||
- Requires no manual tuning for most use cases
|
||||
|
||||
---
|
||||
|
||||
**Related Documentation**:
|
||||
- [CLAUDE.md](../CLAUDE.md) - Environment variables reference
|
||||
- [README.md](../README.md) - Docker usage and configuration
|
||||
- [docs/sdk/getting-started.md](sdk/getting-started.md) - SDK configuration guide
|
||||
@@ -1,21 +1,23 @@
|
||||
# AWS S3 CLI Compatibility Plan for DeltaGlider
|
||||
# AWS S3 CLI Compatibility for DeltaGlider
|
||||
|
||||
## Current State
|
||||
|
||||
DeltaGlider currently provides a custom CLI with the following commands:
|
||||
DeltaGlider provides AWS S3 CLI compatible commands with automatic delta compression:
|
||||
|
||||
### Existing Commands
|
||||
- `deltaglider put <file> <s3_url>` - Upload file with delta compression
|
||||
- `deltaglider get <s3_url> [-o output]` - Download and reconstruct file
|
||||
### Commands
|
||||
- `deltaglider cp <source> <destination>` - Copy files with delta compression
|
||||
- `deltaglider ls [s3_url]` - List buckets and objects
|
||||
- `deltaglider rm <s3_url>` - Remove objects
|
||||
- `deltaglider sync <source> <destination>` - Synchronize directories
|
||||
- `deltaglider verify <s3_url>` - Verify file integrity
|
||||
|
||||
### Current Usage Examples
|
||||
```bash
|
||||
# Upload a file
|
||||
deltaglider put myfile.zip s3://bucket/path/to/file.zip
|
||||
deltaglider cp myfile.zip s3://bucket/path/to/file.zip
|
||||
|
||||
# Download a file (auto-detects .delta)
|
||||
deltaglider get s3://bucket/path/to/file.zip
|
||||
# Download a file
|
||||
deltaglider cp s3://bucket/path/to/file.zip .
|
||||
|
||||
# Verify integrity
|
||||
deltaglider verify s3://bucket/path/to/file.zip.delta
|
||||
@@ -168,18 +170,7 @@ Additional flags specific to DeltaGlider's delta compression:
|
||||
3. Create migration guide from aws-cli
|
||||
4. Performance benchmarks comparing to aws-cli
|
||||
|
||||
## Migration Path for Existing Users
|
||||
|
||||
### Alias Support During Transition
|
||||
```bash
|
||||
# Old command -> New command mapping
|
||||
deltaglider put FILE S3_URL -> deltaglider cp FILE S3_URL
|
||||
deltaglider get S3_URL -> deltaglider cp S3_URL .
|
||||
deltaglider verify S3_URL -> deltaglider ls --verify S3_URL
|
||||
```
|
||||
|
||||
### Environment Variables
|
||||
- `DELTAGLIDER_LEGACY_MODE=1` - Use old command syntax
|
||||
## Environment Variables
|
||||
- `DELTAGLIDER_AWS_COMPAT=1` - Strict AWS S3 CLI compatibility mode
|
||||
|
||||
## Success Criteria
|
||||
|
||||
@@ -57,7 +57,7 @@ aws s3 cp readonlyrest-1.66.1_es8.0.0.zip s3://releases/
|
||||
# Size on S3: 82.5MB
|
||||
|
||||
# With DeltaGlider
|
||||
deltaglider put readonlyrest-1.66.1_es8.0.0.zip s3://releases/
|
||||
deltaglider cp readonlyrest-1.66.1_es8.0.0.zip s3://releases/
|
||||
# Size on S3: 65KB (99.92% smaller!)
|
||||
```
|
||||
|
||||
@@ -186,7 +186,7 @@ This intelligence meant our 127,455 checksum files were uploaded directly, avoid
|
||||
```bash
|
||||
# Simple integration into our CI/CD
|
||||
- aws s3 cp $FILE s3://releases/
|
||||
+ deltaglider put $FILE s3://releases/
|
||||
+ deltaglider cp $FILE s3://releases/
|
||||
```
|
||||
|
||||
### Week 4: Full Migration
|
||||
@@ -253,10 +253,10 @@ Storage costs scale linearly with data growth. Without DeltaGlider:
|
||||
pip install deltaglider
|
||||
|
||||
# Upload a file (automatic compression)
|
||||
deltaglider put my-release-v1.0.0.zip s3://releases/
|
||||
deltaglider cp my-release-v1.0.0.zip s3://releases/
|
||||
|
||||
# Download (automatic reconstruction)
|
||||
deltaglider get s3://releases/my-release-v1.0.0.zip
|
||||
deltaglider cp s3://releases/my-release-v1.0.0.zip .
|
||||
|
||||
# It's that simple.
|
||||
```
|
||||
@@ -277,12 +277,12 @@ completely_different: 0% # No compression (uploaded as-is)
|
||||
**GitHub Actions**:
|
||||
```yaml
|
||||
- name: Upload Release
|
||||
run: deltaglider put dist/*.zip s3://releases/${{ github.ref_name }}/
|
||||
run: deltaglider cp dist/*.zip s3://releases/${{ github.ref_name }}/
|
||||
```
|
||||
|
||||
**Jenkins Pipeline**:
|
||||
```groovy
|
||||
sh "deltaglider put ${WORKSPACE}/target/*.jar s3://artifacts/"
|
||||
sh "deltaglider cp ${WORKSPACE}/target/*.jar s3://artifacts/"
|
||||
```
|
||||
|
||||
**Python Script**:
|
||||
@@ -327,7 +327,7 @@ python calculate_savings.py --path /your/releases
|
||||
# Try it yourself
|
||||
docker run -p 9000:9000 minio/minio # Local S3
|
||||
pip install deltaglider
|
||||
deltaglider put your-file.zip s3://test/
|
||||
deltaglider cp your-file.zip s3://test/
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
@@ -1,13 +1,14 @@
|
||||
# DeltaGlider Python SDK Documentation
|
||||
|
||||
The DeltaGlider Python SDK provides a **100% boto3-compatible API** that works as a drop-in replacement for AWS S3 SDK, while achieving 99%+ compression for versioned artifacts through intelligent binary delta compression.
|
||||
The DeltaGlider Python SDK provides a **boto3-compatible API for core S3 operations** (~20% of methods covering 80% of use cases), while achieving 99%+ compression for versioned artifacts through intelligent binary delta compression.
|
||||
|
||||
## 🎯 Key Highlights
|
||||
|
||||
- **Drop-in boto3 Replacement**: Use your existing boto3 S3 code, just change the import
|
||||
- **boto3-Compatible Core API**: 21 essential S3 methods that work exactly like boto3
|
||||
- **99%+ Compression**: Automatically for versioned files and archives
|
||||
- **Zero Learning Curve**: If you know boto3, you already know DeltaGlider
|
||||
- **Full Compatibility**: Works with AWS S3, MinIO, Cloudflare R2, and all S3-compatible storage
|
||||
- **Familiar API**: If you know boto3, you already know DeltaGlider's core methods
|
||||
- **Full S3 Compatibility**: Works with AWS S3, MinIO, Cloudflare R2, and all S3-compatible storage
|
||||
- **See [BOTO3_COMPATIBILITY.md](../../BOTO3_COMPATIBILITY.md)**: For complete method coverage details
|
||||
|
||||
## Quick Links
|
||||
|
||||
@@ -22,18 +23,44 @@ DeltaGlider provides three ways to interact with your S3 storage:
|
||||
|
||||
### 1. boto3-Compatible API (Recommended) 🌟
|
||||
|
||||
Drop-in replacement for boto3 S3 client with automatic compression:
|
||||
Core boto3 S3 methods with automatic compression (see [BOTO3_COMPATIBILITY.md](../../BOTO3_COMPATIBILITY.md) for full list):
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
|
||||
# Exactly like boto3.client('s3'), but with 99% compression!
|
||||
# Core boto3 S3 methods work exactly the same, with 99% compression!
|
||||
client = create_client()
|
||||
|
||||
# Standard boto3 S3 methods - just work!
|
||||
client.put_object(Bucket='releases', Key='v1.0.0/app.zip', Body=data)
|
||||
response = client.get_object(Bucket='releases', Key='v1.0.0/app.zip')
|
||||
client.list_objects(Bucket='releases', Prefix='v1.0.0/')
|
||||
|
||||
# Optimized list_objects with smart performance defaults (NEW!)
|
||||
# Fast by default - no unnecessary metadata fetching
|
||||
response = client.list_objects(Bucket='releases', Prefix='v1.0.0/')
|
||||
for obj in response['Contents']:
|
||||
print(f"{obj['Key']}: {obj['Size']} bytes")
|
||||
|
||||
# Pagination for large buckets
|
||||
response = client.list_objects(Bucket='releases', MaxKeys=100)
|
||||
while response.get('IsTruncated'):
|
||||
# Process current page
|
||||
for obj in response['Contents']:
|
||||
print(obj['Key'])
|
||||
# Get next page
|
||||
response = client.list_objects(
|
||||
Bucket='releases',
|
||||
MaxKeys=100,
|
||||
ContinuationToken=response.get('NextContinuationToken')
|
||||
)
|
||||
|
||||
# Get detailed compression stats only when needed
|
||||
response = client.list_objects(Bucket='releases', FetchMetadata=True) # Slower but detailed
|
||||
|
||||
# Quick bucket statistics
|
||||
stats = client.get_bucket_stats('releases') # Fast overview
|
||||
stats = client.get_bucket_stats('releases', detailed_stats=True) # With compression metrics
|
||||
|
||||
client.delete_object(Bucket='releases', Key='old-version.zip')
|
||||
```
|
||||
|
||||
@@ -61,7 +88,7 @@ deltaglider sync ./builds/ s3://releases/
|
||||
|
||||
## Migration from boto3
|
||||
|
||||
Migrating from boto3 to DeltaGlider is as simple as changing your import:
|
||||
For core S3 operations, migrating is as simple as changing your import:
|
||||
|
||||
```python
|
||||
# Before (boto3)
|
||||
@@ -69,20 +96,24 @@ import boto3
|
||||
client = boto3.client('s3')
|
||||
client.put_object(Bucket='mybucket', Key='myfile.zip', Body=data)
|
||||
|
||||
# After (DeltaGlider) - That's it! 99% compression automatically
|
||||
# After (DeltaGlider) - Core methods work the same, with 99% compression!
|
||||
from deltaglider import create_client
|
||||
client = create_client()
|
||||
client.put_object(Bucket='mybucket', Key='myfile.zip', Body=data)
|
||||
```
|
||||
|
||||
**Note**: DeltaGlider implements ~21 core S3 methods. For advanced features (versioning, ACLs, multipart uploads >5GB), use boto3 directly. See [BOTO3_COMPATIBILITY.md](../../BOTO3_COMPATIBILITY.md) for details.
|
||||
|
||||
## Key Features
|
||||
|
||||
- **100% boto3 Compatibility**: All S3 methods work exactly as expected
|
||||
- **Core boto3 Compatibility**: 21 essential S3 methods work exactly as expected (~20% coverage, 80% use cases)
|
||||
- **99%+ Compression**: For versioned artifacts and similar files
|
||||
- **Intelligent Detection**: Automatically determines when to use delta compression
|
||||
- **Data Integrity**: SHA256 verification on every operation
|
||||
- **Transparent**: Works with existing tools and workflows
|
||||
- **Production Ready**: Battle-tested with 200K+ files
|
||||
- **Thoroughly Tested**: 99 integration/unit tests with comprehensive coverage
|
||||
- **Type Safe**: Full mypy type checking, zero type errors
|
||||
|
||||
## When to Use DeltaGlider
|
||||
|
||||
@@ -183,7 +214,7 @@ client = create_client(
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
|
||||
# Works exactly like boto3!
|
||||
# Core boto3 methods work exactly like boto3!
|
||||
client = create_client()
|
||||
|
||||
# Upload multiple software versions
|
||||
@@ -215,7 +246,7 @@ for version in versions:
|
||||
2. **Delta Compression**: Subsequent similar files are compared using xdelta3
|
||||
3. **Smart Storage**: Only the differences (deltas) are stored
|
||||
4. **Transparent Reconstruction**: Files are automatically reconstructed on download
|
||||
5. **boto3 Compatibility**: All operations maintain full boto3 API compatibility
|
||||
5. **Core boto3 Compatibility**: Essential operations maintain full boto3 API compatibility
|
||||
|
||||
## Performance
|
||||
|
||||
|
||||
437
docs/sdk/api.md
437
docs/sdk/api.md
@@ -21,7 +21,6 @@ Factory function to create a configured DeltaGlider client with sensible default
|
||||
def create_client(
|
||||
endpoint_url: Optional[str] = None,
|
||||
log_level: str = "INFO",
|
||||
cache_dir: str = "/tmp/.deltaglider/cache",
|
||||
**kwargs
|
||||
) -> DeltaGliderClient
|
||||
```
|
||||
@@ -30,11 +29,12 @@ def create_client(
|
||||
|
||||
- **endpoint_url** (`Optional[str]`): S3 endpoint URL for MinIO, R2, or other S3-compatible storage. If None, uses AWS S3.
|
||||
- **log_level** (`str`): Logging verbosity level. Options: "DEBUG", "INFO", "WARNING", "ERROR". Default: "INFO".
|
||||
- **cache_dir** (`str`): Directory for local reference cache. Default: "/tmp/.deltaglider/cache".
|
||||
- **kwargs**: Additional arguments passed to `DeltaService`:
|
||||
- **tool_version** (`str`): Version string for metadata. Default: "deltaglider/0.1.0"
|
||||
- **max_ratio** (`float`): Maximum acceptable delta/file ratio. Default: 0.5
|
||||
|
||||
**Security Note**: DeltaGlider automatically uses ephemeral, process-isolated cache (`/tmp/deltaglider-*`) that is cleaned up on exit. No configuration needed.
|
||||
|
||||
#### Returns
|
||||
|
||||
`DeltaGliderClient`: Configured client instance ready for use.
|
||||
@@ -48,11 +48,8 @@ client = create_client()
|
||||
# Custom endpoint for MinIO
|
||||
client = create_client(endpoint_url="http://localhost:9000")
|
||||
|
||||
# Debug mode with custom cache
|
||||
client = create_client(
|
||||
log_level="DEBUG",
|
||||
cache_dir="/var/cache/deltaglider"
|
||||
)
|
||||
# Debug mode
|
||||
client = create_client(log_level="DEBUG")
|
||||
|
||||
# Custom delta ratio threshold
|
||||
client = create_client(max_ratio=0.3) # Only use delta if <30% of original
|
||||
@@ -75,7 +72,327 @@ class DeltaGliderClient:
|
||||
|
||||
**Note**: Use `create_client()` instead of instantiating directly.
|
||||
|
||||
### Methods
|
||||
### boto3-Compatible Methods (Recommended)
|
||||
|
||||
These methods provide compatibility with boto3's core S3 client operations. DeltaGlider implements 21 essential S3 methods covering ~80% of common use cases. See [BOTO3_COMPATIBILITY.md](../../BOTO3_COMPATIBILITY.md) for complete coverage details.
|
||||
|
||||
#### `list_objects`
|
||||
|
||||
List objects in a bucket with smart performance optimizations.
|
||||
|
||||
```python
|
||||
def list_objects(
|
||||
self,
|
||||
Bucket: str,
|
||||
Prefix: str = "",
|
||||
Delimiter: str = "",
|
||||
MaxKeys: int = 1000,
|
||||
ContinuationToken: Optional[str] = None,
|
||||
StartAfter: Optional[str] = None,
|
||||
FetchMetadata: bool = False,
|
||||
**kwargs
|
||||
) -> dict[str, Any]
|
||||
```
|
||||
|
||||
##### Parameters
|
||||
|
||||
- **Bucket** (`str`): S3 bucket name.
|
||||
- **Prefix** (`str`): Filter results to keys beginning with prefix.
|
||||
- **Delimiter** (`str`): Delimiter for grouping keys (e.g., '/' for folders).
|
||||
- **MaxKeys** (`int`): Maximum number of keys to return (for pagination). Default: 1000.
|
||||
- **ContinuationToken** (`Optional[str]`): Token from previous response for pagination.
|
||||
- **StartAfter** (`Optional[str]`): Start listing after this key (alternative pagination).
|
||||
- **FetchMetadata** (`bool`): If True, fetch compression metadata for delta files only. Default: False.
|
||||
- **IMPORTANT**: Non-delta files NEVER trigger metadata fetching (no performance impact).
|
||||
- With `FetchMetadata=False`: ~50ms for 1000 objects (1 API call)
|
||||
- With `FetchMetadata=True`: ~2-3s for 1000 objects (1 + N delta files API calls)
|
||||
|
||||
##### Performance Optimization
|
||||
|
||||
The method intelligently optimizes performance by:
|
||||
1. **Never** fetching metadata for non-delta files (they don't need it)
|
||||
2. Only fetching metadata for delta files when explicitly requested
|
||||
3. Supporting efficient pagination for large buckets
|
||||
|
||||
##### Returns
|
||||
|
||||
boto3-compatible dict with:
|
||||
- **Contents** (`list[dict]`): List of S3Object dicts with Key, Size, LastModified, Metadata
|
||||
- **CommonPrefixes** (`list[dict]`): Optional list of common prefixes (folders)
|
||||
- **IsTruncated** (`bool`): Whether more results are available
|
||||
- **NextContinuationToken** (`str`): Token for next page
|
||||
- **KeyCount** (`int`): Number of keys returned
|
||||
|
||||
##### Examples
|
||||
|
||||
```python
|
||||
# Fast listing for UI display (no metadata fetching)
|
||||
response = client.list_objects(Bucket='releases')
|
||||
for obj in response['Contents']:
|
||||
print(f"{obj['Key']}: {obj['Size']} bytes")
|
||||
|
||||
# Paginated listing for large buckets
|
||||
response = client.list_objects(Bucket='releases', MaxKeys=100)
|
||||
while response.get('IsTruncated'):
|
||||
for obj in response['Contents']:
|
||||
print(obj['Key'])
|
||||
response = client.list_objects(
|
||||
Bucket='releases',
|
||||
MaxKeys=100,
|
||||
ContinuationToken=response.get('NextContinuationToken')
|
||||
)
|
||||
|
||||
# Get detailed compression stats (slower, only for analytics)
|
||||
response = client.list_objects(
|
||||
Bucket='releases',
|
||||
FetchMetadata=True # Only fetches for delta files
|
||||
)
|
||||
for obj in response['Contents']:
|
||||
metadata = obj.get('Metadata', {})
|
||||
if metadata.get('deltaglider-is-delta') == 'true':
|
||||
compression = metadata.get('deltaglider-compression-ratio', 'unknown')
|
||||
print(f"{obj['Key']}: {compression} compression")
|
||||
```
|
||||
|
||||
#### `get_bucket_stats`
|
||||
|
||||
Get statistics for a bucket with optional detailed compression metrics. Results are cached per client session for performance.
|
||||
|
||||
```python
|
||||
def get_bucket_stats(
|
||||
self,
|
||||
bucket: str,
|
||||
detailed_stats: bool = False
|
||||
) -> BucketStats
|
||||
```
|
||||
|
||||
##### Parameters
|
||||
|
||||
- **bucket** (`str`): S3 bucket name.
|
||||
- **detailed_stats** (`bool`): If True, fetch accurate compression ratios for delta files. Default: False.
|
||||
- With `detailed_stats=False`: ~50ms for any bucket size (LIST calls only)
|
||||
- With `detailed_stats=True`: ~2-3s per 1000 objects (adds HEAD calls for delta files)
|
||||
|
||||
##### Caching Behavior
|
||||
|
||||
- **Session-scoped cache**: Results cached within client instance lifetime
|
||||
- **Automatic invalidation**: Cache cleared on bucket mutations (put, delete, bucket operations)
|
||||
- **Intelligent reuse**: Detailed stats can serve quick stat requests
|
||||
- **Manual cache control**: Use `clear_cache()` to invalidate all cached stats
|
||||
|
||||
##### Returns
|
||||
|
||||
`BucketStats`: Dataclass containing:
|
||||
- **bucket** (`str`): Bucket name
|
||||
- **object_count** (`int`): Total number of objects
|
||||
- **total_size** (`int`): Original size in bytes (before compression)
|
||||
- **compressed_size** (`int`): Actual stored size in bytes
|
||||
- **space_saved** (`int`): Bytes saved through compression
|
||||
- **average_compression_ratio** (`float`): Average compression ratio (0.0-1.0)
|
||||
- **delta_objects** (`int`): Number of delta-compressed objects
|
||||
- **direct_objects** (`int`): Number of directly stored objects
|
||||
|
||||
##### Examples
|
||||
|
||||
```python
|
||||
# Quick stats for dashboard display (cached after first call)
|
||||
stats = client.get_bucket_stats('releases')
|
||||
print(f"Objects: {stats.object_count}, Size: {stats.total_size}")
|
||||
|
||||
# Second call hits cache (instant response)
|
||||
stats = client.get_bucket_stats('releases')
|
||||
print(f"Space saved: {stats.space_saved} bytes")
|
||||
|
||||
# Detailed stats for analytics (slower but accurate, also cached)
|
||||
stats = client.get_bucket_stats('releases', detailed_stats=True)
|
||||
print(f"Compression ratio: {stats.average_compression_ratio:.1%}")
|
||||
|
||||
# Quick call after detailed call reuses detailed cache (more accurate)
|
||||
quick_stats = client.get_bucket_stats('releases') # Uses detailed cache
|
||||
|
||||
# Clear cache to force refresh
|
||||
client.clear_cache()
|
||||
stats = client.get_bucket_stats('releases') # Fresh computation
|
||||
```
|
||||
|
||||
#### `put_object`
|
||||
|
||||
Upload an object to S3 with automatic delta compression (boto3-compatible).
|
||||
|
||||
```python
|
||||
def put_object(
|
||||
self,
|
||||
Bucket: str,
|
||||
Key: str,
|
||||
Body: bytes | str | Path | None = None,
|
||||
Metadata: Optional[Dict[str, str]] = None,
|
||||
ContentType: Optional[str] = None,
|
||||
**kwargs
|
||||
) -> Dict[str, Any]
|
||||
```
|
||||
|
||||
##### Parameters
|
||||
|
||||
- **Bucket** (`str`): S3 bucket name.
|
||||
- **Key** (`str`): Object key (path in bucket).
|
||||
- **Body** (`bytes | str | Path`): Object data.
|
||||
- **Metadata** (`Optional[Dict[str, str]]`): Custom metadata.
|
||||
- **ContentType** (`Optional[str]`): MIME type (for compatibility).
|
||||
|
||||
##### Returns
|
||||
|
||||
Dict with ETag and DeltaGlider compression info.
|
||||
|
||||
#### `get_object`
|
||||
|
||||
Download an object from S3 with automatic delta reconstruction (boto3-compatible).
|
||||
|
||||
```python
|
||||
def get_object(
|
||||
self,
|
||||
Bucket: str,
|
||||
Key: str,
|
||||
**kwargs
|
||||
) -> Dict[str, Any]
|
||||
```
|
||||
|
||||
##### Returns
|
||||
|
||||
Dict with Body stream and metadata (identical to boto3).
|
||||
|
||||
#### `create_bucket`
|
||||
|
||||
Create an S3 bucket (boto3-compatible).
|
||||
|
||||
```python
|
||||
def create_bucket(
|
||||
self,
|
||||
Bucket: str,
|
||||
CreateBucketConfiguration: Optional[Dict[str, str]] = None,
|
||||
**kwargs
|
||||
) -> Dict[str, Any]
|
||||
```
|
||||
|
||||
##### Parameters
|
||||
|
||||
- **Bucket** (`str`): Name of the bucket to create.
|
||||
- **CreateBucketConfiguration** (`Optional[Dict[str, str]]`): Bucket configuration with optional LocationConstraint.
|
||||
|
||||
##### Returns
|
||||
|
||||
Dict with Location of created bucket.
|
||||
|
||||
##### Notes
|
||||
|
||||
- Idempotent: Creating an existing bucket returns success
|
||||
- Use for basic bucket creation without advanced S3 features
|
||||
|
||||
##### Examples
|
||||
|
||||
```python
|
||||
# Create bucket in default region
|
||||
client.create_bucket(Bucket='my-releases')
|
||||
|
||||
# Create bucket in specific region
|
||||
client.create_bucket(
|
||||
Bucket='my-backups',
|
||||
CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'}
|
||||
)
|
||||
```
|
||||
|
||||
#### `delete_bucket`
|
||||
|
||||
Delete an S3 bucket (boto3-compatible).
|
||||
|
||||
```python
|
||||
def delete_bucket(
|
||||
self,
|
||||
Bucket: str,
|
||||
**kwargs
|
||||
) -> Dict[str, Any]
|
||||
```
|
||||
|
||||
##### Parameters
|
||||
|
||||
- **Bucket** (`str`): Name of the bucket to delete.
|
||||
|
||||
##### Returns
|
||||
|
||||
Dict confirming deletion.
|
||||
|
||||
##### Notes
|
||||
|
||||
- Idempotent: Deleting a non-existent bucket returns success
|
||||
- Bucket must be empty before deletion
|
||||
|
||||
##### Examples
|
||||
|
||||
```python
|
||||
# Delete empty bucket
|
||||
client.delete_bucket(Bucket='old-releases')
|
||||
```
|
||||
|
||||
#### `list_buckets`
|
||||
|
||||
List all S3 buckets (boto3-compatible). Includes cached statistics when available.
|
||||
|
||||
```python
|
||||
def list_buckets(
|
||||
self,
|
||||
**kwargs
|
||||
) -> Dict[str, Any]
|
||||
```
|
||||
|
||||
##### Returns
|
||||
|
||||
Dict with list of buckets and owner information (identical to boto3). Each bucket may include optional `DeltaGliderStats` metadata if statistics have been previously cached.
|
||||
|
||||
##### Response Structure
|
||||
|
||||
```python
|
||||
{
|
||||
'Buckets': [
|
||||
{
|
||||
'Name': 'bucket-name',
|
||||
'CreationDate': datetime(2025, 1, 1),
|
||||
'DeltaGliderStats': { # Optional, only if cached
|
||||
'Cached': True,
|
||||
'Detailed': bool, # Whether detailed stats were fetched
|
||||
'ObjectCount': int,
|
||||
'TotalSize': int,
|
||||
'CompressedSize': int,
|
||||
'SpaceSaved': int,
|
||||
'AverageCompressionRatio': float,
|
||||
'DeltaObjects': int,
|
||||
'DirectObjects': int
|
||||
}
|
||||
}
|
||||
],
|
||||
'Owner': {...}
|
||||
}
|
||||
```
|
||||
|
||||
##### Examples
|
||||
|
||||
```python
|
||||
# List all buckets
|
||||
response = client.list_buckets()
|
||||
for bucket in response['Buckets']:
|
||||
print(f"{bucket['Name']} - Created: {bucket['CreationDate']}")
|
||||
|
||||
# Check if stats are cached
|
||||
if 'DeltaGliderStats' in bucket:
|
||||
stats = bucket['DeltaGliderStats']
|
||||
print(f" Cached stats: {stats['ObjectCount']} objects, "
|
||||
f"{stats['AverageCompressionRatio']:.1%} compression")
|
||||
|
||||
# Fetch stats first, then list buckets to see cached data
|
||||
client.get_bucket_stats('my-bucket', detailed_stats=True)
|
||||
response = client.list_buckets()
|
||||
# Now 'my-bucket' will include DeltaGliderStats in response
|
||||
```
|
||||
|
||||
### Simple API Methods
|
||||
|
||||
#### `upload`
|
||||
|
||||
@@ -209,6 +526,104 @@ else:
|
||||
# Re-upload or investigate
|
||||
```
|
||||
|
||||
### Cache Management Methods
|
||||
|
||||
DeltaGlider maintains two types of caches for performance optimization:
|
||||
1. **Reference cache**: Binary reference files used for delta reconstruction
|
||||
2. **Statistics cache**: Bucket statistics (session-scoped)
|
||||
|
||||
#### `clear_cache`
|
||||
|
||||
Clear all cached data including reference files and bucket statistics.
|
||||
|
||||
```python
|
||||
def clear_cache(self) -> None
|
||||
```
|
||||
|
||||
##### Description
|
||||
|
||||
Removes all cached reference files from the local filesystem and invalidates all bucket statistics. Useful for:
|
||||
- Forcing fresh statistics computation
|
||||
- Freeing disk space in long-running applications
|
||||
- Ensuring latest data after external bucket modifications
|
||||
- Testing and development workflows
|
||||
|
||||
##### Cache Types Cleared
|
||||
|
||||
1. **Reference Cache**: Binary reference files stored in `/tmp/deltaglider-*/`
|
||||
- Encrypted at rest with ephemeral keys
|
||||
- Content-addressed storage (SHA256-based filenames)
|
||||
- Automatically cleaned up on process exit
|
||||
|
||||
2. **Statistics Cache**: Bucket statistics cached per client session
|
||||
- Metadata about compression ratios and object counts
|
||||
- Session-scoped (not persisted to disk)
|
||||
- Automatically invalidated on bucket mutations
|
||||
|
||||
##### Examples
|
||||
|
||||
```python
|
||||
# Long-running application
|
||||
client = create_client()
|
||||
|
||||
# Work with files
|
||||
for i in range(1000):
|
||||
client.upload(f"file_{i}.zip", "s3://bucket/")
|
||||
|
||||
# Periodic cache cleanup to prevent disk buildup
|
||||
if i % 100 == 0:
|
||||
client.clear_cache()
|
||||
|
||||
# Force fresh statistics after external changes
|
||||
stats_before = client.get_bucket_stats('releases') # Cached
|
||||
# ... external tool modifies bucket ...
|
||||
client.clear_cache()
|
||||
stats_after = client.get_bucket_stats('releases') # Fresh data
|
||||
|
||||
# Development workflow
|
||||
client.clear_cache() # Start with clean state
|
||||
```
|
||||
|
||||
#### `evict_cache`
|
||||
|
||||
Remove a specific cached reference file from the local cache.
|
||||
|
||||
```python
|
||||
def evict_cache(self, s3_url: str) -> None
|
||||
```
|
||||
|
||||
##### Parameters
|
||||
|
||||
- **s3_url** (`str`): S3 URL of the reference file to evict (e.g., `s3://bucket/prefix/reference.bin`)
|
||||
|
||||
##### Description
|
||||
|
||||
Removes a specific reference file from the cache without affecting other cached files or statistics. Useful for:
|
||||
- Selective cache invalidation when specific references are updated
|
||||
- Memory management in applications with many delta spaces
|
||||
- Testing specific delta compression scenarios
|
||||
|
||||
##### Examples
|
||||
|
||||
```python
|
||||
# Evict specific reference after update
|
||||
client.upload("new-reference.zip", "s3://releases/v2.0.0/")
|
||||
client.evict_cache("s3://releases/v2.0.0/reference.bin")
|
||||
|
||||
# Next upload will fetch fresh reference
|
||||
client.upload("similar-file.zip", "s3://releases/v2.0.0/")
|
||||
|
||||
# Selective eviction for specific delta spaces
|
||||
delta_spaces = ["v1.0.0", "v1.1.0", "v1.2.0"]
|
||||
for space in delta_spaces:
|
||||
client.evict_cache(f"s3://releases/{space}/reference.bin")
|
||||
```
|
||||
|
||||
##### See Also
|
||||
|
||||
- [docs/CACHE_MANAGEMENT.md](../../CACHE_MANAGEMENT.md): Complete cache management guide
|
||||
- `clear_cache()`: Clear all caches
|
||||
|
||||
#### `lifecycle_policy`
|
||||
|
||||
Set lifecycle policy for S3 prefix (placeholder for future implementation).
|
||||
@@ -472,9 +887,10 @@ DeltaGlider respects these environment variables:
|
||||
### DeltaGlider Configuration
|
||||
|
||||
- **DG_LOG_LEVEL**: Logging level (DEBUG, INFO, WARNING, ERROR)
|
||||
- **DG_CACHE_DIR**: Local cache directory
|
||||
- **DG_MAX_RATIO**: Default maximum delta ratio
|
||||
|
||||
**Note**: Cache is automatically managed (ephemeral, process-isolated) and requires no configuration.
|
||||
|
||||
### Example
|
||||
|
||||
```bash
|
||||
@@ -485,10 +901,9 @@ export AWS_SECRET_ACCESS_KEY=minioadmin
|
||||
|
||||
# Configure DeltaGlider
|
||||
export DG_LOG_LEVEL=DEBUG
|
||||
export DG_CACHE_DIR=/var/cache/deltaglider
|
||||
export DG_MAX_RATIO=0.3
|
||||
|
||||
# Now use normally
|
||||
# Now use normally (cache managed automatically)
|
||||
python my_script.py
|
||||
```
|
||||
|
||||
|
||||
@@ -4,14 +4,612 @@ Real-world examples and patterns for using DeltaGlider in production application
|
||||
|
||||
## Table of Contents
|
||||
|
||||
1. [Software Release Management](#software-release-management)
|
||||
2. [Database Backup System](#database-backup-system)
|
||||
3. [CI/CD Pipeline Integration](#cicd-pipeline-integration)
|
||||
4. [Container Registry Storage](#container-registry-storage)
|
||||
5. [Machine Learning Model Versioning](#machine-learning-model-versioning)
|
||||
6. [Game Asset Distribution](#game-asset-distribution)
|
||||
7. [Log Archive Management](#log-archive-management)
|
||||
8. [Multi-Region Replication](#multi-region-replication)
|
||||
1. [Performance-Optimized Bucket Listing](#performance-optimized-bucket-listing)
|
||||
2. [Bucket Statistics and Monitoring](#bucket-statistics-and-monitoring)
|
||||
3. [Session-Level Cache Management](#session-level-cache-management)
|
||||
4. [Bucket Management](#bucket-management)
|
||||
5. [Software Release Management](#software-release-management)
|
||||
6. [Database Backup System](#database-backup-system)
|
||||
7. [CI/CD Pipeline Integration](#cicd-pipeline-integration)
|
||||
8. [Container Registry Storage](#container-registry-storage)
|
||||
9. [Machine Learning Model Versioning](#machine-learning-model-versioning)
|
||||
10. [Game Asset Distribution](#game-asset-distribution)
|
||||
11. [Log Archive Management](#log-archive-management)
|
||||
12. [Multi-Region Replication](#multi-region-replication)
|
||||
|
||||
## Performance-Optimized Bucket Listing
|
||||
|
||||
DeltaGlider's smart `list_objects` method eliminates the N+1 query problem by intelligently managing metadata fetching.
|
||||
|
||||
### Fast Web UI Listing (No Metadata)
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
import time
|
||||
|
||||
client = create_client()
|
||||
|
||||
def fast_bucket_listing(bucket: str):
|
||||
"""Ultra-fast listing for web UI display (~50ms for 1000 objects)."""
|
||||
start = time.time()
|
||||
|
||||
# Default: FetchMetadata=False - no HEAD requests
|
||||
response = client.list_objects(
|
||||
Bucket=bucket,
|
||||
MaxKeys=100 # Pagination for UI
|
||||
)
|
||||
|
||||
# Process objects for display
|
||||
items = []
|
||||
for obj in response.contents:
|
||||
items.append({
|
||||
"key": obj.key,
|
||||
"size": obj.size,
|
||||
"last_modified": obj.last_modified,
|
||||
"is_delta": obj.is_delta, # Determined from filename
|
||||
# No compression_ratio - would require HEAD request
|
||||
})
|
||||
|
||||
elapsed = time.time() - start
|
||||
print(f"Listed {len(items)} objects in {elapsed*1000:.0f}ms")
|
||||
|
||||
return items, response.next_continuation_token
|
||||
|
||||
# Example: List first page
|
||||
items, next_token = fast_bucket_listing('releases')
|
||||
```
|
||||
|
||||
### Paginated Listing for Large Buckets
|
||||
|
||||
```python
|
||||
def paginated_listing(bucket: str, page_size: int = 50):
|
||||
"""Efficiently paginate through large buckets."""
|
||||
all_objects = []
|
||||
continuation_token = None
|
||||
|
||||
while True:
|
||||
response = client.list_objects(
|
||||
Bucket=bucket,
|
||||
MaxKeys=page_size,
|
||||
ContinuationToken=continuation_token,
|
||||
FetchMetadata=False # Keep it fast
|
||||
)
|
||||
|
||||
all_objects.extend(response.contents)
|
||||
|
||||
if not response.is_truncated:
|
||||
break
|
||||
|
||||
continuation_token = response.next_continuation_token
|
||||
print(f"Fetched {len(all_objects)} objects so far...")
|
||||
|
||||
return all_objects
|
||||
|
||||
# Example: List all objects efficiently
|
||||
all_objects = paginated_listing('releases', page_size=100)
|
||||
print(f"Total objects: {len(all_objects)}")
|
||||
```
|
||||
|
||||
### Analytics Dashboard with Compression Stats
|
||||
|
||||
```python
|
||||
def dashboard_with_stats(bucket: str):
|
||||
"""Dashboard view with optional detailed stats."""
|
||||
|
||||
# Quick overview (fast - no metadata)
|
||||
stats = client.get_bucket_stats(bucket, detailed_stats=False)
|
||||
|
||||
print(f"Quick Stats for {bucket}:")
|
||||
print(f" Total Objects: {stats.object_count}")
|
||||
print(f" Delta Files: {stats.delta_objects}")
|
||||
print(f" Regular Files: {stats.direct_objects}")
|
||||
print(f" Total Size: {stats.total_size / (1024**3):.2f} GB")
|
||||
print(f" Stored Size: {stats.compressed_size / (1024**3):.2f} GB")
|
||||
|
||||
# Detailed compression analysis (slower - fetches metadata for deltas only)
|
||||
if stats.delta_objects > 0:
|
||||
detailed_stats = client.get_bucket_stats(bucket, detailed_stats=True)
|
||||
print(f"\nDetailed Compression Stats:")
|
||||
print(f" Average Compression: {detailed_stats.average_compression_ratio:.1%}")
|
||||
print(f" Space Saved: {detailed_stats.space_saved / (1024**3):.2f} GB")
|
||||
|
||||
# Example usage
|
||||
dashboard_with_stats('releases')
|
||||
```
|
||||
|
||||
### Smart Metadata Fetching for Analytics
|
||||
|
||||
```python
|
||||
def compression_analysis(bucket: str, prefix: str = ""):
|
||||
"""Analyze compression effectiveness with selective metadata fetching."""
|
||||
|
||||
# Only fetch metadata when we need compression stats
|
||||
response = client.list_objects(
|
||||
Bucket=bucket,
|
||||
Prefix=prefix,
|
||||
FetchMetadata=True # Fetches metadata ONLY for .delta files
|
||||
)
|
||||
|
||||
# Analyze compression effectiveness
|
||||
delta_files = [obj for obj in response.contents if obj.is_delta]
|
||||
|
||||
if delta_files:
|
||||
total_original = sum(obj.original_size for obj in delta_files)
|
||||
total_compressed = sum(obj.compressed_size for obj in delta_files)
|
||||
avg_ratio = (total_original - total_compressed) / total_original
|
||||
|
||||
print(f"Compression Analysis for {prefix or 'all files'}:")
|
||||
print(f" Delta Files: {len(delta_files)}")
|
||||
print(f" Original Size: {total_original / (1024**2):.1f} MB")
|
||||
print(f" Compressed Size: {total_compressed / (1024**2):.1f} MB")
|
||||
print(f" Average Compression: {avg_ratio:.1%}")
|
||||
|
||||
# Find best and worst compression
|
||||
best = max(delta_files, key=lambda x: x.compression_ratio or 0)
|
||||
worst = min(delta_files, key=lambda x: x.compression_ratio or 1)
|
||||
|
||||
print(f" Best Compression: {best.key} ({best.compression_ratio:.1%})")
|
||||
print(f" Worst Compression: {worst.key} ({worst.compression_ratio:.1%})")
|
||||
|
||||
# Example: Analyze v2.0 releases
|
||||
compression_analysis('releases', 'v2.0/')
|
||||
```
|
||||
|
||||
### Performance Comparison
|
||||
|
||||
```python
|
||||
def performance_comparison(bucket: str):
|
||||
"""Compare performance with and without metadata fetching."""
|
||||
import time
|
||||
|
||||
# Test 1: Fast listing (no metadata)
|
||||
start = time.time()
|
||||
response_fast = client.list_objects(
|
||||
Bucket=bucket,
|
||||
MaxKeys=100,
|
||||
FetchMetadata=False # Default
|
||||
)
|
||||
time_fast = (time.time() - start) * 1000
|
||||
|
||||
# Test 2: Detailed listing (with metadata for deltas)
|
||||
start = time.time()
|
||||
response_detailed = client.list_objects(
|
||||
Bucket=bucket,
|
||||
MaxKeys=100,
|
||||
FetchMetadata=True # Fetches for delta files only
|
||||
)
|
||||
time_detailed = (time.time() - start) * 1000
|
||||
|
||||
delta_count = sum(1 for obj in response_fast.contents if obj.is_delta)
|
||||
|
||||
print(f"Performance Comparison for {bucket}:")
|
||||
print(f" Fast Listing: {time_fast:.0f}ms (1 API call)")
|
||||
print(f" Detailed Listing: {time_detailed:.0f}ms (1 + {delta_count} API calls)")
|
||||
print(f" Speed Improvement: {time_detailed/time_fast:.1f}x slower with metadata")
|
||||
print(f"\nRecommendation: Use FetchMetadata=True only when you need:")
|
||||
print(" - Exact original file sizes for delta files")
|
||||
print(" - Accurate compression ratios")
|
||||
print(" - Reference key information")
|
||||
|
||||
# Example: Compare performance
|
||||
performance_comparison('releases')
|
||||
```
|
||||
|
||||
### Best Practices
|
||||
|
||||
1. **Default to Fast Mode**: Always use `FetchMetadata=False` (default) unless you specifically need compression stats.
|
||||
|
||||
2. **Never Fetch for Non-Deltas**: The SDK automatically skips metadata fetching for non-delta files even when `FetchMetadata=True`.
|
||||
|
||||
## Bucket Statistics and Monitoring
|
||||
|
||||
DeltaGlider provides powerful bucket statistics with session-level caching for performance.
|
||||
|
||||
### Quick Dashboard Stats (Cached)
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
|
||||
client = create_client()
|
||||
|
||||
def show_bucket_dashboard(bucket: str):
|
||||
"""Display real-time bucket statistics with caching."""
|
||||
|
||||
# First call: computes stats (~50ms)
|
||||
stats = client.get_bucket_stats(bucket)
|
||||
|
||||
# Second call: instant (cached)
|
||||
stats = client.get_bucket_stats(bucket)
|
||||
|
||||
print(f"Dashboard for {stats.bucket}")
|
||||
print(f"=" * 60)
|
||||
print(f"Total Objects: {stats.object_count:,}")
|
||||
print(f" Delta Objects: {stats.delta_objects:,}")
|
||||
print(f" Direct Objects: {stats.direct_objects:,}")
|
||||
print()
|
||||
print(f"Original Size: {stats.total_size / (1024**3):.2f} GB")
|
||||
print(f"Stored Size: {stats.compressed_size / (1024**3):.2f} GB")
|
||||
print(f"Space Saved: {stats.space_saved / (1024**3):.2f} GB")
|
||||
print(f"Compression Ratio: {stats.average_compression_ratio:.1%}")
|
||||
|
||||
# Example: Show stats for multiple buckets (each cached separately)
|
||||
for bucket_name in ['releases', 'backups', 'archives']:
|
||||
show_bucket_dashboard(bucket_name)
|
||||
```
|
||||
|
||||
### Detailed Compression Analysis
|
||||
|
||||
```python
|
||||
def detailed_compression_report(bucket: str):
|
||||
"""Generate detailed compression report with accurate ratios."""
|
||||
|
||||
# Detailed stats fetch metadata for delta files (slower, accurate)
|
||||
stats = client.get_bucket_stats(bucket, detailed_stats=True)
|
||||
|
||||
efficiency = (stats.space_saved / stats.total_size * 100) if stats.total_size > 0 else 0
|
||||
|
||||
print(f"Detailed Compression Report: {stats.bucket}")
|
||||
print(f"=" * 60)
|
||||
print(f"Object Distribution:")
|
||||
print(f" Total: {stats.object_count:,}")
|
||||
print(f" Delta-Compressed: {stats.delta_objects:,} ({stats.delta_objects/stats.object_count*100:.1f}%)")
|
||||
print(f" Direct Storage: {stats.direct_objects:,} ({stats.direct_objects/stats.object_count*100:.1f}%)")
|
||||
print()
|
||||
print(f"Storage Efficiency:")
|
||||
print(f" Original Data: {stats.total_size / (1024**3):.2f} GB")
|
||||
print(f" Actual Storage: {stats.compressed_size / (1024**3):.2f} GB")
|
||||
print(f" Space Saved: {stats.space_saved / (1024**3):.2f} GB")
|
||||
print(f" Efficiency: {efficiency:.1f}%")
|
||||
print(f" Avg Compression: {stats.average_compression_ratio:.2%}")
|
||||
|
||||
# Calculate estimated monthly costs (example: $0.023/GB S3 Standard)
|
||||
cost_without = stats.total_size / (1024**3) * 0.023
|
||||
cost_with = stats.compressed_size / (1024**3) * 0.023
|
||||
monthly_savings = cost_without - cost_with
|
||||
|
||||
print()
|
||||
print(f"Estimated Monthly S3 Costs ($0.023/GB):")
|
||||
print(f" Without DeltaGlider: ${cost_without:.2f}")
|
||||
print(f" With DeltaGlider: ${cost_with:.2f}")
|
||||
print(f" Monthly Savings: ${monthly_savings:.2f}")
|
||||
|
||||
# Example: Detailed report
|
||||
detailed_compression_report('releases')
|
||||
```
|
||||
|
||||
### List Buckets with Cached Stats
|
||||
|
||||
```python
|
||||
def list_buckets_with_stats():
|
||||
"""List all buckets and show cached statistics if available."""
|
||||
|
||||
# Pre-fetch stats for important buckets
|
||||
important_buckets = ['releases', 'backups']
|
||||
for bucket_name in important_buckets:
|
||||
client.get_bucket_stats(bucket_name, detailed_stats=True)
|
||||
|
||||
# List all buckets (includes cached stats automatically)
|
||||
response = client.list_buckets()
|
||||
|
||||
print("All Buckets:")
|
||||
print(f"{'Name':<30} {'Objects':<10} {'Compression':<15} {'Cached'}")
|
||||
print("=" * 70)
|
||||
|
||||
for bucket in response['Buckets']:
|
||||
name = bucket['Name']
|
||||
|
||||
# Check if stats are cached
|
||||
if 'DeltaGliderStats' in bucket:
|
||||
stats = bucket['DeltaGliderStats']
|
||||
obj_count = f"{stats['ObjectCount']:,}"
|
||||
compression = f"{stats['AverageCompressionRatio']:.1%}"
|
||||
cached = "✓ (detailed)" if stats['Detailed'] else "✓ (quick)"
|
||||
else:
|
||||
obj_count = "N/A"
|
||||
compression = "N/A"
|
||||
cached = "✗"
|
||||
|
||||
print(f"{name:<30} {obj_count:<10} {compression:<15} {cached}")
|
||||
|
||||
# Example: List with stats
|
||||
list_buckets_with_stats()
|
||||
```
|
||||
|
||||
### Monitoring Dashboard (Real-Time)
|
||||
|
||||
```python
|
||||
import time
|
||||
|
||||
def monitoring_dashboard(buckets: list[str], refresh_seconds: int = 60):
|
||||
"""Real-time monitoring dashboard with periodic refresh."""
|
||||
|
||||
while True:
|
||||
print("\033[2J\033[H") # Clear screen
|
||||
print(f"DeltaGlider Monitoring Dashboard - {time.strftime('%Y-%m-%d %H:%M:%S')}")
|
||||
print("=" * 80)
|
||||
|
||||
for bucket_name in buckets:
|
||||
# Get cached stats (instant) or compute fresh
|
||||
stats = client.get_bucket_stats(bucket_name)
|
||||
|
||||
print(f"\n{bucket_name}:")
|
||||
print(f" Objects: {stats.object_count:,} | "
|
||||
f"Delta: {stats.delta_objects:,} | "
|
||||
f"Direct: {stats.direct_objects:,}")
|
||||
print(f" Size: {stats.compressed_size/(1024**3):.2f} GB | "
|
||||
f"Saved: {stats.space_saved/(1024**3):.2f} GB | "
|
||||
f"Compression: {stats.average_compression_ratio:.1%}")
|
||||
|
||||
print(f"\n{'=' * 80}")
|
||||
print(f"Refreshing in {refresh_seconds} seconds... (Ctrl+C to exit)")
|
||||
|
||||
time.sleep(refresh_seconds)
|
||||
|
||||
# Clear cache for fresh data on next iteration
|
||||
client.clear_cache()
|
||||
|
||||
# Example: Monitor key buckets
|
||||
try:
|
||||
monitoring_dashboard(['releases', 'backups', 'archives'], refresh_seconds=30)
|
||||
except KeyboardInterrupt:
|
||||
print("\nMonitoring stopped.")
|
||||
```
|
||||
|
||||
## Session-Level Cache Management
|
||||
|
||||
DeltaGlider maintains session-level caches for optimal performance in long-running applications.
|
||||
|
||||
### Long-Running Application Pattern
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
import time
|
||||
|
||||
def long_running_upload_service():
|
||||
"""Upload service with periodic cache cleanup."""
|
||||
|
||||
client = create_client()
|
||||
processed_count = 0
|
||||
|
||||
while True:
|
||||
# Simulate file processing
|
||||
files_to_upload = get_pending_files() # Your file queue
|
||||
|
||||
for file_path in files_to_upload:
|
||||
try:
|
||||
summary = client.upload(file_path, "s3://releases/")
|
||||
processed_count += 1
|
||||
|
||||
print(f"Uploaded {file_path}: {summary.savings_percent:.0f}% saved")
|
||||
|
||||
# Periodic cache cleanup (every 100 files)
|
||||
if processed_count % 100 == 0:
|
||||
client.clear_cache()
|
||||
print(f"Cache cleared after {processed_count} files")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error uploading {file_path}: {e}")
|
||||
|
||||
time.sleep(60) # Check for new files every minute
|
||||
|
||||
# Example: Run upload service
|
||||
# long_running_upload_service()
|
||||
```
|
||||
|
||||
### Cache Invalidation After External Changes
|
||||
|
||||
```python
|
||||
def handle_external_bucket_changes(bucket: str):
|
||||
"""Refresh statistics after external tools modify bucket."""
|
||||
|
||||
# Get initial stats (cached)
|
||||
stats_before = client.get_bucket_stats(bucket)
|
||||
print(f"Before: {stats_before.object_count} objects")
|
||||
|
||||
# External process modifies bucket
|
||||
print("External backup tool running...")
|
||||
run_external_backup_tool(bucket) # Your external tool
|
||||
|
||||
# Clear cache to get fresh data
|
||||
client.clear_cache()
|
||||
|
||||
# Get updated stats
|
||||
stats_after = client.get_bucket_stats(bucket)
|
||||
print(f"After: {stats_after.object_count} objects")
|
||||
print(f"Added: {stats_after.object_count - stats_before.object_count} objects")
|
||||
|
||||
# Example usage
|
||||
handle_external_bucket_changes('backups')
|
||||
```
|
||||
|
||||
### Selective Cache Eviction
|
||||
|
||||
```python
|
||||
def selective_cache_management():
|
||||
"""Manage cache for specific delta spaces."""
|
||||
|
||||
client = create_client()
|
||||
|
||||
# Upload to multiple delta spaces
|
||||
versions = ['v1.0.0', 'v1.1.0', 'v1.2.0']
|
||||
|
||||
for version in versions:
|
||||
client.upload(f"app-{version}.zip", f"s3://releases/{version}/")
|
||||
|
||||
# Update reference for specific version
|
||||
print("Updating v1.1.0 reference...")
|
||||
client.upload("new-reference.zip", "s3://releases/v1.1.0/")
|
||||
|
||||
# Evict only v1.1.0 cache (others remain cached)
|
||||
client.evict_cache("s3://releases/v1.1.0/reference.bin")
|
||||
|
||||
# Next upload to v1.1.0 fetches fresh reference
|
||||
# v1.0.0 and v1.2.0 still use cached references
|
||||
client.upload("similar-file.zip", "s3://releases/v1.1.0/")
|
||||
|
||||
# Example: Selective eviction
|
||||
selective_cache_management()
|
||||
```
|
||||
|
||||
### Testing with Clean Cache
|
||||
|
||||
```python
|
||||
import pytest
|
||||
from deltaglider import create_client
|
||||
|
||||
def test_upload_workflow():
|
||||
"""Test with clean cache state."""
|
||||
|
||||
client = create_client()
|
||||
client.clear_cache() # Start with clean state
|
||||
|
||||
# Test first upload (no reference exists)
|
||||
summary1 = client.upload("file1.zip", "s3://test-bucket/prefix/")
|
||||
assert not summary1.is_delta # First file is reference
|
||||
|
||||
# Test subsequent upload (uses cached reference)
|
||||
summary2 = client.upload("file2.zip", "s3://test-bucket/prefix/")
|
||||
assert summary2.is_delta # Should use delta
|
||||
|
||||
# Clear and test again
|
||||
client.clear_cache()
|
||||
summary3 = client.upload("file3.zip", "s3://test-bucket/prefix/")
|
||||
assert summary3.is_delta # Still delta (reference in S3)
|
||||
|
||||
# Run test
|
||||
# test_upload_workflow()
|
||||
```
|
||||
|
||||
### Cache Performance Monitoring
|
||||
|
||||
```python
|
||||
import time
|
||||
|
||||
def measure_cache_performance(bucket: str):
|
||||
"""Measure performance impact of caching."""
|
||||
|
||||
client = create_client()
|
||||
|
||||
# Test 1: Cold cache
|
||||
client.clear_cache()
|
||||
start = time.time()
|
||||
stats1 = client.get_bucket_stats(bucket, detailed_stats=True)
|
||||
cold_time = (time.time() - start) * 1000
|
||||
|
||||
# Test 2: Warm cache
|
||||
start = time.time()
|
||||
stats2 = client.get_bucket_stats(bucket, detailed_stats=True)
|
||||
warm_time = (time.time() - start) * 1000
|
||||
|
||||
# Test 3: Quick stats from detailed cache
|
||||
start = time.time()
|
||||
stats3 = client.get_bucket_stats(bucket, detailed_stats=False)
|
||||
reuse_time = (time.time() - start) * 1000
|
||||
|
||||
print(f"Cache Performance for {bucket}:")
|
||||
print(f" Cold Cache (detailed): {cold_time:.0f}ms")
|
||||
print(f" Warm Cache (detailed): {warm_time:.0f}ms")
|
||||
print(f" Cache Reuse (quick): {reuse_time:.0f}ms")
|
||||
print(f" Speedup (detailed): {cold_time/warm_time:.1f}x")
|
||||
print(f" Speedup (reuse): {cold_time/reuse_time:.1f}x")
|
||||
|
||||
# Example: Measure cache performance
|
||||
measure_cache_performance('releases')
|
||||
```
|
||||
|
||||
3. **Use Pagination**: For large buckets, use `MaxKeys` and `ContinuationToken` to paginate results.
|
||||
|
||||
4. **Cache Results**: If you need metadata frequently, consider caching the results to avoid repeated HEAD requests.
|
||||
|
||||
5. **Batch Analytics**: When doing analytics, fetch metadata once and process the results rather than making multiple calls.
|
||||
|
||||
## Bucket Management
|
||||
|
||||
DeltaGlider provides boto3-compatible bucket management methods for creating, listing, and deleting buckets without requiring boto3.
|
||||
|
||||
### Complete Bucket Lifecycle
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
|
||||
client = create_client()
|
||||
|
||||
# Create bucket
|
||||
client.create_bucket(Bucket='my-releases')
|
||||
|
||||
# Create bucket in specific region
|
||||
client.create_bucket(
|
||||
Bucket='eu-backups',
|
||||
CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'}
|
||||
)
|
||||
|
||||
# List all buckets
|
||||
response = client.list_buckets()
|
||||
for bucket in response['Buckets']:
|
||||
print(f"{bucket['Name']} - Created: {bucket['CreationDate']}")
|
||||
|
||||
# Upload some objects
|
||||
with open('app-v1.0.0.zip', 'rb') as f:
|
||||
client.put_object(Bucket='my-releases', Key='v1.0.0/app.zip', Body=f)
|
||||
|
||||
# Delete objects first (bucket must be empty)
|
||||
client.delete_object(Bucket='my-releases', Key='v1.0.0/app.zip')
|
||||
|
||||
# Delete bucket
|
||||
client.delete_bucket(Bucket='my-releases')
|
||||
```
|
||||
|
||||
### Idempotent Operations
|
||||
|
||||
Bucket management operations are idempotent for safe automation:
|
||||
|
||||
```python
|
||||
# Creating existing bucket returns success (no error)
|
||||
client.create_bucket(Bucket='my-releases')
|
||||
client.create_bucket(Bucket='my-releases') # Safe, returns success
|
||||
|
||||
# Deleting non-existent bucket returns success (no error)
|
||||
client.delete_bucket(Bucket='non-existent') # Safe, returns success
|
||||
```
|
||||
|
||||
### Hybrid boto3/DeltaGlider Usage
|
||||
|
||||
For advanced S3 features not in DeltaGlider's 21 core methods, use boto3 directly:
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
import boto3
|
||||
|
||||
# DeltaGlider for core operations with compression
|
||||
dg_client = create_client()
|
||||
|
||||
# boto3 for advanced features
|
||||
s3_client = boto3.client('s3')
|
||||
|
||||
# Use DeltaGlider for object operations (with compression)
|
||||
with open('release.zip', 'rb') as f:
|
||||
dg_client.put_object(Bucket='releases', Key='v1.0.0/release.zip', Body=f)
|
||||
|
||||
# Use boto3 for advanced bucket features
|
||||
s3_client.put_bucket_versioning(
|
||||
Bucket='releases',
|
||||
VersioningConfiguration={'Status': 'Enabled'}
|
||||
)
|
||||
|
||||
# Use boto3 for bucket policies
|
||||
policy = {
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [{
|
||||
"Effect": "Allow",
|
||||
"Principal": "*",
|
||||
"Action": "s3:GetObject",
|
||||
"Resource": "arn:aws:s3:::releases/*"
|
||||
}]
|
||||
}
|
||||
s3_client.put_bucket_policy(Bucket='releases', Policy=json.dumps(policy))
|
||||
```
|
||||
|
||||
See [BOTO3_COMPATIBILITY.md](../../BOTO3_COMPATIBILITY.md) for complete method coverage.
|
||||
|
||||
## Software Release Management
|
||||
|
||||
|
||||
@@ -69,6 +69,42 @@ Or via environment variable:
|
||||
export AWS_ENDPOINT_URL=http://minio.local:9000
|
||||
```
|
||||
|
||||
### DeltaGlider Configuration
|
||||
|
||||
DeltaGlider supports the following environment variables:
|
||||
|
||||
**Logging & Performance**:
|
||||
- `DG_LOG_LEVEL`: Logging level (default: `INFO`, options: `DEBUG`, `INFO`, `WARNING`, `ERROR`)
|
||||
- `DG_MAX_RATIO`: Maximum delta/file ratio (default: `0.5`, range: `0.0-1.0`)
|
||||
- **See [DG_MAX_RATIO.md](../DG_MAX_RATIO.md) for complete tuning guide**
|
||||
- Controls when to use delta compression vs. direct storage
|
||||
- Lower (0.2-0.3) = conservative, only high-quality compression
|
||||
- Higher (0.6-0.7) = permissive, accept modest savings
|
||||
|
||||
**Cache Configuration**:
|
||||
- `DG_CACHE_BACKEND`: Cache backend type (default: `filesystem`, options: `filesystem`, `memory`)
|
||||
- `DG_CACHE_MEMORY_SIZE_MB`: Memory cache size in MB (default: `100`)
|
||||
- `DG_CACHE_ENCRYPTION_KEY`: Optional base64-encoded Fernet key for persistent encryption
|
||||
|
||||
**Security**:
|
||||
- Encryption is **always enabled** (cannot be disabled)
|
||||
- Ephemeral encryption keys per process (forward secrecy)
|
||||
- Corrupted cache files automatically deleted
|
||||
- Set `DG_CACHE_ENCRYPTION_KEY` only for cross-process cache sharing
|
||||
|
||||
**Example**:
|
||||
```bash
|
||||
# Use memory cache for faster performance in CI/CD
|
||||
export DG_CACHE_BACKEND=memory
|
||||
export DG_CACHE_MEMORY_SIZE_MB=500
|
||||
|
||||
# Enable debug logging
|
||||
export DG_LOG_LEVEL=DEBUG
|
||||
|
||||
# Adjust delta compression threshold
|
||||
export DG_MAX_RATIO=0.3 # More aggressive compression
|
||||
```
|
||||
|
||||
## Your First Upload
|
||||
|
||||
### Basic Example
|
||||
|
||||
64
examples/boto3_compatible_types.py
Normal file
64
examples/boto3_compatible_types.py
Normal file
@@ -0,0 +1,64 @@
|
||||
"""Example: Using boto3-compatible responses without importing boto3.
|
||||
|
||||
This demonstrates how DeltaGlider provides full type safety and boto3 compatibility
|
||||
without requiring boto3 imports in user code.
|
||||
|
||||
As of v5.0.0, DeltaGlider returns plain dicts (not custom dataclasses) that are
|
||||
100% compatible with boto3 S3 responses. You get IDE autocomplete through TypedDict
|
||||
type hints without any runtime overhead.
|
||||
"""
|
||||
|
||||
from deltaglider import ListObjectsV2Response, S3Object, create_client
|
||||
|
||||
# Create client (no boto3 import needed!)
|
||||
client = create_client()
|
||||
|
||||
# Type hints work perfectly without boto3
|
||||
def process_files(bucket: str, prefix: str) -> None:
|
||||
"""Process files in S3 with full type safety."""
|
||||
# Return type is fully typed - IDE autocomplete works!
|
||||
response: ListObjectsV2Response = client.list_objects(
|
||||
Bucket=bucket, Prefix=prefix, Delimiter="/"
|
||||
)
|
||||
|
||||
# Response is a plain dict - 100% boto3-compatible
|
||||
# TypedDict provides autocomplete and type checking
|
||||
for obj in response["Contents"]:
|
||||
# obj is typed as S3Object - all fields have autocomplete!
|
||||
key: str = obj["Key"] # ✅ IDE knows this is str
|
||||
size: int = obj["Size"] # ✅ IDE knows this is int
|
||||
print(f"{key}: {size} bytes")
|
||||
|
||||
# DeltaGlider metadata is in the standard Metadata field
|
||||
metadata = obj.get("Metadata", {})
|
||||
if metadata.get("deltaglider-is-delta") == "true":
|
||||
compression = metadata.get("deltaglider-compression-ratio", "unknown")
|
||||
print(f" └─ Delta file (compression: {compression})")
|
||||
|
||||
# Optional fields work too
|
||||
for prefix_dict in response.get("CommonPrefixes", []):
|
||||
print(f"Directory: {prefix_dict['Prefix']}")
|
||||
|
||||
# Pagination info
|
||||
if response.get("IsTruncated"):
|
||||
next_token = response.get("NextContinuationToken")
|
||||
print(f"More results available, token: {next_token}")
|
||||
|
||||
|
||||
# This is 100% compatible with boto3 code!
|
||||
def works_with_boto3_or_deltaglider(s3_client) -> None:
|
||||
"""This function works with EITHER boto3 or DeltaGlider client."""
|
||||
# Because the response structure is identical!
|
||||
response = s3_client.list_objects(Bucket="my-bucket")
|
||||
|
||||
for obj in response["Contents"]:
|
||||
print(obj["Key"])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Example usage
|
||||
print("✅ Full type safety without boto3 imports!")
|
||||
print("✅ 100% compatible with boto3")
|
||||
print("✅ Drop-in replacement")
|
||||
print("✅ Plain dict responses (not custom dataclasses)")
|
||||
print("✅ DeltaGlider metadata in standard Metadata field")
|
||||
116
examples/bucket_management.py
Normal file
116
examples/bucket_management.py
Normal file
@@ -0,0 +1,116 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Example: Bucket management without boto3.
|
||||
|
||||
This example shows how to use DeltaGlider's bucket management APIs
|
||||
to create, list, and delete buckets without needing boto3 directly.
|
||||
"""
|
||||
|
||||
from deltaglider import create_client
|
||||
|
||||
# Create client (works with AWS S3, MinIO, or any S3-compatible storage)
|
||||
client = create_client()
|
||||
|
||||
# For local MinIO/S3-compatible storage:
|
||||
# client = create_client(endpoint_url='http://localhost:9000')
|
||||
|
||||
print("=" * 70)
|
||||
print("DeltaGlider Bucket Management Example")
|
||||
print("=" * 70)
|
||||
|
||||
# 1. List existing buckets
|
||||
print("\n1. List all buckets:")
|
||||
try:
|
||||
response = client.list_buckets()
|
||||
if response["Buckets"]:
|
||||
for bucket in response["Buckets"]:
|
||||
print(f" - {bucket['Name']} (created: {bucket.get('CreationDate', 'unknown')})")
|
||||
else:
|
||||
print(" No buckets found")
|
||||
except Exception as e:
|
||||
print(f" Error: {e}")
|
||||
|
||||
# 2. Create a new bucket
|
||||
bucket_name = "my-deltaglider-bucket"
|
||||
print(f"\n2. Create bucket '{bucket_name}':")
|
||||
try:
|
||||
response = client.create_bucket(Bucket=bucket_name)
|
||||
print(f" ✅ Created: {response['Location']}")
|
||||
except Exception as e:
|
||||
print(f" Error: {e}")
|
||||
|
||||
# 3. Create bucket with region (if using AWS)
|
||||
# Uncomment for AWS S3:
|
||||
# print("\n3. Create bucket in specific region:")
|
||||
# try:
|
||||
# response = client.create_bucket(
|
||||
# Bucket='my-regional-bucket',
|
||||
# CreateBucketConfiguration={'LocationConstraint': 'us-west-2'}
|
||||
# )
|
||||
# print(f" ✅ Created: {response['Location']}")
|
||||
# except Exception as e:
|
||||
# print(f" Error: {e}")
|
||||
|
||||
# 4. Upload some files to the bucket
|
||||
print(f"\n4. Upload files to '{bucket_name}':")
|
||||
try:
|
||||
# Upload a simple file
|
||||
client.put_object(
|
||||
Bucket=bucket_name,
|
||||
Key="test-file.txt",
|
||||
Body=b"Hello from DeltaGlider!",
|
||||
)
|
||||
print(" ✅ Uploaded: test-file.txt")
|
||||
except Exception as e:
|
||||
print(f" Error: {e}")
|
||||
|
||||
# 5. List objects in the bucket
|
||||
print(f"\n5. List objects in '{bucket_name}':")
|
||||
try:
|
||||
response = client.list_objects(Bucket=bucket_name)
|
||||
if response.contents:
|
||||
for obj in response.contents:
|
||||
print(f" - {obj.key} ({obj.size} bytes)")
|
||||
else:
|
||||
print(" No objects found")
|
||||
except Exception as e:
|
||||
print(f" Error: {e}")
|
||||
|
||||
# 6. Delete all objects in the bucket (required before deleting bucket)
|
||||
print(f"\n6. Delete all objects in '{bucket_name}':")
|
||||
try:
|
||||
response = client.list_objects(Bucket=bucket_name)
|
||||
for obj in response.contents:
|
||||
client.delete_object(Bucket=bucket_name, Key=obj.key)
|
||||
print(f" ✅ Deleted: {obj.key}")
|
||||
except Exception as e:
|
||||
print(f" Error: {e}")
|
||||
|
||||
# 7. Delete the bucket
|
||||
print(f"\n7. Delete bucket '{bucket_name}':")
|
||||
try:
|
||||
response = client.delete_bucket(Bucket=bucket_name)
|
||||
print(f" ✅ Deleted bucket (status: {response['ResponseMetadata']['HTTPStatusCode']})")
|
||||
except Exception as e:
|
||||
print(f" Error: {e}")
|
||||
|
||||
# 8. Verify bucket is deleted
|
||||
print("\n8. Verify bucket deletion:")
|
||||
try:
|
||||
response = client.list_buckets()
|
||||
bucket_names = [b["Name"] for b in response["Buckets"]]
|
||||
if bucket_name in bucket_names:
|
||||
print(f" ❌ Bucket still exists!")
|
||||
else:
|
||||
print(f" ✅ Bucket successfully deleted")
|
||||
except Exception as e:
|
||||
print(f" Error: {e}")
|
||||
|
||||
print("\n" + "=" * 70)
|
||||
print("✅ Bucket management complete - no boto3 required!")
|
||||
print("=" * 70)
|
||||
|
||||
print("\n📚 Key Benefits:")
|
||||
print(" - No need to import boto3 directly")
|
||||
print(" - Consistent API with other DeltaGlider operations")
|
||||
print(" - Works with AWS S3, MinIO, and S3-compatible storage")
|
||||
print(" - Idempotent operations (safe to retry)")
|
||||
101
examples/credentials_example.py
Normal file
101
examples/credentials_example.py
Normal file
@@ -0,0 +1,101 @@
|
||||
"""Example: Using explicit AWS credentials with DeltaGlider.
|
||||
|
||||
This example demonstrates how to pass AWS credentials directly to
|
||||
DeltaGlider's create_client() function, which is useful when:
|
||||
|
||||
1. You need to use different credentials than your environment default
|
||||
2. You're working with temporary credentials (session tokens)
|
||||
3. You want to avoid relying on environment variables
|
||||
4. You're implementing multi-tenant systems with different AWS accounts
|
||||
"""
|
||||
|
||||
from deltaglider import create_client
|
||||
|
||||
|
||||
def example_basic_credentials():
|
||||
"""Use basic AWS credentials (access key + secret key)."""
|
||||
client = create_client(
|
||||
aws_access_key_id="AKIAIOSFODNN7EXAMPLE",
|
||||
aws_secret_access_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
|
||||
region_name="us-west-2",
|
||||
)
|
||||
|
||||
# Now use the client normally
|
||||
# client.put_object(Bucket="my-bucket", Key="file.zip", Body=b"data")
|
||||
print("✓ Created client with explicit credentials")
|
||||
|
||||
|
||||
def example_temporary_credentials():
|
||||
"""Use temporary AWS credentials (with session token)."""
|
||||
client = create_client(
|
||||
aws_access_key_id="ASIAIOSFODNN7EXAMPLE",
|
||||
aws_secret_access_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
|
||||
aws_session_token="FwoGZXIvYXdzEBEaDH...", # From STS
|
||||
region_name="us-east-1",
|
||||
)
|
||||
|
||||
print("✓ Created client with temporary credentials")
|
||||
|
||||
|
||||
def example_environment_credentials():
|
||||
"""Use default credential chain (environment variables, IAM role, etc.)."""
|
||||
# When credentials are omitted, DeltaGlider uses boto3's default credential chain:
|
||||
# 1. Environment variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
|
||||
# 2. AWS credentials file (~/.aws/credentials)
|
||||
# 3. IAM role (for EC2 instances)
|
||||
client = create_client()
|
||||
|
||||
print("✓ Created client with default credential chain")
|
||||
|
||||
|
||||
def example_minio_credentials():
|
||||
"""Use credentials for MinIO or other S3-compatible services."""
|
||||
client = create_client(
|
||||
endpoint_url="http://localhost:9000",
|
||||
aws_access_key_id="minioadmin",
|
||||
aws_secret_access_key="minioadmin",
|
||||
)
|
||||
|
||||
print("✓ Created client for MinIO with custom credentials")
|
||||
|
||||
|
||||
def example_multi_tenant():
|
||||
"""Example: Different credentials for different tenants."""
|
||||
|
||||
# Tenant A uses one AWS account
|
||||
tenant_a_client = create_client(
|
||||
aws_access_key_id="TENANT_A_KEY",
|
||||
aws_secret_access_key="TENANT_A_SECRET",
|
||||
region_name="us-west-2",
|
||||
)
|
||||
|
||||
# Tenant B uses a different AWS account
|
||||
tenant_b_client = create_client(
|
||||
aws_access_key_id="TENANT_B_KEY",
|
||||
aws_secret_access_key="TENANT_B_SECRET",
|
||||
region_name="eu-west-1",
|
||||
)
|
||||
|
||||
print("✓ Created separate clients for multi-tenant scenario")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
print("DeltaGlider Credentials Examples\n" + "=" * 40)
|
||||
|
||||
print("\n1. Basic credentials:")
|
||||
example_basic_credentials()
|
||||
|
||||
print("\n2. Temporary credentials:")
|
||||
example_temporary_credentials()
|
||||
|
||||
print("\n3. Environment credentials:")
|
||||
example_environment_credentials()
|
||||
|
||||
print("\n4. MinIO credentials:")
|
||||
example_minio_credentials()
|
||||
|
||||
print("\n5. Multi-tenant scenario:")
|
||||
example_multi_tenant()
|
||||
|
||||
print("\n" + "=" * 40)
|
||||
print("All examples completed successfully!")
|
||||
@@ -13,7 +13,7 @@ maintainers = [
|
||||
{name = "Beshu Tech Team", email = "info@beshu.tech"},
|
||||
]
|
||||
readme = "README.md"
|
||||
license = {text = "MIT"}
|
||||
license = "MIT"
|
||||
requires-python = ">=3.11"
|
||||
keywords = [
|
||||
"s3",
|
||||
@@ -35,7 +35,6 @@ classifiers = [
|
||||
"Development Status :: 4 - Beta",
|
||||
"Intended Audience :: Developers",
|
||||
"Intended Audience :: System Administrators",
|
||||
"License :: OSI Approved :: MIT License",
|
||||
"Operating System :: OS Independent",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
@@ -52,6 +51,7 @@ classifiers = [
|
||||
dependencies = [
|
||||
"boto3>=1.35.0",
|
||||
"click>=8.1.0",
|
||||
"cryptography>=42.0.0",
|
||||
"python-dateutil>=2.9.0",
|
||||
]
|
||||
|
||||
@@ -115,7 +115,6 @@ dev-dependencies = [
|
||||
[tool.setuptools_scm]
|
||||
# Automatically determine version from git tags
|
||||
write_to = "src/deltaglider/_version.py"
|
||||
version_scheme = "release-branch-semver"
|
||||
local_scheme = "no-local-version"
|
||||
|
||||
[tool.ruff]
|
||||
@@ -146,8 +145,12 @@ disallow_untyped_defs = true
|
||||
disallow_any_unimported = false
|
||||
no_implicit_optional = true
|
||||
check_untyped_defs = true
|
||||
namespace_packages = true
|
||||
explicit_package_bases = true
|
||||
namespace_packages = false
|
||||
mypy_path = "src"
|
||||
exclude = [
|
||||
"^build/",
|
||||
"^dist/",
|
||||
]
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
minversion = "8.0"
|
||||
|
||||
@@ -7,23 +7,36 @@ except ImportError:
|
||||
__version__ = "0.0.0+unknown"
|
||||
|
||||
# Import client API
|
||||
from .client import (
|
||||
from .client import DeltaGliderClient, create_client
|
||||
from .client_models import (
|
||||
BucketStats,
|
||||
CompressionEstimate,
|
||||
DeltaGliderClient,
|
||||
ListObjectsResponse,
|
||||
ObjectInfo,
|
||||
UploadSummary,
|
||||
create_client,
|
||||
)
|
||||
from .core import DeltaService, DeltaSpace, ObjectKey
|
||||
|
||||
# Import boto3-compatible type aliases (no boto3 import required!)
|
||||
from .types import (
|
||||
CopyObjectResponse,
|
||||
CreateBucketResponse,
|
||||
DeleteObjectResponse,
|
||||
DeleteObjectsResponse,
|
||||
GetObjectResponse,
|
||||
HeadObjectResponse,
|
||||
ListBucketsResponse,
|
||||
ListObjectsV2Response,
|
||||
PutObjectResponse,
|
||||
S3Object,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"__version__",
|
||||
# Client
|
||||
"DeltaGliderClient",
|
||||
"create_client",
|
||||
# Data classes
|
||||
# Data classes (legacy - will be deprecated in favor of TypedDict)
|
||||
"UploadSummary",
|
||||
"CompressionEstimate",
|
||||
"ObjectInfo",
|
||||
@@ -33,4 +46,15 @@ __all__ = [
|
||||
"DeltaService",
|
||||
"DeltaSpace",
|
||||
"ObjectKey",
|
||||
# boto3-compatible types (no boto3 import needed!)
|
||||
"ListObjectsV2Response",
|
||||
"PutObjectResponse",
|
||||
"GetObjectResponse",
|
||||
"DeleteObjectResponse",
|
||||
"DeleteObjectsResponse",
|
||||
"HeadObjectResponse",
|
||||
"ListBucketsResponse",
|
||||
"CreateBucketResponse",
|
||||
"CopyObjectResponse",
|
||||
"S3Object",
|
||||
]
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
# file generated by setuptools-scm
|
||||
# don't change, don't track in version control
|
||||
|
||||
__all__ = [
|
||||
"__version__",
|
||||
"__version_tuple__",
|
||||
"version",
|
||||
"version_tuple",
|
||||
"__commit_id__",
|
||||
"commit_id",
|
||||
]
|
||||
|
||||
TYPE_CHECKING = False
|
||||
if TYPE_CHECKING:
|
||||
from typing import Tuple
|
||||
from typing import Union
|
||||
|
||||
VERSION_TUPLE = Tuple[Union[int, str], ...]
|
||||
COMMIT_ID = Union[str, None]
|
||||
else:
|
||||
VERSION_TUPLE = object
|
||||
COMMIT_ID = object
|
||||
|
||||
version: str
|
||||
__version__: str
|
||||
__version_tuple__: VERSION_TUPLE
|
||||
version_tuple: VERSION_TUPLE
|
||||
commit_id: COMMIT_ID
|
||||
__commit_id__: COMMIT_ID
|
||||
|
||||
__version__ = version = '0.2.0.dev0'
|
||||
__version_tuple__ = version_tuple = (0, 2, 0, 'dev0')
|
||||
|
||||
__commit_id__ = commit_id = 'g432ddd89c'
|
||||
@@ -1,6 +1,9 @@
|
||||
"""Adapters for DeltaGlider."""
|
||||
|
||||
from .cache_cas import ContentAddressedCache
|
||||
from .cache_encrypted import EncryptedCache
|
||||
from .cache_fs import FsCacheAdapter
|
||||
from .cache_memory import MemoryCache
|
||||
from .clock_utc import UtcClockAdapter
|
||||
from .diff_xdelta import XdeltaAdapter
|
||||
from .hash_sha import Sha256Adapter
|
||||
@@ -13,6 +16,9 @@ __all__ = [
|
||||
"XdeltaAdapter",
|
||||
"Sha256Adapter",
|
||||
"FsCacheAdapter",
|
||||
"ContentAddressedCache",
|
||||
"EncryptedCache",
|
||||
"MemoryCache",
|
||||
"UtcClockAdapter",
|
||||
"StdLoggerAdapter",
|
||||
"NoopMetricsAdapter",
|
||||
|
||||
270
src/deltaglider/adapters/cache_cas.py
Normal file
270
src/deltaglider/adapters/cache_cas.py
Normal file
@@ -0,0 +1,270 @@
|
||||
"""Content-Addressed Storage (CAS) cache adapter.
|
||||
|
||||
This adapter stores cached references using their SHA256 hash as the filename,
|
||||
eliminating collision risks and enabling automatic deduplication.
|
||||
"""
|
||||
|
||||
import hashlib
|
||||
import shutil
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Unix-only imports for file locking
|
||||
if sys.platform != "win32":
|
||||
import fcntl
|
||||
|
||||
from ..core.errors import CacheCorruptionError, CacheMissError
|
||||
from ..ports.cache import CachePort
|
||||
from ..ports.hash import HashPort
|
||||
|
||||
|
||||
class ContentAddressedCache(CachePort):
|
||||
"""Content-addressed storage cache using SHA256 as filename.
|
||||
|
||||
Key Features:
|
||||
- Zero collision risk (SHA256 namespace is the filename)
|
||||
- Automatic deduplication (same content = same filename)
|
||||
- No metadata tracking needed (self-describing)
|
||||
- Secure by design (tampering changes SHA, breaks lookup)
|
||||
|
||||
Storage Layout:
|
||||
- base_dir/
|
||||
- ab/
|
||||
- cd/
|
||||
- abcdef123456... (full SHA256 as filename)
|
||||
|
||||
The two-level directory structure (first 2 chars, next 2 chars) prevents
|
||||
filesystem performance degradation from too many files in one directory.
|
||||
"""
|
||||
|
||||
def __init__(self, base_dir: Path, hasher: HashPort):
|
||||
"""Initialize content-addressed cache.
|
||||
|
||||
Args:
|
||||
base_dir: Root directory for cache storage
|
||||
hasher: Hash adapter for SHA256 computation
|
||||
"""
|
||||
self.base_dir = base_dir
|
||||
self.hasher = hasher
|
||||
# Mapping of (bucket, prefix) -> sha256 for compatibility
|
||||
# This is ephemeral and only used within a single process
|
||||
self._deltaspace_to_sha: dict[tuple[str, str], str] = {}
|
||||
|
||||
def _cas_path(self, sha256: str) -> Path:
|
||||
"""Get content-addressed path from SHA256 hash.
|
||||
|
||||
Uses two-level directory structure for filesystem optimization:
|
||||
- First 2 hex chars as L1 directory (256 buckets)
|
||||
- Next 2 hex chars as L2 directory (256 buckets per L1)
|
||||
- Full SHA as filename
|
||||
|
||||
Example: abcdef1234... -> ab/cd/abcdef1234...
|
||||
|
||||
Args:
|
||||
sha256: Full SHA256 hash (64 hex chars)
|
||||
|
||||
Returns:
|
||||
Path to file in content-addressed storage
|
||||
"""
|
||||
if len(sha256) < 4:
|
||||
raise ValueError(f"Invalid SHA256: {sha256}")
|
||||
|
||||
# Two-level directory structure
|
||||
l1_dir = sha256[:2] # First 2 chars
|
||||
l2_dir = sha256[2:4] # Next 2 chars
|
||||
|
||||
return self.base_dir / l1_dir / l2_dir / sha256
|
||||
|
||||
def ref_path(self, bucket: str, prefix: str) -> Path:
|
||||
"""Get path where reference should be cached.
|
||||
|
||||
For CAS, we need the SHA to compute the path. This method looks up
|
||||
the SHA from the ephemeral mapping. If not found, it returns a
|
||||
placeholder path (backward compatibility with has_ref checks).
|
||||
|
||||
Args:
|
||||
bucket: S3 bucket name
|
||||
prefix: Deltaspace prefix
|
||||
|
||||
Returns:
|
||||
Path to cached reference (may not exist)
|
||||
"""
|
||||
key = (bucket, prefix)
|
||||
|
||||
# If we have the SHA mapping, use CAS path
|
||||
if key in self._deltaspace_to_sha:
|
||||
sha = self._deltaspace_to_sha[key]
|
||||
return self._cas_path(sha)
|
||||
|
||||
# Fallback: return a non-existent placeholder
|
||||
# This enables has_ref to return False for unmapped deltaspaces
|
||||
return self.base_dir / "_unmapped" / bucket / prefix / "reference.bin"
|
||||
|
||||
def has_ref(self, bucket: str, prefix: str, sha: str) -> bool:
|
||||
"""Check if reference exists with given SHA.
|
||||
|
||||
In CAS, existence check is simple: if file exists at SHA path,
|
||||
it MUST have that SHA (content-addressed guarantee).
|
||||
|
||||
Args:
|
||||
bucket: S3 bucket name
|
||||
prefix: Deltaspace prefix
|
||||
sha: Expected SHA256 hash
|
||||
|
||||
Returns:
|
||||
True if reference exists with this SHA
|
||||
"""
|
||||
path = self._cas_path(sha)
|
||||
return path.exists()
|
||||
|
||||
def get_validated_ref(self, bucket: str, prefix: str, expected_sha: str) -> Path:
|
||||
"""Get cached reference with atomic SHA validation.
|
||||
|
||||
In CAS, the SHA IS the filename, so if the file exists, it's already
|
||||
validated by definition. We still perform an integrity check to detect
|
||||
filesystem corruption.
|
||||
|
||||
Args:
|
||||
bucket: S3 bucket name
|
||||
prefix: Deltaspace prefix
|
||||
expected_sha: Expected SHA256 hash
|
||||
|
||||
Returns:
|
||||
Path to validated cached file
|
||||
|
||||
Raises:
|
||||
CacheMissError: File not found in cache
|
||||
CacheCorruptionError: SHA mismatch (filesystem corruption)
|
||||
"""
|
||||
path = self._cas_path(expected_sha)
|
||||
|
||||
if not path.exists():
|
||||
raise CacheMissError(f"Cache miss for SHA {expected_sha[:8]}...")
|
||||
|
||||
# Lock file and validate content atomically
|
||||
try:
|
||||
with open(path, "rb") as f:
|
||||
# Acquire shared lock (Unix only)
|
||||
if sys.platform != "win32":
|
||||
fcntl.flock(f.fileno(), fcntl.LOCK_SH)
|
||||
|
||||
# Read and hash content
|
||||
content = f.read()
|
||||
actual_sha = hashlib.sha256(content).hexdigest()
|
||||
|
||||
# Release lock automatically when exiting context
|
||||
|
||||
# Validate SHA (should never fail in CAS unless filesystem corruption)
|
||||
if actual_sha != expected_sha:
|
||||
# Filesystem corruption detected
|
||||
try:
|
||||
path.unlink()
|
||||
except OSError:
|
||||
pass # Best effort cleanup
|
||||
|
||||
raise CacheCorruptionError(
|
||||
f"Filesystem corruption detected: file {path.name} has wrong content. "
|
||||
f"Expected SHA {expected_sha}, got {actual_sha}"
|
||||
)
|
||||
|
||||
# Update mapping for ref_path compatibility
|
||||
self._deltaspace_to_sha[(bucket, prefix)] = expected_sha
|
||||
|
||||
return path
|
||||
|
||||
except OSError as e:
|
||||
raise CacheMissError(f"Cache read error for SHA {expected_sha[:8]}...: {e}") from e
|
||||
|
||||
def write_ref(self, bucket: str, prefix: str, src: Path) -> Path:
|
||||
"""Cache reference file using content-addressed storage.
|
||||
|
||||
The file is stored at a path determined by its SHA256 hash.
|
||||
If a file with the same content already exists, it's reused
|
||||
(automatic deduplication).
|
||||
|
||||
Args:
|
||||
bucket: S3 bucket name
|
||||
prefix: Deltaspace prefix
|
||||
src: Source file to cache
|
||||
|
||||
Returns:
|
||||
Path to cached file (content-addressed)
|
||||
"""
|
||||
# Compute SHA of source file
|
||||
sha = self.hasher.sha256(src)
|
||||
path = self._cas_path(sha)
|
||||
|
||||
# If file already exists, we're done (deduplication)
|
||||
if path.exists():
|
||||
# Update mapping
|
||||
self._deltaspace_to_sha[(bucket, prefix)] = sha
|
||||
return path
|
||||
|
||||
# Create directory structure with secure permissions
|
||||
path.parent.mkdir(parents=True, mode=0o700, exist_ok=True)
|
||||
|
||||
# Atomic write using temp file + rename
|
||||
temp_path = path.parent / f".tmp.{sha}"
|
||||
try:
|
||||
shutil.copy2(src, temp_path)
|
||||
# Atomic rename (POSIX guarantee)
|
||||
temp_path.rename(path)
|
||||
except Exception:
|
||||
# Cleanup on failure
|
||||
if temp_path.exists():
|
||||
temp_path.unlink()
|
||||
raise
|
||||
|
||||
# Update mapping
|
||||
self._deltaspace_to_sha[(bucket, prefix)] = sha
|
||||
|
||||
return path
|
||||
|
||||
def evict(self, bucket: str, prefix: str) -> None:
|
||||
"""Remove cached reference for given deltaspace.
|
||||
|
||||
In CAS, eviction is more complex because:
|
||||
1. Multiple deltaspaces may reference the same SHA (deduplication)
|
||||
2. We can't delete the file unless we know no other deltaspace uses it
|
||||
|
||||
For safety, we only remove the mapping, not the actual file.
|
||||
Orphaned files will be cleaned up by cache expiry (future feature).
|
||||
|
||||
Args:
|
||||
bucket: S3 bucket name
|
||||
prefix: Deltaspace prefix
|
||||
"""
|
||||
key = (bucket, prefix)
|
||||
|
||||
# Remove mapping (safe operation)
|
||||
if key in self._deltaspace_to_sha:
|
||||
del self._deltaspace_to_sha[key]
|
||||
|
||||
# NOTE: We don't delete the actual CAS file because:
|
||||
# - Other deltaspaces may reference the same SHA
|
||||
|
||||
def clear(self) -> None:
|
||||
"""Clear all cached references.
|
||||
|
||||
Removes all cached files and mappings. This is a destructive operation
|
||||
that forcibly removes the entire cache directory.
|
||||
|
||||
Use cases:
|
||||
- Long-running applications that need to free disk space
|
||||
- Manual cache invalidation
|
||||
- Test cleanup
|
||||
- Ensuring fresh data fetch after configuration changes
|
||||
"""
|
||||
import shutil
|
||||
|
||||
# Clear in-memory mapping
|
||||
self._deltaspace_to_sha.clear()
|
||||
|
||||
# Remove all cache files (destructive!)
|
||||
if self.base_dir.exists():
|
||||
shutil.rmtree(self.base_dir, ignore_errors=True)
|
||||
|
||||
# Recreate base directory with secure permissions
|
||||
self.base_dir.mkdir(parents=True, mode=0o700, exist_ok=True)
|
||||
# - The ephemeral cache will be cleaned on process exit anyway
|
||||
# - For persistent cache (future), we'd need reference counting
|
||||
305
src/deltaglider/adapters/cache_encrypted.py
Normal file
305
src/deltaglider/adapters/cache_encrypted.py
Normal file
@@ -0,0 +1,305 @@
|
||||
"""Encrypted cache wrapper using Fernet symmetric encryption.
|
||||
|
||||
This adapter wraps any CachePort implementation and adds transparent encryption/decryption.
|
||||
It uses Fernet (symmetric encryption based on AES-128-CBC with HMAC authentication).
|
||||
"""
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
from cryptography.fernet import Fernet
|
||||
|
||||
from ..core.errors import CacheCorruptionError, CacheMissError
|
||||
from ..ports.cache import CachePort
|
||||
|
||||
|
||||
class EncryptedCache(CachePort):
|
||||
"""Encrypted cache wrapper using Fernet symmetric encryption.
|
||||
|
||||
Wraps any CachePort implementation and transparently encrypts data at rest.
|
||||
Uses Fernet which provides:
|
||||
- AES-128-CBC encryption
|
||||
- HMAC authentication (prevents tampering)
|
||||
- Automatic key rotation support
|
||||
- Safe for ephemeral process-isolated caches
|
||||
|
||||
Key Management:
|
||||
- Ephemeral key generated per process (default, most secure)
|
||||
- Or use DG_CACHE_ENCRYPTION_KEY env var (base64-encoded Fernet key)
|
||||
- For production: use secrets management system (AWS KMS, HashiCorp Vault, etc.)
|
||||
|
||||
Security Properties:
|
||||
- Confidentiality: Data encrypted at rest
|
||||
- Integrity: HMAC prevents tampering
|
||||
- Authenticity: Only valid keys can decrypt
|
||||
- Forward Secrecy: Ephemeral keys destroyed on process exit
|
||||
"""
|
||||
|
||||
def __init__(self, backend: CachePort, encryption_key: bytes | None = None):
|
||||
"""Initialize encrypted cache wrapper.
|
||||
|
||||
Args:
|
||||
backend: Underlying cache implementation (CAS, filesystem, memory, etc.)
|
||||
encryption_key: Optional Fernet key (32 bytes base64-encoded).
|
||||
If None, generates ephemeral key for this process.
|
||||
"""
|
||||
self.backend = backend
|
||||
|
||||
# Key management: ephemeral (default) or provided
|
||||
if encryption_key is None:
|
||||
# Generate ephemeral key for this process (most secure)
|
||||
self._key = Fernet.generate_key()
|
||||
self._ephemeral = True
|
||||
else:
|
||||
# Use provided key (for persistent cache scenarios)
|
||||
self._key = encryption_key
|
||||
self._ephemeral = False
|
||||
|
||||
self._cipher = Fernet(self._key)
|
||||
|
||||
# Mapping: (bucket, prefix) -> plaintext_sha256
|
||||
# Needed because backend uses SHA for storage, but encrypted content has different SHA
|
||||
self._plaintext_sha_map: dict[tuple[str, str], str] = {}
|
||||
|
||||
@classmethod
|
||||
def from_env(cls, backend: CachePort) -> "EncryptedCache":
|
||||
"""Create encrypted cache with key from environment.
|
||||
|
||||
Looks for DG_CACHE_ENCRYPTION_KEY environment variable.
|
||||
If not found, generates ephemeral key.
|
||||
|
||||
Args:
|
||||
backend: Underlying cache implementation
|
||||
|
||||
Returns:
|
||||
EncryptedCache instance
|
||||
"""
|
||||
key_str = os.environ.get("DG_CACHE_ENCRYPTION_KEY")
|
||||
if key_str:
|
||||
# Decode base64-encoded key
|
||||
encryption_key = key_str.encode("utf-8")
|
||||
else:
|
||||
# Use ephemeral key
|
||||
encryption_key = None
|
||||
|
||||
return cls(backend, encryption_key)
|
||||
|
||||
def ref_path(self, bucket: str, prefix: str) -> Path:
|
||||
"""Get path where reference should be cached.
|
||||
|
||||
Delegates to backend. Path structure determined by backend
|
||||
(e.g., CAS uses SHA256-based paths).
|
||||
|
||||
Args:
|
||||
bucket: S3 bucket name
|
||||
prefix: Deltaspace prefix
|
||||
|
||||
Returns:
|
||||
Path from backend
|
||||
"""
|
||||
return self.backend.ref_path(bucket, prefix)
|
||||
|
||||
def has_ref(self, bucket: str, prefix: str, sha: str) -> bool:
|
||||
"""Check if reference exists with given SHA.
|
||||
|
||||
Note: SHA is of the *unencrypted* content. The backend may store
|
||||
encrypted data, but we verify against original content hash.
|
||||
|
||||
Args:
|
||||
bucket: S3 bucket name
|
||||
prefix: Deltaspace prefix
|
||||
sha: SHA256 of unencrypted content
|
||||
|
||||
Returns:
|
||||
True if encrypted reference exists with this SHA
|
||||
"""
|
||||
# Delegate to backend
|
||||
# Backend may use SHA for content-addressed storage of encrypted data
|
||||
return self.backend.has_ref(bucket, prefix, sha)
|
||||
|
||||
def get_validated_ref(self, bucket: str, prefix: str, expected_sha: str) -> Path:
|
||||
"""Get cached reference with decryption and validation.
|
||||
|
||||
Retrieves encrypted data from backend, decrypts it, validates SHA,
|
||||
and returns path to decrypted temporary file.
|
||||
|
||||
Args:
|
||||
bucket: S3 bucket name
|
||||
prefix: Deltaspace prefix
|
||||
expected_sha: Expected SHA256 of *decrypted* content
|
||||
|
||||
Returns:
|
||||
Path to decrypted validated file (temporary)
|
||||
|
||||
Raises:
|
||||
CacheMissError: File not in cache
|
||||
CacheCorruptionError: Decryption failed or SHA mismatch
|
||||
"""
|
||||
# Check if we have this plaintext SHA mapped
|
||||
key = (bucket, prefix)
|
||||
if key not in self._plaintext_sha_map:
|
||||
raise CacheMissError(f"Cache miss for {bucket}/{prefix}")
|
||||
|
||||
# Verify the requested SHA matches our mapping
|
||||
if self._plaintext_sha_map[key] != expected_sha:
|
||||
raise CacheCorruptionError(
|
||||
f"SHA mismatch for {bucket}/{prefix}: "
|
||||
f"expected {expected_sha}, have {self._plaintext_sha_map[key]}"
|
||||
)
|
||||
|
||||
# Get encrypted file from backend using ref_path (not validated, we validate plaintext)
|
||||
encrypted_path = self.backend.ref_path(bucket, prefix)
|
||||
if not encrypted_path.exists():
|
||||
raise CacheMissError(f"Encrypted cache file not found for {bucket}/{prefix}")
|
||||
|
||||
# Read encrypted content
|
||||
try:
|
||||
with open(encrypted_path, "rb") as f:
|
||||
encrypted_data = f.read()
|
||||
except OSError as e:
|
||||
raise CacheMissError(f"Cannot read encrypted cache: {e}") from e
|
||||
|
||||
# Decrypt
|
||||
try:
|
||||
decrypted_data = self._cipher.decrypt(encrypted_data)
|
||||
except Exception as e:
|
||||
# Fernet raises InvalidToken for tampering/wrong key
|
||||
# SECURITY: Auto-delete corrupted cache files
|
||||
try:
|
||||
encrypted_path.unlink(missing_ok=True)
|
||||
# Clean up mapping
|
||||
if key in self._plaintext_sha_map:
|
||||
del self._plaintext_sha_map[key]
|
||||
except Exception:
|
||||
pass # Best effort cleanup
|
||||
raise CacheCorruptionError(
|
||||
f"Decryption failed for {bucket}/{prefix}: {e}. "
|
||||
f"Corrupted cache deleted automatically."
|
||||
) from e
|
||||
|
||||
# Validate SHA of decrypted content
|
||||
import hashlib
|
||||
|
||||
actual_sha = hashlib.sha256(decrypted_data).hexdigest()
|
||||
if actual_sha != expected_sha:
|
||||
# SECURITY: Auto-delete corrupted cache files
|
||||
try:
|
||||
encrypted_path.unlink(missing_ok=True)
|
||||
# Clean up mapping
|
||||
if key in self._plaintext_sha_map:
|
||||
del self._plaintext_sha_map[key]
|
||||
except Exception:
|
||||
pass # Best effort cleanup
|
||||
raise CacheCorruptionError(
|
||||
f"Decrypted content SHA mismatch for {bucket}/{prefix}: "
|
||||
f"expected {expected_sha}, got {actual_sha}. "
|
||||
f"Corrupted cache deleted automatically."
|
||||
)
|
||||
|
||||
# Write decrypted content to temporary file
|
||||
# Use same path as encrypted file but with .decrypted suffix
|
||||
decrypted_path = encrypted_path.with_suffix(".decrypted")
|
||||
try:
|
||||
with open(decrypted_path, "wb") as f:
|
||||
f.write(decrypted_data)
|
||||
except OSError as e:
|
||||
raise CacheCorruptionError(f"Cannot write decrypted cache: {e}") from e
|
||||
|
||||
return decrypted_path
|
||||
|
||||
def write_ref(self, bucket: str, prefix: str, src: Path) -> Path:
|
||||
"""Encrypt and cache reference file.
|
||||
|
||||
Reads source file, encrypts it, and stores encrypted version via backend.
|
||||
|
||||
Args:
|
||||
bucket: S3 bucket name
|
||||
prefix: Deltaspace prefix
|
||||
src: Source file to encrypt and cache
|
||||
|
||||
Returns:
|
||||
Path to encrypted cached file (from backend)
|
||||
"""
|
||||
# Read source file
|
||||
try:
|
||||
with open(src, "rb") as f:
|
||||
plaintext_data = f.read()
|
||||
except OSError as e:
|
||||
raise CacheCorruptionError(f"Cannot read source file {src}: {e}") from e
|
||||
|
||||
# Compute plaintext SHA for mapping
|
||||
import hashlib
|
||||
|
||||
plaintext_sha = hashlib.sha256(plaintext_data).hexdigest()
|
||||
|
||||
# Encrypt
|
||||
encrypted_data = self._cipher.encrypt(plaintext_data)
|
||||
|
||||
# Write encrypted data to temporary file
|
||||
temp_encrypted = src.with_suffix(".encrypted.tmp")
|
||||
try:
|
||||
with open(temp_encrypted, "wb") as f:
|
||||
f.write(encrypted_data)
|
||||
|
||||
# Store encrypted file via backend
|
||||
result_path = self.backend.write_ref(bucket, prefix, temp_encrypted)
|
||||
|
||||
# Store mapping of plaintext SHA
|
||||
key = (bucket, prefix)
|
||||
self._plaintext_sha_map[key] = plaintext_sha
|
||||
|
||||
return result_path
|
||||
|
||||
finally:
|
||||
# Cleanup temporary file
|
||||
if temp_encrypted.exists():
|
||||
temp_encrypted.unlink()
|
||||
|
||||
def evict(self, bucket: str, prefix: str) -> None:
|
||||
"""Remove cached reference (encrypted version).
|
||||
|
||||
Delegates to backend. Also cleans up any .decrypted temporary files and mappings.
|
||||
|
||||
Args:
|
||||
bucket: S3 bucket name
|
||||
prefix: Deltaspace prefix
|
||||
"""
|
||||
# Remove from plaintext SHA mapping
|
||||
key = (bucket, prefix)
|
||||
if key in self._plaintext_sha_map:
|
||||
del self._plaintext_sha_map[key]
|
||||
|
||||
# Get path to potentially clean up .decrypted files
|
||||
try:
|
||||
path = self.backend.ref_path(bucket, prefix)
|
||||
decrypted_path = path.with_suffix(".decrypted")
|
||||
if decrypted_path.exists():
|
||||
decrypted_path.unlink()
|
||||
except Exception:
|
||||
# Best effort cleanup
|
||||
pass
|
||||
|
||||
# Evict from backend
|
||||
self.backend.evict(bucket, prefix)
|
||||
|
||||
def clear(self) -> None:
|
||||
"""Clear all cached references and encryption mappings.
|
||||
|
||||
Removes all cached data and clears encryption key mappings.
|
||||
This is the proper way to forcibly clean up cache in long-running
|
||||
applications.
|
||||
|
||||
Use cases:
|
||||
- Long-running applications needing to free resources
|
||||
- Manual cache invalidation after key rotation
|
||||
- Test cleanup
|
||||
- Memory pressure situations
|
||||
|
||||
Note: After clearing, the cache will use a fresh encryption key
|
||||
(ephemeral mode) or the same persistent key (if DG_CACHE_ENCRYPTION_KEY set).
|
||||
"""
|
||||
# Clear encryption mapping
|
||||
self._plaintext_sha_map.clear()
|
||||
|
||||
# Delegate to backend to clear actual files/memory
|
||||
self.backend.clear()
|
||||
@@ -1,8 +1,15 @@
|
||||
"""Filesystem cache adapter."""
|
||||
|
||||
import hashlib
|
||||
import shutil
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Unix-only imports for file locking
|
||||
if sys.platform != "win32":
|
||||
import fcntl
|
||||
|
||||
from ..core.errors import CacheCorruptionError, CacheMissError
|
||||
from ..ports.cache import CachePort
|
||||
from ..ports.hash import HashPort
|
||||
|
||||
@@ -29,6 +36,60 @@ class FsCacheAdapter(CachePort):
|
||||
actual_sha = self.hasher.sha256(path)
|
||||
return actual_sha == sha
|
||||
|
||||
def get_validated_ref(self, bucket: str, prefix: str, expected_sha: str) -> Path:
|
||||
"""Get cached reference with atomic SHA validation.
|
||||
|
||||
This method prevents TOCTOU attacks by validating the SHA at use-time,
|
||||
not just at check-time.
|
||||
|
||||
Args:
|
||||
bucket: S3 bucket name
|
||||
prefix: Prefix/deltaspace within bucket
|
||||
expected_sha: Expected SHA256 hash
|
||||
|
||||
Returns:
|
||||
Path to validated cached file
|
||||
|
||||
Raises:
|
||||
CacheMissError: File not found in cache
|
||||
CacheCorruptionError: SHA mismatch detected
|
||||
"""
|
||||
path = self.ref_path(bucket, prefix)
|
||||
|
||||
if not path.exists():
|
||||
raise CacheMissError(f"Cache miss for {bucket}/{prefix}")
|
||||
|
||||
# Lock file and validate content atomically
|
||||
try:
|
||||
with open(path, "rb") as f:
|
||||
# Acquire shared lock (Unix only)
|
||||
if sys.platform != "win32":
|
||||
fcntl.flock(f.fileno(), fcntl.LOCK_SH)
|
||||
|
||||
# Read and hash content
|
||||
content = f.read()
|
||||
actual_sha = hashlib.sha256(content).hexdigest()
|
||||
|
||||
# Release lock automatically when exiting context
|
||||
|
||||
# Validate SHA
|
||||
if actual_sha != expected_sha:
|
||||
# File corrupted or tampered - remove it
|
||||
try:
|
||||
path.unlink()
|
||||
except OSError:
|
||||
pass # Best effort cleanup
|
||||
|
||||
raise CacheCorruptionError(
|
||||
f"Cache corruption detected for {bucket}/{prefix}: "
|
||||
f"expected {expected_sha}, got {actual_sha}"
|
||||
)
|
||||
|
||||
return path
|
||||
|
||||
except OSError as e:
|
||||
raise CacheMissError(f"Cache read error for {bucket}/{prefix}: {e}") from e
|
||||
|
||||
def write_ref(self, bucket: str, prefix: str, src: Path) -> Path:
|
||||
"""Cache reference file."""
|
||||
path = self.ref_path(bucket, prefix)
|
||||
|
||||
279
src/deltaglider/adapters/cache_memory.py
Normal file
279
src/deltaglider/adapters/cache_memory.py
Normal file
@@ -0,0 +1,279 @@
|
||||
"""In-memory cache implementation with optional size limits.
|
||||
|
||||
This adapter stores cached references entirely in memory, avoiding filesystem I/O.
|
||||
Useful for:
|
||||
- High-performance scenarios where memory is abundant
|
||||
- Containerized environments with limited filesystem access
|
||||
- Testing and development
|
||||
"""
|
||||
|
||||
import hashlib
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Unix-only imports for compatibility
|
||||
if sys.platform != "win32":
|
||||
import fcntl # noqa: F401
|
||||
|
||||
from ..core.errors import CacheCorruptionError, CacheMissError
|
||||
from ..ports.cache import CachePort
|
||||
from ..ports.hash import HashPort
|
||||
|
||||
|
||||
class MemoryCache(CachePort):
|
||||
"""In-memory cache implementation with LRU eviction.
|
||||
|
||||
Stores cached references in memory as bytes. Useful for high-performance
|
||||
scenarios or when filesystem access is limited.
|
||||
|
||||
Features:
|
||||
- Zero filesystem I/O (everything in RAM)
|
||||
- Optional size limits with LRU eviction
|
||||
- Thread-safe operations
|
||||
- Temporary file creation for compatibility with file-based APIs
|
||||
|
||||
Limitations:
|
||||
- Data lost on process exit (ephemeral only)
|
||||
- Memory usage proportional to cache size
|
||||
- Not suitable for very large reference files
|
||||
|
||||
Storage Layout:
|
||||
- Key: (bucket, prefix) tuple
|
||||
- Value: (content_bytes, sha256) tuple
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
hasher: HashPort,
|
||||
max_size_mb: int = 100,
|
||||
temp_dir: Path | None = None,
|
||||
):
|
||||
"""Initialize in-memory cache.
|
||||
|
||||
Args:
|
||||
hasher: Hash adapter for SHA256 computation
|
||||
max_size_mb: Maximum cache size in megabytes (default 100MB)
|
||||
temp_dir: Directory for temporary files (default: system temp)
|
||||
"""
|
||||
self.hasher = hasher
|
||||
self.max_size_bytes = max_size_mb * 1024 * 1024
|
||||
|
||||
# Storage: (bucket, prefix) -> (content_bytes, sha256)
|
||||
self._cache: dict[tuple[str, str], tuple[bytes, str]] = {}
|
||||
|
||||
# Size tracking
|
||||
self._current_size = 0
|
||||
|
||||
# Access order for LRU eviction: (bucket, prefix) list
|
||||
self._access_order: list[tuple[str, str]] = []
|
||||
|
||||
# Temp directory for file-based API compatibility
|
||||
if temp_dir is None:
|
||||
import tempfile
|
||||
|
||||
self.temp_dir = Path(tempfile.gettempdir()) / "deltaglider-mem-cache"
|
||||
else:
|
||||
self.temp_dir = temp_dir
|
||||
|
||||
self.temp_dir.mkdir(parents=True, exist_ok=True, mode=0o700)
|
||||
|
||||
def _update_access(self, key: tuple[str, str]) -> None:
|
||||
"""Update LRU access order.
|
||||
|
||||
Args:
|
||||
key: Cache key (bucket, prefix)
|
||||
"""
|
||||
# Remove old position if exists
|
||||
if key in self._access_order:
|
||||
self._access_order.remove(key)
|
||||
|
||||
# Add to end (most recently used)
|
||||
self._access_order.append(key)
|
||||
|
||||
def _evict_lru(self, needed_bytes: int) -> None:
|
||||
"""Evict least recently used entries to free space.
|
||||
|
||||
Args:
|
||||
needed_bytes: Bytes needed for new entry
|
||||
"""
|
||||
while self._current_size + needed_bytes > self.max_size_bytes and self._access_order:
|
||||
# Evict least recently used
|
||||
lru_key = self._access_order[0]
|
||||
bucket, prefix = lru_key
|
||||
|
||||
# Remove from cache
|
||||
if lru_key in self._cache:
|
||||
content, _ = self._cache[lru_key]
|
||||
self._current_size -= len(content)
|
||||
del self._cache[lru_key]
|
||||
|
||||
# Remove from access order
|
||||
self._access_order.remove(lru_key)
|
||||
|
||||
def ref_path(self, bucket: str, prefix: str) -> Path:
|
||||
"""Get placeholder path for in-memory reference.
|
||||
|
||||
Returns a virtual path that doesn't actually exist on filesystem.
|
||||
Used for API compatibility.
|
||||
|
||||
Args:
|
||||
bucket: S3 bucket name
|
||||
prefix: Deltaspace prefix
|
||||
|
||||
Returns:
|
||||
Virtual path (may not exist on filesystem)
|
||||
"""
|
||||
# Return virtual path for compatibility
|
||||
# Actual data is in memory, but we need Path for API
|
||||
safe_bucket = bucket.replace("/", "_")
|
||||
safe_prefix = prefix.replace("/", "_")
|
||||
return self.temp_dir / safe_bucket / safe_prefix / "reference.bin"
|
||||
|
||||
def has_ref(self, bucket: str, prefix: str, sha: str) -> bool:
|
||||
"""Check if reference exists in memory with given SHA.
|
||||
|
||||
Args:
|
||||
bucket: S3 bucket name
|
||||
prefix: Deltaspace prefix
|
||||
sha: Expected SHA256 hash
|
||||
|
||||
Returns:
|
||||
True if reference exists with this SHA
|
||||
"""
|
||||
key = (bucket, prefix)
|
||||
if key not in self._cache:
|
||||
return False
|
||||
|
||||
_, cached_sha = self._cache[key]
|
||||
return cached_sha == sha
|
||||
|
||||
def get_validated_ref(self, bucket: str, prefix: str, expected_sha: str) -> Path:
|
||||
"""Get cached reference from memory with validation.
|
||||
|
||||
Retrieves content from memory, validates SHA, and writes to
|
||||
temporary file for compatibility with file-based APIs.
|
||||
|
||||
Args:
|
||||
bucket: S3 bucket name
|
||||
prefix: Deltaspace prefix
|
||||
expected_sha: Expected SHA256 hash
|
||||
|
||||
Returns:
|
||||
Path to temporary file containing content
|
||||
|
||||
Raises:
|
||||
CacheMissError: Content not in cache
|
||||
CacheCorruptionError: SHA mismatch
|
||||
"""
|
||||
key = (bucket, prefix)
|
||||
|
||||
# Check if in cache
|
||||
if key not in self._cache:
|
||||
raise CacheMissError(f"Cache miss for {bucket}/{prefix}")
|
||||
|
||||
# Get content and validate
|
||||
content, cached_sha = self._cache[key]
|
||||
|
||||
# Update LRU
|
||||
self._update_access(key)
|
||||
|
||||
# Validate SHA
|
||||
if cached_sha != expected_sha:
|
||||
# SHA mismatch - possible corruption
|
||||
raise CacheCorruptionError(
|
||||
f"Memory cache SHA mismatch for {bucket}/{prefix}: "
|
||||
f"expected {expected_sha}, got {cached_sha}"
|
||||
)
|
||||
|
||||
# Write to temporary file for API compatibility
|
||||
temp_path = self.ref_path(bucket, prefix)
|
||||
temp_path.parent.mkdir(parents=True, exist_ok=True, mode=0o700)
|
||||
|
||||
try:
|
||||
with open(temp_path, "wb") as f:
|
||||
f.write(content)
|
||||
except OSError as e:
|
||||
raise CacheMissError(f"Cannot write temp file: {e}") from e
|
||||
|
||||
return temp_path
|
||||
|
||||
def write_ref(self, bucket: str, prefix: str, src: Path) -> Path:
|
||||
"""Store reference file in memory.
|
||||
|
||||
Reads file content and stores in memory with SHA hash.
|
||||
|
||||
Args:
|
||||
bucket: S3 bucket name
|
||||
prefix: Deltaspace prefix
|
||||
src: Source file to cache
|
||||
|
||||
Returns:
|
||||
Virtual path (content is in memory)
|
||||
"""
|
||||
# Read source file
|
||||
try:
|
||||
with open(src, "rb") as f:
|
||||
content = f.read()
|
||||
except OSError as e:
|
||||
raise CacheCorruptionError(f"Cannot read source file {src}: {e}") from e
|
||||
|
||||
# Compute SHA
|
||||
sha = hashlib.sha256(content).hexdigest()
|
||||
|
||||
# Check if we need to evict
|
||||
content_size = len(content)
|
||||
if content_size > self.max_size_bytes:
|
||||
raise CacheCorruptionError(
|
||||
f"File too large for memory cache: {content_size} bytes "
|
||||
f"(limit: {self.max_size_bytes} bytes)"
|
||||
)
|
||||
|
||||
# Evict LRU entries if needed
|
||||
self._evict_lru(content_size)
|
||||
|
||||
# Store in memory
|
||||
key = (bucket, prefix)
|
||||
self._cache[key] = (content, sha)
|
||||
self._current_size += content_size
|
||||
|
||||
# Update LRU
|
||||
self._update_access(key)
|
||||
|
||||
# Return virtual path
|
||||
return self.ref_path(bucket, prefix)
|
||||
|
||||
def evict(self, bucket: str, prefix: str) -> None:
|
||||
"""Remove cached reference from memory.
|
||||
|
||||
Args:
|
||||
bucket: S3 bucket name
|
||||
prefix: Deltaspace prefix
|
||||
"""
|
||||
key = (bucket, prefix)
|
||||
|
||||
# Remove from cache
|
||||
if key in self._cache:
|
||||
content, _ = self._cache[key]
|
||||
self._current_size -= len(content)
|
||||
del self._cache[key]
|
||||
|
||||
# Remove from LRU tracking
|
||||
if key in self._access_order:
|
||||
self._access_order.remove(key)
|
||||
|
||||
# Clean up temp file if exists
|
||||
temp_path = self.ref_path(bucket, prefix)
|
||||
if temp_path.exists():
|
||||
try:
|
||||
temp_path.unlink()
|
||||
except OSError:
|
||||
pass # Best effort
|
||||
|
||||
def clear(self) -> None:
|
||||
"""Clear all cached content from memory.
|
||||
|
||||
Useful for testing and cleanup.
|
||||
"""
|
||||
self._cache.clear()
|
||||
self._access_order.clear()
|
||||
self._current_size = 0
|
||||
@@ -21,13 +21,31 @@ class S3StorageAdapter(StoragePort):
|
||||
self,
|
||||
client: Optional["S3Client"] = None,
|
||||
endpoint_url: str | None = None,
|
||||
boto3_kwargs: dict[str, Any] | None = None,
|
||||
):
|
||||
"""Initialize with S3 client."""
|
||||
"""Initialize with S3 client.
|
||||
|
||||
Args:
|
||||
client: Pre-configured S3 client (if None, one will be created)
|
||||
endpoint_url: S3 endpoint URL override (for MinIO, LocalStack, etc.)
|
||||
boto3_kwargs: Additional kwargs to pass to boto3.client() including:
|
||||
- aws_access_key_id: AWS access key
|
||||
- aws_secret_access_key: AWS secret key
|
||||
- aws_session_token: AWS session token (for temporary credentials)
|
||||
- region_name: AWS region name
|
||||
"""
|
||||
if client is None:
|
||||
self.client = boto3.client(
|
||||
"s3",
|
||||
endpoint_url=endpoint_url or os.environ.get("AWS_ENDPOINT_URL"),
|
||||
)
|
||||
# Build boto3 client parameters
|
||||
client_params: dict[str, Any] = {
|
||||
"service_name": "s3",
|
||||
"endpoint_url": endpoint_url or os.environ.get("AWS_ENDPOINT_URL"),
|
||||
}
|
||||
|
||||
# Merge in any additional boto3 kwargs (credentials, region, etc.)
|
||||
if boto3_kwargs:
|
||||
client_params.update(boto3_kwargs)
|
||||
|
||||
self.client = boto3.client(**client_params)
|
||||
else:
|
||||
self.client = client
|
||||
|
||||
@@ -145,7 +163,7 @@ class S3StorageAdapter(StoragePort):
|
||||
|
||||
try:
|
||||
response = self.client.get_object(Bucket=bucket, Key=object_key)
|
||||
return response["Body"] # type: ignore[return-value]
|
||||
return response["Body"] # type: ignore[no-any-return]
|
||||
except ClientError as e:
|
||||
if e.response["Error"]["Code"] == "NoSuchKey":
|
||||
raise FileNotFoundError(f"Object not found: {key}") from e
|
||||
|
||||
@@ -1,14 +1,16 @@
|
||||
"""CLI main entry point."""
|
||||
|
||||
import atexit
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import click
|
||||
|
||||
from ...adapters import (
|
||||
FsCacheAdapter,
|
||||
NoopMetricsAdapter,
|
||||
S3StorageAdapter,
|
||||
Sha256Adapter,
|
||||
@@ -16,8 +18,9 @@ from ...adapters import (
|
||||
UtcClockAdapter,
|
||||
XdeltaAdapter,
|
||||
)
|
||||
from ...core import DeltaService, DeltaSpace, ObjectKey
|
||||
from ...core import DeltaService, ObjectKey
|
||||
from ...ports import MetricsPort
|
||||
from ...ports.cache import CachePort
|
||||
from .aws_compat import (
|
||||
copy_s3_to_s3,
|
||||
determine_operation,
|
||||
@@ -38,10 +41,14 @@ def create_service(
|
||||
) -> DeltaService:
|
||||
"""Create service with wired adapters."""
|
||||
# Get config from environment
|
||||
cache_dir = Path(os.environ.get("DG_CACHE_DIR", "/tmp/.deltaglider/reference_cache"))
|
||||
max_ratio = float(os.environ.get("DG_MAX_RATIO", "0.5"))
|
||||
metrics_type = os.environ.get("DG_METRICS", "logging") # Options: noop, logging, cloudwatch
|
||||
|
||||
# SECURITY: Always use ephemeral process-isolated cache
|
||||
cache_dir = Path(tempfile.mkdtemp(prefix="deltaglider-", dir="/tmp"))
|
||||
# Register cleanup handler to remove cache on exit
|
||||
atexit.register(lambda: shutil.rmtree(cache_dir, ignore_errors=True))
|
||||
|
||||
# Set AWS environment variables if provided
|
||||
if endpoint_url:
|
||||
os.environ["AWS_ENDPOINT_URL"] = endpoint_url
|
||||
@@ -54,7 +61,24 @@ def create_service(
|
||||
hasher = Sha256Adapter()
|
||||
storage = S3StorageAdapter(endpoint_url=endpoint_url)
|
||||
diff = XdeltaAdapter()
|
||||
cache = FsCacheAdapter(cache_dir, hasher)
|
||||
|
||||
# SECURITY: Configurable cache with encryption and backend selection
|
||||
from deltaglider.adapters import ContentAddressedCache, EncryptedCache, MemoryCache
|
||||
|
||||
# Select backend: memory or filesystem
|
||||
cache_backend = os.environ.get("DG_CACHE_BACKEND", "filesystem") # Options: filesystem, memory
|
||||
base_cache: CachePort
|
||||
if cache_backend == "memory":
|
||||
max_size_mb = int(os.environ.get("DG_CACHE_MEMORY_SIZE_MB", "100"))
|
||||
base_cache = MemoryCache(hasher, max_size_mb=max_size_mb, temp_dir=cache_dir)
|
||||
else:
|
||||
# Filesystem-backed with Content-Addressed Storage
|
||||
base_cache = ContentAddressedCache(cache_dir, hasher)
|
||||
|
||||
# Always apply encryption with ephemeral keys (security hardening)
|
||||
# Encryption key is optional via DG_CACHE_ENCRYPTION_KEY (ephemeral if not set)
|
||||
cache: CachePort = EncryptedCache.from_env(base_cache)
|
||||
|
||||
clock = UtcClockAdapter()
|
||||
logger = StdLoggerAdapter(level=log_level)
|
||||
|
||||
@@ -240,6 +264,13 @@ def ls(
|
||||
prefix_str: str
|
||||
bucket_name, prefix_str = parse_s3_url(s3_url)
|
||||
|
||||
# Ensure prefix ends with / if it's meant to be a directory
|
||||
# This helps with proper path handling
|
||||
if prefix_str and not prefix_str.endswith("/"):
|
||||
# Check if this is a file or directory by listing
|
||||
# For now, assume it's a directory prefix
|
||||
prefix_str = prefix_str + "/"
|
||||
|
||||
# Format bytes to human readable
|
||||
def format_bytes(size: int) -> str:
|
||||
if not human_readable:
|
||||
@@ -251,53 +282,66 @@ def ls(
|
||||
size_float /= 1024.0
|
||||
return f"{size_float:.1f}P"
|
||||
|
||||
# List objects
|
||||
list_prefix = f"{bucket_name}/{prefix_str}" if prefix_str else bucket_name
|
||||
objects = list(service.storage.list(list_prefix))
|
||||
# List objects using SDK (automatically filters .delta and reference.bin)
|
||||
from deltaglider.client import DeltaGliderClient
|
||||
|
||||
client = DeltaGliderClient(service)
|
||||
dg_response = client.list_objects(
|
||||
Bucket=bucket_name,
|
||||
Prefix=prefix_str,
|
||||
MaxKeys=10000,
|
||||
Delimiter="/" if not recursive else "",
|
||||
)
|
||||
objects = dg_response["Contents"]
|
||||
|
||||
# Filter by recursive flag
|
||||
if not recursive:
|
||||
# Only show direct children
|
||||
seen_prefixes = set()
|
||||
# Show common prefixes (subdirectories) from S3 response
|
||||
for common_prefix in dg_response.get("CommonPrefixes", []):
|
||||
prefix_path = common_prefix.get("Prefix", "")
|
||||
# Show only the directory name, not the full path
|
||||
if prefix_str:
|
||||
# Strip the current prefix to show only the subdirectory
|
||||
display_name = prefix_path[len(prefix_str) :]
|
||||
else:
|
||||
display_name = prefix_path
|
||||
click.echo(f" PRE {display_name}")
|
||||
|
||||
# Only show files at current level (not in subdirectories)
|
||||
filtered_objects = []
|
||||
for obj in objects:
|
||||
rel_path = obj.key[len(prefix_str) :] if prefix_str else obj.key
|
||||
if "/" in rel_path:
|
||||
# It's in a subdirectory
|
||||
subdir = rel_path.split("/")[0] + "/"
|
||||
if subdir not in seen_prefixes:
|
||||
seen_prefixes.add(subdir)
|
||||
# Show as directory
|
||||
full_prefix = f"{prefix_str}{subdir}" if prefix_str else subdir
|
||||
click.echo(f" PRE {full_prefix}")
|
||||
else:
|
||||
# Direct file
|
||||
if rel_path: # Only add if there's actually a file at this level
|
||||
filtered_objects.append(obj)
|
||||
obj_key = obj["Key"]
|
||||
rel_path = obj_key[len(prefix_str) :] if prefix_str else obj_key
|
||||
# Only include if it's a direct child (no / in relative path)
|
||||
if "/" not in rel_path and rel_path:
|
||||
filtered_objects.append(obj)
|
||||
objects = filtered_objects
|
||||
|
||||
# Display objects
|
||||
# Display objects (SDK already filters reference.bin and strips .delta)
|
||||
total_size = 0
|
||||
total_count = 0
|
||||
|
||||
for obj in objects:
|
||||
# Skip reference.bin files (internal)
|
||||
if obj.key.endswith("/reference.bin"):
|
||||
continue
|
||||
|
||||
total_size += obj.size
|
||||
total_size += obj["Size"]
|
||||
total_count += 1
|
||||
|
||||
# Format the display
|
||||
size_str = format_bytes(obj.size)
|
||||
date_str = obj.last_modified.strftime("%Y-%m-%d %H:%M:%S")
|
||||
size_str = format_bytes(obj["Size"])
|
||||
# last_modified is a string from SDK, parse it if needed
|
||||
last_modified = obj.get("LastModified", "")
|
||||
if isinstance(last_modified, str):
|
||||
# Already a string, extract date portion
|
||||
date_str = last_modified[:19].replace("T", " ")
|
||||
else:
|
||||
date_str = last_modified.strftime("%Y-%m-%d %H:%M:%S")
|
||||
|
||||
# Remove .delta extension from display
|
||||
display_key = obj.key
|
||||
if display_key.endswith(".delta"):
|
||||
display_key = display_key[:-6]
|
||||
# Show only the filename relative to current prefix (like AWS CLI)
|
||||
if prefix_str:
|
||||
display_key = obj["Key"][len(prefix_str) :]
|
||||
else:
|
||||
display_key = obj["Key"]
|
||||
|
||||
click.echo(f"{date_str} {size_str:>10} s3://{bucket_name}/{display_key}")
|
||||
click.echo(f"{date_str} {size_str:>10} {display_key}")
|
||||
|
||||
# Show summary if requested
|
||||
if summarize:
|
||||
@@ -433,7 +477,9 @@ def rm(
|
||||
# Show warnings if any references were kept
|
||||
for warning in result.get("warnings", []):
|
||||
if "Kept reference" in warning:
|
||||
click.echo(f"Keeping reference file (still in use): s3://{bucket}/{warning.split()[2]}")
|
||||
click.echo(
|
||||
f"Keeping reference file (still in use): s3://{bucket}/{warning.split()[2]}"
|
||||
)
|
||||
|
||||
# Report any errors
|
||||
if result["failed_count"] > 0:
|
||||
@@ -553,130 +599,6 @@ def sync(
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.argument("file", type=click.Path(exists=True, path_type=Path))
|
||||
@click.argument("s3_url")
|
||||
@click.option("--max-ratio", type=float, help="Max delta/file ratio (default: 0.5)")
|
||||
@click.pass_obj
|
||||
def put(service: DeltaService, file: Path, s3_url: str, max_ratio: float | None) -> None:
|
||||
"""Upload file as reference or delta (legacy command, use 'cp' instead)."""
|
||||
# Parse S3 URL
|
||||
if not s3_url.startswith("s3://"):
|
||||
click.echo(f"Error: Invalid S3 URL: {s3_url}", err=True)
|
||||
sys.exit(1)
|
||||
|
||||
# Extract bucket and prefix
|
||||
s3_path = s3_url[5:].rstrip("/")
|
||||
parts = s3_path.split("/", 1)
|
||||
bucket = parts[0]
|
||||
prefix = parts[1] if len(parts) > 1 else ""
|
||||
|
||||
delta_space = DeltaSpace(bucket=bucket, prefix=prefix)
|
||||
|
||||
try:
|
||||
summary = service.put(file, delta_space, max_ratio)
|
||||
|
||||
# Output JSON summary
|
||||
output = {
|
||||
"operation": summary.operation,
|
||||
"bucket": summary.bucket,
|
||||
"key": summary.key,
|
||||
"original_name": summary.original_name,
|
||||
"file_size": summary.file_size,
|
||||
"file_sha256": summary.file_sha256,
|
||||
}
|
||||
|
||||
if summary.delta_size is not None:
|
||||
output["delta_size"] = summary.delta_size
|
||||
output["delta_ratio"] = round(summary.delta_ratio or 0, 3)
|
||||
|
||||
if summary.ref_key:
|
||||
output["ref_key"] = summary.ref_key
|
||||
output["ref_sha256"] = summary.ref_sha256
|
||||
|
||||
output["cache_hit"] = summary.cache_hit
|
||||
|
||||
click.echo(json.dumps(output, indent=2))
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"Error: {e}", err=True)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.argument("s3_url")
|
||||
@click.option("-o", "--output", type=click.Path(path_type=Path), help="Output file path")
|
||||
@click.pass_obj
|
||||
def get(service: DeltaService, s3_url: str, output: Path | None) -> None:
|
||||
"""Download and hydrate delta file.
|
||||
|
||||
The S3 URL can be either:
|
||||
- Full path to delta file: s3://bucket/path/to/file.zip.delta
|
||||
- Path to original file (will append .delta): s3://bucket/path/to/file.zip
|
||||
"""
|
||||
# Parse S3 URL
|
||||
if not s3_url.startswith("s3://"):
|
||||
click.echo(f"Error: Invalid S3 URL: {s3_url}", err=True)
|
||||
sys.exit(1)
|
||||
|
||||
s3_path = s3_url[5:]
|
||||
parts = s3_path.split("/", 1)
|
||||
if len(parts) != 2:
|
||||
click.echo(f"Error: Invalid S3 URL: {s3_url}", err=True)
|
||||
sys.exit(1)
|
||||
|
||||
bucket = parts[0]
|
||||
key = parts[1]
|
||||
|
||||
# Try to determine if this is a direct file or needs .delta appended
|
||||
# First try the key as-is
|
||||
obj_key = ObjectKey(bucket=bucket, key=key)
|
||||
|
||||
# Check if the file exists using the service's storage port
|
||||
# which already has proper credentials configured
|
||||
try:
|
||||
# Try to head the object as-is
|
||||
obj_head = service.storage.head(f"{bucket}/{key}")
|
||||
if obj_head is not None:
|
||||
click.echo(f"Found file: s3://{bucket}/{key}")
|
||||
else:
|
||||
# If not found and doesn't end with .delta, try adding .delta
|
||||
if not key.endswith(".delta"):
|
||||
delta_key = f"{key}.delta"
|
||||
delta_head = service.storage.head(f"{bucket}/{delta_key}")
|
||||
if delta_head is not None:
|
||||
key = delta_key
|
||||
obj_key = ObjectKey(bucket=bucket, key=key)
|
||||
click.echo(f"Found delta file: s3://{bucket}/{key}")
|
||||
else:
|
||||
click.echo(
|
||||
f"Error: File not found: s3://{bucket}/{key} (also tried .delta)", err=True
|
||||
)
|
||||
sys.exit(1)
|
||||
else:
|
||||
click.echo(f"Error: File not found: s3://{bucket}/{key}", err=True)
|
||||
sys.exit(1)
|
||||
except Exception:
|
||||
# For unexpected errors, just proceed with the original key
|
||||
click.echo(f"Warning: Could not check file existence, proceeding with: s3://{bucket}/{key}")
|
||||
|
||||
# Determine output path
|
||||
if output is None:
|
||||
# Extract original name from delta name
|
||||
if key.endswith(".delta"):
|
||||
output = Path(Path(key).stem)
|
||||
else:
|
||||
output = Path(Path(key).name)
|
||||
|
||||
try:
|
||||
service.get(obj_key, output)
|
||||
click.echo(f"Successfully retrieved: {output}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"Error: {e}", err=True)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.argument("s3_url")
|
||||
@click.pass_obj
|
||||
@@ -718,6 +640,84 @@ def verify(service: DeltaService, s3_url: str) -> None:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.argument("bucket")
|
||||
@click.option("--detailed", is_flag=True, help="Fetch detailed compression metrics (slower)")
|
||||
@click.option("--json", "output_json", is_flag=True, help="Output in JSON format")
|
||||
@click.pass_obj
|
||||
def stats(service: DeltaService, bucket: str, detailed: bool, output_json: bool) -> None:
|
||||
"""Get bucket statistics and compression metrics.
|
||||
|
||||
BUCKET can be specified as:
|
||||
- s3://bucket-name/
|
||||
- s3://bucket-name
|
||||
- bucket-name
|
||||
"""
|
||||
from ...client import DeltaGliderClient
|
||||
|
||||
try:
|
||||
# Parse bucket from S3 URL if needed
|
||||
if bucket.startswith("s3://"):
|
||||
# Remove s3:// prefix and any trailing slashes
|
||||
bucket = bucket[5:].rstrip("/")
|
||||
# Extract just the bucket name (first path component)
|
||||
bucket = bucket.split("/")[0] if "/" in bucket else bucket
|
||||
|
||||
if not bucket:
|
||||
click.echo("Error: Invalid bucket name", err=True)
|
||||
sys.exit(1)
|
||||
|
||||
# Create client from service
|
||||
client = DeltaGliderClient(service=service)
|
||||
|
||||
# Get bucket stats
|
||||
bucket_stats = client.get_bucket_stats(bucket, detailed_stats=detailed)
|
||||
|
||||
if output_json:
|
||||
# JSON output
|
||||
output = {
|
||||
"bucket": bucket_stats.bucket,
|
||||
"object_count": bucket_stats.object_count,
|
||||
"total_size": bucket_stats.total_size,
|
||||
"compressed_size": bucket_stats.compressed_size,
|
||||
"space_saved": bucket_stats.space_saved,
|
||||
"average_compression_ratio": bucket_stats.average_compression_ratio,
|
||||
"delta_objects": bucket_stats.delta_objects,
|
||||
"direct_objects": bucket_stats.direct_objects,
|
||||
}
|
||||
click.echo(json.dumps(output, indent=2))
|
||||
else:
|
||||
# Human-readable output
|
||||
def format_bytes(size: float) -> str:
|
||||
"""Format bytes to human-readable size."""
|
||||
for unit in ["B", "KB", "MB", "GB", "TB"]:
|
||||
if size < 1024.0:
|
||||
return f"{size:.2f} {unit}"
|
||||
size /= 1024.0
|
||||
return f"{size:.2f} PB"
|
||||
|
||||
click.echo(f"Bucket Statistics: {bucket_stats.bucket}")
|
||||
click.echo(f"{'=' * 60}")
|
||||
click.echo(f"Total Objects: {bucket_stats.object_count:,}")
|
||||
click.echo(f" Delta Objects: {bucket_stats.delta_objects:,}")
|
||||
click.echo(f" Direct Objects: {bucket_stats.direct_objects:,}")
|
||||
click.echo("")
|
||||
click.echo(
|
||||
f"Original Size: {format_bytes(bucket_stats.total_size)} ({bucket_stats.total_size:,} bytes)"
|
||||
)
|
||||
click.echo(
|
||||
f"Compressed Size: {format_bytes(bucket_stats.compressed_size)} ({bucket_stats.compressed_size:,} bytes)"
|
||||
)
|
||||
click.echo(
|
||||
f"Space Saved: {format_bytes(bucket_stats.space_saved)} ({bucket_stats.space_saved:,} bytes)"
|
||||
)
|
||||
click.echo(f"Compression Ratio: {bucket_stats.average_compression_ratio:.1%}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"Error: {e}", err=True)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def main() -> None:
|
||||
"""Main entry point."""
|
||||
cli()
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
35
src/deltaglider/client_delete_helpers.py
Normal file
35
src/deltaglider/client_delete_helpers.py
Normal file
@@ -0,0 +1,35 @@
|
||||
"""Helper utilities for client delete operations."""
|
||||
|
||||
from .core import DeltaService, ObjectKey
|
||||
from .core.errors import NotFoundError
|
||||
|
||||
|
||||
def delete_with_delta_suffix(
|
||||
service: DeltaService, bucket: str, key: str
|
||||
) -> tuple[str, dict[str, object]]:
|
||||
"""Delete an object, retrying with '.delta' suffix when needed.
|
||||
|
||||
Args:
|
||||
service: DeltaService-like instance exposing ``delete(ObjectKey)``.
|
||||
bucket: Target bucket.
|
||||
key: Requested key (without forcing .delta suffix).
|
||||
|
||||
Returns:
|
||||
Tuple containing the actual key deleted in storage and the delete result dict.
|
||||
|
||||
Raises:
|
||||
NotFoundError: Propagated when both the direct and '.delta' keys are missing.
|
||||
"""
|
||||
actual_key = key
|
||||
object_key = ObjectKey(bucket=bucket, key=actual_key)
|
||||
|
||||
try:
|
||||
delete_result = service.delete(object_key)
|
||||
except NotFoundError:
|
||||
if key.endswith(".delta"):
|
||||
raise
|
||||
actual_key = f"{key}.delta"
|
||||
object_key = ObjectKey(bucket=bucket, key=actual_key)
|
||||
delete_result = service.delete(object_key)
|
||||
|
||||
return actual_key, delete_result
|
||||
99
src/deltaglider/client_models.py
Normal file
99
src/deltaglider/client_models.py
Normal file
@@ -0,0 +1,99 @@
|
||||
"""Shared data models for the DeltaGlider client."""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
|
||||
@dataclass
|
||||
class UploadSummary:
|
||||
"""User-friendly upload summary."""
|
||||
|
||||
operation: str
|
||||
bucket: str
|
||||
key: str
|
||||
original_size: int
|
||||
stored_size: int
|
||||
is_delta: bool
|
||||
delta_ratio: float = 0.0
|
||||
|
||||
@property
|
||||
def original_size_mb(self) -> float:
|
||||
"""Original size in MB."""
|
||||
return self.original_size / (1024 * 1024)
|
||||
|
||||
@property
|
||||
def stored_size_mb(self) -> float:
|
||||
"""Stored size in MB."""
|
||||
return self.stored_size / (1024 * 1024)
|
||||
|
||||
@property
|
||||
def savings_percent(self) -> float:
|
||||
"""Percentage saved through compression."""
|
||||
if self.original_size == 0:
|
||||
return 0.0
|
||||
return ((self.original_size - self.stored_size) / self.original_size) * 100
|
||||
|
||||
|
||||
@dataclass
|
||||
class CompressionEstimate:
|
||||
"""Compression estimate for a file."""
|
||||
|
||||
original_size: int
|
||||
estimated_compressed_size: int
|
||||
estimated_ratio: float
|
||||
confidence: float
|
||||
recommended_reference: str | None = None
|
||||
should_use_delta: bool = True
|
||||
|
||||
|
||||
@dataclass
|
||||
class ObjectInfo:
|
||||
"""Detailed object information with compression stats."""
|
||||
|
||||
key: str
|
||||
size: int
|
||||
last_modified: str
|
||||
etag: str | None = None
|
||||
storage_class: str = "STANDARD"
|
||||
|
||||
# DeltaGlider-specific fields
|
||||
original_size: int | None = None
|
||||
compressed_size: int | None = None
|
||||
compression_ratio: float | None = None
|
||||
is_delta: bool = False
|
||||
reference_key: str | None = None
|
||||
delta_chain_length: int = 0
|
||||
|
||||
|
||||
@dataclass
|
||||
class ListObjectsResponse:
|
||||
"""Response from list_objects, compatible with boto3."""
|
||||
|
||||
name: str # Bucket name
|
||||
prefix: str = ""
|
||||
delimiter: str = ""
|
||||
max_keys: int = 1000
|
||||
common_prefixes: list[dict[str, str]] = field(default_factory=list)
|
||||
contents: list[ObjectInfo] = field(default_factory=list)
|
||||
is_truncated: bool = False
|
||||
next_continuation_token: str | None = None
|
||||
continuation_token: str | None = None
|
||||
key_count: int = 0
|
||||
|
||||
@property
|
||||
def objects(self) -> list[ObjectInfo]:
|
||||
"""Alias for contents, for convenience."""
|
||||
return self.contents
|
||||
|
||||
|
||||
@dataclass
|
||||
class BucketStats:
|
||||
"""Statistics for a bucket."""
|
||||
|
||||
bucket: str
|
||||
object_count: int
|
||||
total_size: int
|
||||
compressed_size: int
|
||||
space_saved: int
|
||||
average_compression_ratio: float
|
||||
delta_objects: int
|
||||
direct_objects: int
|
||||
37
src/deltaglider/client_operations/__init__.py
Normal file
37
src/deltaglider/client_operations/__init__.py
Normal file
@@ -0,0 +1,37 @@
|
||||
"""Client operation modules for DeltaGliderClient.
|
||||
|
||||
This package contains modular operation implementations:
|
||||
- bucket: S3 bucket management (create, delete, list)
|
||||
- presigned: Presigned URL generation for temporary access
|
||||
- batch: Batch upload/download operations
|
||||
- stats: Statistics and analytics operations
|
||||
"""
|
||||
|
||||
from .batch import download_batch, upload_batch, upload_chunked
|
||||
from .bucket import create_bucket, delete_bucket, list_buckets
|
||||
from .presigned import generate_presigned_post, generate_presigned_url
|
||||
from .stats import (
|
||||
estimate_compression,
|
||||
find_similar_files,
|
||||
get_bucket_stats,
|
||||
get_object_info,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
# Bucket operations
|
||||
"create_bucket",
|
||||
"delete_bucket",
|
||||
"list_buckets",
|
||||
# Presigned operations
|
||||
"generate_presigned_url",
|
||||
"generate_presigned_post",
|
||||
# Batch operations
|
||||
"upload_chunked",
|
||||
"upload_batch",
|
||||
"download_batch",
|
||||
# Stats operations
|
||||
"get_bucket_stats",
|
||||
"get_object_info",
|
||||
"estimate_compression",
|
||||
"find_similar_files",
|
||||
]
|
||||
159
src/deltaglider/client_operations/batch.py
Normal file
159
src/deltaglider/client_operations/batch.py
Normal file
@@ -0,0 +1,159 @@
|
||||
"""Batch upload/download operations for DeltaGlider client.
|
||||
|
||||
This module contains DeltaGlider-specific batch operations:
|
||||
- upload_batch
|
||||
- download_batch
|
||||
- upload_chunked
|
||||
"""
|
||||
|
||||
from collections.abc import Callable
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from ..client_models import UploadSummary
|
||||
|
||||
|
||||
def upload_chunked(
|
||||
client: Any, # DeltaGliderClient
|
||||
file_path: str | Path,
|
||||
s3_url: str,
|
||||
chunk_size: int = 5 * 1024 * 1024,
|
||||
progress_callback: Callable[[int, int, int, int], None] | None = None,
|
||||
max_ratio: float = 0.5,
|
||||
) -> UploadSummary:
|
||||
"""Upload a file in chunks with progress callback.
|
||||
|
||||
This method reads the file in chunks to avoid loading large files entirely into memory,
|
||||
making it suitable for uploading very large files. Progress is reported after each chunk.
|
||||
|
||||
Args:
|
||||
client: DeltaGliderClient instance
|
||||
file_path: Local file to upload
|
||||
s3_url: S3 destination URL (s3://bucket/path/filename)
|
||||
chunk_size: Size of each chunk in bytes (default 5MB)
|
||||
progress_callback: Callback(chunk_number, total_chunks, bytes_sent, total_bytes)
|
||||
max_ratio: Maximum acceptable delta/file ratio for compression
|
||||
|
||||
Returns:
|
||||
UploadSummary with compression statistics
|
||||
|
||||
Example:
|
||||
def on_progress(chunk_num, total_chunks, bytes_sent, total_bytes):
|
||||
percent = (bytes_sent / total_bytes) * 100
|
||||
print(f"Upload progress: {percent:.1f}%")
|
||||
|
||||
client.upload_chunked(
|
||||
"large_file.zip",
|
||||
"s3://bucket/releases/large_file.zip",
|
||||
chunk_size=10 * 1024 * 1024, # 10MB chunks
|
||||
progress_callback=on_progress
|
||||
)
|
||||
"""
|
||||
file_path = Path(file_path)
|
||||
file_size = file_path.stat().st_size
|
||||
|
||||
# For small files, just use regular upload
|
||||
if file_size <= chunk_size:
|
||||
if progress_callback:
|
||||
progress_callback(1, 1, file_size, file_size)
|
||||
result: UploadSummary = client.upload(file_path, s3_url, max_ratio=max_ratio)
|
||||
return result
|
||||
|
||||
# Calculate chunks
|
||||
total_chunks = (file_size + chunk_size - 1) // chunk_size
|
||||
|
||||
# Create a temporary file for chunked processing
|
||||
# For now, we read the entire file but report progress in chunks
|
||||
# Future enhancement: implement true streaming upload in storage adapter
|
||||
bytes_read = 0
|
||||
|
||||
with open(file_path, "rb") as f:
|
||||
for chunk_num in range(1, total_chunks + 1):
|
||||
# Read chunk (simulated for progress reporting)
|
||||
chunk_data = f.read(chunk_size)
|
||||
bytes_read += len(chunk_data)
|
||||
|
||||
if progress_callback:
|
||||
progress_callback(chunk_num, total_chunks, bytes_read, file_size)
|
||||
|
||||
# Perform the actual upload
|
||||
# TODO: When storage adapter supports streaming, pass chunks directly
|
||||
upload_result: UploadSummary = client.upload(file_path, s3_url, max_ratio=max_ratio)
|
||||
|
||||
# Final progress callback
|
||||
if progress_callback:
|
||||
progress_callback(total_chunks, total_chunks, file_size, file_size)
|
||||
|
||||
return upload_result
|
||||
|
||||
|
||||
def upload_batch(
|
||||
client: Any, # DeltaGliderClient
|
||||
files: list[str | Path],
|
||||
s3_prefix: str,
|
||||
max_ratio: float = 0.5,
|
||||
progress_callback: Callable[[str, int, int], None] | None = None,
|
||||
) -> list[UploadSummary]:
|
||||
"""Upload multiple files in batch.
|
||||
|
||||
Args:
|
||||
client: DeltaGliderClient instance
|
||||
files: List of local file paths
|
||||
s3_prefix: S3 destination prefix (s3://bucket/prefix/)
|
||||
max_ratio: Maximum acceptable delta/file ratio
|
||||
progress_callback: Callback(filename, current_file_index, total_files)
|
||||
|
||||
Returns:
|
||||
List of UploadSummary objects
|
||||
"""
|
||||
results = []
|
||||
|
||||
for i, file_path in enumerate(files):
|
||||
file_path = Path(file_path)
|
||||
|
||||
if progress_callback:
|
||||
progress_callback(file_path.name, i + 1, len(files))
|
||||
|
||||
# Upload each file
|
||||
s3_url = f"{s3_prefix.rstrip('/')}/{file_path.name}"
|
||||
summary = client.upload(file_path, s3_url, max_ratio=max_ratio)
|
||||
results.append(summary)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def download_batch(
|
||||
client: Any, # DeltaGliderClient
|
||||
s3_urls: list[str],
|
||||
output_dir: str | Path,
|
||||
progress_callback: Callable[[str, int, int], None] | None = None,
|
||||
) -> list[Path]:
|
||||
"""Download multiple files in batch.
|
||||
|
||||
Args:
|
||||
client: DeltaGliderClient instance
|
||||
s3_urls: List of S3 URLs to download
|
||||
output_dir: Local directory to save files
|
||||
progress_callback: Callback(filename, current_file_index, total_files)
|
||||
|
||||
Returns:
|
||||
List of downloaded file paths
|
||||
"""
|
||||
output_dir = Path(output_dir)
|
||||
output_dir.mkdir(parents=True, exist_ok=True)
|
||||
results = []
|
||||
|
||||
for i, s3_url in enumerate(s3_urls):
|
||||
# Extract filename from URL
|
||||
filename = s3_url.split("/")[-1]
|
||||
if filename.endswith(".delta"):
|
||||
filename = filename[:-6] # Remove .delta suffix
|
||||
|
||||
if progress_callback:
|
||||
progress_callback(filename, i + 1, len(s3_urls))
|
||||
|
||||
output_path = output_dir / filename
|
||||
client.download(s3_url, output_path)
|
||||
results.append(output_path)
|
||||
|
||||
return results
|
||||
174
src/deltaglider/client_operations/bucket.py
Normal file
174
src/deltaglider/client_operations/bucket.py
Normal file
@@ -0,0 +1,174 @@
|
||||
"""Bucket management operations for DeltaGlider client.
|
||||
|
||||
This module contains boto3-compatible bucket operations:
|
||||
- create_bucket
|
||||
- delete_bucket
|
||||
- list_buckets
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
|
||||
def create_bucket(
|
||||
client: Any, # DeltaGliderClient (avoiding circular import)
|
||||
Bucket: str,
|
||||
CreateBucketConfiguration: dict[str, str] | None = None,
|
||||
**kwargs: Any,
|
||||
) -> dict[str, Any]:
|
||||
"""Create an S3 bucket (boto3-compatible).
|
||||
|
||||
Args:
|
||||
client: DeltaGliderClient instance
|
||||
Bucket: Bucket name to create
|
||||
CreateBucketConfiguration: Optional bucket configuration (e.g., LocationConstraint)
|
||||
**kwargs: Additional S3 parameters (for compatibility)
|
||||
|
||||
Returns:
|
||||
Response dict with bucket location
|
||||
|
||||
Example:
|
||||
>>> client = create_client()
|
||||
>>> client.create_bucket(Bucket='my-bucket')
|
||||
>>> # With region
|
||||
>>> client.create_bucket(
|
||||
... Bucket='my-bucket',
|
||||
... CreateBucketConfiguration={'LocationConstraint': 'us-west-2'}
|
||||
... )
|
||||
"""
|
||||
storage_adapter = client.service.storage
|
||||
|
||||
# Check if storage adapter has boto3 client
|
||||
if hasattr(storage_adapter, "client"):
|
||||
try:
|
||||
params: dict[str, Any] = {"Bucket": Bucket}
|
||||
if CreateBucketConfiguration:
|
||||
params["CreateBucketConfiguration"] = CreateBucketConfiguration
|
||||
|
||||
response = storage_adapter.client.create_bucket(**params)
|
||||
return {
|
||||
"Location": response.get("Location", f"/{Bucket}"),
|
||||
"ResponseMetadata": {
|
||||
"HTTPStatusCode": 200,
|
||||
},
|
||||
}
|
||||
except Exception as e:
|
||||
error_msg = str(e)
|
||||
if "BucketAlreadyExists" in error_msg or "BucketAlreadyOwnedByYou" in error_msg:
|
||||
# Bucket already exists - return success
|
||||
client.service.logger.debug(f"Bucket {Bucket} already exists")
|
||||
return {
|
||||
"Location": f"/{Bucket}",
|
||||
"ResponseMetadata": {
|
||||
"HTTPStatusCode": 200,
|
||||
},
|
||||
}
|
||||
raise RuntimeError(f"Failed to create bucket: {e}") from e
|
||||
else:
|
||||
raise NotImplementedError("Storage adapter does not support bucket creation")
|
||||
|
||||
|
||||
def delete_bucket(
|
||||
client: Any, # DeltaGliderClient
|
||||
Bucket: str,
|
||||
**kwargs: Any,
|
||||
) -> dict[str, Any]:
|
||||
"""Delete an S3 bucket (boto3-compatible).
|
||||
|
||||
Note: Bucket must be empty before deletion.
|
||||
|
||||
Args:
|
||||
client: DeltaGliderClient instance
|
||||
Bucket: Bucket name to delete
|
||||
**kwargs: Additional S3 parameters (for compatibility)
|
||||
|
||||
Returns:
|
||||
Response dict with deletion status
|
||||
|
||||
Example:
|
||||
>>> client = create_client()
|
||||
>>> client.delete_bucket(Bucket='my-bucket')
|
||||
"""
|
||||
storage_adapter = client.service.storage
|
||||
|
||||
# Check if storage adapter has boto3 client
|
||||
if hasattr(storage_adapter, "client"):
|
||||
try:
|
||||
storage_adapter.client.delete_bucket(Bucket=Bucket)
|
||||
return {
|
||||
"ResponseMetadata": {
|
||||
"HTTPStatusCode": 204,
|
||||
},
|
||||
}
|
||||
except Exception as e:
|
||||
error_msg = str(e)
|
||||
if "NoSuchBucket" in error_msg:
|
||||
# Bucket doesn't exist - return success
|
||||
client.service.logger.debug(f"Bucket {Bucket} does not exist")
|
||||
return {
|
||||
"ResponseMetadata": {
|
||||
"HTTPStatusCode": 204,
|
||||
},
|
||||
}
|
||||
raise RuntimeError(f"Failed to delete bucket: {e}") from e
|
||||
else:
|
||||
raise NotImplementedError("Storage adapter does not support bucket deletion")
|
||||
|
||||
|
||||
def list_buckets(
|
||||
client: Any, # DeltaGliderClient
|
||||
**kwargs: Any,
|
||||
) -> dict[str, Any]:
|
||||
"""List all S3 buckets (boto3-compatible).
|
||||
|
||||
Args:
|
||||
client: DeltaGliderClient instance
|
||||
**kwargs: Additional S3 parameters (for compatibility)
|
||||
|
||||
Returns:
|
||||
Response dict with bucket list
|
||||
|
||||
Example:
|
||||
>>> client = create_client()
|
||||
>>> response = client.list_buckets()
|
||||
>>> for bucket in response['Buckets']:
|
||||
... print(bucket['Name'])
|
||||
"""
|
||||
storage_adapter = client.service.storage
|
||||
|
||||
# Check if storage adapter has boto3 client
|
||||
if hasattr(storage_adapter, "client"):
|
||||
try:
|
||||
raw_response = storage_adapter.client.list_buckets()
|
||||
|
||||
buckets: list[dict[str, Any]] = []
|
||||
for bucket_entry in raw_response.get("Buckets", []):
|
||||
bucket_data = dict(bucket_entry)
|
||||
name = bucket_data.get("Name")
|
||||
if isinstance(name, str) and name:
|
||||
cached_stats, detailed = client._get_cached_bucket_stats_for_listing(name)
|
||||
if cached_stats is not None:
|
||||
bucket_data["DeltaGliderStats"] = {
|
||||
"Cached": True,
|
||||
"Detailed": detailed,
|
||||
"ObjectCount": cached_stats.object_count,
|
||||
"TotalSize": cached_stats.total_size,
|
||||
"CompressedSize": cached_stats.compressed_size,
|
||||
"SpaceSaved": cached_stats.space_saved,
|
||||
"AverageCompressionRatio": cached_stats.average_compression_ratio,
|
||||
"DeltaObjects": cached_stats.delta_objects,
|
||||
"DirectObjects": cached_stats.direct_objects,
|
||||
}
|
||||
|
||||
buckets.append(bucket_data)
|
||||
|
||||
return {
|
||||
"Buckets": buckets,
|
||||
"Owner": raw_response.get("Owner", {}),
|
||||
"ResponseMetadata": {
|
||||
"HTTPStatusCode": 200,
|
||||
},
|
||||
}
|
||||
except Exception as e:
|
||||
raise RuntimeError(f"Failed to list buckets: {e}") from e
|
||||
else:
|
||||
raise NotImplementedError("Storage adapter does not support bucket listing")
|
||||
124
src/deltaglider/client_operations/presigned.py
Normal file
124
src/deltaglider/client_operations/presigned.py
Normal file
@@ -0,0 +1,124 @@
|
||||
"""Presigned URL operations for DeltaGlider client.
|
||||
|
||||
This module contains boto3-compatible presigned URL operations:
|
||||
- generate_presigned_url
|
||||
- generate_presigned_post
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
|
||||
def try_boto3_presigned_operation(
|
||||
client: Any, # DeltaGliderClient
|
||||
operation: str,
|
||||
**kwargs: Any,
|
||||
) -> Any | None:
|
||||
"""Try to generate presigned operation using boto3 client, return None if not available."""
|
||||
storage_adapter = client.service.storage
|
||||
|
||||
# Check if storage adapter has boto3 client
|
||||
if hasattr(storage_adapter, "client"):
|
||||
try:
|
||||
if operation == "url":
|
||||
return str(storage_adapter.client.generate_presigned_url(**kwargs))
|
||||
elif operation == "post":
|
||||
return dict(storage_adapter.client.generate_presigned_post(**kwargs))
|
||||
except AttributeError:
|
||||
# storage_adapter does not have a 'client' attribute
|
||||
pass
|
||||
except Exception as e:
|
||||
# Fall back to manual construction if needed
|
||||
client.service.logger.warning(f"Failed to generate presigned {operation}: {e}")
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def generate_presigned_url(
|
||||
client: Any, # DeltaGliderClient
|
||||
ClientMethod: str,
|
||||
Params: dict[str, Any],
|
||||
ExpiresIn: int = 3600,
|
||||
) -> str:
|
||||
"""Generate presigned URL (boto3-compatible).
|
||||
|
||||
Args:
|
||||
client: DeltaGliderClient instance
|
||||
ClientMethod: Method name ('get_object' or 'put_object')
|
||||
Params: Parameters dict with Bucket and Key
|
||||
ExpiresIn: URL expiration in seconds
|
||||
|
||||
Returns:
|
||||
Presigned URL string
|
||||
"""
|
||||
# Try boto3 first, fallback to manual construction
|
||||
url = try_boto3_presigned_operation(
|
||||
client,
|
||||
"url",
|
||||
ClientMethod=ClientMethod,
|
||||
Params=Params,
|
||||
ExpiresIn=ExpiresIn,
|
||||
)
|
||||
if url is not None:
|
||||
return str(url)
|
||||
|
||||
# Fallback: construct URL manually (less secure, for dev/testing only)
|
||||
bucket = Params.get("Bucket", "")
|
||||
key = Params.get("Key", "")
|
||||
|
||||
if client.endpoint_url:
|
||||
base_url = client.endpoint_url
|
||||
else:
|
||||
base_url = f"https://{bucket}.s3.amazonaws.com"
|
||||
|
||||
# Warning: This is not a real presigned URL, just a placeholder
|
||||
client.service.logger.warning("Using placeholder presigned URL - not suitable for production")
|
||||
return f"{base_url}/{key}?expires={ExpiresIn}"
|
||||
|
||||
|
||||
def generate_presigned_post(
|
||||
client: Any, # DeltaGliderClient
|
||||
Bucket: str,
|
||||
Key: str,
|
||||
Fields: dict[str, str] | None = None,
|
||||
Conditions: list[Any] | None = None,
|
||||
ExpiresIn: int = 3600,
|
||||
) -> dict[str, Any]:
|
||||
"""Generate presigned POST data for HTML forms (boto3-compatible).
|
||||
|
||||
Args:
|
||||
client: DeltaGliderClient instance
|
||||
Bucket: S3 bucket name
|
||||
Key: Object key
|
||||
Fields: Additional fields to include
|
||||
Conditions: Upload conditions
|
||||
ExpiresIn: URL expiration in seconds
|
||||
|
||||
Returns:
|
||||
Dict with 'url' and 'fields' for form submission
|
||||
"""
|
||||
# Try boto3 first, fallback to manual construction
|
||||
response = try_boto3_presigned_operation(
|
||||
client,
|
||||
"post",
|
||||
Bucket=Bucket,
|
||||
Key=Key,
|
||||
Fields=Fields,
|
||||
Conditions=Conditions,
|
||||
ExpiresIn=ExpiresIn,
|
||||
)
|
||||
if response is not None:
|
||||
return dict(response)
|
||||
|
||||
# Fallback: return minimal structure for compatibility
|
||||
if client.endpoint_url:
|
||||
url = f"{client.endpoint_url}/{Bucket}"
|
||||
else:
|
||||
url = f"https://{Bucket}.s3.amazonaws.com"
|
||||
|
||||
return {
|
||||
"url": url,
|
||||
"fields": {
|
||||
"key": Key,
|
||||
**(Fields or {}),
|
||||
},
|
||||
}
|
||||
443
src/deltaglider/client_operations/stats.py
Normal file
443
src/deltaglider/client_operations/stats.py
Normal file
@@ -0,0 +1,443 @@
|
||||
"""Statistics and analysis operations for DeltaGlider client.
|
||||
|
||||
This module contains DeltaGlider-specific statistics operations:
|
||||
- get_bucket_stats
|
||||
- get_object_info
|
||||
- estimate_compression
|
||||
- find_similar_files
|
||||
"""
|
||||
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from ..client_models import BucketStats, CompressionEstimate, ObjectInfo
|
||||
|
||||
|
||||
def get_object_info(
|
||||
client: Any, # DeltaGliderClient
|
||||
s3_url: str,
|
||||
) -> ObjectInfo:
|
||||
"""Get detailed object information including compression stats.
|
||||
|
||||
Args:
|
||||
client: DeltaGliderClient instance
|
||||
s3_url: S3 URL of the object
|
||||
|
||||
Returns:
|
||||
ObjectInfo with detailed metadata
|
||||
"""
|
||||
# Parse URL
|
||||
if not s3_url.startswith("s3://"):
|
||||
raise ValueError(f"Invalid S3 URL: {s3_url}")
|
||||
|
||||
s3_path = s3_url[5:]
|
||||
parts = s3_path.split("/", 1)
|
||||
bucket = parts[0]
|
||||
key = parts[1] if len(parts) > 1 else ""
|
||||
|
||||
# Get object metadata
|
||||
obj_head = client.service.storage.head(f"{bucket}/{key}")
|
||||
if not obj_head:
|
||||
raise FileNotFoundError(f"Object not found: {s3_url}")
|
||||
|
||||
metadata = obj_head.metadata
|
||||
is_delta = key.endswith(".delta")
|
||||
|
||||
return ObjectInfo(
|
||||
key=key,
|
||||
size=obj_head.size,
|
||||
last_modified=metadata.get("last_modified", ""),
|
||||
etag=metadata.get("etag"),
|
||||
original_size=int(metadata.get("file_size", obj_head.size)),
|
||||
compressed_size=obj_head.size,
|
||||
compression_ratio=float(metadata.get("compression_ratio", 0.0)),
|
||||
is_delta=is_delta,
|
||||
reference_key=metadata.get("ref_key"),
|
||||
)
|
||||
|
||||
|
||||
def get_bucket_stats(
|
||||
client: Any, # DeltaGliderClient
|
||||
bucket: str,
|
||||
detailed_stats: bool = False,
|
||||
) -> BucketStats:
|
||||
"""Get statistics for a bucket with optional detailed compression metrics.
|
||||
|
||||
This method provides two modes:
|
||||
- Quick stats (default): Fast overview using LIST only (~50ms)
|
||||
- Detailed stats: Accurate compression metrics with HEAD requests (slower)
|
||||
|
||||
Args:
|
||||
client: DeltaGliderClient instance
|
||||
bucket: S3 bucket name
|
||||
detailed_stats: If True, fetch accurate compression ratios for delta files (default: False)
|
||||
|
||||
Returns:
|
||||
BucketStats with compression and space savings info
|
||||
|
||||
Performance:
|
||||
- With detailed_stats=False: ~50ms for any bucket size (1 LIST call per 1000 objects)
|
||||
- With detailed_stats=True: ~2-3s per 1000 objects (adds HEAD calls for delta files only)
|
||||
|
||||
Example:
|
||||
# Quick stats for dashboard display
|
||||
stats = client.get_bucket_stats('releases')
|
||||
print(f"Objects: {stats.object_count}, Size: {stats.total_size}")
|
||||
|
||||
# Detailed stats for analytics (slower but accurate)
|
||||
stats = client.get_bucket_stats('releases', detailed_stats=True)
|
||||
print(f"Compression ratio: {stats.average_compression_ratio:.1%}")
|
||||
"""
|
||||
# List all objects DIRECTLY from storage adapter to see reference.bin files
|
||||
# (client.list_objects filters them out for user-facing operations)
|
||||
all_objects = []
|
||||
start_after = None
|
||||
|
||||
import concurrent.futures
|
||||
|
||||
# Phase 1: Collect all objects and identify delta files
|
||||
raw_objects = []
|
||||
delta_keys = []
|
||||
|
||||
while True:
|
||||
# Call storage adapter directly to see ALL files including reference.bin
|
||||
response = client.service.storage.list_objects(
|
||||
bucket=bucket,
|
||||
prefix="",
|
||||
max_keys=1000,
|
||||
start_after=start_after,
|
||||
)
|
||||
|
||||
# Collect objects and identify delta files
|
||||
for obj_dict in response.get("objects", []):
|
||||
raw_objects.append(obj_dict)
|
||||
if obj_dict["key"].endswith(".delta"):
|
||||
delta_keys.append(obj_dict["key"])
|
||||
|
||||
if not response.get("is_truncated"):
|
||||
break
|
||||
|
||||
start_after = response.get("next_continuation_token")
|
||||
|
||||
# Phase 2: Fetch metadata for delta files in parallel (10x faster)
|
||||
metadata_map = {}
|
||||
if delta_keys:
|
||||
client.service.logger.info(
|
||||
f"Fetching metadata for {len(delta_keys)} delta files in parallel..."
|
||||
)
|
||||
|
||||
def fetch_metadata(key: str) -> tuple[str, dict[str, Any] | None]:
|
||||
try:
|
||||
obj_head = client.service.storage.head(f"{bucket}/{key}")
|
||||
if obj_head and obj_head.metadata:
|
||||
return key, obj_head.metadata
|
||||
except Exception as e:
|
||||
client.service.logger.debug(f"Failed to fetch metadata for {key}: {e}")
|
||||
return key, None
|
||||
|
||||
with concurrent.futures.ThreadPoolExecutor(
|
||||
max_workers=min(10, len(delta_keys))
|
||||
) as executor:
|
||||
futures = [executor.submit(fetch_metadata, key) for key in delta_keys]
|
||||
for future in concurrent.futures.as_completed(futures):
|
||||
key, metadata = future.result()
|
||||
if metadata:
|
||||
metadata_map[key] = metadata
|
||||
|
||||
# Phase 3: Build ObjectInfo list with metadata
|
||||
for obj_dict in raw_objects:
|
||||
key = obj_dict["key"]
|
||||
size = obj_dict["size"]
|
||||
is_delta = key.endswith(".delta")
|
||||
|
||||
# Get metadata from our parallel fetch
|
||||
metadata = metadata_map.get(key, {})
|
||||
|
||||
# Parse compression ratio and original size
|
||||
compression_ratio = 0.0
|
||||
original_size = size
|
||||
if is_delta and metadata:
|
||||
try:
|
||||
ratio_str = metadata.get("compression_ratio", "0.0")
|
||||
compression_ratio = float(ratio_str) if ratio_str != "unknown" else 0.0
|
||||
except (ValueError, TypeError):
|
||||
compression_ratio = 0.0
|
||||
try:
|
||||
original_size = int(metadata.get("file_size", size))
|
||||
client.service.logger.debug(f"Delta {key}: using original_size={original_size}")
|
||||
except (ValueError, TypeError):
|
||||
original_size = size
|
||||
|
||||
all_objects.append(
|
||||
ObjectInfo(
|
||||
key=key,
|
||||
size=size,
|
||||
last_modified=obj_dict.get("last_modified", ""),
|
||||
etag=obj_dict.get("etag"),
|
||||
storage_class=obj_dict.get("storage_class", "STANDARD"),
|
||||
original_size=original_size,
|
||||
compressed_size=size,
|
||||
is_delta=is_delta,
|
||||
compression_ratio=compression_ratio,
|
||||
reference_key=metadata.get("ref_key") if metadata else None,
|
||||
)
|
||||
)
|
||||
|
||||
# Calculate statistics - COUNT ALL FILES
|
||||
total_original_size = 0
|
||||
total_compressed_size = 0
|
||||
delta_count = 0
|
||||
direct_count = 0
|
||||
reference_files = {} # Track all reference.bin files and their deltaspaces
|
||||
|
||||
# First pass: identify what we have
|
||||
for obj in all_objects:
|
||||
if obj.key.endswith("/reference.bin") or obj.key == "reference.bin":
|
||||
# Extract deltaspace prefix
|
||||
if "/" in obj.key:
|
||||
deltaspace = obj.key.rsplit("/reference.bin", 1)[0]
|
||||
else:
|
||||
deltaspace = "" # Root level reference.bin
|
||||
reference_files[deltaspace] = obj.size
|
||||
elif obj.is_delta:
|
||||
delta_count += 1
|
||||
else:
|
||||
direct_count += 1
|
||||
|
||||
# Second pass: calculate sizes
|
||||
for obj in all_objects:
|
||||
# Skip reference.bin in this pass (we'll handle it separately)
|
||||
if obj.key.endswith("/reference.bin") or obj.key == "reference.bin":
|
||||
continue
|
||||
|
||||
if obj.is_delta:
|
||||
# Delta file: original from metadata, compressed = delta size
|
||||
if obj.original_size and obj.original_size != obj.size:
|
||||
client.service.logger.debug(
|
||||
f"Delta {obj.key}: using original_size={obj.original_size}"
|
||||
)
|
||||
total_original_size += obj.original_size
|
||||
else:
|
||||
client.service.logger.warning(
|
||||
f"Delta {obj.key}: no original_size, using compressed size={obj.size}"
|
||||
)
|
||||
total_original_size += obj.size
|
||||
total_compressed_size += obj.size
|
||||
else:
|
||||
# Direct files: original = compressed = actual size
|
||||
total_original_size += obj.size
|
||||
total_compressed_size += obj.size
|
||||
|
||||
# Handle reference.bin files
|
||||
total_reference_size = sum(reference_files.values())
|
||||
|
||||
if delta_count > 0 and total_reference_size > 0:
|
||||
# Add all reference.bin files to compressed size
|
||||
total_compressed_size += total_reference_size
|
||||
client.service.logger.info(
|
||||
f"Including {len(reference_files)} reference.bin file(s) ({total_reference_size:,} bytes) in compressed size"
|
||||
)
|
||||
elif delta_count == 0 and total_reference_size > 0:
|
||||
# ORPHANED REFERENCE WARNING
|
||||
waste_mb = total_reference_size / 1024 / 1024
|
||||
client.service.logger.warning(
|
||||
f"\n{'=' * 60}\n"
|
||||
f"WARNING: ORPHANED REFERENCE FILE(S) DETECTED!\n"
|
||||
f"{'=' * 60}\n"
|
||||
f"Found {len(reference_files)} reference.bin file(s) totaling {total_reference_size:,} bytes ({waste_mb:.2f} MB)\n"
|
||||
f"but NO delta files are using them.\n"
|
||||
f"\n"
|
||||
f"This wastes {waste_mb:.2f} MB of storage!\n"
|
||||
f"\n"
|
||||
f"Orphaned reference files:\n"
|
||||
)
|
||||
for deltaspace, size in reference_files.items():
|
||||
path = f"{deltaspace}/reference.bin" if deltaspace else "reference.bin"
|
||||
client.service.logger.warning(f" - s3://{bucket}/{path} ({size:,} bytes)")
|
||||
|
||||
client.service.logger.warning("\nConsider removing these orphaned files:\n")
|
||||
for deltaspace in reference_files:
|
||||
path = f"{deltaspace}/reference.bin" if deltaspace else "reference.bin"
|
||||
client.service.logger.warning(f" aws s3 rm s3://{bucket}/{path}")
|
||||
|
||||
client.service.logger.warning(f"{'=' * 60}")
|
||||
|
||||
space_saved = total_original_size - total_compressed_size
|
||||
avg_ratio = (space_saved / total_original_size) if total_original_size > 0 else 0.0
|
||||
|
||||
return BucketStats(
|
||||
bucket=bucket,
|
||||
object_count=delta_count + direct_count, # Only count user files, not reference.bin
|
||||
total_size=total_original_size,
|
||||
compressed_size=total_compressed_size,
|
||||
space_saved=space_saved,
|
||||
average_compression_ratio=avg_ratio,
|
||||
delta_objects=delta_count,
|
||||
direct_objects=direct_count,
|
||||
)
|
||||
|
||||
|
||||
def estimate_compression(
|
||||
client: Any, # DeltaGliderClient
|
||||
file_path: str | Path,
|
||||
bucket: str,
|
||||
prefix: str = "",
|
||||
sample_size: int = 1024 * 1024,
|
||||
) -> CompressionEstimate:
|
||||
"""Estimate compression ratio before upload.
|
||||
|
||||
Args:
|
||||
client: DeltaGliderClient instance
|
||||
file_path: Local file to estimate
|
||||
bucket: Target bucket
|
||||
prefix: Target prefix (for finding similar files)
|
||||
sample_size: Bytes to sample for estimation (default 1MB)
|
||||
|
||||
Returns:
|
||||
CompressionEstimate with predicted compression
|
||||
"""
|
||||
file_path = Path(file_path)
|
||||
file_size = file_path.stat().st_size
|
||||
|
||||
# Check file extension
|
||||
ext = file_path.suffix.lower()
|
||||
delta_extensions = {
|
||||
".zip",
|
||||
".tar",
|
||||
".gz",
|
||||
".tar.gz",
|
||||
".tgz",
|
||||
".bz2",
|
||||
".tar.bz2",
|
||||
".xz",
|
||||
".tar.xz",
|
||||
".7z",
|
||||
".rar",
|
||||
".dmg",
|
||||
".iso",
|
||||
".pkg",
|
||||
".deb",
|
||||
".rpm",
|
||||
".apk",
|
||||
".jar",
|
||||
".war",
|
||||
".ear",
|
||||
}
|
||||
|
||||
# Already compressed formats that won't benefit from delta
|
||||
incompressible = {".jpg", ".jpeg", ".png", ".mp4", ".mp3", ".avi", ".mov"}
|
||||
|
||||
if ext in incompressible:
|
||||
return CompressionEstimate(
|
||||
original_size=file_size,
|
||||
estimated_compressed_size=file_size,
|
||||
estimated_ratio=0.0,
|
||||
confidence=0.95,
|
||||
should_use_delta=False,
|
||||
)
|
||||
|
||||
if ext not in delta_extensions:
|
||||
# Unknown type, conservative estimate
|
||||
return CompressionEstimate(
|
||||
original_size=file_size,
|
||||
estimated_compressed_size=file_size,
|
||||
estimated_ratio=0.0,
|
||||
confidence=0.5,
|
||||
should_use_delta=file_size > 1024 * 1024, # Only for files > 1MB
|
||||
)
|
||||
|
||||
# Look for similar files in the target location
|
||||
similar_files = find_similar_files(client, bucket, prefix, file_path.name)
|
||||
|
||||
if similar_files:
|
||||
# If we have similar files, estimate high compression
|
||||
estimated_ratio = 0.99 # 99% compression typical for similar versions
|
||||
confidence = 0.9
|
||||
recommended_ref = similar_files[0]["Key"] if similar_files else None
|
||||
else:
|
||||
# First file of its type
|
||||
estimated_ratio = 0.0
|
||||
confidence = 0.7
|
||||
recommended_ref = None
|
||||
|
||||
estimated_size = int(file_size * (1 - estimated_ratio))
|
||||
|
||||
return CompressionEstimate(
|
||||
original_size=file_size,
|
||||
estimated_compressed_size=estimated_size,
|
||||
estimated_ratio=estimated_ratio,
|
||||
confidence=confidence,
|
||||
recommended_reference=recommended_ref,
|
||||
should_use_delta=True,
|
||||
)
|
||||
|
||||
|
||||
def find_similar_files(
|
||||
client: Any, # DeltaGliderClient
|
||||
bucket: str,
|
||||
prefix: str,
|
||||
filename: str,
|
||||
limit: int = 5,
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Find similar files that could serve as references.
|
||||
|
||||
Args:
|
||||
client: DeltaGliderClient instance
|
||||
bucket: S3 bucket
|
||||
prefix: Prefix to search in
|
||||
filename: Filename to match against
|
||||
limit: Maximum number of results
|
||||
|
||||
Returns:
|
||||
List of similar files with scores
|
||||
"""
|
||||
# List objects in the prefix (no metadata needed for similarity check)
|
||||
response = client.list_objects(
|
||||
Bucket=bucket,
|
||||
Prefix=prefix,
|
||||
MaxKeys=1000,
|
||||
FetchMetadata=False, # Don't need metadata for similarity
|
||||
)
|
||||
|
||||
similar: list[dict[str, Any]] = []
|
||||
base_name = Path(filename).stem
|
||||
ext = Path(filename).suffix
|
||||
|
||||
for obj in response["Contents"]:
|
||||
obj_key = obj["Key"]
|
||||
obj_base = Path(obj_key).stem
|
||||
obj_ext = Path(obj_key).suffix
|
||||
|
||||
# Skip delta files and references
|
||||
if obj_key.endswith(".delta") or obj_key.endswith("reference.bin"):
|
||||
continue
|
||||
|
||||
score = 0.0
|
||||
|
||||
# Extension match
|
||||
if ext == obj_ext:
|
||||
score += 0.5
|
||||
|
||||
# Base name similarity
|
||||
if base_name in obj_base or obj_base in base_name:
|
||||
score += 0.3
|
||||
|
||||
# Version pattern match
|
||||
if re.search(r"v?\d+[\.\d]*", base_name) and re.search(r"v?\d+[\.\d]*", obj_base):
|
||||
score += 0.2
|
||||
|
||||
if score > 0.5:
|
||||
similar.append(
|
||||
{
|
||||
"Key": obj_key,
|
||||
"Size": obj["Size"],
|
||||
"Similarity": score,
|
||||
"LastModified": obj["LastModified"],
|
||||
}
|
||||
)
|
||||
|
||||
# Sort by similarity
|
||||
similar.sort(key=lambda x: x["Similarity"], reverse=True) # type: ignore
|
||||
|
||||
return similar[:limit]
|
||||
@@ -47,3 +47,15 @@ class PolicyViolationWarning(Warning):
|
||||
"""Policy violation warning."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class CacheMissError(DeltaGliderError):
|
||||
"""Cache miss - file not found in cache."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class CacheCorruptionError(DeltaGliderError):
|
||||
"""Cache corruption - SHA mismatch or tampering detected."""
|
||||
|
||||
pass
|
||||
|
||||
@@ -21,7 +21,6 @@ from .errors import (
|
||||
IntegrityMismatchError,
|
||||
NotFoundError,
|
||||
PolicyViolationWarning,
|
||||
StorageIOError,
|
||||
)
|
||||
from .models import (
|
||||
DeltaMeta,
|
||||
@@ -171,10 +170,28 @@ class DeltaService:
|
||||
if obj_head is None:
|
||||
raise NotFoundError(f"Object not found: {object_key.key}")
|
||||
|
||||
# Check if this is a regular S3 object (not uploaded via DeltaGlider)
|
||||
# Regular S3 objects won't have DeltaGlider metadata
|
||||
if "file_sha256" not in obj_head.metadata:
|
||||
raise StorageIOError(f"Missing metadata on {object_key.key}")
|
||||
# This is a regular S3 object, download it directly
|
||||
self.logger.info(
|
||||
"Downloading regular S3 object (no DeltaGlider metadata)",
|
||||
key=object_key.key,
|
||||
)
|
||||
self._get_direct(object_key, obj_head, out)
|
||||
duration = (self.clock.now() - start_time).total_seconds()
|
||||
self.logger.log_operation(
|
||||
op="get",
|
||||
key=object_key.key,
|
||||
deltaspace=f"{object_key.bucket}",
|
||||
sizes={"file": obj_head.size},
|
||||
durations={"total": duration},
|
||||
cache_hit=False,
|
||||
)
|
||||
self.metrics.timing("deltaglider.get.duration", duration)
|
||||
return
|
||||
|
||||
# Check if this is a direct upload (non-delta)
|
||||
# Check if this is a direct upload (non-delta) uploaded via DeltaGlider
|
||||
if obj_head.metadata.get("compression") == "none":
|
||||
# Direct download without delta processing
|
||||
self._get_direct(object_key, obj_head, out)
|
||||
@@ -213,7 +230,10 @@ class DeltaService:
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
tmp_path = Path(tmpdir)
|
||||
delta_path = tmp_path / "delta"
|
||||
ref_path = self.cache.ref_path(delta_space.bucket, delta_space.prefix)
|
||||
# SECURITY: Use validated ref to prevent TOCTOU attacks
|
||||
ref_path = self.cache.get_validated_ref(
|
||||
delta_space.bucket, delta_space.prefix, delta_meta.ref_sha256
|
||||
)
|
||||
out_path = tmp_path / "output"
|
||||
|
||||
# Download delta
|
||||
@@ -391,7 +411,8 @@ class DeltaService:
|
||||
if not cache_hit:
|
||||
self._cache_reference(delta_space, ref_sha256)
|
||||
|
||||
ref_path = self.cache.ref_path(delta_space.bucket, delta_space.prefix)
|
||||
# SECURITY: Use validated ref to prevent TOCTOU attacks
|
||||
ref_path = self.cache.get_validated_ref(delta_space.bucket, delta_space.prefix, ref_sha256)
|
||||
|
||||
# Create delta
|
||||
with tempfile.NamedTemporaryFile(suffix=".delta") as delta_file:
|
||||
@@ -659,12 +680,42 @@ class DeltaService:
|
||||
self.logger.debug(f"Could not clear cache for {object_key.key}: {e}")
|
||||
|
||||
elif is_delta:
|
||||
# Simply delete the delta file
|
||||
# Delete the delta file
|
||||
self.storage.delete(full_key)
|
||||
result["deleted"] = True
|
||||
result["type"] = "delta"
|
||||
result["original_name"] = obj_head.metadata.get("original_name", "unknown")
|
||||
|
||||
# Check if this was the last delta in the DeltaSpace - if so, clean up reference.bin
|
||||
if "/" in object_key.key:
|
||||
deltaspace_prefix = "/".join(object_key.key.split("/")[:-1])
|
||||
ref_key = f"{deltaspace_prefix}/reference.bin"
|
||||
|
||||
# Check if any other delta files exist in this DeltaSpace
|
||||
remaining_deltas = []
|
||||
for obj in self.storage.list(f"{object_key.bucket}/{deltaspace_prefix}"):
|
||||
if obj.key.endswith(".delta") and obj.key != object_key.key:
|
||||
remaining_deltas.append(obj.key)
|
||||
|
||||
if not remaining_deltas:
|
||||
# No more deltas - clean up the orphaned reference.bin
|
||||
ref_full_key = f"{object_key.bucket}/{ref_key}"
|
||||
ref_head = self.storage.head(ref_full_key)
|
||||
if ref_head:
|
||||
self.storage.delete(ref_full_key)
|
||||
self.logger.info(
|
||||
"Cleaned up orphaned reference.bin",
|
||||
ref_key=ref_key,
|
||||
reason="no remaining deltas",
|
||||
)
|
||||
result["cleaned_reference"] = ref_key
|
||||
|
||||
# Clear from cache
|
||||
try:
|
||||
self.cache.evict(object_key.bucket, deltaspace_prefix)
|
||||
except Exception as e:
|
||||
self.logger.debug(f"Could not clear cache for {deltaspace_prefix}: {e}")
|
||||
|
||||
elif is_direct:
|
||||
# Simply delete the direct upload
|
||||
self.storage.delete(full_key)
|
||||
@@ -797,7 +848,9 @@ class DeltaService:
|
||||
|
||||
# Check if there are any remaining files in this deltaspace
|
||||
# (outside of the deletion prefix)
|
||||
deltaspace_list_prefix = f"{bucket}/{deltaspace_prefix}" if deltaspace_prefix else bucket
|
||||
deltaspace_list_prefix = (
|
||||
f"{bucket}/{deltaspace_prefix}" if deltaspace_prefix else bucket
|
||||
)
|
||||
remaining_objects = list(self.storage.list(deltaspace_list_prefix))
|
||||
|
||||
# Filter out objects that are being deleted (within our deletion scope)
|
||||
@@ -830,7 +883,9 @@ class DeltaService:
|
||||
warnings_list = result["warnings"]
|
||||
assert isinstance(warnings_list, list)
|
||||
warnings_list.append(f"Kept reference {ref_key} (still in use)")
|
||||
self.logger.info(f"Kept reference {ref_key} - still in use outside deletion scope")
|
||||
self.logger.info(
|
||||
f"Kept reference {ref_key} - still in use outside deletion scope"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
failed_count = result["failed_count"]
|
||||
|
||||
@@ -15,6 +15,26 @@ class CachePort(Protocol):
|
||||
"""Check if reference exists and matches SHA."""
|
||||
...
|
||||
|
||||
def get_validated_ref(self, bucket: str, prefix: str, expected_sha: str) -> Path:
|
||||
"""Get cached reference with atomic SHA validation.
|
||||
|
||||
This method MUST be used instead of ref_path() to prevent TOCTOU attacks.
|
||||
It validates the SHA256 hash at the time of use, not just at cache check time.
|
||||
|
||||
Args:
|
||||
bucket: S3 bucket name
|
||||
prefix: Prefix/deltaspace within bucket
|
||||
expected_sha: Expected SHA256 hash of the file
|
||||
|
||||
Returns:
|
||||
Path to the validated cached file
|
||||
|
||||
Raises:
|
||||
CacheMissError: If cached file doesn't exist
|
||||
CacheCorruptionError: If SHA doesn't match (file corrupted or tampered)
|
||||
"""
|
||||
...
|
||||
|
||||
def write_ref(self, bucket: str, prefix: str, src: Path) -> Path:
|
||||
"""Cache reference file."""
|
||||
...
|
||||
@@ -22,3 +42,18 @@ class CachePort(Protocol):
|
||||
def evict(self, bucket: str, prefix: str) -> None:
|
||||
"""Remove cached reference."""
|
||||
...
|
||||
|
||||
def clear(self) -> None:
|
||||
"""Clear all cached references.
|
||||
|
||||
This method forcibly removes all cached data, useful for:
|
||||
- Long-running applications that need to free memory
|
||||
- Test cleanup
|
||||
- Manual cache invalidation
|
||||
- Ensuring fresh data fetch
|
||||
|
||||
Note: For filesystem caches, this removes all files in the cache directory.
|
||||
For memory caches, this clears all in-memory data.
|
||||
For encrypted caches, this also clears encryption key mappings.
|
||||
"""
|
||||
...
|
||||
|
||||
152
src/deltaglider/response_builders.py
Normal file
152
src/deltaglider/response_builders.py
Normal file
@@ -0,0 +1,152 @@
|
||||
"""Type-safe response builders using TypedDicts for internal type safety.
|
||||
|
||||
This module provides builder functions that construct boto3-compatible responses
|
||||
with full compile-time type validation using TypedDicts. At runtime, TypedDicts
|
||||
are plain dicts, so there's no conversion overhead.
|
||||
|
||||
Benefits:
|
||||
- Field name typos caught by mypy (e.g., "HTTPStatusCode" → "HttpStatusCode")
|
||||
- Wrong types caught by mypy (e.g., string instead of int)
|
||||
- Missing required fields caught by mypy
|
||||
- Extra unknown fields caught by mypy
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
from .types import (
|
||||
CommonPrefix,
|
||||
DeleteObjectResponse,
|
||||
GetObjectResponse,
|
||||
ListObjectsV2Response,
|
||||
PutObjectResponse,
|
||||
ResponseMetadata,
|
||||
S3Object,
|
||||
)
|
||||
|
||||
|
||||
def build_response_metadata(status_code: int = 200) -> ResponseMetadata:
|
||||
"""Build ResponseMetadata with full type safety via TypedDict.
|
||||
|
||||
TypedDict is a dict at runtime - no conversion needed!
|
||||
mypy validates all fields match ResponseMetadata TypedDict.
|
||||
Uses our types.py TypedDict which has proper NotRequired fields.
|
||||
"""
|
||||
# Build as TypedDict - mypy validates field names and types!
|
||||
metadata: ResponseMetadata = {
|
||||
"HTTPStatusCode": status_code,
|
||||
# All other fields are NotRequired - can be omitted!
|
||||
}
|
||||
return metadata # Returns dict at runtime, ResponseMetadata type at compile-time
|
||||
|
||||
|
||||
def build_put_response(
|
||||
etag: str,
|
||||
*,
|
||||
version_id: str | None = None,
|
||||
deltaglider_info: dict[str, Any] | None = None,
|
||||
) -> PutObjectResponse:
|
||||
"""Build PutObjectResponse with full type safety via TypedDict.
|
||||
|
||||
Uses our types.py TypedDict which has proper NotRequired fields.
|
||||
mypy validates all field names, types, and structure.
|
||||
"""
|
||||
# Build as TypedDict - mypy catches typos and type errors!
|
||||
response: PutObjectResponse = {
|
||||
"ETag": etag,
|
||||
"ResponseMetadata": build_response_metadata(),
|
||||
}
|
||||
|
||||
if version_id:
|
||||
response["VersionId"] = version_id
|
||||
|
||||
# DeltaGlider extension - add as Any field
|
||||
if deltaglider_info:
|
||||
response["DeltaGliderInfo"] = deltaglider_info # type: ignore[typeddict-item]
|
||||
|
||||
return response # Returns dict at runtime, PutObjectResponse type at compile-time
|
||||
|
||||
|
||||
def build_get_response(
|
||||
body: Any,
|
||||
content_length: int,
|
||||
etag: str,
|
||||
metadata: dict[str, Any],
|
||||
) -> GetObjectResponse:
|
||||
"""Build GetObjectResponse with full type safety via TypedDict.
|
||||
|
||||
Uses our types.py TypedDict which has proper NotRequired fields.
|
||||
mypy validates all field names, types, and structure.
|
||||
"""
|
||||
# Build as TypedDict - mypy catches typos and type errors!
|
||||
response: GetObjectResponse = {
|
||||
"Body": body,
|
||||
"ContentLength": content_length,
|
||||
"ETag": etag,
|
||||
"Metadata": metadata,
|
||||
"ResponseMetadata": build_response_metadata(),
|
||||
}
|
||||
return response # Returns dict at runtime, GetObjectResponse type at compile-time
|
||||
|
||||
|
||||
def build_list_objects_response(
|
||||
bucket: str,
|
||||
prefix: str,
|
||||
delimiter: str,
|
||||
max_keys: int,
|
||||
contents: list[S3Object],
|
||||
common_prefixes: list[CommonPrefix] | None,
|
||||
is_truncated: bool,
|
||||
next_continuation_token: str | None,
|
||||
continuation_token: str | None,
|
||||
) -> ListObjectsV2Response:
|
||||
"""Build ListObjectsV2Response with full type safety via TypedDict.
|
||||
|
||||
Uses our types.py TypedDict which has proper NotRequired fields.
|
||||
mypy validates all field names, types, and structure.
|
||||
"""
|
||||
# Build as TypedDict - mypy catches typos and type errors!
|
||||
response: ListObjectsV2Response = {
|
||||
"IsTruncated": is_truncated,
|
||||
"Contents": contents,
|
||||
"Name": bucket,
|
||||
"Prefix": prefix,
|
||||
"Delimiter": delimiter,
|
||||
"MaxKeys": max_keys,
|
||||
"KeyCount": len(contents),
|
||||
"ResponseMetadata": build_response_metadata(),
|
||||
}
|
||||
|
||||
# Add optional fields
|
||||
if common_prefixes:
|
||||
response["CommonPrefixes"] = common_prefixes
|
||||
|
||||
if next_continuation_token:
|
||||
response["NextContinuationToken"] = next_continuation_token
|
||||
|
||||
if continuation_token:
|
||||
response["ContinuationToken"] = continuation_token
|
||||
|
||||
return response # Returns dict at runtime, ListObjectsV2Response type at compile-time
|
||||
|
||||
|
||||
def build_delete_response(
|
||||
delete_marker: bool = False,
|
||||
status_code: int = 204,
|
||||
deltaglider_info: dict[str, Any] | None = None,
|
||||
) -> DeleteObjectResponse:
|
||||
"""Build DeleteObjectResponse with full type safety via TypedDict.
|
||||
|
||||
Uses our types.py TypedDict which has proper NotRequired fields.
|
||||
mypy validates all field names, types, and structure.
|
||||
"""
|
||||
# Build as TypedDict - mypy catches typos and type errors!
|
||||
response: DeleteObjectResponse = {
|
||||
"DeleteMarker": delete_marker,
|
||||
"ResponseMetadata": build_response_metadata(status_code),
|
||||
}
|
||||
|
||||
# DeltaGlider extension
|
||||
if deltaglider_info:
|
||||
response["DeltaGliderInfo"] = deltaglider_info # type: ignore[typeddict-item]
|
||||
|
||||
return response # Returns dict at runtime, DeleteObjectResponse type at compile-time
|
||||
355
src/deltaglider/types.py
Normal file
355
src/deltaglider/types.py
Normal file
@@ -0,0 +1,355 @@
|
||||
"""Type definitions for boto3-compatible responses.
|
||||
|
||||
These TypedDict definitions provide type hints for DeltaGlider's boto3-compatible
|
||||
responses. All methods return plain `dict[str, Any]` at runtime for maximum
|
||||
flexibility and boto3 compatibility.
|
||||
|
||||
## Basic Usage (Recommended)
|
||||
|
||||
Use DeltaGlider with simple dict access - no type imports needed:
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
|
||||
client = create_client()
|
||||
|
||||
# Returns plain dict - 100% boto3 compatible
|
||||
response = client.put_object(Bucket='my-bucket', Key='file.zip', Body=data)
|
||||
print(response['ETag'])
|
||||
|
||||
# List objects with dict access
|
||||
listing = client.list_objects(Bucket='my-bucket')
|
||||
for obj in listing['Contents']:
|
||||
print(f"{obj['Key']}: {obj['Size']} bytes")
|
||||
```
|
||||
|
||||
## Optional Type Hints
|
||||
|
||||
For IDE autocomplete and type checking, you can use our convenience TypedDicts:
|
||||
|
||||
```python
|
||||
from deltaglider import create_client
|
||||
from deltaglider.types import PutObjectResponse, ListObjectsV2Response
|
||||
|
||||
client = create_client()
|
||||
response: PutObjectResponse = client.put_object(...) # IDE autocomplete
|
||||
listing: ListObjectsV2Response = client.list_objects(...)
|
||||
```
|
||||
|
||||
## Advanced: boto3-stubs Integration
|
||||
|
||||
For strictest type checking (requires boto3-stubs installation):
|
||||
|
||||
```bash
|
||||
pip install boto3-stubs[s3]
|
||||
```
|
||||
|
||||
```python
|
||||
from mypy_boto3_s3.type_defs import PutObjectOutputTypeDef
|
||||
response: PutObjectOutputTypeDef = client.put_object(...)
|
||||
```
|
||||
|
||||
**Note**: boto3-stubs TypedDefs are very strict and require ALL optional fields.
|
||||
DeltaGlider returns partial dicts for better boto3 compatibility, so boto3-stubs
|
||||
types may show false positive errors. Use `dict[str, Any]` or our TypedDicts instead.
|
||||
|
||||
## Design Philosophy
|
||||
|
||||
DeltaGlider returns `dict[str, Any]` from all boto3-compatible methods because:
|
||||
1. **Flexibility**: boto3 responses vary by service and operation
|
||||
2. **Compatibility**: Exact match with boto3 runtime behavior
|
||||
3. **Simplicity**: No complex type dependencies for users
|
||||
4. **Optional Typing**: Users choose their preferred level of type safety
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from typing import Any, Literal, NotRequired, TypedDict
|
||||
|
||||
# ============================================================================
|
||||
# S3 Object Types
|
||||
# ============================================================================
|
||||
|
||||
|
||||
class S3Object(TypedDict):
|
||||
"""An S3 object returned in list operations.
|
||||
|
||||
Compatible with boto3's S3.Client.list_objects_v2() response Contents.
|
||||
"""
|
||||
|
||||
Key: str
|
||||
Size: int
|
||||
LastModified: datetime
|
||||
ETag: NotRequired[str]
|
||||
StorageClass: NotRequired[str]
|
||||
Owner: NotRequired[dict[str, str]]
|
||||
Metadata: NotRequired[dict[str, str]]
|
||||
|
||||
|
||||
class CommonPrefix(TypedDict):
|
||||
"""A common prefix (directory) in S3 listing.
|
||||
|
||||
Compatible with boto3's S3.Client.list_objects_v2() response CommonPrefixes.
|
||||
"""
|
||||
|
||||
Prefix: str
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Response Metadata (used in all responses)
|
||||
# ============================================================================
|
||||
|
||||
|
||||
class ResponseMetadata(TypedDict):
|
||||
"""Metadata about the API response.
|
||||
|
||||
Compatible with all boto3 responses.
|
||||
"""
|
||||
|
||||
RequestId: NotRequired[str]
|
||||
HostId: NotRequired[str]
|
||||
HTTPStatusCode: int
|
||||
HTTPHeaders: NotRequired[dict[str, str]]
|
||||
RetryAttempts: NotRequired[int]
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# List Operations Response Types
|
||||
# ============================================================================
|
||||
|
||||
|
||||
class ListObjectsV2Response(TypedDict):
|
||||
"""Response from list_objects_v2 operation.
|
||||
|
||||
100% compatible with boto3's S3.Client.list_objects_v2() response.
|
||||
|
||||
Example:
|
||||
```python
|
||||
client = create_client()
|
||||
response: ListObjectsV2Response = client.list_objects(
|
||||
Bucket='my-bucket',
|
||||
Prefix='path/',
|
||||
Delimiter='/'
|
||||
)
|
||||
|
||||
for obj in response['Contents']:
|
||||
print(f"{obj['Key']}: {obj['Size']} bytes")
|
||||
|
||||
for prefix in response.get('CommonPrefixes', []):
|
||||
print(f"Directory: {prefix['Prefix']}")
|
||||
```
|
||||
"""
|
||||
|
||||
Contents: list[S3Object]
|
||||
Name: NotRequired[str] # Bucket name
|
||||
Prefix: NotRequired[str]
|
||||
Delimiter: NotRequired[str]
|
||||
MaxKeys: NotRequired[int]
|
||||
CommonPrefixes: NotRequired[list[CommonPrefix]]
|
||||
EncodingType: NotRequired[str]
|
||||
KeyCount: NotRequired[int]
|
||||
ContinuationToken: NotRequired[str]
|
||||
NextContinuationToken: NotRequired[str]
|
||||
StartAfter: NotRequired[str]
|
||||
IsTruncated: NotRequired[bool]
|
||||
ResponseMetadata: NotRequired[ResponseMetadata]
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Put/Get/Delete Response Types
|
||||
# ============================================================================
|
||||
|
||||
|
||||
class PutObjectResponse(TypedDict):
|
||||
"""Response from put_object operation.
|
||||
|
||||
Compatible with boto3's S3.Client.put_object() response.
|
||||
"""
|
||||
|
||||
ETag: str
|
||||
VersionId: NotRequired[str]
|
||||
ServerSideEncryption: NotRequired[str]
|
||||
ResponseMetadata: NotRequired[ResponseMetadata]
|
||||
|
||||
|
||||
class GetObjectResponse(TypedDict):
|
||||
"""Response from get_object operation.
|
||||
|
||||
Compatible with boto3's S3.Client.get_object() response.
|
||||
"""
|
||||
|
||||
Body: Any # StreamingBody in boto3, bytes in DeltaGlider
|
||||
ContentLength: int
|
||||
ContentType: NotRequired[str]
|
||||
ETag: NotRequired[str]
|
||||
LastModified: NotRequired[datetime]
|
||||
Metadata: NotRequired[dict[str, str]]
|
||||
VersionId: NotRequired[str]
|
||||
StorageClass: NotRequired[str]
|
||||
ResponseMetadata: NotRequired[ResponseMetadata]
|
||||
|
||||
|
||||
class DeleteObjectResponse(TypedDict):
|
||||
"""Response from delete_object operation.
|
||||
|
||||
Compatible with boto3's S3.Client.delete_object() response.
|
||||
"""
|
||||
|
||||
DeleteMarker: NotRequired[bool]
|
||||
VersionId: NotRequired[str]
|
||||
ResponseMetadata: NotRequired[ResponseMetadata]
|
||||
|
||||
|
||||
class DeletedObject(TypedDict):
|
||||
"""A successfully deleted object.
|
||||
|
||||
Compatible with boto3's S3.Client.delete_objects() response Deleted.
|
||||
"""
|
||||
|
||||
Key: str
|
||||
VersionId: NotRequired[str]
|
||||
DeleteMarker: NotRequired[bool]
|
||||
DeleteMarkerVersionId: NotRequired[str]
|
||||
|
||||
|
||||
class DeleteError(TypedDict):
|
||||
"""An error that occurred during deletion.
|
||||
|
||||
Compatible with boto3's S3.Client.delete_objects() response Errors.
|
||||
"""
|
||||
|
||||
Key: str
|
||||
Code: str
|
||||
Message: str
|
||||
VersionId: NotRequired[str]
|
||||
|
||||
|
||||
class DeleteObjectsResponse(TypedDict):
|
||||
"""Response from delete_objects operation.
|
||||
|
||||
Compatible with boto3's S3.Client.delete_objects() response.
|
||||
"""
|
||||
|
||||
Deleted: NotRequired[list[DeletedObject]]
|
||||
Errors: NotRequired[list[DeleteError]]
|
||||
ResponseMetadata: NotRequired[ResponseMetadata]
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Head Object Response
|
||||
# ============================================================================
|
||||
|
||||
|
||||
class HeadObjectResponse(TypedDict):
|
||||
"""Response from head_object operation.
|
||||
|
||||
Compatible with boto3's S3.Client.head_object() response.
|
||||
"""
|
||||
|
||||
ContentLength: int
|
||||
ContentType: NotRequired[str]
|
||||
ETag: NotRequired[str]
|
||||
LastModified: NotRequired[datetime]
|
||||
Metadata: NotRequired[dict[str, str]]
|
||||
VersionId: NotRequired[str]
|
||||
StorageClass: NotRequired[str]
|
||||
ResponseMetadata: NotRequired[ResponseMetadata]
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Bucket Operations
|
||||
# ============================================================================
|
||||
|
||||
|
||||
class Bucket(TypedDict):
|
||||
"""An S3 bucket.
|
||||
|
||||
Compatible with boto3's S3.Client.list_buckets() response Buckets.
|
||||
"""
|
||||
|
||||
Name: str
|
||||
CreationDate: datetime
|
||||
|
||||
|
||||
class ListBucketsResponse(TypedDict):
|
||||
"""Response from list_buckets operation.
|
||||
|
||||
Compatible with boto3's S3.Client.list_buckets() response.
|
||||
"""
|
||||
|
||||
Buckets: list[Bucket]
|
||||
Owner: NotRequired[dict[str, str]]
|
||||
ResponseMetadata: NotRequired[ResponseMetadata]
|
||||
|
||||
|
||||
class CreateBucketResponse(TypedDict):
|
||||
"""Response from create_bucket operation.
|
||||
|
||||
Compatible with boto3's S3.Client.create_bucket() response.
|
||||
"""
|
||||
|
||||
Location: NotRequired[str]
|
||||
ResponseMetadata: NotRequired[ResponseMetadata]
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Multipart Upload Types
|
||||
# ============================================================================
|
||||
|
||||
|
||||
class CompletedPart(TypedDict):
|
||||
"""A completed part in a multipart upload."""
|
||||
|
||||
PartNumber: int
|
||||
ETag: str
|
||||
|
||||
|
||||
class CompleteMultipartUploadResponse(TypedDict):
|
||||
"""Response from complete_multipart_upload operation."""
|
||||
|
||||
Location: NotRequired[str]
|
||||
Bucket: NotRequired[str]
|
||||
Key: NotRequired[str]
|
||||
ETag: NotRequired[str]
|
||||
VersionId: NotRequired[str]
|
||||
ResponseMetadata: NotRequired[ResponseMetadata]
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Copy Operations
|
||||
# ============================================================================
|
||||
|
||||
|
||||
class CopyObjectResponse(TypedDict):
|
||||
"""Response from copy_object operation.
|
||||
|
||||
Compatible with boto3's S3.Client.copy_object() response.
|
||||
"""
|
||||
|
||||
CopyObjectResult: NotRequired[dict[str, Any]]
|
||||
ETag: NotRequired[str]
|
||||
LastModified: NotRequired[datetime]
|
||||
VersionId: NotRequired[str]
|
||||
ResponseMetadata: NotRequired[ResponseMetadata]
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Type Aliases for Convenience
|
||||
# ============================================================================
|
||||
|
||||
# Common parameter types
|
||||
BucketName = str
|
||||
ObjectKey = str
|
||||
Prefix = str
|
||||
Delimiter = str
|
||||
|
||||
# Storage class options
|
||||
StorageClass = Literal[
|
||||
"STANDARD",
|
||||
"REDUCED_REDUNDANCY",
|
||||
"STANDARD_IA",
|
||||
"ONEZONE_IA",
|
||||
"INTELLIGENT_TIERING",
|
||||
"GLACIER",
|
||||
"DEEP_ARCHIVE",
|
||||
"GLACIER_IR",
|
||||
]
|
||||
@@ -8,7 +8,7 @@ from unittest.mock import Mock
|
||||
import pytest
|
||||
|
||||
from deltaglider.adapters import (
|
||||
FsCacheAdapter,
|
||||
ContentAddressedCache,
|
||||
NoopMetricsAdapter,
|
||||
Sha256Adapter,
|
||||
StdLoggerAdapter,
|
||||
@@ -59,9 +59,9 @@ def real_hasher():
|
||||
|
||||
@pytest.fixture
|
||||
def cache_adapter(temp_dir, real_hasher):
|
||||
"""Create filesystem cache adapter."""
|
||||
"""Create content-addressed storage cache adapter."""
|
||||
cache_dir = temp_dir / "cache"
|
||||
return FsCacheAdapter(cache_dir, real_hasher)
|
||||
return ContentAddressedCache(cache_dir, real_hasher)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
|
||||
@@ -15,10 +15,19 @@ from deltaglider.app.cli.main import cli
|
||||
def extract_json_from_cli_output(output: str) -> dict:
|
||||
"""Extract JSON from CLI output that may contain log messages."""
|
||||
lines = output.split("\n")
|
||||
json_start = next(i for i, line in enumerate(lines) if line.strip().startswith("{"))
|
||||
json_end = next(i for i in range(json_start, len(lines)) if lines[i].strip() == "}") + 1
|
||||
json_text = "\n".join(lines[json_start:json_end])
|
||||
return json.loads(json_text)
|
||||
for i, line in enumerate(lines):
|
||||
if line.strip().startswith("{"):
|
||||
json_start = i
|
||||
json_end = (
|
||||
next(
|
||||
(j for j in range(json_start, len(lines)) if lines[j].strip() == "}"),
|
||||
len(lines) - 1,
|
||||
)
|
||||
+ 1
|
||||
)
|
||||
json_text = "\n".join(lines[json_start:json_end])
|
||||
return json.loads(json_text)
|
||||
raise ValueError("No JSON found in CLI output")
|
||||
|
||||
|
||||
@pytest.mark.e2e
|
||||
@@ -72,34 +81,35 @@ class TestLocalStackE2E:
|
||||
file2.write_text("Plugin version 1.0.1 content with minor changes")
|
||||
|
||||
# Upload first file (becomes reference)
|
||||
result = runner.invoke(cli, ["put", str(file1), f"s3://{test_bucket}/plugins/"])
|
||||
result = runner.invoke(cli, ["cp", str(file1), f"s3://{test_bucket}/plugins/"])
|
||||
assert result.exit_code == 0
|
||||
output1 = extract_json_from_cli_output(result.output)
|
||||
assert output1["operation"] == "create_reference"
|
||||
assert output1["key"] == "plugins/reference.bin"
|
||||
assert "reference" in result.output.lower() or "upload:" in result.output
|
||||
|
||||
# Verify reference was created
|
||||
objects = s3_client.list_objects_v2(Bucket=test_bucket, Prefix="plugins/")
|
||||
# Verify reference was created (deltaspace is root, files are at root level)
|
||||
objects = s3_client.list_objects_v2(Bucket=test_bucket)
|
||||
assert "Contents" in objects
|
||||
keys = [obj["Key"] for obj in objects["Contents"]]
|
||||
assert "plugins/reference.bin" in keys
|
||||
assert "plugins/plugin-v1.0.0.zip.delta" in keys
|
||||
# Files are stored at root level: reference.bin and plugin-v1.0.0.zip.delta
|
||||
assert "reference.bin" in keys
|
||||
assert "plugin-v1.0.0.zip.delta" in keys
|
||||
|
||||
# Upload second file (creates delta)
|
||||
result = runner.invoke(cli, ["put", str(file2), f"s3://{test_bucket}/plugins/"])
|
||||
result = runner.invoke(cli, ["cp", str(file2), f"s3://{test_bucket}/plugins/"])
|
||||
assert result.exit_code == 0
|
||||
output2 = extract_json_from_cli_output(result.output)
|
||||
assert output2["operation"] == "create_delta"
|
||||
assert output2["key"] == "plugins/plugin-v1.0.1.zip.delta"
|
||||
assert "delta_ratio" in output2
|
||||
assert "upload:" in result.output
|
||||
|
||||
# Verify delta was created
|
||||
objects = s3_client.list_objects_v2(Bucket=test_bucket)
|
||||
keys = [obj["Key"] for obj in objects["Contents"]]
|
||||
assert "plugin-v1.0.1.zip.delta" in keys
|
||||
|
||||
# Download and verify second file
|
||||
output_file = tmpdir / "downloaded.zip"
|
||||
result = runner.invoke(
|
||||
cli,
|
||||
[
|
||||
"get",
|
||||
f"s3://{test_bucket}/plugins/plugin-v1.0.1.zip.delta",
|
||||
"-o",
|
||||
"cp",
|
||||
f"s3://{test_bucket}/plugin-v1.0.1.zip.delta",
|
||||
str(output_file),
|
||||
],
|
||||
)
|
||||
@@ -109,41 +119,42 @@ class TestLocalStackE2E:
|
||||
# Verify integrity
|
||||
result = runner.invoke(
|
||||
cli,
|
||||
["verify", f"s3://{test_bucket}/plugins/plugin-v1.0.1.zip.delta"],
|
||||
["verify", f"s3://{test_bucket}/plugin-v1.0.1.zip.delta"],
|
||||
)
|
||||
assert result.exit_code == 0
|
||||
verify_output = extract_json_from_cli_output(result.output)
|
||||
assert verify_output["valid"] is True
|
||||
|
||||
def test_multiple_deltaspaces(self, test_bucket, s3_client):
|
||||
"""Test multiple deltaspace directories with separate references."""
|
||||
"""Test shared deltaspace with multiple files."""
|
||||
runner = CliRunner()
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
tmpdir = Path(tmpdir)
|
||||
|
||||
# Create test files for different deltaspaces
|
||||
# Create test files for the same deltaspace
|
||||
file_a1 = tmpdir / "app-a-v1.zip"
|
||||
file_a1.write_text("Application A version 1")
|
||||
|
||||
file_b1 = tmpdir / "app-b-v1.zip"
|
||||
file_b1.write_text("Application B version 1")
|
||||
|
||||
# Upload to different deltaspaces
|
||||
result = runner.invoke(cli, ["put", str(file_a1), f"s3://{test_bucket}/apps/app-a/"])
|
||||
# Upload to same deltaspace (apps/) with different target paths
|
||||
result = runner.invoke(cli, ["cp", str(file_a1), f"s3://{test_bucket}/apps/app-a/"])
|
||||
assert result.exit_code == 0
|
||||
|
||||
result = runner.invoke(cli, ["put", str(file_b1), f"s3://{test_bucket}/apps/app-b/"])
|
||||
result = runner.invoke(cli, ["cp", str(file_b1), f"s3://{test_bucket}/apps/app-b/"])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Verify each deltaspace has its own reference
|
||||
objects_a = s3_client.list_objects_v2(Bucket=test_bucket, Prefix="apps/app-a/")
|
||||
keys_a = [obj["Key"] for obj in objects_a["Contents"]]
|
||||
assert "apps/app-a/reference.bin" in keys_a
|
||||
|
||||
objects_b = s3_client.list_objects_v2(Bucket=test_bucket, Prefix="apps/app-b/")
|
||||
keys_b = [obj["Key"] for obj in objects_b["Contents"]]
|
||||
assert "apps/app-b/reference.bin" in keys_b
|
||||
# Verify deltaspace has reference (both files share apps/ deltaspace)
|
||||
objects = s3_client.list_objects_v2(Bucket=test_bucket, Prefix="apps/")
|
||||
assert "Contents" in objects
|
||||
keys = [obj["Key"] for obj in objects["Contents"]]
|
||||
# Should have: apps/reference.bin, apps/app-a-v1.zip.delta, apps/app-b-v1.zip.delta
|
||||
# Both files share the same deltaspace (apps/) so only one reference
|
||||
assert "apps/reference.bin" in keys
|
||||
assert "apps/app-a-v1.zip.delta" in keys
|
||||
assert "apps/app-b-v1.zip.delta" in keys
|
||||
|
||||
def test_large_delta_warning(self, test_bucket, s3_client):
|
||||
"""Test delta compression with different content."""
|
||||
@@ -160,14 +171,14 @@ class TestLocalStackE2E:
|
||||
file2.write_text("B" * 1000) # Completely different
|
||||
|
||||
# Upload first file
|
||||
result = runner.invoke(cli, ["put", str(file1), f"s3://{test_bucket}/test/"])
|
||||
result = runner.invoke(cli, ["cp", str(file1), f"s3://{test_bucket}/test/"])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Upload second file with low max-ratio
|
||||
result = runner.invoke(
|
||||
cli,
|
||||
[
|
||||
"put",
|
||||
"cp",
|
||||
str(file2),
|
||||
f"s3://{test_bucket}/test/",
|
||||
"--max-ratio",
|
||||
@@ -175,9 +186,11 @@ class TestLocalStackE2E:
|
||||
], # Very low threshold
|
||||
)
|
||||
assert result.exit_code == 0
|
||||
# Even with completely different content, xdelta3 is efficient
|
||||
output = extract_json_from_cli_output(result.output)
|
||||
assert output["operation"] == "create_delta"
|
||||
# Delta ratio should be small even for different files (xdelta3 is very efficient)
|
||||
assert "delta_ratio" in output
|
||||
assert output["delta_ratio"] > 0.01 # Should exceed the very low threshold we set
|
||||
# Should still upload successfully even though delta exceeds threshold
|
||||
assert "upload:" in result.output
|
||||
|
||||
# Verify delta was created
|
||||
objects = s3_client.list_objects_v2(Bucket=test_bucket)
|
||||
assert "Contents" in objects
|
||||
keys = [obj["Key"] for obj in objects["Contents"]]
|
||||
assert "file2.zip.delta" in keys
|
||||
|
||||
343
tests/integration/test_bucket_management.py
Normal file
343
tests/integration/test_bucket_management.py
Normal file
@@ -0,0 +1,343 @@
|
||||
"""Tests for bucket management APIs."""
|
||||
|
||||
from typing import Any
|
||||
from unittest.mock import Mock
|
||||
|
||||
import pytest
|
||||
|
||||
from deltaglider.app.cli.main import create_service
|
||||
from deltaglider.client import DeltaGliderClient
|
||||
from deltaglider.client_models import BucketStats
|
||||
|
||||
|
||||
class TestBucketManagement:
|
||||
"""Test bucket creation, listing, and deletion."""
|
||||
|
||||
def test_create_bucket_success(self):
|
||||
"""Test creating a bucket successfully."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Mock boto3 client
|
||||
mock_boto3_client = Mock()
|
||||
mock_boto3_client.create_bucket.return_value = {"Location": "/test-bucket"}
|
||||
mock_storage.client = mock_boto3_client
|
||||
|
||||
client = DeltaGliderClient(service)
|
||||
response = client.create_bucket(Bucket="test-bucket")
|
||||
|
||||
# Verify response
|
||||
assert response["Location"] == "/test-bucket"
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
|
||||
|
||||
# Verify boto3 was called correctly
|
||||
mock_boto3_client.create_bucket.assert_called_once_with(Bucket="test-bucket")
|
||||
|
||||
def test_create_bucket_with_region(self):
|
||||
"""Test creating a bucket in a specific region."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Mock boto3 client
|
||||
mock_boto3_client = Mock()
|
||||
mock_boto3_client.create_bucket.return_value = {
|
||||
"Location": "http://test-bucket.s3.us-west-2.amazonaws.com/"
|
||||
}
|
||||
mock_storage.client = mock_boto3_client
|
||||
|
||||
client = DeltaGliderClient(service)
|
||||
response = client.create_bucket(
|
||||
Bucket="test-bucket",
|
||||
CreateBucketConfiguration={"LocationConstraint": "us-west-2"},
|
||||
)
|
||||
|
||||
# Verify response
|
||||
assert "Location" in response
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
|
||||
|
||||
# Verify boto3 was called with region config
|
||||
mock_boto3_client.create_bucket.assert_called_once_with(
|
||||
Bucket="test-bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-2"}
|
||||
)
|
||||
|
||||
def test_create_bucket_already_exists(self):
|
||||
"""Test creating a bucket that already exists returns success."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Mock boto3 client to raise BucketAlreadyExists
|
||||
mock_boto3_client = Mock()
|
||||
mock_boto3_client.create_bucket.side_effect = Exception("BucketAlreadyOwnedByYou")
|
||||
mock_storage.client = mock_boto3_client
|
||||
|
||||
client = DeltaGliderClient(service)
|
||||
response = client.create_bucket(Bucket="existing-bucket")
|
||||
|
||||
# Should return success (idempotent)
|
||||
assert response["Location"] == "/existing-bucket"
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
|
||||
|
||||
def test_list_buckets_success(self):
|
||||
"""Test listing buckets."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Mock boto3 client
|
||||
mock_boto3_client = Mock()
|
||||
mock_boto3_client.list_buckets.return_value = {
|
||||
"Buckets": [
|
||||
{"Name": "bucket1", "CreationDate": "2025-01-01T00:00:00Z"},
|
||||
{"Name": "bucket2", "CreationDate": "2025-01-02T00:00:00Z"},
|
||||
],
|
||||
"Owner": {"DisplayName": "test-user", "ID": "12345"},
|
||||
}
|
||||
mock_storage.client = mock_boto3_client
|
||||
|
||||
client = DeltaGliderClient(service)
|
||||
response = client.list_buckets()
|
||||
|
||||
# Verify response
|
||||
assert len(response["Buckets"]) == 2
|
||||
assert response["Buckets"][0]["Name"] == "bucket1"
|
||||
assert response["Buckets"][1]["Name"] == "bucket2"
|
||||
assert response["Owner"]["DisplayName"] == "test-user"
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
|
||||
|
||||
def test_list_buckets_empty(self):
|
||||
"""Test listing buckets when none exist."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Mock boto3 client with empty result
|
||||
mock_boto3_client = Mock()
|
||||
mock_boto3_client.list_buckets.return_value = {"Buckets": [], "Owner": {}}
|
||||
mock_storage.client = mock_boto3_client
|
||||
|
||||
client = DeltaGliderClient(service)
|
||||
response = client.list_buckets()
|
||||
|
||||
# Verify empty list
|
||||
assert response["Buckets"] == []
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
|
||||
|
||||
def test_list_buckets_includes_cached_stats(self):
|
||||
"""Bucket list should merge cached stats when available."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
mock_boto3_client = Mock()
|
||||
mock_boto3_client.list_buckets.return_value = {
|
||||
"Buckets": [
|
||||
{"Name": "bucket1", "CreationDate": "2025-01-01T00:00:00Z"},
|
||||
{"Name": "bucket2", "CreationDate": "2025-01-02T00:00:00Z"},
|
||||
],
|
||||
"Owner": {"DisplayName": "test-user", "ID": "12345"},
|
||||
}
|
||||
mock_storage.client = mock_boto3_client
|
||||
|
||||
client = DeltaGliderClient(service)
|
||||
|
||||
cached_stats = BucketStats(
|
||||
bucket="bucket1",
|
||||
object_count=10,
|
||||
total_size=1000,
|
||||
compressed_size=600,
|
||||
space_saved=400,
|
||||
average_compression_ratio=0.4,
|
||||
delta_objects=6,
|
||||
direct_objects=4,
|
||||
)
|
||||
client._store_bucket_stats_cache("bucket1", detailed_stats=True, stats=cached_stats)
|
||||
|
||||
response = client.list_buckets()
|
||||
|
||||
bucket1 = next(bucket for bucket in response["Buckets"] if bucket["Name"] == "bucket1")
|
||||
assert bucket1["DeltaGliderStats"]["Cached"] is True
|
||||
assert bucket1["DeltaGliderStats"]["Detailed"] is True
|
||||
assert bucket1["DeltaGliderStats"]["ObjectCount"] == cached_stats.object_count
|
||||
assert bucket1["DeltaGliderStats"]["TotalSize"] == cached_stats.total_size
|
||||
|
||||
bucket2 = next(bucket for bucket in response["Buckets"] if bucket["Name"] == "bucket2")
|
||||
assert "DeltaGliderStats" not in bucket2
|
||||
|
||||
def test_delete_bucket_success(self):
|
||||
"""Test deleting a bucket successfully."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Mock boto3 client
|
||||
mock_boto3_client = Mock()
|
||||
mock_boto3_client.delete_bucket.return_value = None
|
||||
mock_storage.client = mock_boto3_client
|
||||
|
||||
client = DeltaGliderClient(service)
|
||||
response = client.delete_bucket(Bucket="test-bucket")
|
||||
|
||||
# Verify response
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 204
|
||||
|
||||
# Verify boto3 was called
|
||||
mock_boto3_client.delete_bucket.assert_called_once_with(Bucket="test-bucket")
|
||||
|
||||
def test_delete_bucket_not_found(self):
|
||||
"""Test deleting a bucket that doesn't exist returns success."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Mock boto3 client to raise NoSuchBucket
|
||||
mock_boto3_client = Mock()
|
||||
mock_boto3_client.delete_bucket.side_effect = Exception("NoSuchBucket")
|
||||
mock_storage.client = mock_boto3_client
|
||||
|
||||
client = DeltaGliderClient(service)
|
||||
response = client.delete_bucket(Bucket="nonexistent-bucket")
|
||||
|
||||
# Should return success (idempotent)
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 204
|
||||
|
||||
def test_delete_bucket_not_empty_raises_error(self):
|
||||
"""Test deleting a non-empty bucket raises an error."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Mock boto3 client to raise BucketNotEmpty
|
||||
mock_boto3_client = Mock()
|
||||
mock_boto3_client.delete_bucket.side_effect = Exception(
|
||||
"BucketNotEmpty: The bucket you tried to delete is not empty"
|
||||
)
|
||||
mock_storage.client = mock_boto3_client
|
||||
|
||||
client = DeltaGliderClient(service)
|
||||
|
||||
with pytest.raises(RuntimeError, match="Failed to delete bucket"):
|
||||
client.delete_bucket(Bucket="full-bucket")
|
||||
|
||||
def test_get_bucket_stats_caches_per_session(self, monkeypatch):
|
||||
"""Verify bucket stats are cached within the client session."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
mock_storage.client = Mock()
|
||||
|
||||
client = DeltaGliderClient(service)
|
||||
|
||||
quick_stats = BucketStats(
|
||||
bucket="bucket1",
|
||||
object_count=5,
|
||||
total_size=500,
|
||||
compressed_size=300,
|
||||
space_saved=200,
|
||||
average_compression_ratio=0.4,
|
||||
delta_objects=3,
|
||||
direct_objects=2,
|
||||
)
|
||||
detailed_stats = BucketStats(
|
||||
bucket="bucket1",
|
||||
object_count=5,
|
||||
total_size=520,
|
||||
compressed_size=300,
|
||||
space_saved=220,
|
||||
average_compression_ratio=0.423,
|
||||
delta_objects=3,
|
||||
direct_objects=2,
|
||||
)
|
||||
|
||||
call_count = {"value": 0}
|
||||
|
||||
def fake_get_bucket_stats(_: Any, bucket: str, detailed_stats_flag: bool) -> BucketStats:
|
||||
call_count["value"] += 1
|
||||
assert bucket == "bucket1"
|
||||
return detailed_stats if detailed_stats_flag else quick_stats
|
||||
|
||||
monkeypatch.setattr("deltaglider.client._get_bucket_stats", fake_get_bucket_stats)
|
||||
|
||||
# First call should invoke underlying function
|
||||
result_quick = client.get_bucket_stats("bucket1")
|
||||
assert result_quick is quick_stats
|
||||
assert call_count["value"] == 1
|
||||
|
||||
# Second quick call should hit cache
|
||||
assert client.get_bucket_stats("bucket1") is quick_stats
|
||||
assert call_count["value"] == 1
|
||||
|
||||
# Detailed call triggers new computation
|
||||
result_detailed = client.get_bucket_stats("bucket1", detailed_stats=True)
|
||||
assert result_detailed is detailed_stats
|
||||
assert call_count["value"] == 2
|
||||
|
||||
# Quick call after detailed uses detailed cached value (more accurate)
|
||||
assert client.get_bucket_stats("bucket1") is detailed_stats
|
||||
assert call_count["value"] == 2
|
||||
|
||||
# Clearing the cache should force recomputation
|
||||
client.clear_cache()
|
||||
assert client.get_bucket_stats("bucket1") is quick_stats
|
||||
assert call_count["value"] == 3
|
||||
|
||||
def test_bucket_methods_without_boto3_client(self):
|
||||
"""Test that bucket methods raise NotImplementedError when storage doesn't support it."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Storage adapter without boto3 client (no 'client' attribute)
|
||||
delattr(mock_storage, "client")
|
||||
|
||||
client = DeltaGliderClient(service)
|
||||
|
||||
# All bucket methods should raise NotImplementedError
|
||||
with pytest.raises(NotImplementedError):
|
||||
client.create_bucket(Bucket="test")
|
||||
|
||||
with pytest.raises(NotImplementedError):
|
||||
client.delete_bucket(Bucket="test")
|
||||
|
||||
with pytest.raises(NotImplementedError):
|
||||
client.list_buckets()
|
||||
|
||||
def test_complete_bucket_lifecycle(self):
|
||||
"""Test complete bucket lifecycle: create, use, delete."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Mock boto3 client
|
||||
mock_boto3_client = Mock()
|
||||
mock_storage.client = mock_boto3_client
|
||||
|
||||
# Setup responses
|
||||
mock_boto3_client.create_bucket.return_value = {"Location": "/test-lifecycle"}
|
||||
mock_boto3_client.list_buckets.return_value = {
|
||||
"Buckets": [{"Name": "test-lifecycle", "CreationDate": "2025-01-01T00:00:00Z"}],
|
||||
"Owner": {},
|
||||
}
|
||||
mock_boto3_client.delete_bucket.return_value = None
|
||||
|
||||
client = DeltaGliderClient(service)
|
||||
|
||||
# 1. Create bucket
|
||||
create_response = client.create_bucket(Bucket="test-lifecycle")
|
||||
assert create_response["ResponseMetadata"]["HTTPStatusCode"] == 200
|
||||
|
||||
# 2. List buckets - verify it exists
|
||||
list_response = client.list_buckets()
|
||||
bucket_names = [b["Name"] for b in list_response["Buckets"]]
|
||||
assert "test-lifecycle" in bucket_names
|
||||
|
||||
# 3. Delete bucket
|
||||
delete_response = client.delete_bucket(Bucket="test-lifecycle")
|
||||
assert delete_response["ResponseMetadata"]["HTTPStatusCode"] == 204
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
pytest.main([__file__, "-v"])
|
||||
@@ -10,7 +10,6 @@ from deltaglider import create_client
|
||||
from deltaglider.client import (
|
||||
BucketStats,
|
||||
CompressionEstimate,
|
||||
ListObjectsResponse,
|
||||
ObjectInfo,
|
||||
)
|
||||
|
||||
@@ -125,7 +124,7 @@ class MockStorage:
|
||||
@pytest.fixture
|
||||
def client(tmp_path):
|
||||
"""Create a client with mocked storage."""
|
||||
client = create_client(cache_dir=str(tmp_path / "cache"))
|
||||
client = create_client()
|
||||
|
||||
# Replace storage with mock
|
||||
mock_storage = MockStorage()
|
||||
@@ -146,6 +145,65 @@ def client(tmp_path):
|
||||
return client
|
||||
|
||||
|
||||
class TestCredentialHandling:
|
||||
"""Test AWS credential passing."""
|
||||
|
||||
def test_create_client_with_explicit_credentials(self, tmp_path):
|
||||
"""Test that credentials can be passed directly to create_client."""
|
||||
# This test verifies the API accepts credentials, not that they work
|
||||
# (we'd need a real S3 or LocalStack for that)
|
||||
client = create_client(
|
||||
aws_access_key_id="AKIAIOSFODNN7EXAMPLE",
|
||||
aws_secret_access_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
|
||||
region_name="us-west-2",
|
||||
)
|
||||
|
||||
# Verify the client was created
|
||||
assert client is not None
|
||||
assert client.service is not None
|
||||
|
||||
# Verify credentials were passed to the storage adapter's boto3 client
|
||||
# The storage adapter should have a client with these credentials
|
||||
storage = client.service.storage
|
||||
assert hasattr(storage, "client")
|
||||
|
||||
# Check that the boto3 client was configured with our credentials
|
||||
# Note: boto3 doesn't expose credentials directly, but we can verify
|
||||
# the client was created (if credentials were invalid, this would fail)
|
||||
assert storage.client is not None
|
||||
|
||||
def test_create_client_with_session_token(self, tmp_path):
|
||||
"""Test passing temporary credentials with session token."""
|
||||
client = create_client(
|
||||
aws_access_key_id="ASIAIOSFODNN7EXAMPLE",
|
||||
aws_secret_access_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
|
||||
aws_session_token="FwoGZXIvYXdzEBEaDH...",
|
||||
)
|
||||
|
||||
assert client is not None
|
||||
assert client.service.storage.client is not None
|
||||
|
||||
def test_create_client_without_credentials_uses_environment(self, tmp_path):
|
||||
"""Test that omitting credentials falls back to environment/IAM."""
|
||||
# This should use boto3's default credential chain
|
||||
client = create_client()
|
||||
|
||||
assert client is not None
|
||||
assert client.service.storage.client is not None
|
||||
|
||||
def test_create_client_with_endpoint_and_credentials(self, tmp_path):
|
||||
"""Test passing both endpoint URL and credentials."""
|
||||
client = create_client(
|
||||
endpoint_url="http://localhost:9000",
|
||||
aws_access_key_id="minioadmin",
|
||||
aws_secret_access_key="minioadmin",
|
||||
)
|
||||
|
||||
assert client is not None
|
||||
# Endpoint should be available
|
||||
assert client.endpoint_url == "http://localhost:9000"
|
||||
|
||||
|
||||
class TestBoto3Compatibility:
|
||||
"""Test boto3-compatible methods."""
|
||||
|
||||
@@ -196,23 +254,56 @@ class TestBoto3Compatibility:
|
||||
content = response["Body"].read()
|
||||
assert content == b"Test Content"
|
||||
|
||||
def test_get_object_regular_s3_file(self, client):
|
||||
"""Test get_object with regular S3 files (not uploaded via DeltaGlider)."""
|
||||
|
||||
content = b"Regular S3 File Content"
|
||||
|
||||
# Add as a regular S3 object WITHOUT DeltaGlider metadata
|
||||
client.service.storage.objects["test-bucket/regular-file.pdf"] = {
|
||||
"data": content,
|
||||
"size": len(content),
|
||||
"metadata": {}, # No DeltaGlider metadata
|
||||
}
|
||||
|
||||
# Should successfully download the regular S3 object
|
||||
response = client.get_object(Bucket="test-bucket", Key="regular-file.pdf")
|
||||
|
||||
assert "Body" in response
|
||||
downloaded_content = response["Body"].read()
|
||||
assert downloaded_content == content
|
||||
assert response["ContentLength"] == len(content)
|
||||
|
||||
def test_list_objects(self, client):
|
||||
"""Test list_objects with various options."""
|
||||
# List all objects
|
||||
"""Test list_objects with various options (boto3-compatible dict response)."""
|
||||
# List all objects (default: FetchMetadata=False)
|
||||
response = client.list_objects(Bucket="test-bucket")
|
||||
|
||||
assert isinstance(response, ListObjectsResponse)
|
||||
assert response.key_count > 0
|
||||
assert len(response.contents) > 0
|
||||
# Response is now a boto3-compatible dict (not ListObjectsResponse)
|
||||
assert isinstance(response, dict)
|
||||
assert response["KeyCount"] > 0
|
||||
assert len(response["Contents"]) > 0
|
||||
|
||||
# Verify S3Object structure
|
||||
for obj in response["Contents"]:
|
||||
assert "Key" in obj
|
||||
assert "Size" in obj
|
||||
assert "LastModified" in obj
|
||||
assert "Metadata" in obj # DeltaGlider metadata
|
||||
|
||||
# Test with FetchMetadata=True (should only affect delta files)
|
||||
response_with_metadata = client.list_objects(Bucket="test-bucket", FetchMetadata=True)
|
||||
assert isinstance(response_with_metadata, dict)
|
||||
assert response_with_metadata["KeyCount"] > 0
|
||||
|
||||
def test_list_objects_with_delimiter(self, client):
|
||||
"""Test list_objects with delimiter for folder simulation."""
|
||||
"""Test list_objects with delimiter for folder simulation (boto3-compatible dict response)."""
|
||||
response = client.list_objects(Bucket="test-bucket", Prefix="", Delimiter="/")
|
||||
|
||||
# Should have common prefixes for folders
|
||||
assert len(response.common_prefixes) > 0
|
||||
assert {"Prefix": "folder1/"} in response.common_prefixes
|
||||
assert {"Prefix": "folder2/"} in response.common_prefixes
|
||||
assert len(response.get("CommonPrefixes", [])) > 0
|
||||
assert {"Prefix": "folder1/"} in response["CommonPrefixes"]
|
||||
assert {"Prefix": "folder2/"} in response["CommonPrefixes"]
|
||||
|
||||
def test_delete_object(self, client):
|
||||
"""Test delete_object."""
|
||||
@@ -224,6 +315,24 @@ class TestBoto3Compatibility:
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 204
|
||||
assert "test-bucket/to-delete.txt" not in client.service.storage.objects
|
||||
|
||||
def test_delete_object_with_delta_suffix_fallback(self, client):
|
||||
"""Test delete_object with automatic .delta suffix fallback."""
|
||||
# Add object with .delta suffix (as DeltaGlider stores it)
|
||||
client.service.storage.objects["test-bucket/file.zip.delta"] = {
|
||||
"size": 100,
|
||||
"metadata": {
|
||||
"original_name": "file.zip",
|
||||
"compression": "delta",
|
||||
},
|
||||
}
|
||||
|
||||
# Delete using original name (without .delta)
|
||||
response = client.delete_object(Bucket="test-bucket", Key="file.zip")
|
||||
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 204
|
||||
assert response["DeltaGliderInfo"]["Deleted"] is True
|
||||
assert "test-bucket/file.zip.delta" not in client.service.storage.objects
|
||||
|
||||
def test_delete_objects(self, client):
|
||||
"""Test batch delete."""
|
||||
# Add objects
|
||||
@@ -325,6 +434,7 @@ class TestDeltaGliderFeatures:
|
||||
|
||||
def test_get_bucket_stats(self, client):
|
||||
"""Test getting bucket statistics."""
|
||||
# Test quick stats (default: detailed_stats=False)
|
||||
stats = client.get_bucket_stats("test-bucket")
|
||||
|
||||
assert isinstance(stats, BucketStats)
|
||||
@@ -332,6 +442,11 @@ class TestDeltaGliderFeatures:
|
||||
assert stats.total_size > 0
|
||||
assert stats.delta_objects >= 1 # We have archive.zip.delta
|
||||
|
||||
# Test with detailed_stats=True
|
||||
detailed_stats = client.get_bucket_stats("test-bucket", detailed_stats=True)
|
||||
assert isinstance(detailed_stats, BucketStats)
|
||||
assert detailed_stats.object_count == stats.object_count
|
||||
|
||||
def test_upload_chunked(self, client, tmp_path):
|
||||
"""Test chunked upload with progress callback."""
|
||||
# Create a test file
|
||||
|
||||
524
tests/integration/test_delete_objects_recursive.py
Normal file
524
tests/integration/test_delete_objects_recursive.py
Normal file
@@ -0,0 +1,524 @@
|
||||
"""Comprehensive tests for DeltaGliderClient.delete_objects_recursive() method."""
|
||||
|
||||
from datetime import UTC, datetime
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from deltaglider import create_client
|
||||
|
||||
|
||||
class MockStorage:
|
||||
"""Mock storage for testing."""
|
||||
|
||||
def __init__(self):
|
||||
self.objects = {}
|
||||
self.delete_calls = []
|
||||
|
||||
def head(self, key):
|
||||
"""Mock head operation."""
|
||||
from deltaglider.ports.storage import ObjectHead
|
||||
|
||||
if key in self.objects:
|
||||
obj = self.objects[key]
|
||||
return ObjectHead(
|
||||
key=key,
|
||||
size=obj["size"],
|
||||
etag=obj.get("etag", "mock-etag"),
|
||||
last_modified=obj.get("last_modified", datetime.now(UTC)),
|
||||
metadata=obj.get("metadata", {}),
|
||||
)
|
||||
return None
|
||||
|
||||
def list(self, prefix):
|
||||
"""Mock list operation for StoragePort interface."""
|
||||
for key, _obj in self.objects.items():
|
||||
if key.startswith(prefix):
|
||||
obj_head = self.head(key)
|
||||
if obj_head is not None:
|
||||
yield obj_head
|
||||
|
||||
def delete(self, key):
|
||||
"""Mock delete operation."""
|
||||
self.delete_calls.append(key)
|
||||
if key in self.objects:
|
||||
del self.objects[key]
|
||||
return True
|
||||
return False
|
||||
|
||||
def get(self, key):
|
||||
"""Mock get operation."""
|
||||
if key in self.objects:
|
||||
return self.objects[key].get("content", b"mock-content")
|
||||
return None
|
||||
|
||||
def put(self, key, data, metadata=None):
|
||||
"""Mock put operation."""
|
||||
self.objects[key] = {
|
||||
"size": len(data),
|
||||
"content": data,
|
||||
"metadata": metadata or {},
|
||||
}
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_storage():
|
||||
"""Create mock storage."""
|
||||
return MockStorage()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def client(tmp_path):
|
||||
"""Create DeltaGliderClient with mock storage."""
|
||||
# Use create_client to get a properly configured client
|
||||
client = create_client()
|
||||
|
||||
# Replace storage with mock
|
||||
mock_storage = MockStorage()
|
||||
client.service.storage = mock_storage
|
||||
|
||||
return client
|
||||
|
||||
|
||||
class TestDeleteObjectsRecursiveBasicFunctionality:
|
||||
"""Test basic functionality of delete_objects_recursive."""
|
||||
|
||||
def test_delete_single_object_with_file_prefix(self, client):
|
||||
"""Test deleting a single object when prefix is a file (no trailing slash)."""
|
||||
# Setup: Add a regular file
|
||||
client.service.storage.objects["test-bucket/file.txt"] = {"size": 100}
|
||||
|
||||
# Execute
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="file.txt")
|
||||
|
||||
# Verify response structure
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
|
||||
assert "DeletedCount" in response
|
||||
assert "FailedCount" in response
|
||||
assert "DeltaGliderInfo" in response
|
||||
|
||||
# Verify DeltaGliderInfo structure
|
||||
info = response["DeltaGliderInfo"]
|
||||
assert "DeltasDeleted" in info
|
||||
assert "ReferencesDeleted" in info
|
||||
assert "DirectDeleted" in info
|
||||
assert "OtherDeleted" in info
|
||||
|
||||
def test_delete_directory_with_trailing_slash(self, client):
|
||||
"""Test deleting all objects under a prefix with trailing slash."""
|
||||
# Setup: Add multiple files under a prefix
|
||||
client.service.storage.objects["test-bucket/dir/file1.txt"] = {"size": 100}
|
||||
client.service.storage.objects["test-bucket/dir/file2.txt"] = {"size": 200}
|
||||
client.service.storage.objects["test-bucket/dir/sub/file3.txt"] = {"size": 300}
|
||||
|
||||
# Execute
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="dir/")
|
||||
|
||||
# Verify
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
|
||||
assert response["DeletedCount"] >= 0
|
||||
assert response["FailedCount"] == 0
|
||||
|
||||
def test_delete_empty_prefix_returns_zero_counts(self, client):
|
||||
"""Test deleting with empty prefix returns zero counts."""
|
||||
# Execute
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="")
|
||||
|
||||
# Verify
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
|
||||
assert response["DeletedCount"] >= 0
|
||||
assert response["FailedCount"] == 0
|
||||
|
||||
|
||||
class TestDeleteObjectsRecursiveDeltaSuffixHandling:
|
||||
"""Test delta suffix fallback logic."""
|
||||
|
||||
def test_delete_file_with_delta_suffix_fallback(self, client):
|
||||
"""Test that delete falls back to .delta suffix if original not found."""
|
||||
# Setup: Add file with .delta suffix
|
||||
client.service.storage.objects["test-bucket/archive.zip.delta"] = {
|
||||
"size": 500,
|
||||
"metadata": {"original_name": "archive.zip"},
|
||||
}
|
||||
|
||||
# Execute: Delete using original name (without .delta)
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="archive.zip")
|
||||
|
||||
# Verify
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
|
||||
assert "test-bucket/archive.zip.delta" not in client.service.storage.objects
|
||||
|
||||
def test_delete_file_already_with_delta_suffix(self, client):
|
||||
"""Test deleting a file that already has .delta suffix."""
|
||||
# Setup
|
||||
client.service.storage.objects["test-bucket/file.zip.delta"] = {"size": 300}
|
||||
|
||||
# Execute: Delete using .delta suffix directly
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="file.zip.delta")
|
||||
|
||||
# Verify
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
|
||||
|
||||
def test_delta_suffix_not_added_for_directory_prefix(self, client):
|
||||
"""Test that .delta suffix is not added when prefix ends with /."""
|
||||
# Setup
|
||||
client.service.storage.objects["test-bucket/dir/file.txt"] = {"size": 100}
|
||||
|
||||
# Execute
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="dir/")
|
||||
|
||||
# Verify - should not attempt to delete "dir/.delta"
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
|
||||
|
||||
|
||||
class TestDeleteObjectsRecursiveStatisticsAggregation:
|
||||
"""Test statistics aggregation from core service."""
|
||||
|
||||
def test_aggregates_deleted_count_from_service_and_single_deletes(self, client):
|
||||
"""Test that deleted counts are aggregated correctly."""
|
||||
# Setup: Mock service.delete_recursive to return specific counts
|
||||
mock_result = {
|
||||
"deleted_count": 5,
|
||||
"failed_count": 0,
|
||||
"deltas_deleted": 2,
|
||||
"references_deleted": 1,
|
||||
"direct_deleted": 2,
|
||||
"other_deleted": 0,
|
||||
}
|
||||
client.service.delete_recursive = Mock(return_value=mock_result)
|
||||
|
||||
# Execute
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="test/")
|
||||
|
||||
# Verify aggregation
|
||||
assert response["DeletedCount"] == 5
|
||||
assert response["FailedCount"] == 0
|
||||
assert response["DeltaGliderInfo"]["DeltasDeleted"] == 2
|
||||
assert response["DeltaGliderInfo"]["ReferencesDeleted"] == 1
|
||||
assert response["DeltaGliderInfo"]["DirectDeleted"] == 2
|
||||
assert response["DeltaGliderInfo"]["OtherDeleted"] == 0
|
||||
|
||||
def test_aggregates_single_delete_counts_with_service_counts(self, client):
|
||||
"""Test that single file deletes are aggregated with service counts."""
|
||||
# Setup: Add file to trigger single delete path
|
||||
client.service.storage.objects["test-bucket/file.txt"] = {"size": 100}
|
||||
|
||||
# Mock service.delete_recursive to return additional counts
|
||||
mock_result = {
|
||||
"deleted_count": 3,
|
||||
"failed_count": 0,
|
||||
"deltas_deleted": 1,
|
||||
"references_deleted": 0,
|
||||
"direct_deleted": 2,
|
||||
"other_deleted": 0,
|
||||
}
|
||||
client.service.delete_recursive = Mock(return_value=mock_result)
|
||||
|
||||
# Execute
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="file.txt")
|
||||
|
||||
# Verify that counts include both single delete and service delete
|
||||
assert response["DeletedCount"] >= 3 # At least service count
|
||||
assert response["DeltaGliderInfo"]["DeltasDeleted"] >= 1
|
||||
|
||||
|
||||
class TestDeleteObjectsRecursiveErrorHandling:
|
||||
"""Test error handling and error aggregation."""
|
||||
|
||||
def test_single_delete_error_captured_in_errors_list(self, client):
|
||||
"""Test that errors from single deletes are captured."""
|
||||
# Setup: Add file
|
||||
client.service.storage.objects["test-bucket/file.txt"] = {"size": 100}
|
||||
|
||||
# Mock delete_with_delta_suffix to raise exception
|
||||
with patch("deltaglider.client.delete_with_delta_suffix") as mock_delete:
|
||||
mock_delete.side_effect = RuntimeError("Simulated delete error")
|
||||
|
||||
# Execute
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="file.txt")
|
||||
|
||||
# Verify error captured
|
||||
assert response["FailedCount"] > 0
|
||||
assert "Errors" in response
|
||||
assert any("Simulated delete error" in err for err in response["Errors"])
|
||||
|
||||
def test_service_errors_propagated_in_response(self, client):
|
||||
"""Test that errors from service.delete_recursive are propagated."""
|
||||
# Mock service to return errors
|
||||
mock_result = {
|
||||
"deleted_count": 2,
|
||||
"failed_count": 1,
|
||||
"deltas_deleted": 2,
|
||||
"references_deleted": 0,
|
||||
"direct_deleted": 0,
|
||||
"other_deleted": 0,
|
||||
"errors": ["Error deleting object1", "Error deleting object2"],
|
||||
}
|
||||
client.service.delete_recursive = Mock(return_value=mock_result)
|
||||
|
||||
# Execute
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="test/")
|
||||
|
||||
# Verify
|
||||
assert response["FailedCount"] == 1
|
||||
assert "Errors" in response
|
||||
assert "Error deleting object1" in response["Errors"]
|
||||
assert "Error deleting object2" in response["Errors"]
|
||||
|
||||
def test_combines_single_and_service_errors(self, client):
|
||||
"""Test that errors from both single deletes and service are combined."""
|
||||
# Setup
|
||||
client.service.storage.objects["test-bucket/file.txt"] = {"size": 100}
|
||||
|
||||
# Mock service to also return errors
|
||||
mock_result = {
|
||||
"deleted_count": 1,
|
||||
"failed_count": 1,
|
||||
"deltas_deleted": 0,
|
||||
"references_deleted": 0,
|
||||
"direct_deleted": 0,
|
||||
"other_deleted": 0,
|
||||
"errors": ["Service delete error"],
|
||||
}
|
||||
client.service.delete_recursive = Mock(return_value=mock_result)
|
||||
|
||||
# Mock delete_with_delta_suffix to raise exception
|
||||
with patch("deltaglider.client.delete_with_delta_suffix") as mock_delete:
|
||||
mock_delete.side_effect = RuntimeError("Single delete error")
|
||||
|
||||
# Execute
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="file.txt")
|
||||
|
||||
# Verify both errors present
|
||||
assert "Errors" in response
|
||||
errors_str = " ".join(response["Errors"])
|
||||
assert "Single delete error" in errors_str
|
||||
assert "Service delete error" in errors_str
|
||||
|
||||
|
||||
class TestDeleteObjectsRecursiveWarningsHandling:
|
||||
"""Test warning aggregation."""
|
||||
|
||||
def test_service_warnings_propagated_in_response(self, client):
|
||||
"""Test that warnings from service.delete_recursive are propagated."""
|
||||
# Mock service to return warnings
|
||||
mock_result = {
|
||||
"deleted_count": 3,
|
||||
"failed_count": 0,
|
||||
"deltas_deleted": 2,
|
||||
"references_deleted": 1,
|
||||
"direct_deleted": 0,
|
||||
"other_deleted": 0,
|
||||
"warnings": ["Reference deleted, 2 dependent deltas invalidated"],
|
||||
}
|
||||
client.service.delete_recursive = Mock(return_value=mock_result)
|
||||
|
||||
# Execute
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="test/")
|
||||
|
||||
# Verify
|
||||
assert "Warnings" in response
|
||||
assert "Reference deleted, 2 dependent deltas invalidated" in response["Warnings"]
|
||||
|
||||
def test_single_delete_warnings_propagated(self, client):
|
||||
"""Test that warnings from single deletes are captured."""
|
||||
# Setup
|
||||
client.service.storage.objects["test-bucket/ref.bin"] = {"size": 100}
|
||||
|
||||
# Mock service
|
||||
mock_result = {
|
||||
"deleted_count": 0,
|
||||
"failed_count": 0,
|
||||
"deltas_deleted": 0,
|
||||
"references_deleted": 0,
|
||||
"direct_deleted": 0,
|
||||
"other_deleted": 0,
|
||||
}
|
||||
client.service.delete_recursive = Mock(return_value=mock_result)
|
||||
|
||||
# Mock delete_with_delta_suffix to return warnings
|
||||
with patch("deltaglider.client.delete_with_delta_suffix") as mock_delete:
|
||||
mock_delete.return_value = (
|
||||
"ref.bin",
|
||||
{
|
||||
"deleted": True,
|
||||
"type": "reference",
|
||||
"warnings": ["Warning from single delete"],
|
||||
},
|
||||
)
|
||||
|
||||
# Execute
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="ref.bin")
|
||||
|
||||
# Verify
|
||||
assert "Warnings" in response
|
||||
assert "Warning from single delete" in response["Warnings"]
|
||||
|
||||
|
||||
class TestDeleteObjectsRecursiveSingleDeleteDetails:
|
||||
"""Test SingleDeletes detail tracking."""
|
||||
|
||||
def test_single_delete_details_included_for_file_prefix(self, client):
|
||||
"""Test that SingleDeletes details are included when deleting file prefix."""
|
||||
# Setup
|
||||
client.service.storage.objects["test-bucket/file.txt"] = {"size": 100}
|
||||
|
||||
# Mock service
|
||||
mock_result = {
|
||||
"deleted_count": 0,
|
||||
"failed_count": 0,
|
||||
"deltas_deleted": 0,
|
||||
"references_deleted": 0,
|
||||
"direct_deleted": 0,
|
||||
"other_deleted": 0,
|
||||
}
|
||||
client.service.delete_recursive = Mock(return_value=mock_result)
|
||||
|
||||
# Mock delete_with_delta_suffix
|
||||
with patch("deltaglider.client.delete_with_delta_suffix") as mock_delete:
|
||||
mock_delete.return_value = (
|
||||
"file.txt",
|
||||
{
|
||||
"deleted": True,
|
||||
"type": "direct",
|
||||
"dependent_deltas": 0,
|
||||
"warnings": [],
|
||||
},
|
||||
)
|
||||
|
||||
# Execute
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="file.txt")
|
||||
|
||||
# Verify
|
||||
assert "SingleDeletes" in response["DeltaGliderInfo"]
|
||||
single_deletes = response["DeltaGliderInfo"]["SingleDeletes"]
|
||||
assert len(single_deletes) > 0
|
||||
assert single_deletes[0]["Key"] == "file.txt"
|
||||
assert single_deletes[0]["Type"] == "direct"
|
||||
assert "DependentDeltas" in single_deletes[0]
|
||||
assert "Warnings" in single_deletes[0]
|
||||
|
||||
def test_single_delete_includes_stored_key_when_different(self, client):
|
||||
"""Test that StoredKey is included when actual key differs from requested."""
|
||||
# Setup
|
||||
client.service.storage.objects["test-bucket/file.zip.delta"] = {"size": 200}
|
||||
|
||||
# Mock delete_with_delta_suffix to return different key
|
||||
from deltaglider import client_delete_helpers
|
||||
|
||||
original_delete = client_delete_helpers.delete_with_delta_suffix
|
||||
|
||||
def mock_delete(service, bucket, key):
|
||||
actual_key = "file.zip.delta" if key == "file.zip" else key
|
||||
return (
|
||||
actual_key,
|
||||
{
|
||||
"deleted": True,
|
||||
"type": "delta",
|
||||
"dependent_deltas": 0,
|
||||
"warnings": [],
|
||||
},
|
||||
)
|
||||
|
||||
client_delete_helpers.delete_with_delta_suffix = mock_delete
|
||||
|
||||
# Mock service
|
||||
mock_result = {
|
||||
"deleted_count": 0,
|
||||
"failed_count": 0,
|
||||
"deltas_deleted": 0,
|
||||
"references_deleted": 0,
|
||||
"direct_deleted": 0,
|
||||
"other_deleted": 0,
|
||||
}
|
||||
client.service.delete_recursive = Mock(return_value=mock_result)
|
||||
|
||||
try:
|
||||
# Execute
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="file.zip")
|
||||
|
||||
# Verify
|
||||
assert "SingleDeletes" in response["DeltaGliderInfo"]
|
||||
single_deletes = response["DeltaGliderInfo"]["SingleDeletes"]
|
||||
if len(single_deletes) > 0:
|
||||
# If actual key differs, StoredKey should be present
|
||||
detail = single_deletes[0]
|
||||
if detail["Key"] != "file.zip.delta":
|
||||
assert "StoredKey" in detail
|
||||
finally:
|
||||
client_delete_helpers.delete_with_delta_suffix = original_delete
|
||||
|
||||
|
||||
class TestDeleteObjectsRecursiveEdgeCases:
|
||||
"""Test edge cases and boundary conditions."""
|
||||
|
||||
def test_nonexistent_prefix_returns_zero_counts(self, client):
|
||||
"""Test deleting nonexistent prefix returns zero counts."""
|
||||
# Execute
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="nonexistent/path/")
|
||||
|
||||
# Verify
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
|
||||
assert response["DeletedCount"] >= 0
|
||||
assert response["FailedCount"] == 0
|
||||
|
||||
def test_duplicate_candidates_handled_correctly(self, client):
|
||||
"""Test that duplicate delete candidates are handled correctly."""
|
||||
# Setup: This tests the seen_candidates logic
|
||||
client.service.storage.objects["test-bucket/file.delta"] = {"size": 100}
|
||||
|
||||
# Execute: Should not attempt to delete "file.delta" twice
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="file.delta")
|
||||
|
||||
# Verify no errors
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
|
||||
|
||||
def test_unknown_result_type_categorized_as_other(self, client):
|
||||
"""Test that unknown result types are categorized as 'other'."""
|
||||
# Setup
|
||||
client.service.storage.objects["test-bucket/file.txt"] = {"size": 100}
|
||||
|
||||
# Mock service
|
||||
mock_result = {
|
||||
"deleted_count": 0,
|
||||
"failed_count": 0,
|
||||
"deltas_deleted": 0,
|
||||
"references_deleted": 0,
|
||||
"direct_deleted": 0,
|
||||
"other_deleted": 0,
|
||||
}
|
||||
client.service.delete_recursive = Mock(return_value=mock_result)
|
||||
|
||||
# Mock delete_with_delta_suffix to return unknown type
|
||||
with patch("deltaglider.client.delete_with_delta_suffix") as mock_delete:
|
||||
mock_delete.return_value = (
|
||||
"file.txt",
|
||||
{
|
||||
"deleted": True,
|
||||
"type": "unknown_type", # Not in single_counts keys
|
||||
"dependent_deltas": 0,
|
||||
"warnings": [],
|
||||
},
|
||||
)
|
||||
|
||||
# Execute
|
||||
response = client.delete_objects_recursive(Bucket="test-bucket", Prefix="file.txt")
|
||||
|
||||
# Verify it's categorized as "other"
|
||||
assert response["DeltaGliderInfo"]["OtherDeleted"] >= 1
|
||||
# Also verify the detail shows the unknown type
|
||||
if "SingleDeletes" in response["DeltaGliderInfo"]:
|
||||
assert response["DeltaGliderInfo"]["SingleDeletes"][0]["Type"] == "unknown_type"
|
||||
|
||||
def test_kwargs_parameter_accepted(self, client):
|
||||
"""Test that additional kwargs are accepted without error."""
|
||||
# Execute with extra parameters
|
||||
response = client.delete_objects_recursive(
|
||||
Bucket="test-bucket",
|
||||
Prefix="test/",
|
||||
ExtraParam="value", # Should be ignored
|
||||
AnotherParam=123,
|
||||
)
|
||||
|
||||
# Verify no errors
|
||||
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
|
||||
445
tests/integration/test_filtering_and_cleanup.py
Normal file
445
tests/integration/test_filtering_and_cleanup.py
Normal file
@@ -0,0 +1,445 @@
|
||||
"""Tests for SDK filtering and delete cleanup functionality."""
|
||||
|
||||
from datetime import UTC, datetime
|
||||
from unittest.mock import Mock
|
||||
|
||||
import pytest
|
||||
|
||||
from deltaglider.app.cli.main import create_service
|
||||
from deltaglider.client import DeltaGliderClient
|
||||
from deltaglider.core import ObjectKey
|
||||
from deltaglider.ports.storage import ObjectHead
|
||||
|
||||
|
||||
class TestSDKFiltering:
|
||||
"""Test that SDK filters .delta and reference.bin from list_objects()."""
|
||||
|
||||
def test_list_objects_filters_delta_suffix(self):
|
||||
"""Test that .delta suffix is stripped from object keys."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Mock list_objects response with .delta files
|
||||
mock_storage.list_objects.return_value = {
|
||||
"objects": [
|
||||
{
|
||||
"key": "releases/app-v1.zip.delta",
|
||||
"size": 1000,
|
||||
"last_modified": "2025-01-01T00:00:00Z",
|
||||
"etag": "abc123",
|
||||
"storage_class": "STANDARD",
|
||||
},
|
||||
{
|
||||
"key": "releases/app-v2.zip.delta",
|
||||
"size": 1500,
|
||||
"last_modified": "2025-01-02T00:00:00Z",
|
||||
"etag": "def456",
|
||||
"storage_class": "STANDARD",
|
||||
},
|
||||
{
|
||||
"key": "releases/README.md",
|
||||
"size": 500,
|
||||
"last_modified": "2025-01-03T00:00:00Z",
|
||||
"etag": "ghi789",
|
||||
"storage_class": "STANDARD",
|
||||
},
|
||||
],
|
||||
"common_prefixes": [],
|
||||
"is_truncated": False,
|
||||
"next_continuation_token": None,
|
||||
}
|
||||
|
||||
client = DeltaGliderClient(service)
|
||||
response = client.list_objects(Bucket="test-bucket", Prefix="releases/")
|
||||
|
||||
# Response is now a boto3-compatible dict
|
||||
contents = response["Contents"]
|
||||
|
||||
# Verify .delta suffix is stripped
|
||||
keys = [obj["Key"] for obj in contents]
|
||||
assert "releases/app-v1.zip" in keys
|
||||
assert "releases/app-v2.zip" in keys
|
||||
assert "releases/README.md" in keys
|
||||
|
||||
# Verify NO .delta suffixes in output
|
||||
for key in keys:
|
||||
assert not key.endswith(".delta"), f"Found .delta suffix in: {key}"
|
||||
|
||||
# Verify is_delta flag is set correctly in Metadata
|
||||
delta_objects = [
|
||||
obj for obj in contents if obj.get("Metadata", {}).get("deltaglider-is-delta") == "true"
|
||||
]
|
||||
assert len(delta_objects) == 2
|
||||
|
||||
def test_list_objects_filters_reference_bin(self):
|
||||
"""Test that reference.bin files are completely filtered out."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Mock list_objects response with reference.bin files
|
||||
mock_storage.list_objects.return_value = {
|
||||
"objects": [
|
||||
{
|
||||
"key": "releases/reference.bin",
|
||||
"size": 50000,
|
||||
"last_modified": "2025-01-01T00:00:00Z",
|
||||
"etag": "ref123",
|
||||
"storage_class": "STANDARD",
|
||||
},
|
||||
{
|
||||
"key": "releases/1.0/reference.bin",
|
||||
"size": 50000,
|
||||
"last_modified": "2025-01-01T00:00:00Z",
|
||||
"etag": "ref456",
|
||||
"storage_class": "STANDARD",
|
||||
},
|
||||
{
|
||||
"key": "releases/app.zip.delta",
|
||||
"size": 1000,
|
||||
"last_modified": "2025-01-02T00:00:00Z",
|
||||
"etag": "app123",
|
||||
"storage_class": "STANDARD",
|
||||
},
|
||||
],
|
||||
"common_prefixes": [],
|
||||
"is_truncated": False,
|
||||
"next_continuation_token": None,
|
||||
}
|
||||
|
||||
client = DeltaGliderClient(service)
|
||||
response = client.list_objects(Bucket="test-bucket", Prefix="releases/")
|
||||
|
||||
# Response is now a boto3-compatible dict
|
||||
contents = response["Contents"]
|
||||
|
||||
# Verify NO reference.bin files in output
|
||||
keys = [obj["Key"] for obj in contents]
|
||||
for key in keys:
|
||||
assert not key.endswith("reference.bin"), f"Found reference.bin in: {key}"
|
||||
|
||||
# Should only have the app.zip (with .delta stripped)
|
||||
assert len(contents) == 1
|
||||
assert contents[0]["Key"] == "releases/app.zip"
|
||||
assert contents[0].get("Metadata", {}).get("deltaglider-is-delta") == "true"
|
||||
|
||||
def test_list_objects_combined_filtering(self):
|
||||
"""Test filtering of both .delta and reference.bin together."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Mock comprehensive file list
|
||||
mock_storage.list_objects.return_value = {
|
||||
"objects": [
|
||||
{
|
||||
"key": "data/reference.bin",
|
||||
"size": 50000,
|
||||
"last_modified": "2025-01-01T00:00:00Z",
|
||||
"etag": "1",
|
||||
},
|
||||
{
|
||||
"key": "data/file1.zip.delta",
|
||||
"size": 1000,
|
||||
"last_modified": "2025-01-01T00:00:00Z",
|
||||
"etag": "2",
|
||||
},
|
||||
{
|
||||
"key": "data/file2.zip.delta",
|
||||
"size": 1500,
|
||||
"last_modified": "2025-01-01T00:00:00Z",
|
||||
"etag": "3",
|
||||
},
|
||||
{
|
||||
"key": "data/file3.txt",
|
||||
"size": 500,
|
||||
"last_modified": "2025-01-01T00:00:00Z",
|
||||
"etag": "4",
|
||||
},
|
||||
{
|
||||
"key": "data/sub/reference.bin",
|
||||
"size": 50000,
|
||||
"last_modified": "2025-01-01T00:00:00Z",
|
||||
"etag": "5",
|
||||
},
|
||||
{
|
||||
"key": "data/sub/app.jar.delta",
|
||||
"size": 2000,
|
||||
"last_modified": "2025-01-01T00:00:00Z",
|
||||
"etag": "6",
|
||||
},
|
||||
],
|
||||
"common_prefixes": [],
|
||||
"is_truncated": False,
|
||||
"next_continuation_token": None,
|
||||
}
|
||||
|
||||
client = DeltaGliderClient(service)
|
||||
response = client.list_objects(Bucket="test-bucket", Prefix="data/")
|
||||
|
||||
# Response is now a boto3-compatible dict
|
||||
contents = response["Contents"]
|
||||
|
||||
# Should filter out 2 reference.bin files
|
||||
# Should strip .delta from 3 files
|
||||
# Should keep 1 regular file as-is
|
||||
assert len(contents) == 4 # 3 deltas + 1 regular file
|
||||
|
||||
keys = [obj["Key"] for obj in contents]
|
||||
expected_keys = ["data/file1.zip", "data/file2.zip", "data/file3.txt", "data/sub/app.jar"]
|
||||
assert sorted(keys) == sorted(expected_keys)
|
||||
|
||||
# Verify no internal files visible
|
||||
for key in keys:
|
||||
assert not key.endswith(".delta")
|
||||
assert not key.endswith("reference.bin")
|
||||
|
||||
|
||||
class TestSingleDeleteCleanup:
|
||||
"""Test that single delete() cleans up orphaned reference.bin."""
|
||||
|
||||
def test_delete_last_delta_cleans_reference(self):
|
||||
"""Test that deleting the last delta file removes orphaned reference.bin."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Mock head for both delta and reference.bin
|
||||
def mock_head_func(key):
|
||||
if key.endswith("app.zip.delta"):
|
||||
return ObjectHead(
|
||||
key="releases/app.zip.delta",
|
||||
size=1000,
|
||||
etag="abc123",
|
||||
last_modified=datetime.now(UTC),
|
||||
metadata={"original_name": "app.zip", "ref_key": "releases/reference.bin"},
|
||||
)
|
||||
elif key.endswith("reference.bin"):
|
||||
return ObjectHead(
|
||||
key="releases/reference.bin",
|
||||
size=50000,
|
||||
etag="ref123",
|
||||
last_modified=datetime.now(UTC),
|
||||
metadata={},
|
||||
)
|
||||
return None
|
||||
|
||||
mock_storage.head.side_effect = mock_head_func
|
||||
|
||||
# Mock list to show NO other deltas remain
|
||||
mock_storage.list.return_value = [
|
||||
ObjectHead(
|
||||
key="releases/reference.bin",
|
||||
size=50000,
|
||||
etag="ref123",
|
||||
last_modified=datetime.now(UTC),
|
||||
metadata={},
|
||||
),
|
||||
]
|
||||
mock_storage.delete.return_value = None
|
||||
|
||||
# Delete the last delta
|
||||
result = service.delete(ObjectKey(bucket="test-bucket", key="releases/app.zip.delta"))
|
||||
|
||||
# Verify delta was deleted
|
||||
assert result["deleted"] is True
|
||||
assert result["type"] == "delta"
|
||||
|
||||
# Verify reference.bin cleanup was triggered
|
||||
assert "cleaned_reference" in result
|
||||
assert result["cleaned_reference"] == "releases/reference.bin"
|
||||
|
||||
# Verify both files were deleted
|
||||
assert mock_storage.delete.call_count == 2
|
||||
delete_calls = [call[0][0] for call in mock_storage.delete.call_args_list]
|
||||
assert "test-bucket/releases/app.zip.delta" in delete_calls
|
||||
assert "test-bucket/releases/reference.bin" in delete_calls
|
||||
|
||||
def test_delete_delta_keeps_reference_when_others_exist(self):
|
||||
"""Test that reference.bin is kept when other deltas remain."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Mock the delta file being deleted
|
||||
mock_storage.head.return_value = ObjectHead(
|
||||
key="releases/app-v1.zip.delta",
|
||||
size=1000,
|
||||
etag="abc123",
|
||||
last_modified=datetime.now(UTC),
|
||||
metadata={"original_name": "app-v1.zip"},
|
||||
)
|
||||
|
||||
# Mock list to show OTHER deltas still exist
|
||||
mock_storage.list.return_value = [
|
||||
ObjectHead(
|
||||
key="releases/app-v2.zip.delta",
|
||||
size=1500,
|
||||
etag="def456",
|
||||
last_modified=datetime.now(UTC),
|
||||
metadata={},
|
||||
),
|
||||
ObjectHead(
|
||||
key="releases/reference.bin",
|
||||
size=50000,
|
||||
etag="ref123",
|
||||
last_modified=datetime.now(UTC),
|
||||
metadata={},
|
||||
),
|
||||
]
|
||||
|
||||
mock_storage.delete.return_value = None
|
||||
|
||||
# Delete one delta (but others remain)
|
||||
result = service.delete(ObjectKey(bucket="test-bucket", key="releases/app-v1.zip.delta"))
|
||||
|
||||
# Verify delta was deleted
|
||||
assert result["deleted"] is True
|
||||
assert result["type"] == "delta"
|
||||
|
||||
# Verify reference.bin was NOT cleaned up
|
||||
assert "cleaned_reference" not in result
|
||||
|
||||
# Verify only the delta was deleted, not reference.bin
|
||||
assert mock_storage.delete.call_count == 1
|
||||
mock_storage.delete.assert_called_once_with("test-bucket/releases/app-v1.zip.delta")
|
||||
|
||||
def test_delete_delta_no_reference_exists(self):
|
||||
"""Test deleting delta when reference.bin doesn't exist (edge case)."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Mock the delta file
|
||||
mock_storage.head.return_value = ObjectHead(
|
||||
key="releases/app.zip.delta",
|
||||
size=1000,
|
||||
etag="abc123",
|
||||
last_modified=datetime.now(UTC),
|
||||
metadata={"original_name": "app.zip"},
|
||||
)
|
||||
|
||||
# Mock list shows no other deltas
|
||||
mock_storage.list.return_value = []
|
||||
|
||||
# Mock head for reference.bin returns None (doesn't exist)
|
||||
def mock_head_func(key):
|
||||
if key.endswith("reference.bin"):
|
||||
return None
|
||||
return ObjectHead(
|
||||
key="releases/app.zip.delta",
|
||||
size=1000,
|
||||
etag="abc123",
|
||||
last_modified=datetime.now(UTC),
|
||||
metadata={},
|
||||
)
|
||||
|
||||
mock_storage.head.side_effect = mock_head_func
|
||||
mock_storage.delete.return_value = None
|
||||
|
||||
# Delete the delta
|
||||
result = service.delete(ObjectKey(bucket="test-bucket", key="releases/app.zip.delta"))
|
||||
|
||||
# Verify delta was deleted
|
||||
assert result["deleted"] is True
|
||||
assert result["type"] == "delta"
|
||||
|
||||
# Verify no reference cleanup (since it didn't exist)
|
||||
assert "cleaned_reference" not in result
|
||||
|
||||
# Only delta should be deleted
|
||||
assert mock_storage.delete.call_count == 1
|
||||
|
||||
def test_delete_isolated_deltaspaces(self):
|
||||
"""Test that cleanup only affects the specific DeltaSpace."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Mock head for both delta and reference.bin
|
||||
def mock_head_func(key):
|
||||
if "1.0/app.zip.delta" in key:
|
||||
return ObjectHead(
|
||||
key="releases/1.0/app.zip.delta",
|
||||
size=1000,
|
||||
etag="abc123",
|
||||
last_modified=datetime.now(UTC),
|
||||
metadata={"original_name": "app.zip"},
|
||||
)
|
||||
elif "1.0/reference.bin" in key:
|
||||
return ObjectHead(
|
||||
key="releases/1.0/reference.bin",
|
||||
size=50000,
|
||||
etag="ref1",
|
||||
last_modified=datetime.now(UTC),
|
||||
metadata={},
|
||||
)
|
||||
return None
|
||||
|
||||
mock_storage.head.side_effect = mock_head_func
|
||||
|
||||
# Mock list for 1.0 - no other deltas
|
||||
mock_storage.list.return_value = [
|
||||
ObjectHead(
|
||||
key="releases/1.0/reference.bin",
|
||||
size=50000,
|
||||
etag="ref1",
|
||||
last_modified=datetime.now(UTC),
|
||||
metadata={},
|
||||
),
|
||||
]
|
||||
mock_storage.delete.return_value = None
|
||||
|
||||
# Delete from 1.0
|
||||
result = service.delete(ObjectKey(bucket="test-bucket", key="releases/1.0/app.zip.delta"))
|
||||
|
||||
# Should clean up only 1.0/reference.bin
|
||||
assert result["cleaned_reference"] == "releases/1.0/reference.bin"
|
||||
|
||||
# Verify correct files deleted
|
||||
delete_calls = [call[0][0] for call in mock_storage.delete.call_args_list]
|
||||
assert "test-bucket/releases/1.0/app.zip.delta" in delete_calls
|
||||
assert "test-bucket/releases/1.0/reference.bin" in delete_calls
|
||||
|
||||
|
||||
class TestRecursiveDeleteCleanup:
|
||||
"""Test that recursive delete properly cleans up references."""
|
||||
|
||||
def test_recursive_delete_reference_cleanup_already_works(self):
|
||||
"""Verify existing recursive delete reference cleanup is working."""
|
||||
service = create_service()
|
||||
mock_storage = Mock()
|
||||
service.storage = mock_storage
|
||||
|
||||
# Mock objects in deltaspace
|
||||
mock_storage.list.return_value = [
|
||||
ObjectHead(
|
||||
key="data/app.zip.delta",
|
||||
size=1000,
|
||||
etag="1",
|
||||
last_modified=datetime.now(UTC),
|
||||
metadata={},
|
||||
),
|
||||
ObjectHead(
|
||||
key="data/reference.bin",
|
||||
size=50000,
|
||||
etag="2",
|
||||
last_modified=datetime.now(UTC),
|
||||
metadata={},
|
||||
),
|
||||
]
|
||||
|
||||
mock_storage.head.return_value = None
|
||||
mock_storage.delete.return_value = None
|
||||
|
||||
result = service.delete_recursive("test-bucket", "data/")
|
||||
|
||||
# Should delete both delta and reference
|
||||
assert result["deleted_count"] == 2
|
||||
assert result["deltas_deleted"] == 1
|
||||
assert result["references_deleted"] == 1
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
pytest.main([__file__, "-v"])
|
||||
@@ -1,146 +0,0 @@
|
||||
"""Integration test for get command."""
|
||||
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
import pytest
|
||||
from click.testing import CliRunner
|
||||
|
||||
from deltaglider.app.cli.main import cli
|
||||
from deltaglider.core import ObjectKey
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_service():
|
||||
"""Create a mock DeltaService."""
|
||||
return Mock()
|
||||
|
||||
|
||||
def test_get_command_with_original_name(mock_service):
|
||||
"""Test get command with original filename (auto-appends .delta)."""
|
||||
runner = CliRunner()
|
||||
|
||||
# Mock the service.get method and storage.head
|
||||
mock_service.get = Mock()
|
||||
mock_service.storage.head = Mock(
|
||||
side_effect=[
|
||||
None, # First check for original file returns None
|
||||
Mock(), # Second check for .delta file returns something
|
||||
]
|
||||
)
|
||||
|
||||
with patch("deltaglider.app.cli.main.create_service", return_value=mock_service):
|
||||
# Run get with original filename (should auto-append .delta)
|
||||
result = runner.invoke(cli, ["get", "s3://test-bucket/data/myfile.zip"])
|
||||
|
||||
# Check it was successful
|
||||
assert result.exit_code == 0
|
||||
assert "Found delta file: s3://test-bucket/data/myfile.zip.delta" in result.output
|
||||
assert "Successfully retrieved: myfile.zip" in result.output
|
||||
|
||||
# Verify the service was called with the correct arguments
|
||||
mock_service.get.assert_called_once()
|
||||
call_args = mock_service.get.call_args
|
||||
obj_key = call_args[0][0]
|
||||
output_path = call_args[0][1]
|
||||
|
||||
assert isinstance(obj_key, ObjectKey)
|
||||
assert obj_key.bucket == "test-bucket"
|
||||
assert obj_key.key == "data/myfile.zip.delta"
|
||||
assert output_path == Path("myfile.zip")
|
||||
|
||||
|
||||
def test_get_command_with_delta_name(mock_service):
|
||||
"""Test get command with explicit .delta filename."""
|
||||
runner = CliRunner()
|
||||
|
||||
# Mock the service.get method and storage.head
|
||||
mock_service.get = Mock()
|
||||
mock_service.storage.head = Mock(return_value=Mock()) # File exists
|
||||
|
||||
with patch("deltaglider.app.cli.main.create_service", return_value=mock_service):
|
||||
# Run get with explicit .delta filename
|
||||
result = runner.invoke(cli, ["get", "s3://test-bucket/data/myfile.zip.delta"])
|
||||
|
||||
# Check it was successful
|
||||
assert result.exit_code == 0
|
||||
assert "Found file: s3://test-bucket/data/myfile.zip.delta" in result.output
|
||||
assert "Successfully retrieved: myfile.zip" in result.output
|
||||
|
||||
# Verify the service was called with the correct arguments
|
||||
mock_service.get.assert_called_once()
|
||||
call_args = mock_service.get.call_args
|
||||
obj_key = call_args[0][0]
|
||||
output_path = call_args[0][1]
|
||||
|
||||
assert isinstance(obj_key, ObjectKey)
|
||||
assert obj_key.bucket == "test-bucket"
|
||||
assert obj_key.key == "data/myfile.zip.delta"
|
||||
assert output_path == Path("myfile.zip")
|
||||
|
||||
|
||||
def test_get_command_with_output_option(mock_service):
|
||||
"""Test get command with custom output path."""
|
||||
runner = CliRunner()
|
||||
|
||||
# Mock the service.get method and storage.head
|
||||
mock_service.get = Mock()
|
||||
mock_service.storage.head = Mock(
|
||||
side_effect=[
|
||||
None, # First check for original file returns None
|
||||
Mock(), # Second check for .delta file returns something
|
||||
]
|
||||
)
|
||||
|
||||
with patch("deltaglider.app.cli.main.create_service", return_value=mock_service):
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
output_file = Path(tmpdir) / "custom_output.zip"
|
||||
|
||||
# Run get with custom output path
|
||||
result = runner.invoke(
|
||||
cli, ["get", "s3://test-bucket/data/myfile.zip", "-o", str(output_file)]
|
||||
)
|
||||
|
||||
# Check it was successful
|
||||
assert result.exit_code == 0
|
||||
assert f"Successfully retrieved: {output_file}" in result.output
|
||||
|
||||
# Verify the service was called with the correct arguments
|
||||
mock_service.get.assert_called_once()
|
||||
call_args = mock_service.get.call_args
|
||||
obj_key = call_args[0][0]
|
||||
output_path = call_args[0][1]
|
||||
|
||||
assert isinstance(obj_key, ObjectKey)
|
||||
assert obj_key.bucket == "test-bucket"
|
||||
assert obj_key.key == "data/myfile.zip.delta"
|
||||
assert output_path == output_file
|
||||
|
||||
|
||||
def test_get_command_error_handling(mock_service):
|
||||
"""Test get command error handling."""
|
||||
runner = CliRunner()
|
||||
|
||||
# Mock the service.get method to raise an error
|
||||
mock_service.get = Mock(side_effect=FileNotFoundError("Delta not found"))
|
||||
|
||||
with patch("deltaglider.app.cli.main.create_service", return_value=mock_service):
|
||||
# Run get command
|
||||
result = runner.invoke(cli, ["get", "s3://test-bucket/data/missing.zip"])
|
||||
|
||||
# Check it failed with error message
|
||||
assert result.exit_code == 1
|
||||
assert "Error: Delta not found" in result.output
|
||||
|
||||
|
||||
def test_get_command_invalid_url():
|
||||
"""Test get command with invalid S3 URL."""
|
||||
runner = CliRunner()
|
||||
|
||||
# Run get with invalid URL
|
||||
result = runner.invoke(cli, ["get", "http://invalid-url/file.zip"])
|
||||
|
||||
# Check it failed with error message
|
||||
assert result.exit_code == 1
|
||||
assert "Error: Invalid S3 URL" in result.output
|
||||
@@ -14,7 +14,7 @@ class TestRecursiveDeleteReferenceCleanup:
|
||||
def test_core_service_delete_recursive_method_exists(self):
|
||||
"""Test that the core service has the delete_recursive method."""
|
||||
service = create_service()
|
||||
assert hasattr(service, 'delete_recursive')
|
||||
assert hasattr(service, "delete_recursive")
|
||||
assert callable(service.delete_recursive)
|
||||
|
||||
def test_delete_recursive_handles_empty_prefix(self):
|
||||
@@ -41,8 +41,16 @@ class TestRecursiveDeleteReferenceCleanup:
|
||||
|
||||
# Mock some objects
|
||||
mock_storage.list.return_value = [
|
||||
ObjectHead(key="test/file1.zip.delta", size=100, etag="1", last_modified=None, metadata={}),
|
||||
ObjectHead(key="test/file2.txt", size=200, etag="2", last_modified=None, metadata={"compression": "none"}),
|
||||
ObjectHead(
|
||||
key="test/file1.zip.delta", size=100, etag="1", last_modified=None, metadata={}
|
||||
),
|
||||
ObjectHead(
|
||||
key="test/file2.txt",
|
||||
size=200,
|
||||
etag="2",
|
||||
last_modified=None,
|
||||
metadata={"compression": "none"},
|
||||
),
|
||||
]
|
||||
mock_storage.head.return_value = None
|
||||
mock_storage.delete.return_value = None
|
||||
@@ -51,9 +59,16 @@ class TestRecursiveDeleteReferenceCleanup:
|
||||
|
||||
# Verify structure
|
||||
required_keys = [
|
||||
"bucket", "prefix", "deleted_count", "failed_count",
|
||||
"deltas_deleted", "references_deleted", "direct_deleted",
|
||||
"other_deleted", "errors", "warnings"
|
||||
"bucket",
|
||||
"prefix",
|
||||
"deleted_count",
|
||||
"failed_count",
|
||||
"deltas_deleted",
|
||||
"references_deleted",
|
||||
"direct_deleted",
|
||||
"other_deleted",
|
||||
"errors",
|
||||
"warnings",
|
||||
]
|
||||
for key in required_keys:
|
||||
assert key in result, f"Missing key: {key}"
|
||||
@@ -71,12 +86,27 @@ class TestRecursiveDeleteReferenceCleanup:
|
||||
|
||||
# Mock different types of objects
|
||||
mock_objects = [
|
||||
ObjectHead(key="test/app.zip.delta", size=100, etag="1", last_modified=None,
|
||||
metadata={"ref_key": "test/reference.bin"}),
|
||||
ObjectHead(key="test/reference.bin", size=50, etag="2", last_modified=None,
|
||||
metadata={"file_sha256": "abc123"}),
|
||||
ObjectHead(key="test/readme.txt", size=200, etag="3", last_modified=None,
|
||||
metadata={"compression": "none"}),
|
||||
ObjectHead(
|
||||
key="test/app.zip.delta",
|
||||
size=100,
|
||||
etag="1",
|
||||
last_modified=None,
|
||||
metadata={"ref_key": "test/reference.bin"},
|
||||
),
|
||||
ObjectHead(
|
||||
key="test/reference.bin",
|
||||
size=50,
|
||||
etag="2",
|
||||
last_modified=None,
|
||||
metadata={"file_sha256": "abc123"},
|
||||
),
|
||||
ObjectHead(
|
||||
key="test/readme.txt",
|
||||
size=200,
|
||||
etag="3",
|
||||
last_modified=None,
|
||||
metadata={"compression": "none"},
|
||||
),
|
||||
ObjectHead(key="test/config.json", size=300, etag="4", last_modified=None, metadata={}),
|
||||
]
|
||||
|
||||
@@ -87,11 +117,11 @@ class TestRecursiveDeleteReferenceCleanup:
|
||||
result = service.delete_recursive("test-bucket", "test/")
|
||||
|
||||
# Should categorize correctly - the exact categorization depends on implementation
|
||||
assert result["deltas_deleted"] == 1 # app.zip.delta
|
||||
assert result["deltas_deleted"] == 1 # app.zip.delta
|
||||
assert result["references_deleted"] == 1 # reference.bin
|
||||
# Direct and other files may be categorized differently based on metadata detection
|
||||
assert result["direct_deleted"] + result["other_deleted"] == 2 # readme.txt + config.json
|
||||
assert result["deleted_count"] == 4 # total
|
||||
assert result["deleted_count"] == 4 # total
|
||||
assert result["failed_count"] == 0
|
||||
|
||||
def test_delete_recursive_handles_storage_errors_gracefully(self):
|
||||
@@ -102,8 +132,12 @@ class TestRecursiveDeleteReferenceCleanup:
|
||||
|
||||
# Mock objects
|
||||
mock_storage.list.return_value = [
|
||||
ObjectHead(key="test/good.zip.delta", size=100, etag="1", last_modified=None, metadata={}),
|
||||
ObjectHead(key="test/bad.zip.delta", size=200, etag="2", last_modified=None, metadata={}),
|
||||
ObjectHead(
|
||||
key="test/good.zip.delta", size=100, etag="1", last_modified=None, metadata={}
|
||||
),
|
||||
ObjectHead(
|
||||
key="test/bad.zip.delta", size=200, etag="2", last_modified=None, metadata={}
|
||||
),
|
||||
]
|
||||
mock_storage.head.return_value = None
|
||||
|
||||
@@ -117,8 +151,8 @@ class TestRecursiveDeleteReferenceCleanup:
|
||||
result = service.delete_recursive("test-bucket", "test/")
|
||||
|
||||
# Should handle partial failure
|
||||
assert result["deleted_count"] == 1 # good.zip.delta succeeded
|
||||
assert result["failed_count"] == 1 # bad.zip.delta failed
|
||||
assert result["deleted_count"] == 1 # good.zip.delta succeeded
|
||||
assert result["failed_count"] == 1 # bad.zip.delta failed
|
||||
assert len(result["errors"]) == 1
|
||||
assert "bad" in result["errors"][0]
|
||||
|
||||
@@ -130,12 +164,18 @@ class TestRecursiveDeleteReferenceCleanup:
|
||||
|
||||
# Create delta files that should trigger parent reference checking
|
||||
mock_objects = [
|
||||
ObjectHead(key="project/team-a/v1/app.zip.delta", size=100, etag="1",
|
||||
last_modified=None, metadata={"ref_key": "project/reference.bin"}),
|
||||
ObjectHead(
|
||||
key="project/team-a/v1/app.zip.delta",
|
||||
size=100,
|
||||
etag="1",
|
||||
last_modified=None,
|
||||
metadata={"ref_key": "project/reference.bin"},
|
||||
),
|
||||
]
|
||||
|
||||
# Mock list to return objects for initial scan, then parent reference when checked
|
||||
list_calls = []
|
||||
|
||||
def mock_list(prefix):
|
||||
list_calls.append(prefix)
|
||||
if prefix == "test-bucket/project/team-a/v1/":
|
||||
@@ -143,14 +183,24 @@ class TestRecursiveDeleteReferenceCleanup:
|
||||
elif prefix == "test-bucket/project":
|
||||
# Return parent reference when checking deltaspace
|
||||
return [
|
||||
ObjectHead(key="project/reference.bin", size=50, etag="ref",
|
||||
last_modified=None, metadata={"file_sha256": "abc123"})
|
||||
ObjectHead(
|
||||
key="project/reference.bin",
|
||||
size=50,
|
||||
etag="ref",
|
||||
last_modified=None,
|
||||
metadata={"file_sha256": "abc123"},
|
||||
)
|
||||
]
|
||||
return []
|
||||
|
||||
mock_storage.list.side_effect = mock_list
|
||||
mock_storage.head.return_value = ObjectHead(key="project/reference.bin", size=50, etag="ref",
|
||||
last_modified=None, metadata={"file_sha256": "abc123"})
|
||||
mock_storage.head.return_value = ObjectHead(
|
||||
key="project/reference.bin",
|
||||
size=50,
|
||||
etag="ref",
|
||||
last_modified=None,
|
||||
metadata={"file_sha256": "abc123"},
|
||||
)
|
||||
mock_storage.delete.return_value = None
|
||||
|
||||
result = service.delete_recursive("test-bucket", "project/team-a/v1/")
|
||||
@@ -167,7 +217,7 @@ class TestRecursiveDeleteReferenceCleanup:
|
||||
|
||||
runner = CliRunner()
|
||||
|
||||
with patch('deltaglider.app.cli.main.create_service') as mock_create_service:
|
||||
with patch("deltaglider.app.cli.main.create_service") as mock_create_service:
|
||||
mock_service = Mock()
|
||||
mock_create_service.return_value = mock_service
|
||||
|
||||
@@ -178,7 +228,7 @@ class TestRecursiveDeleteReferenceCleanup:
|
||||
"deleted_count": 2,
|
||||
"failed_count": 0,
|
||||
"warnings": [],
|
||||
"errors": []
|
||||
"errors": [],
|
||||
}
|
||||
|
||||
result = runner.invoke(cli, ["rm", "-r", "s3://test-bucket/test/"])
|
||||
@@ -195,14 +245,18 @@ class TestRecursiveDeleteReferenceCleanup:
|
||||
|
||||
runner = CliRunner()
|
||||
|
||||
with patch('deltaglider.app.cli.main.create_service') as mock_create_service:
|
||||
with patch("deltaglider.app.cli.main.create_service") as mock_create_service:
|
||||
mock_service = Mock()
|
||||
mock_create_service.return_value = mock_service
|
||||
|
||||
# Mock list for dryrun preview
|
||||
mock_service.storage.list.return_value = [
|
||||
ObjectHead(key="test/file1.zip.delta", size=100, etag="1", last_modified=None, metadata={}),
|
||||
ObjectHead(key="test/file2.txt", size=200, etag="2", last_modified=None, metadata={}),
|
||||
ObjectHead(
|
||||
key="test/file1.zip.delta", size=100, etag="1", last_modified=None, metadata={}
|
||||
),
|
||||
ObjectHead(
|
||||
key="test/file2.txt", size=200, etag="2", last_modified=None, metadata={}
|
||||
),
|
||||
]
|
||||
|
||||
result = runner.invoke(cli, ["rm", "-r", "--dryrun", "s3://test-bucket/test/"])
|
||||
@@ -219,20 +273,25 @@ class TestRecursiveDeleteReferenceCleanup:
|
||||
service.storage = mock_storage
|
||||
|
||||
# Test that both methods exist and are callable
|
||||
assert hasattr(service, 'delete')
|
||||
assert hasattr(service, 'delete_recursive')
|
||||
assert hasattr(service, "delete")
|
||||
assert hasattr(service, "delete_recursive")
|
||||
assert callable(service.delete)
|
||||
assert callable(service.delete_recursive)
|
||||
|
||||
# Mock for single delete
|
||||
mock_storage.head.return_value = ObjectHead(
|
||||
key="test/file.zip.delta", size=100, etag="1",
|
||||
last_modified=None, metadata={"original_name": "file.zip"}
|
||||
key="test/file.zip.delta",
|
||||
size=100,
|
||||
etag="1",
|
||||
last_modified=None,
|
||||
metadata={"original_name": "file.zip"},
|
||||
)
|
||||
mock_storage.list.return_value = [] # No other deltas remain
|
||||
mock_storage.delete.return_value = None
|
||||
|
||||
# Test single delete
|
||||
from deltaglider.core import ObjectKey
|
||||
|
||||
result = service.delete(ObjectKey(bucket="test-bucket", key="test/file.zip.delta"))
|
||||
|
||||
assert result["deleted"]
|
||||
@@ -246,10 +305,20 @@ class TestRecursiveDeleteReferenceCleanup:
|
||||
|
||||
# Simple scenario: one delta and its reference
|
||||
mock_objects = [
|
||||
ObjectHead(key="simple/file.zip.delta", size=100, etag="1",
|
||||
last_modified=None, metadata={"ref_key": "simple/reference.bin"}),
|
||||
ObjectHead(key="simple/reference.bin", size=50, etag="2",
|
||||
last_modified=None, metadata={"file_sha256": "abc123"}),
|
||||
ObjectHead(
|
||||
key="simple/file.zip.delta",
|
||||
size=100,
|
||||
etag="1",
|
||||
last_modified=None,
|
||||
metadata={"ref_key": "simple/reference.bin"},
|
||||
),
|
||||
ObjectHead(
|
||||
key="simple/reference.bin",
|
||||
size=50,
|
||||
etag="2",
|
||||
last_modified=None,
|
||||
metadata={"file_sha256": "abc123"},
|
||||
),
|
||||
]
|
||||
|
||||
mock_storage.list.return_value = mock_objects
|
||||
@@ -272,11 +341,22 @@ class TestRecursiveDeleteReferenceCleanup:
|
||||
|
||||
# Mix of different object types
|
||||
mock_objects = [
|
||||
ObjectHead(key="mixed/app.zip.delta", size=100, etag="1", last_modified=None, metadata={}),
|
||||
ObjectHead(key="mixed/reference.bin", size=50, etag="2", last_modified=None, metadata={}),
|
||||
ObjectHead(key="mixed/readme.txt", size=200, etag="3", last_modified=None,
|
||||
metadata={"compression": "none"}),
|
||||
ObjectHead(key="mixed/config.json", size=300, etag="4", last_modified=None, metadata={}),
|
||||
ObjectHead(
|
||||
key="mixed/app.zip.delta", size=100, etag="1", last_modified=None, metadata={}
|
||||
),
|
||||
ObjectHead(
|
||||
key="mixed/reference.bin", size=50, etag="2", last_modified=None, metadata={}
|
||||
),
|
||||
ObjectHead(
|
||||
key="mixed/readme.txt",
|
||||
size=200,
|
||||
etag="3",
|
||||
last_modified=None,
|
||||
metadata={"compression": "none"},
|
||||
),
|
||||
ObjectHead(
|
||||
key="mixed/config.json", size=300, etag="4", last_modified=None, metadata={}
|
||||
),
|
||||
]
|
||||
|
||||
mock_storage.list.return_value = mock_objects
|
||||
@@ -298,8 +378,12 @@ class TestRecursiveDeleteReferenceCleanup:
|
||||
assert isinstance(result["warnings"], list)
|
||||
|
||||
# Validate counts add up
|
||||
total_by_type = (result["deltas_deleted"] + result["references_deleted"] +
|
||||
result["direct_deleted"] + result["other_deleted"])
|
||||
total_by_type = (
|
||||
result["deltas_deleted"]
|
||||
+ result["references_deleted"]
|
||||
+ result["direct_deleted"]
|
||||
+ result["other_deleted"]
|
||||
)
|
||||
assert result["deleted_count"] == total_by_type
|
||||
|
||||
# Validate specific counts for this scenario
|
||||
|
||||
214
tests/integration/test_stats_command.py
Normal file
214
tests/integration/test_stats_command.py
Normal file
@@ -0,0 +1,214 @@
|
||||
"""Integration tests for stats CLI command."""
|
||||
|
||||
import json
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
from click.testing import CliRunner
|
||||
|
||||
from deltaglider.app.cli.main import cli
|
||||
from deltaglider.client_models import BucketStats
|
||||
|
||||
|
||||
class TestStatsCommand:
|
||||
"""Test stats CLI command."""
|
||||
|
||||
def test_stats_json_output(self):
|
||||
"""Test stats command with JSON output."""
|
||||
# Create mock bucket stats
|
||||
mock_stats = BucketStats(
|
||||
bucket="test-bucket",
|
||||
object_count=10,
|
||||
total_size=1000000,
|
||||
compressed_size=500000,
|
||||
space_saved=500000,
|
||||
average_compression_ratio=0.5,
|
||||
delta_objects=7,
|
||||
direct_objects=3,
|
||||
)
|
||||
|
||||
with patch("deltaglider.client.DeltaGliderClient") as mock_client_class:
|
||||
# Setup mock client
|
||||
mock_client = Mock()
|
||||
mock_client.get_bucket_stats.return_value = mock_stats
|
||||
mock_client_class.return_value = mock_client
|
||||
|
||||
# Run command
|
||||
runner = CliRunner()
|
||||
result = runner.invoke(cli, ["stats", "test-bucket", "--json"])
|
||||
|
||||
# Verify
|
||||
assert result.exit_code == 0
|
||||
output = json.loads(result.output)
|
||||
assert output["bucket"] == "test-bucket"
|
||||
assert output["object_count"] == 10
|
||||
assert output["total_size"] == 1000000
|
||||
assert output["compressed_size"] == 500000
|
||||
assert output["space_saved"] == 500000
|
||||
assert output["average_compression_ratio"] == 0.5
|
||||
assert output["delta_objects"] == 7
|
||||
assert output["direct_objects"] == 3
|
||||
|
||||
# Verify client was called correctly
|
||||
mock_client.get_bucket_stats.assert_called_once_with(
|
||||
"test-bucket", detailed_stats=False
|
||||
)
|
||||
|
||||
def test_stats_json_output_detailed(self):
|
||||
"""Test stats command with detailed JSON output."""
|
||||
mock_stats = BucketStats(
|
||||
bucket="test-bucket",
|
||||
object_count=5,
|
||||
total_size=2000000,
|
||||
compressed_size=100000,
|
||||
space_saved=1900000,
|
||||
average_compression_ratio=0.95,
|
||||
delta_objects=5,
|
||||
direct_objects=0,
|
||||
)
|
||||
|
||||
with patch("deltaglider.client.DeltaGliderClient") as mock_client_class:
|
||||
mock_client = Mock()
|
||||
mock_client.get_bucket_stats.return_value = mock_stats
|
||||
mock_client_class.return_value = mock_client
|
||||
|
||||
runner = CliRunner()
|
||||
result = runner.invoke(cli, ["stats", "test-bucket", "--detailed", "--json"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
output = json.loads(result.output)
|
||||
assert output["average_compression_ratio"] == 0.95
|
||||
|
||||
# Verify detailed flag was passed
|
||||
mock_client.get_bucket_stats.assert_called_once_with("test-bucket", detailed_stats=True)
|
||||
|
||||
def test_stats_human_readable_output(self):
|
||||
"""Test stats command with human-readable output."""
|
||||
mock_stats = BucketStats(
|
||||
bucket="test-bucket",
|
||||
object_count=10,
|
||||
total_size=1500000, # ~1.43 MB
|
||||
compressed_size=300000, # ~293 KB
|
||||
space_saved=1200000, # ~1.14 MB
|
||||
average_compression_ratio=0.8,
|
||||
delta_objects=7,
|
||||
direct_objects=3,
|
||||
)
|
||||
|
||||
with patch("deltaglider.client.DeltaGliderClient") as mock_client_class:
|
||||
mock_client = Mock()
|
||||
mock_client.get_bucket_stats.return_value = mock_stats
|
||||
mock_client_class.return_value = mock_client
|
||||
|
||||
runner = CliRunner()
|
||||
result = runner.invoke(cli, ["stats", "test-bucket"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
output = result.output
|
||||
|
||||
# Verify human-readable format
|
||||
assert "Bucket Statistics: test-bucket" in output
|
||||
assert "Total Objects:" in output
|
||||
assert "10" in output
|
||||
assert "Delta Objects:" in output
|
||||
assert "7" in output
|
||||
assert "Direct Objects:" in output
|
||||
assert "3" in output
|
||||
assert "Original Size:" in output
|
||||
assert "Compressed Size:" in output
|
||||
assert "Space Saved:" in output
|
||||
assert "Compression Ratio:" in output
|
||||
assert "80.0%" in output # 0.8 = 80%
|
||||
|
||||
def test_stats_error_handling(self):
|
||||
"""Test stats command error handling."""
|
||||
with patch("deltaglider.client.DeltaGliderClient") as mock_client_class:
|
||||
mock_client = Mock()
|
||||
mock_client.get_bucket_stats.side_effect = Exception("Bucket not found")
|
||||
mock_client_class.return_value = mock_client
|
||||
|
||||
runner = CliRunner()
|
||||
result = runner.invoke(cli, ["stats", "nonexistent-bucket"])
|
||||
|
||||
assert result.exit_code == 1
|
||||
assert "Error: Bucket not found" in result.output
|
||||
|
||||
def test_stats_with_s3_url(self):
|
||||
"""Test stats command with s3:// URL format."""
|
||||
mock_stats = BucketStats(
|
||||
bucket="test-bucket",
|
||||
object_count=5,
|
||||
total_size=1000000,
|
||||
compressed_size=500000,
|
||||
space_saved=500000,
|
||||
average_compression_ratio=0.5,
|
||||
delta_objects=3,
|
||||
direct_objects=2,
|
||||
)
|
||||
|
||||
with patch("deltaglider.client.DeltaGliderClient") as mock_client_class:
|
||||
mock_client = Mock()
|
||||
mock_client.get_bucket_stats.return_value = mock_stats
|
||||
mock_client_class.return_value = mock_client
|
||||
|
||||
runner = CliRunner()
|
||||
result = runner.invoke(cli, ["stats", "s3://test-bucket", "--json"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
# Verify bucket name was parsed correctly from S3 URL
|
||||
mock_client.get_bucket_stats.assert_called_once_with(
|
||||
"test-bucket", detailed_stats=False
|
||||
)
|
||||
|
||||
def test_stats_with_s3_url_trailing_slash(self):
|
||||
"""Test stats command with s3:// URL format with trailing slash."""
|
||||
mock_stats = BucketStats(
|
||||
bucket="test-bucket",
|
||||
object_count=5,
|
||||
total_size=1000000,
|
||||
compressed_size=500000,
|
||||
space_saved=500000,
|
||||
average_compression_ratio=0.5,
|
||||
delta_objects=3,
|
||||
direct_objects=2,
|
||||
)
|
||||
|
||||
with patch("deltaglider.client.DeltaGliderClient") as mock_client_class:
|
||||
mock_client = Mock()
|
||||
mock_client.get_bucket_stats.return_value = mock_stats
|
||||
mock_client_class.return_value = mock_client
|
||||
|
||||
runner = CliRunner()
|
||||
result = runner.invoke(cli, ["stats", "s3://test-bucket/", "--json"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
# Verify bucket name was parsed correctly from S3 URL with trailing slash
|
||||
mock_client.get_bucket_stats.assert_called_once_with(
|
||||
"test-bucket", detailed_stats=False
|
||||
)
|
||||
|
||||
def test_stats_with_s3_url_with_prefix(self):
|
||||
"""Test stats command with s3:// URL format with prefix (should ignore prefix)."""
|
||||
mock_stats = BucketStats(
|
||||
bucket="test-bucket",
|
||||
object_count=5,
|
||||
total_size=1000000,
|
||||
compressed_size=500000,
|
||||
space_saved=500000,
|
||||
average_compression_ratio=0.5,
|
||||
delta_objects=3,
|
||||
direct_objects=2,
|
||||
)
|
||||
|
||||
with patch("deltaglider.client.DeltaGliderClient") as mock_client_class:
|
||||
mock_client = Mock()
|
||||
mock_client.get_bucket_stats.return_value = mock_stats
|
||||
mock_client_class.return_value = mock_client
|
||||
|
||||
runner = CliRunner()
|
||||
result = runner.invoke(cli, ["stats", "s3://test-bucket/some/prefix/", "--json"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
# Verify only bucket name was extracted, prefix ignored
|
||||
mock_client.get_bucket_stats.assert_called_once_with(
|
||||
"test-bucket", detailed_stats=False
|
||||
)
|
||||
189
tests/unit/test_cache_encrypted.py
Normal file
189
tests/unit/test_cache_encrypted.py
Normal file
@@ -0,0 +1,189 @@
|
||||
"""Tests for encrypted cache adapter."""
|
||||
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
from cryptography.fernet import Fernet
|
||||
|
||||
from deltaglider.adapters import ContentAddressedCache, EncryptedCache, Sha256Adapter
|
||||
from deltaglider.core.errors import CacheCorruptionError, CacheMissError
|
||||
|
||||
|
||||
class TestEncryptedCache:
|
||||
"""Test encrypted cache wrapper functionality."""
|
||||
|
||||
@pytest.fixture
|
||||
def temp_dir(self):
|
||||
"""Create temporary directory for tests."""
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
yield Path(tmpdir)
|
||||
|
||||
@pytest.fixture
|
||||
def hasher(self):
|
||||
"""Create SHA256 hasher."""
|
||||
return Sha256Adapter()
|
||||
|
||||
@pytest.fixture
|
||||
def backend(self, temp_dir, hasher):
|
||||
"""Create CAS backend."""
|
||||
return ContentAddressedCache(temp_dir, hasher)
|
||||
|
||||
@pytest.fixture
|
||||
def encrypted_cache(self, backend):
|
||||
"""Create encrypted cache with ephemeral key."""
|
||||
return EncryptedCache(backend)
|
||||
|
||||
def test_ephemeral_key_generation(self, backend):
|
||||
"""Test that ephemeral key is generated automatically."""
|
||||
cache = EncryptedCache(backend)
|
||||
|
||||
assert cache._ephemeral is True
|
||||
assert cache._key is not None
|
||||
assert len(cache._key) == 44 # Base64-encoded 32-byte key
|
||||
|
||||
def test_provided_key_usage(self, backend):
|
||||
"""Test using provided encryption key."""
|
||||
key = Fernet.generate_key()
|
||||
cache = EncryptedCache(backend, encryption_key=key)
|
||||
|
||||
assert cache._ephemeral is False
|
||||
assert cache._key == key
|
||||
|
||||
def test_write_and_read_encrypted(self, encrypted_cache, temp_dir):
|
||||
"""Test writing and reading encrypted content."""
|
||||
# Create test file
|
||||
test_file = temp_dir / "test.txt"
|
||||
test_content = b"Secret data that should be encrypted"
|
||||
test_file.write_bytes(test_content)
|
||||
|
||||
# Compute expected SHA
|
||||
import hashlib
|
||||
|
||||
expected_sha = hashlib.sha256(test_content).hexdigest()
|
||||
|
||||
# Write to encrypted cache
|
||||
encrypted_cache.write_ref("test-bucket", "test-prefix", test_file)
|
||||
|
||||
# Read back and validate
|
||||
decrypted_path = encrypted_cache.get_validated_ref(
|
||||
"test-bucket", "test-prefix", expected_sha
|
||||
)
|
||||
|
||||
# Verify decrypted content matches original
|
||||
decrypted_content = decrypted_path.read_bytes()
|
||||
assert decrypted_content == test_content
|
||||
|
||||
def test_encrypted_storage_not_readable(self, encrypted_cache, backend, temp_dir):
|
||||
"""Test that stored data is actually encrypted."""
|
||||
# Create test file
|
||||
test_file = temp_dir / "test.txt"
|
||||
test_content = b"Plaintext secret"
|
||||
test_file.write_bytes(test_content)
|
||||
|
||||
# Write to encrypted cache
|
||||
encrypted_cache.write_ref("test-bucket", "test-prefix", test_file)
|
||||
|
||||
# Get the encrypted file path from backend
|
||||
backend_path = backend.ref_path("test-bucket", "test-prefix")
|
||||
|
||||
# Read encrypted content directly
|
||||
encrypted_content = backend_path.read_bytes()
|
||||
|
||||
# Verify content is NOT the same as plaintext
|
||||
assert encrypted_content != test_content
|
||||
# Verify content doesn't contain plaintext substring
|
||||
assert b"secret" not in encrypted_content.lower()
|
||||
|
||||
def test_cache_miss(self, encrypted_cache):
|
||||
"""Test cache miss error."""
|
||||
with pytest.raises(CacheMissError):
|
||||
encrypted_cache.get_validated_ref("no-bucket", "no-prefix", "fakehash")
|
||||
|
||||
def test_decryption_with_wrong_sha(self, encrypted_cache, temp_dir):
|
||||
"""Test that wrong SHA is detected after decryption."""
|
||||
# Create test file
|
||||
test_file = temp_dir / "test.txt"
|
||||
test_content = b"Test content"
|
||||
test_file.write_bytes(test_content)
|
||||
|
||||
# Write to cache
|
||||
encrypted_cache.write_ref("test-bucket", "test-prefix", test_file)
|
||||
|
||||
# Try to read with wrong SHA
|
||||
with pytest.raises(CacheCorruptionError, match="SHA mismatch"):
|
||||
encrypted_cache.get_validated_ref("test-bucket", "test-prefix", "wrong_sha_hash_here")
|
||||
|
||||
def test_decryption_with_wrong_key(self, temp_dir):
|
||||
"""Test that decryption fails with wrong key."""
|
||||
# Create shared backend
|
||||
from deltaglider.adapters import ContentAddressedCache, Sha256Adapter
|
||||
|
||||
hasher = Sha256Adapter()
|
||||
backend = ContentAddressedCache(temp_dir / "shared", hasher)
|
||||
|
||||
# Create two caches with different keys sharing same backend
|
||||
cache1 = EncryptedCache(backend)
|
||||
|
||||
# Write with cache1
|
||||
test_file = temp_dir / "test.txt"
|
||||
test_content = b"Encrypted data"
|
||||
test_file.write_bytes(test_content)
|
||||
|
||||
import hashlib
|
||||
|
||||
expected_sha = hashlib.sha256(test_content).hexdigest()
|
||||
|
||||
cache1.write_ref("test-bucket", "test-prefix", test_file)
|
||||
|
||||
# Create cache2 with different key (fresh instance, different ephemeral key)
|
||||
# and manually add to its mapping (simulating persistent storage scenario)
|
||||
cache2 = EncryptedCache(backend)
|
||||
cache2._plaintext_sha_map[("test-bucket", "test-prefix")] = expected_sha
|
||||
|
||||
# Try to read with cache2 (different key) - should fail decryption
|
||||
with pytest.raises(CacheCorruptionError, match="Decryption failed"):
|
||||
cache2.get_validated_ref("test-bucket", "test-prefix", expected_sha)
|
||||
|
||||
def test_evict_cleans_decrypted_files(self, encrypted_cache, temp_dir):
|
||||
"""Test that evict cleans up .decrypted temporary files."""
|
||||
# Create and store file
|
||||
test_file = temp_dir / "test.txt"
|
||||
test_content = b"Test"
|
||||
test_file.write_bytes(test_content)
|
||||
|
||||
import hashlib
|
||||
|
||||
expected_sha = hashlib.sha256(test_content).hexdigest()
|
||||
|
||||
encrypted_cache.write_ref("test-bucket", "test-prefix", test_file)
|
||||
|
||||
# Read to create .decrypted file
|
||||
decrypted_path = encrypted_cache.get_validated_ref(
|
||||
"test-bucket", "test-prefix", expected_sha
|
||||
)
|
||||
assert decrypted_path.exists()
|
||||
|
||||
# Evict
|
||||
encrypted_cache.evict("test-bucket", "test-prefix")
|
||||
|
||||
# Verify .decrypted file is removed
|
||||
assert not decrypted_path.exists()
|
||||
|
||||
def test_from_env_with_no_key(self, backend, monkeypatch):
|
||||
"""Test from_env creates ephemeral key when env var not set."""
|
||||
monkeypatch.delenv("DG_CACHE_ENCRYPTION_KEY", raising=False)
|
||||
|
||||
cache = EncryptedCache.from_env(backend)
|
||||
|
||||
assert cache._ephemeral is True
|
||||
|
||||
def test_from_env_with_key(self, backend, monkeypatch):
|
||||
"""Test from_env uses key from environment."""
|
||||
key = Fernet.generate_key()
|
||||
monkeypatch.setenv("DG_CACHE_ENCRYPTION_KEY", key.decode("utf-8"))
|
||||
|
||||
cache = EncryptedCache.from_env(backend)
|
||||
|
||||
assert cache._ephemeral is False
|
||||
assert cache._key == key
|
||||
200
tests/unit/test_cache_memory.py
Normal file
200
tests/unit/test_cache_memory.py
Normal file
@@ -0,0 +1,200 @@
|
||||
"""Tests for in-memory cache adapter."""
|
||||
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from deltaglider.adapters import MemoryCache, Sha256Adapter
|
||||
from deltaglider.core.errors import CacheCorruptionError, CacheMissError
|
||||
|
||||
|
||||
class TestMemoryCache:
|
||||
"""Test in-memory cache functionality."""
|
||||
|
||||
@pytest.fixture
|
||||
def temp_dir(self):
|
||||
"""Create temporary directory for tests."""
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
yield Path(tmpdir)
|
||||
|
||||
@pytest.fixture
|
||||
def hasher(self):
|
||||
"""Create SHA256 hasher."""
|
||||
return Sha256Adapter()
|
||||
|
||||
@pytest.fixture
|
||||
def memory_cache(self, hasher, temp_dir):
|
||||
"""Create memory cache with 1MB limit."""
|
||||
return MemoryCache(hasher, max_size_mb=1, temp_dir=temp_dir)
|
||||
|
||||
def test_write_and_read(self, memory_cache, temp_dir):
|
||||
"""Test basic write and read functionality."""
|
||||
# Create test file
|
||||
test_file = temp_dir / "test.txt"
|
||||
test_content = b"Hello, memory cache!"
|
||||
test_file.write_bytes(test_content)
|
||||
|
||||
# Compute expected SHA
|
||||
import hashlib
|
||||
|
||||
expected_sha = hashlib.sha256(test_content).hexdigest()
|
||||
|
||||
# Write to memory cache
|
||||
memory_cache.write_ref("test-bucket", "test-prefix", test_file)
|
||||
|
||||
# Read back
|
||||
retrieved_path = memory_cache.get_validated_ref("test-bucket", "test-prefix", expected_sha)
|
||||
|
||||
# Verify content
|
||||
assert retrieved_path.read_bytes() == test_content
|
||||
|
||||
def test_has_ref_true(self, memory_cache, temp_dir):
|
||||
"""Test has_ref returns True for existing content."""
|
||||
test_file = temp_dir / "test.txt"
|
||||
test_content = b"Test"
|
||||
test_file.write_bytes(test_content)
|
||||
|
||||
import hashlib
|
||||
|
||||
sha = hashlib.sha256(test_content).hexdigest()
|
||||
|
||||
memory_cache.write_ref("test-bucket", "test-prefix", test_file)
|
||||
|
||||
assert memory_cache.has_ref("test-bucket", "test-prefix", sha) is True
|
||||
|
||||
def test_has_ref_false(self, memory_cache):
|
||||
"""Test has_ref returns False for non-existent content."""
|
||||
assert memory_cache.has_ref("no-bucket", "no-prefix", "fakehash") is False
|
||||
|
||||
def test_cache_miss(self, memory_cache):
|
||||
"""Test cache miss error."""
|
||||
with pytest.raises(CacheMissError):
|
||||
memory_cache.get_validated_ref("no-bucket", "no-prefix", "fakehash")
|
||||
|
||||
def test_sha_mismatch_detection(self, memory_cache, temp_dir):
|
||||
"""Test that SHA mismatch is detected."""
|
||||
test_file = temp_dir / "test.txt"
|
||||
test_file.write_bytes(b"Content")
|
||||
|
||||
memory_cache.write_ref("test-bucket", "test-prefix", test_file)
|
||||
|
||||
# Try to read with wrong SHA
|
||||
with pytest.raises(CacheCorruptionError, match="SHA mismatch"):
|
||||
memory_cache.get_validated_ref("test-bucket", "test-prefix", "wrong_sha")
|
||||
|
||||
def test_lru_eviction(self, hasher, temp_dir):
|
||||
"""Test LRU eviction when cache is full."""
|
||||
# Create small cache (only 10KB)
|
||||
small_cache = MemoryCache(hasher, max_size_mb=0.01, temp_dir=temp_dir)
|
||||
|
||||
# Create files that will exceed cache limit
|
||||
file1 = temp_dir / "file1.txt"
|
||||
file2 = temp_dir / "file2.txt"
|
||||
file3 = temp_dir / "file3.txt"
|
||||
|
||||
# Each file is 5KB
|
||||
file1.write_bytes(b"A" * 5000)
|
||||
file2.write_bytes(b"B" * 5000)
|
||||
file3.write_bytes(b"C" * 5000)
|
||||
|
||||
# Write file1 and file2 (total 10KB, at limit)
|
||||
small_cache.write_ref("bucket", "prefix1", file1)
|
||||
small_cache.write_ref("bucket", "prefix2", file2)
|
||||
|
||||
# Verify both are in cache
|
||||
import hashlib
|
||||
|
||||
sha1 = hashlib.sha256(b"A" * 5000).hexdigest()
|
||||
sha2 = hashlib.sha256(b"B" * 5000).hexdigest()
|
||||
|
||||
assert small_cache.has_ref("bucket", "prefix1", sha1) is True
|
||||
assert small_cache.has_ref("bucket", "prefix2", sha2) is True
|
||||
|
||||
# Write file3 (5KB) - should evict file1 (LRU)
|
||||
small_cache.write_ref("bucket", "prefix3", file3)
|
||||
|
||||
# file1 should be evicted
|
||||
assert small_cache.has_ref("bucket", "prefix1", sha1) is False
|
||||
|
||||
# file2 and file3 should still be in cache
|
||||
sha3 = hashlib.sha256(b"C" * 5000).hexdigest()
|
||||
assert small_cache.has_ref("bucket", "prefix2", sha2) is True
|
||||
assert small_cache.has_ref("bucket", "prefix3", sha3) is True
|
||||
|
||||
def test_file_too_large_for_cache(self, hasher, temp_dir):
|
||||
"""Test error when file exceeds cache size limit."""
|
||||
small_cache = MemoryCache(hasher, max_size_mb=0.001, temp_dir=temp_dir) # 1KB limit
|
||||
|
||||
large_file = temp_dir / "large.txt"
|
||||
large_file.write_bytes(b"X" * 2000) # 2KB file
|
||||
|
||||
with pytest.raises(CacheCorruptionError, match="too large"):
|
||||
small_cache.write_ref("bucket", "prefix", large_file)
|
||||
|
||||
def test_evict_removes_from_memory(self, memory_cache, temp_dir):
|
||||
"""Test that evict removes content from memory."""
|
||||
test_file = temp_dir / "test.txt"
|
||||
test_content = b"Test"
|
||||
test_file.write_bytes(test_content)
|
||||
|
||||
import hashlib
|
||||
|
||||
sha = hashlib.sha256(test_content).hexdigest()
|
||||
|
||||
memory_cache.write_ref("test-bucket", "test-prefix", test_file)
|
||||
|
||||
# Verify it's in cache
|
||||
assert memory_cache.has_ref("test-bucket", "test-prefix", sha) is True
|
||||
|
||||
# Evict
|
||||
memory_cache.evict("test-bucket", "test-prefix")
|
||||
|
||||
# Verify it's gone
|
||||
assert memory_cache.has_ref("test-bucket", "test-prefix", sha) is False
|
||||
|
||||
def test_clear_removes_all(self, memory_cache, temp_dir):
|
||||
"""Test that clear removes all cached content."""
|
||||
# Add multiple files
|
||||
for i in range(3):
|
||||
test_file = temp_dir / f"test{i}.txt"
|
||||
test_file.write_bytes(f"Content {i}".encode())
|
||||
memory_cache.write_ref("bucket", f"prefix{i}", test_file)
|
||||
|
||||
# Verify cache is not empty
|
||||
assert memory_cache._current_size > 0
|
||||
assert len(memory_cache._cache) == 3
|
||||
|
||||
# Clear
|
||||
memory_cache.clear()
|
||||
|
||||
# Verify cache is empty
|
||||
assert memory_cache._current_size == 0
|
||||
assert len(memory_cache._cache) == 0
|
||||
assert len(memory_cache._access_order) == 0
|
||||
|
||||
def test_access_order_updated_on_read(self, memory_cache, temp_dir):
|
||||
"""Test that LRU access order is updated on reads."""
|
||||
# Create two files
|
||||
file1 = temp_dir / "file1.txt"
|
||||
file2 = temp_dir / "file2.txt"
|
||||
file1.write_bytes(b"File 1")
|
||||
file2.write_bytes(b"File 2")
|
||||
|
||||
# Write both
|
||||
memory_cache.write_ref("bucket", "prefix1", file1)
|
||||
memory_cache.write_ref("bucket", "prefix2", file2)
|
||||
|
||||
# Access order should be: [prefix1, prefix2]
|
||||
assert memory_cache._access_order[0] == ("bucket", "prefix1")
|
||||
assert memory_cache._access_order[1] == ("bucket", "prefix2")
|
||||
|
||||
# Read prefix1 again
|
||||
import hashlib
|
||||
|
||||
sha1 = hashlib.sha256(b"File 1").hexdigest()
|
||||
memory_cache.get_validated_ref("bucket", "prefix1", sha1)
|
||||
|
||||
# Access order should now be: [prefix2, prefix1]
|
||||
assert memory_cache._access_order[0] == ("bucket", "prefix2")
|
||||
assert memory_cache._access_order[1] == ("bucket", "prefix1")
|
||||
@@ -147,22 +147,36 @@ class TestDeltaServiceGet:
|
||||
service.get(delta_key, temp_dir / "output.zip")
|
||||
|
||||
def test_get_missing_metadata(self, service, mock_storage, temp_dir):
|
||||
"""Test get with missing metadata."""
|
||||
"""Test get with missing metadata (regular S3 object)."""
|
||||
# Setup
|
||||
delta_key = ObjectKey(bucket="test-bucket", key="test/file.zip.delta")
|
||||
|
||||
# Create test content
|
||||
test_content = b"regular S3 file content"
|
||||
|
||||
# Mock a regular S3 object without DeltaGlider metadata
|
||||
mock_storage.head.return_value = ObjectHead(
|
||||
key="test/file.zip.delta",
|
||||
size=100,
|
||||
size=len(test_content),
|
||||
etag="abc",
|
||||
last_modified=None,
|
||||
metadata={}, # Missing required metadata
|
||||
metadata={}, # Missing DeltaGlider metadata - this is a regular S3 object
|
||||
)
|
||||
|
||||
# Execute and verify
|
||||
from deltaglider.core.errors import StorageIOError
|
||||
# Mock the storage.get to return the content
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
with pytest.raises(StorageIOError):
|
||||
service.get(delta_key, temp_dir / "output.zip")
|
||||
mock_stream = MagicMock()
|
||||
mock_stream.read.side_effect = [test_content, b""] # Return content then EOF
|
||||
mock_storage.get.return_value = mock_stream
|
||||
|
||||
# Execute - should successfully download regular S3 object
|
||||
output_path = temp_dir / "output.zip"
|
||||
service.get(delta_key, output_path)
|
||||
|
||||
# Verify - file should be downloaded
|
||||
assert output_path.exists()
|
||||
assert output_path.read_bytes() == test_content
|
||||
|
||||
|
||||
class TestDeltaServiceVerify:
|
||||
|
||||
454
tests/unit/test_stats_algorithm.py
Normal file
454
tests/unit/test_stats_algorithm.py
Normal file
@@ -0,0 +1,454 @@
|
||||
"""Exhaustive tests for the bucket statistics algorithm."""
|
||||
|
||||
from unittest.mock import MagicMock, Mock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from deltaglider.client_operations.stats import get_bucket_stats
|
||||
|
||||
|
||||
class TestBucketStatsAlgorithm:
|
||||
"""Test suite for get_bucket_stats algorithm."""
|
||||
|
||||
@pytest.fixture
|
||||
def mock_client(self):
|
||||
"""Create a mock DeltaGliderClient."""
|
||||
client = Mock()
|
||||
client.service = Mock()
|
||||
client.service.storage = Mock()
|
||||
client.service.logger = Mock()
|
||||
return client
|
||||
|
||||
def test_empty_bucket(self, mock_client):
|
||||
"""Test statistics for an empty bucket."""
|
||||
# Setup: Empty bucket
|
||||
mock_client.service.storage.list_objects.return_value = {
|
||||
"objects": [],
|
||||
"is_truncated": False,
|
||||
}
|
||||
|
||||
# Execute
|
||||
stats = get_bucket_stats(mock_client, "empty-bucket")
|
||||
|
||||
# Verify
|
||||
assert stats.bucket == "empty-bucket"
|
||||
assert stats.object_count == 0
|
||||
assert stats.total_size == 0
|
||||
assert stats.compressed_size == 0
|
||||
assert stats.space_saved == 0
|
||||
assert stats.average_compression_ratio == 0.0
|
||||
assert stats.delta_objects == 0
|
||||
assert stats.direct_objects == 0
|
||||
|
||||
def test_bucket_with_only_direct_files(self, mock_client):
|
||||
"""Test bucket with only direct files (no compression)."""
|
||||
# Setup: Bucket with 3 direct files
|
||||
mock_client.service.storage.list_objects.return_value = {
|
||||
"objects": [
|
||||
{"key": "file1.pdf", "size": 1000000, "last_modified": "2024-01-01"},
|
||||
{"key": "file2.html", "size": 500000, "last_modified": "2024-01-02"},
|
||||
{"key": "file3.txt", "size": 250000, "last_modified": "2024-01-03"},
|
||||
],
|
||||
"is_truncated": False,
|
||||
}
|
||||
mock_client.service.storage.head.return_value = None
|
||||
|
||||
# Execute
|
||||
stats = get_bucket_stats(mock_client, "direct-only-bucket")
|
||||
|
||||
# Verify
|
||||
assert stats.object_count == 3
|
||||
assert stats.total_size == 1750000 # Sum of all files
|
||||
assert stats.compressed_size == 1750000 # Same as total (no compression)
|
||||
assert stats.space_saved == 0
|
||||
assert stats.average_compression_ratio == 0.0
|
||||
assert stats.delta_objects == 0
|
||||
assert stats.direct_objects == 3
|
||||
|
||||
def test_bucket_with_delta_compression(self, mock_client):
|
||||
"""Test bucket with delta-compressed files."""
|
||||
# Setup: Bucket with reference.bin and 2 delta files
|
||||
mock_client.service.storage.list_objects.return_value = {
|
||||
"objects": [
|
||||
{"key": "reference.bin", "size": 20000000, "last_modified": "2024-01-01"},
|
||||
{"key": "file1.zip.delta", "size": 50000, "last_modified": "2024-01-02"},
|
||||
{"key": "file2.zip.delta", "size": 60000, "last_modified": "2024-01-03"},
|
||||
],
|
||||
"is_truncated": False,
|
||||
}
|
||||
|
||||
# Mock metadata for delta files
|
||||
def mock_head(path):
|
||||
if "file1.zip.delta" in path:
|
||||
head = Mock()
|
||||
head.metadata = {"file_size": "19500000", "compression_ratio": "0.997"}
|
||||
return head
|
||||
elif "file2.zip.delta" in path:
|
||||
head = Mock()
|
||||
head.metadata = {"file_size": "19600000", "compression_ratio": "0.997"}
|
||||
return head
|
||||
return None
|
||||
|
||||
mock_client.service.storage.head.side_effect = mock_head
|
||||
|
||||
# Execute
|
||||
stats = get_bucket_stats(mock_client, "compressed-bucket")
|
||||
|
||||
# Verify
|
||||
assert stats.object_count == 2 # Only delta files counted (not reference.bin)
|
||||
assert stats.total_size == 39100000 # 19.5M + 19.6M
|
||||
assert stats.compressed_size == 20110000 # reference (20M) + deltas (50K + 60K)
|
||||
assert stats.space_saved == 18990000 # ~19MB saved
|
||||
assert stats.average_compression_ratio > 0.48 # ~48.6% compression
|
||||
assert stats.delta_objects == 2
|
||||
assert stats.direct_objects == 0
|
||||
|
||||
def test_orphaned_reference_bin_detection(self, mock_client):
|
||||
"""Test detection of orphaned reference.bin files."""
|
||||
# Setup: Bucket with reference.bin but no delta files
|
||||
mock_client.service.storage.list_objects.return_value = {
|
||||
"objects": [
|
||||
{"key": "reference.bin", "size": 20000000, "last_modified": "2024-01-01"},
|
||||
{"key": "regular.pdf", "size": 1000000, "last_modified": "2024-01-02"},
|
||||
],
|
||||
"is_truncated": False,
|
||||
}
|
||||
mock_client.service.storage.head.return_value = None
|
||||
|
||||
# Execute
|
||||
stats = get_bucket_stats(mock_client, "orphaned-ref-bucket")
|
||||
|
||||
# Verify stats
|
||||
assert stats.object_count == 1 # Only regular.pdf
|
||||
assert stats.total_size == 1000000 # Only regular.pdf size
|
||||
assert stats.compressed_size == 1000000 # reference.bin NOT included
|
||||
assert stats.space_saved == 0
|
||||
assert stats.delta_objects == 0
|
||||
assert stats.direct_objects == 1
|
||||
|
||||
# Verify warning was logged
|
||||
warning_calls = mock_client.service.logger.warning.call_args_list
|
||||
assert any("ORPHANED REFERENCE FILE" in str(call) for call in warning_calls)
|
||||
assert any("20,000,000 bytes" in str(call) for call in warning_calls)
|
||||
assert any(
|
||||
"aws s3 rm s3://orphaned-ref-bucket/reference.bin" in str(call)
|
||||
for call in warning_calls
|
||||
)
|
||||
|
||||
def test_mixed_bucket(self, mock_client):
|
||||
"""Test bucket with both delta and direct files."""
|
||||
# Setup: Mixed bucket
|
||||
mock_client.service.storage.list_objects.return_value = {
|
||||
"objects": [
|
||||
{"key": "pro/reference.bin", "size": 20000000, "last_modified": "2024-01-01"},
|
||||
{"key": "pro/v1.zip.delta", "size": 50000, "last_modified": "2024-01-02"},
|
||||
{"key": "pro/v2.zip.delta", "size": 60000, "last_modified": "2024-01-03"},
|
||||
{"key": "docs/readme.pdf", "size": 500000, "last_modified": "2024-01-04"},
|
||||
{"key": "docs/manual.html", "size": 300000, "last_modified": "2024-01-05"},
|
||||
],
|
||||
"is_truncated": False,
|
||||
}
|
||||
|
||||
# Mock metadata for delta files
|
||||
def mock_head(path):
|
||||
if "v1.zip.delta" in path:
|
||||
head = Mock()
|
||||
head.metadata = {"file_size": "19500000"}
|
||||
return head
|
||||
elif "v2.zip.delta" in path:
|
||||
head = Mock()
|
||||
head.metadata = {"file_size": "19600000"}
|
||||
return head
|
||||
return None
|
||||
|
||||
mock_client.service.storage.head.side_effect = mock_head
|
||||
|
||||
# Execute
|
||||
stats = get_bucket_stats(mock_client, "mixed-bucket")
|
||||
|
||||
# Verify
|
||||
assert stats.object_count == 4 # 2 delta + 2 direct files
|
||||
assert stats.total_size == 39900000 # 19.5M + 19.6M + 0.5M + 0.3M
|
||||
assert stats.compressed_size == 20910000 # ref (20M) + deltas (110K) + direct (800K)
|
||||
assert stats.space_saved == 18990000
|
||||
assert stats.delta_objects == 2
|
||||
assert stats.direct_objects == 2
|
||||
|
||||
def test_sha1_files_included(self, mock_client):
|
||||
"""Test that .sha1 checksum files are counted properly."""
|
||||
# Setup: Bucket with .sha1 files
|
||||
mock_client.service.storage.list_objects.return_value = {
|
||||
"objects": [
|
||||
{"key": "file1.zip", "size": 1000000, "last_modified": "2024-01-01"},
|
||||
{"key": "file1.zip.sha1", "size": 41, "last_modified": "2024-01-01"},
|
||||
{"key": "file2.tar", "size": 2000000, "last_modified": "2024-01-02"},
|
||||
{"key": "file2.tar.sha1", "size": 41, "last_modified": "2024-01-02"},
|
||||
],
|
||||
"is_truncated": False,
|
||||
}
|
||||
mock_client.service.storage.head.return_value = None
|
||||
|
||||
# Execute
|
||||
stats = get_bucket_stats(mock_client, "sha1-bucket")
|
||||
|
||||
# Verify - .sha1 files ARE counted
|
||||
assert stats.object_count == 4
|
||||
assert stats.total_size == 3000082 # All files including .sha1
|
||||
assert stats.compressed_size == 3000082
|
||||
assert stats.direct_objects == 4
|
||||
|
||||
def test_multiple_deltaspaces(self, mock_client):
|
||||
"""Test bucket with multiple deltaspaces (different prefixes)."""
|
||||
# Setup: Multiple deltaspaces
|
||||
mock_client.service.storage.list_objects.return_value = {
|
||||
"objects": [
|
||||
{"key": "pro/reference.bin", "size": 20000000, "last_modified": "2024-01-01"},
|
||||
{"key": "pro/v1.zip.delta", "size": 50000, "last_modified": "2024-01-02"},
|
||||
{
|
||||
"key": "enterprise/reference.bin",
|
||||
"size": 25000000,
|
||||
"last_modified": "2024-01-03",
|
||||
},
|
||||
{"key": "enterprise/v1.zip.delta", "size": 70000, "last_modified": "2024-01-04"},
|
||||
],
|
||||
"is_truncated": False,
|
||||
}
|
||||
|
||||
# Mock metadata
|
||||
def mock_head(path):
|
||||
if "pro/v1.zip.delta" in path:
|
||||
head = Mock()
|
||||
head.metadata = {"file_size": "19500000"}
|
||||
return head
|
||||
elif "enterprise/v1.zip.delta" in path:
|
||||
head = Mock()
|
||||
head.metadata = {"file_size": "24500000"}
|
||||
return head
|
||||
return None
|
||||
|
||||
mock_client.service.storage.head.side_effect = mock_head
|
||||
|
||||
# Execute
|
||||
stats = get_bucket_stats(mock_client, "multi-deltaspace-bucket")
|
||||
|
||||
# Verify
|
||||
assert stats.object_count == 2 # Only delta files
|
||||
assert stats.total_size == 44000000 # 19.5M + 24.5M
|
||||
assert stats.compressed_size == 45120000 # Both references + both deltas
|
||||
assert stats.delta_objects == 2
|
||||
assert stats.direct_objects == 0
|
||||
|
||||
def test_pagination_handling(self, mock_client):
|
||||
"""Test handling of paginated results."""
|
||||
# Setup: Paginated responses
|
||||
mock_client.service.storage.list_objects.side_effect = [
|
||||
{
|
||||
"objects": [
|
||||
{"key": f"file{i}.txt", "size": 1000, "last_modified": "2024-01-01"}
|
||||
for i in range(1000)
|
||||
],
|
||||
"is_truncated": True,
|
||||
"next_continuation_token": "token1",
|
||||
},
|
||||
{
|
||||
"objects": [
|
||||
{"key": f"file{i}.txt", "size": 1000, "last_modified": "2024-01-01"}
|
||||
for i in range(1000, 1500)
|
||||
],
|
||||
"is_truncated": False,
|
||||
},
|
||||
]
|
||||
mock_client.service.storage.head.return_value = None
|
||||
|
||||
# Execute
|
||||
stats = get_bucket_stats(mock_client, "paginated-bucket")
|
||||
|
||||
# Verify
|
||||
assert stats.object_count == 1500
|
||||
assert stats.total_size == 1500000
|
||||
assert stats.compressed_size == 1500000
|
||||
assert stats.direct_objects == 1500
|
||||
|
||||
# Verify pagination was handled
|
||||
assert mock_client.service.storage.list_objects.call_count == 2
|
||||
|
||||
def test_delta_file_without_metadata(self, mock_client):
|
||||
"""Test handling of delta files with missing metadata."""
|
||||
# Setup: Delta file without metadata
|
||||
mock_client.service.storage.list_objects.return_value = {
|
||||
"objects": [
|
||||
{"key": "reference.bin", "size": 20000000, "last_modified": "2024-01-01"},
|
||||
{"key": "file.zip.delta", "size": 50000, "last_modified": "2024-01-02"},
|
||||
],
|
||||
"is_truncated": False,
|
||||
}
|
||||
|
||||
# No metadata available
|
||||
mock_client.service.storage.head.return_value = None
|
||||
|
||||
# Execute
|
||||
stats = get_bucket_stats(mock_client, "no-metadata-bucket")
|
||||
|
||||
# Verify - falls back to using delta size as original size
|
||||
assert stats.object_count == 1
|
||||
assert stats.total_size == 50000 # Falls back to delta size
|
||||
assert stats.compressed_size == 20050000 # reference + delta
|
||||
assert stats.delta_objects == 1
|
||||
|
||||
# Verify warning was logged
|
||||
warning_calls = mock_client.service.logger.warning.call_args_list
|
||||
assert any("no original_size" in str(call) for call in warning_calls)
|
||||
|
||||
def test_parallel_metadata_fetching(self, mock_client):
|
||||
"""Test that metadata is fetched in parallel for performance."""
|
||||
# Setup: Many delta files
|
||||
num_deltas = 50
|
||||
objects = [{"key": "reference.bin", "size": 20000000, "last_modified": "2024-01-01"}]
|
||||
objects.extend(
|
||||
[
|
||||
{
|
||||
"key": f"file{i}.zip.delta",
|
||||
"size": 50000 + i,
|
||||
"last_modified": f"2024-01-{i + 2:02d}",
|
||||
}
|
||||
for i in range(num_deltas)
|
||||
]
|
||||
)
|
||||
|
||||
mock_client.service.storage.list_objects.return_value = {
|
||||
"objects": objects,
|
||||
"is_truncated": False,
|
||||
}
|
||||
|
||||
# Mock metadata
|
||||
def mock_head(path):
|
||||
head = Mock()
|
||||
head.metadata = {"file_size": "19500000"}
|
||||
return head
|
||||
|
||||
mock_client.service.storage.head.side_effect = mock_head
|
||||
|
||||
# Execute with mocked ThreadPoolExecutor
|
||||
with patch("concurrent.futures.ThreadPoolExecutor") as mock_executor:
|
||||
mock_pool = MagicMock()
|
||||
mock_executor.return_value.__enter__.return_value = mock_pool
|
||||
|
||||
# Simulate parallel execution
|
||||
futures = []
|
||||
for i in range(num_deltas):
|
||||
future = Mock()
|
||||
future.result.return_value = (f"file{i}.zip.delta", {"file_size": "19500000"})
|
||||
futures.append(future)
|
||||
|
||||
mock_pool.submit.side_effect = futures
|
||||
patch_as_completed = patch(
|
||||
"concurrent.futures.as_completed",
|
||||
return_value=futures,
|
||||
)
|
||||
|
||||
with patch_as_completed:
|
||||
_ = get_bucket_stats(mock_client, "parallel-bucket")
|
||||
|
||||
# Verify ThreadPoolExecutor was used with correct max_workers
|
||||
mock_executor.assert_called_once_with(max_workers=10) # min(10, 50) = 10
|
||||
|
||||
def test_detailed_stats_flag(self, mock_client):
|
||||
"""Test that detailed_stats flag controls metadata fetching."""
|
||||
# Setup
|
||||
mock_client.service.storage.list_objects.return_value = {
|
||||
"objects": [
|
||||
{"key": "reference.bin", "size": 20000000, "last_modified": "2024-01-01"},
|
||||
{"key": "file.zip.delta", "size": 50000, "last_modified": "2024-01-02"},
|
||||
],
|
||||
"is_truncated": False,
|
||||
}
|
||||
|
||||
# Test with detailed_stats=False (default)
|
||||
# NOTE: Currently, the implementation always fetches metadata regardless of the flag
|
||||
# This test documents the current behavior
|
||||
_ = get_bucket_stats(mock_client, "test-bucket", detailed_stats=False)
|
||||
|
||||
# Currently metadata is always fetched for delta files
|
||||
assert mock_client.service.storage.head.called
|
||||
|
||||
# Reset mock
|
||||
mock_client.service.storage.head.reset_mock()
|
||||
|
||||
# Test with detailed_stats=True
|
||||
mock_client.service.storage.head.return_value = Mock(metadata={"file_size": "19500000"})
|
||||
|
||||
_ = get_bucket_stats(mock_client, "test-bucket", detailed_stats=True)
|
||||
|
||||
# Should fetch metadata
|
||||
assert mock_client.service.storage.head.called
|
||||
|
||||
def test_error_handling_in_metadata_fetch(self, mock_client):
|
||||
"""Test graceful handling of errors during metadata fetch."""
|
||||
# Setup
|
||||
mock_client.service.storage.list_objects.return_value = {
|
||||
"objects": [
|
||||
{"key": "reference.bin", "size": 20000000, "last_modified": "2024-01-01"},
|
||||
{"key": "file1.zip.delta", "size": 50000, "last_modified": "2024-01-02"},
|
||||
{"key": "file2.zip.delta", "size": 60000, "last_modified": "2024-01-03"},
|
||||
],
|
||||
"is_truncated": False,
|
||||
}
|
||||
|
||||
# Mock metadata fetch to fail for one file
|
||||
def mock_head(path):
|
||||
if "file1.zip.delta" in path:
|
||||
raise Exception("S3 error")
|
||||
elif "file2.zip.delta" in path:
|
||||
head = Mock()
|
||||
head.metadata = {"file_size": "19600000"}
|
||||
return head
|
||||
return None
|
||||
|
||||
mock_client.service.storage.head.side_effect = mock_head
|
||||
|
||||
# Execute - should handle error gracefully
|
||||
stats = get_bucket_stats(mock_client, "error-bucket", detailed_stats=True)
|
||||
|
||||
# Verify - file1 uses fallback, file2 uses metadata
|
||||
assert stats.object_count == 2
|
||||
assert stats.delta_objects == 2
|
||||
# file1 falls back to delta size (50000), file2 uses metadata (19600000)
|
||||
assert stats.total_size == 50000 + 19600000
|
||||
|
||||
def test_multiple_orphaned_references(self, mock_client):
|
||||
"""Test detection of multiple orphaned reference.bin files."""
|
||||
# Setup: Multiple orphaned references
|
||||
mock_client.service.storage.list_objects.return_value = {
|
||||
"objects": [
|
||||
{"key": "pro/reference.bin", "size": 20000000, "last_modified": "2024-01-01"},
|
||||
{
|
||||
"key": "enterprise/reference.bin",
|
||||
"size": 25000000,
|
||||
"last_modified": "2024-01-02",
|
||||
},
|
||||
{"key": "community/reference.bin", "size": 15000000, "last_modified": "2024-01-03"},
|
||||
{"key": "regular.pdf", "size": 1000000, "last_modified": "2024-01-04"},
|
||||
],
|
||||
"is_truncated": False,
|
||||
}
|
||||
mock_client.service.storage.head.return_value = None
|
||||
|
||||
# Execute
|
||||
stats = get_bucket_stats(mock_client, "multi-orphaned-bucket")
|
||||
|
||||
# Verify stats
|
||||
assert stats.object_count == 1 # Only regular.pdf
|
||||
assert stats.total_size == 1000000
|
||||
assert stats.compressed_size == 1000000 # No references included
|
||||
assert stats.space_saved == 0
|
||||
|
||||
# Verify warnings for all orphaned references
|
||||
warning_calls = [str(call) for call in mock_client.service.logger.warning.call_args_list]
|
||||
warning_text = " ".join(warning_calls)
|
||||
|
||||
assert "ORPHANED REFERENCE FILE" in warning_text
|
||||
assert "3 reference.bin file(s)" in warning_text
|
||||
assert "60,000,000 bytes" in warning_text # Total of all references
|
||||
assert "s3://multi-orphaned-bucket/pro/reference.bin" in warning_text
|
||||
assert "s3://multi-orphaned-bucket/enterprise/reference.bin" in warning_text
|
||||
assert "s3://multi-orphaned-bucket/community/reference.bin" in warning_text
|
||||
Reference in New Issue
Block a user