Disable uploading and creating browser profiles when org is read-only (#1907)

Fixes #1904 

Follow-up to read-only enforcement, with improved tests.
This commit is contained in:
Tessa Walsh 2024-07-02 02:15:38 -04:00 committed by GitHub
parent e1ef894275
commit bdfc0948d3
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 98 additions and 7 deletions

View File

@@ -154,7 +154,7 @@ class ProfileOps:
async def commit_to_profile(
self,
browser_commit: ProfileCreate,
storage: StorageRef,
org: Organization,
user: User,
metadata: dict,
existing_profile: Optional[Profile] = None,
@@ -196,7 +196,7 @@ class ProfileOps:
hash=resource["hash"],
size=file_size,
filename=resource["path"],
storage=storage,
storage=org.storage,
)
baseid = metadata.get("btrix.baseprofile")
@@ -206,6 +206,9 @@ class ProfileOps:
oid = UUID(metadata.get("btrix.org"))
if org.readOnly:
raise HTTPException(status_code=403, detail="org_set_to_read_only")
if await self.orgs.storage_quota_reached(oid):
raise HTTPException(status_code=403, detail="storage_quota_reached")
@@ -493,7 +496,7 @@ def init_profiles_api(
):
metadata = await browser_get_metadata(browser_commit.browserid, org)
return await ops.commit_to_profile(browser_commit, org.storage, user, metadata)
return await ops.commit_to_profile(browser_commit, org, user, metadata)
@router.patch("/{profileid}")
async def commit_browser_to_existing(
@@ -515,7 +518,7 @@ def init_profiles_api(
description=browser_commit.description or profile.description,
crawlerChannel=profile.crawlerChannel,
),
storage=org.storage,
org=org,
user=user,
metadata=metadata,
existing_profile=profile,

View File

@@ -63,6 +63,9 @@ class UploadOps(BaseCrawlOps):
replaceId: Optional[str],
) -> dict[str, Any]:
"""Upload streaming file, length unknown"""
if org.readOnly:
raise HTTPException(status_code=403, detail="org_set_to_read_only")
if await self.orgs.storage_quota_reached(org.id):
raise HTTPException(status_code=403, detail="storage_quota_reached")
@@ -122,6 +125,9 @@ class UploadOps(BaseCrawlOps):
user: User,
) -> dict[str, Any]:
"""handle uploading content to uploads subdir + request subdir"""
if org.readOnly:
raise HTTPException(status_code=403, detail="org_set_to_read_only")
if await self.orgs.storage_quota_reached(org.id):
raise HTTPException(status_code=403, detail="storage_quota_reached")

View File

@@ -522,6 +522,11 @@ def profile_browser_3_id(admin_auth_headers, default_org_id):
return _create_profile_browser(admin_auth_headers, default_org_id)
@pytest.fixture(scope="session")
def profile_browser_4_id(admin_auth_headers, default_org_id):
    """Session-scoped browser id for the read-only-org profile test."""
    browser_id = _create_profile_browser(admin_auth_headers, default_org_id)
    return browser_id
def _create_profile_browser(
headers: Dict[str, str], oid: UUID, url: str = "https://webrecorder.net"
):

View File

@@ -1,9 +1,13 @@
import os
import requests
import uuid
import pytest
from .conftest import API_PREFIX
from .utils import read_in_chunks
curr_dir = os.path.dirname(os.path.realpath(__file__))
new_oid = None
@@ -524,7 +528,7 @@ def test_update_read_only(admin_auth_headers, default_org_id):
assert data["readOnly"] is True
assert data["readOnlyReason"] == "Payment suspended"
# Try to start crawls, should fail
# Try to start crawl from new workflow, should fail
crawl_data = {
"runNow": True,
"name": "Read Only Test Crawl",
@@ -543,10 +547,32 @@ def test_update_read_only(admin_auth_headers, default_org_id):
data = r.json()
assert data["added"]
assert data["id"]
assert data["run_now_job"] is None
# Reset back to False, future crawls in tests should run fine
cid = data["id"]
assert cid
# Try to start crawl from existing workflow, should fail
r = requests.post(
f"{API_PREFIX}/orgs/{default_org_id}/crawlconfigs/{cid}/run",
headers=admin_auth_headers,
json=crawl_data,
)
assert r.status_code == 403
assert r.json()["detail"] == "org_set_to_read_only"
# Try to upload a WACZ, should fail
with open(os.path.join(curr_dir, "data", "example.wacz"), "rb") as fh:
r = requests.put(
f"{API_PREFIX}/orgs/{default_org_id}/uploads/stream?filename=test.wacz&name=My%20New%20Upload&description=Should%20Fail&collections=&tags=",
headers=admin_auth_headers,
data=read_in_chunks(fh),
)
assert r.status_code == 403
assert r.json()["detail"] == "org_set_to_read_only"
# Reset back to False, future tests should be unaffected
r = requests.post(
f"{API_PREFIX}/orgs/{default_org_id}/read-only",
headers=admin_auth_headers,

View File

@@ -504,3 +504,54 @@ def test_delete_profile(admin_auth_headers, default_org_id, profile_2_id):
)
assert r.status_code == 404
assert r.json()["detail"] == "profile_not_found"
def test_create_profile_read_only_org(
    admin_auth_headers, default_org_id, profile_browser_4_id
):
    """Verify profile creation is rejected with 403 while the org is read-only.

    Sets the default org to read-only, polls the profile-commit endpoint
    until it stops reporting ``waiting_for_browser``, asserts the commit is
    refused with ``org_set_to_read_only``, then restores the org so later
    tests are unaffected.
    """
    # Set org to read-only
    r = requests.post(
        f"{API_PREFIX}/orgs/{default_org_id}/read-only",
        headers=admin_auth_headers,
        json={"readOnly": True, "readOnlyReason": "For testing purposes"},
    )
    assert r.json()["updated"]

    prepare_browser_for_profile_commit(
        profile_browser_4_id, admin_auth_headers, default_org_id
    )

    # Try to create profile, verify we get 403 forbidden
    start_time = time.monotonic()
    time_limit = 300
    while True:
        # Enforce the time limit on every iteration (not only after an
        # exception) so an endless "waiting_for_browser" response cannot
        # hang the test forever.
        if time.monotonic() - start_time > time_limit:
            raise TimeoutError(
                "profile commit did not return org_set_to_read_only in time"
            )
        try:
            r = requests.post(
                f"{API_PREFIX}/orgs/{default_org_id}/profiles",
                headers=admin_auth_headers,
                json={
                    "browserid": profile_browser_4_id,
                    "name": "uncreatable",
                    "description": "because org is read-only",
                },
                timeout=10,
            )
            detail = r.json().get("detail")
            if detail == "waiting_for_browser":
                time.sleep(5)
                continue
            if detail == "org_set_to_read_only":
                assert r.status_code == 403
                break
            # Unexpected detail: back off before retrying instead of
            # hammering the endpoint in a tight loop.
            time.sleep(5)
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate; transient connection/JSON errors are retried
            # until the time limit, then re-raised.
            if time.monotonic() - start_time > time_limit:
                raise
            time.sleep(5)

    # Set readOnly back to false on org
    r = requests.post(
        f"{API_PREFIX}/orgs/{default_org_id}/read-only",
        headers=admin_auth_headers,
        json={"readOnly": False},
    )
    assert r.json()["updated"]