Fixes #1252

Adds a generic background job system with two background jobs, CreateReplicaJob and DeleteReplicaJob.

- CreateReplicaJob runs when new crawls, uploads, and profiles are created, and updates the `replicas` array with info about the replica after the job succeeds.
- DeleteReplicaJob deletes the replica.
- Both jobs are created from the new `replica_job.yaml` template. CreateReplicaJob sets secrets for both primary and replica storage, while DeleteReplicaJob only needs the replica storage.
- The job is processed in the operator when it is finalized (deleted), which should happen immediately once the job is done, either because it succeeds or because the backoffLimit is reached (currently set to 3).
- The /jobs/ API lists all jobs in a paginated response, with filtering and sorting.
- /jobs/<job id> returns details for a particular job (see the sketch below for both endpoints).
- Tests: nightly tests updated to check create + delete replica jobs for crawls as well as uploads, and the job API endpoints.
- Tests: also fixes timeouts in nightly tests to avoid crawls finishing too quickly.

---------

Co-authored-by: Tessa Walsh <tessa@bitarchivist.net>
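For reference, a minimal sketch of calling the two job endpoints from Python, using the same query parameters (`sortBy`, `sortDirection`, `jobType`) and response fields exercised by the nightly test below. The base URL, org id, and auth token here are placeholders, not values from this PR.

```python
import requests

# Placeholders -- substitute a real deployment URL, org id, and admin token
API_PREFIX = "http://localhost:30870/api"
ORG_ID = "<org-id>"
HEADERS = {"Authorization": "Bearer <admin-token>"}

# List background jobs for the org, filtered to replica-creation jobs
# and sorted by start time (paginated response with an "items" list)
r = requests.get(
    f"{API_PREFIX}/orgs/{ORG_ID}/jobs"
    "?sortBy=started&sortDirection=1&jobType=create-replica",
    headers=HEADERS,
)
jobs = r.json()["items"]

# Fetch details for a single job by id
if jobs:
    job_id = jobs[0]["id"]
    r = requests.get(f"{API_PREFIX}/orgs/{ORG_ID}/jobs/{job_id}", headers=HEADERS)
    job = r.json()
    print(job["type"], job.get("finished"), job.get("success"))
```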
80 lines · 2.3 KiB · Python
import requests
import time

from .conftest import API_PREFIX
from .utils import verify_file_replicated


def test_crawl_timeout(admin_auth_headers, default_org_id, timeout_crawl):
    # Verify that crawl has started
    r = requests.get(
        f"{API_PREFIX}/orgs/{default_org_id}/crawls/{timeout_crawl}/replay.json",
        headers=admin_auth_headers,
    )
    assert r.status_code == 200
    data = r.json()
    assert data["state"] in ("starting", "running")

    # Wait some time to let crawl start, hit timeout, and gracefully stop
    time.sleep(60)

    # Verify crawl was stopped
    r = requests.get(
        f"{API_PREFIX}/orgs/{default_org_id}/crawls/{timeout_crawl}/replay.json",
        headers=admin_auth_headers,
    )
    assert r.status_code == 200
    data = r.json()
    assert data["state"] == "partial_complete"


def test_crawl_files_replicated(admin_auth_headers, default_org_id, timeout_crawl):
    # Wait briefly to give the replication job time to be created
    time.sleep(20)

    # Verify replication job was successful
    r = requests.get(
        f"{API_PREFIX}/orgs/{default_org_id}/jobs?sortBy=started&sortDirection=1&jobType=create-replica",
        headers=admin_auth_headers,
    )
    assert r.status_code == 200
    latest_job = r.json()["items"][0]
    assert latest_job["type"] == "create-replica"
    job_id = latest_job["id"]

    # Poll the job detail endpoint until the job reports it has finished
    attempts = 0
    while attempts < 5:
        r = requests.get(
            f"{API_PREFIX}/orgs/{default_org_id}/jobs/{job_id}",
            headers=admin_auth_headers,
        )
        assert r.status_code == 200
        job = r.json()
        finished = job.get("finished")
        if not finished:
            attempts += 1
            time.sleep(10)
            continue
        assert job["success"]
        break

    # Assert file was updated
    r = requests.get(
        f"{API_PREFIX}/orgs/{default_org_id}/crawls/{timeout_crawl}/replay.json",
        headers=admin_auth_headers,
    )
    assert r.status_code == 200
    data = r.json()
    files = data.get("resources")
    assert files
    for file_ in files:
        assert file_["numReplicas"] == 1

    # Verify replica is stored
    r = requests.get(
        f"{API_PREFIX}/orgs/{default_org_id}/jobs/{job_id}", headers=admin_auth_headers
    )
    assert r.status_code == 200
    data = r.json()
    verify_file_replicated(data["file_path"])