Fix intermittent crawl timeout test failure (#1621)
Fixes #1620. This increases the total timeout for the crawl to complete from 60 seconds to 120 seconds, which should be sufficient given how intermittently the failure has been happening. It can be increased further if needed.
This commit is contained in:
parent
4f676e4e82
commit
b3b1e0d7d8
@@ -15,17 +15,20 @@ def test_crawl_timeout(admin_auth_headers, default_org_id, timeout_crawl):
    data = r.json()
    assert data["state"] in ("starting", "running")

    # Wait some time to let crawl start, hit timeout, and gracefully stop
    time.sleep(60)

    attempts = 0
    while True:
        # Try for 2 minutes before failing
        if attempts > 24:
            assert False

        # Verify crawl was stopped
        r = requests.get(
            f"{API_PREFIX}/orgs/{default_org_id}/crawls/{timeout_crawl}/replay.json",
            headers=admin_auth_headers,
        )
        assert r.status_code == 200
        data = r.json()
        assert data["state"] == "complete"
        if r.json()["state"] == "complete":
            break

        time.sleep(10)
        attempts += 1


def test_crawl_files_replicated(admin_auth_headers, default_org_id, timeout_crawl):
Loading…
Reference in New Issue
Block a user