Yes, the output of the command cryosparcm joblog PX JY shows:
[Tue, 24 Jun 2025 20:51:09 GMT] License is valid.
[Tue, 24 Jun 2025 20:51:09 GMT] Launching job on lane polaris target polaris ...
[Tue, 24 Jun 2025 20:51:09 GMT] Launching job on cluster polaris
[Tue, 24 Jun 2025 20:51:09 GMT]
====================== Cluster submission script: ========================
==========================================================================
#!/usr/bin/env bash
#### cryoSPARC cluster submission script template for PBS
## Available variables:
## /lus/eagle/projects/FoundEpidem/aravi/cryosparc/cryosparc_worker/bin/cryosparcw run --project P1 --job J2 --master_hostname polaris.alcf.anl.gov --master_command_core_port 18002 > /lus/eagle/projects/FoundEpidem/aravi/cryosparc/CS-test/J2/job.log 2>&1 - the complete command string to run the job
## 1 - the number of CPUs needed
## 0 - the number of GPUs needed.
## Note: The code will use this many GPUs starting from dev id 0.
## The cluster scheduler has the responsibility
## of setting CUDA_VISIBLE_DEVICES or otherwise ensuring that the
## job uses the correct cluster-allocated GPUs.
## 8.0 - the amount of RAM needed in GB
## /lus/eagle/projects/FoundEpidem/aravi/cryosparc/CS-test/J2 - absolute path to the job directory
## /lus/eagle/projects/FoundEpidem/aravi/cryosparc/CS-test - absolute path to the project dir
## /lus/eagle/projects/FoundEpidem/aravi/cryosparc/CS-test/J2/job.log - absolute path to the log file for the job
## /lus/eagle/projects/FoundEpidem/aravi/cryosparc/cryosparc_worker/bin/cryosparcw - absolute path to the cryosparc worker command
## --project P1 --job J2 --master_hostname polaris.alcf.anl.gov --master_command_core_port 18002 - arguments to be passed to cryosparcw run
## P1 - uid of the project
## J2 - uid of the job
## aravi - name of the user that created the job (may contain spaces)
## aravi@anl.gov - cryosparc username of the user that created the job (usually an email)
##
## What follows is a simple PBS script:
#PBS -N cryosparc_job
#PBS -l select=1:system=polaris,walltime=01:00:00
#PBS -l filesystems=home:eagle
#PBS -A FoundEpidem
#PBS -q debug
module load nvhpc/23.9 PrgEnv-nvhpc/8.5.0
cd /lus/eagle/projects/FoundEpidem/aravi/cryosparc/CS-test/J2
/lus/eagle/projects/FoundEpidem/aravi/cryosparc/cryosparc_worker/bin/cryosparcw run --project P1 --job J2 --master_hostname polaris.alcf.anl.gov --master_command_core_port 18002 > /lus/eagle/projects/FoundEpidem/aravi/cryosparc/CS-test/J2/job.log 2>&1
==========================================================================
==========================================================================
[Tue, 24 Jun 2025 20:51:09 GMT] -------- Submission command:
qsub /lus/eagle/projects/FoundEpidem/aravi/cryosparc/CS-test/J2/queue_sub_script.sh
[Tue, 24 Jun 2025 20:51:09 GMT] -------- Cluster Job ID:
5238127.polaris-pbs-01.hsn.cm.polaris.alcf.anl.gov
[Tue, 24 Jun 2025 20:51:09 GMT] -------- Queued on cluster at 2025-06-24 20:51:09.479893
[Tue, 24 Jun 2025 20:51:09 GMT] Cluster job status update for P1 J2 failed with exit code 35 (72 status update request retries)
qstat: 5238127.polaris-pbs-01.hsn.cm.polaris.alcf.anl.gov Job has finished, use -x or -H to obtain historical job information
aravi@polaris-login-02:/lus/eagle/projects/FoundEpidem/aravi/cryosparc/cryosparc_master>
It seems to match what is displayed in the web interface.
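(If I read the qstat message correctly, exit code 35 is just qstat failing on a job that PBS has already moved into its history; the status update loop keeps retrying the plain query and eventually gives up. The historical record for the finished job can still be pulled with the -x flag, using the Cluster Job ID from the log above:

qstat -x 5238127.polaris-pbs-01.hsn.cm.polaris.alcf.anl.gov
qstat -xf 5238127.polaris-pbs-01.hsn.cm.polaris.alcf.anl.gov

The second form prints the full attribute list for the finished job, including its exit status and comment fields, which may say why it ended.)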
Checking the job.json manually shows this:
{
"project_uid": "P1",
"uid": "J2",
"PID_main": null,
"PID_monitor": null,
"PID_workers": [],
"bench": {},
"children": [],
"cloned_from": null,
"cluster_job_custom_vars": {},
"cluster_job_id": null,
"cluster_job_monitor_event_id": null,
"cluster_job_monitor_last_run_at": null,
"cluster_job_monitor_retries": 0,
"cluster_job_status": null,
"cluster_job_status_code": null,
"cluster_job_submission_script": null,
"completed_at": null,
"completed_count": 0,
"created_at": {
"$date": "2025-06-24T20:51:05.494Z"
},
"created_by_job_uid": null,
"created_by_user_id": "685b0ddb3a17018b6d14f59d",
"deleted": true,
"description": "Enter a description.",
"enable_bench": false,
"errors_build_inputs": {},
"errors_build_params": {},
"errors_run": [],
"experiment_worker_path": null,
"failed_at": null,
"generate_intermediate_results": false,
"has_error": false,
"has_warning": false,
"heartbeat_at": null,
"input_slot_groups": [],
"instance_information": {},
"interactive": false,
"interactive_hostname": "",
"interactive_port": null,
"intermediate_results_size_bytes": 0,
"intermediate_results_size_last_updated": {
"$date": "2025-06-24T21:15:19.633Z"
},
"is_ancestor_of_final_result": false,
"is_experiment": false,
"is_final_result": false,
"job_dir": "J2",
"job_dir_size": 0,
"job_dir_size_last_updated": {
"$date": "2025-06-24T21:15:19.633Z"
},
"job_type": "instance_launch_test",
"killed_at": null,
"last_accessed": {
"name": "aravi",
"accessed_at": {
"$date": "2025-06-24T21:13:45.349Z"
}
},
"last_intermediate_data_cleared_amount": 0,
"last_intermediate_data_cleared_at": null,
"last_scheduled_at": null,
"last_updated": {
"$date": "2025-06-24T21:15:19.644Z"
},
"launched_at": null,
"output_group_images": {},
"output_result_groups": [],
"output_results": [],
"params_base": {
"use_all_gpus": {
"type": "boolean",
"value": true,
"title": "Benchmark all available GPUs",
"desc": "If enabled, benchmark all available GPUs on the target. This option may not work when submitting to a cluster resource manager.",
"order": 0,
"section": "resource_settings",
"advanced": false,
"hidden": true
},
"gpu_num_gpus": {
"type": "number",
"value": 0,
"title": "Number of GPUs to benchmark",
"desc": "The number of GPUs to request from the scheduler.",
"order": 1,
"section": "resource_settings",
"advanced": false,
"hidden": true
},
"use_ssd": {
"type": "boolean",
"value": false,
"title": "Use SSD for Tests",
"desc": "Whether or not to use the SSD on the worker for the tests.",
"order": 2,
"section": "resource_settings",
"advanced": false,
"hidden": true
}
},
"params_secs": {
"resource_settings": {
"title": "Resource Settings",
"desc": "",
"order": 0
}
},
"params_spec": {},
"parents": [],
"priority": 0,
"project_uid_num": 1,
"queue_index": null,
"queue_message": null,
"queue_status": null,
"queued_at": null,
"queued_job_hash": null,
"queued_to_lane": "",
"resources_allocated": {},
"resources_needed": {
"slots": {
"CPU": 1,
"GPU": 0,
"RAM": 1
},
"fixed": {
"SSD": false
}
},
"run_as_user": null,
"running_at": null,
"started_at": null,
"status": "killed",
"title": "New Job J2",
"tokens_acquired_at": null,
"tokens_requested_at": null,
"type": "instance_launch_test",
"ui_tile_height": 1,
"ui_tile_images": [],
"ui_tile_width": 1,
"uid_num": 2,
"version": "v4.7.1",
"waiting_at": null,
"workspace_uids": [
"W1"
],
"ui_layouts": {},
"last_exported": {
"$date": "2025-06-24T21:15:19.644Z"
},
"no_check_inputs_ready": false,
"queued_to_gpu": null,
"queued_to_hostname": null,
"num_tokens": null,
"job_sig": null,
"status_num": 45,
"progress": [],
"deleted_at": {
"$date": "2025-06-24T21:15:19.633Z"
}