Hi,
Thanks for the response. I tried running in Testing mode first and then also tried Benchmark mode; I received the same error in both.
Here’s the output:
$ cryosparcm cli "get_scheduler_targets()" | grep num_gp
[{'cache_path': '/home/cryosparc_user/non_ssd_cache/', 'cache_quota_mb': None, 'cache_reserve_mb': 100000, 'custom_var_names': [], 'custom_vars': {}, 'desc': None, 'hostname': 'accre_non_gpu', 'lane': 'accre_non_gpu', 'name': 'accre_non_gpu', 'qdel_cmd_tpl': 'scancel {{ cluster_job_id }}', 'qinfo_cmd_tpl': "sinfo --format='%.8N %.6D %.10P %.6T %.14C %.5c %.6z %.7m %.7G %.9d %20E'", 'qstat_cmd_tpl': 'squeue -j {{ cluster_job_id }}', 'qstat_code_cmd_tpl': None, 'qsub_cmd_tpl': 'sbatch {{ script_path_abs }}', 'script_tpl': '#!/bin/bash\n\n#SBATCH --job-name=cryosparc_{{ project_uid }}_{{ job_uid }}\n#SBATCH --account=csb\n#SBATCH --output={{ job_log_path_abs }}\n#SBATCH --error={{ job_log_path_abs }}\n#SBATCH --nodes=1\n#SBATCH --mem={{ (ram_gb*1000)|int }}M\n#SBATCH --ntasks-per-node=1\n#SBATCH --cpus-per-task={{ num_cpu }}\n\nsrun {{ run_cmd }}\n', 'send_cmd_tpl': '{{ command }}', 'title': 'accre_non_gpu', 'tpl_vars': ['run_cmd', 'cluster_job_id', 'project_uid', 'ram_gb', 'job_log_path_abs', 'num_cpu', 'job_uid', 'command'], 'type': 'cluster', 'worker_bin_path': '/home/cryosparcuser/cryosparc_worker/bin/cryosparcw'}, {'cache_path': '/csbtmp/cryosparc/ssd_data/', 'cache_quota_mb': None, 'cache_reserve_mb': 100000, 'custom_var_names': [], 'custom_vars': {}, 'desc': None, 'hostname': 'accre gpu', 'lane': 'accre gpu', 'name': 'accre gpu', 'qdel_cmd_tpl': 'scancel {{ cluster_job_id }}', 'qinfo_cmd_tpl': "sinfo --format='%.8N %.6D %.10P %.6T %.14C %.5c %.6z %.7m %.7G %.9d %20E'", 'qstat_cmd_tpl': 'squeue -j {{ cluster_job_id }}', 'qstat_code_cmd_tpl': None, 'qsub_cmd_tpl': 'sbatch {{ script_path_abs }}', 'script_tpl': '#!/bin/bash\n\n#SBATCH --job-name=cryosparc_{{ project_uid }}_{{ job_uid }}\n#SBATCH --partition=a6000x4\n#SBATCH --account=csb_gpu_acc\n#SBATCH --output={{ job_log_path_abs }}\n#SBATCH --error={{ job_log_path_abs }}\n#SBATCH --nodes=1\n#SBATCH --mem={{ (ram_gb*1000)|int }}M\n#SBATCH --ntasks-per-node=1\n#SBATCH --cpus-per-task={{ num_cpu }}\n#SBATCH --gres=gpu:{{ num_gpu }}\n#SBATCH --gres-flags=enforce-binding\n\nsrun echo $CUDA_VISIBLE_DEVICES\nsrun {{ run_cmd }}\n', 'send_cmd_tpl': '{{ command }}', 'title': 'accre_gpu', 'tpl_vars': ['run_cmd', 'cluster_job_id', 'project_uid', 'ram_gb', 'num_gpu', 'job_log_path_abs', 'num_cpu', 'job_uid', 'command'], 'type': 'cluster', 'worker_bin_path': '/home/cryosparcuser/cryosparc_worker/bin/cryosparcw'}]
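For readability, this is the GPU lane's script_tpl from the output above with the escaped newlines expanded (nothing changed, just reformatted):

#!/bin/bash

#SBATCH --job-name=cryosparc_{{ project_uid }}_{{ job_uid }}
#SBATCH --partition=a6000x4
#SBATCH --account=csb_gpu_acc
#SBATCH --output={{ job_log_path_abs }}
#SBATCH --error={{ job_log_path_abs }}
#SBATCH --nodes=1
#SBATCH --mem={{ (ram_gb*1000)|int }}M
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task={{ num_cpu }}
#SBATCH --gres=gpu:{{ num_gpu }}
#SBATCH --gres-flags=enforce-binding

srun echo $CUDA_VISIBLE_DEVICES
srun {{ run_cmd }}

So the 'accre gpu' target does request GPUs via --gres=gpu:{{ num_gpu }}, and num_gpu is listed in its tpl_vars.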