Stuck in queue forever

Thanks again for the speedy reply @wtempel. Yes, sure:

$ ls -1 /tmp/mongo*.sock /tmp/cryosparc*.sock
/tmp/cryosparc-supervisor-e29cccc7c796955a404d28cb9d299db7.sock
/tmp/mongodb-39023.sock
$ ps -eo pid,ppid,start,command | grep -e cryosparc -e mongo
1908613       1   Oct 30 /bin/bash /usr/local/apps/cryosparc/libexec/auto_archive.sh
1916547       1   Oct 30 python /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_master/deps/anaconda/envs/cryosparc_master_env/bin/supervisord -c /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_master/supervisord.conf
1916662 1916547   Oct 30 mongod --auth --dbpath /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_database --port 39023 --oplogSize 64 --replSet meteor --wiredTigerCacheSizeGB 4 --bind_ip_all
1916775 1916547   Oct 30 python /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_master/deps/anaconda/envs/cryosparc_master_env/bin/gunicorn -n command_core -b 0.0.0.0:39024 cryosparc_command.command_core:start() -c /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_master/gunicorn.conf.py
1916793 1916775   Oct 30 python /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_master/deps/anaconda/envs/cryosparc_master_env/bin/gunicorn -n command_core -b 0.0.0.0:39024 cryosparc_command.command_core:start() -c /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_master/gunicorn.conf.py
1916819 1916547   Oct 30 python /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_master/deps/anaconda/envs/cryosparc_master_env/bin/gunicorn cryosparc_command.command_vis:app -n command_vis -b 0.0.0.0:39025 -c /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_master/gunicorn.conf.py
1916821 1916547   Oct 30 python /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_master/deps/anaconda/envs/cryosparc_master_env/bin/gunicorn cryosparc_command.command_rtp:start() -n command_rtp -b 0.0.0.0:39027 -c /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_master/gunicorn.conf.py
1916834 1916821   Oct 30 python /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_master/deps/anaconda/envs/cryosparc_master_env/bin/gunicorn cryosparc_command.command_rtp:start() -n command_rtp -b 0.0.0.0:39027 -c /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_master/gunicorn.conf.py
1916835 1916819   Oct 30 python /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_master/deps/anaconda/envs/cryosparc_master_env/bin/gunicorn cryosparc_command.command_vis:app -n command_vis -b 0.0.0.0:39025 -c /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_master/gunicorn.conf.py
1916898 1916547   Oct 30 /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_master/cryosparc_app/nodejs/bin/node ./bundle/main.js
2042350 1910777 19:16:54 grep --color=auto -e cryosparc -e mongo
$ grep HOSTNAME /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_master/config.sh
export CRYOSPARC_MASTER_HOSTNAME="cn1637"
$ /gpfs/gsfs12/users/yangr3/apps/cryosparc/cryosparc_master/bin/cryosparcm cli "get_scheduler_targets()"
[{'cache_path': None, 'cache_quota_mb': None, 'cache_reserve_mb': 10000, 'custom_var_names': ['lscratch', 'ram_gb_multiplier', 'gpu_type', 'num_hours'], 'custom_vars': {'gpu_type': 'v100x', 'lscratch': '460', 'num_hours': '5', 'ram_gb_multiplier': '1'}, 'desc': None, 'hostname': 'slurm_auto', 'lane': 'slurm_auto', 'name': 'slurm_auto', 'qdel_cmd_tpl': '/usr/local/slurm/bin/scancel {{ cluster_job_id }}', 'qinfo_cmd_tpl': "/usr/local/slurm/bin/sinfo --format='%.8N %.6D %.10P %.6T %.14C %.5c %.6z %.7m %.7G %.9d %20E'", 'qstat_cmd_tpl': '/usr/local/bin/sjobs --squeue -j {{ cluster_job_id }}', 'qstat_code_cmd_tpl': None, 'qsub_cmd_tpl': '/usr/local/slurm/bin/sbatch {{ script_path_abs }}', 'script_tpl': '#!/bin/bash\n#SBATCH --job-name=cryosparc_{{ project_uid }}_{{ job_uid }}\n#SBATCH --output={{ job_dir_abs }}/output.txt\n#SBATCH --error={{ job_dir_abs }}/error.txt\n#SBATCH --mem={{ (ram_gb|float * (ram_gb_multiplier|default(1))|float)|int }}GB\n#SBATCH --time={{ (num_hours|default(4))|int }}:00:00\n#SBATCH --nodes=1\n#SBATCH --ntasks={{ num_cpu }}\n#SBATCH --cpus-per-task=1\n{%- if num_gpu == 0 %}\n#SBATCH --partition=norm\n#SBATCH --gres=lscratch:{{ (lscratch|default(100))|int }}\n{%- else %}\n#SBATCH --partition=gpu\n#SBATCH --gres=lscratch:{{ (lscratch|default(100))|int }},gpu:{{ gpu_type }}:{{ num_gpu }}\n{%- endif %}\n{{ run_cmd }}\n', 'send_cmd_tpl': '{{ command }}', 'title': 'slurm_auto', 'tpl_vars': ['num_gpu', 'lscratch', 'command', 'project_uid', 'run_cmd', 'ram_gb_multiplier', 'num_cpu', 'job_uid', 'gpu_type', 'num_hours', 'ram_gb', 'cluster_job_id', 'job_dir_abs'], 'type': 'cluster', 'worker_bin_path': '/data/yangr3/apps/cryosparc/cryosparc_worker/bin/cryosparcw'},
 {'cache_path': '/lscratch/25547743', 'cache_quota_mb': None, 'cache_reserve_mb': 10000, 'custom_var_names': ['lscratch', 'ram_gb_multiplier', 'gpu_type', 'num_hours'], 'custom_vars': {'gpu_type': 'v100x', 'lscratch': '460', 'num_hours': '5'}, 'desc': None, 'hostname': 'slurm_custom', 'lane': 'slurm_custom', 'name': 'slurm_custom', 'qdel_cmd_tpl': '/usr/local/slurm/bin/scancel {{ cluster_job_id }}', 'qinfo_cmd_tpl': "/usr/local/slurm/bin/sinfo --format='%.8N %.6D %.10P %.6T %.14C %.5c %.6z %.7m %.7G %.9d %20E'", 'qstat_cmd_tpl': '/usr/local/bin/sjobs --squeue -j {{ cluster_job_id }}', 'qstat_code_cmd_tpl': None, 'qsub_cmd_tpl': '/usr/local/slurm/bin/sbatch {{ script_path_abs }}', 'script_tpl': '#!/bin/bash\n#SBATCH --job-name=cryosparc_{{ project_uid }}_{{ job_uid }}\n#SBATCH --output={{ job_dir_abs }}/output.txt\n#SBATCH --error={{ job_dir_abs }}/error.txt\n#SBATCH --mem={{ (ram_gb|float * (ram_gb_multiplier|default(1))|float)|int }}GB\n#SBATCH --time={{ (num_hours|default(4))|int }}:00:00\n#SBATCH --nodes=1\n#SBATCH --ntasks={{ num_cpu }}\n#SBATCH --cpus-per-task=1\n{%- if num_gpu == 0 %}\n#SBATCH --partition=norm\n#SBATCH --gres=lscratch:{{ (lscratch|default(100))|int }}\n{%- else %}\n#SBATCH --partition=gpu\n#SBATCH --gres=lscratch:{{ (lscratch|default(100))|int }},gpu:{{ (gpu_type|default("p100")) }}:{{ num_gpu }}\n{%- endif %}\n{{ run_cmd }}\n', 'send_cmd_tpl': '{{ command }}', 'title': 'slurm_custom', 'tpl_vars': ['num_gpu', 'lscratch', 'command', 'project_uid', 'run_cmd', 'ram_gb_multiplier', 'num_cpu', 'job_uid', 'gpu_type', 'num_hours', 'ram_gb', 'cluster_job_id', 'job_dir_abs'], 'type': 'cluster', 'worker_bin_path': '/data/yangr3/apps/cryosparc/cryosparc_worker/bin/cryosparcw'},
 {'cache_path': None, 'cache_quota_mb': None, 'cache_reserve_mb': 10000, 'custom_var_names': ['lscratch', 'ram_gb_multiplier', 'gpu_type', 'num_hours'], 'custom_vars': {}, 'desc': None, 'hostname': 'multi_gpu', 'lane': 'multi_gpu', 'name': 'multi_gpu', 'qdel_cmd_tpl': '/usr/local/slurm/bin/scancel {{ cluster_job_id }}', 'qinfo_cmd_tpl': "/usr/local/slurm/bin/sinfo --format='%.8N %.6D %.10P %.6T %.14C %.5c %.6z %.7m %.7G %.9d %20E'", 'qstat_cmd_tpl': '/usr/local/bin/sjobs --squeue -j {{ cluster_job_id }}', 'qstat_code_cmd_tpl': None, 'qsub_cmd_tpl': '/usr/local/slurm/bin/sbatch {{ script_path_abs }}', 'script_tpl': '#!/bin/bash\n#SBATCH --job-name=cryosparc_{{ project_uid }}_{{ job_uid }}\n#SBATCH --output={{ job_dir_abs }}/output.txt\n#SBATCH --error={{ job_dir_abs }}/error.txt\n#SBATCH --mem={{ (ram_gb|float * (ram_gb_multiplier|default(1))|float)|int }}GB\n#SBATCH --time={{ (num_hours|default(4))|int }}:00:00\n#SBATCH --nodes=1\n#SBATCH --ntasks={{ num_cpu }}\n#SBATCH --cpus-per-task=1\n{%- if num_gpu == 0 %}\n#SBATCH --partition=norm\n#SBATCH --gres=lscratch:{{ (lscratch|default(100))|int }}\n{%- else %}\n#SBATCH --partition=gpu\n#SBATCH --gres=lscratch:{{ (lscratch|default(100))|int }},gpu:{{ (gpu_type|default("p100")) }}:{{ num_gpu }}\n{%- endif %}\n{{ run_cmd }}\n', 'send_cmd_tpl': '{{ command }}', 'title': 'multi_gpu', 'tpl_vars': ['num_gpu', 'lscratch', 'command', 'project_uid', 'run_cmd', 'ram_gb_multiplier', 'num_cpu', 'job_uid', 'gpu_type', 'num_hours', 'ram_gb', 'cluster_job_id', 'job_dir_abs'], 'type': 'cluster', 'worker_bin_path': '/data/yangr3/apps/cryosparc/cryosparc_worker/bin/cryosparcw'},
 {'cache_path': None, 'cache_quota_mb': None, 'cache_reserve_mb': 10000, 'custom_var_names': ['lscratch', 'ram_gb_multiplier', 'gpu_type', 'num_hours'], 'custom_vars': {}, 'desc': None, 'hostname': 'single_gpu', 'lane': 'single_gpu', 'name': 'single_gpu', 'qdel_cmd_tpl': '/usr/local/slurm/bin/scancel {{ cluster_job_id }}', 'qinfo_cmd_tpl': "/usr/local/slurm/bin/sinfo --format='%.8N %.6D %.10P %.6T %.14C %.5c %.6z %.7m %.7G %.9d %20E'", 'qstat_cmd_tpl': '/usr/local/bin/sjobs --squeue -j {{ cluster_job_id }}', 'qstat_code_cmd_tpl': None, 'qsub_cmd_tpl': '/usr/local/slurm/bin/sbatch {{ script_path_abs }}', 'script_tpl': '#!/bin/bash\n#SBATCH --job-name=cryosparc_{{ project_uid }}_{{ job_uid }}\n#SBATCH --output={{ job_dir_abs }}/output.txt\n#SBATCH --error={{ job_dir_abs }}/error.txt\n#SBATCH --mem={{ (ram_gb|float * (ram_gb_multiplier|default(1))|float)|int }}GB\n#SBATCH --time={{ (num_hours|default(4))|int }}:00:00\n#SBATCH --nodes=1\n#SBATCH --ntasks={{ num_cpu }}\n#SBATCH --cpus-per-task=1\n#SBATCH --partition=gpu\n#SBATCH --gres=lscratch:{{ (lscratch|default(100))|int }},gpu:{{ (gpu_type|default("p100")) }}:1\n{{ run_cmd }}\n', 'send_cmd_tpl': '{{ command }}', 'title': 'single_gpu', 'tpl_vars': ['lscratch', 'command', 'project_uid', 'run_cmd', 'ram_gb_multiplier', 'num_cpu', 'job_uid', 'gpu_type', 'num_hours', 'ram_gb', 'cluster_job_id', 'job_dir_abs'], 'type': 'cluster', 'worker_bin_path': '/data/yangr3/apps/cryosparc/cryosparc_worker/bin/cryosparcw'},
 {'cache_path': None, 'cache_quota_mb': None, 'cache_reserve_mb': 10000, 'custom_var_names': ['lscratch', 'ram_gb_multiplier', 'gpu_type', 'num_hours'], 'custom_vars': {}, 'desc': None, 'hostname': 'mig_gpu', 'lane': 'mig_gpu', 'name': 'mig_gpu', 'qdel_cmd_tpl': '/usr/local/slurmdev/bin/scancel {{ cluster_job_id }}', 'qinfo_cmd_tpl': "/usr/local/slurmdev/bin/sinfo --format='%.8N %.6D %.10P %.6T %.14C %.5c %.6z %.7m %.7G %.9d %20E'", 'qstat_cmd_tpl': '/usr/local/bin/sjobs --squeue -j {{ cluster_job_id }}', 'qstat_code_cmd_tpl': None, 'qsub_cmd_tpl': '/usr/local/slurmdev/bin/sbatch {{ script_path_abs }}', 'script_tpl': '#!/bin/bash\n#SBATCH --job-name=cryosparc_{{ project_uid }}_{{ job_uid }}\n#SBATCH --output={{ job_dir_abs }}/output.txt\n#SBATCH --error={{ job_dir_abs }}/error.txt\n#SBATCH --mem={{ (ram_gb|float * (ram_gb_multiplier|default(1))|float)|int }}GB\n#SBATCH --time={{ (num_hours|default(4))|int }}:00:00\n#SBATCH --nodes=1\n#SBATCH --ntasks={{ num_cpu }}\n#SBATCH --cpus-per-task=1\n{%- if num_gpu == 0 %}\n#SBATCH --partition=norm\n#SBATCH --gres=lscratch:{{ (lscratch|default(100))|int }}\n{%- else %}\n#SBATCH --partition=gpu\n#SBATCH --gres=lscratch:{{ (lscratch|default(100))|int }},gpu:{{ (gpu_type|default("a100_1g.10g")) }}:1\n{%- endif %}\n{{ run_cmd }}\n', 'send_cmd_tpl': '{{ command }}', 'title': 'mig_gpu', 'tpl_vars': ['num_gpu', 'lscratch', 'command', 'project_uid', 'run_cmd', 'ram_gb_multiplier', 'num_cpu', 'job_uid', 'gpu_type', 'num_hours', 'ram_gb', 'cluster_job_id', 'job_dir_abs'], 'type': 'cluster', 'worker_bin_path': '/data/yangr3/apps/cryosparc/cryosparc_worker/bin/cryosparcw'}]
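For reference, this is roughly how the slurm_auto lane's script_tpl above should render for a hypothetical 1-GPU job. The lane's own custom_vars supply gpu_type=v100x, lscratch=460, num_hours=5 and ram_gb_multiplier=1; the project/job IDs (P1/J42), num_cpu=4, ram_gb=24 and the job directory path are made-up values just to illustrate the substitution:

#!/bin/bash
#SBATCH --job-name=cryosparc_P1_J42
#SBATCH --output=/path/to/P1/J42/output.txt
#SBATCH --error=/path/to/P1/J42/error.txt
#SBATCH --mem=24GB
#SBATCH --time=5:00:00
#SBATCH --nodes=1
#SBATCH --ntasks=4
#SBATCH --cpus-per-task=1
#SBATCH --partition=gpu
#SBATCH --gres=lscratch:460,gpu:v100x:1
# {{ run_cmd }} expands here to the cryosparcw run command generated by the master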
$ env | grep SLURM_ | grep -e NPROC -e CPUS -e MEM -e TRES
### Nothing was returned by this command; Slurm is provided as an environment module here and only loaded when needed:
$ env | grep slurm
__LMOD_REF_COUNT_PATH=/usr/local/apps/cryosparc/sbin:1;/data/yangr3/apps/dutree/bin:1;/usr/local/slurm/bin:1;/usr/local/bin:2;/usr/X11R6/bin:1;/usr/local/jdk/bin:1;/usr/bin:1;/usr/local/sbin:1;/usr/sbin:1;/usr/local/mysql/bin:1;/home/yangr3/.local/bin:1;/home/yangr3/bin:1
MANPATH=/usr/local/slurm/share/man:/usr/local/lmod/lmod/8.7/share/man:
PATH=/data/yangr3/apps/cryosparc/cryosparc_master/bin:/data/yangr3/apps/cryosparc/cryosparc_worker/bin:/data/yangr3/apps/cryosparc/cryosparc_master/bin:/data/yangr3/apps/cryosparc/cryosparc_worker/bin:/usr/local/apps/cryosparc/sbin:/data/yangr3/apps/dutree/bin:/usr/local/slurm/bin:/usr/local/bin:/usr/X11R6/bin:/usr/local/jdk/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/usr/local/mysql/bin:/home/yangr3/.local/bin:/home/yangr3/bin
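In case it is useful, I can also inspect the environment of the running supervisord process directly (PID 1916547 from the ps listing above) to check for any leaked SLURM_* variables. This is just a generic /proc check, nothing cryoSPARC-specific:

$ tr '\0' '\n' < /proc/1916547/environ | grep '^SLURM_'

No output from that would suggest the master process itself was not carrying any SLURM_* variables when it was started.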