SNAKEMAKE(1)                     User Commands                    SNAKEMAKE(1)

NAME

       snakemake - Metapackage for snakemake: azure extras

DESCRIPTION

       usage: snakemake [-h] [--dry-run] [--profile PROFILE]
              [--workflow-profile WORKFLOW_PROFILE] [--cache [RULE ...]]
              [--snakefile FILE] [--cores [N]] [--jobs [N]] [--local-cores N]
              [--resources [NAME=INT ...]]
              [--set-threads RULE=THREADS [RULE=THREADS ...]]
              [--max-threads MAX_THREADS]
              [--set-resources RULE:RESOURCE=VALUE [RULE:RESOURCE=VALUE ...]]
              [--set-scatter NAME=SCATTERITEMS [NAME=SCATTERITEMS ...]]
              [--set-resource-scopes RESOURCE=[global|local]
              [RESOURCE=[global|local] ...]]
              [--default-resources [NAME=INT ...]]
              [--preemption-default PREEMPTION_DEFAULT]
              [--preemptible-rules PREEMPTIBLE_RULES [PREEMPTIBLE_RULES ...]]
              [--config [KEY=VALUE ...]] [--configfile FILE [FILE ...]]
              [--envvars VARNAME [VARNAME ...]] [--directory DIR] [--touch]
              [--keep-going]
              [--rerun-triggers {mtime,params,input,software-env,code}
              [{mtime,params,input,software-env,code} ...]]
              [--force] [--forceall] [--forcerun [TARGET ...]]
              [--prioritize TARGET [TARGET ...]] [--batch RULE=BATCH/BATCHES]
              [--until TARGET [TARGET ...]] [--omit-from TARGET [TARGET ...]]
              [--rerun-incomplete] [--shadow-prefix DIR]
              [--scheduler [{ilp,greedy}]] [--wms-monitor [WMS_MONITOR]]
              [--wms-monitor-arg [NAME=VALUE ...]] [--scheduler-ilp-solver {}]
              [--scheduler-solver-path SCHEDULER_SOLVER_PATH]
              [--conda-base-path CONDA_BASE_PATH] [--no-subworkflows]
              [--groups GROUPS [GROUPS ...]]
              [--group-components GROUP_COMPONENTS [GROUP_COMPONENTS ...]]
              [--report [FILE]] [--report-stylesheet CSSFILE]
              [--draft-notebook TARGET] [--edit-notebook TARGET]
              [--notebook-listen IP:PORT] [--lint [{text,json}]]
              [--generate-unit-tests [TESTPATH]] [--containerize]
              [--export-cwl FILE] [--list] [--list-target-rules] [--dag]
              [--rulegraph] [--filegraph] [--d3dag] [--summary]
              [--detailed-summary] [--archive FILE]
              [--cleanup-metadata FILE [FILE ...]] [--cleanup-shadow]
              [--skip-script-cleanup] [--unlock] [--list-version-changes]
              [--list-code-changes] [--list-input-changes]
              [--list-params-changes] [--list-untracked] [--delete-all-output]
              [--delete-temp-output] [--bash-completion] [--keep-incomplete]
              [--drop-metadata] [--version] [--reason] [--gui [PORT]]
              [--printshellcmds] [--debug-dag] [--stats FILE] [--nocolor]
              [--quiet [{progress,rules,all} ...]] [--print-compilation]
              [--verbose] [--force-use-threads] [--allow-ambiguity] [--nolock]
              [--ignore-incomplete] [--max-inventory-time SECONDS]
              [--latency-wait SECONDS] [--wait-for-files [FILE ...]]
              [--wait-for-files-file FILE] [--notemp] [--all-temp]
              [--keep-remote] [--keep-target-files]
              [--allowed-rules ALLOWED_RULES [ALLOWED_RULES ...]]
              [--target-jobs TARGET_JOBS [TARGET_JOBS ...]]
              [--local-groupid LOCAL_GROUPID]
              [--max-jobs-per-second MAX_JOBS_PER_SECOND]
              [--max-status-checks-per-second MAX_STATUS_CHECKS_PER_SECOND]
              [-T RETRIES] [--attempt ATTEMPT]
              [--wrapper-prefix WRAPPER_PREFIX]
              [--default-remote-provider
              {S3,GS,FTP,SFTP,S3Mocked,gfal,gridftp,iRODS,AzBlob,XRootD}]
              [--default-remote-prefix DEFAULT_REMOTE_PREFIX] [--no-shared-fs]
              [--greediness GREEDINESS] [--no-hooks]
              [--overwrite-shellcmd OVERWRITE_SHELLCMD] [--debug]
              [--runtime-profile FILE] [--mode {0,1,2}] [--show-failed-logs]
              [--log-handler-script FILE] [--log-service {none,slack,wms}]
              [--slurm] [--cluster CMD | --cluster-sync CMD | --drmaa [ARGS]]
              [--cluster-config FILE] [--immediate-submit]
              [--jobscript SCRIPT] [--jobname NAME]
              [--cluster-status CLUSTER_STATUS]
              [--cluster-cancel CLUSTER_CANCEL]
              [--cluster-cancel-nargs CLUSTER_CANCEL_NARGS]
              [--cluster-sidecar CLUSTER_SIDECAR] [--drmaa-log-dir DIR]
              [--kubernetes [NAMESPACE]] [--container-image IMAGE]
              [--k8s-cpu-scalar FLOAT]
              [--k8s-service-account-name SERVICEACCOUNTNAME] [--tibanna]
              [--tibanna-sfn TIBANNA_SFN] [--precommand PRECOMMAND]
              [--tibanna-config TIBANNA_CONFIG [TIBANNA_CONFIG ...]]
              [--google-lifesciences]
              [--google-lifesciences-regions GOOGLE_LIFESCIENCES_REGIONS
              [GOOGLE_LIFESCIENCES_REGIONS ...]]
              [--google-lifesciences-location GOOGLE_LIFESCIENCES_LOCATION]
              [--google-lifesciences-keep-cache]
              [--google-lifesciences-service-account-email
              GOOGLE_LIFESCIENCES_SERVICE_ACCOUNT_EMAIL]
              [--google-lifesciences-network GOOGLE_LIFESCIENCES_NETWORK]
              [--google-lifesciences-subnetwork
              GOOGLE_LIFESCIENCES_SUBNETWORK]
              [--az-batch] [--az-batch-enable-autoscale]
              [--az-batch-account-url [AZ_BATCH_ACCOUNT_URL]] [--flux]
              [--tes URL] [--use-conda]
              [--conda-not-block-search-path-envvars] [--list-conda-envs]
              [--conda-prefix DIR] [--conda-cleanup-envs]
              [--conda-cleanup-pkgs [{tarballs,cache}]]
              [--conda-create-envs-only] [--conda-frontend {conda,mamba}]
              [--use-singularity] [--singularity-prefix DIR]
              [--singularity-args ARGS] [--cleanup-containers]
              [--use-envmodules] [target ...]

       Snakemake is a Python-based language and execution environment for
       GNU Make-like workflows.

   options:
       -h, --help
              show this help message and exit

   EXECUTION:
       target Targets to build. May be rules or files. (default: None)

       --dry-run, --dryrun, -n
              Do not execute anything, and display what would be done. If you
              have a very large workflow, use --dry-run --quiet to just print
              a summary of the DAG of jobs. (default: False)

       --profile PROFILE
              Name of profile to use for configuring Snakemake. Snakemake
              will search for a corresponding folder in /etc/xdg/snakemake
              and $HOME/.config/snakemake. Alternatively, this can be an
              absolute or relative path. The profile folder has to contain a
              file 'config.yaml'. This file can be used to set default values
              for command line options in YAML format. For example,
              '--cluster qsub' becomes 'cluster: qsub' in the YAML file.
              Profiles can be obtained from
              https://github.com/snakemake-profiles. The profile can also be
              set via the environment variable $SNAKEMAKE_PROFILE. To
              override this variable and use no profile at all, provide the
              value 'none' to this argument. [env var: SNAKEMAKE_PROFILE]
              (default: None)

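              For illustration, a minimal profile might consist of a folder
              ~/.config/snakemake/myprofile (the name 'myprofile' and the
              chosen options are placeholders) containing a config.yaml such
              as:

                  cores: 8
                  keep-going: true
                  rerun-incomplete: true

              which is then selected with:

                  snakemake --profile myprofile
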
       --workflow-profile WORKFLOW_PROFILE
              Path (relative to current directory) to workflow specific
              profile folder to use for configuring Snakemake with parameters
              specific for this workflow (like resources). If this flag is
              not used, Snakemake will by default use 'profiles/default' if
              present (searched both relative to current directory and
              relative to Snakefile, in this order). For skipping any
              workflow specific profile provide the special value 'none'.
              Settings made in the workflow profile will override settings
              made in the general profile (see --profile). The profile folder
              has to contain a file 'config.yaml'. This file can be used to
              set default values for command line options in YAML format.
              For example, '--cluster qsub' becomes 'cluster: qsub' in the
              YAML file. It is advisable to use the workflow profile to set
              or overwrite e.g. workflow specific resources like the amount
              of threads of a particular rule or the amount of memory needed.
              Note that in such cases, the arguments may be given as nested
              YAML mappings in the profile, e.g. 'set-threads: myrule: 4'
              instead of 'set-threads: myrule=4'. (default: None)

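              For example, a sketch of a workflow profile at
              profiles/default/config.yaml using such nested mappings (the
              rule name 'myrule' and all values are placeholders):

                  set-threads:
                    myrule: 4
                  set-resources:
                    myrule:
                      mem_mb: 8000
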
       --cache [RULE ...]
              Store output files of given rules in a central cache given by
              the environment variable $SNAKEMAKE_OUTPUT_CACHE. Likewise,
              retrieve output files of the given rules from this cache if
              they have been created before (by anybody writing to the same
              cache), instead of actually executing the rules. Output files
              are identified by hashing all steps, parameters and software
              stack (conda envs or containers) needed to create them.
              (default: None)

       --snakefile FILE, -s FILE
              The workflow definition in form of a snakefile. Usually, you
              should not need to specify this. By default, Snakemake will
              search for 'Snakefile', 'snakefile', 'workflow/Snakefile',
              'workflow/snakefile' beneath the current working directory, in
              this order. Only if you definitely want a different layout, you
              need to use this parameter. (default: None)

       --cores [N], -c [N]
              Use at most N CPU cores/jobs in parallel. If N is omitted or
              'all', the limit is set to the number of available CPU cores.
              In case of cluster/cloud execution, this argument sets the
              maximum number of cores requested from the cluster or cloud
              scheduler. (See
              https://snakemake.readthedocs.io/en/stable/snakefiles/rules.html#resources-remote-execution
              for more info.) This number is available to rules via
              workflow.cores. (default: None)

       --jobs [N], -j [N]
              Use at most N CPU cluster/cloud jobs in parallel. For local
              execution this is an alias for --cores. Note: set this to
              'unlimited' if the number of parallel jobs should not be
              limited. (default: None)

       --local-cores N
              In cluster/cloud mode, use at most N cores of the host machine
              in parallel (default: number of CPU cores of the host). The
              cores are used to execute local rules. This option is ignored
              when not in cluster/cloud mode. (default: 6)

       --resources [NAME=INT ...], --res [NAME=INT ...]
              Define additional resources that shall constrain the scheduling
              analogously to --cores (see above). A resource is defined as a
              name and an integer value. E.g. --resources mem_mb=1000. Rules
              can use resources by defining the resource keyword, e.g.
              resources: mem_mb=600. If now two rules require 600 of the
              resource 'mem_mb' they won't be run in parallel by the
              scheduler. In cluster/cloud mode, this argument will also
              constrain the amount of resources requested from the server.
              (See
              https://snakemake.readthedocs.io/en/stable/snakefiles/rules.html#resources-remote-execution
              for more info.) (default: None)

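              For example, to allow at most one GPU job at a time and cap
              total memory use at 16GB (the resource names are arbitrary, but
              must match those used in the workflow's rules):

                  snakemake --cores 8 --resources mem_mb=16000 gpu=1
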
       --set-threads RULE=THREADS [RULE=THREADS ...]
              Overwrite thread usage of rules. This allows fine-tuning of
              workflow parallelization. In particular, this is helpful to
              target certain cluster nodes by e.g. shifting a rule to use
              more, or less threads than defined in the workflow. Thereby,
              THREADS has to be a positive integer, and RULE has to be the
              name of the rule. (default: None)

       --max-threads MAX_THREADS
              Define a global maximum number of threads available to any
              rule. Rules requesting more threads (via the threads keyword)
              will have their values reduced to the maximum. This can be
              useful when you want to restrict the maximum number of threads
              without modifying the workflow definition or overwriting rules
              individually with --set-threads. (default: None)

       --set-resources RULE:RESOURCE=VALUE [RULE:RESOURCE=VALUE ...]
              Overwrite resource usage of rules. This allows fine-tuning of
              workflow resources. In particular, this is helpful to target
              certain cluster nodes by e.g. defining a certain partition for
              a rule, or overriding a temporary directory. Thereby, VALUE has
              to be a positive integer or a string, RULE has to be the name
              of the rule, and RESOURCE has to be the name of the resource.
              (default: None)

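              For example, to give a hypothetical rule 'bwa_map' more threads
              and memory than defined in the workflow, the two options can be
              combined:

                  snakemake --cores 16 --set-threads bwa_map=8 \
                      --set-resources bwa_map:mem_mb=16000
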
       --set-scatter NAME=SCATTERITEMS [NAME=SCATTERITEMS ...]
              Overwrite number of scatter items of scattergather processes.
              This allows fine-tuning of workflow parallelization. Thereby,
              SCATTERITEMS has to be a positive integer, and NAME has to be
              the name of the scattergather process defined via a
              scattergather directive in the workflow. (default: None)

       --set-resource-scopes RESOURCE=[global|local]
       [RESOURCE=[global|local] ...]
              Overwrite resource scopes. A scope determines how a constraint
              is reckoned in cluster execution. With RESOURCE=local, a
              constraint applied to RESOURCE using --resources will be
              considered the limit for each group submission. With
              RESOURCE=global, the constraint will apply across all groups
              cumulatively. By default, only `mem_mb` and `disk_mb` are
              considered local, all other resources are global. This may be
              modified in the snakefile using the `resource_scopes:`
              directive. Note that number of threads, specified via --cores,
              is always considered local. (See
              https://snakemake.readthedocs.io/en/stable/snakefiles/rules.html#resources-remote-execution
              for more info.) (default: None)

       --default-resources [NAME=INT ...], --default-res [NAME=INT ...]
              Define default values of resources for rules that do not define
              their own values. In addition to plain integers, python
              expressions over the input size are allowed (e.g.
              '2*input.size_mb'). The input size is the sum of the sizes of
              all input files of a rule. By default, Snakemake assumes a
              default for mem_mb, disk_mb, and tmpdir (see below). This
              option allows adding further defaults (e.g. account and
              partition for slurm) or overwriting these default values. The
              defaults are 'mem_mb=max(2*input.size_mb, 1000)',
              'disk_mb=max(2*input.size_mb, 1000)' (i.e., default disk and
              mem usage is twice the input file size but at least 1GB), and
              the system temporary directory (as given by $TMPDIR, $TEMP, or
              $TMP) is used for the tmpdir resource. The tmpdir resource is
              automatically used by shell commands, scripts and wrappers to
              store temporary data (as it is mirrored into $TMPDIR, $TEMP,
              and $TMP for the executed subprocesses). If this argument is
              not specified at all, Snakemake just uses the tmpdir resource
              as outlined above. (default: None)

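              For example, the following sketch derives default memory from
              the input size and adds a string resource (the account name is
              a placeholder; quoting keeps the expression intact):

                  snakemake --cores 8 \
                      --default-resources "mem_mb=3*input.size_mb" \
                      "slurm_account=myaccount"
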
       --preemption-default PREEMPTION_DEFAULT
              A preemptible instance can be requested when using the Google
              Life Sciences API. If you set a --preemption-default, all rules
              will be subject to the default. Specifically, this integer is
              the number of restart attempts that will be made given that the
              instance is killed unexpectedly. Note that preemptible
              instances have a maximum running time of 24 hours. If you want
              to set preemptible instances for only a subset of rules, use
              --preemptible-rules instead. (default: None)

       --preemptible-rules PREEMPTIBLE_RULES [PREEMPTIBLE_RULES ...]
              A preemptible instance can be requested when using the Google
              Life Sciences API. If you want to use these instances for a
              subset of your rules, you can use --preemptible-rules and then
              specify a list of rule and integer pairs, where each integer
              indicates the number of restarts to use for the rule's instance
              in the case that the instance is terminated unexpectedly.
              --preemptible-rules can be used in combination with
              --preemption-default, and will take priority. Note that
              preemptible instances have a maximum running time of 24 hours.
              If you want to apply a consistent number of retries across all
              your rules, use --preemption-default instead. Example:
              snakemake --preemption-default 10 --preemptible-rules
              map_reads=3 call_variants=0 (default: None)

       --config [KEY=VALUE ...], -C [KEY=VALUE ...]
              Set or overwrite values in the workflow config object. The
              workflow config object is accessible as variable config inside
              the workflow. Default values can be set by providing a JSON
              file (see Documentation). (default: None)

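              For example, values passed on the command line like

                  snakemake --cores 4 --config sample=A min_qual=30

              are then accessible inside the workflow as config['sample']
              and config['min_qual'] (both keys are placeholders).
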
       --configfile FILE [FILE ...], --configfiles FILE [FILE ...]
              Specify or overwrite the config file of the workflow (see the
              docs). Values specified in JSON or YAML format are available in
              the global config dictionary inside the workflow. Multiple
              files overwrite each other in the given order, and keys that
              are missing from earlier config files are supplied by later
              ones. Note that this order also includes a config file defined
              in the workflow definition itself (which will come first).
              (default: None)

       --envvars VARNAME [VARNAME ...]
              Environment variables to pass to cloud jobs. (default: None)

       --directory DIR, -d DIR
              Specify working directory (relative paths in the snakefile
              will use this as their origin). (default: None)

       --touch, -t
              Touch output files (mark them up to date without really
              changing them) instead of running their commands. This is used
              to pretend that the rules were executed, in order to fool
              future invocations of snakemake. Fails if a file does not yet
              exist. Note that this will only touch files that would
              otherwise be recreated by Snakemake (e.g. because their input
              files are newer). For enforcing a touch, combine this with
              --force, --forceall, or --forcerun. Note however that you lose
              the provenance information when the files have been created in
              reality. Hence, this should be used only as a last resort.
              (default: False)

       --keep-going, -k
              Go on with independent jobs if a job fails. (default: False)

       --rerun-triggers {mtime,params,input,software-env,code}
       [{mtime,params,input,software-env,code} ...]
              Define what triggers the rerunning of a job. By default, all
              triggers are used, which guarantees that results are consistent
              with the workflow code and configuration. If you prefer the
              traditional way of just considering file modification dates,
              use '--rerun-triggers mtime'. (default: ['mtime', 'params',
              'input', 'software-env', 'code'])

       --force, -f
              Force the execution of the selected target or the first rule
              regardless of already created output. (default: False)

       --forceall, -F
              Force the execution of the selected (or the first) rule and
              all rules it is dependent on regardless of already created
              output. (default: False)

       --forcerun [TARGET ...], -R [TARGET ...]
              Force the re-execution or creation of the given rules or
              files. Use this option if you changed a rule and want to have
              all its output in your workflow updated. (default: None)

       --prioritize TARGET [TARGET ...], -P TARGET [TARGET ...]
              Tell the scheduler to assign creation of given targets (and
              all their dependencies) highest priority. (EXPERIMENTAL)
              (default: None)

       --batch RULE=BATCH/BATCHES
              Only create the given BATCH of the input files of the given
              RULE. This can be used to iteratively run parts of very large
              workflows. Only the execution plan of the relevant part of the
              workflow has to be calculated, thereby speeding up DAG
              computation. It is recommended to provide the most suitable
              rule for batching when documenting a workflow. It should be
              some aggregating rule that would be executed only once, and
              has a large number of input files. For example, it can be a
              rule that aggregates over samples. (default: None)

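              For example, given a hypothetical aggregating rule 'aggregate',
              a large workflow could be processed in three consecutive runs,
              followed by a final run without --batch to finish the remaining
              jobs:

                  snakemake --cores 8 --batch aggregate=1/3
                  snakemake --cores 8 --batch aggregate=2/3
                  snakemake --cores 8 --batch aggregate=3/3
                  snakemake --cores 8
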
       --until TARGET [TARGET ...], -U TARGET [TARGET ...]
              Runs the pipeline until it reaches the specified rules or
              files. Only runs jobs that are dependencies of the specified
              rule or files, does not run sibling DAGs. (default: None)

       --omit-from TARGET [TARGET ...], -O TARGET [TARGET ...]
              Prevent the execution or creation of the given rules or files
              as well as any rules or files that are downstream of these
              targets in the DAG. Also runs jobs in sibling DAGs that are
              independent of the rules or files specified here. (default:
              None)

       --rerun-incomplete, --ri
              Re-run all jobs the output of which is recognized as
              incomplete. (default: False)

       --shadow-prefix DIR
              Specify a directory in which the 'shadow' directory is
              created. If not supplied, the value is set to the '.snakemake'
              directory relative to the working directory. (default: None)

       --scheduler [{ilp,greedy}]
              Specifies if jobs are selected by a greedy algorithm or by
              solving an ilp. The ilp scheduler aims to reduce runtime and
              hdd usage by best possible use of resources. (default: greedy)

       --wms-monitor [WMS_MONITOR]
              IP and port of workflow management system to monitor the
              execution of snakemake (e.g. http://127.0.0.1:5000). Note that
              if your service requires an authorization token, you must
              export WMS_MONITOR_TOKEN in the environment. (default: None)

       --wms-monitor-arg [NAME=VALUE ...]
              If the workflow management service accepts extra arguments,
              provide them in key=value pairs with --wms-monitor-arg. For
              example, to run an existing workflow using a wms monitor, you
              can provide the pair id=12345 and the arguments will be
              provided to the endpoint to first interact with the workflow.
              (default: None)

       --scheduler-ilp-solver {}
              Specifies the solver to be utilized when the ilp scheduler is
              selected. (default: COIN_CMD)

       --scheduler-solver-path SCHEDULER_SOLVER_PATH
              Set the PATH to search for scheduler solver binaries (internal
              use only). (default: None)

       --conda-base-path CONDA_BASE_PATH
              Path of conda base installation (home of conda, mamba,
              activate) (internal use only). (default: None)

       --no-subworkflows, --nosw
              Do not evaluate or execute subworkflows. (default: False)

   GROUPING:
       --groups GROUPS [GROUPS ...]
              Assign rules to groups (this overwrites any group definitions
              from the workflow). (default: None)

       --group-components GROUP_COMPONENTS [GROUP_COMPONENTS ...]
              Set the number of connected components a group is allowed to
              span. By default, this is 1, but this flag allows extending
              it. This can be used to run e.g. 3 jobs of the same rule in
              the same group, although they are not connected. It can be
              helpful for putting together many small jobs or for benefiting
              from shared memory setups. (default: None)

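              For example, to submit jobs of a hypothetical rule 'myrule' in
              bundles of five per cluster job:

                  snakemake --cluster qsub --jobs 50 \
                      --groups myrule=group0 --group-components group0=5
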
   REPORTS:
       --report [FILE]
              Create an HTML report with results and statistics. This can be
              either a .html file or a .zip file. In the former case, all
              results are embedded into the .html (this only works for small
              data). In the latter case, results are stored along with a
              file report.html in the zip archive. If no filename is given,
              an embedded report.html is the default. (default: None)

       --report-stylesheet CSSFILE
              Custom stylesheet to use for report. In particular, this can
              be used for branding the report with e.g. a custom logo, see
              docs. (default: None)

   NOTEBOOKS:
       --draft-notebook TARGET
              Draft a skeleton notebook for the rule used to generate the
              given target file. This notebook can then be opened in a
              jupyter server, executed and implemented until ready. After
              saving, it will automatically be reused in non-interactive
              mode by Snakemake for subsequent jobs. (default: None)

       --edit-notebook TARGET
              Interactively edit the notebook associated with the rule used
              to generate the given target file. This will start a local
              jupyter notebook server. Any changes to the notebook should be
              saved, and the server has to be stopped by closing the
              notebook and hitting the 'Quit' button on the jupyter
              dashboard. Afterwards, the updated notebook will be
              automatically stored in the path defined in the rule. If the
              notebook is not yet present, this will create an empty draft.
              (default: None)

       --notebook-listen IP:PORT
              The IP address and PORT the notebook server used for editing
              the notebook (--edit-notebook) will listen on. (default:
              localhost:8888)

   UTILITIES:
       --lint [{text,json}]
              Perform linting on the given workflow. This will print
              snakemake specific suggestions to improve code quality (work
              in progress, more lints to be added in the future). If no
              argument is provided, plain text output is used. (default:
              None)

       --generate-unit-tests [TESTPATH]
              Automatically generate unit tests for each workflow rule. This
              assumes that all input files of each job are already present.
              Rules without a job with present input files will be skipped
              (a warning will be issued). For each rule, one test case will
              be created in the specified test folder (.tests/unit by
              default). After successful execution, tests can be run with
              'pytest TESTPATH'. (default: None)

       --containerize
              Print a Dockerfile that provides an execution environment for
              the workflow, including all conda environments. (default:
              False)

       --export-cwl FILE
              Compile workflow to CWL and store it in given FILE. (default:
              None)

       --list, -l
              Show available rules in given Snakefile. (default: False)

       --list-target-rules, --lt
              Show available target rules in given Snakefile. (default:
              False)

       --dag  Do not execute anything and print the directed acyclic graph
              of jobs in the dot language. Recommended use on Unix systems:
              snakemake --dag | dot | display. Note print statements in your
              Snakefile may interfere with visualization. (default: False)

       --rulegraph
              Do not execute anything and print the dependency graph of
              rules in the dot language. This will be less crowded than the
              above DAG of jobs, but also shows less information. Note that
              each rule is displayed once, hence the displayed graph will be
              cyclic if a rule appears in several steps of the workflow. Use
              this if the above option leads to a DAG that is too large.
              Recommended use on Unix systems: snakemake --rulegraph | dot |
              display. Note print statements in your Snakefile may interfere
              with visualization. (default: False)

       --filegraph
              Do not execute anything and print the dependency graph of
              rules with their input and output files in the dot language.
              This is an intermediate solution between the above DAG of jobs
              and the rule graph. Note that each rule is displayed once,
              hence the displayed graph will be cyclic if a rule appears in
              several steps of the workflow. Use this if the above option
              leads to a DAG that is too large. Recommended use on Unix
              systems: snakemake --filegraph | dot | display. Note print
              statements in your Snakefile may interfere with visualization.
              (default: False)

       --d3dag
              Print the DAG in D3.js compatible JSON format. (default:
              False)

       --summary, -S
              Print a summary of all files created by the workflow. The
              summary has the following columns: filename, modification
              time, rule version, status, plan. Thereby rule version
              contains the version the file was created with (see the
              version keyword of rules), and status denotes whether the file
              is missing, its input files are newer or if version or
              implementation of the rule changed since file creation.
              Finally the last column denotes whether the file will be
              updated or created during the next workflow execution.
              (default: False)

       --detailed-summary, -D
              Print a summary of all files created by the workflow. The
              summary has the following columns: filename, modification
              time, rule version, input file(s), shell command, status,
              plan. Thereby rule version contains the version the file was
              created with (see the version keyword of rules), and status
              denotes whether the file is missing, its input files are newer
              or if version or implementation of the rule changed since file
              creation. The input file and shell command columns are self
              explanatory. Finally the last column denotes whether the file
              will be updated or created during the next workflow execution.
              (default: False)

       --archive FILE
              Archive the workflow into the given tar archive FILE. The
              archive will be created such that the workflow can be
              re-executed on a vanilla system. The function needs conda and
              git to be installed. It will archive every file that is under
              git version control. Note that it is best practice to have the
              Snakefile, config files, and scripts under version control.
              Hence, they will be included in the archive. Further, it will
              add input files that are not generated by the workflow itself
              and conda environments. Note that symlinks are dereferenced.
              Supported formats are .tar, .tar.gz, .tar.bz2 and .tar.xz.
              (default: None)

       --cleanup-metadata FILE [FILE ...], --cm FILE [FILE ...]
              Cleanup the metadata of given files. That means that snakemake
              removes any tracked version info, and any marks that files are
              incomplete. (default: None)

       --cleanup-shadow
              Cleanup old shadow directories which have not been deleted due
              to failures or power loss. (default: False)

       --skip-script-cleanup
              Don't delete wrapper scripts used for execution. (default:
              False)

       --unlock
              Remove a lock on the working directory. (default: False)

       --list-version-changes, --lv
              List all output files that have been created with a different
              version (as determined by the version keyword). (default:
              False)

       --list-code-changes, --lc
              List all output files for which the rule body (run or shell)
              has changed in the Snakefile. (default: False)

       --list-input-changes, --li
              List all output files for which the defined input files have
              changed in the Snakefile (e.g. new input files were added in
              the rule definition or files were renamed). For listing input
              file modification in the filesystem, use --summary. (default:
              False)

       --list-params-changes, --lp
              List all output files for which the defined params have
              changed in the Snakefile. (default: False)

       --list-untracked, --lu
              List all files in the working directory that are not used in
              the workflow. This can be used e.g. for identifying leftover
              files. Hidden files and directories are ignored. (default:
              False)

       --delete-all-output
              Remove all files generated by the workflow. Use together with
              --dry-run to list files without actually deleting anything.
              Note that this will not recurse into subworkflows.
              Write-protected files are not removed. Nevertheless, use with
              care! (default: False)

       --delete-temp-output
              Remove all temporary files generated by the workflow. Use
              together with --dry-run to list files without actually
              deleting anything. Note that this will not recurse into
              subworkflows. (default: False)

       --bash-completion
              Output code to register bash completion for snakemake. Put the
              following in your .bashrc (including the backticks):
              `snakemake --bash-completion` or issue it in an open terminal
              session. (default: False)

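              An alternative way to register the completion is to append the
              following line to your .bashrc (equivalent to the backtick
              form above, assuming a bash shell):

                  eval "$(snakemake --bash-completion)"
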
       --keep-incomplete
              Do not remove incomplete output files of failed jobs.
              (default: False)

       --drop-metadata
              Drop metadata file tracking information after job finishes.
              Provenance-information based reports (e.g. --report and the
              --list_x_changes functions) will be empty or incomplete.
              (default: False)

       --version, -v
              show program's version number and exit

   OUTPUT:
       --reason, -r
              Print the reason for each executed rule (deprecated, always
              true now). (default: False)

       --gui [PORT]
              Serve an HTML-based user interface on the given network
              address and port, e.g. 168.129.10.15:8000. By default
              Snakemake is only available in the local network (default
              port: 8000). To make Snakemake listen to all ip addresses add
              the special host address 0.0.0.0 to the url (0.0.0.0:8000).
              This is important if Snakemake is used in a virtualised
              environment like Docker. If possible, a browser window is
              opened. (default: None)

       --printshellcmds, -p
              Print out the shell commands that will be executed. (default:
              False)

       --debug-dag
              Print candidate and selected jobs (including their wildcards)
              while inferring DAG. This can help to debug unexpected DAG
              topology or errors. (default: False)

       --stats FILE
              Write stats about Snakefile execution in JSON format to the
              given file. (default: None)

       --nocolor
              Do not use a colored output. (default: False)

       --quiet [{progress,rules,all} ...], -q [{progress,rules,all} ...]
              Do not output certain information. If used without arguments,
              do not output any progress or rule information. Defining 'all'
              results in no information being printed at all. (default:
              None)

       --print-compilation
              Print the python representation of the workflow. (default:
              False)

       --verbose
              Print debugging output. (default: False)

   BEHAVIOR:
       --force-use-threads
              Force threads rather than processes. Helpful if shared memory
              (/dev/shm) is full or unavailable. (default: False)

       --allow-ambiguity, -a
              Don't check for ambiguous rules and simply use the first if
              several can produce the same file. This allows the user to
              prioritize rules by their order in the snakefile. (default:
              False)

       --nolock
              Do not lock the working directory. (default: False)

       --ignore-incomplete, --ii
              Do not check for incomplete output files. (default: False)

       --max-inventory-time SECONDS
              Spend at most SECONDS seconds to create a file inventory for
              the working directory. The inventory vastly speeds up file
              modification and existence checks when computing which jobs
              need to be executed. However, creating the inventory itself
              can be slow, e.g. on network file systems. Hence, we do not
              spend more than a given amount of time and fall back to
              individual checks for the rest. (default: 20)

       --latency-wait SECONDS, --output-wait SECONDS, -w SECONDS
              Wait given seconds if an output file of a job is not present
              after the job finished. This helps if your filesystem suffers
              from latency (default 5). (default: 5)

       --wait-for-files [FILE ...]
              Wait --latency-wait seconds for these files to be present
              before executing the workflow. This option is used internally
              to handle filesystem latency in cluster environments.
              (default: None)

       --wait-for-files-file FILE
              Same behaviour as --wait-for-files, but file list is stored in
              file instead of being passed on the commandline. This is
              useful when the list of files is too long to be passed on the
              commandline. (default: None)

       --notemp, --nt
              Ignore temp() declarations. This is useful when running only a
              part of the workflow, since temp() would lead to deletion of
              files that are probably needed by other parts of the workflow.
              (default: False)

       --all-temp
              Mark all output files as temp files. This can be useful for CI
              testing, in order to save space. (default: False)

       --keep-remote
              Keep local copies of remote input files. (default: False)

       --keep-target-files
              Do not adjust the paths of given target files relative to the
              working directory. (default: False)

       --allowed-rules ALLOWED_RULES [ALLOWED_RULES ...]
              Only consider given rules. If omitted, all rules in Snakefile
              are used. Note that this is intended primarily for internal
              use and may lead to unexpected results otherwise. (default:
              None)

       --target-jobs TARGET_JOBS [TARGET_JOBS ...]
              Target particular jobs by
              RULE:WILDCARD1=VALUE,WILDCARD2=VALUE,... This is meant for
              internal use by Snakemake itself only. (default: None)

       --local-groupid LOCAL_GROUPID
              Name for local groupid, meant for internal use only. (default:
              local)

       --max-jobs-per-second MAX_JOBS_PER_SECOND
              Maximal number of cluster/drmaa jobs per second, default is
              10, fractions allowed. (default: 10)

       --max-status-checks-per-second MAX_STATUS_CHECKS_PER_SECOND
              Maximal number of job status checks per second, default is 10,
              fractions allowed. (default: 10)

       -T RETRIES, --retries RETRIES, --restart-times RETRIES
              Number of times to restart failing jobs (defaults to 0).
              (default: 0)

       --attempt ATTEMPT
              Internal use only: define the initial value of the attempt
              parameter (default: 1). (default: 1)

       --wrapper-prefix WRAPPER_PREFIX
              Prefix for URL created from wrapper directive (default:
              https://github.com/snakemake/snakemake-wrappers/raw/). Set
              this to a different URL to use your fork or a local clone of
              the repository, e.g., use a git URL like
              'git+file://path/to/your/local/clone@'. (default:
              https://github.com/snakemake/snakemake-wrappers/raw/)

       --default-remote-provider
       {S3,GS,FTP,SFTP,S3Mocked,gfal,gridftp,iRODS,AzBlob,XRootD}
              Specify default remote provider to be used for all input and
              output files that don't yet specify one. (default: None)

       --default-remote-prefix DEFAULT_REMOTE_PREFIX
              Specify prefix for default remote provider. E.g. a bucket
              name. (default: )

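              For example, to resolve all otherwise unqualified input and
              output paths below an S3 bucket (the bucket name is a
              placeholder):

                  snakemake --default-remote-provider S3 \
                      --default-remote-prefix mybucket/data
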
       --no-shared-fs
              Do not assume that jobs share a common file system. When this
              flag is activated, Snakemake will assume that the filesystem
              on a cluster node is not shared with other nodes. For example,
              this will lead to downloading remote files on each cluster
              node separately. Further, it won't take special measures to
              deal with filesystem latency issues. This option will in most
              cases only make sense in combination with
              --default-remote-provider. Further, when using --cluster you
              will have to also provide --cluster-status. Only activate this
              if you know what you are doing. (default: False)

       --greediness GREEDINESS
              Set the greediness of scheduling. This value between 0 and 1
              determines how carefully jobs are selected for execution. The
              default value (1.0) provides the best speed and still
              acceptable scheduling quality. (default: None)

       --no-hooks
              Do not invoke onstart, onsuccess or onerror hooks after
              execution. (default: False)

       --overwrite-shellcmd OVERWRITE_SHELLCMD
              Provide a shell command that shall be executed instead of
              those given in the workflow. This is for debugging purposes
              only. (default: None)

       --debug
              Allow debugging of rules with e.g. PDB. This flag allows
              setting breakpoints in run blocks. (default: False)

       --runtime-profile FILE
              Profile Snakemake and write the output to FILE. This requires
              yappi to be installed. (default: None)

       --mode {0,1,2}
              Set execution mode of Snakemake (internal use only). (default:
              0)

       --show-failed-logs
              Automatically display logs of failed jobs. (default: False)

       --log-handler-script FILE
              Provide a custom script containing a function 'def
              log_handler(msg):'. Snakemake will call this function for
              every logging output (given as a dictionary msg), allowing to
              e.g. send notifications in the form of slack messages or
              emails. (default: None)

       --log-service {none,slack,wms}
              Set a specific messaging service for logging output. Snakemake
              will notify the service on errors and completed execution.
              Currently slack and workflow management system (wms) are
              supported. (default: None)

   SLURM:
       --slurm
              Execute snakemake rules as SLURM batch jobs according to their
              'resources' definition. SLURM resources such as 'partition',
              'ntasks', 'cpus', etc. need to be defined per rule within the
              'resources' definition. Note that memory can only be defined
              as 'mem_mb' or 'mem_mb_per_cpu', analogous to the SLURM 'mem'
              and 'mem-per-cpu' flags to sbatch, respectively. Here, the
              unit is always 'MiB'. In addition, '--default-resources'
              should contain the SLURM account. (default: False)

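              A sketch of such an invocation, passing the account and
              partition as default resources (both names are placeholders):

                  snakemake --slurm --jobs 100 \
                      --default-resources slurm_account=myaccount \
                      slurm_partition=mypartition
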
   CLUSTER:
       --cluster CMD
              Execute snakemake rules with the given submit command, e.g.
              qsub. Snakemake compiles jobs into scripts that are submitted
              to the cluster with the given command, once all input files
              for a particular job are present. The submit command can be
              decorated to make it aware of certain job properties (name,
              rulename, input, output, params, wildcards, log, threads and
              dependencies (see the argument below)), e.g.: $ snakemake
              --cluster 'qsub -pe threaded {threads}'. (default: None)

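              Another sketch, using SLURM's sbatch and forwarding the rule's
              thread count (scheduler arguments depend on your site):

                  snakemake --jobs 100 \
                      --cluster 'sbatch --cpus-per-task={threads}'
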
       --cluster-sync CMD
              cluster submission command will block, returning the remote
              exit status upon remote termination (for example, this should
              be used if the cluster command is 'qsub -sync y' (SGE)).
              (default: None)

       --drmaa [ARGS]
              Execute snakemake on a cluster accessed via DRMAA. Snakemake
              compiles jobs into scripts that are submitted to the cluster
              with the given command, once all input files for a particular
              job are present. ARGS can be used to specify options of the
              underlying cluster system, thereby using the job properties
              name, rulename, input, output, params, wildcards, log, threads
              and dependencies, e.g.: --drmaa ' -pe threaded {threads}'.
              Note that ARGS must be given in quotes and with a leading
              whitespace. (default: None)

       --cluster-config FILE, -u FILE
              A JSON or YAML file that defines the wildcards used in
              'cluster' for specific rules, instead of having them specified
              in the Snakefile. For example, for rule 'job' you may define:
              { 'job' : { 'time' : '24:00:00' } } to specify the time for
              rule 'job'. You can specify more than one file. The
              configuration files are merged with later values overriding
              earlier ones. This option is deprecated in favor of using
              --profile, see docs. (default: [])

       --immediate-submit, --is
              Immediately submit all jobs to the cluster instead of waiting
              for present input files. This will fail, unless you make the
              cluster aware of job dependencies, e.g. via: $ snakemake
              --cluster 'sbatch --dependency {dependencies}'. Assuming that
              your submit script (here sbatch) outputs the generated job id
              to the first stdout line, {dependencies} will be filled with
              space separated job ids this job depends on. Does not work for
              workflows that contain checkpoint rules. (default: False)

       --jobscript SCRIPT, --js SCRIPT
              Provide a custom job script for submission to the cluster. The
              default script resides as 'jobscript.sh' in the installation
              directory. (default: None)

       --jobname NAME, --jn NAME
              Provide a custom name for the jobscript that is submitted to
              the cluster (see --cluster). NAME is
              "snakejob.{name}.{jobid}.sh" per default. The wildcard {jobid}
              has to be present in the name. (default:
              snakejob.{name}.{jobid}.sh)

       --cluster-status CLUSTER_STATUS
              Status command for cluster execution. This is only considered
              in combination with the --cluster flag. If provided, Snakemake
              will use the status command to determine if a job has finished
              successfully or failed. For this it is necessary that the
              submit command provided to --cluster returns the cluster job
              id. Then, the status command will be invoked with the job id.
              Snakemake expects it to return 'success' if the job was
              successful, 'failed' if the job failed and 'running' if the
              job still runs. (default: None)

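              A minimal status script sketch, assuming a SLURM site where
              sacct is available (the state mapping may need adjustment for
              your cluster):

                  #!/bin/sh
                  # Invoked by snakemake as: status.sh <jobid>
                  state=$(sacct -j "$1" --format=State --noheader \
                      | head -n1 | awk '{print $1}')
                  case "$state" in
                      COMPLETED) echo success ;;
                      RUNNING|PENDING|COMPLETING|SUSPENDED) echo running ;;
                      *) echo failed ;;
                  esac

              used together with e.g. 'snakemake --cluster sbatch
              --cluster-status ./status.sh'.
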
       --cluster-cancel CLUSTER_CANCEL
              Specify a command that allows to stop currently running jobs.
              The command will be passed a single argument, the job id.
              (default: None)

       --cluster-cancel-nargs CLUSTER_CANCEL_NARGS
              Specify maximal number of job ids to pass to --cluster-cancel
              command, defaults to 1000. (default: 1000)

       --cluster-sidecar CLUSTER_SIDECAR
              Optional command to start a sidecar process during cluster
              execution. Only active when --cluster is given as well.
              (default: None)

       --drmaa-log-dir DIR
              Specify a directory in which stdout and stderr files of DRMAA
              jobs will be written. The value may be given as a relative
              path, in which case Snakemake will use the current invocation
              directory as the origin. If given, this will override any
              given '-o' and/or '-e' native specification. If not given, all
              DRMAA stdout and stderr files are written to the current
              working directory. (default: None)

   FLUX:
       --flux Execute your workflow on a flux cluster. Flux can work either
              with or without a shared network filesystem (like NFS). If you
              don't have a shared filesystem, additionally specify
              --no-shared-fs. (default: False)

   GOOGLE_LIFE_SCIENCE:
       --google-lifesciences
              Execute workflow on Google Cloud using the Google Life
              Sciences API. This requires default application credentials
              (json) to be created and exported to the environment to use
              Google Cloud Storage, Compute Engine, and Life Sciences. The
              credential file should be exported as
              GOOGLE_APPLICATION_CREDENTIALS for snakemake to discover.
              Also, --use-conda, --use-singularity, --config, --configfile
              are supported and will be carried over. (default: False)

       --google-lifesciences-regions GOOGLE_LIFESCIENCES_REGIONS
       [GOOGLE_LIFESCIENCES_REGIONS ...]
              Specify one or more valid instance regions (defaults to US).
              (default: ['us-east1', 'us-west1', 'us-central1'])

       --google-lifesciences-location GOOGLE_LIFESCIENCES_LOCATION
              The Life Sciences API service used to schedule the jobs, e.g.
              us-central1 (Iowa) or europe-west2 (London). Watch the
              terminal output to see all options found to be available. If
              not specified, defaults to the first found with a matching
              prefix from regions specified with
              --google-lifesciences-regions. (default: None)

       --google-lifesciences-keep-cache
              Cache workflows in your Google Cloud Storage bucket specified
              by --default-remote-prefix/{source}/{cache}. Each workflow
              working directory is compressed to a .tar.gz, named by the
              hash of the contents, and kept in Google Cloud Storage. By
              default, the caches are deleted at the shutdown step of the
              workflow. (default: False)

       --google-lifesciences-service-account-email GOOGLE_LIFESCIENCES_SERVICE_ACCOUNT_EMAIL
              Specify a service account email address. (default: None)

       --google-lifesciences-network GOOGLE_LIFESCIENCES_NETWORK
              Specify a network for a Google Compute Engine VM instance.
              (default: None)

       --google-lifesciences-subnetwork GOOGLE_LIFESCIENCES_SUBNETWORK
              Specify a subnetwork for a Google Compute Engine VM instance.
              (default: None)

   KUBERNETES:
       --kubernetes [NAMESPACE]
              Execute workflow in a kubernetes cluster (in the cloud).
              NAMESPACE is the namespace you want to use for your job (if
              nothing is specified: 'default'). Usually, this requires
              --default-remote-provider and --default-remote-prefix to be
              set to an S3 or GS bucket where your data shall be stored. It
              is further advisable to activate conda integration via
              --use-conda. (default: None)

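              For example (the bucket name is a placeholder):

                     snakemake --kubernetes \
                         --default-remote-provider S3 \
                         --default-remote-prefix my-bucket \
                         --use-conda --jobs 20
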
       --container-image IMAGE
              Docker image to use, e.g., when submitting jobs to
              kubernetes. Defaults to
              'https://hub.docker.com/r/snakemake/snakemake', tagged with
              the same version as the currently running Snakemake instance.
              Note that overwriting this value is your own responsibility.
              Any used image has to contain a working snakemake
              installation that is compatible with (or ideally the same as)
              the currently running version. (default: None)

       --k8s-cpu-scalar FLOAT
              K8s reserves some proportion of available CPUs for its own
              use. So, where an underlying node may have 8 CPUs, only e.g.
              7600 milliCPUs are allocatable to k8s pods (i.e. snakemake
              jobs). As 8 > 7.6, k8s can't find a node with enough CPU
              resource to run such jobs. This argument acts as a global
              scalar on each job's CPU request, so that e.g. a job whose
              rule definition asks for 8 CPUs will request 7600m CPUs from
              k8s, allowing it to utilise one entire node. N.B.: the job
              itself would still see the original value, i.e. as the value
              substituted in {threads}. (default: 0.95)

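              As a worked example, with the default scalar a rule
              requesting 8 threads is submitted as a request for
              8 * 0.95 * 1000 = 7600m CPUs:

                     snakemake --kubernetes --k8s-cpu-scalar 0.95 --jobs 10
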
       --k8s-service-account-name SERVICEACCOUNTNAME
              This argument allows the use of custom service accounts for
              kubernetes pods. If specified, serviceAccountName will be
              added to the pod specs. This is needed when using workload
              identity, which is enforced when using Google Cloud GKE
              Autopilot. (default: None)

   TES:
       --tes URL
              Send workflow tasks to a GA4GH TES server specified by URL.
              (default: None)

   TIBANNA:
       --tibanna
              Execute workflow on AWS cloud using Tibanna. This requires
              --default-remote-prefix to be set to an S3 bucket name and
              prefix (e.g. 'bucketname/subdirectory') where input is
              already stored and output will be sent to. Using --tibanna
              implies --default-resources is set as default. Optionally,
              use --precommand to specify any preparation command to run
              before the snakemake command on the cloud (inside the
              snakemake container on the Tibanna VM). Also, --use-conda,
              --use-singularity, --config, --configfile are supported and
              will be carried over. (default: False)

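              For example (the bucket, prefix, and step function name are
              placeholders):

                     snakemake --tibanna \
                         --default-remote-prefix my-bucket/analysis \
                         --tibanna-sfn tibanna_unicorn_monty --jobs 10
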
       --tibanna-sfn TIBANNA_SFN
              Name of the Tibanna Unicorn step function (e.g.
              tibanna_unicorn_monty). This works as a serverless
              scheduler/resource allocator and must be deployed first using
              the tibanna CLI (e.g. tibanna deploy_unicorn
              --usergroup=monty --buckets=bucketname). (default: None)

       --precommand PRECOMMAND
              Any command to execute before the snakemake command on AWS
              cloud, such as wget, git clone, unzip, etc. This is used with
              --tibanna. Do not include input/output download/upload
              commands - file transfer between the S3 bucket and the run
              environment (container) is automatically handled by Tibanna.
              (default: None)

       --tibanna-config TIBANNA_CONFIG [TIBANNA_CONFIG ...]
              Additional tibanna config, e.g. --tibanna-config
              spot_instance=true subnet=<subnet_id>
              security_group=<security_group_id> (default: None)

   AZURE_BATCH:
       --az-batch
              Execute workflow on Azure Batch. (default: False)

       --az-batch-enable-autoscale
              Enable autoscaling of the Azure Batch pool nodes. This option
              sets the initial dedicated node count to zero and requires
              five minutes to resize the cluster, so it is only recommended
              for longer-running jobs. (default: False)

       --az-batch-account-url [AZ_BATCH_ACCOUNT_URL]
              Azure Batch account URL; requires the AZ_BATCH_ACCOUNT_KEY
              environment variable to be set. (default: None)

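              For example (the account URL and key are placeholders):

                     export AZ_BATCH_ACCOUNT_KEY=<key>
                     snakemake --az-batch \
                         --az-batch-account-url https://myaccount.westeurope.batch.azure.com \
                         --jobs 10
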
   CONDA:
       --use-conda
              If defined in the rule, run job in a conda environment. If
              this flag is not set, the conda directive is ignored.
              (default: False)

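              For example, a rule declaring 'conda: "envs/tool.yaml"' (a
              placeholder environment file) is executed inside that
              environment when the workflow is run with:

                     snakemake --use-conda --cores 4
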
       --conda-not-block-search-path-envvars
              Do not block environment variables that modify the search
              path (R_LIBS, PYTHONPATH, PERL5LIB, PERLLIB) when using conda
              environments. (default: False)

       --list-conda-envs
              List all conda environments and their location on disk.
              (default: False)

       --conda-prefix DIR
              Specify a directory in which the 'conda' and 'conda-archive'
              directories are created. These are used to store conda
              environments and their archives, respectively. If not
              supplied, the value is set to the '.snakemake' directory
              relative to the invocation directory. If supplied, the
              `--use-conda` flag must also be set. The value may be given
              as a relative path, which will be extrapolated to the
              invocation directory, or as an absolute path. The value can
              also be provided via the environment variable
              $SNAKEMAKE_CONDA_PREFIX. (default: None)

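              For example, to share environments across workflows (the path
              is a placeholder):

                     export SNAKEMAKE_CONDA_PREFIX=/shared/snakemake-envs
                     snakemake --use-conda --cores 4
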
       --conda-cleanup-envs
              Cleanup unused conda environments. (default: False)

       --conda-cleanup-pkgs [{tarballs,cache}]
              Cleanup conda packages after creating environments. In case
              of 'tarballs' mode, will clean up all downloaded package
              tarballs. In case of 'cache' mode, will additionally clean up
              unused package caches. If mode is omitted, will default to
              only cleaning up the tarballs. (default: None)

       --conda-create-envs-only
              If specified, only creates the job-specific conda
              environments and then exits. The `--use-conda` flag must also
              be set. (default: False)

       --conda-frontend {conda,mamba}
              Choose the conda frontend for installing environments. Mamba
              is much faster and highly recommended. (default: mamba)

   SINGULARITY:
       --use-singularity
              If defined in the rule, run job within a singularity
              container. If this flag is not set, the singularity directive
              is ignored. (default: False)

       --singularity-prefix DIR
              Specify a directory in which singularity images will be
              stored. If not supplied, the value is set to the '.snakemake'
              directory relative to the invocation directory. If supplied,
              the `--use-singularity` flag must also be set. The value may
              be given as a relative path, which will be extrapolated to
              the invocation directory, or as an absolute path. (default:
              None)

       --singularity-args ARGS
              Pass additional args to singularity. (default: )

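              For example, to bind-mount a data directory into each
              container (the path is a placeholder):

                     snakemake --use-singularity \
                         --singularity-args "--bind /data:/data" --cores 4
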
       --cleanup-containers
              Remove unused (singularity) containers. (default: False)

   ENVIRONMENT MODULES:
       --use-envmodules
              If defined in the rule, run job within the given environment
              modules, loaded in the given order. This can be combined with
              --use-conda and --use-singularity, which will then only be
              used as a fallback for rules which don't define environment
              modules. (default: False)

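              For example, a rule declaring 'envmodules: "bwa/0.7.17"' (a
              placeholder module name) has that module loaded for its jobs
              when the workflow is run with:

                     snakemake --use-envmodules --cores 4
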
       If an arg is specified in more than one place, then commandline
       values override environment variables which override defaults.



snakemake 7.32.4                September 2023                    SNAKEMAKE(1)