
Commit

correct path, change rules names
mAGLAVE committed Feb 19, 2024
1 parent a717168 commit 47d0d17
Showing 7 changed files with 22 additions and 14 deletions.
16 changes: 12 additions & 4 deletions bigr_pipelines/global_qc/Snakefile
@@ -13,7 +13,7 @@ onstart:

############################
### Get data information ###
############################
############################da


include: "rules/000.common.smk"
@@ -116,13 +116,21 @@ if os.path.isfile(supplementary_design):
dic_DATA[i][j][k]['Library_Name']=sub_sub_sub_data.Library_Name.values.tolist()
dic_DATA[i][j][k]['Species']=''.join(set(sub_sub_sub_data.Species)) # ''.join() converts the set() into a string.
#get path of fastq files for this sample
dic_DATA[i][j][k]['Fastq_Files'] = [ os.path.abspath(l) for l in sum([ glob.glob('./data_input/**/' + l +'_S[0-9]*_R[1-3]_*.f*q.gz', recursive=True) for l in dic_DATA[i][j][k]['Library_Name']], [])]
dic_DATA[i][j][k]['Fastq_Files'] = [ os.path.abspath(l) for l in sum([ glob.glob('./*input/**/' + l +'_S[0-9]*_R[1-3]_*.f*q.gz', recursive=True) for l in dic_DATA[i][j][k]['Library_Name']], [])]
#print("Library : " + k)
#print("Library Name : " + ",".join(dic_DATA[i][j][k]['Library_Name']))
#print("Species : " + dic_DATA[i][j][k]['Species'])
#print("Fastq Files : " + ",".join(dic_DATA[i][j][k]['Fastq_Files']) + "\n")


################################
### Get pipeline information ###
################################


PIPELINE_FOLDER = workflow.snakefile
PIPELINE_FOLDER = PIPELINE_FOLDER.replace("/Snakefile", "")
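
The two lines above strip "/Snakefile" from workflow.snakefile (the path Snakemake exposes for the running Snakefile) to obtain the pipeline directory. An equivalent sketch using os.path.dirname, shown here with a placeholder path, would be:

    import os

    # Placeholder path standing in for workflow.snakefile.
    snakefile_path = "/path/to/bigr_pipelines/global_qc/Snakefile"

    # os.path.dirname returns the containing directory without relying on the
    # literal "/Snakefile" suffix.
    PIPELINE_FOLDER = os.path.dirname(snakefile_path)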


##################################
### Gather all quality reports ###
@@ -172,7 +180,7 @@ else:


if isScRNAData:
include: "rules/007.cellranger_csv_RNA.smk"
include: "rules/007.csv_cellranger_multi_RNA.smk"
include: "rules/008.cellranger_multi_RNA.smk"
include: "rules/009.concat_cellranger_multi_RNA.smk"

@@ -183,7 +191,7 @@ if isScATACData:


if isScRNAATACData:
include: "rules/007.cellranger_csv_RNA_ATAC.smk"
include: "rules/007.csv_cellranger_arc_RNA_ATAC.smk"
include: "rules/008.cellranger_arc_RNA_ATAC.smk"
include: "rules/009.concat_cellranger_arc_RNA_ATAC.smk"

@@ -1,6 +1,6 @@
# Make csv config file for CellRanger
"""
007.cellranger_csv
007.csv_cellranger_arc_RNA_ATAC
from
-> Entry job
by
@@ -2,7 +2,7 @@
"""
008.cellranger_arc_RNA_ATAC
from
-> 007.cellranger_csv_RNA_ATAC
-> 007.csv_cellranger_arc_RNA_ATAC
by
-> 003.multiqc
-> 003.irods_complient
@@ -47,4 +47,4 @@ rule cellranger_arc:
log:
"logs/cellranger/{CR_sample}_cellranger_arc.log"
shell:
"module load singularity/3.6.3 && singularity exec --no-home {params.sing_arg} ../envs/cellranger_arc_v2.0.2.simg ../scripts/script_cellranger_arc_RNA_ATAC.sh {threads} {resources.mem_mb} {wildcards.CR_sample} {params.csv_config} {params.reference} $(pwd)"
"module load singularity/3.6.3 && singularity exec --no-home {params.sing_arg} -B {PIPELINE_FOLDER}/scripts/:{PIPELINE_FOLDER}/scripts/ {PIPELINE_FOLDER}/envs/cellranger_arc_v2.0.2.simg {PIPELINE_FOLDER}/scripts/script_cellranger_arc_RNA_ATAC.sh {threads} {resources.mem_mb} {wildcards.CR_sample} {params.csv_config} {params.reference} $(pwd)"
4 changes: 2 additions & 2 deletions bigr_pipelines/global_qc/rules/008.cellranger_atac_ATAC.smk
@@ -1,6 +1,6 @@
# Run CellRanger
"""
008.cellranger_atac
008.cellranger_atac_ATAC
from
-> Entry job
by
@@ -53,4 +53,4 @@ rule cellranger_atac:
log:
"logs/cellranger/{CR_sample}_cellranger_atac.log"
shell:
"module load singularity/3.6.3 && singularity exec --no-home {params.sing_arg} ../envs/cellranger_atac_v2.1.0.simg ../scripts/script_cellranger_atac_ATAC.sh {threads} {resources.mem_mb} {wildcards.CR_sample} {params.library_names} {params.reference} {params.path_fastqs} $(pwd)"
"module load singularity/3.6.3 && singularity exec --no-home {params.sing_arg} -B {PIPELINE_FOLDER}/scripts/:{PIPELINE_FOLDER}/scripts/ {PIPELINE_FOLDER}/envs/cellranger_atac_v2.1.0.simg {PIPELINE_FOLDER}/scripts/script_cellranger_atac_ATAC.sh {threads} {resources.mem_mb} {wildcards.CR_sample} {params.library_names} {params.reference} {params.path_fastqs} $(pwd)"
6 changes: 3 additions & 3 deletions bigr_pipelines/global_qc/rules/008.cellranger_multi_RNA.smk
@@ -1,8 +1,8 @@
# Run CellRanger
"""
008.cellranger_multi
008.cellranger_multi_RNA
from
-> 007.cellranger_csv_RNA
-> 007.csv_cellranger_multi_RNA
by
-> 003.multiqc
-> 003.irods_complient
@@ -37,4 +37,4 @@ rule cellranger_multi:
log:
"logs/cellranger/{CR_sample}_cellranger_multi.log"
shell:
"module load singularity/3.6.3 && singularity exec --no-home {params.sing_arg} ../envs/cellranger_v7.2.0.simg ../scripts/script_cellranger_multi_RNA_TCR_BCR.sh {threads} {resources.mem_mb} {wildcards.CR_sample} {params.csv_config} $(pwd)"
"module load singularity/3.6.3 && singularity exec --no-home {params.sing_arg} -B {PIPELINE_FOLDER}/scripts/:{PIPELINE_FOLDER}/scripts/ {PIPELINE_FOLDER}/envs/cellranger_v7.2.0.simg {PIPELINE_FOLDER}/scripts/script_cellranger_multi_RNA_TCR_BCR.sh {threads} {resources.mem_mb} {wildcards.CR_sample} {params.csv_config} $(pwd)"
@@ -26,7 +26,7 @@ rule concat_cellranger_multi:
output:
concat_multi=temp("cellranger/csv_concat/CellRanger_RNA_summary_mqc.csv")
message:
"Computing concatenation of CelleRanger multi metrics_summary.csv files"
"Computing concatenation of CellRanger multi metrics_summary.csv files"
threads: 1
resources:
mem_mb=lambda wildcard, attempt: attempt * 1000,
@@ -77,7 +77,7 @@ rule concat_cellranger_multi:
final_df.columns = [w.replace('"', '') for w in final_df.columns]

#save the dataframe in csv format
final_df.to_csv(output.concat_multi, sep=',',header=True, index=False, na_rep='0', decimal='.')
final_df.to_csv(output.concat_multi, sep=',',header=True, index=False, na_rep='NA', decimal='.')
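
With na_rep='NA', metrics that are missing for a sample are now written as 'NA' instead of '0', presumably so that absent values are not mistaken for true zeros in the concatenated summary. A toy pandas example of the effect:

    import pandas as pd

    # Toy data: one sample is missing a metric.
    df = pd.DataFrame({"Sample": ["S1", "S2"],
                       "Estimated Number of Cells": [5000, None]})

    # na_rep='NA' writes the missing value as 'NA'; with the previous na_rep='0'
    # it would have been indistinguishable from a genuine zero.
    df.to_csv("CellRanger_RNA_summary_mqc.csv", sep=",", header=True,
              index=False, na_rep="NA", decimal=".")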



