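"""Build sample-to-CRAM maps for the NIH CIDR CEPH cohort.

Combines sample metadata from several sequencing boluses (two CIDR rounds,
two University of Utah rounds, and the eLife WashU short-read data), records
each sample's sequencing provenance, and writes two JSON mapping files: one
for samples whose realigned CRAMs should already exist, and one for samples
whose FASTQs still need to be extracted from an original CRAM.
"""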
import json
import os

import pandas as pd
PREF = "/scratch/ucgd/lustre-core/UCGD_Research/quinlan_NIH/NIH_CIDR_CEPH"
MASTER_PED = "/scratch/ucgd/lustre-labs/quinlan/u0890814/CIDR_4Gen/ped_files/master_ped_all_info.ped"
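# shared project root; the CRAM paths constructed below live under it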
# get sample info for full cohort from Julia's "ped"
SAMPLE_INFO = pd.read_csv(
    MASTER_PED,
    sep="\t",
    dtype={"UGRP_Lab_ID": str, "Gender": str},
)
# read in sample information for the original CIDR bolus
CIDR_ORIG_INFO = (
    pd.read_excel(
        "Quinlan_Released_Data/Sample_Info/QuinlanNeklason_SIF.xlsx",
        sheet_name="Sheet0",
    ).dropna()
)[["Subject_ID", "Individual"]].rename(columns={"Subject_ID": "SUBJECT_ID"})
CIDR_ORIG_INFO["SUBJECT_ID"] = CIDR_ORIG_INFO["SUBJECT_ID"].astype(int).astype(str)
# read in mapping for original CIDR
CIDR_ORIG_MAP = pd.read_csv(
    "Quinlan_Released_Data/Sample_Info/SubjectSampleMappingFile_QuinlanNeklason.csv",
    dtype={"SUBJECT_ID": str},
)
CIDR_ORIG_INFO = CIDR_ORIG_INFO.merge(CIDR_ORIG_MAP, how="outer").rename(
    columns={
        "SUBJECT_ID": "UGRP_Lab_ID",
        "SAMPLE_ID": "prefix",
    }
)
CIDR_ORIG_INFO = CIDR_ORIG_INFO.dropna(subset=["prefix"])
CIDR_ORIG_INFO["provenance"] = "CIDR_rd1"
# read in sample information for the topped-up CIDR bolus
CIDR_TOPUP_INFO = pd.read_csv(
    "dataset_to_PI_release2/samples_below_30x_with_generation_CIDRedit.csv",
    dtype={"Subject_ID": str},
)[
    [
        "Subject_ID",
        "wants more seq",
        "sample_id",
        "PICARD_average_alignment_coverage_over_genome",
    ]
].rename(
    columns={
        "Subject_ID": "UGRP_Lab_ID",
        "wants more seq": "topped_up",
        "sample_id": "prefix",
    }
)
# remove samples that didn't produce sequencing data
CIDR_TOPUP_INFO = CIDR_TOPUP_INFO[
    CIDR_TOPUP_INFO["PICARD_average_alignment_coverage_over_genome"]
    != "Library attempted but no sequence data generated"
]
CIDR_TOPUP_INFO["provenance"] = "CIDR_rd2"
# read in Deb's bolus of sequencing metadata
DEB_INFO = pd.read_excel(
    "data/2025 CEPH resequence submit core.xlsx",
    sheet_name="2025-08-29 CEPH REsequence list",
    dtype={"LABID": str},
)[["LABID"]].rename(columns={"LABID": "UGRP_Lab_ID"})
DEB_INFO["provenance"] = "UofU_rd2"
# read in Scott's bolus of sequencing metadata
WATKINS_INFO = pd.read_excel(
    "UU_CIDR_CEPH/26501R_Id_list.xlsx",
    sheet_name="Sheet1",
    dtype={"Sample Name": str},
).rename(
    columns={
        "Sample Name": "UGRP_Lab_ID",
        # the trailing space in "Alt ID " must match the spreadsheet
        # header exactly, or the rename silently does nothing
        "Alt ID ": "topup_prefix",
    }
)[["UGRP_Lab_ID", "topup_prefix"]]
WATKINS_INFO["provenance"] = "UofU_rd1"
ELIFE_INFO = SAMPLE_INFO[SAMPLE_INFO["Sequencing"] == "WashU-Illumina_short-read"][
    ["UGRP_Lab_ID"]
]
ELIFE_INFO["provenance"] = "eLife"
merged = pd.concat(
    [CIDR_ORIG_INFO, CIDR_TOPUP_INFO, DEB_INFO, WATKINS_INFO, ELIFE_INFO]
)
prov = (
    merged.groupby("UGRP_Lab_ID")
    .agg(
        sequencing_provenance=("provenance", lambda p: ",".join(p)),
        n_prov=("provenance", lambda p: len(p)),
    )
    .reset_index()
)
# samples sequenced in three rounds (CIDR rd1 + rd2, plus a UofU round)
prov.query(
    "sequencing_provenance == 'CIDR_rd1,CIDR_rd2,UofU_rd2' "
    "or sequencing_provenance == 'CIDR_rd1,CIDR_rd2,UofU_rd1'"
)[["UGRP_Lab_ID", "sequencing_provenance"]].to_csv(
    "triple_provenance.tsv", sep="\t", index=False
)
# counts of samples per provenance combination
prov.groupby("sequencing_provenance").size().reset_index().rename(
    columns={0: "count"}
).sort_values("count", ascending=False).to_csv(
    "provenance_counts.tsv", sep="\t", index=False
)
prov.to_csv("PROVENANCE.tsv", sep="\t", index=False)
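# PROVENANCE.tsv has one row per sample: UGRP_Lab_ID, the comma-joined
# sequencing_provenance string, and n_prov (the number of rounds)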
res = []
missing = []
for sample, sample_df in merged.groupby("UGRP_Lab_ID"):
    provenance = sample_df["provenance"].to_list()
    cram_path = None
    # these are difficult samples that have to be handled manually
    if len(provenance) > 2:
        pass
    # these are "easy" samples for which the final CRAM should already have
    # been processed correctly (by either aligning FASTQ from .ora or by
    # getting FASTQ from an existing CRAM)
    elif provenance in (
        ["CIDR_rd1"],
        ["UofU_rd1"],
        ["CIDR_rd1", "CIDR_rd2"],
        ["UofU_rd2", "eLife"],
        ["UofU_rd2"],
        ["CIDR_rd1", "UofU_rd2"],
    ):
        cram_path = f"{PREF}/data/cram/{sample}.dupmarked.cram"
    # also easy -- we'll leave these as-is
    elif provenance == ["eLife"]:
        cram_path = f"/scratch/ucgd/lustre-labs/quinlan/data-shared/datasets/CEPH/cram/{sample}.cram"
    # otherwise, there's some manual stuff we'll have to do
    else:
        pass
    if cram_path is not None and os.path.exists(cram_path):
        res.append({"ugrp_sample_id": sample, "cram_fh": cram_path})
    else:
        missing.append({"ugrp_sample_id": sample, "provenance": ",".join(provenance)})
print(len(res))
print(pd.DataFrame(missing))
with open("json/cram_mapping.realigned.json", "w") as f:
    json.dump(res, f, indent=4)
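# second pass: for samples whose FASTQs still need to be extracted, map each
# sample to the CRAM it was originally delivered in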
res = []
missing = []
for sample, sample_df in merged.groupby("UGRP_Lab_ID"):
    provenance = sample_df["provenance"].to_list()
    cram_path = None
    # these are "easy" samples for which the original CRAM (from which we'll
    # extract FASTQ) is known
    if provenance == ["CIDR_rd1"]:
        prefix = CIDR_ORIG_INFO[CIDR_ORIG_INFO["UGRP_Lab_ID"] == sample]["prefix"].to_list()
        assert len(prefix) == 1
        prefix = prefix.pop()
        cram_path = f"{PREF}/Quinlan_Released_Data/CRAM/{prefix}.cram"
    elif provenance == ["CIDR_rd1", "CIDR_rd2"]:
        prefix = CIDR_TOPUP_INFO[CIDR_TOPUP_INFO["UGRP_Lab_ID"] == sample]["prefix"].to_list()
        assert len(prefix) == 1
        prefix = prefix.pop()
        cram_path = f"{PREF}/dataset_to_PI_release2/CRAM/{prefix}.cram"
    # the rest are difficult samples that have to be handled manually
    else:
        pass
    if cram_path is not None and os.path.exists(cram_path):
        res.append({"ugrp_sample_id": sample, "cram_fh": cram_path})
    else:
        missing.append({"ugrp_sample_id": sample, "provenance": ",".join(provenance)})
print(len(res))
print(pd.DataFrame(missing))
with open("json/cram_mapping.to_extract.json", "w") as f:
    json.dump(res, f, indent=4)
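# each JSON file is a list of records like
#     {"ugrp_sample_id": "12345", "cram_fh": "/path/to/sample.cram"}
# (the ID and path above are illustrative); downstream code can load a
# mapping with, e.g.:
#     with open("json/cram_mapping.realigned.json") as fh:
#         cram_map = {d["ugrp_sample_id"]: d["cram_fh"] for d in json.load(fh)}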