From 896d586df05a8e03446e90e5d25f4be2edf05659 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Wed, 17 Apr 2024 12:52:22 -0400 Subject: [PATCH 01/64] added multithread to colabfold search --- scripts/colabfold_search.sh | 47 +++++++++++++------------ scripts/precompute_alignments_mmseqs.py | 12 ++++++- 2 files changed, 35 insertions(+), 24 deletions(-) diff --git a/scripts/colabfold_search.sh b/scripts/colabfold_search.sh index f3b609de..c3f56cbf 100755 --- a/scripts/colabfold_search.sh +++ b/scripts/colabfold_search.sh @@ -2,17 +2,18 @@ # Copied from colabfold.mmseqs.com MMSEQS="$1" -QUERY="$2" -DBBASE="$3" -BASE="$4" -DB1="$5" -DB2="$6" -DB3="$7" -USE_ENV="${8:-1}" -USE_TEMPLATES="${9:-0}" -FILTER="${10:-1}" -INDEX=${11:-1} -DB_LOAD_MODE="${12:-2}" +MMSEQS_THREADS="$2" +QUERY="$3" +DBBASE="$4" +BASE="$5" +DB1="$6" +DB2="$7" +DB3="$8" +USE_ENV="${9:-1}" +USE_TEMPLATES="${10:-0}" +FILTER="${11:-1}" +INDEX=${12:-1} +DB_LOAD_MODE="${13:-2}" EXPAND_EVAL=inf ALIGN_EVAL=10 DIFF=3000 @@ -41,28 +42,28 @@ FILTER_PARAM="--filter-msa ${FILTER} --filter-min-enable 1000 --diff ${DIFF} --q EXPAND_PARAM="--expansion-mode 0 -e ${EXPAND_EVAL} --expand-filter-clusters ${FILTER} --max-seq-id 0.95" mkdir -p "${BASE}" "${MMSEQS}" createdb "${QUERY}" "${BASE}/qdb" -"${MMSEQS}" search "${BASE}/qdb" "${DBBASE}/${DB1}" "${BASE}/res" "${BASE}/tmp" $SEARCH_PARAM -"${MMSEQS}" expandaln "${BASE}/qdb" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res" "${DBBASE}/${DB1}${ALN}" "${BASE}/res_exp" --db-load-mode ${DB_LOAD_MODE} ${EXPAND_PARAM} +"${MMSEQS}" search "--threads ${MMSEQS_THREADS} ${BASE}/qdb" "${DBBASE}/${DB1}" "${BASE}/res" "${BASE}/tmp" $SEARCH_PARAM +"${MMSEQS}" expandaln "--threads ${MMSEQS_THREADS} ${BASE}/qdb" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res" "${DBBASE}/${DB1}${ALN}" "${BASE}/res_exp" --db-load-mode ${DB_LOAD_MODE} ${EXPAND_PARAM} "${MMSEQS}" mvdb "${BASE}/tmp/latest/profile_1" "${BASE}/prof_res" "${MMSEQS}" lndb "${BASE}/qdb_h" "${BASE}/prof_res_h" -"${MMSEQS}" align "${BASE}/prof_res" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp" "${BASE}/res_exp_realign" --db-load-mode ${DB_LOAD_MODE} -e ${ALIGN_EVAL} --max-accept ${MAX_ACCEPT} --alt-ali 10 -a -"${MMSEQS}" filterresult "${BASE}/qdb" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp_realign" "${BASE}/res_exp_realign_filter" --db-load-mode ${DB_LOAD_MODE} --qid 0 --qsc $QSC --diff 0 --max-seq-id 1.0 --filter-min-enable 100 -"${MMSEQS}" result2msa "${BASE}/qdb" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp_realign_filter" "${BASE}/uniref.a3m" --msa-format-mode 6 --db-load-mode ${DB_LOAD_MODE} ${FILTER_PARAM} +"${MMSEQS}" align "--threads ${MMSEQS_THREADS} ${BASE}/prof_res" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp" "${BASE}/res_exp_realign" --db-load-mode ${DB_LOAD_MODE} -e ${ALIGN_EVAL} --max-accept ${MAX_ACCEPT} --alt-ali 10 -a +"${MMSEQS}" filterresult "--threads ${MMSEQS_THREADS} ${BASE}/qdb" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp_realign" "${BASE}/res_exp_realign_filter" --db-load-mode ${DB_LOAD_MODE} --qid 0 --qsc $QSC --diff 0 --max-seq-id 1.0 --filter-min-enable 100 +"${MMSEQS}" result2msa "--threads ${MMSEQS_THREADS} ${BASE}/qdb" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp_realign_filter" "${BASE}/uniref.a3m" --msa-format-mode 6 --db-load-mode ${DB_LOAD_MODE} ${FILTER_PARAM} "${MMSEQS}" rmdb "${BASE}/res_exp_realign" "${MMSEQS}" rmdb "${BASE}/res_exp" "${MMSEQS}" rmdb "${BASE}/res" "${MMSEQS}" rmdb "${BASE}/res_exp_realign_filter" if [ "${USE_TEMPLATES}" = "1" ]; then - "${MMSEQS}" search "${BASE}/prof_res" 
"${DBBASE}/${DB2}" "${BASE}/res_pdb" "${BASE}/tmp" --db-load-mode ${DB_LOAD_MODE} -s 7.5 -a -e 0.1 - "${MMSEQS}" convertalis "${BASE}/prof_res" "${DBBASE}/${DB2}${IDX}" "${BASE}/res_pdb" "${BASE}/${DB2}.m8" --format-output query,target,fident,alnlen,mismatch,gapopen,qstart,qend,tstart,tend,evalue,bits,cigar --db-load-mode ${DB_LOAD_MODE} + "${MMSEQS}" search "--threads ${MMSEQS_THREADS} ${BASE}/prof_res" "${DBBASE}/${DB2}" "${BASE}/res_pdb" "${BASE}/tmp" --db-load-mode ${DB_LOAD_MODE} -s 7.5 -a -e 0.1 + "${MMSEQS}" convertalis "--threads ${MMSEQS_THREADS} ${BASE}/prof_res" "${DBBASE}/${DB2}${IDX}" "${BASE}/res_pdb" "${BASE}/${DB2}.m8" --format-output query,target,fident,alnlen,mismatch,gapopen,qstart,qend,tstart,tend,evalue,bits,cigar --db-load-mode ${DB_LOAD_MODE} "${MMSEQS}" rmdb "${BASE}/res_pdb" fi if [ "${USE_ENV}" = "1" ]; then - "${MMSEQS}" search "${BASE}/prof_res" "${DBBASE}/${DB3}" "${BASE}/res_env" "${BASE}/tmp" $SEARCH_PARAM - "${MMSEQS}" expandaln "${BASE}/prof_res" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env" "${DBBASE}/${DB3}${ALN}" "${BASE}/res_env_exp" -e ${EXPAND_EVAL} --expansion-mode 0 --db-load-mode ${DB_LOAD_MODE} - "${MMSEQS}" align "${BASE}/tmp/latest/profile_1" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp" "${BASE}/res_env_exp_realign" --db-load-mode ${DB_LOAD_MODE} -e ${ALIGN_EVAL} --max-accept ${MAX_ACCEPT} --alt-ali 10 -a - "${MMSEQS}" filterresult "${BASE}/qdb" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp_realign" "${BASE}/res_env_exp_realign_filter" --db-load-mode ${DB_LOAD_MODE} --qid 0 --qsc $QSC --diff 0 --max-seq-id 1.0 --filter-min-enable 100 - "${MMSEQS}" result2msa "${BASE}/qdb" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp_realign_filter" "${BASE}/bfd.mgnify30.metaeuk30.smag30.a3m" --msa-format-mode 6 --db-load-mode ${DB_LOAD_MODE} ${FILTER_PARAM} + "${MMSEQS}" search "--threads ${MMSEQS_THREADS} ${BASE}/prof_res" "${DBBASE}/${DB3}" "${BASE}/res_env" "${BASE}/tmp" $SEARCH_PARAM + "${MMSEQS}" expandaln "--threads ${MMSEQS_THREADS} ${BASE}/prof_res" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env" "${DBBASE}/${DB3}${ALN}" "${BASE}/res_env_exp" -e ${EXPAND_EVAL} --expansion-mode 0 --db-load-mode ${DB_LOAD_MODE} + "${MMSEQS}" align "--threads ${MMSEQS_THREADS} ${BASE}/tmp/latest/profile_1" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp" "${BASE}/res_env_exp_realign" --db-load-mode ${DB_LOAD_MODE} -e ${ALIGN_EVAL} --max-accept ${MAX_ACCEPT} --alt-ali 10 -a + "${MMSEQS}" filterresult "--threads ${MMSEQS_THREADS} ${BASE}/qdb" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp_realign" "${BASE}/res_env_exp_realign_filter" --db-load-mode ${DB_LOAD_MODE} --qid 0 --qsc $QSC --diff 0 --max-seq-id 1.0 --filter-min-enable 100 + "${MMSEQS}" result2msa "--threads ${MMSEQS_THREADS} ${BASE}/qdb" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp_realign_filter" "${BASE}/bfd.mgnify30.metaeuk30.smag30.a3m" --msa-format-mode 6 --db-load-mode ${DB_LOAD_MODE} ${FILTER_PARAM} "${MMSEQS}" rmdb "${BASE}/res_env_exp_realign_filter" "${MMSEQS}" rmdb "${BASE}/res_env_exp_realign" "${MMSEQS}" rmdb "${BASE}/res_env_exp" diff --git a/scripts/precompute_alignments_mmseqs.py b/scripts/precompute_alignments_mmseqs.py index 2c564e41..38ccb74a 100644 --- a/scripts/precompute_alignments_mmseqs.py +++ b/scripts/precompute_alignments_mmseqs.py @@ -44,6 +44,11 @@ def main(args): else: chunk_size = args.fasta_chunk_size + if (args.threads is None): + threads = 1 + else: + threads = args.threads + # Make the output directory Path(args.output_dir).mkdir(parents=True, exist_ok=True) @@ -66,6 +71,7 @@ def 
main(args): cmd = [ "scripts/colabfold_search.sh", args.mmseqs_binary_path, + threads, chunk_fasta_path, args.mmseqs_db_dir, args.output_dir, @@ -100,7 +106,7 @@ def main(args): hhsearch_pdb70_runner = hhsearch.HHSearch( - binary_path=args.hhsearch_binary_path, databases=[args.pdb70] + binary_path=args.hhsearch_binary_path, databases=[args.pdb70], n_cpu=threads ) @@ -146,6 +152,10 @@ def main(args): "mmseqs_binary_path", type=str, help="Path to mmseqs binary" ) + parser.add_argument( + "--threads", type=int, default=1, + help="""How many threads should mmseqs use. (Default 1)""" + ) parser.add_argument( "--hhsearch_binary_path", type=str, default=None, help="""Path to hhsearch binary (for template search). In future From 17e3b936e995aecb667487e739defee21ea308d7 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Wed, 17 Apr 2024 12:59:18 -0400 Subject: [PATCH 02/64] added multithread to colabfold search --- scripts/precompute_alignments_mmseqs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/precompute_alignments_mmseqs.py b/scripts/precompute_alignments_mmseqs.py index 38ccb74a..4450510f 100644 --- a/scripts/precompute_alignments_mmseqs.py +++ b/scripts/precompute_alignments_mmseqs.py @@ -71,7 +71,7 @@ def main(args): cmd = [ "scripts/colabfold_search.sh", args.mmseqs_binary_path, - threads, + f"{threads}", chunk_fasta_path, args.mmseqs_db_dir, args.output_dir, From 573623d5bb2a88d5bba9ce9761de1638e8c38abf Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Wed, 17 Apr 2024 13:04:25 -0400 Subject: [PATCH 03/64] added multithread to colabfold search --- scripts/colabfold_search.sh | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/scripts/colabfold_search.sh b/scripts/colabfold_search.sh index c3f56cbf..f95578d7 100755 --- a/scripts/colabfold_search.sh +++ b/scripts/colabfold_search.sh @@ -42,28 +42,28 @@ FILTER_PARAM="--filter-msa ${FILTER} --filter-min-enable 1000 --diff ${DIFF} --q EXPAND_PARAM="--expansion-mode 0 -e ${EXPAND_EVAL} --expand-filter-clusters ${FILTER} --max-seq-id 0.95" mkdir -p "${BASE}" "${MMSEQS}" createdb "${QUERY}" "${BASE}/qdb" -"${MMSEQS}" search "--threads ${MMSEQS_THREADS} ${BASE}/qdb" "${DBBASE}/${DB1}" "${BASE}/res" "${BASE}/tmp" $SEARCH_PARAM -"${MMSEQS}" expandaln "--threads ${MMSEQS_THREADS} ${BASE}/qdb" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res" "${DBBASE}/${DB1}${ALN}" "${BASE}/res_exp" --db-load-mode ${DB_LOAD_MODE} ${EXPAND_PARAM} +"${MMSEQS}" search "${BASE}/qdb" "${DBBASE}/${DB1}" "${BASE}/res" "${BASE}/tmp" $SEARCH_PARAM --threads ${MMSEQS_THREADS} +"${MMSEQS}" expandaln "${BASE}/qdb" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res" "${DBBASE}/${DB1}${ALN}" "${BASE}/res_exp" --db-load-mode ${DB_LOAD_MODE} ${EXPAND_PARAM} --threads ${MMSEQS_THREADS} "${MMSEQS}" mvdb "${BASE}/tmp/latest/profile_1" "${BASE}/prof_res" "${MMSEQS}" lndb "${BASE}/qdb_h" "${BASE}/prof_res_h" -"${MMSEQS}" align "--threads ${MMSEQS_THREADS} ${BASE}/prof_res" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp" "${BASE}/res_exp_realign" --db-load-mode ${DB_LOAD_MODE} -e ${ALIGN_EVAL} --max-accept ${MAX_ACCEPT} --alt-ali 10 -a -"${MMSEQS}" filterresult "--threads ${MMSEQS_THREADS} ${BASE}/qdb" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp_realign" "${BASE}/res_exp_realign_filter" --db-load-mode ${DB_LOAD_MODE} --qid 0 --qsc $QSC --diff 0 --max-seq-id 1.0 --filter-min-enable 100 -"${MMSEQS}" result2msa "--threads ${MMSEQS_THREADS} ${BASE}/qdb" 
"${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp_realign_filter" "${BASE}/uniref.a3m" --msa-format-mode 6 --db-load-mode ${DB_LOAD_MODE} ${FILTER_PARAM} +"${MMSEQS}" align "${BASE}/prof_res" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp" "${BASE}/res_exp_realign" --db-load-mode ${DB_LOAD_MODE} -e ${ALIGN_EVAL} --max-accept ${MAX_ACCEPT} --alt-ali 10 -a --threads ${MMSEQS_THREADS} +"${MMSEQS}" filterresult "${BASE}/qdb" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp_realign" "${BASE}/res_exp_realign_filter" --db-load-mode ${DB_LOAD_MODE} --qid 0 --qsc $QSC --diff 0 --max-seq-id 1.0 --filter-min-enable 100 --threads ${MMSEQS_THREADS} +"${MMSEQS}" result2msa "${BASE}/qdb" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp_realign_filter" "${BASE}/uniref.a3m" --msa-format-mode 6 --db-load-mode ${DB_LOAD_MODE} ${FILTER_PARAM} --threads ${MMSEQS_THREADS} "${MMSEQS}" rmdb "${BASE}/res_exp_realign" "${MMSEQS}" rmdb "${BASE}/res_exp" "${MMSEQS}" rmdb "${BASE}/res" "${MMSEQS}" rmdb "${BASE}/res_exp_realign_filter" if [ "${USE_TEMPLATES}" = "1" ]; then - "${MMSEQS}" search "--threads ${MMSEQS_THREADS} ${BASE}/prof_res" "${DBBASE}/${DB2}" "${BASE}/res_pdb" "${BASE}/tmp" --db-load-mode ${DB_LOAD_MODE} -s 7.5 -a -e 0.1 - "${MMSEQS}" convertalis "--threads ${MMSEQS_THREADS} ${BASE}/prof_res" "${DBBASE}/${DB2}${IDX}" "${BASE}/res_pdb" "${BASE}/${DB2}.m8" --format-output query,target,fident,alnlen,mismatch,gapopen,qstart,qend,tstart,tend,evalue,bits,cigar --db-load-mode ${DB_LOAD_MODE} + "${MMSEQS}" search "${BASE}/prof_res" "${DBBASE}/${DB2}" "${BASE}/res_pdb" "${BASE}/tmp" --db-load-mode ${DB_LOAD_MODE} -s 7.5 -a -e 0.1 --threads ${MMSEQS_THREADS} + "${MMSEQS}" convertalis "${BASE}/prof_res" "${DBBASE}/${DB2}${IDX}" "${BASE}/res_pdb" "${BASE}/${DB2}.m8" --format-output query,target,fident,alnlen,mismatch,gapopen,qstart,qend,tstart,tend,evalue,bits,cigar --db-load-mode ${DB_LOAD_MODE} --threads ${MMSEQS_THREADS} "${MMSEQS}" rmdb "${BASE}/res_pdb" fi if [ "${USE_ENV}" = "1" ]; then - "${MMSEQS}" search "--threads ${MMSEQS_THREADS} ${BASE}/prof_res" "${DBBASE}/${DB3}" "${BASE}/res_env" "${BASE}/tmp" $SEARCH_PARAM - "${MMSEQS}" expandaln "--threads ${MMSEQS_THREADS} ${BASE}/prof_res" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env" "${DBBASE}/${DB3}${ALN}" "${BASE}/res_env_exp" -e ${EXPAND_EVAL} --expansion-mode 0 --db-load-mode ${DB_LOAD_MODE} - "${MMSEQS}" align "--threads ${MMSEQS_THREADS} ${BASE}/tmp/latest/profile_1" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp" "${BASE}/res_env_exp_realign" --db-load-mode ${DB_LOAD_MODE} -e ${ALIGN_EVAL} --max-accept ${MAX_ACCEPT} --alt-ali 10 -a - "${MMSEQS}" filterresult "--threads ${MMSEQS_THREADS} ${BASE}/qdb" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp_realign" "${BASE}/res_env_exp_realign_filter" --db-load-mode ${DB_LOAD_MODE} --qid 0 --qsc $QSC --diff 0 --max-seq-id 1.0 --filter-min-enable 100 - "${MMSEQS}" result2msa "--threads ${MMSEQS_THREADS} ${BASE}/qdb" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp_realign_filter" "${BASE}/bfd.mgnify30.metaeuk30.smag30.a3m" --msa-format-mode 6 --db-load-mode ${DB_LOAD_MODE} ${FILTER_PARAM} + "${MMSEQS}" search "${BASE}/prof_res" "${DBBASE}/${DB3}" "${BASE}/res_env" "${BASE}/tmp" $SEARCH_PARAM --threads ${MMSEQS_THREADS} + "${MMSEQS}" expandaln "${BASE}/prof_res" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env" "${DBBASE}/${DB3}${ALN}" "${BASE}/res_env_exp" -e ${EXPAND_EVAL} --expansion-mode 0 --db-load-mode ${DB_LOAD_MODE} --threads ${MMSEQS_THREADS} + "${MMSEQS}" align "${BASE}/tmp/latest/profile_1" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp" 
"${BASE}/res_env_exp_realign" --db-load-mode ${DB_LOAD_MODE} -e ${ALIGN_EVAL} --max-accept ${MAX_ACCEPT} --alt-ali 10 -a --threads ${MMSEQS_THREADS} + "${MMSEQS}" filterresult "${BASE}/qdb" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp_realign" "${BASE}/res_env_exp_realign_filter" --db-load-mode ${DB_LOAD_MODE} --qid 0 --qsc $QSC --diff 0 --max-seq-id 1.0 --filter-min-enable 100 --threads ${MMSEQS_THREADS} + "${MMSEQS}" result2msa "${BASE}/qdb" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp_realign_filter" "${BASE}/bfd.mgnify30.metaeuk30.smag30.a3m" --msa-format-mode 6 --db-load-mode ${DB_LOAD_MODE} ${FILTER_PARAM} --threads ${MMSEQS_THREADS} "${MMSEQS}" rmdb "${BASE}/res_env_exp_realign_filter" "${MMSEQS}" rmdb "${BASE}/res_env_exp_realign" "${MMSEQS}" rmdb "${BASE}/res_env_exp" From d256af327857ade298deb024e83574e118ccf233 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Mon, 22 Apr 2024 14:27:46 -0400 Subject: [PATCH 04/64] trying to run --- scripts/precompute_alignments_mmseqs.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/scripts/precompute_alignments_mmseqs.py b/scripts/precompute_alignments_mmseqs.py index 4450510f..f1b50912 100644 --- a/scripts/precompute_alignments_mmseqs.py +++ b/scripts/precompute_alignments_mmseqs.py @@ -37,17 +37,22 @@ def main(args): lines = [l.strip() for l in f.readlines()] names = lines[::2] - seqs = lines[1::2] + seqs = lines[1::2] + + print(f"name: {names}") + print(f"seqs: {seqs}") if(args.fasta_chunk_size is None): chunk_size = len(seqs) else: chunk_size = args.fasta_chunk_size + print(f"chunk_size: {chunk_size}") if (args.threads is None): threads = 1 else: threads = args.threads + print(f"threads: {threads}") # Make the output directory Path(args.output_dir).mkdir(parents=True, exist_ok=True) @@ -56,6 +61,7 @@ def main(args): s = 0 while(s < len(seqs)): e = s + chunk_size + print(f"running chunk: {s} - {e}") chunk_fasta = [el for tup in zip(names[s:e], seqs[s:e]) for el in tup] s = e @@ -181,5 +187,5 @@ def main(args): raise ValueError( "pdb70 must be specified along with hhsearch_binary_path" ) - + print("will start pipeline") main(args) From 89c01644fca3d8c3e582c68528eb3435fbd61c8d Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 08:55:23 -0400 Subject: [PATCH 05/64] tests --- run_pretrained_openfold.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/run_pretrained_openfold.py b/run_pretrained_openfold.py index e27623e3..202a53d5 100644 --- a/run_pretrained_openfold.py +++ b/run_pretrained_openfold.py @@ -188,6 +188,12 @@ def main(args): is_multimer = "multimer" in args.config_preset + print(f"mmcif_dir: {args.template_mmcif_dir}") + print(f"max_template_date: {args.max_template_date}") + print(f"max_hits: {args.max_hits}") + print(f"release_dates_path: {args.release_dates_path}") + print(f"obsolete_pdbs_path: {args.obsolete_pdbs_path}") + if is_multimer: template_featurizer = templates.HmmsearchHitFeaturizer( mmcif_dir=args.template_mmcif_dir, @@ -216,6 +222,7 @@ def main(args): monomer_data_pipeline=data_processor, ) + print(f"output: {args.output_dir}") output_dir_base = args.output_dir random_seed = args.data_random_seed if random_seed is None: @@ -232,6 +239,7 @@ def main(args): else: alignment_dir = args.use_precomputed_alignments + print(f"alignment_dir: {args.use_precomputed_alignments}") tag_list = [] seq_list = [] for fasta_file in 
list_files_with_extensions(args.fasta_dir, (".fasta", ".fa")): @@ -255,15 +263,24 @@ def main(args): tag_list.append((tag, tags)) seq_list.append(seqs) + print(f"header list: {tag_list}") + print(f"seq list: {args.seq_list}") + seq_sort_fn = lambda target: sum([len(s) for s in target[1]]) sorted_targets = sorted(zip(tag_list, seq_list), key=seq_sort_fn) feature_dicts = {} + + print(f"sorted_targets: {sorted_targets}") + print(f"model_device: {args.model_device}") + print(f"openfold_checkpoint_path: {args.openfold_checkpoint_path}") + print(f"jax_param_path: {args.jax_param_path}") model_generator = load_models_from_command_line( config, args.model_device, args.openfold_checkpoint_path, args.jax_param_path, - args.output_dir) + args.output_dir + ) for model, output_directory in model_generator: cur_tracing_interval = 0 @@ -272,11 +289,16 @@ def main(args): if args.output_postfix is not None: output_name = f'{output_name}_{args.output_postfix}' + print(f"tag: {tag}") + print(f"tags: {tags}") + print(f"seqs: {seqs}") + print(f"alignment_dir: {alignment_dir}") # Does nothing if the alignments have already been computed precompute_alignments(tags, seqs, alignment_dir, args) feature_dict = feature_dicts.get(tag, None) if feature_dict is None: + print(f"generate_feature_dict") feature_dict = generate_feature_dict( tags, seqs, From acb0dcc05a93b81a8a5ff376f51d08c1783e15fa Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 08:56:45 -0400 Subject: [PATCH 06/64] tests --- run_pretrained_openfold.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run_pretrained_openfold.py b/run_pretrained_openfold.py index 202a53d5..b366d160 100644 --- a/run_pretrained_openfold.py +++ b/run_pretrained_openfold.py @@ -190,7 +190,7 @@ def main(args): print(f"mmcif_dir: {args.template_mmcif_dir}") print(f"max_template_date: {args.max_template_date}") - print(f"max_hits: {args.max_hits}") + print(f"max_hits: {config.data.predict.max_templates}") print(f"release_dates_path: {args.release_dates_path}") print(f"obsolete_pdbs_path: {args.obsolete_pdbs_path}") From e8fa8d2cf558ca0b887b4ffa005c6d806378136d Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 08:57:37 -0400 Subject: [PATCH 07/64] tests --- run_pretrained_openfold.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run_pretrained_openfold.py b/run_pretrained_openfold.py index b366d160..a21c4b50 100644 --- a/run_pretrained_openfold.py +++ b/run_pretrained_openfold.py @@ -264,7 +264,7 @@ def main(args): seq_list.append(seqs) print(f"header list: {tag_list}") - print(f"seq list: {args.seq_list}") + print(f"seq list: {seq_list}") seq_sort_fn = lambda target: sum([len(s) for s in target[1]]) sorted_targets = sorted(zip(tag_list, seq_list), key=seq_sort_fn) From 93caf7ecebe8964239b0a50910b3a7a4fa11c76e Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 09:43:07 -0400 Subject: [PATCH 08/64] test --- run_pretrained_openfold.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/run_pretrained_openfold.py b/run_pretrained_openfold.py index a21c4b50..a4d8ac81 100644 --- a/run_pretrained_openfold.py +++ b/run_pretrained_openfold.py @@ -137,8 +137,11 @@ def generate_feature_dict( fp.write( '\n'.join([f">{tag}\n{seq}" for tag, seq in zip(tags, seqs)]) ) + print(f"tmp_fasta_path: {tmp_fasta_path}") + print(f"alignment_dir: {alignment_dir}") feature_dict = 
data_processor.process_fasta( - fasta_path=tmp_fasta_path, alignment_dir=alignment_dir, + fasta_path=tmp_fasta_path, + alignment_dir=alignment_dir ) elif len(seqs) == 1: tag = tags[0] From dd6f9bc141d72b977e8efc0945a71e78034da82d Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 09:48:15 -0400 Subject: [PATCH 09/64] test --- run_pretrained_openfold.py | 1 + 1 file changed, 1 insertion(+) diff --git a/run_pretrained_openfold.py b/run_pretrained_openfold.py index a4d8ac81..cef00e84 100644 --- a/run_pretrained_openfold.py +++ b/run_pretrained_openfold.py @@ -139,6 +139,7 @@ def generate_feature_dict( ) print(f"tmp_fasta_path: {tmp_fasta_path}") print(f"alignment_dir: {alignment_dir}") + print(f"data_processor: {data_processor}") feature_dict = data_processor.process_fasta( fasta_path=tmp_fasta_path, alignment_dir=alignment_dir From bfb2cac907ef408647de585495c2df5036394589 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 09:54:29 -0400 Subject: [PATCH 10/64] test --- openfold/data/data_pipeline.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/openfold/data/data_pipeline.py b/openfold/data/data_pipeline.py index ce8494d0..0e8e4cf3 100644 --- a/openfold/data/data_pipeline.py +++ b/openfold/data/data_pipeline.py @@ -1235,11 +1235,17 @@ def process_fasta(self, input_fasta_str = f.read() input_seqs, input_descs = parsers.parse_fasta(input_fasta_str) + print(f"input_seqs: {input_seqs}") + print(f"input_descs: {input_descs}") + print(f"alignment_index: {alignment_index}") all_chain_features = {} sequence_features = {} is_homomer_or_monomer = len(set(input_seqs)) == 1 + print(f"is_homomer_or_monomer: {is_homomer_or_monomer}") for desc, seq in zip(input_descs, input_seqs): + print(f"current desc: {desc}") + print(f"current seq: {seq}") if seq in sequence_features: all_chain_features[desc] = copy.deepcopy( sequence_features[seq] @@ -1253,6 +1259,9 @@ def process_fasta(self, chain_alignment_index = None chain_alignment_dir = os.path.join(alignment_dir, desc) + print(f"chain_alignment_index: {chain_alignment_index}") + print(f"chain_alignment_dir: {chain_alignment_dir}") + chain_features = self._process_single_chain( chain_id=desc, sequence=seq, From be45f5358065931b53af42fe4ce1e0bfeced4c4e Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 10:01:43 -0400 Subject: [PATCH 11/64] test --- openfold/data/data_pipeline.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/openfold/data/data_pipeline.py b/openfold/data/data_pipeline.py index 0e8e4cf3..ddc13d08 100644 --- a/openfold/data/data_pipeline.py +++ b/openfold/data/data_pipeline.py @@ -1168,6 +1168,8 @@ def _process_single_chain( """Runs the monomer pipeline on a single chain.""" chain_fasta_str = f'>{chain_id}\n{sequence}\n' + print(f"chain_alignment_index: {chain_alignment_index}") + print(f"chain_alignment_dir: {chain_alignment_dir}") if chain_alignment_index is None and not os.path.exists(chain_alignment_dir): raise ValueError(f"Alignments for {chain_id} not found...") @@ -1259,9 +1261,12 @@ def process_fasta(self, chain_alignment_index = None chain_alignment_dir = os.path.join(alignment_dir, desc) - print(f"chain_alignment_index: {chain_alignment_index}") + print(f"chain_id: {desc}") + print(f"sequence: {seq}") + print(f"description: {desc}") print(f"chain_alignment_dir: {chain_alignment_dir}") - + print(f"chain_alignment_index: 
{chain_alignment_index}") + print(f"is_homomer_or_monomer: {is_homomer_or_monomer}") chain_features = self._process_single_chain( chain_id=desc, sequence=seq, From f256b4ba29d073241aaadc01e972a4354ce6cab6 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 10:03:52 -0400 Subject: [PATCH 12/64] test --- openfold/data/data_pipeline.py | 1 + 1 file changed, 1 insertion(+) diff --git a/openfold/data/data_pipeline.py b/openfold/data/data_pipeline.py index ddc13d08..81a836ea 100644 --- a/openfold/data/data_pipeline.py +++ b/openfold/data/data_pipeline.py @@ -1190,6 +1190,7 @@ def _process_single_chain( chain_features.update(all_seq_msa_features) return chain_features + print(f"chain_features: {chain_features}") @staticmethod def _all_seq_msa_features(alignment_dir, alignment_index): """Get MSA features for unclustered uniprot, for pairing.""" From ec7f7654e4c42e6e3a1c513d833e8f5bccb28616 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 10:05:58 -0400 Subject: [PATCH 13/64] test --- openfold/data/data_pipeline.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/openfold/data/data_pipeline.py b/openfold/data/data_pipeline.py index 81a836ea..e9d9c0fe 100644 --- a/openfold/data/data_pipeline.py +++ b/openfold/data/data_pipeline.py @@ -1183,6 +1183,8 @@ def _process_single_chain( # We only construct the pairing features if there are 2 or more unique # sequences. if not is_homomer_or_monomer: + print(f"chain_alignment_dir: {chain_alignment_dir}") + print(f"chain_alignment_index: {chain_alignment_index}") all_seq_msa_features = self._all_seq_msa_features( chain_alignment_dir, chain_alignment_index @@ -1190,7 +1192,6 @@ def _process_single_chain( chain_features.update(all_seq_msa_features) return chain_features - print(f"chain_features: {chain_features}") @staticmethod def _all_seq_msa_features(alignment_dir, alignment_index): """Get MSA features for unclustered uniprot, for pairing.""" From e995a1a66f8995ff643feb30c07bb58d6466abb5 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 10:18:08 -0400 Subject: [PATCH 14/64] test --- scripts/precompute_alignments.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/scripts/precompute_alignments.py b/scripts/precompute_alignments.py index 1bef41a9..82d6b93f 100644 --- a/scripts/precompute_alignments.py +++ b/scripts/precompute_alignments.py @@ -88,8 +88,11 @@ def parse_and_align(files, alignment_runner, args): elif(f.endswith('.fasta') or f.endswith('.fa')): with open(path, 'r') as fp: fasta_str = fp.read() - input_seqs, _ = parse_fasta(fasta_str) - if len(input_seqs) != 1: + #input_seqs, _ = parse_fasta(fasta_str) + input_seqs, input_tags = parse_fasta(fasta_str) + print(f"input_seqs: {input_seqs}") + print(f"input_tags: {input_tags}") + if len(input_seqs) != 1: msg = f'More than one input_sequence found in {f}' if(args.raise_errors): raise ValueError(msg) From 5ba53341fbae5ddbc98263a55da715d65b8b9f21 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 10:29:44 -0400 Subject: [PATCH 15/64] test --- scripts/precompute_alignments.py | 70 ++++++++++++++++++-------------- 1 file changed, 39 insertions(+), 31 deletions(-) diff --git a/scripts/precompute_alignments.py b/scripts/precompute_alignments.py index 82d6b93f..58fa5486 100644 --- a/scripts/precompute_alignments.py +++ 
b/scripts/precompute_alignments.py @@ -22,45 +22,52 @@ def run_seq_group_alignments(seq_groups, alignment_runner, args): dirs = set(os.listdir(args.output_dir)) - for seq, names in seq_groups: - first_name = names[0] - alignment_dir = os.path.join(args.output_dir, first_name) + print(f"dirs: {dirs}") + for seq, name in seq_groups: + # first_name = names[0] + alignment_dir = os.path.join(args.output_dir, name) - try: - os.makedirs(alignment_dir) - except Exception as e: - logging.warning(f"Failed to create directory for {first_name} with exception {e}...") - continue + # try: + # os.makedirs(alignment_dir) + # except Exception as e: + # logging.warning(f"Failed to create directory for {first_name} with exception {e}...") + # continue + os.makedirs(alignment_dir, exist_ok=True) fd, fasta_path = tempfile.mkstemp(suffix=".fasta") + print(f"fd: {fd}") + print(f"fasta_path: {fasta_path}") with os.fdopen(fd, 'w') as fp: fp.write(f'>query\n{seq}') try: + print(f"running alignement fasta_path: {fasta_path}") + print(f"running alignement alignment_dir: {alignment_dir}") alignment_runner.run( fasta_path, alignment_dir ) except Exception as e: logging.warning(e) - logging.warning(f"Failed to run alignments for {first_name}. Skipping...") + logging.warning(f"Failed to run alignments for {name}. Skipping...") os.remove(fasta_path) os.rmdir(alignment_dir) continue os.remove(fasta_path) - for name in names[1:]: - if(name in dirs): - logging.warning( - f'{name} has already been processed. Skipping...' - ) - continue + # for name in names[1:]: + # if(name in dirs): + # logging.warning( + # f'{name} has already been processed. Skipping...' + # ) + # continue - cp_dir = os.path.join(args.output_dir, name) - os.makedirs(cp_dir, exist_ok=True) - - for f in os.listdir(alignment_dir): - copyfile(os.path.join(alignment_dir, f), os.path.join(cp_dir, f)) + cp_dir = os.path.join(args.output_dir, name) + os.makedirs(cp_dir, exist_ok=True) + + for f in os.listdir(alignment_dir): + print(f"copying align results to: {cp_dir}") + copyfile(os.path.join(alignment_dir, f), os.path.join(cp_dir, f)) def parse_and_align(files, alignment_runner, args): @@ -89,17 +96,18 @@ def parse_and_align(files, alignment_runner, args): with open(path, 'r') as fp: fasta_str = fp.read() #input_seqs, _ = parse_fasta(fasta_str) - input_seqs, input_tags = parse_fasta(fasta_str) - print(f"input_seqs: {input_seqs}") - print(f"input_tags: {input_tags}") - if len(input_seqs) != 1: - msg = f'More than one input_sequence found in {f}' - if(args.raise_errors): - raise ValueError(msg) - else: - logging.warning(msg) - input_sequence = input_seqs[0] - seq_group_dict[input_sequence] = [file_id] + # input_seqs, input_tags = parse_fasta(fasta_str) + # print(f"input_seqs: {input_seqs}") + # print(f"input_tags: {input_tags}") + # if len(input_seqs) != 1: + # msg = f'More than one input_sequence found in {f}' + # if(args.raise_errors): + # raise ValueError(msg) + # else: + # logging.warning(msg) + for input_sequence, input_tag in parse_fasta(fasta_str): + input_sequence = input_seqs[0] + seq_group_dict[input_sequence] = [input_tag] elif(f.endswith('.core')): with open(path, 'r') as fp: core_str = fp.read() From 00d6f665b91ead2d6bec3f9873df4f2d9d3d73b9 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 10:30:30 -0400 Subject: [PATCH 16/64] test --- scripts/precompute_alignments.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/precompute_alignments.py 
b/scripts/precompute_alignments.py index 58fa5486..7a2fb719 100644 --- a/scripts/precompute_alignments.py +++ b/scripts/precompute_alignments.py @@ -106,7 +106,7 @@ def parse_and_align(files, alignment_runner, args): # else: # logging.warning(msg) for input_sequence, input_tag in parse_fasta(fasta_str): - input_sequence = input_seqs[0] + # input_sequence = input_seqs[0] seq_group_dict[input_sequence] = [input_tag] elif(f.endswith('.core')): with open(path, 'r') as fp: From 4eaf57dd08dd576d3610e5a60a0827b4a8883d7d Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 10:33:43 -0400 Subject: [PATCH 17/64] test --- scripts/precompute_alignments.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/precompute_alignments.py b/scripts/precompute_alignments.py index 7a2fb719..8cc6b1d0 100644 --- a/scripts/precompute_alignments.py +++ b/scripts/precompute_alignments.py @@ -25,6 +25,8 @@ def run_seq_group_alignments(seq_groups, alignment_runner, args): print(f"dirs: {dirs}") for seq, name in seq_groups: # first_name = names[0] + print(f"name: {name}") + print(f"args.output_dir: {args.output_dir}") alignment_dir = os.path.join(args.output_dir, name) # try: From 752aa79e1ae5b0f1c6f457b3e2d9536444a50e6b Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 10:35:09 -0400 Subject: [PATCH 18/64] test --- scripts/precompute_alignments.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/precompute_alignments.py b/scripts/precompute_alignments.py index 8cc6b1d0..af75f148 100644 --- a/scripts/precompute_alignments.py +++ b/scripts/precompute_alignments.py @@ -26,6 +26,7 @@ def run_seq_group_alignments(seq_groups, alignment_runner, args): for seq, name in seq_groups: # first_name = names[0] print(f"name: {name}") + print(f"seq: {seq}") print(f"args.output_dir: {args.output_dir}") alignment_dir = os.path.join(args.output_dir, name) From 6454eb3eff7d240b24402cb3809d0c0f259dd1a8 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 10:35:51 -0400 Subject: [PATCH 19/64] test --- scripts/precompute_alignments.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/precompute_alignments.py b/scripts/precompute_alignments.py index af75f148..daf93494 100644 --- a/scripts/precompute_alignments.py +++ b/scripts/precompute_alignments.py @@ -22,6 +22,7 @@ def run_seq_group_alignments(seq_groups, alignment_runner, args): dirs = set(os.listdir(args.output_dir)) + print(f"seq_groups: {seq_groups}") print(f"dirs: {dirs}") for seq, name in seq_groups: # first_name = names[0] From 866ca5876aa80a2f6e852ca5e12cab2b7513b8ae Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 10:36:38 -0400 Subject: [PATCH 20/64] test --- scripts/precompute_alignments.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/precompute_alignments.py b/scripts/precompute_alignments.py index daf93494..1161550c 100644 --- a/scripts/precompute_alignments.py +++ b/scripts/precompute_alignments.py @@ -125,6 +125,7 @@ def parse_and_align(files, alignment_runner, args): else: continue + print(f"seq_group_dict: {seq_group_dict}") seq_group_tuples = [(k,v) for k,v in seq_group_dict.items()] run_seq_group_alignments(seq_group_tuples, alignment_runner, args) From 126cb22d950d9b7370bb2d15dc4dcf401cdddccb Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 10:42:16 -0400 
Subject: [PATCH 21/64] test --- scripts/precompute_alignments.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/scripts/precompute_alignments.py b/scripts/precompute_alignments.py index 1161550c..6ecd2228 100644 --- a/scripts/precompute_alignments.py +++ b/scripts/precompute_alignments.py @@ -22,13 +22,13 @@ def run_seq_group_alignments(seq_groups, alignment_runner, args): dirs = set(os.listdir(args.output_dir)) - print(f"seq_groups: {seq_groups}") + # print(f"seq_groups: {seq_groups}") print(f"dirs: {dirs}") for seq, name in seq_groups: # first_name = names[0] print(f"name: {name}") print(f"seq: {seq}") - print(f"args.output_dir: {args.output_dir}") + # print(f"args.output_dir: {args.output_dir}") alignment_dir = os.path.join(args.output_dir, name) # try: @@ -100,7 +100,7 @@ def parse_and_align(files, alignment_runner, args): with open(path, 'r') as fp: fasta_str = fp.read() #input_seqs, _ = parse_fasta(fasta_str) - # input_seqs, input_tags = parse_fasta(fasta_str) + input_seqs, input_tags = parse_fasta(fasta_str) # print(f"input_seqs: {input_seqs}") # print(f"input_tags: {input_tags}") # if len(input_seqs) != 1: @@ -109,9 +109,11 @@ def parse_and_align(files, alignment_runner, args): # raise ValueError(msg) # else: # logging.warning(msg) - for input_sequence, input_tag in parse_fasta(fasta_str): - # input_sequence = input_seqs[0] - seq_group_dict[input_sequence] = [input_tag] + for index in range(len(input_seqs)): + seq_group_dict[input_seqs[index]] = [input_tags[index]] + # for input_sequence, input_tag in parse_fasta(fasta_str): + # # input_sequence = input_seqs[0] + # seq_group_dict[input_sequence] = [input_tag] elif(f.endswith('.core')): with open(path, 'r') as fp: core_str = fp.read() From 875246b438208f73deb56f0cafef8f545519195f Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 10:43:03 -0400 Subject: [PATCH 22/64] test --- scripts/precompute_alignments.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/precompute_alignments.py b/scripts/precompute_alignments.py index 6ecd2228..1eeaf6ec 100644 --- a/scripts/precompute_alignments.py +++ b/scripts/precompute_alignments.py @@ -110,7 +110,7 @@ def parse_and_align(files, alignment_runner, args): # else: # logging.warning(msg) for index in range(len(input_seqs)): - seq_group_dict[input_seqs[index]] = [input_tags[index]] + seq_group_dict[input_seqs[index]] = input_tags[index] # for input_sequence, input_tag in parse_fasta(fasta_str): # # input_sequence = input_seqs[0] # seq_group_dict[input_sequence] = [input_tag] From 2dea90fa7c35d8ed8701b65c9aba4771289f46ce Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 11:11:39 -0400 Subject: [PATCH 23/64] test --- openfold/data/data_pipeline.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/openfold/data/data_pipeline.py b/openfold/data/data_pipeline.py index e9d9c0fe..7e488f5e 100644 --- a/openfold/data/data_pipeline.py +++ b/openfold/data/data_pipeline.py @@ -468,6 +468,7 @@ def run( ): """Runs alignment tools on a sequence""" if(self.jackhmmer_uniref90_runner is not None): + print("running jackhmmer_uniref90_runner") uniref90_out_path = os.path.join(output_dir, "uniref90_hits.sto") jackhmmer_uniref90_result = run_msa_tool( @@ -505,6 +506,7 @@ def run( ) if(self.jackhmmer_mgnify_runner is not None): + print("running jackhmmer_mgnify_runner") mgnify_out_path = os.path.join(output_dir, "mgnify_hits.sto") 
jackhmmer_mgnify_result = run_msa_tool( msa_runner=self.jackhmmer_mgnify_runner, @@ -515,6 +517,7 @@ def run( ) if(self.use_small_bfd and self.jackhmmer_small_bfd_runner is not None): + print("running jackhmmer_small_bfd_runner") bfd_out_path = os.path.join(output_dir, "small_bfd_hits.sto") jackhmmer_small_bfd_result = run_msa_tool( msa_runner=self.jackhmmer_small_bfd_runner, @@ -523,6 +526,7 @@ def run( msa_format="sto", ) elif(self.hhblits_bfd_unirefclust_runner is not None): + print("running hhblits_bfd_unirefclust_runner") uni_name = "uni" for db_name in self.hhblits_bfd_unirefclust_runner.databases: if "uniref" in db_name.lower(): @@ -539,6 +543,7 @@ def run( ) if(self.jackhmmer_uniprot_runner is not None): + print("running jackhmmer_uniprot_runner") uniprot_out_path = os.path.join(output_dir, 'uniprot_hits.sto') result = run_msa_tool( self.jackhmmer_uniprot_runner, From 25fc5a44efed82fe1af5e93f70914977e585fd09 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 11:13:22 -0400 Subject: [PATCH 24/64] test --- openfold/data/data_pipeline.py | 1 + 1 file changed, 1 insertion(+) diff --git a/openfold/data/data_pipeline.py b/openfold/data/data_pipeline.py index 7e488f5e..c2b4e63a 100644 --- a/openfold/data/data_pipeline.py +++ b/openfold/data/data_pipeline.py @@ -466,6 +466,7 @@ def run( fasta_path: str, output_dir: str, ): + print("running alignment_runner") """Runs alignment tools on a sequence""" if(self.jackhmmer_uniref90_runner is not None): print("running jackhmmer_uniref90_runner") From 3ad997fe11403fde263509564cebfc166de1da1b Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Tue, 23 Apr 2024 11:28:28 -0400 Subject: [PATCH 25/64] test --- openfold/data/data_pipeline.py | 5 +++++ scripts/precompute_alignments.py | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/openfold/data/data_pipeline.py b/openfold/data/data_pipeline.py index c2b4e63a..67908e3e 100644 --- a/openfold/data/data_pipeline.py +++ b/openfold/data/data_pipeline.py @@ -409,6 +409,7 @@ def __init__( if(jackhmmer_binary_path is not None and uniref90_database_path is not None ): + print("init jackhmmer_uniref90_runner") self.jackhmmer_uniref90_runner = jackhmmer.Jackhmmer( binary_path=jackhmmer_binary_path, database_path=uniref90_database_path, @@ -419,6 +420,7 @@ def __init__( self.hhblits_bfd_unirefclust_runner = None if(bfd_database_path is not None): if use_small_bfd: + print("init jackhmmer_small_bfd_runner") self.jackhmmer_small_bfd_runner = jackhmmer.Jackhmmer( binary_path=jackhmmer_binary_path, database_path=bfd_database_path, @@ -430,6 +432,7 @@ def __init__( dbs.append(uniref30_database_path) if (uniclust30_database_path is not None): dbs.append(uniclust30_database_path) + print("init hhblits_bfd_unirefclust_runner") self.hhblits_bfd_unirefclust_runner = hhblits.HHBlits( binary_path=hhblits_binary_path, databases=dbs, @@ -438,6 +441,7 @@ def __init__( self.jackhmmer_mgnify_runner = None if(mgnify_database_path is not None): + print("init jackhmmer_mgnify_runner") self.jackhmmer_mgnify_runner = jackhmmer.Jackhmmer( binary_path=jackhmmer_binary_path, database_path=mgnify_database_path, @@ -446,6 +450,7 @@ def __init__( self.jackhmmer_uniprot_runner = None if(uniprot_database_path is not None): + print("init jackhmmer_uniprot_runner") self.jackhmmer_uniprot_runner = jackhmmer.Jackhmmer( binary_path=jackhmmer_binary_path, database_path=uniprot_database_path, diff --git 
a/scripts/precompute_alignments.py b/scripts/precompute_alignments.py index 1eeaf6ec..defc5104 100644 --- a/scripts/precompute_alignments.py +++ b/scripts/precompute_alignments.py @@ -27,7 +27,7 @@ def run_seq_group_alignments(seq_groups, alignment_runner, args): for seq, name in seq_groups: # first_name = names[0] print(f"name: {name}") - print(f"seq: {seq}") + # print(f"seq: {seq}") # print(f"args.output_dir: {args.output_dir}") alignment_dir = os.path.join(args.output_dir, name) @@ -39,8 +39,8 @@ def run_seq_group_alignments(seq_groups, alignment_runner, args): os.makedirs(alignment_dir, exist_ok=True) fd, fasta_path = tempfile.mkstemp(suffix=".fasta") - print(f"fd: {fd}") - print(f"fasta_path: {fasta_path}") + # print(f"fd: {fd}") + # print(f"fasta_path: {fasta_path}") with os.fdopen(fd, 'w') as fp: fp.write(f'>query\n{seq}') @@ -66,12 +66,12 @@ def run_seq_group_alignments(seq_groups, alignment_runner, args): # ) # continue - cp_dir = os.path.join(args.output_dir, name) - os.makedirs(cp_dir, exist_ok=True) - - for f in os.listdir(alignment_dir): - print(f"copying align results to: {cp_dir}") - copyfile(os.path.join(alignment_dir, f), os.path.join(cp_dir, f)) + # cp_dir = os.path.join(args.output_dir, name) + # os.makedirs(cp_dir, exist_ok=True) + # + # for f in os.listdir(alignment_dir): + # print(f"copying align results to: {cp_dir}") + # copyfile(os.path.join(alignment_dir, f), os.path.join(cp_dir, f)) def parse_and_align(files, alignment_runner, args): @@ -127,7 +127,7 @@ def parse_and_align(files, alignment_runner, args): else: continue - print(f"seq_group_dict: {seq_group_dict}") + # print(f"seq_group_dict: {seq_group_dict}") seq_group_tuples = [(k,v) for k,v in seq_group_dict.items()] run_seq_group_alignments(seq_group_tuples, alignment_runner, args) From 220e1ce964b10010d9009d8c9d4bfd1992392a24 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Thu, 25 Apr 2024 11:01:56 -0400 Subject: [PATCH 26/64] added logging --- run_pretrained_openfold.py | 54 ++++++++++++++++++++++---------------- 1 file changed, 31 insertions(+), 23 deletions(-) diff --git a/run_pretrained_openfold.py b/run_pretrained_openfold.py index cef00e84..fbbfad0f 100644 --- a/run_pretrained_openfold.py +++ b/run_pretrained_openfold.py @@ -137,9 +137,6 @@ def generate_feature_dict( fp.write( '\n'.join([f">{tag}\n{seq}" for tag, seq in zip(tags, seqs)]) ) - print(f"tmp_fasta_path: {tmp_fasta_path}") - print(f"alignment_dir: {alignment_dir}") - print(f"data_processor: {data_processor}") feature_dict = data_processor.process_fasta( fasta_path=tmp_fasta_path, alignment_dir=alignment_dir @@ -183,6 +180,37 @@ def main(args): args.use_single_seq_mode = True config = model_config(args.config_preset, long_sequence_inference=args.long_sequence_inference) + logger.info("#### INPUT / OUTPUT ####") + logger.info(f"fasta_dir: {args.fasta_dir}") + logger.info(f"output_dir: {args.output_dir}") + logger.info(f"output prediction filenames: {args.output_postfix}") + logger.info(f"output prediction filenames: {args.cif_output}") + logger.info(f"save embedded outputs: {args.save_outputs}") + + logger.info("#### PRESETS ####") + logger.info(f"skip_relaxation: {args.skip_relaxation}") + logger.info(f"use_precomputed_alignments: {args.use_precomputed_alignments}") + logger.info(f"use_single_seq_mode: {args.use_single_seq_mode}") + logger.info(f"long_sequence_inference: {args.long_sequence_inference}") + logger.info(f"Threads: {args.cpus}") + logger.info(f"multimer_ri_gap: {args.multimer_ri_gap}") + 
logger.info(f"subtract_plddt: {args.subtract_plddt}") + + logger.info("#### MODEL PARAMS ####") + logger.info(f"Model: {args.config_preset}") + logger.info(f"trace_model: {args.trace_model}") + + logger.info("#### DATABASE PARAMS ####") + logger.info(f"template_mmcif_dir: {args.template_mmcif_dir}") + logger.info(f"max_template_date: {args.max_template_date}") + logger.info(f"max_templates: {config.data.predict.max_templates}") + logger.info(f"release_dates_path: {args.release_dates_path}") + logger.info(f"obsolete_pdbs_path: {args.obsolete_pdbs_path}") + + logger.info("#### GPU / AI PARAMS ####") + logger.info(f"model_device: {args.model_device}") + logger.info(f"openfold_checkpoint_path: {args.openfold_checkpoint_path}") + logger.info(f"jax_param_path: {args.jax_param_path}") if args.trace_model: if not config.data.predict.fixed_size: @@ -192,12 +220,6 @@ def main(args): is_multimer = "multimer" in args.config_preset - print(f"mmcif_dir: {args.template_mmcif_dir}") - print(f"max_template_date: {args.max_template_date}") - print(f"max_hits: {config.data.predict.max_templates}") - print(f"release_dates_path: {args.release_dates_path}") - print(f"obsolete_pdbs_path: {args.obsolete_pdbs_path}") - if is_multimer: template_featurizer = templates.HmmsearchHitFeaturizer( mmcif_dir=args.template_mmcif_dir, @@ -226,7 +248,6 @@ def main(args): monomer_data_pipeline=data_processor, ) - print(f"output: {args.output_dir}") output_dir_base = args.output_dir random_seed = args.data_random_seed if random_seed is None: @@ -243,7 +264,6 @@ def main(args): else: alignment_dir = args.use_precomputed_alignments - print(f"alignment_dir: {args.use_precomputed_alignments}") tag_list = [] seq_list = [] for fasta_file in list_files_with_extensions(args.fasta_dir, (".fasta", ".fa")): @@ -267,17 +287,10 @@ def main(args): tag_list.append((tag, tags)) seq_list.append(seqs) - print(f"header list: {tag_list}") - print(f"seq list: {seq_list}") - seq_sort_fn = lambda target: sum([len(s) for s in target[1]]) sorted_targets = sorted(zip(tag_list, seq_list), key=seq_sort_fn) feature_dicts = {} - print(f"sorted_targets: {sorted_targets}") - print(f"model_device: {args.model_device}") - print(f"openfold_checkpoint_path: {args.openfold_checkpoint_path}") - print(f"jax_param_path: {args.jax_param_path}") model_generator = load_models_from_command_line( config, args.model_device, @@ -293,16 +306,11 @@ def main(args): if args.output_postfix is not None: output_name = f'{output_name}_{args.output_postfix}' - print(f"tag: {tag}") - print(f"tags: {tags}") - print(f"seqs: {seqs}") - print(f"alignment_dir: {alignment_dir}") # Does nothing if the alignments have already been computed precompute_alignments(tags, seqs, alignment_dir, args) feature_dict = feature_dicts.get(tag, None) if feature_dict is None: - print(f"generate_feature_dict") feature_dict = generate_feature_dict( tags, seqs, From 3c35fb49ae35def2f70e9b83d300e19f0d215055 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Thu, 25 Apr 2024 11:07:12 -0400 Subject: [PATCH 27/64] added logging --- openfold/data/data_pipeline.py | 28 --------------- run_pretrained_openfold.py | 62 +++++++++++++++++----------------- 2 files changed, 31 insertions(+), 59 deletions(-) diff --git a/openfold/data/data_pipeline.py b/openfold/data/data_pipeline.py index 67908e3e..d2135daa 100644 --- a/openfold/data/data_pipeline.py +++ b/openfold/data/data_pipeline.py @@ -409,7 +409,6 @@ def __init__( if(jackhmmer_binary_path is not None and uniref90_database_path 
is not None ): - print("init jackhmmer_uniref90_runner") self.jackhmmer_uniref90_runner = jackhmmer.Jackhmmer( binary_path=jackhmmer_binary_path, database_path=uniref90_database_path, @@ -420,7 +419,6 @@ def __init__( self.hhblits_bfd_unirefclust_runner = None if(bfd_database_path is not None): if use_small_bfd: - print("init jackhmmer_small_bfd_runner") self.jackhmmer_small_bfd_runner = jackhmmer.Jackhmmer( binary_path=jackhmmer_binary_path, database_path=bfd_database_path, @@ -432,7 +430,6 @@ def __init__( dbs.append(uniref30_database_path) if (uniclust30_database_path is not None): dbs.append(uniclust30_database_path) - print("init hhblits_bfd_unirefclust_runner") self.hhblits_bfd_unirefclust_runner = hhblits.HHBlits( binary_path=hhblits_binary_path, databases=dbs, @@ -441,7 +438,6 @@ def __init__( self.jackhmmer_mgnify_runner = None if(mgnify_database_path is not None): - print("init jackhmmer_mgnify_runner") self.jackhmmer_mgnify_runner = jackhmmer.Jackhmmer( binary_path=jackhmmer_binary_path, database_path=mgnify_database_path, @@ -450,7 +446,6 @@ def __init__( self.jackhmmer_uniprot_runner = None if(uniprot_database_path is not None): - print("init jackhmmer_uniprot_runner") self.jackhmmer_uniprot_runner = jackhmmer.Jackhmmer( binary_path=jackhmmer_binary_path, database_path=uniprot_database_path, @@ -471,10 +466,8 @@ def run( fasta_path: str, output_dir: str, ): - print("running alignment_runner") """Runs alignment tools on a sequence""" if(self.jackhmmer_uniref90_runner is not None): - print("running jackhmmer_uniref90_runner") uniref90_out_path = os.path.join(output_dir, "uniref90_hits.sto") jackhmmer_uniref90_result = run_msa_tool( @@ -512,7 +505,6 @@ def run( ) if(self.jackhmmer_mgnify_runner is not None): - print("running jackhmmer_mgnify_runner") mgnify_out_path = os.path.join(output_dir, "mgnify_hits.sto") jackhmmer_mgnify_result = run_msa_tool( msa_runner=self.jackhmmer_mgnify_runner, @@ -523,7 +515,6 @@ def run( ) if(self.use_small_bfd and self.jackhmmer_small_bfd_runner is not None): - print("running jackhmmer_small_bfd_runner") bfd_out_path = os.path.join(output_dir, "small_bfd_hits.sto") jackhmmer_small_bfd_result = run_msa_tool( msa_runner=self.jackhmmer_small_bfd_runner, @@ -532,7 +523,6 @@ def run( msa_format="sto", ) elif(self.hhblits_bfd_unirefclust_runner is not None): - print("running hhblits_bfd_unirefclust_runner") uni_name = "uni" for db_name in self.hhblits_bfd_unirefclust_runner.databases: if "uniref" in db_name.lower(): @@ -549,7 +539,6 @@ def run( ) if(self.jackhmmer_uniprot_runner is not None): - print("running jackhmmer_uniprot_runner") uniprot_out_path = os.path.join(output_dir, 'uniprot_hits.sto') result = run_msa_tool( self.jackhmmer_uniprot_runner, @@ -1179,8 +1168,6 @@ def _process_single_chain( """Runs the monomer pipeline on a single chain.""" chain_fasta_str = f'>{chain_id}\n{sequence}\n' - print(f"chain_alignment_index: {chain_alignment_index}") - print(f"chain_alignment_dir: {chain_alignment_dir}") if chain_alignment_index is None and not os.path.exists(chain_alignment_dir): raise ValueError(f"Alignments for {chain_id} not found...") @@ -1194,8 +1181,6 @@ def _process_single_chain( # We only construct the pairing features if there are 2 or more unique # sequences. 
if not is_homomer_or_monomer: - print(f"chain_alignment_dir: {chain_alignment_dir}") - print(f"chain_alignment_index: {chain_alignment_index}") all_seq_msa_features = self._all_seq_msa_features( chain_alignment_dir, chain_alignment_index @@ -1250,17 +1235,10 @@ def process_fasta(self, input_fasta_str = f.read() input_seqs, input_descs = parsers.parse_fasta(input_fasta_str) - print(f"input_seqs: {input_seqs}") - print(f"input_descs: {input_descs}") - print(f"alignment_index: {alignment_index}") - all_chain_features = {} sequence_features = {} is_homomer_or_monomer = len(set(input_seqs)) == 1 - print(f"is_homomer_or_monomer: {is_homomer_or_monomer}") for desc, seq in zip(input_descs, input_seqs): - print(f"current desc: {desc}") - print(f"current seq: {seq}") if seq in sequence_features: all_chain_features[desc] = copy.deepcopy( sequence_features[seq] @@ -1274,12 +1252,6 @@ def process_fasta(self, chain_alignment_index = None chain_alignment_dir = os.path.join(alignment_dir, desc) - print(f"chain_id: {desc}") - print(f"sequence: {seq}") - print(f"description: {desc}") - print(f"chain_alignment_dir: {chain_alignment_dir}") - print(f"chain_alignment_index: {chain_alignment_index}") - print(f"is_homomer_or_monomer: {is_homomer_or_monomer}") chain_features = self._process_single_chain( chain_id=desc, sequence=seq, diff --git a/run_pretrained_openfold.py b/run_pretrained_openfold.py index fbbfad0f..682aa7f4 100644 --- a/run_pretrained_openfold.py +++ b/run_pretrained_openfold.py @@ -180,37 +180,37 @@ def main(args): args.use_single_seq_mode = True config = model_config(args.config_preset, long_sequence_inference=args.long_sequence_inference) - logger.info("#### INPUT / OUTPUT ####") - logger.info(f"fasta_dir: {args.fasta_dir}") - logger.info(f"output_dir: {args.output_dir}") - logger.info(f"output prediction filenames: {args.output_postfix}") - logger.info(f"output prediction filenames: {args.cif_output}") - logger.info(f"save embedded outputs: {args.save_outputs}") - - logger.info("#### PRESETS ####") - logger.info(f"skip_relaxation: {args.skip_relaxation}") - logger.info(f"use_precomputed_alignments: {args.use_precomputed_alignments}") - logger.info(f"use_single_seq_mode: {args.use_single_seq_mode}") - logger.info(f"long_sequence_inference: {args.long_sequence_inference}") - logger.info(f"Threads: {args.cpus}") - logger.info(f"multimer_ri_gap: {args.multimer_ri_gap}") - logger.info(f"subtract_plddt: {args.subtract_plddt}") - - logger.info("#### MODEL PARAMS ####") - logger.info(f"Model: {args.config_preset}") - logger.info(f"trace_model: {args.trace_model}") - - logger.info("#### DATABASE PARAMS ####") - logger.info(f"template_mmcif_dir: {args.template_mmcif_dir}") - logger.info(f"max_template_date: {args.max_template_date}") - logger.info(f"max_templates: {config.data.predict.max_templates}") - logger.info(f"release_dates_path: {args.release_dates_path}") - logger.info(f"obsolete_pdbs_path: {args.obsolete_pdbs_path}") - - logger.info("#### GPU / AI PARAMS ####") - logger.info(f"model_device: {args.model_device}") - logger.info(f"openfold_checkpoint_path: {args.openfold_checkpoint_path}") - logger.info(f"jax_param_path: {args.jax_param_path}") + print("#### INPUT / OUTPUT ####") + print(f"fasta_dir: {args.fasta_dir}") + print(f"output_dir: {args.output_dir}") + print(f"output prediction filenames: {args.output_postfix}") + print(f"output prediction filenames: {args.cif_output}") + print(f"save embedded outputs: {args.save_outputs}") + + print("#### PRESETS ####") + print(f"skip_relaxation: 
{args.skip_relaxation}") + print(f"use_precomputed_alignments: {args.use_precomputed_alignments}") + print(f"use_single_seq_mode: {args.use_single_seq_mode}") + print(f"long_sequence_inference: {args.long_sequence_inference}") + print(f"Threads: {args.cpus}") + print(f"multimer_ri_gap: {args.multimer_ri_gap}") + print(f"subtract_plddt: {args.subtract_plddt}") + + print("#### MODEL PARAMS ####") + print(f"Model: {args.config_preset}") + print(f"trace_model: {args.trace_model}") + + print("#### DATABASE PARAMS ####") + print(f"template_mmcif_dir: {args.template_mmcif_dir}") + print(f"max_template_date: {args.max_template_date}") + print(f"max_templates: {config.data.predict.max_templates}") + print(f"release_dates_path: {args.release_dates_path}") + print(f"obsolete_pdbs_path: {args.obsolete_pdbs_path}") + + print("#### GPU / AI PARAMS ####") + print(f"model_device: {args.model_device}") + print(f"openfold_checkpoint_path: {args.openfold_checkpoint_path}") + print(f"jax_param_path: {args.jax_param_path}") if args.trace_model: if not config.data.predict.fixed_size: From 2b2db7c402396aae47c8d04869a196416105c436 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Thu, 25 Apr 2024 11:15:26 -0400 Subject: [PATCH 28/64] added logging --- run_pretrained_openfold.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/run_pretrained_openfold.py b/run_pretrained_openfold.py index 682aa7f4..cc379a90 100644 --- a/run_pretrained_openfold.py +++ b/run_pretrained_openfold.py @@ -180,6 +180,8 @@ def main(args): args.use_single_seq_mode = True config = model_config(args.config_preset, long_sequence_inference=args.long_sequence_inference) + + print("") print("#### INPUT / OUTPUT ####") print(f"fasta_dir: {args.fasta_dir}") print(f"output_dir: {args.output_dir}") @@ -187,6 +189,7 @@ def main(args): print(f"output prediction filenames: {args.cif_output}") print(f"save embedded outputs: {args.save_outputs}") + print("") print("#### PRESETS ####") print(f"skip_relaxation: {args.skip_relaxation}") print(f"use_precomputed_alignments: {args.use_precomputed_alignments}") @@ -196,10 +199,12 @@ def main(args): print(f"multimer_ri_gap: {args.multimer_ri_gap}") print(f"subtract_plddt: {args.subtract_plddt}") + print("") print("#### MODEL PARAMS ####") print(f"Model: {args.config_preset}") print(f"trace_model: {args.trace_model}") + print("") print("#### DATABASE PARAMS ####") print(f"template_mmcif_dir: {args.template_mmcif_dir}") print(f"max_template_date: {args.max_template_date}") @@ -207,11 +212,14 @@ def main(args): print(f"release_dates_path: {args.release_dates_path}") print(f"obsolete_pdbs_path: {args.obsolete_pdbs_path}") + print("") print("#### GPU / AI PARAMS ####") print(f"model_device: {args.model_device}") print(f"openfold_checkpoint_path: {args.openfold_checkpoint_path}") print(f"jax_param_path: {args.jax_param_path}") + print("") + if args.trace_model: if not config.data.predict.fixed_size: raise ValueError( @@ -269,6 +277,7 @@ def main(args): for fasta_file in list_files_with_extensions(args.fasta_dir, (".fasta", ".fa")): # Gather input sequences fasta_path = os.path.join(args.fasta_dir, fasta_file) + print(f"reading fasta: {fasta_path}") with open(fasta_path, "r") as fp: data = fp.read() @@ -291,6 +300,7 @@ def main(args): sorted_targets = sorted(zip(tag_list, seq_list), key=seq_sort_fn) feature_dicts = {} + logger.info(f"loading model information...") model_generator = load_models_from_command_line( config, args.model_device, @@ -307,6 +317,7 @@ def 
main(args): output_name = f'{output_name}_{args.output_postfix}' # Does nothing if the alignments have already been computed + logger.info(f"Perform alignment if not already done...") precompute_alignments(tags, seqs, alignment_dir, args) feature_dict = feature_dicts.get(tag, None) @@ -350,6 +361,7 @@ def main(args): ) cur_tracing_interval = rounded_seqlen + logger.info(f"Running fold...") out = run_model(model, processed_feature_dict, tag, args.output_dir) # Toss out the recycling dimensions --- we don't need them anymore From d377bc430d20e8f05743ced551209a24c3e4bf76 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Thu, 25 Apr 2024 11:36:16 -0400 Subject: [PATCH 29/64] added QC plot script --- run_pretrained_openfold.py | 5 +- visualize_alphafold_results.py | 86 ++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 3 deletions(-) create mode 100644 visualize_alphafold_results.py diff --git a/run_pretrained_openfold.py b/run_pretrained_openfold.py index cc379a90..25412116 100644 --- a/run_pretrained_openfold.py +++ b/run_pretrained_openfold.py @@ -186,7 +186,7 @@ def main(args): print(f"fasta_dir: {args.fasta_dir}") print(f"output_dir: {args.output_dir}") print(f"output prediction filenames: {args.output_postfix}") - print(f"output prediction filenames: {args.cif_output}") + print(f"cif_output: {args.cif_output}") print(f"save embedded outputs: {args.save_outputs}") print("") @@ -300,7 +300,7 @@ def main(args): sorted_targets = sorted(zip(tag_list, seq_list), key=seq_sort_fn) feature_dicts = {} - logger.info(f"loading model information...") + logger.info(f"Loading model information...") model_generator = load_models_from_command_line( config, args.model_device, @@ -361,7 +361,6 @@ def main(args): ) cur_tracing_interval = rounded_seqlen - logger.info(f"Running fold...") out = run_model(model, processed_feature_dict, tag, args.output_dir) # Toss out the recycling dimensions --- we don't need them anymore diff --git a/visualize_alphafold_results.py b/visualize_alphafold_results.py new file mode 100644 index 00000000..519fd347 --- /dev/null +++ b/visualize_alphafold_results.py @@ -0,0 +1,86 @@ +# taken from: https://github.com/jasperzuallaert/VIBFold/blob/main/visualize_alphafold_results.py +import glob +import math +import os +import numpy as np +from matplotlib import pyplot as plt +import argparse +import pickle + + +def get_pae_plddt(model_names): + out = {} + for i, name in enumerate(model_names): + d = pickle.load(open(name, 'rb')) + basename = os.path.basename(name) + basename = basename[basename.index('model'):] + out[f'{basename}'] = {'plddt': d['plddt'], 'pae': d['predicted_aligned_error']} + return out + + +def generate_output_images(feature_dict, out_dir, name, pae_plddt_per_model): + msa = feature_dict['msa'] + seqid = (np.array(msa[0] == msa).mean(-1)) + seqid_sort = seqid.argsort() + non_gaps = (msa != 21).astype(float) + non_gaps[non_gaps == 0] = np.nan + final = non_gaps[seqid_sort] * seqid[seqid_sort, None] + + ################################################################## + plt.figure(figsize=(14, 4), dpi=100) + ################################################################## + plt.subplot(1, 2, 1) + plt.title("Sequence coverage") + plt.imshow(final, + interpolation='nearest', aspect='auto', + cmap="rainbow_r", vmin=0, vmax=1, origin='lower') + plt.plot((msa != 21).sum(0), color='black') + plt.xlim(-0.5, msa.shape[1] - 0.5) + plt.ylim(-0.5, msa.shape[0] - 0.5) + plt.colorbar(label="Sequence identity to query", ) 
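+    # Rows above: MSA sequences sorted by identity to the query; color encodes
+    # that identity over non-gap positions (residue code 21 is the gap), and
+    # the black trace counts non-gap sequences covering each position.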
+ plt.xlabel("Positions") + plt.ylabel("Sequences") + + ################################################################## + plt.subplot(1, 2, 2) + plt.title("Predicted LDDT per position") + for model_name, value in pae_plddt_per_model.items(): + plt.plot(value["plddt"], label=model_name) + plt.ylim(0, 100) + plt.ylabel("Predicted LDDT") + plt.xlabel("Positions") + plt.savefig(f"{out_dir}/{name + ('_' if name else '')}coverage_LDDT.png") + ################################################################## + + ################################################################## + num_models = 5 # columns + num_runs_per_model = math.ceil(len(model_names) / num_models) + fig = plt.figure(figsize=(3 * num_models, 2 * num_runs_per_model), dpi=100) + for n, (model_name, value) in enumerate(pae_plddt_per_model.items()): + plt.subplot(num_runs_per_model, num_models, n + 1) + plt.title(model_name) + plt.imshow(value["pae"], label=model_name, cmap="bwr", vmin=0, vmax=30) + plt.colorbar() + fig.tight_layout() + plt.savefig(f"{out_dir}/{name + ('_' if name else '')}PAE.png") + ################################################################## + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--input_dir', dest='input_dir', required=True) + parser.add_argument('--name', dest='name') + parser.set_defaults(name='') + parser.add_argument('--output_dir', dest='output_dir') + parser.set_defaults(output_dir='') + args = parser.parse_args() + + feature_dict = pickle.load(open(f'{args.input_dir}/features.pkl', 'rb')) + # is_multimer = ('result_model_1_multimer.pkl' in [os.path.basename(f) for f in os.listdir(path=args.input_dir)]) + # is_ptm = ('result_model_1_ptm.pkl' in [os.path.basename(f) for f in os.listdir(path=args.input_dir)]) + # model_names = [f'{args.input_dir}/result_model_{f}{"_multimer" if is_multimer else "_ptm" if is_ptm else ""}.pkl' for f in range(1,6)] + model_names = sorted(glob.glob(f'{args.input_dir}/result_*.pkl')) + + pae_plddt_per_model = get_pae_plddt(model_names) + generate_output_images(feature_dict, args.output_dir if args.output_dir else args.input_dir, args.name, + pae_plddt_per_model) From 14a329d2a8b2aa84692302f0dd8d97ce2a0b012f Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Thu, 25 Apr 2024 13:38:26 -0400 Subject: [PATCH 30/64] patch plot script --- visualize_alphafold_results.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/visualize_alphafold_results.py b/visualize_alphafold_results.py index 519fd347..19459ca0 100644 --- a/visualize_alphafold_results.py +++ b/visualize_alphafold_results.py @@ -79,7 +79,7 @@ def generate_output_images(feature_dict, out_dir, name, pae_plddt_per_model): # is_multimer = ('result_model_1_multimer.pkl' in [os.path.basename(f) for f in os.listdir(path=args.input_dir)]) # is_ptm = ('result_model_1_ptm.pkl' in [os.path.basename(f) for f in os.listdir(path=args.input_dir)]) # model_names = [f'{args.input_dir}/result_model_{f}{"_multimer" if is_multimer else "_ptm" if is_ptm else ""}.pkl' for f in range(1,6)] - model_names = sorted(glob.glob(f'{args.input_dir}/result_*.pkl')) + model_names = sorted(glob.glob(f'{args.input_dir}/*.pkl')) pae_plddt_per_model = get_pae_plddt(model_names) generate_output_images(feature_dict, args.output_dir if args.output_dir else args.input_dir, args.name, From b1e4a84500b85290e6846692e44604d2dff8143d Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Thu, 25 Apr 2024 13:42:10 -0400 
Subject: [PATCH 31/64] patch plot script --- visualize_alphafold_results.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/visualize_alphafold_results.py b/visualize_alphafold_results.py index 19459ca0..6c25c720 100644 --- a/visualize_alphafold_results.py +++ b/visualize_alphafold_results.py @@ -75,11 +75,11 @@ def generate_output_images(feature_dict, out_dir, name, pae_plddt_per_model): parser.set_defaults(output_dir='') args = parser.parse_args() - feature_dict = pickle.load(open(f'{args.input_dir}/features.pkl', 'rb')) + feature_dict = pickle.load(open(f'{args.input_dir}/DTX1_1-DTX2_1_model_1_multimer_v3_output_dict.pkl', 'rb')) # is_multimer = ('result_model_1_multimer.pkl' in [os.path.basename(f) for f in os.listdir(path=args.input_dir)]) # is_ptm = ('result_model_1_ptm.pkl' in [os.path.basename(f) for f in os.listdir(path=args.input_dir)]) # model_names = [f'{args.input_dir}/result_model_{f}{"_multimer" if is_multimer else "_ptm" if is_ptm else ""}.pkl' for f in range(1,6)] - model_names = sorted(glob.glob(f'{args.input_dir}/*.pkl')) + model_names = sorted(glob.glob(f'{args.input_dir}/DTX1_1-DTX2_1_model_1_multimer_v3_output_dict.pkl')) pae_plddt_per_model = get_pae_plddt(model_names) generate_output_images(feature_dict, args.output_dir if args.output_dir else args.input_dir, args.name, From b4d5a08f4e9e0b4e593fe3e1968d16a1b4a3af16 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Thu, 25 Apr 2024 16:13:20 -0400 Subject: [PATCH 32/64] patch to save feature dict in pickle for post plots --- run_pretrained_openfold.py | 4 + visualize_alphafold_results.v2.py | 323 ++++++++++++++++++++++++++++++ 2 files changed, 327 insertions(+) create mode 100644 visualize_alphafold_results.v2.py diff --git a/run_pretrained_openfold.py b/run_pretrained_openfold.py index 25412116..f445a0d9 100644 --- a/run_pretrained_openfold.py +++ b/run_pretrained_openfold.py @@ -339,6 +339,10 @@ def main(args): feature_dicts[tag] = feature_dict + print("Storing feature dict...") + with open(os.path.join(args.output_dir, f"{output_name}_feature_dict.pickle"), "wb") as fp: + pickle.dump(feature_dict, fp, protocol=pickle.HIGHEST_PROTOCOL) + processed_feature_dict = feature_processor.process_features( feature_dict, mode='predict', is_multimer=is_multimer ) diff --git a/visualize_alphafold_results.v2.py b/visualize_alphafold_results.v2.py new file mode 100644 index 00000000..f00e9b51 --- /dev/null +++ b/visualize_alphafold_results.v2.py @@ -0,0 +1,323 @@ +# taken from: https://colab.research.google.com/github/mattarnoldbio/alphapickle/blob/main/AlphaPickle.ipynb#scrollTo=jQUP8Ab3RN7s +import argparse +import sys +import pickle as pkl +#from zipfile import Path +import numpy as np +import pandas as pd +from matplotlib import pyplot as plt, colors as cols, cm as cm +import json +from sys import exit +import os +from Bio import PDB as pdb +import io + +# plot size, in inches. 
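+# plot_size is the figure edge length; together with plot_increment below it
+# is passed to the plot_pLDDT/plot_PAE methods as size_in_inches and
+# axis_label_increment.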
+plot_size = 16 + +# @markdown Input value to increment plot axes by (this may need finetuning based on output) +plot_increment = "50" # @param[10,25,50,100,250,500] +plot_increment = int(plot_increment) + + +# Define class for AlphaFold metadata file and class methods +class AlphaFoldMetaData(object): + def __init__(self, PathToFile, FastaSequence=None, ranking=None): + # Define attributes + self.PathToFile = PathToFile + self.FastaSequence = FastaSequence + self.saving_filename = self.PathToFile.split("/")[-1].split(".")[0] + self.saving_pathname = self.PathToFile.split(self.saving_filename)[0] + if ranking: + self.saving_filename = "ranked_{}".format(ranking) + + # Generate a plot of pLDDT value + def plot_pLDDT(self, size_in_inches=12, axis_label_increment=100): + x = list(range(0, len(self.pLDDT), 1)) + y = list(self.pLDDT) + + # Use standard AlphaFold colors + cmap = cols.LinearSegmentedColormap.from_list("", ["red", "orange", "yellow", "cornflowerblue", "blue"]) + + plt.figure(figsize=(size_in_inches, (size_in_inches / 2))) + ticks = np.arange(0, len(self.pLDDT), axis_label_increment) + plt.xticks(ticks, fontname="Helvetica") + plt.yticks(fontname="Helvetica") + plt.xlabel("Residue index", size=14, fontweight="bold", fontname="Helvetica") + plt.ylabel("Predicted LDDT", size=14, fontweight="bold", fontname="Helvetica") + plt.scatter(x, y, c=y, cmap=cmap, s=5) + plt.clim(0, 100) + scale = plt.colorbar(shrink=0.5) + scale.set_label(label="Predicted LDDT", size=12, fontweight="bold", fontname="Helvetica") + # Save to directory with pickle file in + plt.savefig('{}/{}_pLDDT.png'.format(self.saving_pathname, self.saving_filename), dpi=300) + + # Generate a plot from PAE measurements + + def plot_PAE(self, size_in_inches=12, axis_label_increment=100): + ticks = np.arange(0, self.PAE[1].size, axis_label_increment) + plt.figure(figsize=(size_in_inches, size_in_inches)) + PAE = plt.imshow(self.PAE) + plt.xticks(ticks, fontname="Helvetica") + plt.yticks(ticks, fontname="Helvetica") + plt.xlabel("Residue index", size=14, fontweight="bold", fontname="Helvetica") + plt.ylabel("Residue index", size=14, fontweight="bold", fontname="Helvetica") + scale = plt.colorbar(PAE, shrink=0.5) + scale.set_label(label="Predicted error (Å)", size=12, fontweight="bold", fontname="Helvetica") + + # Save plot + plt.savefig('{}/{}_PAE.png'.format(self.saving_pathname, self.saving_filename), dpi=300) + + # Generate dataframe from PAE data and save to csv + pd_PAE = pd.DataFrame(self.PAE) + pd_PAE.to_csv('{}/{}_PAE.csv'.format(self.saving_pathname, self.saving_filename)) + + +class AlphaFoldPickle(AlphaFoldMetaData): + + def __init__(self, PathToFile, FastaSequence=None, ranking=None): + super().__init__(PathToFile, FastaSequence, ranking) # Define attributes + if ranking: + self.saving_filename = "ranked_{}".format(ranking) + self.data = [] + self.PAE = None + + # Extract pickled data + with (open("{}".format(self.PathToFile), "rb")) as openfile: + while True: + try: + self.data.append(pkl.load(openfile)) + except EOFError: + break + + # Try statement accounts for data run using non-pTM models, with no PAE output + try: + self.PAE = self.data[0]['predicted_aligned_error'] + except: + print("PAE model data not present. 
To access this performance metric, run AlphaFold" + "using pTM-enabled models.") + + # Define pLDDT + self.pLDDT = self.data[0]['plddt'] + + # Generate a ChimeraX attribute file from pLDDT measurements + def write_pLDDT_file(self): + seqMismatch = False + pd_lDDT = pd.DataFrame(self.pLDDT) + # Name dataframe column + pd_lDDT.columns = ["pLDDT"] + + # If the fasta file was provided: + if self.FastaSequence != None: + + # Open the fasta file in read mode + with (open("{}".format(self.FastaSequence), "r")) as openfile: + fasta = openfile.read() + + # Delete header line and remove line-breaks + sequence = fasta.split("\n", 1)[1].replace("\n", "") + + # Check that the lengths of the two sequences match + if len(sequence) != len(pd_lDDT): + + # If not, ignore the fasta file + print( + "Length of sequence in fasta file provided ({}) does not match length of sequence used in AlphaFold prediction ({}). Ignoring fasta file.".format( + len(sequence), len(pd_lDDT))) + seqMismatch = True + # If they do, + else: + # Convert the fasta sequence into a residue list + list_sequence = [] + for item in sequence: + list_sequence.append(item) + + # Convert the list into a pandas series + pd_sequence = pd.Series(list_sequence) + + # Insert the series into the dataframe at column 1 to act as labels for the data + pd_lDDT.insert(0, "Residue", pd_sequence) + + # Otherwise, remind user to check that they have used corret input files + else: + print("Number of residues for which pLDDT is provided: ", len(pd_lDDT), + "If this does not match the length of your sequence, please double check the input file.") + + # Tell python not to elide middle rows of dataframe when printing to std.out + pd.set_option("display.max_rows", None, "display.max_columns", None) + + # Save dataframe to ./outputfiles with same name as original pickle and .csv extension + pd_lDDT.to_csv('{}/{}_pLDDT.csv'.format(self.saving_pathname, self.saving_filename)) + # Delete residue ID + if self.FastaSequence != None and seqMismatch == False: + lDDT_table = pd_lDDT.drop('Residue', axis=1) + else: + lDDT_table = pd_lDDT + + # Initialise list to store Chimera-style residue identifiers (":x", where x = residue number) + residue_list = [] + + # Populate this list + for residue in range(0, len(lDDT_table)): + residue_list.append(":{}".format(residue + 1)) + + # Save to pandas format + chimerax_numbering = pd.Series(residue_list) + + # Insert in the first column of the dataframe, to satisfy ChimeraX formatting + lDDT_table.insert(0, 'Numbering', chimerax_numbering) + + # Tidy indices so the first label is 1 not 0 + pd_lDDT.index += 1 + + # Create a file to save the Chimera attribute output into + + with (open('{}/{}_lDDT.txt'.format(self.saving_pathname, self.saving_filename), 'w+')) as openfile: + + # Write file header in correct format + openfile.write('attribute: pLDDTvalue\nmatch mode: 1-to-1\nrecipient: residues\n') + + # Iterate over rows of dataframe, writing residue ID and lDDT value to file with correct formatting + for i, row in lDDT_table.iterrows(): + openfile.write("\t{}\t{}\n".format(row['Numbering'], row['pLDDT'])) + + return pd_lDDT + + +class AlphaFoldJson: + def __init__(self, PathToDirectory): + self.PathToDirectory = PathToDirectory + self.RankingDebug = [] + try: + with open("{}/ranking_debug.json".format(self.PathToDirectory)) as jsonfile: + self.RankingDebugRaw = json.load(jsonfile) + for index in enumerate(self.RankingDebugRaw['order']): + self.RankingDebug.append(index) + except: + exit( + "To use batch processing, please ensure that 
the ranking_debug.json file and the result_model_n.pkl files are present in the directory issued in the command. Exiting AlphaPickle now...") + + +class AlphaFoldPDB(AlphaFoldMetaData): + def loadCleanStructure(self, id, filePath): + standardResidues = ["ALA", "ARG", "ASN", "ASP", "CYS", "GLU", "GLN", "GLY", "HIS", "ILE", "LEU", "LYS", "MET", + "PHE", "PRO", "SER", "THR", "TRP", "TYR", "VAL"] + + parser = pdb.PDBParser() + parsedStructure = parser.get_structure(id, filePath) + for chain in parsedStructure.get_chains(): + removeResidues = list() + for i, residue in enumerate(chain.get_residues()): + if residue.resname not in standardResidues: + removeResidues.append(residue.id) + print(residue.id) + [chain.detach_child(id) for id in removeResidues] + + return parsedStructure + + def extractPLDDT(self, PDBobject): + pLDDT = [] + for residue in PDBobject.get_residues(): + i = 0 + for atom in residue.get_atoms(): + while i < 1: + pLDDT.append(atom.bfactor) + i += 1 + pLDDT_series = pd.Series(pLDDT) + return pLDDT_series + + def __init__(self, PathToFile, FastaSequence=None, ranking=None): + super().__init__(PathToFile, FastaSequence, ranking) + # Define attributes + if ranking: + self.saving_filename = "ranked_{}".format(ranking) + self.structure = self.loadCleanStructure("test", PathToFile) + self.pLDDT = self.extractPLDDT(self.structure) + self.data = [] + self.PAE = None + + def PDB_write_pLDDT(self): + residueNumbers = pd.Series(range(1, len(self.pLDDT) + 1)) + if len(residueNumbers) != len(self.pLDDT): + print("Oops") + else: + pd_lDDT = pd.DataFrame(self.pLDDT) + pd_lDDT.columns = ["pLDDT"] + pd_lDDT.insert(0, "Residue", residueNumbers) + pd_lDDT.to_csv('{}/{}_pLDDT.csv'.format(self.saving_pathname, self.saving_filename)) + + +class AlphaFoldPAEJson(AlphaFoldMetaData): + def extractPAEfromJson(self, PathToFile): + + with open(PathToFile, 'r') as file: + jsonstring = json.load(file) + if 'predicted_aligned_error' in jsonstring[0]: + pae = jsonstring[0]['predicted_aligned_error'] + else: + residue1 = jsonstring[0]['residue1'] + residue2 = jsonstring[0]['residue2'] + pae = jsonstring[0]['distance'] + + if 'predicted_aligned_error' in jsonstring[0]: + paeArray = np.array(pae) + else: + paeArray = np.ones((max(residue1), (max(residue2)))) + for i, j, n in zip(residue1, residue2, pae): + paeArray[int(i - 1), int(j - 1)] = n + + return paeArray + + def __init__(self, PathToFile, FastaSequence=None, ranking=None): + super().__init__(PathToFile, FastaSequence, ranking) + if ranking: + self.saving_filename = "ranked_{}".format(ranking) + + self.PAE = self.extractPAEfromJson(PathToFile) + self.pLDDT = None + + +# use_files_from_google_drive = False #@param {type:"boolean"} +# +# if not use_files_from_google_drive: +# print("Select PAE files for upload") +# PAEfiles = list(google.colab.files.upload().keys()) +# print("Select pLDDT files for upload") +# pLDDTfiles = list(google.colab.files.upload().keys()) +# else: +# #print("Select PAE files for upload") +# path_to_PAE_file_in_drive = "" #@param {type:"string"} +# if ":" in path_to_PAE_file_in_drive: +# path_to_PAE_file_in_drive = path_to_PAE_file_in_drive.split(":") +# else: +# path_to_PAE_file_in_drive = [path_to_PAE_file_in_drive] +# path_to_pLDDT_file_in_drive = "" #@param {type:"string"} +# if ":" in path_to_pLDDT_file_in_drive: +# path_to_pLDDT_file_in_drive = path_to_pLDDT_file_in_drive.split(":") +# else: +# path_to_pLDDT_file_in_drive = [path_to_pLDDT_file_in_drive] + +def generate_plots(pkl, outdir, name): + results = AlphaFoldPickle(pkl, 
None) + results.saving_pathname = outdir + results.saving_filename = name + if type(results.PAE) == np.ndarray: + print("Plotting PAE for {} and saving to csv".format(pkl)) + results.plot_PAE(size_in_inches=plot_size, axis_label_increment=plot_increment) + + results = AlphaFoldPickle(pkl, None) + results.saving_filename = name + results.saving_pathname = outdir + results.write_pLDDT_file() + print("Plotting pLDDT for {} and saving to csv".format(pkl)) + results.plot_pLDDT(size_in_inches=plot_size, axis_label_increment=plot_increment) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--input_pkl', dest='input_pkl', required=True) + parser.add_argument('--output_dir', dest='output_dir', required=True) + parser.add_argument('--basename', dest='basename', required=True) + args = parser.parse_args() + + generate_plots(args.input_pkl, args.output_dir, args.basename) From 6c4e80aff2ec8ac1cfa02ce90115a635b3eeba13 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Fri, 26 Apr 2024 08:17:21 -0400 Subject: [PATCH 33/64] patch to save feature dict in pickle for post plots --- run_pretrained_openfold.py | 10 +- scripts/generate_coverage_plot.py | 112 ++++++++++++++++++ .../visualize_alphafold_results.py | 0 .../visualize_alphafold_results.v2.py | 0 4 files changed, 119 insertions(+), 3 deletions(-) create mode 100644 scripts/generate_coverage_plot.py rename visualize_alphafold_results.py => scripts/visualize_alphafold_results.py (100%) rename visualize_alphafold_results.v2.py => scripts/visualize_alphafold_results.v2.py (100%) diff --git a/run_pretrained_openfold.py b/run_pretrained_openfold.py index f445a0d9..5d25166c 100644 --- a/run_pretrained_openfold.py +++ b/run_pretrained_openfold.py @@ -339,9 +339,9 @@ def main(args): feature_dicts[tag] = feature_dict - print("Storing feature dict...") - with open(os.path.join(args.output_dir, f"{output_name}_feature_dict.pickle"), "wb") as fp: - pickle.dump(feature_dict, fp, protocol=pickle.HIGHEST_PROTOCOL) + # print("Storing feature dict...") + # with open(os.path.join(args.output_dir, f"{output_name}_feature_dict.pickle"), "wb") as fp: + # pickle.dump(feature_dict, fp, protocol=pickle.HIGHEST_PROTOCOL) processed_feature_dict = feature_processor.process_features( feature_dict, mode='predict', is_multimer=is_multimer @@ -365,6 +365,10 @@ def main(args): ) cur_tracing_interval = rounded_seqlen + print("Storing feature dict...") + with open(os.path.join(args.output_dir, f"{output_name}_feature_dict.pickle"), "wb") as fp: + pickle.dump(feature_dict, fp, protocol=pickle.HIGHEST_PROTOCOL) + out = run_model(model, processed_feature_dict, tag, args.output_dir) # Toss out the recycling dimensions --- we don't need them anymore diff --git a/scripts/generate_coverage_plot.py b/scripts/generate_coverage_plot.py new file mode 100644 index 00000000..6bbe276e --- /dev/null +++ b/scripts/generate_coverage_plot.py @@ -0,0 +1,112 @@ +# taken from: https://github.com/sokrypton/ColabFold/blob/main/colabfold/plot.py +# and https://github.com/sokrypton/ColabFold/blob/main/colabfold/batch.py +import argparse +import os +from pathlib import Path +import pickle as pkl +import numpy as np +from matplotlib import pyplot as plt + + +def plot_predicted_alignment_error( + jobname: str, num_models: int, outs: dict, result_dir: Path, show: bool = False +): + plt.figure(figsize=(3 * num_models, 2), dpi=100) + for n, (model_name, value) in enumerate(outs.items()): + plt.subplot(1, num_models, n + 1) + 
plt.title(model_name) + plt.imshow(value["pae"], label=model_name, cmap="bwr", vmin=0, vmax=30) + plt.colorbar() + plt.savefig(result_dir.joinpath(jobname + "_PAE.png")) + if show: + plt.show() + plt.close() + + +def plot_msa_v2(feature_dict, sort_lines=True, dpi=100): + seq = feature_dict["msa"] + if "asym_id" in feature_dict: + Ls = [0] + k = feature_dict["asym_id"][0] + for i in feature_dict["asym_id"]: + if i == k: + Ls[-1] += 1 + else: + Ls.append(1) + k = i + else: + Ls = [len(seq)] + Ln = np.cumsum([0] + Ls) + + try: + N = feature_dict["num_alignments"][0] + except: + N = feature_dict["num_alignments"] + + msa = feature_dict["msa"][:N] + gap = msa != 21 + qid = msa == seq + gapid = np.stack([gap[:, Ln[i]:Ln[i + 1]].max(-1) for i in range(len(Ls))], -1) + lines = [] + Nn = [] + for g in np.unique(gapid, axis=0): + i = np.where((gapid == g).all(axis=-1)) + qid_ = qid[i] + gap_ = gap[i] + seqid = np.stack([qid_[:, Ln[i]:Ln[i + 1]].mean(-1) for i in range(len(Ls))], -1).sum(-1) / (g.sum(-1) + 1e-8) + non_gaps = gap_.astype(float) + non_gaps[non_gaps == 0] = np.nan + if sort_lines: + lines_ = non_gaps[seqid.argsort()] * seqid[seqid.argsort(), None] + else: + lines_ = non_gaps[::-1] * seqid[::-1, None] + Nn.append(len(lines_)) + lines.append(lines_) + + Nn = np.cumsum(np.append(0, Nn)) + lines = np.concatenate(lines, 0) + plt.figure(figsize=(8, 5), dpi=dpi) + plt.title("Sequence coverage") + plt.imshow(lines, + interpolation='nearest', aspect='auto', + cmap="rainbow_r", vmin=0, vmax=1, origin='lower', + extent=(0, lines.shape[1], 0, lines.shape[0])) + for i in Ln[1:-1]: + plt.plot([i, i], [0, lines.shape[0]], color="black") + for j in Nn[1:-1]: + plt.plot([0, lines.shape[1]], [j, j], color="black") + + plt.plot((np.isnan(lines) == False).sum(0), color='black') + plt.xlim(0, lines.shape[1]) + plt.ylim(0, lines.shape[0]) + plt.colorbar(label="Sequence identity to query") + plt.xlabel("Positions") + plt.ylabel("Sequences") + return plt + + +def generate_coverage(fd_pkl, output_dir, name, dpi=100): + feature_dict_pkl = [] + with (open("{}".format(fd_pkl), "rb")) as openfile: + while True: + try: + feature_dict_pkl.append(pkl.load(openfile)) + except EOFError: + break + feature_dict = feature_dict_pkl[0] + msa_plot = plot_msa_v2(feature_dict, dpi=dpi) + coverage_png = os.path.join(output_dir,f"{name}_coverage.png") + msa_plot.savefig(str(coverage_png), bbox_inches='tight') + msa_plot.close() + # result_files.append(coverage_png) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--feature_dict_pkl', dest='feature_dict_pkl', required=True) + parser.add_argument('--output_dir', dest='output_dir', required=True) + parser.add_argument('--basename', dest='basename', required=True) + args = parser.parse_args() + + # generate_coverage(args.input_pkl, args.output_dir, args.basename) + generate_coverage(args.feature_dict_pkl, args.output_dir, args.basename) diff --git a/visualize_alphafold_results.py b/scripts/visualize_alphafold_results.py similarity index 100% rename from visualize_alphafold_results.py rename to scripts/visualize_alphafold_results.py diff --git a/visualize_alphafold_results.v2.py b/scripts/visualize_alphafold_results.v2.py similarity index 100% rename from visualize_alphafold_results.v2.py rename to scripts/visualize_alphafold_results.v2.py From 9fc31effcb5e60fe25fff2ec11db3488567606c6 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Fri, 26 Apr 2024 11:32:30 -0400 Subject: [PATCH 34/64] start --- 
scripts/generate_coverage_plot.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/generate_coverage_plot.py b/scripts/generate_coverage_plot.py index 6bbe276e..14419942 100644 --- a/scripts/generate_coverage_plot.py +++ b/scripts/generate_coverage_plot.py @@ -109,4 +109,5 @@ def generate_coverage(fd_pkl, output_dir, name, dpi=100): args = parser.parse_args() # generate_coverage(args.input_pkl, args.output_dir, args.basename) + print("gen coverage plot") generate_coverage(args.feature_dict_pkl, args.output_dir, args.basename) From 21d1c9b9ff4fee516366b2a21468cd711eb1c8da Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Mon, 29 Apr 2024 08:08:14 -0400 Subject: [PATCH 35/64] plots generation scripts --- scripts/generate_coverage_plot.py | 21 +---- ...sults.v2.py => generate_pae_plddt_plot.py} | 39 +++++---- scripts/visualize_alphafold_results.py | 86 ------------------- 3 files changed, 25 insertions(+), 121 deletions(-) rename scripts/{visualize_alphafold_results.v2.py => generate_pae_plddt_plot.py} (87%) delete mode 100644 scripts/visualize_alphafold_results.py diff --git a/scripts/generate_coverage_plot.py b/scripts/generate_coverage_plot.py index 14419942..b951cc8e 100644 --- a/scripts/generate_coverage_plot.py +++ b/scripts/generate_coverage_plot.py @@ -8,23 +8,8 @@ from matplotlib import pyplot as plt -def plot_predicted_alignment_error( - jobname: str, num_models: int, outs: dict, result_dir: Path, show: bool = False -): - plt.figure(figsize=(3 * num_models, 2), dpi=100) - for n, (model_name, value) in enumerate(outs.items()): - plt.subplot(1, num_models, n + 1) - plt.title(model_name) - plt.imshow(value["pae"], label=model_name, cmap="bwr", vmin=0, vmax=30) - plt.colorbar() - plt.savefig(result_dir.joinpath(jobname + "_PAE.png")) - if show: - plt.show() - plt.close() - - def plot_msa_v2(feature_dict, sort_lines=True, dpi=100): - seq = feature_dict["msa"] + seq = feature_dict["msa"][0] if "asym_id" in feature_dict: Ls = [0] k = feature_dict["asym_id"][0] @@ -85,7 +70,7 @@ def plot_msa_v2(feature_dict, sort_lines=True, dpi=100): return plt -def generate_coverage(fd_pkl, output_dir, name, dpi=100): +def generate_coverage(fd_pkl, output_dir, name, dpi=500): feature_dict_pkl = [] with (open("{}".format(fd_pkl), "rb")) as openfile: while True: @@ -94,7 +79,7 @@ def generate_coverage(fd_pkl, output_dir, name, dpi=100): except EOFError: break feature_dict = feature_dict_pkl[0] - msa_plot = plot_msa_v2(feature_dict, dpi=dpi) + msa_plot = plot_msa_v2(feature_dict, sort_lines=True, dpi=dpi) coverage_png = os.path.join(output_dir,f"{name}_coverage.png") msa_plot.savefig(str(coverage_png), bbox_inches='tight') msa_plot.close() diff --git a/scripts/visualize_alphafold_results.v2.py b/scripts/generate_pae_plddt_plot.py similarity index 87% rename from scripts/visualize_alphafold_results.v2.py rename to scripts/generate_pae_plddt_plot.py index f00e9b51..678dc1b1 100644 --- a/scripts/visualize_alphafold_results.v2.py +++ b/scripts/generate_pae_plddt_plot.py @@ -5,13 +5,15 @@ #from zipfile import Path import numpy as np import pandas as pd -from matplotlib import pyplot as plt, colors as cols, cm as cm +from matplotlib import pyplot as plt, colors as cols, cm as cm, rcParams, font_manager import json from sys import exit import os from Bio import PDB as pdb import io + + # plot size, in inches. 
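
A note on the seq = feature_dict["msa"][0] change to generate_coverage_plot.py
above: plot_msa_v2 computes per-sequence identity as (msa == seq), so seq must
be the single query row, not the whole matrix, for the comparison to broadcast
per sequence. A minimal standalone check with a toy MSA (integer residue
codes; row 0 is assumed to be the query, as in OpenFold feature dicts):

    import numpy as np

    msa = np.array([
        [0, 1, 2, 3],   # query
        [0, 1, 2, 9],   # 3/4 positions match the query
        [7, 1, 2, 3],   # 3/4 positions match the query
    ])
    seq = msa[0]                 # the fixed version: compare against the query row
    qid = (msa == seq).mean(-1)  # per-sequence identity to the query
    print(qid)                   # [1.   0.75 0.75]
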
plot_size = 16 @@ -22,8 +24,9 @@ # Define class for AlphaFold metadata file and class methods class AlphaFoldMetaData(object): - def __init__(self, PathToFile, FastaSequence=None, ranking=None): + def __init__(self, name, PathToFile, FastaSequence=None, ranking=None): # Define attributes + self.name = name self.PathToFile = PathToFile self.FastaSequence = FastaSequence self.saving_filename = self.PathToFile.split("/")[-1].split(".")[0] @@ -32,7 +35,7 @@ def __init__(self, PathToFile, FastaSequence=None, ranking=None): self.saving_filename = "ranked_{}".format(ranking) # Generate a plot of pLDDT value - def plot_pLDDT(self, size_in_inches=12, axis_label_increment=100): + def plot_pLDDT(self, size_in_inches=3.5, axis_label_increment=100): x = list(range(0, len(self.pLDDT), 1)) y = list(self.pLDDT) @@ -41,29 +44,31 @@ def plot_pLDDT(self, size_in_inches=12, axis_label_increment=100): plt.figure(figsize=(size_in_inches, (size_in_inches / 2))) ticks = np.arange(0, len(self.pLDDT), axis_label_increment) - plt.xticks(ticks, fontname="Helvetica") - plt.yticks(fontname="Helvetica") - plt.xlabel("Residue index", size=14, fontweight="bold", fontname="Helvetica") - plt.ylabel("Predicted LDDT", size=14, fontweight="bold", fontname="Helvetica") + plt.xticks(ticks, fontname="Times New Roman") + plt.yticks(fontname="Times New Roman") + plt.title(self.name, size=20, fontweight="bold", fontname="Times New Roman") + plt.xlabel("Residue index", size=16, fontweight="bold", fontname="Times New Roman") + plt.ylabel("Predicted LDDT", size=16, fontweight="bold", fontname="Times New Roman") plt.scatter(x, y, c=y, cmap=cmap, s=5) plt.clim(0, 100) scale = plt.colorbar(shrink=0.5) - scale.set_label(label="Predicted LDDT", size=12, fontweight="bold", fontname="Helvetica") + scale.set_label(label="Predicted LDDT", size=12, fontweight="bold", fontname="Times New Roman") # Save to directory with pickle file in plt.savefig('{}/{}_pLDDT.png'.format(self.saving_pathname, self.saving_filename), dpi=300) # Generate a plot from PAE measurements - def plot_PAE(self, size_in_inches=12, axis_label_increment=100): + def plot_PAE(self, size_in_inches=3.5, axis_label_increment=100): ticks = np.arange(0, self.PAE[1].size, axis_label_increment) plt.figure(figsize=(size_in_inches, size_in_inches)) - PAE = plt.imshow(self.PAE) - plt.xticks(ticks, fontname="Helvetica") - plt.yticks(ticks, fontname="Helvetica") - plt.xlabel("Residue index", size=14, fontweight="bold", fontname="Helvetica") - plt.ylabel("Residue index", size=14, fontweight="bold", fontname="Helvetica") + PAE = plt.imshow(self.PAE, cmap="bwr") + plt.xticks(ticks, fontname="Times New Roman") + plt.yticks(ticks, fontname="Times New Roman") + plt.title(self.name, size=20, fontweight="bold", fontname="Times New Roman") + plt.xlabel("Residue index", size=16, fontweight="bold", fontname="Times New Roman") + plt.ylabel("Residue index", size=16, fontweight="bold", fontname="Times New Roman") scale = plt.colorbar(PAE, shrink=0.5) - scale.set_label(label="Predicted error (Å)", size=12, fontweight="bold", fontname="Helvetica") + scale.set_label(label="Predicted error (Å)", size=14, fontweight="bold", fontname="Times New Roman") # Save plot plt.savefig('{}/{}_PAE.png'.format(self.saving_pathname, self.saving_filename), dpi=300) @@ -298,14 +303,14 @@ def __init__(self, PathToFile, FastaSequence=None, ranking=None): # path_to_pLDDT_file_in_drive = [path_to_pLDDT_file_in_drive] def generate_plots(pkl, outdir, name): - results = AlphaFoldPickle(pkl, None) + results = 
AlphaFoldPickle(name,pkl, None) results.saving_pathname = outdir results.saving_filename = name if type(results.PAE) == np.ndarray: print("Plotting PAE for {} and saving to csv".format(pkl)) results.plot_PAE(size_in_inches=plot_size, axis_label_increment=plot_increment) - results = AlphaFoldPickle(pkl, None) + results = AlphaFoldPickle(name,pkl, None) results.saving_filename = name results.saving_pathname = outdir results.write_pLDDT_file() diff --git a/scripts/visualize_alphafold_results.py b/scripts/visualize_alphafold_results.py deleted file mode 100644 index 6c25c720..00000000 --- a/scripts/visualize_alphafold_results.py +++ /dev/null @@ -1,86 +0,0 @@ -# taken from: https://github.com/jasperzuallaert/VIBFold/blob/main/visualize_alphafold_results.py -import glob -import math -import os -import numpy as np -from matplotlib import pyplot as plt -import argparse -import pickle - - -def get_pae_plddt(model_names): - out = {} - for i, name in enumerate(model_names): - d = pickle.load(open(name, 'rb')) - basename = os.path.basename(name) - basename = basename[basename.index('model'):] - out[f'{basename}'] = {'plddt': d['plddt'], 'pae': d['predicted_aligned_error']} - return out - - -def generate_output_images(feature_dict, out_dir, name, pae_plddt_per_model): - msa = feature_dict['msa'] - seqid = (np.array(msa[0] == msa).mean(-1)) - seqid_sort = seqid.argsort() - non_gaps = (msa != 21).astype(float) - non_gaps[non_gaps == 0] = np.nan - final = non_gaps[seqid_sort] * seqid[seqid_sort, None] - - ################################################################## - plt.figure(figsize=(14, 4), dpi=100) - ################################################################## - plt.subplot(1, 2, 1) - plt.title("Sequence coverage") - plt.imshow(final, - interpolation='nearest', aspect='auto', - cmap="rainbow_r", vmin=0, vmax=1, origin='lower') - plt.plot((msa != 21).sum(0), color='black') - plt.xlim(-0.5, msa.shape[1] - 0.5) - plt.ylim(-0.5, msa.shape[0] - 0.5) - plt.colorbar(label="Sequence identity to query", ) - plt.xlabel("Positions") - plt.ylabel("Sequences") - - ################################################################## - plt.subplot(1, 2, 2) - plt.title("Predicted LDDT per position") - for model_name, value in pae_plddt_per_model.items(): - plt.plot(value["plddt"], label=model_name) - plt.ylim(0, 100) - plt.ylabel("Predicted LDDT") - plt.xlabel("Positions") - plt.savefig(f"{out_dir}/{name + ('_' if name else '')}coverage_LDDT.png") - ################################################################## - - ################################################################## - num_models = 5 # columns - num_runs_per_model = math.ceil(len(model_names) / num_models) - fig = plt.figure(figsize=(3 * num_models, 2 * num_runs_per_model), dpi=100) - for n, (model_name, value) in enumerate(pae_plddt_per_model.items()): - plt.subplot(num_runs_per_model, num_models, n + 1) - plt.title(model_name) - plt.imshow(value["pae"], label=model_name, cmap="bwr", vmin=0, vmax=30) - plt.colorbar() - fig.tight_layout() - plt.savefig(f"{out_dir}/{name + ('_' if name else '')}PAE.png") - ################################################################## - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument('--input_dir', dest='input_dir', required=True) - parser.add_argument('--name', dest='name') - parser.set_defaults(name='') - parser.add_argument('--output_dir', dest='output_dir') - parser.set_defaults(output_dir='') - args = parser.parse_args() - - feature_dict = 
pickle.load(open(f'{args.input_dir}/DTX1_1-DTX2_1_model_1_multimer_v3_output_dict.pkl', 'rb')) - # is_multimer = ('result_model_1_multimer.pkl' in [os.path.basename(f) for f in os.listdir(path=args.input_dir)]) - # is_ptm = ('result_model_1_ptm.pkl' in [os.path.basename(f) for f in os.listdir(path=args.input_dir)]) - # model_names = [f'{args.input_dir}/result_model_{f}{"_multimer" if is_multimer else "_ptm" if is_ptm else ""}.pkl' for f in range(1,6)] - model_names = sorted(glob.glob(f'{args.input_dir}/DTX1_1-DTX2_1_model_1_multimer_v3_output_dict.pkl')) - - pae_plddt_per_model = get_pae_plddt(model_names) - generate_output_images(feature_dict, args.output_dir if args.output_dir else args.input_dir, args.name, - pae_plddt_per_model) From ee207c4761690142f906bbfd8173dc3d6e4dddc9 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Mon, 29 Apr 2024 13:37:49 -0400 Subject: [PATCH 36/64] change input param for coverage --- scripts/generate_coverage_plot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/generate_coverage_plot.py b/scripts/generate_coverage_plot.py index b951cc8e..fbc8428b 100644 --- a/scripts/generate_coverage_plot.py +++ b/scripts/generate_coverage_plot.py @@ -88,7 +88,7 @@ def generate_coverage(fd_pkl, output_dir, name, dpi=500): if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('--feature_dict_pkl', dest='feature_dict_pkl', required=True) + parser.add_argument('--input_pkl', dest='feature_dict_pkl', required=True) parser.add_argument('--output_dir', dest='output_dir', required=True) parser.add_argument('--basename', dest='basename', required=True) args = parser.parse_args() From 1a410ecebd03978318cc15368c98a6e1efbcf01a Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Mon, 29 Apr 2024 13:44:35 -0400 Subject: [PATCH 37/64] patch --- run_pretrained_openfold.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/run_pretrained_openfold.py b/run_pretrained_openfold.py index 5d25166c..e5aa3801 100644 --- a/run_pretrained_openfold.py +++ b/run_pretrained_openfold.py @@ -339,9 +339,9 @@ def main(args): feature_dicts[tag] = feature_dict - # print("Storing feature dict...") - # with open(os.path.join(args.output_dir, f"{output_name}_feature_dict.pickle"), "wb") as fp: - # pickle.dump(feature_dict, fp, protocol=pickle.HIGHEST_PROTOCOL) + print("Storing feature dict...") + with open(os.path.join(args.output_dir, f"{output_name}_feature_dict.pickle"), "wb") as fp: + pickle.dump(feature_dict, fp, protocol=pickle.HIGHEST_PROTOCOL) processed_feature_dict = feature_processor.process_features( feature_dict, mode='predict', is_multimer=is_multimer From 68d1e6bccd6da2a949d027cb26854adbe9cf9352 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Mon, 29 Apr 2024 13:48:18 -0400 Subject: [PATCH 38/64] patch --- run_pretrained_openfold.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/run_pretrained_openfold.py b/run_pretrained_openfold.py index e5aa3801..06bed7fb 100644 --- a/run_pretrained_openfold.py +++ b/run_pretrained_openfold.py @@ -339,10 +339,6 @@ def main(args): feature_dicts[tag] = feature_dict - print("Storing feature dict...") - with open(os.path.join(args.output_dir, f"{output_name}_feature_dict.pickle"), "wb") as fp: - pickle.dump(feature_dict, fp, protocol=pickle.HIGHEST_PROTOCOL) - processed_feature_dict = feature_processor.process_features( feature_dict, mode='predict', 
is_multimer=is_multimer ) From 9812856163ee0b79ab9338552c4d8fb36dc644b8 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Mon, 29 Apr 2024 16:01:54 -0400 Subject: [PATCH 39/64] unrecon fonts --- scripts/generate_pae_plddt_plot.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/scripts/generate_pae_plddt_plot.py b/scripts/generate_pae_plddt_plot.py index 678dc1b1..44b2fcd5 100644 --- a/scripts/generate_pae_plddt_plot.py +++ b/scripts/generate_pae_plddt_plot.py @@ -44,15 +44,15 @@ def plot_pLDDT(self, size_in_inches=3.5, axis_label_increment=100): plt.figure(figsize=(size_in_inches, (size_in_inches / 2))) ticks = np.arange(0, len(self.pLDDT), axis_label_increment) - plt.xticks(ticks, fontname="Times New Roman") - plt.yticks(fontname="Times New Roman") - plt.title(self.name, size=20, fontweight="bold", fontname="Times New Roman") - plt.xlabel("Residue index", size=16, fontweight="bold", fontname="Times New Roman") - plt.ylabel("Predicted LDDT", size=16, fontweight="bold", fontname="Times New Roman") + plt.xticks(ticks) + plt.yticks() + plt.title(self.name, size=20, fontweight="bold") + plt.xlabel("Residue index", size=16, fontweight="bold") + plt.ylabel("Predicted LDDT", size=16, fontweight="bold") plt.scatter(x, y, c=y, cmap=cmap, s=5) plt.clim(0, 100) scale = plt.colorbar(shrink=0.5) - scale.set_label(label="Predicted LDDT", size=12, fontweight="bold", fontname="Times New Roman") + scale.set_label(label="Predicted LDDT", size=12, fontweight="bold") # Save to directory with pickle file in plt.savefig('{}/{}_pLDDT.png'.format(self.saving_pathname, self.saving_filename), dpi=300) @@ -62,13 +62,13 @@ def plot_PAE(self, size_in_inches=3.5, axis_label_increment=100): ticks = np.arange(0, self.PAE[1].size, axis_label_increment) plt.figure(figsize=(size_in_inches, size_in_inches)) PAE = plt.imshow(self.PAE, cmap="bwr") - plt.xticks(ticks, fontname="Times New Roman") - plt.yticks(ticks, fontname="Times New Roman") - plt.title(self.name, size=20, fontweight="bold", fontname="Times New Roman") - plt.xlabel("Residue index", size=16, fontweight="bold", fontname="Times New Roman") - plt.ylabel("Residue index", size=16, fontweight="bold", fontname="Times New Roman") + plt.xticks(ticks) + plt.yticks(ticks) + plt.title(self.name, size=20, fontweight="bold") + plt.xlabel("Residue index", size=16, fontweight="bold") + plt.ylabel("Residue index", size=16, fontweight="bold") scale = plt.colorbar(PAE, shrink=0.5) - scale.set_label(label="Predicted error (Å)", size=14, fontweight="bold", fontname="Times New Roman") + scale.set_label(label="Predicted error (Å)", size=14, fontweight="bold") # Save plot plt.savefig('{}/{}_PAE.png'.format(self.saving_pathname, self.saving_filename), dpi=300) From 322f74c41f8ffbb08b17405e673ffa803a20a7e9 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Wed, 1 May 2024 07:30:40 -0400 Subject: [PATCH 40/64] patch for feature dict pickle output --- run_pretrained_openfold.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/run_pretrained_openfold.py b/run_pretrained_openfold.py index 06bed7fb..d266dc06 100644 --- a/run_pretrained_openfold.py +++ b/run_pretrained_openfold.py @@ -343,6 +343,10 @@ def main(args): feature_dict, mode='predict', is_multimer=is_multimer ) + # print("Storing feature dict...") + # with open(os.path.join(args.output_dir, f"{output_name}_feature_dict.pickle"), "wb") as fp: + # pickle.dump(processed_feature_dict, fp, 
protocol=pickle.HIGHEST_PROTOCOL) + processed_feature_dict = { k: torch.as_tensor(v, device=args.model_device) for k, v in processed_feature_dict.items() From 30bd52bd38a90463e85df5efc65dee055eb2d499 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Wed, 1 May 2024 14:54:51 -0400 Subject: [PATCH 41/64] gen pae json output --- scripts/generate_pae_plddt_plot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/generate_pae_plddt_plot.py b/scripts/generate_pae_plddt_plot.py index 44b2fcd5..0b90146f 100644 --- a/scripts/generate_pae_plddt_plot.py +++ b/scripts/generate_pae_plddt_plot.py @@ -76,7 +76,7 @@ def plot_PAE(self, size_in_inches=3.5, axis_label_increment=100): # Generate dataframe from PAE data and save to csv pd_PAE = pd.DataFrame(self.PAE) pd_PAE.to_csv('{}/{}_PAE.csv'.format(self.saving_pathname, self.saving_filename)) - + pd_PAE.to_json('{}/{}_PAE.json'.format(self.saving_pathname, self.saving_filename)) class AlphaFoldPickle(AlphaFoldMetaData): From df5884a9059a4162ef05e2af34f84c8bcded563d Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Wed, 1 May 2024 15:03:32 -0400 Subject: [PATCH 42/64] gen pae json output --- scripts/generate_pae_plddt_plot.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/scripts/generate_pae_plddt_plot.py b/scripts/generate_pae_plddt_plot.py index 0b90146f..f0932360 100644 --- a/scripts/generate_pae_plddt_plot.py +++ b/scripts/generate_pae_plddt_plot.py @@ -12,8 +12,6 @@ from Bio import PDB as pdb import io - - # plot size, in inches. plot_size = 16 @@ -29,7 +27,7 @@ def __init__(self, name, PathToFile, FastaSequence=None, ranking=None): self.name = name self.PathToFile = PathToFile self.FastaSequence = FastaSequence - self.saving_filename = self.PathToFile.split("/")[-1].split(".")[0] + self.saving_filename = name #self.PathToFile.split("/")[-1].split(".")[0] self.saving_pathname = self.PathToFile.split(self.saving_filename)[0] if ranking: self.saving_filename = "ranked_{}".format(ranking) @@ -78,6 +76,7 @@ def plot_PAE(self, size_in_inches=3.5, axis_label_increment=100): pd_PAE.to_csv('{}/{}_PAE.csv'.format(self.saving_pathname, self.saving_filename)) pd_PAE.to_json('{}/{}_PAE.json'.format(self.saving_pathname, self.saving_filename)) + class AlphaFoldPickle(AlphaFoldMetaData): def __init__(self, PathToFile, FastaSequence=None, ranking=None): @@ -303,14 +302,14 @@ def __init__(self, PathToFile, FastaSequence=None, ranking=None): # path_to_pLDDT_file_in_drive = [path_to_pLDDT_file_in_drive] def generate_plots(pkl, outdir, name): - results = AlphaFoldPickle(name,pkl, None) + results = AlphaFoldPickle(name, pkl, None) results.saving_pathname = outdir results.saving_filename = name if type(results.PAE) == np.ndarray: print("Plotting PAE for {} and saving to csv".format(pkl)) results.plot_PAE(size_in_inches=plot_size, axis_label_increment=plot_increment) - results = AlphaFoldPickle(name,pkl, None) + results = AlphaFoldPickle(name, pkl, None) results.saving_filename = name results.saving_pathname = outdir results.write_pLDDT_file() From de95dda965ec8493e53c22c601e68cceb024865d Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Wed, 1 May 2024 15:05:31 -0400 Subject: [PATCH 43/64] gen pae json output --- scripts/generate_pae_plddt_plot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/generate_pae_plddt_plot.py b/scripts/generate_pae_plddt_plot.py index 
f0932360..3d3325e6 100644 --- a/scripts/generate_pae_plddt_plot.py +++ b/scripts/generate_pae_plddt_plot.py @@ -73,8 +73,8 @@ def plot_PAE(self, size_in_inches=3.5, axis_label_increment=100): # Generate dataframe from PAE data and save to csv pd_PAE = pd.DataFrame(self.PAE) - pd_PAE.to_csv('{}/{}_PAE.csv'.format(self.saving_pathname, self.saving_filename)) - pd_PAE.to_json('{}/{}_PAE.json'.format(self.saving_pathname, self.saving_filename)) + pd_PAE.to_csv('{}/{}.csv'.format(self.saving_pathname, self.saving_filename)) + pd_PAE.to_json('{}/{}.json'.format(self.saving_pathname, self.saving_filename)) class AlphaFoldPickle(AlphaFoldMetaData): From 03d24f2f50ddb895748c4ae805c8b34bc82ff7f1 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Fri, 3 May 2024 14:00:09 -0400 Subject: [PATCH 44/64] export json like colabfold --- scripts/generate_pae_plddt_plot.py | 40 +++++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/scripts/generate_pae_plddt_plot.py b/scripts/generate_pae_plddt_plot.py index 3d3325e6..90d722d7 100644 --- a/scripts/generate_pae_plddt_plot.py +++ b/scripts/generate_pae_plddt_plot.py @@ -5,12 +5,16 @@ #from zipfile import Path import numpy as np import pandas as pd + from matplotlib import pyplot as plt, colors as cols, cm as cm, rcParams, font_manager import json from sys import exit import os from Bio import PDB as pdb import io +import json +from json import encoder +encoder.FLOAT_REPR = lambda o: format(o, '.2f') # plot size, in inches. plot_size = 16 @@ -74,7 +78,7 @@ def plot_PAE(self, size_in_inches=3.5, axis_label_increment=100): # Generate dataframe from PAE data and save to csv pd_PAE = pd.DataFrame(self.PAE) pd_PAE.to_csv('{}/{}.csv'.format(self.saving_pathname, self.saving_filename)) - pd_PAE.to_json('{}/{}.json'.format(self.saving_pathname, self.saving_filename)) + # pd_PAE.to_json('{}/{}.json'.format(self.saving_pathname, self.saving_filename)) class AlphaFoldPickle(AlphaFoldMetaData): @@ -96,13 +100,34 @@ def __init__(self, PathToFile, FastaSequence=None, ranking=None): # Try statement accounts for data run using non-pTM models, with no PAE output try: - self.PAE = self.data[0]['predicted_aligned_error'] + self.PAE = self.data[0]['predicted_aligned_error'].round(2) except: print("PAE model data not present. 
To access this performance metric, run AlphaFold" "using pTM-enabled models.") # Define pLDDT - self.pLDDT = self.data[0]['plddt'] + self.pLDDT = self.data[0]['plddt'].round(2) + self.max_pae = self.data[0]['max_predicted_aligned_error'] + self.ptm = self.data[0]['ptm_score'] + self.iptm = self.data[0]['iptm_score'] + + def save_to_json(self): + # save pkl to json format as colabfold + colab_data = {} + colab_data['plddt'] = list(np.around(np.array(self.pLDDT.tolist()),2)) + colab_data['pae'] = list(np.around(np.array(self.PAE.tolist()),2)) + colab_data['max_pae'] = self.max_pae + colab_data['ptm'] = self.ptm + colab_data['iptm'] = self.iptm + + class NumpyEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, np.ndarray): + return obj.tolist() + return json.JSONEncoder.default(self, obj) + + with open('{}/{}.json'.format(self.saving_pathname, self.saving_filename), "w") as outfile: + outfile.write(json.dumps(colab_data, cls=NumpyEncoder)) # Generate a ChimeraX attribute file from pLDDT measurements def write_pLDDT_file(self): @@ -309,13 +334,16 @@ def generate_plots(pkl, outdir, name): print("Plotting PAE for {} and saving to csv".format(pkl)) results.plot_PAE(size_in_inches=plot_size, axis_label_increment=plot_increment) - results = AlphaFoldPickle(name, pkl, None) - results.saving_filename = name - results.saving_pathname = outdir + # results = AlphaFoldPickle(name, pkl, None) + # results.saving_filename = name + # results.saving_pathname = outdir results.write_pLDDT_file() print("Plotting pLDDT for {} and saving to csv".format(pkl)) results.plot_pLDDT(size_in_inches=plot_size, axis_label_increment=plot_increment) + print("Saving pickle {} in json format".format(pkl)) + results.save_to_json() + if __name__ == "__main__": parser = argparse.ArgumentParser() From bdfda8f9aca5b9d476f4f4a521af3c581b1c5685 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Wed, 8 May 2024 09:43:19 -0400 Subject: [PATCH 45/64] debug hhsearch --- openfold/data/tools/hhsearch.py | 1 + 1 file changed, 1 insertion(+) diff --git a/openfold/data/tools/hhsearch.py b/openfold/data/tools/hhsearch.py index 42a6edf6..374fbe47 100644 --- a/openfold/data/tools/hhsearch.py +++ b/openfold/data/tools/hhsearch.py @@ -97,6 +97,7 @@ def query(self, a3m: str, output_dir: Optional[str] = None) -> str: ] + db_cmd logging.info('Launching subprocess "%s"', " ".join(cmd)) + print(f"hhsearch command: {' '.join(cmd)}") process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) From 6974cffd12244eec87b7b08d2fa6ff2f1cdd5cd5 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Wed, 8 May 2024 09:47:00 -0400 Subject: [PATCH 46/64] debug hhsearch --- openfold/data/tools/hhsearch.py | 1 + 1 file changed, 1 insertion(+) diff --git a/openfold/data/tools/hhsearch.py b/openfold/data/tools/hhsearch.py index 374fbe47..5be5bfd5 100644 --- a/openfold/data/tools/hhsearch.py +++ b/openfold/data/tools/hhsearch.py @@ -106,6 +106,7 @@ def query(self, a3m: str, output_dir: Optional[str] = None) -> str: retcode = process.wait() if retcode: + print(f"hhsearch command: {' '.join(cmd)}") # Stderr is truncated to prevent proto size errors in Beam. 
raise RuntimeError( "HHSearch failed:\nstdout:\n%s\n\nstderr:\n%s\n" From 45ac61bff46973e552520d6de040538f4656fa8a Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Wed, 8 May 2024 09:49:17 -0400 Subject: [PATCH 47/64] debug hhsearch --- openfold/data/tools/hhsearch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/openfold/data/tools/hhsearch.py b/openfold/data/tools/hhsearch.py index 5be5bfd5..c185d56a 100644 --- a/openfold/data/tools/hhsearch.py +++ b/openfold/data/tools/hhsearch.py @@ -109,8 +109,8 @@ def query(self, a3m: str, output_dir: Optional[str] = None) -> str: print(f"hhsearch command: {' '.join(cmd)}") # Stderr is truncated to prevent proto size errors in Beam. raise RuntimeError( - "HHSearch failed:\nstdout:\n%s\n\nstderr:\n%s\n" - % (stdout.decode("utf-8"), stderr[:100_000].decode("utf-8")) + "HHSearch failed:\ncommand:\n%s\n\nstdout:\n%s\n\nstderr:\n%s\n" + % (f"hhsearch command: {' '.join(cmd)}", stdout.decode("utf-8"), stderr[:100_000].decode("utf-8")) ) with open(hhr_path) as f: From 79464ceae3b41e9ca68de7d3fa5b80dfcb639477 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Wed, 8 May 2024 09:51:24 -0400 Subject: [PATCH 48/64] debug hhsearch --- openfold/data/tools/hhsearch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openfold/data/tools/hhsearch.py b/openfold/data/tools/hhsearch.py index c185d56a..e0d48331 100644 --- a/openfold/data/tools/hhsearch.py +++ b/openfold/data/tools/hhsearch.py @@ -110,7 +110,7 @@ def query(self, a3m: str, output_dir: Optional[str] = None) -> str: # Stderr is truncated to prevent proto size errors in Beam. raise RuntimeError( "HHSearch failed:\ncommand:\n%s\n\nstdout:\n%s\n\nstderr:\n%s\n" - % (f"hhsearch command: {' '.join(cmd)}", stdout.decode("utf-8"), stderr[:100_000].decode("utf-8")) + % (f"hhsearch command: {cmd}", stdout.decode("utf-8"), stderr[:100_000].decode("utf-8")) ) with open(hhr_path) as f: From cb359fdc7b5521c21d15fe38fe2af4ead11e9833 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Wed, 8 May 2024 09:53:28 -0400 Subject: [PATCH 49/64] debug hhsearch --- openfold/data/tools/hhsearch.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/openfold/data/tools/hhsearch.py b/openfold/data/tools/hhsearch.py index e0d48331..dfceef3d 100644 --- a/openfold/data/tools/hhsearch.py +++ b/openfold/data/tools/hhsearch.py @@ -108,9 +108,12 @@ def query(self, a3m: str, output_dir: Optional[str] = None) -> str: if retcode: print(f"hhsearch command: {' '.join(cmd)}") # Stderr is truncated to prevent proto size errors in Beam. 
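
The hhsearch.py changes above (patches 45 through 49) iterate on how to report
the failing command. A sketch of folding those attempts into one place -- a
hypothetical helper, not part of OpenFold's API -- that attaches both the exact
command line and capped stderr to the exception:

    import subprocess

    def run_logged(cmd, stderr_cap=100_000):
        """Run cmd; on failure, raise with the command line and capped stderr."""
        process = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        stdout, stderr = process.communicate()
        if process.returncode:
            # The cap mirrors the existing 100_000-byte truncation that keeps
            # Beam protos from growing too large.
            raise RuntimeError(
                "Command failed: %s\nstderr:\n%s"
                % (" ".join(cmd),
                   stderr[:stderr_cap].decode("utf-8", errors="replace"))
            )
        return stdout
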
+            # raise RuntimeError(
+            #     "HHSearch failed:\ncommand:\n%s\n\nstdout:\n%s\n\nstderr:\n%s\n"
+            #     % (f"hhsearch command: {cmd}", stdout.decode("utf-8"), stderr[:100_000].decode("utf-8"))
+            # )
             raise RuntimeError(
-                "HHSearch failed:\ncommand:\n%s\n\nstdout:\n%s\n\nstderr:\n%s\n"
-                % (f"hhsearch command: {cmd}", stdout.decode("utf-8"), stderr[:100_000].decode("utf-8"))
+                f"HHSearch failed:\ncommand:\n{cmd}\n\n"
             )

     with open(hhr_path) as f:

From c09e7a87075e247a848f6aa6a556a83a0b958a88 Mon Sep 17 00:00:00 2001
From: jflucier <3505568+jflucier@users.noreply.github.com>
Date: Wed, 8 May 2024 09:54:44 -0400
Subject: [PATCH 50/64] debug hhsearch

---
 openfold/data/tools/hhsearch.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/openfold/data/tools/hhsearch.py b/openfold/data/tools/hhsearch.py
index dfceef3d..83766edd 100644
--- a/openfold/data/tools/hhsearch.py
+++ b/openfold/data/tools/hhsearch.py
@@ -84,6 +84,7 @@ def query(self, a3m: str, output_dir: Optional[str] = None) -> str:
         for db_path in self.databases:
             db_cmd.append("-d")
             db_cmd.append(db_path)
+
         cmd = [
             self.binary_path,
             "-i",
@@ -96,6 +97,10 @@ def query(self, a3m: str, output_dir: Optional[str] = None) -> str:
             str(self.n_cpu),
         ] + db_cmd

+        raise RuntimeError(
+            f"HHSearch failed:\ncommand:\n{cmd}\n\n"
+        )
+
         logging.info('Launching subprocess "%s"', " ".join(cmd))
         print(f"hhsearch command: {' '.join(cmd)}")
         process = subprocess.Popen(

From c549f8e8c5b5aadf3e86e297fb6e46191f689ce3 Mon Sep 17 00:00:00 2001
From: jflucier <3505568+jflucier@users.noreply.github.com>
Date: Wed, 8 May 2024 09:56:04 -0400
Subject: [PATCH 51/64] debug hhsearch

---
 openfold/data/tools/hhsearch.py | 46 ++++++++++++++++-----------------
 1 file changed, 23 insertions(+), 23 deletions(-)

diff --git a/openfold/data/tools/hhsearch.py b/openfold/data/tools/hhsearch.py
index 83766edd..664ab794 100644
--- a/openfold/data/tools/hhsearch.py
+++ b/openfold/data/tools/hhsearch.py
@@ -101,29 +101,29 @@ def query(self, a3m: str, output_dir: Optional[str] = None) -> str:
             f"HHSearch failed:\ncommand:\n{cmd}\n\n"
         )

-        logging.info('Launching subprocess "%s"', " ".join(cmd))
-        print(f"hhsearch command: {' '.join(cmd)}")
-        process = subprocess.Popen(
-            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
-        )
-        with utils.timing("HHsearch query"):
-            stdout, stderr = process.communicate()
-            retcode = process.wait()
-
-        if retcode:
-            print(f"hhsearch command: {' '.join(cmd)}")
-            # Stderr is truncated to prevent proto size errors in Beam.
-            # raise RuntimeError(
-            #     "HHSearch failed:\ncommand:\n%s\n\nstdout:\n%s\n\nstderr:\n%s\n"
-            #     % (f"hhsearch command: {cmd}", stdout.decode("utf-8"), stderr[:100_000].decode("utf-8"))
-            # )
-            raise RuntimeError(
-                f"HHSearch failed:\ncommand:\n{cmd}\n\n"
-            )
-
-        with open(hhr_path) as f:
-            hhr = f.read()
-        return hhr
+        # logging.info('Launching subprocess "%s"', " ".join(cmd))
+        # print(f"hhsearch command: {' '.join(cmd)}")
+        # process = subprocess.Popen(
+        #     cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+        # )
+        # with utils.timing("HHsearch query"):
+        #     stdout, stderr = process.communicate()
+        #     retcode = process.wait()
+        #
+        # if retcode:
+        #     print(f"hhsearch command: {' '.join(cmd)}")
+        #     # Stderr is truncated to prevent proto size errors in Beam.
+ # # raise RuntimeError( + # # "HHSearch failed:\ncommand:\n%s\n\nstdout:\n%s\n\nstderr:\n%s\n" + # # % (f"hhsearch command: {cmd}", stdout.decode("utf-8"), stderr[:100_000].decode("utf-8")) + # # ) + # raise RuntimeError( + # f"HHSearch failed:\ncommand:\n{cmd}\n\n" + # ) + # + # with open(hhr_path) as f: + # hhr = f.read() + # return hhr @staticmethod def get_template_hits( From 6717ee629badf8d61730c6612164af13eacef194 Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Thu, 9 May 2024 14:50:36 -0400 Subject: [PATCH 52/64] colabsearch integration --- notebooks/OpenFold.ipynb | 1834 +++++++++++++++---------------- openfold/data/tools/hhsearch.py | 42 +- scripts/colabfold_search.py | 729 ++++++++++++ 3 files changed, 1662 insertions(+), 943 deletions(-) create mode 100644 scripts/colabfold_search.py diff --git a/notebooks/OpenFold.ipynb b/notebooks/OpenFold.ipynb index 8a844652..669a57e6 100644 --- a/notebooks/OpenFold.ipynb +++ b/notebooks/OpenFold.ipynb @@ -1,919 +1,919 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "view-in-github" - }, - "source": [ - "\"Open" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "pc5-mbsX9PZC" - }, - "source": [ - "# OpenFold Colab\n", - "\n", - "Runs a simplified version of [OpenFold](https://github.com/aqlaboratory/openfold) on a target sequence. Adapted from DeepMind's [official AlphaFold Colab](https://colab.research.google.com/github/deepmind/alphafold/blob/main/notebooks/AlphaFold.ipynb).\n", - "\n", - "**Differences to AlphaFold v2.0**\n", - "\n", - "OpenFold is a trainable PyTorch reimplementation of AlphaFold 2. For the purposes of inference, it is practically identical to the original (\"practically\" because ensembling is excluded from OpenFold (recycling is enabled, however)).\n", - "\n", - "In this notebook, OpenFold is run with your choice of our original OpenFold parameters or DeepMind's publicly released parameters for AlphaFold 2.\n", - "\n", - "**Note**\n", - "\n", - "Like DeepMind's official Colab, this notebook uses **no templates (homologous structures)** and a selected portion of the full [BFD database](https://bfd.mmseqs.com/).\n", - "\n", - "**Citing this work**\n", - "\n", - "Any publication that discloses findings arising from using this notebook should [cite](https://github.com/deepmind/alphafold/#citing-this-work) DeepMind's [AlphaFold paper](https://doi.org/10.1038/s41586-021-03819-2).\n", - "\n", - "**Licenses**\n", - "\n", - "This Colab supports inference with the [AlphaFold model parameters](https://github.com/deepmind/alphafold/#model-parameters-license), made available under the Creative Commons Attribution 4.0 International ([CC BY 4.0](https://creativecommons.org/licenses/by/4.0/legalcode)) license. The Colab itself is provided under the [Apache 2.0 license](https://www.apache.org/licenses/LICENSE-2.0). See the full license statement below.\n", - "\n", - "**More information**\n", - "\n", - "You can find more information about how AlphaFold/OpenFold works in DeepMind's two Nature papers:\n", - "\n", - "* [AlphaFold methods paper](https://www.nature.com/articles/s41586-021-03819-2)\n", - "* [AlphaFold predictions of the human proteome paper](https://www.nature.com/articles/s41586-021-03828-1)\n", - "\n", - "FAQ on how to interpret AlphaFold/OpenFold predictions are [here](https://alphafold.ebi.ac.uk/faq)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "rowN0bVYLe9n" - }, - "outputs": [], - "source": [ - "#@markdown ### Enter the amino acid sequence to fold ⬇️\n", - "#@markdown For multiple sequences, separate sequences with a colon `:`\n", - "input_sequence = 'MKLKQVADKLEEVASKLYHNANELARVAKLLGER:MKLKQVADKLEEVASKLYHNANELARVAKLLGER: MKLKQVADKLEEVASKLYHNANELARVAKLLGER:MKLKQVADKLEEVASKLYHNANELARVAKLLGER' #@param {type:\"string\"}\n", - "\n", - "#@markdown ### Configure the model ⬇️\n", - "\n", - "weight_set = 'AlphaFold' #@param [\"OpenFold\", \"AlphaFold\"]\n", - "model_mode = 'multimer' #@param [\"monomer\", \"multimer\"]\n", - "relax_prediction = True #@param {type:\"boolean\"}\n", - "\n", - "\n", - "# Remove all whitespaces, tabs and end lines; upper-case\n", - "input_sequence = input_sequence.translate(str.maketrans('', '', ' \\n\\t')).upper()\n", - "aatypes = set('ACDEFGHIKLMNPQRSTVWY') # 20 standard aatypes\n", - "allowed_chars = aatypes.union({':'})\n", - "if not set(input_sequence).issubset(allowed_chars):\n", - " raise Exception(f'Input sequence contains non-amino acid letters: {set(input_sequence) - allowed_chars}. OpenFold only supports 20 standard amino acids as inputs.')\n", - "\n", - "if ':' in input_sequence and weight_set != 'AlphaFold':\n", - " raise ValueError('Input sequence is a multimer, must select Alphafold weight set')\n", - "\n", - "import enum\n", - "@enum.unique\n", - "class ModelType(enum.Enum):\n", - " MONOMER = 0\n", - " MULTIMER = 1\n", - "\n", - "model_type_dict = {\n", - " 'monomer': ModelType.MONOMER,\n", - " 'multimer': ModelType.MULTIMER,\n", - "}\n", - "\n", - "model_type = model_type_dict[model_mode]\n", - "print(f'Length of input sequence : {len(input_sequence.replace(\":\", \"\"))}')\n", - "#@markdown After making your selections, execute this cell by pressing the\n", - "#@markdown *Play* button on the left." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "woIxeCPygt7K" - }, - "outputs": [], - "source": [ - "#@title Install third-party software\n", - "#@markdown Please execute this cell by pressing the *Play* button on\n", - "#@markdown the left.\n", - "\n", - "\n", - "#@markdown **Note**: This installs the software on the Colab\n", - "#@markdown notebook in the cloud and not on your computer.\n", - "\n", - "import os, time\n", - "from IPython.utils import io\n", - "from sys import version_info\n", - "import subprocess\n", - "\n", - "python_version = f\"{version_info.major}.{version_info.minor}\"\n", - "\n", - "\n", - "os.system(\"wget -qnc https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-Linux-x86_64.sh\")\n", - "os.system(\"bash Mambaforge-Linux-x86_64.sh -bfp /usr/local\")\n", - "os.system(\"mamba config --set auto_update_conda false\")\n", - "os.system(f\"mamba install -y -c conda-forge -c bioconda kalign2=2.04 hhsuite=3.3.0 openmm=7.7.0 python={python_version} pdbfixer biopython=1.79\")\n", - "os.system(\"pip install -q torch ml_collections py3Dmol modelcif\")\n", - "\n", - "try:\n", - " with io.capture_output() as captured:\n", - "\n", - " # Create a ramdisk to store a database chunk to make Jackhmmer run fast.\n", - " %shell sudo apt install --quiet --yes hmmer\n", - " %shell sudo mkdir -m 777 --parents /tmp/ramdisk\n", - " %shell sudo mount -t tmpfs -o size=9G ramdisk /tmp/ramdisk\n", - "\n", - " %shell wget -q -P /content \\\n", - " https://git.scicore.unibas.ch/schwede/openstructure/-/raw/7102c63615b64735c4941278d92b554ec94415f8/modules/mol/alg/src/stereo_chemical_props.txt\n", - "\n", - " %shell mkdir -p /content/openfold/openfold/resources\n", - "\n", - " commit = \"e2e19f16676b1a409f9ba3a6f69b11ee7f5887c2\"\n", - " os.system(f\"pip install -q git+https://github.com/aqlaboratory/openfold.git@{commit}\")\n", - "\n", - " os.system(f\"cp -f -p /content/stereo_chemical_props.txt /usr/local/lib/python{python_version}/site-packages/openfold/resources/\")\n", - "\n", - "except subprocess.CalledProcessError as captured:\n", - " print(captured)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "VzJ5iMjTtoZw" - }, - "outputs": [], - "source": [ - "#@title Download model weights\n", - "#@markdown Please execute this cell by pressing the *Play* button on\n", - "#@markdown the left.\n", - "\n", - "# Define constants\n", - "GIT_REPO='https://github.com/aqlaboratory/openfold'\n", - "ALPHAFOLD_PARAM_SOURCE_URL = 'https://storage.googleapis.com/alphafold/alphafold_params_2022-12-06.tar'\n", - "OPENFOLD_PARAMS_DIR = './openfold/openfold/resources/openfold_params'\n", - "ALPHAFOLD_PARAMS_DIR = './openfold/openfold/resources/params'\n", - "ALPHAFOLD_PARAMS_PATH = os.path.join(\n", - " ALPHAFOLD_PARAMS_DIR, os.path.basename(ALPHAFOLD_PARAM_SOURCE_URL)\n", - ")\n", - "\n", - "try:\n", - " with io.capture_output() as captured:\n", - " if(weight_set == 'AlphaFold'):\n", - " %shell mkdir --parents \"{ALPHAFOLD_PARAMS_DIR}\"\n", - " %shell wget -O {ALPHAFOLD_PARAMS_PATH} {ALPHAFOLD_PARAM_SOURCE_URL}\n", - " %shell tar --extract --verbose --file=\"{ALPHAFOLD_PARAMS_PATH}\" \\\n", - " --directory=\"{ALPHAFOLD_PARAMS_DIR}\" --preserve-permissions\n", - " %shell rm \"{ALPHAFOLD_PARAMS_PATH}\"\n", - " elif(weight_set == 'OpenFold'):\n", - " # Install AWS CLI\n", - " %shell curl \"https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip\" -o \"awscliv2.zip\"\n", - " %shell unzip -qq awscliv2.zip\n", - " %shell 
sudo ./aws/install\n", - " %shell rm awscliv2.zip\n", - " %shell rm -rf ./aws\n", - " %shell mkdir --parents \"{OPENFOLD_PARAMS_DIR}\"\n", - "\n", - " %shell aws s3 cp \\\n", - " --no-sign-request \\\n", - " --region us-east-1 \\\n", - " s3://openfold/openfold_params \"{OPENFOLD_PARAMS_DIR}\" \\\n", - " --recursive\n", - " else:\n", - " raise ValueError(\"Invalid weight set\")\n", - "except subprocess.CalledProcessError as captured:\n", - " print(captured)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "_FpxxMo-mvcP" - }, - "outputs": [], - "source": [ - "#@title Import Python packages\n", - "#@markdown Please execute this cell by pressing the *Play* button on\n", - "#@markdown the left.\n", - "\n", - "import unittest.mock\n", - "import sys\n", - "from typing import Dict, Sequence\n", - "\n", - "sys.path.insert(0, f'/usr/local/lib/python{python_version}/dist-packages/')\n", - "sys.path.insert(0, f'/usr/local/lib/python{python_version}/site-packages/')\n", - "\n", - "# Allows us to skip installing these packages\n", - "unnecessary_modules = [\n", - " \"dllogger\",\n", - " \"pytorch_lightning\",\n", - " \"pytorch_lightning.utilities\",\n", - " \"pytorch_lightning.callbacks.early_stopping\",\n", - " \"pytorch_lightning.utilities.seed\",\n", - "]\n", - "for unnecessary_module in unnecessary_modules:\n", - " sys.modules[unnecessary_module] = unittest.mock.MagicMock()\n", - "\n", - "import os\n", - "\n", - "from urllib import request\n", - "from concurrent import futures\n", - "from google.colab import files\n", - "import json\n", - "from matplotlib import gridspec\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "import py3Dmol\n", - "import torch\n", - "import shutil\n", - "import tqdm\n", - "import tqdm.notebook\n", - "\n", - "TQDM_BAR_FORMAT = '{l_bar}{bar}| {n_fmt}/{total_fmt} [elapsed: {elapsed} remaining: {remaining}]'\n", - "\n", - "# Prevent shell magic being broken by openmm, prevent this cryptic error:\n", - "# \"NotImplementedError: A UTF-8 locale is required. Got ANSI_X3.4-1968\"\n", - "import locale\n", - "def getpreferredencoding(do_setlocale = True):\n", - " return \"UTF-8\"\n", - "locale.getpreferredencoding = getpreferredencoding\n", - "\n", - "from openfold import config\n", - "from openfold.data import feature_pipeline\n", - "from openfold.data import parsers\n", - "from openfold.data import data_pipeline\n", - "from openfold.data import msa_pairing\n", - "from openfold.data import feature_processing_multimer\n", - "from openfold.data.tools import jackhmmer\n", - "from openfold.model import model\n", - "from openfold.np import protein\n", - "from openfold.np.relax import relax\n", - "from openfold.np.relax.utils import overwrite_b_factors\n", - "from openfold.utils.import_weights import import_jax_weights_\n", - "from openfold.utils.tensor_utils import tensor_tree_map\n", - "\n", - "from IPython import display\n", - "from ipywidgets import GridspecLayout\n", - "from ipywidgets import Output" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "W4JpOs6oA-QS" - }, - "source": [ - "## Making a prediction\n", - "\n", - "Note that the search against databases and the actual prediction can take some time, from minutes to hours, depending on the length of the protein and what type of GPU you are allocated by Colab (see FAQ below)." 
- ] - }, - { - "cell_type": "code", - "source": [ - "#@title Search against genetic databases\n", - "\n", - "#@markdown Once this cell has been executed, you will see\n", - "#@markdown statistics about the multiple sequence alignment\n", - "#@markdown (MSA) that will be used by OpenFold. In particular,\n", - "#@markdown you’ll see how well each residue is covered by similar\n", - "#@markdown sequences in the MSA.\n", - "\n", - "# --- Find the closest source --\n", - "test_url_pattern = 'https://storage.googleapis.com/alphafold-colab{:s}/latest/uniref90_2021_03.fasta.1'\n", - "ex = futures.ThreadPoolExecutor(3)\n", - "def fetch(source):\n", - " request.urlretrieve(test_url_pattern.format(source))\n", - " return source\n", - "fs = [ex.submit(fetch, source) for source in ['', '-europe', '-asia']]\n", - "source = None\n", - "for f in futures.as_completed(fs):\n", - " source = f.result()\n", - " ex.shutdown()\n", - " break\n", - "\n", - "# Run the search against chunks of genetic databases (since the genetic\n", - "# databases don't fit in Colab ramdisk).\n", - "\n", - "jackhmmer_binary_path = '/usr/bin/jackhmmer'\n", - "\n", - "# --- Parse multiple sequences, if there are any ---\n", - "def split_multiple_sequences(sequence):\n", - " seqs = sequence.split(':')\n", - " sorted_seqs = sorted(seqs, key=lambda s: len(s))\n", - "\n", - " # TODO: Handle the homomer case when writing fasta sequences\n", - " fasta_path_tuples = []\n", - " for idx, seq in enumerate(set(sorted_seqs)):\n", - " fasta_path = f'target_{idx+1}.fasta'\n", - " with open(fasta_path, 'wt') as f:\n", - " f.write(f'>query\\n{seq}\\n')\n", - " fasta_path_tuples.append((seq, fasta_path))\n", - " fasta_path_by_seq = dict(fasta_path_tuples)\n", - "\n", - " return sorted_seqs, fasta_path_by_seq\n", - "\n", - "sequences, fasta_path_by_sequence = split_multiple_sequences(input_sequence)\n", - "db_results_by_sequence = {seq: {} for seq in fasta_path_by_sequence.keys()}\n", - "\n", - "DB_ROOT_PATH = f'https://storage.googleapis.com/alphafold-colab{source}/latest/'\n", - "db_configs = {}\n", - "db_configs['smallbfd'] = {\n", - " 'database_path': f'{DB_ROOT_PATH}uniref90_2021_03.fasta',\n", - " 'z_value': 65984053,\n", - " 'num_jackhmmer_chunks': 17,\n", - "}\n", - "db_configs['mgnify'] = {\n", - " 'database_path': f'{DB_ROOT_PATH}mgy_clusters_2022_05.fasta',\n", - " 'z_value': 304820129,\n", - " 'num_jackhmmer_chunks': 120,\n", - "}\n", - "db_configs['uniref90'] = {\n", - " 'database_path': f'{DB_ROOT_PATH}uniref90_2022_01.fasta',\n", - " 'z_value': 144113457,\n", - " 'num_jackhmmer_chunks': 62,\n", - "}\n", - "\n", - "# Search UniProt and construct the all_seq features only for heteromers, not homomers.\n", - "if model_type == ModelType.MULTIMER and len(set(sequences)) > 1:\n", - " db_configs['uniprot'] = {\n", - " 'database_path': f'{DB_ROOT_PATH}uniprot_2021_04.fasta',\n", - " 'z_value': 225013025 + 565928,\n", - " 'num_jackhmmer_chunks': 101,\n", - " }\n", - "\n", - "total_jackhmmer_chunks = sum([d['num_jackhmmer_chunks'] for d in db_configs.values()])\n", - "with tqdm.notebook.tqdm(total=total_jackhmmer_chunks, bar_format=TQDM_BAR_FORMAT) as pbar:\n", - " def jackhmmer_chunk_callback(i):\n", - " pbar.update(n=1)\n", - "\n", - " for db_name, db_config in db_configs.items():\n", - " pbar.set_description(f'Searching {db_name}')\n", - " jackhmmer_runner = jackhmmer.Jackhmmer(\n", - " binary_path=jackhmmer_binary_path,\n", - " database_path=db_config['database_path'],\n", - " get_tblout=True,\n", - " 
num_streamed_chunks=db_config['num_jackhmmer_chunks'],\n", - " streaming_callback=jackhmmer_chunk_callback,\n", - " z_value=db_config['z_value'])\n", - "\n", - " db_results = jackhmmer_runner.query_multiple(fasta_path_by_sequence.values())\n", - " for seq, result in zip(fasta_path_by_sequence.keys(), db_results):\n", - " db_results_by_sequence[seq][db_name] = result\n", - "\n", - "\n", - "# --- Extract the MSAs and visualize ---\n", - "# Extract the MSAs from the Stockholm files.\n", - "# NB: deduplication happens later in data_pipeline.make_msa_features.\n", - "\n", - "MAX_HITS_BY_DB = {\n", - " 'uniref90': 10000,\n", - " 'smallbfd': 5000,\n", - " 'mgnify': 501,\n", - " 'uniprot': 50000,\n", - "}\n", - "\n", - "msas_by_seq_by_db = {seq: {} for seq in sequences}\n", - "full_msa_by_seq = {seq: [] for seq in sequences}\n", - "\n", - "for seq, sequence_result in db_results_by_sequence.items():\n", - " print(f'parsing_results_for_sequence {seq}')\n", - " for db_name, db_results in sequence_result.items():\n", - " unsorted_results = []\n", - " for i, result in enumerate(db_results):\n", - " msa_obj = parsers.parse_stockholm(result['sto'])\n", - " e_values_dict = parsers.parse_e_values_from_tblout(result['tbl'])\n", - " target_names = msa_obj.descriptions\n", - " e_values = [e_values_dict[t.split('/')[0]] for t in target_names]\n", - " zipped_results = zip(msa_obj.sequences, msa_obj.deletion_matrix, target_names, e_values)\n", - " if i != 0:\n", - " # Only take query from the first chunk\n", - " zipped_results = [x for x in zipped_results if x[2] != 'query']\n", - " unsorted_results.extend(zipped_results)\n", - " sorted_by_evalue = sorted(unsorted_results, key=lambda x: x[3])\n", - " msas, del_matrix, targets, _ = zip(*sorted_by_evalue)\n", - " db_msas = parsers.Msa(msas, del_matrix, targets)\n", - " if db_msas:\n", - " if db_name in MAX_HITS_BY_DB:\n", - " db_msas.truncate(MAX_HITS_BY_DB[db_name])\n", - " msas_by_seq_by_db[seq][db_name] = db_msas\n", - " full_msa_by_seq[seq].extend(db_msas.sequences)\n", - " msa_size = len(set(db_msas.sequences))\n", - " print(f'{msa_size} Sequences Found in {db_name}')\n", - "\n", - "\n", - "fig = plt.figure(figsize=(12, 3))\n", - "max_num_alignments = 0\n", - "\n", - "for seq_idx, seq in enumerate(set(sequences)):\n", - " full_msas = full_msa_by_seq[seq]\n", - " deduped_full_msa = list(dict.fromkeys(full_msas))\n", - " total_msa_size = len(deduped_full_msa)\n", - " print(f'\\n{total_msa_size} Sequences Found in Total\\n')\n", - "\n", - " aa_map = {restype: i for i, restype in enumerate('ABCDEFGHIJKLMNOPQRSTUVWXYZ-')}\n", - " msa_arr = np.array([[aa_map[aa] for aa in seq] for seq in deduped_full_msa])\n", - " num_alignments, num_res = msa_arr.shape\n", - " plt.plot(np.sum(msa_arr != aa_map['-'], axis=0), label=f'Chain {seq_idx}')\n", - " max_num_alignments = max(num_alignments, max_num_alignments)\n", - "\n", - "\n", - "plt.title('Per-Residue Count of Non-Gap Amino Acids in the MSA')\n", - "plt.ylabel('Non-Gap Count')\n", - "plt.yticks(range(0, max_num_alignments + 1, max(1, int(max_num_alignments / 3))))\n", - "plt.legend()\n", - "plt.show()" - ], - "metadata": { - "id": "o7BqQN_gfYtq" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "XUo6foMQxwS2" - }, - "outputs": [], - "source": [ - "#@title Run OpenFold and download prediction\n", - "\n", - "#@markdown Once this cell has been executed, a zip-archive with\n", - "#@markdown the obtained prediction will be automatically 
downloaded\n", - "#@markdown to your computer.\n", - "\n", - "# Color bands for visualizing plddt\n", - "PLDDT_BANDS = [\n", - " (0, 50, '#FF7D45'),\n", - " (50, 70, '#FFDB13'),\n", - " (70, 90, '#65CBF3'),\n", - " (90, 100, '#0053D6')\n", - "]\n", - "\n", - "# --- Run the model ---\n", - "if model_type == ModelType.MONOMER:\n", - " model_names = [\n", - " 'finetuning_3.pt',\n", - " 'finetuning_4.pt',\n", - " 'finetuning_5.pt',\n", - " 'finetuning_ptm_2.pt',\n", - " 'finetuning_no_templ_ptm_1.pt'\n", - " ]\n", - "elif model_type == ModelType.MULTIMER:\n", - " model_names = [\n", - " 'model_1_multimer_v3',\n", - " 'model_2_multimer_v3',\n", - " 'model_3_multimer_v3',\n", - " 'model_4_multimer_v3',\n", - " 'model_5_multimer_v3',\n", - " ]\n", - "\n", - "def _placeholder_template_feats(num_templates_, num_res_):\n", - " return {\n", - " 'template_aatype': np.zeros((num_templates_, num_res_, 22), dtype=np.int64),\n", - " 'template_all_atom_positions': np.zeros((num_templates_, num_res_, 37, 3), dtype=np.float32),\n", - " 'template_all_atom_mask': np.zeros((num_templates_, num_res_, 37), dtype=np.float32),\n", - " 'template_domain_names': np.zeros((num_templates_,), dtype=np.float32),\n", - " 'template_sum_probs': np.zeros((num_templates_, 1), dtype=np.float32),\n", - " }\n", - "\n", - "\n", - "def make_features(\n", - " sequences: Sequence[str],\n", - " msas_by_seq_by_db: Dict[str, Dict[str, parsers.Msa]],\n", - " model_type: ModelType):\n", - " num_templates = 1 # Placeholder for generating fake template features\n", - " feature_dict = {}\n", - "\n", - " for idx, seq in enumerate(sequences, start=1):\n", - " _chain_id = f'chain_{idx}'\n", - " num_res = len(seq)\n", - "\n", - " feats = data_pipeline.make_sequence_features(seq, _chain_id, num_res)\n", - " msas_without_uniprot = [msas_by_seq_by_db[seq][db] for db in db_configs.keys() if db != 'uniprot']\n", - " msa_feats = data_pipeline.make_msa_features(msas_without_uniprot)\n", - " feats.update(msa_feats)\n", - " feats.update(_placeholder_template_feats(num_templates, num_res))\n", - "\n", - " if model_type == ModelType.MONOMER:\n", - " feature_dict[seq] = feats\n", - " if model_type == ModelType.MULTIMER:\n", - " # Perform extra pair processing steps for heteromers\n", - " if len(set(sequences)) > 1:\n", - " uniprot_msa = msas_by_seq_by_db[seq]['uniprot']\n", - " uniprot_msa_features = data_pipeline.make_msa_features([uniprot_msa])\n", - " valid_feat_names = msa_pairing.MSA_FEATURES + (\n", - " 'msa_species_identifiers',\n", - " )\n", - " pair_feats = {\n", - " f'{k}_all_seq': v for k, v in uniprot_msa_features.items()\n", - " if k in valid_feat_names\n", - " }\n", - " feats.update(pair_feats)\n", - "\n", - " feats = data_pipeline.convert_monomer_features(feats, _chain_id)\n", - " feature_dict[_chain_id] = feats\n", - "\n", - " if model_type == ModelType.MONOMER:\n", - " np_example = feature_dict[sequences[0]]\n", - " elif model_type == ModelType.MULTIMER:\n", - " all_chain_feats = data_pipeline.add_assembly_features(feature_dict)\n", - " features = feature_processing_multimer.pair_and_merge(all_chain_features=all_chain_feats)\n", - " np_example = data_pipeline.pad_msa(features, 512)\n", - "\n", - " return np_example\n", - "\n", - "\n", - "output_dir = 'prediction'\n", - "os.makedirs(output_dir, exist_ok=True)\n", - "\n", - "plddts = {}\n", - "pae_outputs = {}\n", - "weighted_ptms = {}\n", - "unrelaxed_proteins = {}\n", - "\n", - "with tqdm.notebook.tqdm(total=len(model_names), bar_format=TQDM_BAR_FORMAT) as pbar:\n", - " for i, model_name in 
enumerate(model_names, start = 1):\n", - " pbar.set_description(f'Running {model_name}')\n", - "\n", - " feature_dict = make_features(sequences, msas_by_seq_by_db, model_type)\n", - "\n", - " if(weight_set == \"AlphaFold\"):\n", - " if model_type == ModelType.MONOMER:\n", - " config_preset = f\"model_{i}\"\n", - " elif model_type == ModelType.MULTIMER:\n", - " config_preset = f'model_{i}_multimer_v3'\n", - " else:\n", - " if(\"_no_templ_\" in model_name):\n", - " config_preset = \"model_3\"\n", - " else:\n", - " config_preset = \"model_1\"\n", - " if(\"_ptm_\" in model_name):\n", - " config_preset += \"_ptm\"\n", - "\n", - " cfg = config.model_config(config_preset)\n", - "\n", - " # Force the model to only use 3 recycling updates\n", - " cfg.data.common.max_recycling_iters = 3\n", - " cfg.model.recycle_early_stop_tolerance = -1\n", - "\n", - " openfold_model = model.AlphaFold(cfg)\n", - " openfold_model = openfold_model.eval()\n", - " if(weight_set == \"AlphaFold\"):\n", - " params_name = os.path.join(\n", - " ALPHAFOLD_PARAMS_DIR, f\"params_{config_preset}.npz\"\n", - " )\n", - " import_jax_weights_(openfold_model, params_name, version=config_preset)\n", - " elif(weight_set == \"OpenFold\"):\n", - " params_name = os.path.join(\n", - " OPENFOLD_PARAMS_DIR,\n", - " model_name,\n", - " )\n", - " d = torch.load(params_name)\n", - " openfold_model.load_state_dict(d)\n", - " else:\n", - " raise ValueError(f\"Invalid weight set: {weight_set}\")\n", - "\n", - " openfold_model = openfold_model.cuda()\n", - "\n", - " pipeline = feature_pipeline.FeaturePipeline(cfg.data)\n", - " processed_feature_dict = pipeline.process_features(\n", - " feature_dict,\n", - " mode='predict',\n", - " is_multimer = (model_type == ModelType.MULTIMER),\n", - " )\n", - "\n", - " processed_feature_dict = tensor_tree_map(\n", - " lambda t: t.cuda(), processed_feature_dict\n", - " )\n", - "\n", - " with torch.no_grad():\n", - " prediction_result = openfold_model(processed_feature_dict)\n", - "\n", - " # Move the batch and output to np for further processing\n", - " processed_feature_dict = tensor_tree_map(\n", - " lambda t: np.array(t[..., -1].cpu()), processed_feature_dict\n", - " )\n", - " prediction_result = tensor_tree_map(\n", - " lambda t: np.array(t.cpu()), prediction_result\n", - " )\n", - "\n", - " mean_plddt = prediction_result['plddt'].mean()\n", - "\n", - " if model_type == ModelType.MONOMER:\n", - " if 'predicted_aligned_error' in prediction_result:\n", - " pae_outputs[model_name] = (\n", - " prediction_result['predicted_aligned_error'],\n", - " prediction_result['max_predicted_aligned_error']\n", - " )\n", - " else:\n", - " # Get the pLDDT confidence metrics. 
Do not put pTM models here as they\n", - " # should never get selected.\n", - " plddts[model_name] = prediction_result['plddt']\n", - " elif model_type == ModelType.MULTIMER:\n", - " # Multimer models are sorted by pTM+ipTM.\n", - " plddts[model_name] = prediction_result['plddt']\n", - " pae_outputs[model_name] = (prediction_result['predicted_aligned_error'],\n", - " prediction_result['max_predicted_aligned_error'])\n", - "\n", - " weighted_ptms[model_name] = prediction_result['weighted_ptm_score']\n", - "\n", - " # Set the b-factors to the per-residue plddt.\n", - " final_atom_mask = prediction_result['final_atom_mask']\n", - " b_factors = prediction_result['plddt'][:, None] * final_atom_mask\n", - " unrelaxed_protein = protein.from_prediction(\n", - " processed_feature_dict,\n", - " prediction_result,\n", - " remove_leading_feature_dimension=False,\n", - " b_factors=b_factors,\n", - " )\n", - " unrelaxed_proteins[model_name] = unrelaxed_protein\n", - "\n", - " # Delete unused outputs to save memory.\n", - " del openfold_model\n", - " del processed_feature_dict\n", - " del prediction_result\n", - " pbar.update(n=1)\n", - "\n", - " # Find the best model according to the mean pLDDT.\n", - " if model_type == ModelType.MONOMER:\n", - " best_model_name = max(plddts.keys(), key=lambda x: plddts[x].mean())\n", - " elif model_type == ModelType.MULTIMER:\n", - " best_model_name = max(weighted_ptms.keys(), key=lambda x: weighted_ptms[x])\n", - " best_pdb = protein.to_pdb(unrelaxed_proteins[best_model_name])\n", - "\n", - " # --- AMBER relax the best model ---\n", - " if(relax_prediction):\n", - " pbar.set_description(f'AMBER relaxation')\n", - " amber_relaxer = relax.AmberRelaxation(\n", - " max_iterations=0,\n", - " tolerance=2.39,\n", - " stiffness=10.0,\n", - " exclude_residues=[],\n", - " max_outer_iterations=20,\n", - " use_gpu=True,\n", - " )\n", - " relaxed_pdb, _, _ = amber_relaxer.process(\n", - " prot=unrelaxed_proteins[best_model_name]\n", - " )\n", - " best_pdb = relaxed_pdb\n", - "\n", - " # Write out the prediction\n", - " pred_output_path = os.path.join(output_dir, 'selected_prediction.pdb')\n", - " with open(pred_output_path, 'w') as f:\n", - " f.write(best_pdb)\n", - "\n", - " pbar.update(n=1) # Finished AMBER relax.\n", - "\n", - "# Construct multiclass b-factors to indicate confidence bands\n", - "# 0=very low, 1=low, 2=confident, 3=very high\n", - "banded_b_factors = []\n", - "for plddt in plddts[best_model_name]:\n", - " for idx, (min_val, max_val, _) in enumerate(PLDDT_BANDS):\n", - " if plddt >= min_val and plddt <= max_val:\n", - " banded_b_factors.append(idx)\n", - " break\n", - "banded_b_factors = np.array(banded_b_factors)[:, None] * final_atom_mask\n", - "to_visualize_pdb = overwrite_b_factors(best_pdb, banded_b_factors)\n", - "\n", - "# --- Visualise the prediction & confidence ---\n", - "show_sidechains = True\n", - "def plot_plddt_legend():\n", - " \"\"\"Plots the legend for pLDDT.\"\"\"\n", - " thresh = [\n", - " 'Very low (pLDDT < 50)',\n", - " 'Low (70 > pLDDT > 50)',\n", - " 'Confident (90 > pLDDT > 70)',\n", - " 'Very high (pLDDT > 90)']\n", - "\n", - " colors = [x[2] for x in PLDDT_BANDS]\n", - "\n", - " plt.figure(figsize=(2, 2))\n", - " for c in colors:\n", - " plt.bar(0, 0, color=c)\n", - " plt.legend(thresh, frameon=False, loc='center', fontsize=20)\n", - " plt.xticks([])\n", - " plt.yticks([])\n", - " ax = plt.gca()\n", - " ax.spines['right'].set_visible(False)\n", - " ax.spines['top'].set_visible(False)\n", - " ax.spines['left'].set_visible(False)\n", - " 
ax.spines['bottom'].set_visible(False)\n", - " plt.title('Model Confidence', fontsize=20, pad=20)\n", - " return plt\n", - "\n", - "# Show the structure coloured by chain if the multimer model has been used.\n", - "if model_type == ModelType.MULTIMER:\n", - " multichain_view = py3Dmol.view(width=800, height=600)\n", - " multichain_view.addModelsAsFrames(to_visualize_pdb)\n", - " multichain_style = {'cartoon': {'colorscheme': 'chain'}}\n", - " multichain_view.setStyle({'model': -1}, multichain_style)\n", - " multichain_view.zoomTo()\n", - " multichain_view.show()\n", - "\n", - "# Color the structure by per-residue pLDDT\n", - "color_map = {i: bands[2] for i, bands in enumerate(PLDDT_BANDS)}\n", - "view = py3Dmol.view(width=800, height=600)\n", - "view.addModelsAsFrames(to_visualize_pdb)\n", - "style = {'cartoon': {\n", - " 'colorscheme': {\n", - " 'prop': 'b',\n", - " 'map': color_map}\n", - " }}\n", - "if show_sidechains:\n", - " style['stick'] = {}\n", - "view.setStyle({'model': -1}, style)\n", - "view.zoomTo()\n", - "\n", - "grid = GridspecLayout(1, 2)\n", - "out = Output()\n", - "with out:\n", - " view.show()\n", - "grid[0, 0] = out\n", - "\n", - "out = Output()\n", - "with out:\n", - " plot_plddt_legend().show()\n", - "grid[0, 1] = out\n", - "\n", - "display.display(grid)\n", - "\n", - "# Display pLDDT and predicted aligned error (if output by the model).\n", - "if pae_outputs:\n", - " num_plots = 2\n", - "else:\n", - " num_plots = 1\n", - "\n", - "plt.figure(figsize=[8 * num_plots, 6])\n", - "plt.subplot(1, num_plots, 1)\n", - "plt.plot(plddts[best_model_name])\n", - "plt.title('Predicted LDDT')\n", - "plt.xlabel('Residue')\n", - "plt.ylabel('pLDDT')\n", - "\n", - "if num_plots == 2:\n", - " plt.subplot(1, 2, 2)\n", - " pae, max_pae = list(pae_outputs.values())[0]\n", - " plt.imshow(pae, vmin=0., vmax=max_pae, cmap='Greens_r')\n", - " plt.colorbar(fraction=0.046, pad=0.04)\n", - "\n", - " # Display lines at chain boundaries.\n", - " best_unrelaxed_prot = unrelaxed_proteins[best_model_name]\n", - " total_num_res = best_unrelaxed_prot.residue_index.shape[-1]\n", - " chain_ids = best_unrelaxed_prot.chain_index\n", - " for chain_boundary in np.nonzero(chain_ids[:-1] - chain_ids[1:]):\n", - " if chain_boundary.size:\n", - " plt.plot([0, total_num_res], [chain_boundary, chain_boundary], color='red')\n", - " plt.plot([chain_boundary, chain_boundary], [0, total_num_res], color='red')\n", - " plt.title('Predicted Aligned Error')\n", - " plt.xlabel('Scored residue')\n", - " plt.ylabel('Aligned residue')\n", - "\n", - "# Save pLDDT and predicted aligned error (if it exists)\n", - "pae_output_path = os.path.join(output_dir, 'predicted_aligned_error.json')\n", - "if pae_outputs:\n", - " # Save predicted aligned error in the same format as the AF EMBL DB\n", - " rounded_errors = np.round(pae.astype(np.float64), decimals=1)\n", - " indices = np.indices((len(rounded_errors), len(rounded_errors))) + 1\n", - " indices_1 = indices[0].flatten().tolist()\n", - " indices_2 = indices[1].flatten().tolist()\n", - " pae_data = json.dumps([{\n", - " 'residue1': indices_1,\n", - " 'residue2': indices_2,\n", - " 'distance': rounded_errors.flatten().tolist(),\n", - " 'max_predicted_aligned_error': max_pae.item()\n", - " }],\n", - " indent=None,\n", - " separators=(',', ':'))\n", - " with open(pae_output_path, 'w') as f:\n", - " f.write(pae_data)\n", - "\n", - "\n", - "# --- Download the predictions ---\n", - "shutil.make_archive(base_name='prediction', format='zip', root_dir=output_dir)\n", - 
"files.download(f'{output_dir}.zip')" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "lUQAn5LYC5n4" - }, - "source": [ - "### Interpreting the prediction\n", - "\n", - "Please see the [AlphaFold methods paper](https://www.nature.com/articles/s41586-021-03819-2) and the [AlphaFold predictions of the human proteome paper](https://www.nature.com/articles/s41586-021-03828-1), as well as [DeepMind's FAQ](https://alphafold.ebi.ac.uk/faq) on how to interpret AlphaFold/OpenFold predictions. More information about the predictions of the AlphaFold Multimer model can be found in the [Alphafold Multimer paper](https://www.biorxiv.org/content/10.1101/2022.03.11.484043v3.full.pdf)." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "jeb2z8DIA4om" - }, - "source": [ - "## FAQ & Troubleshooting\n", - "\n", - "\n", - "* How do I get a predicted protein structure for my protein?\n", - " * Click on the _Connect_ button on the top right to get started.\n", - " * Paste the amino acid sequence of your protein (without any headers) into the “Enter the amino acid sequence to fold”.\n", - " * Run all cells in the Colab, either by running them individually (with the play button on the left side) or via _Runtime_ > _Run all._\n", - " * The predicted protein structure will be downloaded once all cells have been executed. Note: This can take minutes to hours - see below.\n", - "* How long will this take?\n", - " * Downloading the OpenFold source code can take up to a few minutes.\n", - " * Downloading and installing the third-party software can take up to a few minutes.\n", - " * The search against genetic databases can take minutes to hours.\n", - " * Running OpenFold and generating the prediction can take minutes to hours, depending on the length of your protein and on which GPU-type Colab has assigned you.\n", - "* My Colab no longer seems to be doing anything, what should I do?\n", - " * Some steps may take minutes to hours to complete.\n", - " * Sometimes, running the \"installation\" cells more than once can corrupt the OpenFold installation.\n", - " * If nothing happens or if you receive an error message, try restarting your Colab runtime via _Runtime_ > _Restart runtime_.\n", - " * If this doesn’t help, reset your Colab runtime via _Runtime_ > _Factory reset runtime_.\n", - "* How does what's run in this notebook compare to the full versions of Alphafold/Openfold?\n", - " * This Colab version of OpenFold searches a selected portion of the BFD dataset and currently doesn’t use templates, so its accuracy is reduced in comparison to the full version, which is analogous to what's described in the [AlphaFold paper](https://doi.org/10.1038/s41586-021-03819-2) and [Github repo](https://github.com/deepmind/alphafold/). The full version of OpenFold can be run from our own [GitHub repo](https://github.com/aqlaboratory/openfold).\n", - "* What is a Colab?\n", - " * See the [Colab FAQ](https://research.google.com/colaboratory/faq.html).\n", - "* I received a warning “Notebook requires high RAM”, what do I do?\n", - " * The resources allocated to your Colab vary. See the [Colab FAQ](https://research.google.com/colaboratory/faq.html) for more details.\n", - " * You can execute the Colab nonetheless.\n", - "* I received an error “Colab CPU runtime not supported” or “No GPU/TPU found”, what do I do?\n", - " * Colab CPU runtime is not supported. 
Try changing your runtime via _Runtime_ > _Change runtime type_ > _Hardware accelerator_ > _GPU_.\n", - " * The type of GPU allocated to your Colab varies. See the [Colab FAQ](https://research.google.com/colaboratory/faq.html) for more details.\n", - " * If you receive “Cannot connect to GPU backend”, you can try again later to see if Colab allocates you a GPU.\n", - " * [Colab Pro](https://colab.research.google.com/signup) offers priority access to GPUs.\n", - "* Does this tool install anything on my computer?\n", - " * No, everything happens in the cloud on Google Colab.\n", - " * At the end of the Colab execution a zip-archive with the obtained prediction will be automatically downloaded to your computer.\n", - "* How should I share feedback and bug reports?\n", - " * Please share any feedback and bug reports as an [issue](https://github.com/aqlaboratory/openfold/issues) on Github.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "YfPhvYgKC81B" - }, - "source": [ - "# License and Disclaimer\n", - "\n", - "This Colab notebook and other information provided is for theoretical modelling only, caution should be exercised in its use. It is provided ‘as-is’ without any warranty of any kind, whether expressed or implied. Information is not intended to be a substitute for professional medical advice, diagnosis, or treatment, and does not constitute medical or other professional advice.\n", - "\n", - "## AlphaFold/OpenFold Code License\n", - "\n", - "Copyright 2021 AlQuraishi Laboratory\n", - "\n", - "Copyright 2021 DeepMind Technologies Limited.\n", - "\n", - "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0.\n", - "\n", - "Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n", - "\n", - "## Model Parameters License\n", - "\n", - "DeepMind's AlphaFold parameters are made available under the terms of the Creative Commons Attribution 4.0 International (CC BY 4.0) license. You can find details at: https://creativecommons.org/licenses/by/4.0/legalcode\n", - "\n", - "\n", - "## Third-party software\n", - "\n", - "Use of the third-party software, libraries or code referred to in this notebook may be governed by separate terms and conditions or license provisions. Your use of the third-party software, libraries or code is subject to any such terms and you should check that you can comply with any applicable restrictions or terms and conditions before use.\n", - "\n", - "\n", - "## Mirrored Databases\n", - "\n", - "The following databases have been mirrored by DeepMind, and are available with reference to the following:\n", - "* UniRef90: v2021\\_03 (unmodified), by The UniProt Consortium, available under a [Creative Commons Attribution-NoDerivatives 4.0 International License](http://creativecommons.org/licenses/by-nd/4.0/).\n", - "* MGnify: v2019\\_05 (unmodified), by Mitchell AL et al., available free of all copyright restrictions and made fully and freely available for both non-commercial and commercial use under [CC0 1.0 Universal (CC0 1.0) Public Domain Dedication](https://creativecommons.org/publicdomain/zero/1.0/).\n", - "* BFD: (modified), by Steinegger M. 
and Söding J., modified by DeepMind, available under a [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by/4.0/). See the Methods section of the [AlphaFold proteome paper](https://www.nature.com/articles/s41586-021-03828-1) for details." - ] - } - ], - "metadata": { - "colab": { - "provenance": [], - "gpuType": "T4", - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - }, - "language_info": { - "name": "python" - }, - "accelerator": "GPU" + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github" + }, + "source": [ + "\"Open" + ] }, - "nbformat": 4, - "nbformat_minor": 0 -} \ No newline at end of file + { + "cell_type": "markdown", + "metadata": { + "id": "pc5-mbsX9PZC" + }, + "source": [ + "# OpenFold Colab\n", + "\n", + "Runs a simplified version of [OpenFold](https://github.com/aqlaboratory/openfold) on a target sequence. Adapted from DeepMind's [official AlphaFold Colab](https://colab.research.google.com/github/deepmind/alphafold/blob/main/notebooks/AlphaFold.ipynb).\n", + "\n", + "**Differences to AlphaFold v2.0**\n", + "\n", + "OpenFold is a trainable PyTorch reimplementation of AlphaFold 2. For the purposes of inference, it is practically identical to the original (\"practically\" because ensembling is excluded from OpenFold (recycling is enabled, however)).\n", + "\n", + "In this notebook, OpenFold is run with your choice of our original OpenFold parameters or DeepMind's publicly released parameters for AlphaFold 2.\n", + "\n", + "**Note**\n", + "\n", + "Like DeepMind's official Colab, this notebook uses **no templates (homologous structures)** and a selected portion of the full [BFD database](https://bfd.mmseqs.com/).\n", + "\n", + "**Citing this work**\n", + "\n", + "Any publication that discloses findings arising from using this notebook should [cite](https://github.com/deepmind/alphafold/#citing-this-work) DeepMind's [AlphaFold paper](https://doi.org/10.1038/s41586-021-03819-2).\n", + "\n", + "**Licenses**\n", + "\n", + "This Colab supports inference with the [AlphaFold model parameters](https://github.com/deepmind/alphafold/#model-parameters-license), made available under the Creative Commons Attribution 4.0 International ([CC BY 4.0](https://creativecommons.org/licenses/by/4.0/legalcode)) license. The Colab itself is provided under the [Apache 2.0 license](https://www.apache.org/licenses/LICENSE-2.0). See the full license statement below.\n", + "\n", + "**More information**\n", + "\n", + "You can find more information about how AlphaFold/OpenFold works in DeepMind's two Nature papers:\n", + "\n", + "* [AlphaFold methods paper](https://www.nature.com/articles/s41586-021-03819-2)\n", + "* [AlphaFold predictions of the human proteome paper](https://www.nature.com/articles/s41586-021-03828-1)\n", + "\n", + "FAQ on how to interpret AlphaFold/OpenFold predictions are [here](https://alphafold.ebi.ac.uk/faq)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "rowN0bVYLe9n" + }, + "source": [ + "#@markdown ### Enter the amino acid sequence to fold ⬇️\n", + "#@markdown For multiple sequences, separate sequences with a colon `:`\n", + "input_sequence = 'MKLKQVADKLEEVASKLYHNANELARVAKLLGER:MKLKQVADKLEEVASKLYHNANELARVAKLLGER: MKLKQVADKLEEVASKLYHNANELARVAKLLGER:MKLKQVADKLEEVASKLYHNANELARVAKLLGER' #@param {type:\"string\"}\n", + "\n", + "#@markdown ### Configure the model ⬇️\n", + "\n", + "weight_set = 'AlphaFold' #@param [\"OpenFold\", \"AlphaFold\"]\n", + "model_mode = 'multimer' #@param [\"monomer\", \"multimer\"]\n", + "relax_prediction = True #@param {type:\"boolean\"}\n", + "\n", + "\n", + "# Remove all whitespaces, tabs and end lines; upper-case\n", + "input_sequence = input_sequence.translate(str.maketrans('', '', ' \\n\\t')).upper()\n", + "aatypes = set('ACDEFGHIKLMNPQRSTVWY') # 20 standard aatypes\n", + "allowed_chars = aatypes.union({':'})\n", + "if not set(input_sequence).issubset(allowed_chars):\n", + " raise Exception(f'Input sequence contains non-amino acid letters: {set(input_sequence) - allowed_chars}. OpenFold only supports 20 standard amino acids as inputs.')\n", + "\n", + "if ':' in input_sequence and weight_set != 'AlphaFold':\n", + " raise ValueError('Input sequence is a multimer, must select Alphafold weight set')\n", + "\n", + "import enum\n", + "@enum.unique\n", + "class ModelType(enum.Enum):\n", + " MONOMER = 0\n", + " MULTIMER = 1\n", + "\n", + "model_type_dict = {\n", + " 'monomer': ModelType.MONOMER,\n", + " 'multimer': ModelType.MULTIMER,\n", + "}\n", + "\n", + "model_type = model_type_dict[model_mode]\n", + "print(f'Length of input sequence : {len(input_sequence.replace(\":\", \"\"))}')\n", + "#@markdown After making your selections, execute this cell by pressing the\n", + "#@markdown *Play* button on the left." 
+ ], + "outputs": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "woIxeCPygt7K" + }, + "source": [ + "#@title Install third-party software\n", + "#@markdown Please execute this cell by pressing the *Play* button on\n", + "#@markdown the left.\n", + "\n", + "\n", + "#@markdown **Note**: This installs the software on the Colab\n", + "#@markdown notebook in the cloud and not on your computer.\n", + "\n", + "import os, time\n", + "from IPython.utils import io\n", + "from sys import version_info\n", + "import subprocess\n", + "\n", + "python_version = f\"{version_info.major}.{version_info.minor}\"\n", + "\n", + "\n", + "os.system(\"wget -qnc https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-Linux-x86_64.sh\")\n", + "os.system(\"bash Mambaforge-Linux-x86_64.sh -bfp /usr/local\")\n", + "os.system(\"mamba config --set auto_update_conda false\")\n", + "os.system(f\"mamba install -y -c conda-forge -c bioconda kalign2=2.04 hhsuite=3.3.0 openmm=7.7.0 python={python_version} pdbfixer biopython=1.79\")\n", + "os.system(\"pip install -q torch ml_collections py3Dmol modelcif\")\n", + "\n", + "try:\n", + " with io.capture_output() as captured:\n", + "\n", + " # Create a ramdisk to store a database chunk to make Jackhmmer run fast.\n", + " %shell sudo apt install --quiet --yes hmmer\n", + " %shell sudo mkdir -m 777 --parents /tmp/ramdisk\n", + " %shell sudo mount -t tmpfs -o size=9G ramdisk /tmp/ramdisk\n", + "\n", + " %shell wget -q -P /content \\\n", + " https://git.scicore.unibas.ch/schwede/openstructure/-/raw/7102c63615b64735c4941278d92b554ec94415f8/modules/mol/alg/src/stereo_chemical_props.txt\n", + "\n", + " %shell mkdir -p /content/openfold/openfold/resources\n", + "\n", + " commit = \"e2e19f16676b1a409f9ba3a6f69b11ee7f5887c2\"\n", + " os.system(f\"pip install -q git+https://github.com/aqlaboratory/openfold.git@{commit}\")\n", + "\n", + " os.system(f\"cp -f -p /content/stereo_chemical_props.txt /usr/local/lib/python{python_version}/site-packages/openfold/resources/\")\n", + "\n", + "except subprocess.CalledProcessError as captured:\n", + " print(captured)" + ], + "outputs": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "VzJ5iMjTtoZw" + }, + "source": [ + "#@title Download model weights\n", + "#@markdown Please execute this cell by pressing the *Play* button on\n", + "#@markdown the left.\n", + "\n", + "# Define constants\n", + "GIT_REPO='https://github.com/aqlaboratory/openfold'\n", + "ALPHAFOLD_PARAM_SOURCE_URL = 'https://storage.googleapis.com/alphafold/alphafold_params_2022-12-06.tar'\n", + "OPENFOLD_PARAMS_DIR = './openfold/openfold/resources/openfold_params'\n", + "ALPHAFOLD_PARAMS_DIR = './openfold/openfold/resources/params'\n", + "ALPHAFOLD_PARAMS_PATH = os.path.join(\n", + " ALPHAFOLD_PARAMS_DIR, os.path.basename(ALPHAFOLD_PARAM_SOURCE_URL)\n", + ")\n", + "\n", + "try:\n", + " with io.capture_output() as captured:\n", + " if(weight_set == 'AlphaFold'):\n", + " %shell mkdir --parents \"{ALPHAFOLD_PARAMS_DIR}\"\n", + " %shell wget -O {ALPHAFOLD_PARAMS_PATH} {ALPHAFOLD_PARAM_SOURCE_URL}\n", + " %shell tar --extract --verbose --file=\"{ALPHAFOLD_PARAMS_PATH}\" \\\n", + " --directory=\"{ALPHAFOLD_PARAMS_DIR}\" --preserve-permissions\n", + " %shell rm \"{ALPHAFOLD_PARAMS_PATH}\"\n", + " elif(weight_set == 'OpenFold'):\n", + " # Install AWS CLI\n", + " %shell curl \"https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip\" -o \"awscliv2.zip\"\n", + " %shell unzip -qq awscliv2.zip\n", + " %shell 
sudo ./aws/install\n", + " %shell rm awscliv2.zip\n", + " %shell rm -rf ./aws\n", + " %shell mkdir --parents \"{OPENFOLD_PARAMS_DIR}\"\n", + "\n", + " %shell aws s3 cp \\\n", + " --no-sign-request \\\n", + " --region us-east-1 \\\n", + " s3://openfold/openfold_params \"{OPENFOLD_PARAMS_DIR}\" \\\n", + " --recursive\n", + " else:\n", + " raise ValueError(\"Invalid weight set\")\n", + "except subprocess.CalledProcessError as captured:\n", + " print(captured)" + ], + "outputs": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_FpxxMo-mvcP" + }, + "source": [ + "#@title Import Python packages\n", + "#@markdown Please execute this cell by pressing the *Play* button on\n", + "#@markdown the left.\n", + "\n", + "import unittest.mock\n", + "import sys\n", + "from typing import Dict, Sequence\n", + "\n", + "sys.path.insert(0, f'/usr/local/lib/python{python_version}/dist-packages/')\n", + "sys.path.insert(0, f'/usr/local/lib/python{python_version}/site-packages/')\n", + "\n", + "# Allows us to skip installing these packages\n", + "unnecessary_modules = [\n", + " \"dllogger\",\n", + " \"pytorch_lightning\",\n", + " \"pytorch_lightning.utilities\",\n", + " \"pytorch_lightning.callbacks.early_stopping\",\n", + " \"pytorch_lightning.utilities.seed\",\n", + "]\n", + "for unnecessary_module in unnecessary_modules:\n", + " sys.modules[unnecessary_module] = unittest.mock.MagicMock()\n", + "\n", + "import os\n", + "\n", + "from urllib import request\n", + "from concurrent import futures\n", + "from google.colab import files\n", + "import json\n", + "from matplotlib import gridspec\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import py3Dmol\n", + "import torch\n", + "import shutil\n", + "import tqdm\n", + "import tqdm.notebook\n", + "\n", + "TQDM_BAR_FORMAT = '{l_bar}{bar}| {n_fmt}/{total_fmt} [elapsed: {elapsed} remaining: {remaining}]'\n", + "\n", + "# Prevent shell magic being broken by openmm, prevent this cryptic error:\n", + "# \"NotImplementedError: A UTF-8 locale is required. Got ANSI_X3.4-1968\"\n", + "import locale\n", + "def getpreferredencoding(do_setlocale = True):\n", + " return \"UTF-8\"\n", + "locale.getpreferredencoding = getpreferredencoding\n", + "\n", + "from openfold import config\n", + "from openfold.data import feature_pipeline\n", + "from openfold.data import parsers\n", + "from openfold.data import data_pipeline\n", + "from openfold.data import msa_pairing\n", + "from openfold.data import feature_processing_multimer\n", + "from openfold.data.tools import jackhmmer\n", + "from openfold.model import model\n", + "from openfold.np import protein\n", + "from openfold.np.relax import relax\n", + "from openfold.np.relax.utils import overwrite_b_factors\n", + "from openfold.utils.import_weights import import_jax_weights_\n", + "from openfold.utils.tensor_utils import tensor_tree_map\n", + "\n", + "from IPython import display\n", + "from ipywidgets import GridspecLayout\n", + "from ipywidgets import Output" + ], + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "W4JpOs6oA-QS" + }, + "source": [ + "## Making a prediction\n", + "\n", + "Note that the search against databases and the actual prediction can take some time, from minutes to hours, depending on the length of the protein and what type of GPU you are allocated by Colab (see FAQ below)." 
+ ] + }, + { + "cell_type": "code", + "source": [ + "#@title Search against genetic databases\n", + "\n", + "#@markdown Once this cell has been executed, you will see\n", + "#@markdown statistics about the multiple sequence alignment\n", + "#@markdown (MSA) that will be used by OpenFold. In particular,\n", + "#@markdown you’ll see how well each residue is covered by similar\n", + "#@markdown sequences in the MSA.\n", + "\n", + "# --- Find the closest source --\n", + "test_url_pattern = 'https://storage.googleapis.com/alphafold-colab{:s}/latest/uniref90_2021_03.fasta.1'\n", + "ex = futures.ThreadPoolExecutor(3)\n", + "def fetch(source):\n", + " request.urlretrieve(test_url_pattern.format(source))\n", + " return source\n", + "fs = [ex.submit(fetch, source) for source in ['', '-europe', '-asia']]\n", + "source = None\n", + "for f in futures.as_completed(fs):\n", + " source = f.result()\n", + " ex.shutdown()\n", + " break\n", + "\n", + "# Run the search against chunks of genetic databases (since the genetic\n", + "# databases don't fit in Colab ramdisk).\n", + "\n", + "jackhmmer_binary_path = '/usr/bin/jackhmmer'\n", + "\n", + "# --- Parse multiple sequences, if there are any ---\n", + "def split_multiple_sequences(sequence):\n", + " seqs = sequence.split(':')\n", + " sorted_seqs = sorted(seqs, key=lambda s: len(s))\n", + "\n", + " # TODO: Handle the homomer case when writing fasta sequences\n", + " fasta_path_tuples = []\n", + " for idx, seq in enumerate(set(sorted_seqs)):\n", + " fasta_path = f'target_{idx+1}.fasta'\n", + " with open(fasta_path, 'wt') as f:\n", + " f.write(f'>query\\n{seq}\\n')\n", + " fasta_path_tuples.append((seq, fasta_path))\n", + " fasta_path_by_seq = dict(fasta_path_tuples)\n", + "\n", + " return sorted_seqs, fasta_path_by_seq\n", + "\n", + "sequences, fasta_path_by_sequence = split_multiple_sequences(input_sequence)\n", + "db_results_by_sequence = {seq: {} for seq in fasta_path_by_sequence.keys()}\n", + "\n", + "DB_ROOT_PATH = f'https://storage.googleapis.com/alphafold-colab{source}/latest/'\n", + "db_configs = {}\n", + "db_configs['smallbfd'] = {\n", + " 'database_path': f'{DB_ROOT_PATH}uniref90_2021_03.fasta',\n", + " 'z_value': 65984053,\n", + " 'num_jackhmmer_chunks': 17,\n", + "}\n", + "db_configs['mgnify'] = {\n", + " 'database_path': f'{DB_ROOT_PATH}mgy_clusters_2022_05.fasta',\n", + " 'z_value': 304820129,\n", + " 'num_jackhmmer_chunks': 120,\n", + "}\n", + "db_configs['uniref90'] = {\n", + " 'database_path': f'{DB_ROOT_PATH}uniref90_2022_01.fasta',\n", + " 'z_value': 144113457,\n", + " 'num_jackhmmer_chunks': 62,\n", + "}\n", + "\n", + "# Search UniProt and construct the all_seq features only for heteromers, not homomers.\n", + "if model_type == ModelType.MULTIMER and len(set(sequences)) > 1:\n", + " db_configs['uniprot'] = {\n", + " 'database_path': f'{DB_ROOT_PATH}uniprot_2021_04.fasta',\n", + " 'z_value': 225013025 + 565928,\n", + " 'num_jackhmmer_chunks': 101,\n", + " }\n", + "\n", + "total_jackhmmer_chunks = sum([d['num_jackhmmer_chunks'] for d in db_configs.values()])\n", + "with tqdm.notebook.tqdm(total=total_jackhmmer_chunks, bar_format=TQDM_BAR_FORMAT) as pbar:\n", + " def jackhmmer_chunk_callback(i):\n", + " pbar.update(n=1)\n", + "\n", + " for db_name, db_config in db_configs.items():\n", + " pbar.set_description(f'Searching {db_name}')\n", + " jackhmmer_runner = jackhmmer.Jackhmmer(\n", + " binary_path=jackhmmer_binary_path,\n", + " database_path=db_config['database_path'],\n", + " get_tblout=True,\n", + " 
num_streamed_chunks=db_config['num_jackhmmer_chunks'],\n", + " streaming_callback=jackhmmer_chunk_callback,\n", + " z_value=db_config['z_value'])\n", + "\n", + " db_results = jackhmmer_runner.query_multiple(fasta_path_by_sequence.values())\n", + " for seq, result in zip(fasta_path_by_sequence.keys(), db_results):\n", + " db_results_by_sequence[seq][db_name] = result\n", + "\n", + "\n", + "# --- Extract the MSAs and visualize ---\n", + "# Extract the MSAs from the Stockholm files.\n", + "# NB: deduplication happens later in data_pipeline.make_msa_features.\n", + "\n", + "MAX_HITS_BY_DB = {\n", + " 'uniref90': 10000,\n", + " 'smallbfd': 5000,\n", + " 'mgnify': 501,\n", + " 'uniprot': 50000,\n", + "}\n", + "\n", + "msas_by_seq_by_db = {seq: {} for seq in sequences}\n", + "full_msa_by_seq = {seq: [] for seq in sequences}\n", + "\n", + "for seq, sequence_result in db_results_by_sequence.items():\n", + " print(f'parsing_results_for_sequence {seq}')\n", + " for db_name, db_results in sequence_result.items():\n", + " unsorted_results = []\n", + " for i, result in enumerate(db_results):\n", + " msa_obj = parsers.parse_stockholm(result['sto'])\n", + " e_values_dict = parsers.parse_e_values_from_tblout(result['tbl'])\n", + " target_names = msa_obj.descriptions\n", + " e_values = [e_values_dict[t.split('/')[0]] for t in target_names]\n", + " zipped_results = zip(msa_obj.sequences, msa_obj.deletion_matrix, target_names, e_values)\n", + " if i != 0:\n", + " # Only take query from the first chunk\n", + " zipped_results = [x for x in zipped_results if x[2] != 'query']\n", + " unsorted_results.extend(zipped_results)\n", + " sorted_by_evalue = sorted(unsorted_results, key=lambda x: x[3])\n", + " msas, del_matrix, targets, _ = zip(*sorted_by_evalue)\n", + " db_msas = parsers.Msa(msas, del_matrix, targets)\n", + " if db_msas:\n", + " if db_name in MAX_HITS_BY_DB:\n", + " db_msas.truncate(MAX_HITS_BY_DB[db_name])\n", + " msas_by_seq_by_db[seq][db_name] = db_msas\n", + " full_msa_by_seq[seq].extend(db_msas.sequences)\n", + " msa_size = len(set(db_msas.sequences))\n", + " print(f'{msa_size} Sequences Found in {db_name}')\n", + "\n", + "\n", + "fig = plt.figure(figsize=(12, 3))\n", + "max_num_alignments = 0\n", + "\n", + "for seq_idx, seq in enumerate(set(sequences)):\n", + " full_msas = full_msa_by_seq[seq]\n", + " deduped_full_msa = list(dict.fromkeys(full_msas))\n", + " total_msa_size = len(deduped_full_msa)\n", + " print(f'\\n{total_msa_size} Sequences Found in Total\\n')\n", + "\n", + " aa_map = {restype: i for i, restype in enumerate('ABCDEFGHIJKLMNOPQRSTUVWXYZ-')}\n", + " msa_arr = np.array([[aa_map[aa] for aa in seq] for seq in deduped_full_msa])\n", + " num_alignments, num_res = msa_arr.shape\n", + " plt.plot(np.sum(msa_arr != aa_map['-'], axis=0), label=f'Chain {seq_idx}')\n", + " max_num_alignments = max(num_alignments, max_num_alignments)\n", + "\n", + "\n", + "plt.title('Per-Residue Count of Non-Gap Amino Acids in the MSA')\n", + "plt.ylabel('Non-Gap Count')\n", + "plt.yticks(range(0, max_num_alignments + 1, max(1, int(max_num_alignments / 3))))\n", + "plt.legend()\n", + "plt.show()" + ], + "metadata": { + "id": "o7BqQN_gfYtq" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XUo6foMQxwS2" + }, + "source": [ + "#@title Run OpenFold and download prediction\n", + "\n", + "#@markdown Once this cell has been executed, a zip-archive with\n", + "#@markdown the obtained prediction will be automatically downloaded\n", + 
"#@markdown to your computer.\n", + "\n", + "# Color bands for visualizing plddt\n", + "PLDDT_BANDS = [\n", + " (0, 50, '#FF7D45'),\n", + " (50, 70, '#FFDB13'),\n", + " (70, 90, '#65CBF3'),\n", + " (90, 100, '#0053D6')\n", + "]\n", + "\n", + "# --- Run the model ---\n", + "if model_type == ModelType.MONOMER:\n", + " model_names = [\n", + " 'finetuning_3.pt',\n", + " 'finetuning_4.pt',\n", + " 'finetuning_5.pt',\n", + " 'finetuning_ptm_2.pt',\n", + " 'finetuning_no_templ_ptm_1.pt'\n", + " ]\n", + "elif model_type == ModelType.MULTIMER:\n", + " model_names = [\n", + " 'model_1_multimer_v3',\n", + " 'model_2_multimer_v3',\n", + " 'model_3_multimer_v3',\n", + " 'model_4_multimer_v3',\n", + " 'model_5_multimer_v3',\n", + " ]\n", + "\n", + "def _placeholder_template_feats(num_templates_, num_res_):\n", + " return {\n", + " 'template_aatype': np.zeros((num_templates_, num_res_, 22), dtype=np.int64),\n", + " 'template_all_atom_positions': np.zeros((num_templates_, num_res_, 37, 3), dtype=np.float32),\n", + " 'template_all_atom_mask': np.zeros((num_templates_, num_res_, 37), dtype=np.float32),\n", + " 'template_domain_names': np.zeros((num_templates_,), dtype=np.float32),\n", + " 'template_sum_probs': np.zeros((num_templates_, 1), dtype=np.float32),\n", + " }\n", + "\n", + "\n", + "def make_features(\n", + " sequences: Sequence[str],\n", + " msas_by_seq_by_db: Dict[str, Dict[str, parsers.Msa]],\n", + " model_type: ModelType):\n", + " num_templates = 1 # Placeholder for generating fake template features\n", + " feature_dict = {}\n", + "\n", + " for idx, seq in enumerate(sequences, start=1):\n", + " _chain_id = f'chain_{idx}'\n", + " num_res = len(seq)\n", + "\n", + " feats = data_pipeline.make_sequence_features(seq, _chain_id, num_res)\n", + " msas_without_uniprot = [msas_by_seq_by_db[seq][db] for db in db_configs.keys() if db != 'uniprot']\n", + " msa_feats = data_pipeline.make_msa_features(msas_without_uniprot)\n", + " feats.update(msa_feats)\n", + " feats.update(_placeholder_template_feats(num_templates, num_res))\n", + "\n", + " if model_type == ModelType.MONOMER:\n", + " feature_dict[seq] = feats\n", + " if model_type == ModelType.MULTIMER:\n", + " # Perform extra pair processing steps for heteromers\n", + " if len(set(sequences)) > 1:\n", + " uniprot_msa = msas_by_seq_by_db[seq]['uniprot']\n", + " uniprot_msa_features = data_pipeline.make_msa_features([uniprot_msa])\n", + " valid_feat_names = msa_pairing.MSA_FEATURES + (\n", + " 'msa_species_identifiers',\n", + " )\n", + " pair_feats = {\n", + " f'{k}_all_seq': v for k, v in uniprot_msa_features.items()\n", + " if k in valid_feat_names\n", + " }\n", + " feats.update(pair_feats)\n", + "\n", + " feats = data_pipeline.convert_monomer_features(feats, _chain_id)\n", + " feature_dict[_chain_id] = feats\n", + "\n", + " if model_type == ModelType.MONOMER:\n", + " np_example = feature_dict[sequences[0]]\n", + " elif model_type == ModelType.MULTIMER:\n", + " all_chain_feats = data_pipeline.add_assembly_features(feature_dict)\n", + " features = feature_processing_multimer.pair_and_merge(all_chain_features=all_chain_feats)\n", + " np_example = data_pipeline.pad_msa(features, 512)\n", + "\n", + " return np_example\n", + "\n", + "\n", + "output_dir = 'prediction'\n", + "os.makedirs(output_dir, exist_ok=True)\n", + "\n", + "plddts = {}\n", + "pae_outputs = {}\n", + "weighted_ptms = {}\n", + "unrelaxed_proteins = {}\n", + "\n", + "with tqdm.notebook.tqdm(total=len(model_names), bar_format=TQDM_BAR_FORMAT) as pbar:\n", + " for i, model_name in 
enumerate(model_names, start = 1):\n", + " pbar.set_description(f'Running {model_name}')\n", + "\n", + " feature_dict = make_features(sequences, msas_by_seq_by_db, model_type)\n", + "\n", + " if(weight_set == \"AlphaFold\"):\n", + " if model_type == ModelType.MONOMER:\n", + " config_preset = f\"model_{i}\"\n", + " elif model_type == ModelType.MULTIMER:\n", + " config_preset = f'model_{i}_multimer_v3'\n", + " else:\n", + " if(\"_no_templ_\" in model_name):\n", + " config_preset = \"model_3\"\n", + " else:\n", + " config_preset = \"model_1\"\n", + " if(\"_ptm_\" in model_name):\n", + " config_preset += \"_ptm\"\n", + "\n", + " cfg = config.model_config(config_preset)\n", + "\n", + " # Force the model to only use 3 recycling updates\n", + " cfg.data.common.max_recycling_iters = 3\n", + " cfg.model.recycle_early_stop_tolerance = -1\n", + "\n", + " openfold_model = model.AlphaFold(cfg)\n", + " openfold_model = openfold_model.eval()\n", + " if(weight_set == \"AlphaFold\"):\n", + " params_name = os.path.join(\n", + " ALPHAFOLD_PARAMS_DIR, f\"params_{config_preset}.npz\"\n", + " )\n", + " import_jax_weights_(openfold_model, params_name, version=config_preset)\n", + " elif(weight_set == \"OpenFold\"):\n", + " params_name = os.path.join(\n", + " OPENFOLD_PARAMS_DIR,\n", + " model_name,\n", + " )\n", + " d = torch.load(params_name)\n", + " openfold_model.load_state_dict(d)\n", + " else:\n", + " raise ValueError(f\"Invalid weight set: {weight_set}\")\n", + "\n", + " openfold_model = openfold_model.cuda()\n", + "\n", + " pipeline = feature_pipeline.FeaturePipeline(cfg.data)\n", + " processed_feature_dict = pipeline.process_features(\n", + " feature_dict,\n", + " mode='predict',\n", + " is_multimer = (model_type == ModelType.MULTIMER),\n", + " )\n", + "\n", + " processed_feature_dict = tensor_tree_map(\n", + " lambda t: t.cuda(), processed_feature_dict\n", + " )\n", + "\n", + " with torch.no_grad():\n", + " prediction_result = openfold_model(processed_feature_dict)\n", + "\n", + " # Move the batch and output to np for further processing\n", + " processed_feature_dict = tensor_tree_map(\n", + " lambda t: np.array(t[..., -1].cpu()), processed_feature_dict\n", + " )\n", + " prediction_result = tensor_tree_map(\n", + " lambda t: np.array(t.cpu()), prediction_result\n", + " )\n", + "\n", + " mean_plddt = prediction_result['plddt'].mean()\n", + "\n", + " if model_type == ModelType.MONOMER:\n", + " if 'predicted_aligned_error' in prediction_result:\n", + " pae_outputs[model_name] = (\n", + " prediction_result['predicted_aligned_error'],\n", + " prediction_result['max_predicted_aligned_error']\n", + " )\n", + " else:\n", + " # Get the pLDDT confidence metrics. 
Do not put pTM models here as they\n", + " # should never get selected.\n", + " plddts[model_name] = prediction_result['plddt']\n", + " elif model_type == ModelType.MULTIMER:\n", + " # Multimer models are sorted by pTM+ipTM.\n", + " plddts[model_name] = prediction_result['plddt']\n", + " pae_outputs[model_name] = (prediction_result['predicted_aligned_error'],\n", + " prediction_result['max_predicted_aligned_error'])\n", + "\n", + " weighted_ptms[model_name] = prediction_result['weighted_ptm_score']\n", + "\n", + " # Set the b-factors to the per-residue plddt.\n", + " final_atom_mask = prediction_result['final_atom_mask']\n", + " b_factors = prediction_result['plddt'][:, None] * final_atom_mask\n", + " unrelaxed_protein = protein.from_prediction(\n", + " processed_feature_dict,\n", + " prediction_result,\n", + " remove_leading_feature_dimension=False,\n", + " b_factors=b_factors,\n", + " )\n", + " unrelaxed_proteins[model_name] = unrelaxed_protein\n", + "\n", + " # Delete unused outputs to save memory.\n", + " del openfold_model\n", + " del processed_feature_dict\n", + " del prediction_result\n", + " pbar.update(n=1)\n", + "\n", + " # Find the best model according to the mean pLDDT.\n", + " if model_type == ModelType.MONOMER:\n", + " best_model_name = max(plddts.keys(), key=lambda x: plddts[x].mean())\n", + " elif model_type == ModelType.MULTIMER:\n", + " best_model_name = max(weighted_ptms.keys(), key=lambda x: weighted_ptms[x])\n", + " best_pdb = protein.to_pdb(unrelaxed_proteins[best_model_name])\n", + "\n", + " # --- AMBER relax the best model ---\n", + " if(relax_prediction):\n", + " pbar.set_description(f'AMBER relaxation')\n", + " amber_relaxer = relax.AmberRelaxation(\n", + " max_iterations=0,\n", + " tolerance=2.39,\n", + " stiffness=10.0,\n", + " exclude_residues=[],\n", + " max_outer_iterations=20,\n", + " use_gpu=True,\n", + " )\n", + " relaxed_pdb, _, _ = amber_relaxer.process(\n", + " prot=unrelaxed_proteins[best_model_name]\n", + " )\n", + " best_pdb = relaxed_pdb\n", + "\n", + " # Write out the prediction\n", + " pred_output_path = os.path.join(output_dir, 'selected_prediction.pdb')\n", + " with open(pred_output_path, 'w') as f:\n", + " f.write(best_pdb)\n", + "\n", + " pbar.update(n=1) # Finished AMBER relax.\n", + "\n", + "# Construct multiclass b-factors to indicate confidence bands\n", + "# 0=very low, 1=low, 2=confident, 3=very high\n", + "banded_b_factors = []\n", + "for plddt in plddts[best_model_name]:\n", + " for idx, (min_val, max_val, _) in enumerate(PLDDT_BANDS):\n", + " if plddt >= min_val and plddt <= max_val:\n", + " banded_b_factors.append(idx)\n", + " break\n", + "banded_b_factors = np.array(banded_b_factors)[:, None] * final_atom_mask\n", + "to_visualize_pdb = overwrite_b_factors(best_pdb, banded_b_factors)\n", + "\n", + "# --- Visualise the prediction & confidence ---\n", + "show_sidechains = True\n", + "def plot_plddt_legend():\n", + " \"\"\"Plots the legend for pLDDT.\"\"\"\n", + " thresh = [\n", + " 'Very low (pLDDT < 50)',\n", + " 'Low (70 > pLDDT > 50)',\n", + " 'Confident (90 > pLDDT > 70)',\n", + " 'Very high (pLDDT > 90)']\n", + "\n", + " colors = [x[2] for x in PLDDT_BANDS]\n", + "\n", + " plt.figure(figsize=(2, 2))\n", + " for c in colors:\n", + " plt.bar(0, 0, color=c)\n", + " plt.legend(thresh, frameon=False, loc='center', fontsize=20)\n", + " plt.xticks([])\n", + " plt.yticks([])\n", + " ax = plt.gca()\n", + " ax.spines['right'].set_visible(False)\n", + " ax.spines['top'].set_visible(False)\n", + " ax.spines['left'].set_visible(False)\n", + " 
ax.spines['bottom'].set_visible(False)\n", + " plt.title('Model Confidence', fontsize=20, pad=20)\n", + " return plt\n", + "\n", + "# Show the structure coloured by chain if the multimer model has been used.\n", + "if model_type == ModelType.MULTIMER:\n", + " multichain_view = py3Dmol.view(width=800, height=600)\n", + " multichain_view.addModelsAsFrames(to_visualize_pdb)\n", + " multichain_style = {'cartoon': {'colorscheme': 'chain'}}\n", + " multichain_view.setStyle({'model': -1}, multichain_style)\n", + " multichain_view.zoomTo()\n", + " multichain_view.show()\n", + "\n", + "# Color the structure by per-residue pLDDT\n", + "color_map = {i: bands[2] for i, bands in enumerate(PLDDT_BANDS)}\n", + "view = py3Dmol.view(width=800, height=600)\n", + "view.addModelsAsFrames(to_visualize_pdb)\n", + "style = {'cartoon': {\n", + " 'colorscheme': {\n", + " 'prop': 'b',\n", + " 'map': color_map}\n", + " }}\n", + "if show_sidechains:\n", + " style['stick'] = {}\n", + "view.setStyle({'model': -1}, style)\n", + "view.zoomTo()\n", + "\n", + "grid = GridspecLayout(1, 2)\n", + "out = Output()\n", + "with out:\n", + " view.show()\n", + "grid[0, 0] = out\n", + "\n", + "out = Output()\n", + "with out:\n", + " plot_plddt_legend().show()\n", + "grid[0, 1] = out\n", + "\n", + "display.display(grid)\n", + "\n", + "# Display pLDDT and predicted aligned error (if output by the model).\n", + "if pae_outputs:\n", + " num_plots = 2\n", + "else:\n", + " num_plots = 1\n", + "\n", + "plt.figure(figsize=[8 * num_plots, 6])\n", + "plt.subplot(1, num_plots, 1)\n", + "plt.plot(plddts[best_model_name])\n", + "plt.title('Predicted LDDT')\n", + "plt.xlabel('Residue')\n", + "plt.ylabel('pLDDT')\n", + "\n", + "if num_plots == 2:\n", + " plt.subplot(1, 2, 2)\n", + " pae, max_pae = list(pae_outputs.values())[0]\n", + " plt.imshow(pae, vmin=0., vmax=max_pae, cmap='Greens_r')\n", + " plt.colorbar(fraction=0.046, pad=0.04)\n", + "\n", + " # Display lines at chain boundaries.\n", + " best_unrelaxed_prot = unrelaxed_proteins[best_model_name]\n", + " total_num_res = best_unrelaxed_prot.residue_index.shape[-1]\n", + " chain_ids = best_unrelaxed_prot.chain_index\n", + " for chain_boundary in np.nonzero(chain_ids[:-1] - chain_ids[1:]):\n", + " if chain_boundary.size:\n", + " plt.plot([0, total_num_res], [chain_boundary, chain_boundary], color='red')\n", + " plt.plot([chain_boundary, chain_boundary], [0, total_num_res], color='red')\n", + " plt.title('Predicted Aligned Error')\n", + " plt.xlabel('Scored residue')\n", + " plt.ylabel('Aligned residue')\n", + "\n", + "# Save pLDDT and predicted aligned error (if it exists)\n", + "pae_output_path = os.path.join(output_dir, 'predicted_aligned_error.json')\n", + "if pae_outputs:\n", + " # Save predicted aligned error in the same format as the AF EMBL DB\n", + " rounded_errors = np.round(pae.astype(np.float64), decimals=1)\n", + " indices = np.indices((len(rounded_errors), len(rounded_errors))) + 1\n", + " indices_1 = indices[0].flatten().tolist()\n", + " indices_2 = indices[1].flatten().tolist()\n", + " pae_data = json.dumps([{\n", + " 'residue1': indices_1,\n", + " 'residue2': indices_2,\n", + " 'distance': rounded_errors.flatten().tolist(),\n", + " 'max_predicted_aligned_error': max_pae.item()\n", + " }],\n", + " indent=None,\n", + " separators=(',', ':'))\n", + " with open(pae_output_path, 'w') as f:\n", + " f.write(pae_data)\n", + "\n", + "\n", + "# --- Download the predictions ---\n", + "shutil.make_archive(base_name='prediction', format='zip', root_dir=output_dir)\n", + 
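"# files.download prompts the browser to save the zipped prediction locally.\n", + 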
"files.download(f'{output_dir}.zip')" + ], + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lUQAn5LYC5n4" + }, + "source": [ + "### Interpreting the prediction\n", + "\n", + "Please see the [AlphaFold methods paper](https://www.nature.com/articles/s41586-021-03819-2) and the [AlphaFold predictions of the human proteome paper](https://www.nature.com/articles/s41586-021-03828-1), as well as [DeepMind's FAQ](https://alphafold.ebi.ac.uk/faq) on how to interpret AlphaFold/OpenFold predictions. More information about the predictions of the AlphaFold Multimer model can be found in the [Alphafold Multimer paper](https://www.biorxiv.org/content/10.1101/2022.03.11.484043v3.full.pdf)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jeb2z8DIA4om" + }, + "source": [ + "## FAQ & Troubleshooting\n", + "\n", + "\n", + "* How do I get a predicted protein structure for my protein?\n", + " * Click on the _Connect_ button on the top right to get started.\n", + " * Paste the amino acid sequence of your protein (without any headers) into the “Enter the amino acid sequence to fold”.\n", + " * Run all cells in the Colab, either by running them individually (with the play button on the left side) or via _Runtime_ > _Run all._\n", + " * The predicted protein structure will be downloaded once all cells have been executed. Note: This can take minutes to hours - see below.\n", + "* How long will this take?\n", + " * Downloading the OpenFold source code can take up to a few minutes.\n", + " * Downloading and installing the third-party software can take up to a few minutes.\n", + " * The search against genetic databases can take minutes to hours.\n", + " * Running OpenFold and generating the prediction can take minutes to hours, depending on the length of your protein and on which GPU-type Colab has assigned you.\n", + "* My Colab no longer seems to be doing anything, what should I do?\n", + " * Some steps may take minutes to hours to complete.\n", + " * Sometimes, running the \"installation\" cells more than once can corrupt the OpenFold installation.\n", + " * If nothing happens or if you receive an error message, try restarting your Colab runtime via _Runtime_ > _Restart runtime_.\n", + " * If this doesn’t help, reset your Colab runtime via _Runtime_ > _Factory reset runtime_.\n", + "* How does what's run in this notebook compare to the full versions of Alphafold/Openfold?\n", + " * This Colab version of OpenFold searches a selected portion of the BFD dataset and currently doesn’t use templates, so its accuracy is reduced in comparison to the full version, which is analogous to what's described in the [AlphaFold paper](https://doi.org/10.1038/s41586-021-03819-2) and [Github repo](https://github.com/deepmind/alphafold/). The full version of OpenFold can be run from our own [GitHub repo](https://github.com/aqlaboratory/openfold).\n", + "* What is a Colab?\n", + " * See the [Colab FAQ](https://research.google.com/colaboratory/faq.html).\n", + "* I received a warning “Notebook requires high RAM”, what do I do?\n", + " * The resources allocated to your Colab vary. See the [Colab FAQ](https://research.google.com/colaboratory/faq.html) for more details.\n", + " * You can execute the Colab nonetheless.\n", + "* I received an error “Colab CPU runtime not supported” or “No GPU/TPU found”, what do I do?\n", + " * Colab CPU runtime is not supported. 
Try changing your runtime via _Runtime_ > _Change runtime type_ > _Hardware accelerator_ > _GPU_.\n", + " * The type of GPU allocated to your Colab varies. See the [Colab FAQ](https://research.google.com/colaboratory/faq.html) for more details.\n", + " * If you receive “Cannot connect to GPU backend”, you can try again later to see if Colab allocates you a GPU.\n", + " * [Colab Pro](https://colab.research.google.com/signup) offers priority access to GPUs.\n", + "* Does this tool install anything on my computer?\n", + " * No, everything happens in the cloud on Google Colab.\n", + " * At the end of the Colab execution a zip-archive with the obtained prediction will be automatically downloaded to your computer.\n", + "* How should I share feedback and bug reports?\n", + " * Please share any feedback and bug reports as an [issue](https://github.com/aqlaboratory/openfold/issues) on GitHub.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "YfPhvYgKC81B" + }, + "source": [ + "# License and Disclaimer\n", + "\n", + "This Colab notebook and other information provided is for theoretical modelling only; caution should be exercised in its use. It is provided ‘as-is’ without any warranty of any kind, whether expressed or implied. Information is not intended to be a substitute for professional medical advice, diagnosis, or treatment, and does not constitute medical or other professional advice.\n", + "\n", + "## AlphaFold/OpenFold Code License\n", + "\n", + "Copyright 2021 AlQuraishi Laboratory\n", + "\n", + "Copyright 2021 DeepMind Technologies Limited.\n", + "\n", + "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0.\n", + "\n", + "Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n", + "\n", + "## Model Parameters License\n", + "\n", + "DeepMind's AlphaFold parameters are made available under the terms of the Creative Commons Attribution 4.0 International (CC BY 4.0) license. You can find details at: https://creativecommons.org/licenses/by/4.0/legalcode\n", + "\n", + "\n", + "## Third-party software\n", + "\n", + "Use of the third-party software, libraries or code referred to in this notebook may be governed by separate terms and conditions or license provisions. Your use of the third-party software, libraries or code is subject to any such terms and you should check that you can comply with any applicable restrictions or terms and conditions before use.\n", + "\n", + "\n", + "## Mirrored Databases\n", + "\n", + "The following databases have been mirrored by DeepMind, and are available with reference to the following:\n", + "* UniRef90: v2021\\_03 (unmodified), by The UniProt Consortium, available under a [Creative Commons Attribution-NoDerivatives 4.0 International License](http://creativecommons.org/licenses/by-nd/4.0/).\n", + "* MGnify: v2019\\_05 (unmodified), by Mitchell AL et al., available free of all copyright restrictions and made fully and freely available for both non-commercial and commercial use under [CC0 1.0 Universal (CC0 1.0) Public Domain Dedication](https://creativecommons.org/publicdomain/zero/1.0/).\n", + "* BFD: (modified), by Steinegger M. 
and Söding J., modified by DeepMind, available under a [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/). See the Methods section of the [AlphaFold proteome paper](https://www.nature.com/articles/s41586-021-03828-1) for details." + ] + } + ], + "metadata": { + "colab": { + "provenance": [], + "gpuType": "T4", + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + }, + "accelerator": "GPU" + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/openfold/data/tools/hhsearch.py b/openfold/data/tools/hhsearch.py index 664ab794..d1b13d58 100644 --- a/openfold/data/tools/hhsearch.py +++ b/openfold/data/tools/hhsearch.py @@ -84,7 +84,6 @@ def query(self, a3m: str, output_dir: Optional[str] = None) -> str: for db_path in self.databases: db_cmd.append("-d") db_cmd.append(db_path) - cmd = [ self.binary_path, "-i", @@ -97,33 +96,24 @@ def query(self, a3m: str, output_dir: Optional[str] = None) -> str: str(self.n_cpu), ] + db_cmd - raise RuntimeError( - f"HHSearch failed:\ncommand:\n{cmd}\n\n" + logging.info('Launching subprocess "%s"', " ".join(cmd)) + process = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) + with utils.timing("HHsearch query"): + stdout, stderr = process.communicate() + retcode = process.wait() + + if retcode: + # Stderr is truncated to prevent proto size errors in Beam. + raise RuntimeError( + "HHSearch failed:\ncommand:\n%s\n\nstdout:\n%s\n\nstderr:\n%s\n" + % (" ".join(cmd), stdout.decode("utf-8"), stderr[:100_000].decode("utf-8")) + ) - # logging.info('Launching subprocess "%s"', " ".join(cmd)) - # print(f"hhsearch command: {' '.join(cmd)}") - # process = subprocess.Popen( - # cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE - # ) - # with utils.timing("HHsearch query"): - # stdout, stderr = process.communicate() - # retcode = process.wait() - # - # if retcode: - # print(f"hhsearch command: {' '.join(cmd)}") - # # Stderr is truncated to prevent proto size errors in Beam. - # # raise RuntimeError( - # # "HHSearch failed:\ncommand:\n%s\n\nstdout:\n%s\n\nstderr:\n%s\n" - # # % (f"hhsearch command: {cmd}", stdout.decode("utf-8"), stderr[:100_000].decode("utf-8")) - # # ) - # raise RuntimeError( - # f"HHSearch failed:\ncommand:\n{cmd}\n\n" - # ) - # - # with open(hhr_path) as f: - # hhr = f.read() - # return hhr + with open(hhr_path) as f: + hhr = f.read() + return hhr @staticmethod def get_template_hits( diff --git a/scripts/colabfold_search.py b/scripts/colabfold_search.py new file mode 100644 index 00000000..2aa89f89 --- /dev/null +++ b/scripts/colabfold_search.py @@ -0,0 +1,729 @@ +""" + +ADAPTED FROM https://github.com/sokrypton/ColabFold/blob/main/colabfold/mmseqs/search.py#L223 + +Functionality for running mmseqs locally. 
Takes in a fasta file, outputs final.a3m + +Note: Currently needs mmseqs compiled from source +""" + +import logging +import math +import os +import shutil +import subprocess +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from pathlib import Path +from typing import Any, Callable, Dict, List, Optional, Tuple, Union, TYPE_CHECKING +import pandas +import random + +logger = logging.getLogger(__name__) + + +def safe_filename(file: str) -> str: + return "".join([c if c.isalnum() or c in ["_", ".", "-"] else "_" for c in file]) + + +def msa_to_str( + unpaired_msa: List[str], + paired_msa: List[str], + query_seqs_unique: List[str], + query_seqs_cardinality: List[int], +) -> str: + msa = "#" + ",".join(map(str, map(len, query_seqs_unique))) + "\t" + msa += ",".join(map(str, query_seqs_cardinality)) + "\n" + # build msa with cardinality of 1, it makes it easier to parse and manipulate + query_seqs_cardinality = [1 for _ in query_seqs_cardinality] + msa += pair_msa(query_seqs_unique, query_seqs_cardinality, paired_msa, unpaired_msa) + return msa + + +def pair_msa( + query_seqs_unique: List[str], + query_seqs_cardinality: List[int], + paired_msa: Optional[List[str]], + unpaired_msa: Optional[List[str]], +) -> str: + if paired_msa is None and unpaired_msa is not None: + a3m_lines = pad_sequences( + unpaired_msa, query_seqs_unique, query_seqs_cardinality + ) + elif paired_msa is not None and unpaired_msa is not None: + a3m_lines = ( + pair_sequences(paired_msa, query_seqs_unique, query_seqs_cardinality) + + "\n" + + pad_sequences(unpaired_msa, query_seqs_unique, query_seqs_cardinality) + ) + elif paired_msa is not None and unpaired_msa is None: + a3m_lines = pair_sequences( + paired_msa, query_seqs_unique, query_seqs_cardinality + ) + else: + raise ValueError(f"Invalid pairing") + return a3m_lines + + +def pair_sequences( + a3m_lines: List[str], query_sequences: List[str], query_cardinality: List[int] +) -> str: + a3m_line_paired = [""] * len(a3m_lines[0].splitlines()) + for n, seq in enumerate(query_sequences): + lines = a3m_lines[n].splitlines() + for i, line in enumerate(lines): + if line.startswith(">"): + if n != 0: + line = line.replace(">", "\t", 1) + a3m_line_paired[i] = a3m_line_paired[i] + line + else: + a3m_line_paired[i] = a3m_line_paired[i] + line * query_cardinality[n] + return "\n".join(a3m_line_paired) + + +def pad_sequences( + a3m_lines: List[str], query_sequences: List[str], query_cardinality: List[int] +) -> str: + _blank_seq = [ + ("-" * len(seq)) + for n, seq in enumerate(query_sequences) + for _ in range(query_cardinality[n]) + ] + a3m_lines_combined = [] + pos = 0 + for n, seq in enumerate(query_sequences): + for j in range(0, query_cardinality[n]): + lines = a3m_lines[n].split("\n") + for a3m_line in lines: + if len(a3m_line) == 0: + continue + if a3m_line.startswith(">"): + a3m_lines_combined.append(a3m_line) + else: + a3m_lines_combined.append( + "".join(_blank_seq[:pos] + [a3m_line] + _blank_seq[pos + 1:]) + ) + pos += 1 + return "\n".join(a3m_lines_combined) + + +def parse_fasta(fasta_string: str) -> Tuple[List[str], List[str]]: + """Parses FASTA string and returns list of strings with amino-acid sequences. + + Arguments: + fasta_string: The string contents of a FASTA file. + + Returns: + A tuple of two lists: + * A list of sequences. + * A list of sequence descriptions taken from the comment lines. In the + same order as the sequences. 
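+
+ For example (illustrative), parse_fasta(">a\\nACD\\n>b\\nEFG\\n")
+ returns (["ACD", "EFG"], ["a", "b"]).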
+ """ + sequences = [] + descriptions = [] + index = -1 + for line in fasta_string.splitlines(): + line = line.strip() + if line.startswith("#"): + continue + if line.startswith(">"): + index += 1 + descriptions.append(line[1:]) # Remove the '>' at the beginning. + sequences.append("") + continue + elif not line: + continue # Skip blank lines. + sequences[index] += line + + return sequences, descriptions + + +def get_queries( + input_path: Union[str, Path], sort_queries_by: str = "length" +) -> Tuple[List[Tuple[str, str, Optional[List[str]]]], bool]: + """Reads a directory of fasta files, a single fasta file or a csv file and returns a tuple + of job name, sequence and the optional a3m lines""" + + input_path = Path(input_path) + if not input_path.exists(): + raise OSError(f"{input_path} could not be found") + + if input_path.is_file(): + if input_path.suffix == ".csv" or input_path.suffix == ".tsv": + sep = "\t" if input_path.suffix == ".tsv" else "," + df = pandas.read_csv(input_path, sep=sep) + assert "id" in df.columns and "sequence" in df.columns + queries = [ + (seq_id, sequence.upper().split(":"), None) + for seq_id, sequence in df[["id", "sequence"]].itertuples(index=False) + ] + for i in range(len(queries)): + if len(queries[i][1]) == 1: + queries[i] = (queries[i][0], queries[i][1][0], None) + elif input_path.suffix == ".a3m": + (seqs, header) = parse_fasta(input_path.read_text()) + if len(seqs) == 0: + raise ValueError(f"{input_path} is empty") + query_sequence = seqs[0] + # Use a list so we can easily extend this to multiple msas later + a3m_lines = [input_path.read_text()] + queries = [(input_path.stem, query_sequence, a3m_lines)] + elif input_path.suffix in [".fasta", ".faa", ".fa"]: + (sequences, headers) = parse_fasta(input_path.read_text()) + queries = [] + for sequence, header in zip(sequences, headers): + sequence = sequence.upper() + if sequence.count(":") == 0: + # Single sequence + queries.append((header, sequence, None)) + else: + # Complex mode + queries.append((header, sequence.upper().split(":"), None)) + else: + raise ValueError(f"Unknown file format {input_path.suffix}") + else: + assert input_path.is_dir(), "Expected either an input file or a input directory" + queries = [] + for file in sorted(input_path.iterdir()): + if not file.is_file(): + continue + if file.suffix.lower() not in [".a3m", ".fasta", ".faa"]: + logger.warning(f"non-fasta/a3m file in input directory: {file}") + continue + (seqs, header) = parse_fasta(file.read_text()) + if len(seqs) == 0: + logger.error(f"{file} is empty") + continue + query_sequence = seqs[0] + if len(seqs) > 1 and file.suffix in [".fasta", ".faa", ".fa"]: + logger.warning( + f"More than one sequence in {file}, ignoring all but the first sequence" + ) + + if file.suffix.lower() == ".a3m": + a3m_lines = [file.read_text()] + queries.append((file.stem, query_sequence.upper(), a3m_lines)) + else: + if query_sequence.count(":") == 0: + # Single sequence + queries.append((file.stem, query_sequence, None)) + else: + # Complex mode + queries.append((file.stem, query_sequence.upper().split(":"), None)) + + # sort by seq. 
len + if sort_queries_by == "length": + queries.sort(key=lambda t: len("".join(t[1]))) + + elif sort_queries_by == "random": + random.shuffle(queries) + + is_complex = False + for job_number, (_, query_sequence, a3m_lines) in enumerate(queries): + if isinstance(query_sequence, list): + is_complex = True + break + if a3m_lines is not None and a3m_lines[0].startswith("#"): + a3m_line = a3m_lines[0].splitlines()[0] + tab_sep_entries = a3m_line[1:].split("\t") + if len(tab_sep_entries) == 2: + query_seq_len = tab_sep_entries[0].split(",") + query_seq_len = list(map(int, query_seq_len)) + query_seqs_cardinality = tab_sep_entries[1].split(",") + query_seqs_cardinality = list(map(int, query_seqs_cardinality)) + is_single_protein = ( + True + if len(query_seq_len) == 1 and query_seqs_cardinality[0] == 1 + else False + ) + if not is_single_protein: + is_complex = True + break + return queries, is_complex + + +def run_mmseqs(mmseqs: Path, params: List[Union[str, Path]]): + params_log = " ".join(str(i) for i in params) + logger.info(f"Running {mmseqs} {params_log}") + # hide MMseqs2 verbose parameters list that clogs up the log + os.environ["MMSEQS_CALL_DEPTH"] = "1" + subprocess.check_call([mmseqs] + params) + + +def mmseqs_search_monomer( + dbbase: Path, + base: Path, + uniref_db: Path = Path("uniref30_2302_db"), + template_db: Path = Path(""), # Unused by default + metagenomic_db: Path = Path("colabfold_envdb_202108_db"), + mmseqs: Path = Path("mmseqs"), + use_env: bool = True, + use_templates: bool = False, + filter: bool = True, + expand_eval: float = math.inf, + align_eval: int = 10, + diff: int = 3000, + qsc: float = -20.0, + max_accept: int = 1000000, + prefilter_mode: int = 0, + s: float = 8, + db_load_mode: int = 2, + threads: int = 32, +): + """Run mmseqs with a local colabfold database set + + db1: uniref db (UniRef30) + db2: Template (unused by default) + db3: metagenomic db (colabfold_envdb_202108 or bfd_mgy_colabfold, the former is preferred) + """ + if filter: + # 0.1 was not used in benchmarks due to POSIX shell bug in line above + # EXPAND_EVAL=0.1 + align_eval = 10 + qsc = 0.8 + max_accept = 100000 + + used_dbs = [uniref_db] + if use_templates: + used_dbs.append(template_db) + if use_env: + used_dbs.append(metagenomic_db) + + for db in used_dbs: + if not dbbase.joinpath(f"{db}.dbtype").is_file(): + raise FileNotFoundError(f"Database {db} does not exist") + if ( + ( + not dbbase.joinpath(f"{db}.idx").is_file() + and not dbbase.joinpath(f"{db}.idx.index").is_file() + ) + or os.environ.get("MMSEQS_IGNORE_INDEX", False) + ): + logger.info("Search does not use index") + db_load_mode = 0 + dbSuffix1 = "_seq" + dbSuffix2 = "_aln" + dbSuffix3 = "" + else: + dbSuffix1 = ".idx" + dbSuffix2 = ".idx" + dbSuffix3 = ".idx" + + # fmt: off + # @formatter:off + search_param = ["--num-iterations", "3", "--db-load-mode", str(db_load_mode), "-a", "-e", "0.1", "--max-seqs", "10000"] + search_param += ["--prefilter-mode", str(prefilter_mode)] + if s is not None: + search_param += ["-s", "{:.1f}".format(s)] + else: + search_param += ["--k-score", "'seq:96,prof:80'"] + + filter_param = ["--filter-msa", str(filter), "--filter-min-enable", "1000", "--diff", str(diff), "--qid", "0.0,0.2,0.4,0.6,0.8,1.0", "--qsc", "0", "--max-seq-id", "0.95",] + expand_param = ["--expansion-mode", "0", "-e", str(expand_eval), "--expand-filter-clusters", str(filter), "--max-seq-id", "0.95",] + + run_mmseqs(mmseqs, ["search", base.joinpath("qdb"), dbbase.joinpath(uniref_db), base.joinpath("res"), base.joinpath("tmp"), 
"--threads", str(threads)] + search_param) + run_mmseqs(mmseqs, ["mvdb", base.joinpath("tmp/latest/profile_1"), base.joinpath("prof_res")]) + run_mmseqs(mmseqs, ["lndb", base.joinpath("qdb_h"), base.joinpath("prof_res_h")]) + run_mmseqs(mmseqs, ["expandaln", base.joinpath("qdb"), dbbase.joinpath(f"{uniref_db}{dbSuffix1}"), base.joinpath("res"), dbbase.joinpath(f"{uniref_db}{dbSuffix2}"), base.joinpath("res_exp"), "--db-load-mode", str(db_load_mode), "--threads", str(threads)] + expand_param) + run_mmseqs(mmseqs, ["align", base.joinpath("prof_res"), dbbase.joinpath(f"{uniref_db}{dbSuffix1}"), base.joinpath("res_exp"), base.joinpath("res_exp_realign"), "--db-load-mode", str(db_load_mode), "-e", str(align_eval), "--max-accept", str(max_accept), "--threads", str(threads), "--alt-ali", "10", "-a"]) + run_mmseqs(mmseqs, ["filterresult", base.joinpath("qdb"), dbbase.joinpath(f"{uniref_db}{dbSuffix1}"), + base.joinpath("res_exp_realign"), base.joinpath("res_exp_realign_filter"), "--db-load-mode", + str(db_load_mode), "--qid", "0", "--qsc", str(qsc), "--diff", "0", "--threads", + str(threads), "--max-seq-id", "1.0", "--filter-min-enable", "100"]) + run_mmseqs(mmseqs, ["result2msa", base.joinpath("qdb"), dbbase.joinpath(f"{uniref_db}{dbSuffix1}"), + base.joinpath("res_exp_realign_filter"), base.joinpath("uniref.a3m"), "--msa-format-mode", + "6", "--db-load-mode", str(db_load_mode), "--threads", str(threads)] + filter_param) + subprocess.run([mmseqs] + ["rmdb", base.joinpath("res_exp_realign")]) + subprocess.run([mmseqs] + ["rmdb", base.joinpath("res_exp")]) + subprocess.run([mmseqs] + ["rmdb", base.joinpath("res")]) + subprocess.run([mmseqs] + ["rmdb", base.joinpath("res_exp_realign_filter")]) + + if use_env: + run_mmseqs(mmseqs, ["search", base.joinpath("prof_res"), dbbase.joinpath(metagenomic_db), base.joinpath("res_env"), + base.joinpath("tmp3"), "--threads", str(threads)] + search_param) + run_mmseqs(mmseqs, ["expandaln", base.joinpath("prof_res"), dbbase.joinpath(f"{metagenomic_db}{dbSuffix1}"), base.joinpath("res_env"), + dbbase.joinpath(f"{metagenomic_db}{dbSuffix2}"), base.joinpath("res_env_exp"), "-e", str(expand_eval), + "--expansion-mode", "0", "--db-load-mode", str(db_load_mode), "--threads", str(threads)]) + run_mmseqs(mmseqs, ["align", base.joinpath("tmp3/latest/profile_1"), dbbase.joinpath(f"{metagenomic_db}{dbSuffix1}"), + base.joinpath("res_env_exp"), base.joinpath("res_env_exp_realign"), "--db-load-mode", + str(db_load_mode), "-e", str(align_eval), "--max-accept", str(max_accept), "--threads", + str(threads), "--alt-ali", "10", "-a"]) + run_mmseqs(mmseqs, ["filterresult", base.joinpath("qdb"), dbbase.joinpath(f"{metagenomic_db}{dbSuffix1}"), + base.joinpath("res_env_exp_realign"), base.joinpath("res_env_exp_realign_filter"), + "--db-load-mode", str(db_load_mode), "--qid", "0", "--qsc", str(qsc), "--diff", "0", + "--max-seq-id", "1.0", "--threads", str(threads), "--filter-min-enable", "100"]) + run_mmseqs(mmseqs, ["result2msa", base.joinpath("qdb"), dbbase.joinpath(f"{metagenomic_db}{dbSuffix1}"), + base.joinpath("res_env_exp_realign_filter"), + base.joinpath("bfd.mgnify30.metaeuk30.smag30.a3m"), "--msa-format-mode", "6", + "--db-load-mode", str(db_load_mode), "--threads", str(threads)] + filter_param) + + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_env_exp_realign_filter")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_env_exp_realign")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_env_exp")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_env")]) + + 
run_mmseqs(mmseqs, ["mergedbs", base.joinpath("qdb"), base.joinpath("final.a3m"), base.joinpath("uniref.a3m"), base.joinpath("bfd.mgnify30.metaeuk30.smag30.a3m")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("bfd.mgnify30.metaeuk30.smag30.a3m")]) + else: + run_mmseqs(mmseqs, ["mvdb", base.joinpath("uniref.a3m"), base.joinpath("final.a3m")]) + + if use_templates: + run_mmseqs(mmseqs, ["search", base.joinpath("prof_res"), dbbase.joinpath(template_db), base.joinpath("res_pdb"), + base.joinpath("tmp2"), "--db-load-mode", str(db_load_mode), "--threads", str(threads), "-s", "7.5", "-a", "-e", "0.1", "--prefilter-mode", str(prefilter_mode)]) + run_mmseqs(mmseqs, ["convertalis", base.joinpath("prof_res"), dbbase.joinpath(f"{template_db}{dbSuffix3}"), base.joinpath("res_pdb"), + base.joinpath(f"{template_db}"), "--format-output", + "query,target,fident,alnlen,mismatch,gapopen,qstart,qend,tstart,tend,evalue,bits,cigar", + "--db-output", "1", + "--db-load-mode", str(db_load_mode), "--threads", str(threads)]) + run_mmseqs(mmseqs, ["unpackdb", base.joinpath(f"{template_db}"), base.joinpath("."), "--unpack-name-mode", "0", "--unpack-suffix", ".m8"]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_pdb")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath(f"{template_db}")]) + + run_mmseqs(mmseqs, ["unpackdb", base.joinpath("final.a3m"), base.joinpath("."), "--unpack-name-mode", "0", "--unpack-suffix", ".a3m"]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("final.a3m")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("uniref.a3m")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res")]) + # @formatter:on + # fmt: on + + for file in base.glob("prof_res*"): + file.unlink() + shutil.rmtree(base.joinpath("tmp")) + if use_templates: + shutil.rmtree(base.joinpath("tmp2")) + if use_env: + shutil.rmtree(base.joinpath("tmp3")) + + +def mmseqs_search_pair( + dbbase: Path, + base: Path, + uniref_db: Path = Path("uniref30_2302_db"), + spire_db: Path = Path("spire_ctg10_2401_db"), + mmseqs: Path = Path("mmseqs"), + pair_env: bool = True, + prefilter_mode: int = 0, + s: float = 8, + threads: int = 64, + db_load_mode: int = 2, + pairing_strategy: int = 0, +): + if not dbbase.joinpath(f"{uniref_db}.dbtype").is_file(): + raise FileNotFoundError(f"Database {uniref_db} does not exist") + if ( + ( + not dbbase.joinpath(f"{uniref_db}.idx").is_file() + and not dbbase.joinpath(f"{uniref_db}.idx.index").is_file() + ) + or os.environ.get("MMSEQS_IGNORE_INDEX", False) + ): + logger.info("Search does not use index") + db_load_mode = 0 + dbSuffix1 = "_seq" + dbSuffix2 = "_aln" + else: + dbSuffix1 = ".idx" + dbSuffix2 = ".idx" + + if pair_env: + db = spire_db + output = ".env.paired.a3m" + else: + db = uniref_db + output = ".paired.a3m" + + # fmt: off + # @formatter:off + search_param = ["--num-iterations", "3", "--db-load-mode", str(db_load_mode), "-a", "-e", "0.1", "--max-seqs", "10000",] + search_param += ["--prefilter-mode", str(prefilter_mode)] + if s is not None: + search_param += ["-s", "{:.1f}".format(s)] + else: + search_param += ["--k-score", "'seq:96,prof:80'"] + expand_param = ["--expansion-mode", "0", "-e", "inf", "--expand-filter-clusters", "0", "--max-seq-id", "0.95",] + run_mmseqs(mmseqs, ["search", base.joinpath("qdb"), dbbase.joinpath(db), base.joinpath("res"), base.joinpath("tmp"), "--threads", str(threads),] + search_param,) + run_mmseqs(mmseqs, ["expandaln", base.joinpath("qdb"), dbbase.joinpath(f"{db}{dbSuffix1}"), base.joinpath("res"), dbbase.joinpath(f"{db}{dbSuffix2}"), base.joinpath("res_exp"), "--db-load-mode", 
str(db_load_mode), "--threads", str(threads),] + expand_param,) + run_mmseqs(mmseqs, ["align", base.joinpath("qdb"), dbbase.joinpath(f"{db}{dbSuffix1}"), base.joinpath("res_exp"), base.joinpath("res_exp_realign"), "--db-load-mode", str(db_load_mode), "-e", "0.001", "--max-accept", "1000000", "--threads", str(threads), "-c", "0.5", "--cov-mode", "1",],) + run_mmseqs(mmseqs, ["pairaln", base.joinpath("qdb"), dbbase.joinpath(f"{db}"), base.joinpath("res_exp_realign"), base.joinpath("res_exp_realign_pair"), "--db-load-mode", str(db_load_mode), "--pairing-mode", str(pairing_strategy), "--pairing-dummy-mode", "0", "--threads", str(threads), ],) + run_mmseqs(mmseqs, ["align", base.joinpath("qdb"), dbbase.joinpath(f"{db}{dbSuffix1}"), base.joinpath("res_exp_realign_pair"), base.joinpath("res_exp_realign_pair_bt"), "--db-load-mode", str(db_load_mode), "-e", "inf", "-a", "--threads", str(threads), ],) + run_mmseqs(mmseqs, ["pairaln", base.joinpath("qdb"), dbbase.joinpath(f"{db}"), base.joinpath("res_exp_realign_pair_bt"), base.joinpath("res_final"), "--db-load-mode", str(db_load_mode), "--pairing-mode", str(pairing_strategy), "--pairing-dummy-mode", "1", "--threads", str(threads),],) + run_mmseqs(mmseqs, ["result2msa", base.joinpath("qdb"), dbbase.joinpath(f"{db}{dbSuffix1}"), base.joinpath("res_final"), base.joinpath("pair.a3m"), "--db-load-mode", str(db_load_mode), "--msa-format-mode", "5", "--threads", str(threads),],) + run_mmseqs(mmseqs, ["unpackdb", base.joinpath("pair.a3m"), base.joinpath("."), "--unpack-name-mode", "0", "--unpack-suffix", output,],) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_exp")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_exp_realign")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_exp_realign_pair")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_exp_realign_pair_bt")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_final")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("pair.a3m")]) + shutil.rmtree(base.joinpath("tmp")) + # @formatter:on + # fmt: on + + +def main(): + parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) + parser.add_argument( + "query", + type=Path, + help="fasta files with the queries.", + ) + parser.add_argument( + "dbbase", + type=Path, + help="The path to the database and indices you downloaded and created with setup_databases.sh", + ) + parser.add_argument( + "base", type=Path, help="Directory for the results (and intermediate files)" + ) + parser.add_argument( + "--prefilter-mode", + type=int, + default=0, + choices=[0, 1, 2], + help="Prefiltering algorithm to use: 0: k-mer (high-mem), 1: ungapped (high-cpu), 2: exhaustive (no prefilter, very slow). See wiki for more details: https://github.com/sokrypton/ColabFold/wiki#colabfold_search", + ) + parser.add_argument( + "-s", + type=float, + default=None, + help="MMseqs2 sensitivity. Lowering this will result in a much faster search but possibly sparser MSAs. 
By default, the k-mer threshold is set directly to the one used by the server, which corresponds to a sensitivity of ~8.", + ) + # dbs are uniref, templates and environmental + # We normally don't use templates + parser.add_argument( + "--db1", type=Path, default=Path("uniref30_2302_db"), help="UniRef database" + ) + parser.add_argument("--db2", type=Path, default=Path(""), help="Templates database") + parser.add_argument( + "--db3", + type=Path, + default=Path("colabfold_envdb_202108_db"), + help="Environmental database", + ) + parser.add_argument("--db4", type=Path, default=Path("spire_ctg10_2401_db"), help="Environmental pairing database") + + # poor man's boolean arguments + parser.add_argument( + "--use-env", type=int, default=1, choices=[0, 1], help="Use --db3" + ) + parser.add_argument( + "--use-env-pairing", type=int, default=0, choices=[0, 1], help="Use --db4" + ) + parser.add_argument( + "--use-templates", type=int, default=0, choices=[0, 1], help="Use --db2" + ) + parser.add_argument( + "--filter", + type=int, + default=1, + choices=[0, 1], + help="Filter the MSA by pre-defined align_eval, qsc, max_accept", + ) + + # mmseqs params + parser.add_argument( + "--mmseqs", + type=Path, + default=Path("mmseqs"), + help="Location of the mmseqs binary.", + ) + parser.add_argument( + "--expand-eval", + type=float, + default=math.inf, + help="e-val threshold for 'expandaln'.", + ) + parser.add_argument( + "--align-eval", type=int, default=10, help="e-val threshold for 'align'." + ) + parser.add_argument( + "--diff", + type=int, + default=3000, + help="filterresult - Keep at least this many seqs in each MSA block.", + ) + parser.add_argument( + "--qsc", + type=float, + default=-20.0, + help="filterresult - reduce diversity of output MSAs using min score thresh.", + ) + parser.add_argument( + "--max-accept", + type=int, + default=1000000, + help="align - Maximum accepted alignments before alignment calculation for a query is stopped.", + ) + parser.add_argument( + "--pairing_strategy", type=int, default=0, help="pairaln - Pairing strategy." + ) + parser.add_argument( + "--db-load-mode", + type=int, + default=0, + help="Database preload mode 0: auto, 1: fread, 2: mmap, 3: mmap+touch", + ) + parser.add_argument( + "--threads", type=int, default=64, help="Number of threads to use." 
+ ) + args = parser.parse_args() + + logging.basicConfig(level=logging.INFO) + + queries, is_complex = get_queries(args.query, None) + + queries_unique = [] + for job_number, (raw_jobname, query_sequences, a3m_lines) in enumerate(queries): + # remove duplicates before searching + query_sequences = ( + [query_sequences] if isinstance(query_sequences, str) else query_sequences + ) + query_seqs_unique = [] + for x in query_sequences: + if x not in query_seqs_unique: + query_seqs_unique.append(x) + query_seqs_cardinality = [0] * len(query_seqs_unique) + for seq in query_sequences: + seq_idx = query_seqs_unique.index(seq) + query_seqs_cardinality[seq_idx] += 1 + + queries_unique.append([raw_jobname, query_seqs_unique, query_seqs_cardinality]) + + args.base.mkdir(exist_ok=True, parents=True) + query_file = args.base.joinpath("query.fas") + with query_file.open("w") as f: + for job_number, ( + raw_jobname, + query_sequences, + query_seqs_cardinality, + ) in enumerate(queries_unique): + for j, seq in enumerate(query_sequences): + # The header of first sequence set as 101 + query_seq_headername = 101 + j + f.write(f">{query_seq_headername}\n{seq}\n") + + run_mmseqs( + args.mmseqs, + ["createdb", query_file, args.base.joinpath("qdb"), "--shuffle", "0"], + ) + with args.base.joinpath("qdb.lookup").open("w") as f: + id = 0 + file_number = 0 + for job_number, ( + raw_jobname, + query_sequences, + query_seqs_cardinality, + ) in enumerate(queries_unique): + for seq in query_sequences: + raw_jobname_first = raw_jobname.split()[0] + f.write(f"{id}\t{raw_jobname_first}\t{file_number}\n") + id += 1 + file_number += 1 + + mmseqs_search_monomer( + mmseqs=args.mmseqs, + dbbase=args.dbbase, + base=args.base, + uniref_db=args.db1, + template_db=args.db2, + metagenomic_db=args.db3, + use_env=args.use_env, + use_templates=args.use_templates, + filter=args.filter, + expand_eval=args.expand_eval, + align_eval=args.align_eval, + diff=args.diff, + qsc=args.qsc, + max_accept=args.max_accept, + prefilter_mode=args.prefilter_mode, + s=args.s, + db_load_mode=args.db_load_mode, + threads=args.threads, + ) + if is_complex is True: + mmseqs_search_pair( + mmseqs=args.mmseqs, + dbbase=args.dbbase, + base=args.base, + uniref_db=args.db1, + prefilter_mode=args.prefilter_mode, + s=args.s, + db_load_mode=args.db_load_mode, + threads=args.threads, + pairing_strategy=args.pairing_strategy, + pair_env=False, + ) + if args.use_env_pairing: + mmseqs_search_pair( + mmseqs=args.mmseqs, + dbbase=args.dbbase, + base=args.base, + uniref_db=args.db1, + spire_db=args.db4, + prefilter_mode=args.prefilter_mode, + s=args.s, + db_load_mode=args.db_load_mode, + threads=args.threads, + pairing_strategy=args.pairing_strategy, + pair_env=True, + ) + + id = 0 + for job_number, ( + raw_jobname, + query_sequences, + query_seqs_cardinality, + ) in enumerate(queries_unique): + unpaired_msa = [] + paired_msa = None + if len(query_seqs_cardinality) > 1: + paired_msa = [] + for seq in query_sequences: + with args.base.joinpath(f"{id}.a3m").open("r") as f: + unpaired_msa.append(f.read()) + args.base.joinpath(f"{id}.a3m").unlink() + + if args.use_env_pairing: + with open(args.base.joinpath(f"{id}.paired.a3m"), 'a') as file_pair: + with open(args.base.joinpath(f"{id}.env.paired.a3m"), 'r') as file_pair_env: + while chunk := file_pair_env.read(10 * 1024 * 1024): + file_pair.write(chunk) + args.base.joinpath(f"{id}.env.paired.a3m").unlink() + + if len(query_seqs_cardinality) > 1: + with args.base.joinpath(f"{id}.paired.a3m").open("r") as f: + 
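# A paired MSA exists only when the query complex has more than one unique chain.
+ 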
paired_msa.append(f.read()) + args.base.joinpath(f"{id}.paired.a3m").unlink() + id += 1 + msa = msa_to_str( + unpaired_msa, paired_msa, query_sequences, query_seqs_cardinality + ) + args.base.joinpath(f"{job_number}.a3m").write_text(msa) + + # rename a3m files + for job_number, (raw_jobname, query_sequences, query_seqs_cardinality) in enumerate(queries_unique): + os.rename( + args.base.joinpath(f"{job_number}.a3m"), + args.base.joinpath(f"{safe_filename(raw_jobname)}.a3m"), + ) + + # rename m8 files + if args.use_templates: + id = 0 + for raw_jobname, query_sequences, query_seqs_cardinality in queries_unique: + with args.base.joinpath(f"{safe_filename(raw_jobname)}_{args.db2}.m8").open( + "w" + ) as f: + for _ in range(len(query_seqs_cardinality)): + with args.base.joinpath(f"{id}.m8").open("r") as g: + f.write(g.read()) + os.remove(args.base.joinpath(f"{id}.m8")) + id += 1 + + query_file.unlink() + run_mmseqs(args.mmseqs, ["rmdb", args.base.joinpath("qdb")]) + run_mmseqs(args.mmseqs, ["rmdb", args.base.joinpath("qdb_h")]) + + +if __name__ == "__main__": + main() From 6abf48fa7473290faa09caa81b67f27468e59f9a Mon Sep 17 00:00:00 2001 From: jflucier <3505568+jflucier@users.noreply.github.com> Date: Mon, 13 May 2024 16:46:46 -0400 Subject: [PATCH 53/64] gen plots new version --- scripts/generate_pae_plddt_plot.py | 384 ++++++++++------------------- scripts/generate_pae_plot.py | 163 ++++++++++++ scripts/generate_plddt_plot.py | 155 ++++++++++++ 3 files changed, 442 insertions(+), 260 deletions(-) create mode 100644 scripts/generate_pae_plot.py create mode 100644 scripts/generate_plddt_plot.py diff --git a/scripts/generate_pae_plddt_plot.py b/scripts/generate_pae_plddt_plot.py index 90d722d7..89bb6f9f 100644 --- a/scripts/generate_pae_plddt_plot.py +++ b/scripts/generate_pae_plddt_plot.py @@ -7,20 +7,22 @@ import pandas as pd from matplotlib import pyplot as plt, colors as cols, cm as cm, rcParams, font_manager +from mpl_toolkits.axes_grid1 import ImageGrid import json from sys import exit import os from Bio import PDB as pdb +from Bio import SeqIO import io import json from json import encoder + encoder.FLOAT_REPR = lambda o: format(o, '.2f') # plot size, in inches. 
plot_size = 16 -# @markdown Input value to increment plot axes by (this may need finetuning based on output) -plot_increment = "50" # @param[10,25,50,100,250,500] +plot_increment = "200" # @param[10,25,50,100,250,500] plot_increment = int(plot_increment) @@ -31,60 +33,16 @@ def __init__(self, name, PathToFile, FastaSequence=None, ranking=None): self.name = name self.PathToFile = PathToFile self.FastaSequence = FastaSequence - self.saving_filename = name #self.PathToFile.split("/")[-1].split(".")[0] + self.saving_filename = name self.saving_pathname = self.PathToFile.split(self.saving_filename)[0] if ranking: self.saving_filename = "ranked_{}".format(ranking) - # Generate a plot of pLDDT value - def plot_pLDDT(self, size_in_inches=3.5, axis_label_increment=100): - x = list(range(0, len(self.pLDDT), 1)) - y = list(self.pLDDT) - - # Use standard AlphaFold colors - cmap = cols.LinearSegmentedColormap.from_list("", ["red", "orange", "yellow", "cornflowerblue", "blue"]) - - plt.figure(figsize=(size_in_inches, (size_in_inches / 2))) - ticks = np.arange(0, len(self.pLDDT), axis_label_increment) - plt.xticks(ticks) - plt.yticks() - plt.title(self.name, size=20, fontweight="bold") - plt.xlabel("Residue index", size=16, fontweight="bold") - plt.ylabel("Predicted LDDT", size=16, fontweight="bold") - plt.scatter(x, y, c=y, cmap=cmap, s=5) - plt.clim(0, 100) - scale = plt.colorbar(shrink=0.5) - scale.set_label(label="Predicted LDDT", size=12, fontweight="bold") - # Save to directory with pickle file in - plt.savefig('{}/{}_pLDDT.png'.format(self.saving_pathname, self.saving_filename), dpi=300) - - # Generate a plot from PAE measurements - - def plot_PAE(self, size_in_inches=3.5, axis_label_increment=100): - ticks = np.arange(0, self.PAE[1].size, axis_label_increment) - plt.figure(figsize=(size_in_inches, size_in_inches)) - PAE = plt.imshow(self.PAE, cmap="bwr") - plt.xticks(ticks) - plt.yticks(ticks) - plt.title(self.name, size=20, fontweight="bold") - plt.xlabel("Residue index", size=16, fontweight="bold") - plt.ylabel("Residue index", size=16, fontweight="bold") - scale = plt.colorbar(PAE, shrink=0.5) - scale.set_label(label="Predicted error (Å)", size=14, fontweight="bold") - - # Save plot - plt.savefig('{}/{}_PAE.png'.format(self.saving_pathname, self.saving_filename), dpi=300) - - # Generate dataframe from PAE data and save to csv - pd_PAE = pd.DataFrame(self.PAE) - pd_PAE.to_csv('{}/{}.csv'.format(self.saving_pathname, self.saving_filename)) - # pd_PAE.to_json('{}/{}.json'.format(self.saving_pathname, self.saving_filename)) - class AlphaFoldPickle(AlphaFoldMetaData): - def __init__(self, PathToFile, FastaSequence=None, ranking=None): - super().__init__(PathToFile, FastaSequence, ranking) # Define attributes + def __init__(self, name, PathToFile, FastaSequence=None, ranking=None): + super().__init__(name, PathToFile, FastaSequence, ranking) # Define attributes if ranking: self.saving_filename = "ranked_{}".format(ranking) self.data = [] @@ -114,8 +72,8 @@ def __init__(self, PathToFile, FastaSequence=None, ranking=None): def save_to_json(self): # save pkl to json format as colabfold colab_data = {} - colab_data['plddt'] = list(np.around(np.array(self.pLDDT.tolist()),2)) - colab_data['pae'] = list(np.around(np.array(self.PAE.tolist()),2)) + colab_data['plddt'] = list(np.around(np.array(self.pLDDT.tolist()), 2)) + colab_data['pae'] = list(np.around(np.array(self.PAE.tolist()), 2)) colab_data['max_pae'] = self.max_pae colab_data['ptm'] = self.ptm colab_data['iptm'] = self.iptm @@ -129,227 +87,133 @@ 
-    # Generate a ChimeraX attribute file from pLDDT measurements
-    def write_pLDDT_file(self):
-        seqMismatch = False
-        pd_lDDT = pd.DataFrame(self.pLDDT)
-        # Name dataframe column
-        pd_lDDT.columns = ["pLDDT"]
-
-        # If the fasta file was provided:
-        if self.FastaSequence != None:
-
-            # Open the fasta file in read mode
-            with (open("{}".format(self.FastaSequence), "r")) as openfile:
-                fasta = openfile.read()
-
-            # Delete header line and remove line-breaks
-            sequence = fasta.split("\n", 1)[1].replace("\n", "")
-
-            # Check that the lengths of the two sequences match
-            if len(sequence) != len(pd_lDDT):
-
-                # If not, ignore the fasta file
-                print(
-                    "Length of sequence in fasta file provided ({}) does not match length of sequence used in AlphaFold prediction ({}). Ignoring fasta file.".format(
-                        len(sequence), len(pd_lDDT)))
-                seqMismatch = True
-            # If they do,
-            else:
-                # Convert the fasta sequence into a residue list
-                list_sequence = []
-                for item in sequence:
-                    list_sequence.append(item)
-
-                # Convert the list into a pandas series
-                pd_sequence = pd.Series(list_sequence)
-
-                # Insert the series into the dataframe at column 1 to act as labels for the data
-                pd_lDDT.insert(0, "Residue", pd_sequence)
-
-        # Otherwise, remind user to check that they have used corret input files
-        else:
-            print("Number of residues for which pLDDT is provided: ", len(pd_lDDT),
-                  "If this does not match the length of your sequence, please double check the input file.")
-
-        # Tell python not to elide middle rows of dataframe when printing to std.out
-        pd.set_option("display.max_rows", None, "display.max_columns", None)
-
-        # Save dataframe to ./outputfiles with same name as original pickle and .csv extension
-        pd_lDDT.to_csv('{}/{}_pLDDT.csv'.format(self.saving_pathname, self.saving_filename))
-        # Delete residue ID
-        if self.FastaSequence != None and seqMismatch == False:
-            lDDT_table = pd_lDDT.drop('Residue', axis=1)
-        else:
-            lDDT_table = pd_lDDT
-
-        # Initialise list to store Chimera-style residue identifiers (":x", where x = residue number)
-        residue_list = []
-
-        # Populate this list
-        for residue in range(0, len(lDDT_table)):
-            residue_list.append(":{}".format(residue + 1))
-
-        # Save to pandas format
-        chimerax_numbering = pd.Series(residue_list)
-
-        # Insert in the first column of the dataframe, to satisfy ChimeraX formatting
-        lDDT_table.insert(0, 'Numbering', chimerax_numbering)
-
-        # Tidy indices so the first label is 1 not 0
-        pd_lDDT.index += 1
-        # Create a file to save the Chimera attribute output into
-        with (open('{}/{}_lDDT.txt'.format(self.saving_pathname, self.saving_filename), 'w+')) as openfile:
-
-            # Write file header in correct format
-            openfile.write('attribute: pLDDTvalue\nmatch mode: 1-to-1\nrecipient: residues\n')
-
-            # Iterate over rows of dataframe, writing residue ID and lDDT value to file with correct formatting
-            for i, row in lDDT_table.iterrows():
-                openfile.write("\t{}\t{}\n".format(row['Numbering'], row['pLDDT']))
-
-        return pd_lDDT
-
+def plot_pLDDT(outdir, name, model1, model2, model3, prot1len, size_in_inches=3.5, axis_label_increment=100):
+    m1_x = list(range(0, len(model1.pLDDT), 1))
+    m1_y = list(model1.pLDDT)
+    m2_x = list(range(0, len(model2.pLDDT), 1))
+    m2_y = list(model2.pLDDT)
+    m3_x = list(range(0, len(model3.pLDDT), 1))
+    m3_y = list(model3.pLDDT)
+
+    plt.figure(figsize=(size_in_inches, (size_in_inches / 2)))
+    ticks = np.arange(0, len(model1.pLDDT), axis_label_increment)
+    plt.xticks(ticks)
+    plt.yticks()
+    plt.title(name, size=20, fontweight="bold")
+    plt.xlabel("Residue index", size=16, fontweight="bold")
+    plt.ylabel("Predicted LDDT", size=16, fontweight="bold")
+    plt.plot(m1_x, m1_y, '-b', label='model1')
+    plt.plot(m2_x, m2_y, '-m', label='model2')
+    plt.plot(m3_x, m3_y, '-g', label='model3')
+
+    plt.vlines(x=prot1len, ymin=0, ymax=100, colors='k', linestyles='--')
+
+    plt.legend(loc='lower right')
+    plt.savefig('{}/{}_pLDDT.png'.format(outdir, name), dpi=300)
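A synthetic pickle makes it possible to exercise AlphaFoldPickle and the new three-model plot without real AlphaFold output. Every key below is one the class actually reads; the values themselves are fabricated for illustration:

import pickle
import numpy as np

n = 300  # arbitrary residue count
fake = {
    "plddt": np.random.uniform(40, 95, n),
    "predicted_aligned_error": np.random.uniform(0, 30, (n, n)),
    "max_predicted_aligned_error": 31.75,
    "ptm_score": 0.62,
    "iptm_score": 0.48,
}
with open("/tmp/fake_model.pkl", "wb") as fh:
    pickle.dump(fake, fh)

# Within the script's namespace one could then do, e.g.:
# m = AlphaFoldPickle("test", "/tmp/fake_model.pkl")
# plot_pLDDT("/tmp", "test", m, m, m, prot1len=150)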
-
-class AlphaFoldJson:
-    def __init__(self, PathToDirectory):
-        self.PathToDirectory = PathToDirectory
-        self.RankingDebug = []
-        try:
-            with open("{}/ranking_debug.json".format(self.PathToDirectory)) as jsonfile:
-                self.RankingDebugRaw = json.load(jsonfile)
-                for index in enumerate(self.RankingDebugRaw['order']):
-                    self.RankingDebug.append(index)
-        except:
-            exit(
-                "To use batch processing, please ensure that the ranking_debug.json file and the result_model_n.pkl files are present in the directory issued in the command. Exiting AlphaPickle now...")
-
-
-class AlphaFoldPDB(AlphaFoldMetaData):
-    def loadCleanStructure(self, id, filePath):
-        standardResidues = ["ALA", "ARG", "ASN", "ASP", "CYS", "GLU", "GLN", "GLY", "HIS", "ILE", "LEU", "LYS", "MET",
-                            "PHE", "PRO", "SER", "THR", "TRP", "TYR", "VAL"]
-
-        parser = pdb.PDBParser()
-        parsedStructure = parser.get_structure(id, filePath)
-        for chain in parsedStructure.get_chains():
-            removeResidues = list()
-            for i, residue in enumerate(chain.get_residues()):
-                if residue.resname not in standardResidues:
-                    removeResidues.append(residue.id)
-                    print(residue.id)
-            [chain.detach_child(id) for id in removeResidues]
-
-        return parsedStructure
-
-    def extractPLDDT(self, PDBobject):
-        pLDDT = []
-        for residue in PDBobject.get_residues():
-            i = 0
-            for atom in residue.get_atoms():
-                while i < 1:
-                    pLDDT.append(atom.bfactor)
-                    i += 1
-        pLDDT_series = pd.Series(pLDDT)
-        return pLDDT_series
-
-    def __init__(self, PathToFile, FastaSequence=None, ranking=None):
-        super().__init__(PathToFile, FastaSequence, ranking)
-        # Define attributes
-        if ranking:
-            self.saving_filename = "ranked_{}".format(ranking)
-        self.structure = self.loadCleanStructure("test", PathToFile)
-        self.pLDDT = self.extractPLDDT(self.structure)
-        self.data = []
-        self.PAE = None
-
-    def PDB_write_pLDDT(self):
-        residueNumbers = pd.Series(range(1, len(self.pLDDT) + 1))
-        if len(residueNumbers) != len(self.pLDDT):
-            print("Oops")
-        else:
-            pd_lDDT = pd.DataFrame(self.pLDDT)
-            pd_lDDT.columns = ["pLDDT"]
-            pd_lDDT.insert(0, "Residue", residueNumbers)
-            pd_lDDT.to_csv('{}/{}_pLDDT.csv'.format(self.saving_pathname, self.saving_filename))
-
-
-class AlphaFoldPAEJson(AlphaFoldMetaData):
-    def extractPAEfromJson(self, PathToFile):
-
-        with open(PathToFile, 'r') as file:
-            jsonstring = json.load(file)
-            if 'predicted_aligned_error' in jsonstring[0]:
-                pae = jsonstring[0]['predicted_aligned_error']
-            else:
-                residue1 = jsonstring[0]['residue1']
-                residue2 = jsonstring[0]['residue2']
-                pae = jsonstring[0]['distance']
-
-        if 'predicted_aligned_error' in jsonstring[0]:
-            paeArray = np.array(pae)
-        else:
-            paeArray = np.ones((max(residue1), (max(residue2))))
-            for i, j, n in zip(residue1, residue2, pae):
-                paeArray[int(i - 1), int(j - 1)] = n
-
-        return paeArray
-
-    def __init__(self, PathToFile, FastaSequence=None, ranking=None):
-        super().__init__(PathToFile, FastaSequence, ranking)
-        if ranking:
-            self.saving_filename = "ranked_{}".format(ranking)
-
-        self.PAE = self.extractPAEfromJson(PathToFile)
-        self.pLDDT = None
-
-
-# use_files_from_google_drive = False #@param {type:"boolean"}
-#
-# if not use_files_from_google_drive:
-#     print("Select PAE files for upload")
-#     PAEfiles = list(google.colab.files.upload().keys())
-#     print("Select pLDDT files for upload")
-#     pLDDTfiles = list(google.colab.files.upload().keys())
-# else:
-#     #print("Select PAE files for upload")
-#     path_to_PAE_file_in_drive = "" #@param {type:"string"}
-#     if ":" in path_to_PAE_file_in_drive:
-#         path_to_PAE_file_in_drive = path_to_PAE_file_in_drive.split(":")
-#     else:
-#         path_to_PAE_file_in_drive = [path_to_PAE_file_in_drive]
-#     path_to_pLDDT_file_in_drive = "" #@param {type:"string"}
-#     if ":" in path_to_pLDDT_file_in_drive:
-#         path_to_pLDDT_file_in_drive = path_to_pLDDT_file_in_drive.split(":")
-#     else:
-#         path_to_pLDDT_file_in_drive = [path_to_pLDDT_file_in_drive]
-
-def generate_plots(pkl, outdir, name):
-    results = AlphaFoldPickle(name, pkl, None)
-    results.saving_pathname = outdir
-    results.saving_filename = name
-    if type(results.PAE) == np.ndarray:
-        print("Plotting PAE for {} and saving to csv".format(pkl))
-        results.plot_PAE(size_in_inches=plot_size, axis_label_increment=plot_increment)
-
-    # results = AlphaFoldPickle(name, pkl, None)
-    # results.saving_filename = name
-    # results.saving_pathname = outdir
-    results.write_pLDDT_file()
-    print("Plotting pLDDT for {} and saving to csv".format(pkl))
-    results.plot_pLDDT(size_in_inches=plot_size, axis_label_increment=plot_increment)
-
-    print("Saving pickle {} in json format".format(pkl))
-    results.save_to_json()
+# def generate_plddt_plot(fasta, model1, model2, model3, outdir, name):
+#
+#
+#
+#     prot1len = get_multimer_prot1_len(fasta)
+#     # results.write_pLDDT_file()
+#     print("Plotting pLDDT for {}".format(name))
+#     plot_pLDDT(outdir, name, model1, model2, model3, prot1len, size_in_inches=plot_size,
+#                axis_label_increment=plot_increment)
+
+
+def plot_paE(outdir, name, model1, model2, model3, prot1len, size_in_inches=3.5, axis_label_increment=200):
+    def draw_subplot(name, ax, model, prot1len, display_scale=False):
+        ticks = np.arange(0, model.PAE[1].size, axis_label_increment)
+        img_ax = ax.imshow(model.PAE, cmap="bwr")
+        ax.set_xticks(ticks)
+        ax.set_yticks(ticks)
+        ax.set_title(name, size=20, fontweight="bold")
+        ax.set_xlabel("Residue index", size=16, fontweight="bold")
+        ax.set_ylabel("Residue index", size=16, fontweight="bold")
+        ax.axvline(x=prot1len, color='k', linewidth=4)
+        ax.axhline(y=prot1len, color='k', linewidth=4)
+        return img_ax
+
+    fig = plt.figure(figsize=(size_in_inches, size_in_inches))
+
+    grid = ImageGrid(fig, 111,  # as in plt.subplot(111)
+                     nrows_ncols=(1, 3),
+                     axes_pad=0.15,
+                     share_all=False,
+                     cbar_location="right",
+                     cbar_mode="single",
+                     cbar_size="7%",
+                     cbar_pad=0.15,
+                     )
+
+    models = [model1, model2, model3]
+
+    cnt = 1
+    for ax, model in zip(grid, models):
+        im = draw_subplot(f'model{cnt}', ax, model, prot1len)
+        cnt += 1
+
+    scale = ax.cax.colorbar(im, label="Predicted error (Å)")
+    scale.set_label(label="Predicted error (Å)", size=14, fontweight="bold")
+    # Save plot
+    plt.savefig('{}/{}_PAE.png'.format(outdir, name), dpi=300)
+
+
+# def generate_pae_plot(fasta, pkl1, pkl2, pkl3, outdir, name, prot1len):
+#
+#     print("Plotting pLDDT for {}".format(name))
+#     plot_paE(outdir, name, model1_results, model2_results, model3_results, prot1len, size_in_inches=plot_size,
+#              axis_label_increment=plot_increment)
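plot_paE above leans on mpl_toolkits' ImageGrid to lay out the three PAE panels with a single shared colorbar. The pattern in isolation, with synthetic matrices standing in for PAE arrays:

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid

fig = plt.figure(figsize=(9, 3))
grid = ImageGrid(fig, 111, nrows_ncols=(1, 3), axes_pad=0.15,
                 cbar_location="right", cbar_mode="single",
                 cbar_size="7%", cbar_pad=0.15)
for ax in grid:
    # Each panel gets its own image; the grid shares one colorbar axis.
    im = ax.imshow(np.random.uniform(0, 30, (200, 200)), cmap="bwr")
grid.cbar_axes[0].colorbar(im)  # one scale for all three panels
fig.savefig("imagegrid_demo.png", dpi=150)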
+
+def generate_plots_and_json(fasta, pkl1, pkl2, pkl3, outdir, name):
+    model1_results = AlphaFoldPickle(name, pkl1)
+    model1_results.saving_pathname = outdir
+    # "${NAME}_model_${model}_multimer_v3_relaxed"
+    model1_results.saving_filename = f"{name}_model_1_multimer_v3_relaxed"
+    print("Saving model1 in json format")
+    model1_results.save_to_json()
+
+    model2_results = AlphaFoldPickle(name, pkl2)
+    model2_results.saving_pathname = outdir
+    model2_results.saving_filename = f"{name}_model_2_multimer_v3_relaxed"
+    print("Saving model2 in json format")
+    model2_results.save_to_json()
+
+    model3_results = AlphaFoldPickle(name, pkl3)
+    model3_results.saving_pathname = outdir
+    model3_results.saving_filename = f"{name}_model_3_multimer_v3_relaxed"
+    print("Saving model3 in json format")
+    model3_results.save_to_json()
+
+    def get_multimer_prot1_len(f):
+        with open(f) as handle:
+            for record in SeqIO.parse(handle, "fasta"):
+                return len(record.seq)
+
+    prot1len = get_multimer_prot1_len(fasta)
+
+    print("Generating plddt plot")
+    plot_pLDDT(outdir, name, model1_results, model2_results, model3_results, prot1len, size_in_inches=plot_size,
+               axis_label_increment=plot_increment)
+    print("Generating PAE plot")
+    plot_paE(outdir, name, model1_results, model2_results, model3_results, prot1len, size_in_inches=plot_size,
+             axis_label_increment=plot_increment)
 
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument('--input_pkl', dest='input_pkl', required=True)
+    parser.add_argument('--fasta', dest='fasta', required=True)
+    parser.add_argument('--model1_pkl', dest='model1_pkl', required=True)
+    parser.add_argument('--model2_pkl', dest='model2_pkl', required=True)
+    parser.add_argument('--model3_pkl', dest='model3_pkl', required=True)
     parser.add_argument('--output_dir', dest='output_dir', required=True)
     parser.add_argument('--basename', dest='basename', required=True)
     args = parser.parse_args()
 
-    generate_plots(args.input_pkl, args.output_dir, args.basename)
+    generate_plots_and_json(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir,
+                            args.basename)
+    # generate_plddt_plot(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir, args.basename)
+    # generate_pae_plot(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir, args.basename)
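For reference, the reworked entry point can also be driven from Python rather than the CLI. A sketch with hypothetical paths (the pickles are the per-model result pickles AlphaFold writes):

# Hypothetical file names, for illustration only.
generate_plots_and_json(
    fasta="inputs/complex.fasta",
    pkl1="out/result_model_1_multimer_v3_pred_0.pkl",
    pkl2="out/result_model_2_multimer_v3_pred_0.pkl",
    pkl3="out/result_model_3_multimer_v3_pred_0.pkl",
    outdir="out/plots",
    name="complex",
)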
diff --git a/scripts/generate_pae_plot.py b/scripts/generate_pae_plot.py
new file mode 100644
index 00000000..c83427a0
--- /dev/null
+++ b/scripts/generate_pae_plot.py
@@ -0,0 +1,163 @@
+# taken from: https://colab.research.google.com/github/mattarnoldbio/alphapickle/blob/main/AlphaPickle.ipynb#scrollTo=jQUP8Ab3RN7s
+import argparse
+import sys
+import pickle as pkl
+import numpy as np
+import pandas as pd
+
+from matplotlib import pyplot as plt, colors as cols, cm as cm, rcParams, font_manager
+from mpl_toolkits.axes_grid1 import ImageGrid
+import json
+from sys import exit
+import os
+from Bio import SeqIO
+import io
+from json import encoder
+
+encoder.FLOAT_REPR = lambda o: format(o, '.2f')
+
+# plot size, in inches.
+plot_size = 16
+
+# @markdown Input value to increment plot axes by (this may need finetuning based on output)
+plot_increment = "200"  # @param[10,25,50,100,250,500]
+plot_increment = int(plot_increment)
+
+
+# Define class for AlphaFold metadata file and class methods
+class AlphaFoldMetaData(object):
+    def __init__(self, name, PathToFile, FastaSequence=None, ranking=None):
+        # Define attributes
+        self.name = name
+        self.PathToFile = PathToFile
+        self.FastaSequence = FastaSequence
+        self.saving_filename = name
+        self.saving_pathname = self.PathToFile.split(self.saving_filename)[0]
+        if ranking:
+            self.saving_filename = "ranked_{}".format(ranking)
+
+
+class AlphaFoldPickle(AlphaFoldMetaData):
+
+    def __init__(self, name, PathToFile, FastaSequence=None, ranking=None):
+        super().__init__(name, PathToFile, FastaSequence, ranking)  # Define attributes
+        if ranking:
+            self.saving_filename = "ranked_{}".format(ranking)
+        self.data = []
+        self.PAE = None
+
+        # Extract pickled data
+        with (open("{}".format(self.PathToFile), "rb")) as openfile:
+            while True:
+                try:
+                    self.data.append(pkl.load(openfile))
+                except EOFError:
+                    break
+
+        # Try statement accounts for data run using non-pTM models, with no PAE output
+        try:
+            self.PAE = self.data[0]['predicted_aligned_error'].round(2)
+        except:
+            print("PAE model data not present. To access this performance metric, run AlphaFold"
+                  "using pTM-enabled models.")
+
+        # Define pLDDT
+        self.pLDDT = self.data[0]['plddt'].round(2)
+        self.max_pae = self.data[0]['max_predicted_aligned_error']
+        self.ptm = self.data[0]['ptm_score']
+        self.iptm = self.data[0]['iptm_score']
+
+    def save_to_json(self):
+        # save pkl to json format as colabfold
+        colab_data = {}
+        colab_data['plddt'] = list(np.around(np.array(self.pLDDT.tolist()), 2))
+        colab_data['pae'] = list(np.around(np.array(self.PAE.tolist()), 2))
+        colab_data['max_pae'] = self.max_pae
+        colab_data['ptm'] = self.ptm
+        colab_data['iptm'] = self.iptm
+
+        class NumpyEncoder(json.JSONEncoder):
+            def default(self, obj):
+                if isinstance(obj, np.ndarray):
+                    return obj.tolist()
+                return json.JSONEncoder.default(self, obj)
+
+        with open('{}/{}.json'.format(self.saving_pathname, self.saving_filename), "w") as outfile:
+            outfile.write(json.dumps(colab_data, cls=NumpyEncoder))
+
+
+def plot_paE(outdir, name, model1, model2, model3, prot1len, size_in_inches=3.5, axis_label_increment=200):
+    def draw_subplot(name, ax, model, prot1len, display_scale=False):
+        ticks = np.arange(0, model.PAE[1].size, axis_label_increment)
+        img_ax = ax.imshow(model.PAE, cmap="bwr")
+        ax.set_xticks(ticks)
+        ax.set_yticks(ticks)
+        ax.set_title(name, size=20, fontweight="bold")
+        ax.set_xlabel("Residue index", size=16, fontweight="bold")
+        ax.set_ylabel("Residue index", size=16, fontweight="bold")
+        ax.axvline(x=prot1len, color='k', linewidth=4)
+        ax.axhline(y=prot1len, color='k', linewidth=4)
+        return img_ax
+
+    fig = plt.figure(figsize=(size_in_inches, size_in_inches))
+
+    grid = ImageGrid(fig, 111,  # as in plt.subplot(111)
+                     nrows_ncols=(1, 3),
+                     axes_pad=0.15,
+                     share_all=False,
+                     cbar_location="right",
+                     cbar_mode="single",
+                     cbar_size="7%",
+                     cbar_pad=0.15,
+                     )
+
+    models = [model1, model2, model3]
+
+    cnt = 1
+    for ax, model in zip(grid, models):
+        im = draw_subplot(f'model{cnt}', ax, model, prot1len)
+        cnt += 1
+
+    scale = ax.cax.colorbar(im, label="Predicted error (Å)")
+    scale.set_label(label="Predicted error (Å)", size=14, fontweight="bold")
+    # Save plot
+    plt.savefig('{}/{}_PAE.png'.format(outdir, name), dpi=300)
+
+
+def generate_pae_plot(fasta, pkl1, pkl2, pkl3, outdir, name):
+    model1_results = AlphaFoldPickle(name, pkl1)
+    model1_results.saving_pathname = outdir
+    model1_results.saving_filename = name
+
+    model2_results = AlphaFoldPickle(name, pkl2)
+    model2_results.saving_pathname = outdir
+    model2_results.saving_filename = name
+
+    model3_results = AlphaFoldPickle(name, pkl3)
+    model3_results.saving_pathname = outdir
+    model3_results.saving_filename = name
+
+    def get_multimer_prot1_len(f):
+        with open(f) as handle:
+            for record in SeqIO.parse(handle, "fasta"):
+                return len(record.seq)
+
+    prot1len = get_multimer_prot1_len(fasta)
+    # results.write_pLDDT_file()
+    print("Plotting PAE for {}".format(name))
+    plot_paE(outdir, name, model1_results, model2_results, model3_results, prot1len, size_in_inches=plot_size,
+             axis_label_increment=plot_increment)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--fasta', dest='fasta', required=True)
+    parser.add_argument('--model1_pkl', dest='model1_pkl', required=True)
+    parser.add_argument('--model2_pkl', dest='model2_pkl', required=True)
+    parser.add_argument('--model3_pkl', dest='model3_pkl', required=True)
+    parser.add_argument('--output_dir', dest='output_dir', required=True)
+    parser.add_argument('--basename', dest='basename', required=True)
+    args = parser.parse_args()
+
+    generate_pae_plot(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir, args.basename)
diff --git a/scripts/generate_plddt_plot.py b/scripts/generate_plddt_plot.py
new file mode 100644
index 00000000..6b6a49f4
--- /dev/null
+++ b/scripts/generate_plddt_plot.py
@@ -0,0 +1,155 @@
+# taken from: https://colab.research.google.com/github/mattarnoldbio/alphapickle/blob/main/AlphaPickle.ipynb#scrollTo=jQUP8Ab3RN7s
+import argparse
+import sys
+import pickle as pkl
+import numpy as np
+import pandas as pd
+
+from matplotlib import pyplot as plt, colors as cols, cm as cm, rcParams, font_manager
+import json
+from sys import exit
+import os
+from Bio import SeqIO
+import io
+from json import encoder
+
+encoder.FLOAT_REPR = lambda o: format(o, '.2f')
+
+# plot size, in inches.
+plot_size = 16
+
+# @markdown Input value to increment plot axes by (this may need finetuning based on output)
+plot_increment = "50"  # @param[10,25,50,100,250,500]
+plot_increment = int(plot_increment)
+
+
+# Define class for AlphaFold metadata file and class methods
+class AlphaFoldMetaData(object):
+    def __init__(self, name, PathToFile, FastaSequence=None, ranking=None):
+        # Define attributes
+        self.name = name
+        self.PathToFile = PathToFile
+        self.FastaSequence = FastaSequence
+        self.saving_filename = name
+        self.saving_pathname = self.PathToFile.split(self.saving_filename)[0]
+        if ranking:
+            self.saving_filename = "ranked_{}".format(ranking)
+
+
+class AlphaFoldPickle(AlphaFoldMetaData):
+
+    def __init__(self, name, PathToFile, FastaSequence=None, ranking=None):
+        super().__init__(name, PathToFile, FastaSequence, ranking)  # Define attributes
+        if ranking:
+            self.saving_filename = "ranked_{}".format(ranking)
+        self.data = []
+        self.PAE = None
+
+        # Extract pickled data
+        with (open("{}".format(self.PathToFile), "rb")) as openfile:
+            while True:
+                try:
+                    self.data.append(pkl.load(openfile))
+                except EOFError:
+                    break
+
+        # Try statement accounts for data run using non-pTM models, with no PAE output
+        try:
+            self.PAE = self.data[0]['predicted_aligned_error'].round(2)
+        except:
+            print("PAE model data not present. To access this performance metric, run AlphaFold"
+                  "using pTM-enabled models.")
+
+        # Define pLDDT
+        self.pLDDT = self.data[0]['plddt'].round(2)
+        self.max_pae = self.data[0]['max_predicted_aligned_error']
+        self.ptm = self.data[0]['ptm_score']
+        self.iptm = self.data[0]['iptm_score']
+
+    def save_to_json(self):
+        # save pkl to json format as colabfold
+        colab_data = {}
+        colab_data['plddt'] = list(np.around(np.array(self.pLDDT.tolist()), 2))
+        colab_data['pae'] = list(np.around(np.array(self.PAE.tolist()), 2))
+        colab_data['max_pae'] = self.max_pae
+        colab_data['ptm'] = self.ptm
+        colab_data['iptm'] = self.iptm
+
+        class NumpyEncoder(json.JSONEncoder):
+            def default(self, obj):
+                if isinstance(obj, np.ndarray):
+                    return obj.tolist()
+                return json.JSONEncoder.default(self, obj)
+
+        with open('{}/{}.json'.format(self.saving_pathname, self.saving_filename), "w") as outfile:
+            outfile.write(json.dumps(colab_data, cls=NumpyEncoder))
+
+
+def plot_pLDDT(outdir, name, model1, model2, model3, prot1len, size_in_inches=3.5, axis_label_increment=100):
+    m1_x = list(range(0, len(model1.pLDDT), 1))
+    m1_y = list(model1.pLDDT)
+    m2_x = list(range(0, len(model2.pLDDT), 1))
+    m2_y = list(model2.pLDDT)
+    m3_x = list(range(0, len(model3.pLDDT), 1))
+    m3_y = list(model3.pLDDT)
+
+    plt.figure(figsize=(size_in_inches, (size_in_inches / 2)))
+    ticks = np.arange(0, len(model1.pLDDT), axis_label_increment)
+    plt.xticks(ticks)
+    plt.yticks()
+    plt.title(name, size=20, fontweight="bold")
+    plt.xlabel("Residue index", size=16, fontweight="bold")
+    plt.ylabel("Predicted LDDT", size=16, fontweight="bold")
+    plt.plot(m1_x, m1_y, '-b', label='model1')
+    plt.plot(m2_x, m2_y, '-m', label='model2')
+    plt.plot(m3_x, m3_y, '-g', label='model3')
+
+    plt.vlines(x=prot1len, ymin=0, ymax=100, colors='k', linestyles='--')
+
+    plt.legend(loc='lower right')
+    plt.savefig('{}/{}_pLDDT.png'.format(outdir, name), dpi=300)
+
+
+def generate_plddt_plot(fasta, pkl1, pkl2, pkl3, outdir, name):
+    model1_results = AlphaFoldPickle(name, pkl1)
+    model1_results.saving_pathname = outdir
+    model1_results.saving_filename = name
+    print("Saving model1 in json format")
+    model1_results.save_to_json()
+
+    model2_results = AlphaFoldPickle(name, pkl2)
+    model2_results.saving_pathname = outdir
+    model2_results.saving_filename = name
+    print("Saving model2 in json format")
+    model2_results.save_to_json()
+
+    model3_results = AlphaFoldPickle(name, pkl3)
+    model3_results.saving_pathname = outdir
+    model3_results.saving_filename = name
+    print("Saving model3 in json format")
+    model3_results.save_to_json()
+
+    def get_multimer_prot1_len(f):
+        with open(f) as handle:
+            for record in SeqIO.parse(handle, "fasta"):
+                return len(record.seq)
+
+    prot1len = get_multimer_prot1_len(fasta)
+    # results.write_pLDDT_file()
+    print("Plotting pLDDT for {}".format(name))
+    plot_pLDDT(outdir, name, model1_results, model2_results, model3_results, prot1len, size_in_inches=plot_size,
+               axis_label_increment=plot_increment)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--fasta', dest='fasta', required=True)
+    parser.add_argument('--model1_pkl', dest='model1_pkl', required=True)
+    parser.add_argument('--model2_pkl', dest='model2_pkl', required=True)
+    parser.add_argument('--model3_pkl', dest='model3_pkl', required=True)
+    parser.add_argument('--output_dir', dest='output_dir', required=True)
+    parser.add_argument('--basename', dest='basename', required=True)
+    args = parser.parse_args()
+
+    generate_plddt_plot(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir, args.basename)
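With the three per-model JSON files from this patch in hand, ranking models by confidence is straightforward. A sketch assuming the filename pattern used by generate_plots_and_json above:

import json

# Hypothetical filenames following the "{name}_model_{n}_multimer_v3_relaxed" pattern.
paths = [f"out/complex_model_{i}_multimer_v3_relaxed.json" for i in (1, 2, 3)]
scores = []
for p in paths:
    with open(p) as fh:
        d = json.load(fh)
    scores.append((p, d["ptm"], d["iptm"]))

# Rank by ipTM (interface confidence), highest first.
for p, ptm, iptm in sorted(scores, key=lambda t: t[2], reverse=True):
    print(f"{p}: ptm={ptm:.3f} iptm={iptm:.3f}")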
From 60a0b8bc77c397751c94011cd9d0539f7e536d8e Mon Sep 17 00:00:00 2001
From: jflucier <3505568+jflucier@users.noreply.github.com>
Date: Tue, 14 May 2024 11:33:24 -0400
Subject: [PATCH 54/64] add table to pae plot

---
 scripts/generate_pae_plddt_plot.py | 97 +++++++++++++++++++-----------
 1 file changed, 63 insertions(+), 34 deletions(-)

diff --git a/scripts/generate_pae_plddt_plot.py b/scripts/generate_pae_plddt_plot.py
index 89bb6f9f..1cce3dc0 100644
--- a/scripts/generate_pae_plddt_plot.py
+++ b/scripts/generate_pae_plddt_plot.py
@@ -8,6 +8,8 @@
 from matplotlib import pyplot as plt, colors as cols, cm as cm, rcParams, font_manager
 from mpl_toolkits.axes_grid1 import ImageGrid
+from matplotlib.table import table
+from matplotlib.gridspec import GridSpec
 import json
 from sys import exit
 import os
@@ -113,18 +115,23 @@ def plot_pLDDT(outdir, name, model1, model2, model3, prot1len, size_in_inches=3.
     plt.savefig('{}/{}_pLDDT.png'.format(outdir, name), dpi=300)
 
 
-def plot_paE(outdir, name, model1, model2, model3, prot1len, size_in_inches=3.5, axis_label_increment=200):
+def plot_paE(outdir, name, model1, model2, model3, prot1len, interface_df, size_in_inches=3.5, axis_label_increment=200):
+
+    # data = [
+    #     [0.742, 376, 64, 83, 92, 2, 4, 8, 6.0],
+    #     [0.742, 348, 69, 86, 92, 2, 3, 6, 6.0],
+    #     [0.018, 3, 54, 58, 63, 14, 15, 15, 5.7]
+    # ]
+    #
+    # columns = (
+    #     'pdockq', 'ncontacts', 'plddt_min', 'plddt_avg', 'plddt_max', 'pae_min', 'pae_avg', 'pae_max', 'distance_avg')
+    #
+    # df = pd.DataFrame(
+    #     data,
+    #     columns=list(columns)
+    # )
+
     def draw_subplot(name, ax, model, prot1len, display_scale=False):
         ticks = np.arange(0, model.PAE[1].size, axis_label_increment)
         img_ax = ax.imshow(model.PAE, cmap="bwr")
@@ -137,38 +144,50 @@ def draw_subplot(name, ax, model, prot1len, display_scale=False):
         ax.axhline(y=prot1len, color='k', linewidth=4)
         return img_ax
 
-    fig = plt.figure(figsize=(size_in_inches, size_in_inches))
+    nrows = 1
+    height_ratios = [1]
+    if interface_df is not None:
+        nrows = 2
+        height_ratios = [1, 2]
 
-    grid = ImageGrid(fig, 111,  # as in plt.subplot(111)
-                     nrows_ncols=(1, 3),
-                     axes_pad=0.15,
-                     share_all=False,
-                     cbar_location="right",
-                     cbar_mode="single",
-                     cbar_size="7%",
-                     cbar_pad=0.15,
-                     )
+    fig = plt.figure(figsize=(12, 10), layout="constrained")
+    gs1 = GridSpec(nrows, 4, figure=fig, width_ratios=[1, 1, 1, 0.1], height_ratios=height_ratios)
 
     models = [model1, model2, model3]
 
-    cnt = 1
-    for ax, model in zip(grid, models):
-        im = draw_subplot(f'model{cnt}', ax, model, prot1len)
-        cnt += 1
+    ax1 = fig.add_subplot(gs1[0, 0])
+    im1 = draw_subplot(f'model1', ax1, models[0], prot1len)
+
+    ax2 = fig.add_subplot(gs1[0, 1])
+    im2 = draw_subplot(f'model2', ax2, models[1], prot1len)
 
-    scale = ax.cax.colorbar(im, label="Predicted error (Å)")
+    ax3 = fig.add_subplot(gs1[0, 2])
+    im3 = draw_subplot(f'model3', ax3, models[2], prot1len)
+
+    ax4 = fig.add_subplot(gs1[0, 3])
+    mesh = ax4.pcolormesh(models[2].PAE, cmap="bwr")
+    scale = fig.colorbar(mesh, ax4, label="Predicted error (Å)")
     scale.set_label(label="Predicted error (Å)", size=14, fontweight="bold")
+
+    if interface_df is not None:
+        ax5 = fig.add_subplot(gs1[1, :])
+        ax5.axis('off')
+        ax5.axis('tight')
+        rows = ['model %d' % x for x in (1, 2, 3)]
+        tbl = ax5.table(
+            cellText=interface_df.values[:, 2:],
+            rowLabels=rows,
+            colLabels=list(interface_df.columns)[2:],
+            loc="upper center")
+        tbl.auto_set_font_size(False)
+        tbl.set_fontsize(14)
+        tbl.auto_set_column_width([0, 1, 2, 3, 4, 5, 6, 7, 8])
+
     # Save plot
     plt.savefig('{}/{}_PAE.png'.format(outdir, name), dpi=300)
 
 
-def generate_plots_and_json(fasta, pkl1, pkl2, pkl3, outdir, name):
+def generate_plots_and_json(fasta, pkl1, pkl2, pkl3, outdir, name, interface):
     model1_results = AlphaFoldPickle(name, pkl1)
     model1_results.saving_pathname = outdir
     # "${NAME}_model_${model}_multimer_v3_relaxed"
@@ -198,8 +217,17 @@ def get_multimer_prot1_len(f):
     print("Generating plddt plot")
     plot_pLDDT(outdir, name, model1_results, model2_results, model3_results, prot1len, size_in_inches=plot_size,
                axis_label_increment=plot_increment)
+
     print("Generating PAE plot")
-    plot_paE(outdir, name, model1_results, model2_results, model3_results, prot1len, size_in_inches=plot_size,
+    df = None
+    if interface is None:
+        print("No interface file provided, will not output interface table")
+    elif os.path.exists(interface):
+        df = pd.read_csv(interface, sep=",")
+    else:
+        print(f"Unable to create pandas dataframe with provided interface file {interface}")
+
+    plot_paE(outdir, name, model1_results, model2_results, model3_results, prot1len, df, size_in_inches=plot_size,
             axis_label_increment=plot_increment)
 
 
@@ -211,9 +239,10 @@ def get_multimer_prot1_len(f):
     parser.add_argument('--model3_pkl', dest='model3_pkl', required=True)
     parser.add_argument('--output_dir', dest='output_dir', required=True)
     parser.add_argument('--basename', dest='basename', required=True)
+    parser.add_argument('--interface', dest='interface', required=False)
     args = parser.parse_args()
 
     generate_plots_and_json(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir,
-                            args.basename)
+                            args.basename, args.interface)
     # generate_plddt_plot(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir, args.basename)
     # generate_pae_plot(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir, args.basename)
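The table code above slices interface_df.values[:, 2:], i.e. it skips the first two columns of the interface CSV. A sketch of a compatible input file, assuming (this is an inference from the slicing and from the commented-out block, not a documented format) two leading identifier columns followed by the metric columns listed there:

import pandas as pd

# Illustrative interface summary: one row per model; first two columns are
# assumed identifiers, the rest match the commented-out column names above.
df = pd.DataFrame(
    [
        ["complex", "model1", 0.742, 376, 64, 83, 92, 2, 4, 8, 6.0],
        ["complex", "model2", 0.742, 348, 69, 86, 92, 2, 3, 6, 6.0],
        ["complex", "model3", 0.018, 3, 54, 58, 63, 14, 15, 15, 5.7],
    ],
    columns=["name", "model", "pdockq", "ncontacts", "plddt_min", "plddt_avg",
             "plddt_max", "pae_min", "pae_avg", "pae_max", "distance_avg"],
)
df.to_csv("complex_interface.csv", index=False)  # pass via --interface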
From c4e14006b0548242d5b9b0601452ac0a3bc32d0d Mon Sep 17 00:00:00 2001
From: jflucier <3505568+jflucier@users.noreply.github.com>
Date: Tue, 14 May 2024 13:39:15 -0400
Subject: [PATCH 55/64] split json creation from plot script

---
 scripts/generate_pae_plddt_plot.py |  19 +----
 scripts/pickle_to_json.py          | 111 +++++++++++++++++++++++++++++
 2 files changed, 113 insertions(+), 17 deletions(-)
 create mode 100644 scripts/pickle_to_json.py

diff --git a/scripts/generate_pae_plddt_plot.py b/scripts/generate_pae_plddt_plot.py
index 1cce3dc0..63f980ec 100644
--- a/scripts/generate_pae_plddt_plot.py
+++ b/scripts/generate_pae_plddt_plot.py
@@ -187,25 +187,10 @@ def draw_subplot(name, ax, model, prot1len, display_scale=False):
     plt.savefig('{}/{}_PAE.png'.format(outdir, name), dpi=300)
 
 
-def generate_plots_and_json(fasta, pkl1, pkl2, pkl3, outdir, name, interface):
+def generate_plots(fasta, pkl1, pkl2, pkl3, outdir, name, interface):
     model1_results = AlphaFoldPickle(name, pkl1)
-    model1_results.saving_pathname = outdir
-    # "${NAME}_model_${model}_multimer_v3_relaxed"
-    model1_results.saving_filename = f"{name}_model_1_multimer_v3_relaxed"
-    print("Saving model1 in json format")
-    model1_results.save_to_json()
-
     model2_results = AlphaFoldPickle(name, pkl2)
-    model2_results.saving_pathname = outdir
-    model2_results.saving_filename = f"{name}_model_2_multimer_v3_relaxed"
-    print("Saving model2 in json format")
-    model2_results.save_to_json()
-
     model3_results = AlphaFoldPickle(name, pkl3)
-    model3_results.saving_pathname = outdir
-    model3_results.saving_filename = f"{name}_model_3_multimer_v3_relaxed"
-    print("Saving model3 in json format")
-    model3_results.save_to_json()
 
     def get_multimer_prot1_len(f):
         with open(f) as handle:
             for record in SeqIO.parse(handle, "fasta"):
@@ -242,7 +227,7 @@ def get_multimer_prot1_len(f):
     parser.add_argument('--interface', dest='interface', required=False)
     args = parser.parse_args()
 
-    generate_plots_and_json(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir,
+    generate_plots(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir,
                    args.basename, args.interface)
     # generate_plddt_plot(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir, args.basename)
     # generate_pae_plot(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir, args.basename)
diff --git a/scripts/pickle_to_json.py b/scripts/pickle_to_json.py
new file mode 100644
index 00000000..b7302e1e
--- /dev/null
+++ b/scripts/pickle_to_json.py
@@ -0,0 +1,111 @@
+# taken from: https://colab.research.google.com/github/mattarnoldbio/alphapickle/blob/main/AlphaPickle.ipynb#scrollTo=jQUP8Ab3RN7s
+import argparse
+import sys
+import pickle as pkl
+#from zipfile import Path
+import numpy as np
+import pandas as pd
+
+from matplotlib import pyplot as plt, colors as cols, cm as cm, rcParams, font_manager
+from mpl_toolkits.axes_grid1 import ImageGrid
+from matplotlib.table import table
+from matplotlib.gridspec import GridSpec
+import json
+from sys import exit
+import os
+from Bio import PDB as pdb
+from Bio import SeqIO
+import io
+from json import encoder
+
+encoder.FLOAT_REPR = lambda o: format(o, '.2f')
+
+# plot size, in inches.
+plot_size = 16
+
+plot_increment = "200"  # @param[10,25,50,100,250,500]
+plot_increment = int(plot_increment)
+
+
+# Define class for AlphaFold metadata file and class methods
+class AlphaFoldMetaData(object):
+    def __init__(self, name, PathToFile, FastaSequence=None, ranking=None):
+        # Define attributes
+        self.name = name
+        self.PathToFile = PathToFile
+        self.FastaSequence = FastaSequence
+        self.saving_filename = name
+        self.saving_pathname = self.PathToFile.split(self.saving_filename)[0]
+        if ranking:
+            self.saving_filename = "ranked_{}".format(ranking)
+
+
+class AlphaFoldPickle(AlphaFoldMetaData):
+
+    def __init__(self, name, PathToFile, FastaSequence=None, ranking=None):
+        super().__init__(name, PathToFile, FastaSequence, ranking)  # Define attributes
+        if ranking:
+            self.saving_filename = "ranked_{}".format(ranking)
+        self.data = []
+        self.PAE = None
+
+        # Extract pickled data
+        with (open("{}".format(self.PathToFile), "rb")) as openfile:
+            while True:
+                try:
+                    self.data.append(pkl.load(openfile))
+                except EOFError:
+                    break
+
+        # Try statement accounts for data run using non-pTM models, with no PAE output
+        try:
+            self.PAE = self.data[0]['predicted_aligned_error'].round(2)
+        except:
+            print("PAE model data not present. To access this performance metric, run AlphaFold"
+                  "using pTM-enabled models.")
+
+        # Define pLDDT
+        self.pLDDT = self.data[0]['plddt'].round(2)
+        self.max_pae = self.data[0]['max_predicted_aligned_error']
+        self.ptm = self.data[0]['ptm_score']
+        self.iptm = self.data[0]['iptm_score']
+
+    def save_to_json(self):
+        # save pkl to json format as colabfold
+        colab_data = {}
+        colab_data['plddt'] = list(np.around(np.array(self.pLDDT.tolist()), 2))
+        colab_data['pae'] = list(np.around(np.array(self.PAE.tolist()), 2))
+        colab_data['max_pae'] = self.max_pae
+        colab_data['ptm'] = self.ptm
+        colab_data['iptm'] = self.iptm
+
+        class NumpyEncoder(json.JSONEncoder):
+            def default(self, obj):
+                if isinstance(obj, np.ndarray):
+                    return obj.tolist()
+                return json.JSONEncoder.default(self, obj)
+
+        with open('{}/{}.json'.format(self.saving_pathname, self.saving_filename), "w") as outfile:
+            outfile.write(json.dumps(colab_data, cls=NumpyEncoder))
+
+
+def generate_json(pkl1, outdir, name, model_nbr):
+    model1_results = AlphaFoldPickle(name, pkl1)
+    model1_results.saving_pathname = outdir
+    # "${NAME}_model_${model}_multimer_v3_relaxed"
+    model1_results.saving_filename = f"{name}_model_{model_nbr}_multimer_v3_relaxed"
+    print("Saving model1 in json format")
+    model1_results.save_to_json()
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model_pkl', dest='model1_pkl', required=True)
+    parser.add_argument('--output_dir', dest='output_dir', required=True)
+    parser.add_argument('--basename', dest='basename', required=True)
+    parser.add_argument('--model_nbr', dest='model_nbr', required=True)
+    args = parser.parse_args()
+
+    generate_json(args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir,
+                  args.basename, args.model_nbr)
From 9e9dd6215ec944fcbd87f72a6c144d28fda8a0ea Mon Sep 17 00:00:00 2001
From: jflucier <3505568+jflucier@users.noreply.github.com>
Date: Tue, 14 May 2024 13:40:27 -0400
Subject: [PATCH 56/64] split json creation from plot script

---
 scripts/pickle_to_json.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/scripts/pickle_to_json.py b/scripts/pickle_to_json.py
index b7302e1e..cdf66f88 100644
--- a/scripts/pickle_to_json.py
+++ b/scripts/pickle_to_json.py
@@ -107,5 +107,4 @@ def generate_json(pkl1, outdir, name, model_nbr):
     parser.add_argument('--model_nbr', dest='model_nbr', required=True)
     args = parser.parse_args()
 
-    generate_json(args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir,
-                  args.basename, args.model_nbr)
+    generate_json(args.model_pkl, args.output_dir, args.basename, args.model_nbr)
From ceb056aa4e8b70c27ee7b84f497d8bdc21c9757f Mon Sep 17 00:00:00 2001
From: jflucier <3505568+jflucier@users.noreply.github.com>
Date: Tue, 14 May 2024 13:41:34 -0400
Subject: [PATCH 57/64] split json creation from plot script

---
 scripts/pickle_to_json.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/pickle_to_json.py b/scripts/pickle_to_json.py
index cdf66f88..045bdf57 100644
--- a/scripts/pickle_to_json.py
+++ b/scripts/pickle_to_json.py
@@ -101,7 +101,7 @@ def generate_json(pkl1, outdir, name, model_nbr):
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument('--model_pkl', dest='model1_pkl', required=True)
+    parser.add_argument('--model_pkl', dest='model_pkl', required=True)
     parser.add_argument('--output_dir', dest='output_dir', required=True)
     parser.add_argument('--basename', dest='basename', required=True)
     parser.add_argument('--model_nbr', dest='model_nbr', required=True)
From ca54002326945058284225586301b9e9d6b47526 Mon Sep 17 00:00:00 2001
From: jflucier <3505568+jflucier@users.noreply.github.com>
Date: Tue, 14 May 2024 13:42:22 -0400
Subject: [PATCH 58/64] split json creation from plot script

---
 scripts/pickle_to_json.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/pickle_to_json.py b/scripts/pickle_to_json.py
index 045bdf57..dcfcd09d 100644
--- a/scripts/pickle_to_json.py
+++ b/scripts/pickle_to_json.py
@@ -95,7 +95,7 @@ def generate_json(pkl1, outdir, name, model_nbr):
     model1_results.saving_pathname = outdir
     # "${NAME}_model_${model}_multimer_v3_relaxed"
     model1_results.saving_filename = f"{name}_model_{model_nbr}_multimer_v3_relaxed"
-    print("Saving model1 in json format")
+    print(f"Saving model{model_nbr} in json format")
     model1_results.save_to_json()
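With the argparse wiring fixed by PATCHES 56-58, the conversion helper can be exercised per model. A sketch using hypothetical paths, matching the generate_json signature defined in pickle_to_json.py:

# Hypothetical invocation for one model; paths are placeholders.
generate_json(
    pkl1="out/result_model_2_multimer_v3_pred_0.pkl",
    outdir="out",
    name="complex",
    model_nbr=2,
)
# Expected output file: out/complex_model_2_multimer_v3_relaxed.json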
From f96ecea936473a4dab4a5f2ec09646b86299da32 Mon Sep 17 00:00:00 2001
From: Maxime
Date: Fri, 19 Jul 2024 10:47:14 -0400
Subject: [PATCH 59/64] improve exception messages

---
 openfold/config.py             | 2 +-
 openfold/data/data_pipeline.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/openfold/config.py b/openfold/config.py
index b6259ed0..0c7d13e5 100644
--- a/openfold/config.py
+++ b/openfold/config.py
@@ -224,7 +224,7 @@ def model_config(
         c.data.eval.max_extra_msa = 1152
         c.data.predict.max_extra_msa = 1152
     else:
-        raise ValueError("Invalid model name")
+        raise ValueError(f"Invalid model name {name}")
 
     if long_sequence_inference:
         assert(not train)
diff --git a/openfold/data/data_pipeline.py b/openfold/data/data_pipeline.py
index d2135daa..6ed85864 100644
--- a/openfold/data/data_pipeline.py
+++ b/openfold/data/data_pipeline.py
@@ -1208,7 +1208,7 @@ def read_msa(start, size):
         uniprot_msa_path = os.path.join(alignment_dir, "uniprot_hits.sto")
         if not os.path.exists(uniprot_msa_path):
             chain_id = os.path.basename(os.path.normpath(alignment_dir))
-            raise ValueError(f"Missing 'uniprot_hits.sto' for {chain_id}. "
+            raise ValueError(f"Missing file {uniprot_msa_path} for {chain_id}. "
                              f"This is required for Multimer MSA pairing.")
 
         with open(uniprot_msa_path, "r") as fp:
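The improved messages interpolate the offending value, so the failure is self-describing. Illustrative only (the bad model name below is hypothetical):

from openfold.config import model_config

try:
    model_config("model_1_multimer_v3_typo")  # hypothetical invalid name
except ValueError as e:
    print(e)  # Invalid model name model_1_multimer_v3_typo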
From 2dbf243dbf171f4a5d912ce7b28523329460b4dc Mon Sep 17 00:00:00 2001
From: jflucier <3505568+jflucier@users.noreply.github.com>
Date: Tue, 23 Jul 2024 11:57:47 -0400
Subject: [PATCH 60/64] patch to display multimer seq limit in plots

---
 scripts/generate_pae_plddt_plot.py | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/scripts/generate_pae_plddt_plot.py b/scripts/generate_pae_plddt_plot.py
index 63f980ec..72a8ba4d 100644
--- a/scripts/generate_pae_plddt_plot.py
+++ b/scripts/generate_pae_plddt_plot.py
@@ -90,7 +90,7 @@ def default(self, obj):
         outfile.write(json.dumps(colab_data, cls=NumpyEncoder))
 
 
-def plot_pLDDT(outdir, name, model1, model2, model3, prot1len, size_in_inches=3.5, axis_label_increment=100):
+def plot_pLDDT(outdir, name, model1, model2, model3, fasta, size_in_inches=3.5, axis_label_increment=100):
     m1_x = list(range(0, len(model1.pLDDT), 1))
     m1_y = list(model1.pLDDT)
     m2_x = list(range(0, len(model2.pLDDT), 1))
@@ -109,7 +109,15 @@ def plot_pLDDT(outdir, name, model1, model2, model3, fasta, size_in_inches=3.5,
     plt.plot(m2_x, m2_y, '-m', label='model2')
     plt.plot(m3_x, m3_y, '-g', label='model3')
 
-    plt.vlines(x=prot1len, ymin=0, ymax=100, colors='k', linestyles='--')
+    def get_multimer_prot1_len(f):
+        all_len = []
+        with open(f) as handle:
+            for record in SeqIO.parse(handle, "fasta"):
+                all_len.append(len(record.seq))
+
+    all_len = get_multimer_len(fasta)
+    for l in all_len:
+        plt.vlines(x=l, ymin=0, ymax=100, colors='k', linestyles='--')
 
     plt.legend(loc='lower right')
     plt.savefig('{}/{}_pLDDT.png'.format(outdir, name), dpi=300)
@@ -192,15 +200,10 @@ def generate_plots(fasta, pkl1, pkl2, pkl3, outdir, name, interface):
     model2_results = AlphaFoldPickle(name, pkl2)
     model3_results = AlphaFoldPickle(name, pkl3)
 
-    def get_multimer_prot1_len(f):
-        with open(f) as handle:
-            for record in SeqIO.parse(handle, "fasta"):
-                return len(record.seq)
-
-    prot1len = get_multimer_prot1_len(fasta)
-
     print("Generating plddt plot")
-    plot_pLDDT(outdir, name, model1_results, model2_results, model3_results, prot1len, size_in_inches=plot_size,
+    plot_pLDDT(outdir, name, model1_results, model2_results, model3_results, fasta, size_in_inches=plot_size,
               axis_label_increment=plot_increment)
 
     print("Generating PAE plot")
From 22f1ebbb5c614149aea2b297af9d92f3226ea862 Mon Sep 17 00:00:00 2001
From: jflucier <3505568+jflucier@users.noreply.github.com>
Date: Tue, 23 Jul 2024 12:04:01 -0400
Subject: [PATCH 61/64] patch to display multimer seq limit in plots

---
 scripts/generate_pae_plddt_plot.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/generate_pae_plddt_plot.py b/scripts/generate_pae_plddt_plot.py
index 72a8ba4d..ab202f87 100644
--- a/scripts/generate_pae_plddt_plot.py
+++ b/scripts/generate_pae_plddt_plot.py
@@ -109,7 +109,7 @@ def plot_pLDDT(outdir, name, model1, model2, model3, fasta, size_in_inches=3.5,
     plt.plot(m2_x, m2_y, '-m', label='model2')
     plt.plot(m3_x, m3_y, '-g', label='model3')
 
-    def get_multimer_prot1_len(f):
+    def get_multimer_len(f):
         all_len = []
         with open(f) as handle:
             for record in SeqIO.parse(handle, "fasta"):

From ae770375f3c6ff48656352f119062fd1f0a81cf3 Mon Sep 17 00:00:00 2001
From: jflucier <3505568+jflucier@users.noreply.github.com>
Date: Tue, 23 Jul 2024 12:05:18 -0400
Subject: [PATCH 62/64] patch to display multimer seq limit in plots

---
 scripts/generate_pae_plddt_plot.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/scripts/generate_pae_plddt_plot.py b/scripts/generate_pae_plddt_plot.py
index ab202f87..fda27488 100644
--- a/scripts/generate_pae_plddt_plot.py
+++ b/scripts/generate_pae_plddt_plot.py
@@ -114,6 +114,7 @@ def get_multimer_len(f):
         with open(f) as handle:
             for record in SeqIO.parse(handle, "fasta"):
                 all_len.append(len(record.seq))
+        return all_len
 
     all_len = get_multimer_len(fasta)
     for l in all_len:
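After PATCH 62 the helper both collects and returns the per-record lengths. That behaviour can be checked standalone with a synthetic two-chain FASTA:

import io
from Bio import SeqIO

def get_multimer_len(handle):
    # Same logic as the patched helper, minus the open() so any handle works.
    return [len(record.seq) for record in SeqIO.parse(handle, "fasta")]

fasta = io.StringIO(">chainA\nMKTAYIAKQR\n>chainB\nGSHMLE\n")
print(get_multimer_len(fasta))  # [10, 6]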
From 7ff67eb702312f9a6ddab9541bfdd8f4ca69f217 Mon Sep 17 00:00:00 2001
From: jflucier <3505568+jflucier@users.noreply.github.com>
Date: Tue, 23 Jul 2024 12:08:28 -0400
Subject: [PATCH 63/64] patch to display multimer seq limit in plots

---
 scripts/generate_pae_plddt_plot.py | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/scripts/generate_pae_plddt_plot.py b/scripts/generate_pae_plddt_plot.py
index fda27488..a9f506b8 100644
--- a/scripts/generate_pae_plddt_plot.py
+++ b/scripts/generate_pae_plddt_plot.py
@@ -124,7 +124,7 @@ def get_multimer_len(f):
     plt.savefig('{}/{}_pLDDT.png'.format(outdir, name), dpi=300)
 
 
-def plot_paE(outdir, name, model1, model2, model3, prot1len, interface_df, size_in_inches=3.5, axis_label_increment=200):
+def plot_paE(outdir, name, model1, model2, model3, fasta, interface_df, size_in_inches=3.5, axis_label_increment=200):
 
     # data = [
     #     [0.742, 376, 64, 83, 92, 2, 4, 8, 6.0],
@@ -141,7 +141,7 @@ def plot_paE(outdir, name, model1, model2, model3, fasta, interface_df, size_in_
     #     columns=list(columns)
     # )
 
-    def draw_subplot(name, ax, model, prot1len, display_scale=False):
+    def draw_subplot(name, ax, model, fasta, display_scale=False):
         ticks = np.arange(0, model.PAE[1].size, axis_label_increment)
         img_ax = ax.imshow(model.PAE, cmap="bwr")
@@ -149,8 +149,18 @@ def draw_subplot(name, ax, model, prot1len, display_scale=False):
         ax.set_title(name, size=20, fontweight="bold")
         ax.set_xlabel("Residue index", size=16, fontweight="bold")
         ax.set_ylabel("Residue index", size=16, fontweight="bold")
-        ax.axvline(x=prot1len, color='k', linewidth=4)
-        ax.axhline(y=prot1len, color='k', linewidth=4)
+
+        def get_multimer_len(f):
+            all_len = []
+            with open(f) as handle:
+                for record in SeqIO.parse(handle, "fasta"):
+                    all_len.append(len(record.seq))
+            return all_len
+
+        all_len = get_multimer_len(fasta)
+        for l in all_len:
+            ax.axvline(x=l, color='k', linewidth=4)
+            ax.axhline(y=l, color='k', linewidth=4)
         return img_ax
 
     nrows = 1
@@ -165,13 +175,13 @@ def draw_subplot(name, ax, model, prot1len, display_scale=False):
     models = [model1, model2, model3]
 
     ax1 = fig.add_subplot(gs1[0, 0])
-    im1 = draw_subplot(f'model1', ax1, models[0], prot1len)
+    im1 = draw_subplot(f'model1', ax1, models[0], fasta)
 
     ax2 = fig.add_subplot(gs1[0, 1])
-    im2 = draw_subplot(f'model2', ax2, models[1], prot1len)
+    im2 = draw_subplot(f'model2', ax2, models[1], fasta)
 
     ax3 = fig.add_subplot(gs1[0, 2])
-    im3 = draw_subplot(f'model3', ax3, models[2], prot1len)
+    im3 = draw_subplot(f'model3', ax3, models[2], fasta)
 
     ax4 = fig.add_subplot(gs1[0, 3])
     mesh = ax4.pcolormesh(models[2].PAE, cmap="bwr")
@@ -216,7 +226,7 @@ def generate_plots(fasta, pkl1, pkl2, pkl3, outdir, name, interface):
     else:
         print(f"Unable to create pandas dataframe with provided interface file {interface}")
 
-    plot_paE(outdir, name, model1_results, model2_results, model3_results, prot1len, df, size_in_inches=plot_size,
+    plot_paE(outdir, name, model1_results, model2_results, model3_results, fasta, df, size_in_inches=plot_size,
             axis_label_increment=plot_increment)

From 724db4e608da508b2798ab51fe62097bf6cf12c9 Mon Sep 17 00:00:00 2001
From: jflucier <3505568+jflucier@users.noreply.github.com>
Date: Tue, 23 Jul 2024 12:14:30 -0400
Subject: [PATCH 64/64] patch to display multimer seq limit in plots

---
 scripts/generate_pae_plddt_plot.py | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/scripts/generate_pae_plddt_plot.py b/scripts/generate_pae_plddt_plot.py
index a9f506b8..493014b7 100644
--- a/scripts/generate_pae_plddt_plot.py
+++ b/scripts/generate_pae_plddt_plot.py
@@ -117,8 +117,10 @@ def get_multimer_len(f):
         return all_len
 
     all_len = get_multimer_len(fasta)
+    cumul_l = 0
     for l in all_len:
-        plt.vlines(x=l, ymin=0, ymax=100, colors='k', linestyles='--')
+        cumul_l += l
+        plt.vlines(x=cumul_l, ymin=0, ymax=100, colors='k', linestyles='--')
 
     plt.legend(loc='lower right')
     plt.savefig('{}/{}_pLDDT.png'.format(outdir, name), dpi=300)
@@ -158,9 +160,11 @@ def get_multimer_len(f):
             return all_len
 
         all_len = get_multimer_len(fasta)
+        cumul_l = 0
         for l in all_len:
-            ax.axvline(x=l, color='k', linewidth=4)
-            ax.axhline(y=l, color='k', linewidth=4)
+            cumul_l += l
+            ax.axvline(x=cumul_l, color='k', linewidth=4)
+            ax.axhline(y=cumul_l, color='k', linewidth=4)
         return img_ax
 
     nrows = 1
@@ -241,6 +245,17 @@ def generate_plots(fasta, pkl1, pkl2, pkl3, outdir, name, interface):
     parser.add_argument('--interface', dest='interface', required=False)
     args = parser.parse_args()
 
+
+    # def get_multimer_len(f):
+    #     all_len = []
+    #     with open(f) as handle:
+    #         for record in SeqIO.parse(handle, "fasta"):
+    #             all_len.append(len(record.seq))
+    #     return all_len
+    #
+    #
+    # all_len = get_multimer_len(args.fasta)
+
     generate_plots(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir,
                    args.basename, args.interface)
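The cumulative-offset logic introduced in PATCH 64 can be checked in isolation; numpy's cumsum yields the same chain boundaries. Illustrative lengths only:

import numpy as np

chain_lengths = [220, 180, 95]          # per-record lengths from the FASTA
boundaries = np.cumsum(chain_lengths)   # array([220, 400, 495])
# These are the x positions where the dashed pLDDT separators and the
# bold PAE grid lines are drawn, one per chain boundary.
print(boundaries.tolist())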