Best Python code snippet using autotest_python
coverage.py
Source: coverage.py
...

    def find_def_coverage(self, morf, statements, missing, definfo):
        """Return mapping from function name to coverage.
        """
        def_coverage = {}
        root = self.morf_name(morf)
        statements = set(statements)
        missing = set(missing)
        for info in definfo:
            if info.codestart is None:
                info.coverage = 1
            else:
                lines = set(range(info.codestart, info.end+1))
                stmt = len(lines.intersection(statements))
                miss = len(lines.intersection(missing))
                if miss == 0: info.coverage = 1
                else: info.coverage = (1.0 - float(miss)/float(stmt))

    def relative_filename(self, filename):
        """ Convert filename to relative filename from self.relative_dir.
        """
        return filename.replace(self.relative_dir, "")

    def morf_name(self, morf):
        """ Return the name of morf as used in report.
        """
        if isinstance(morf, types.ModuleType):
            return morf.__name__
        else:
            return self.relative_filename(os.path.splitext(morf)[0])

    def filter_by_prefix(self, morfs, omit_prefixes):
        """ Return list of morfs where the morf name does not begin
            with any one of the omit_prefixes.
        """
        filtered_morfs = []
        for morf in morfs:
            for prefix in omit_prefixes:
                if self.morf_name(morf).startswith(prefix):
                    break
            else:
                filtered_morfs.append(morf)
        return filtered_morfs

    def morf_name_compare(self, x, y):
        return cmp(self.morf_name(x), self.morf_name(y))

    def report(self, morfs, show_missing=1, ignore_errors=0, file=None, omit_prefixes=[]):
        if not isinstance(morfs, types.ListType):
            morfs = [morfs]
        # On windows, the shell doesn't expand wildcards.  Do it here.
        globbed = []
        for morf in morfs:
            if isinstance(morf, strclass):
                globbed.extend(glob.glob(morf))
            else:
                globbed.append(morf)
        morfs = globbed

        morfs = self.filter_by_prefix(morfs, omit_prefixes)
        morfs.sort(self.morf_name_compare)

        max_name = max([5,] + map(len, map(self.morf_name, morfs)))
        fmt_name = "%%- %ds  " % max_name
        fmt_err = fmt_name + "%s: %s"
        header = fmt_name % "Name" + " Stmts   Exec  Cover"
        fmt_coverage = fmt_name + "% 6d % 6d % 5d%%"
        if show_missing:
            header = header + "   Missing"
            fmt_coverage = fmt_coverage + "   %s"
        if not file:
            file = sys.stdout
        print >>file, header
        print >>file, "-" * len(header)
        total_statements = 0
        total_executed = 0
        for morf in morfs:
            name = self.morf_name(morf)
            try:
                _, statements, _, missing, readable = self.analysis2(morf)
                n = len(statements)
                m = n - len(missing)
                if n > 0:
                    pc = 100.0 * m / n
                else:
                    pc = 100.0
                args = (name, n, m, pc)
                if show_missing:
                    args = args + (readable,)
                print >>file, fmt_coverage % args
                total_statements = total_statements + n
                total_executed = total_executed + m
...
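For orientation, here is a minimal usage sketch (Python 2). It assumes the excerpt above comes from the classic single-file coverage.py that autotest bundles, which exposes module-level start()/stop()/report() wrappers around a singleton of the class whose methods are shown; the module name my_module is purely illustrative and not part of the original code.

import coverage        # the classic single-file coverage module assumed above
import my_module       # hypothetical code under test

coverage.start()       # begin recording executed lines
my_module.main()       # exercise the code under test
coverage.stop()

# "morfs" may be modules or filename patterns; string entries are globbed,
# and any name starting with an omitted prefix is skipped (see filter_by_prefix).
coverage.report([my_module, 'scripts/*.py'], show_missing=1,
                omit_prefixes=['test_'])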
newcompute_features.py
Source: newcompute_features.py
#Copyright MIT License, 2017 Armaghan Naik, Pieter Spealman
#PS 08.18.18 - suppressing ignorable warnings
# https://github.com/numpy/numpy/pull/432
import warnings
warnings.simplefilter("ignore")
import string
import os
import numpy
import scipy
import scipy.stats
import sys
import simplejson as json

uorf_identifier = ['chr', 'morf_name', 'ss_start', 'ss_end', 'polarity']
SETTINGS = json.load(open('analysis.json','r'))
crappy_starts = set(SETTINGS['ignore_start_codons'])

def load_a_sample(kindof, aname):
    predictions = []
    evidence = []
    for achr in SETTINGS['chromosomes']:
        try:
            predictions.extend([x for x in json.load(open('predictions.'+kindof+aname+'/'+achr+'.predictions')) if x['uorf_sequence'][:3] not in crappy_starts])
        except json.decoder.JSONDecodeError:
            pass
        try:
            evidence.extend([x for x in json.load(open('predictions.'+kindof+aname+'/'+achr+'.evidence')) if x['uorf_sequence'][:3] not in crappy_starts])
        except json.decoder.JSONDecodeError:
            pass
    # load up abundance calculations
    orf_transcript_estimate = {}
    sname = [x['mRNA'] for x in SETTINGS['SAMPLES'] if x['name']==aname][0].split('/')[-1]
    for line in open(SETTINGS['processed_dir']+sname+'-quantified.csv'):
        Line = line.rstrip().split()
        orf_transcript_estimate[Line[0]] = float(Line[1])
    #
    orf_loading_estimates_lookup = {}
    sname = [x['RPF'] for x in SETTINGS['SAMPLES'] if x['name']==aname][0].split('/')[-1]
    for line in open(SETTINGS['processed_dir']+sname+'-quantified.csv'):
        Line = line.rstrip().split()
        orf_loading_estimates_lookup[Line[0]] = float(Line[1])
    return predictions, evidence, orf_transcript_estimate, orf_loading_estimates_lookup

feat_order = []
designfeats = ['ss_start', 'ss_end', 'max_power_freq', 'entropy_of_power', 'median_phase',
               'median_power', 'morf_5putr_peak_phase', 'morf_5putr_peak_power',
               'morf_5putr_region_power', 'morf_5putr_sum_downstream', 'morf_5putr_sum_upstream',
               'morf_dist_TIS', 'morf_dist_TSS', 'within_phase_of_in_frame',
               'within_phase_of_max_power_freq', 'within_power_of_in_frame',
               'within_power_of_max_power_freq', 'protection_sum', 'pwm_score',
               'relative_start_magnitude', 'weighted_avg_phase', 'morf_nterm_region_ends_before',
               'morf_nterm_region_start_after', 'morf_nonoverlap', 'morf_len_TLS',
               'within_power_of_max_power_freq', 'within_power_of_in_frame']
dfidx = {x[1]:x[0] for x in enumerate(designfeats)}
keepfeats = ['within_power_of_max_power_freq', 'within_power_of_in_frame', 'morf_dist_TIS',
             'morf_dist_TSS', 'morf_nterm_region_ends_before', 'morf_nterm_region_start_after',
             'morf_nonoverlap']

def compute_derived_features(rep_data, orf_transcript_ests, orf_loading_ests):
    n = len(rep_data)
    X = numpy.zeros((n,len(designfeats)))
    QQ = numpy.zeros((n,))
    total_rpf_depth = float(orf_loading_ests['ALL_READS'])
    ii = 0
    which_key = []
    for aval in rep_data:
        if aval['morf_name'] in orf_transcript_ests:
            X[ii,:] = [float(aval[z]) for z in designfeats]
            QQ[ii] = orf_transcript_ests[aval['morf_name']]
            if (QQ[ii] + X[ii,dfidx['morf_5putr_region_power']]) >= 20 and QQ[ii]>0:
                ii += 1
                which_key.append([aval[x] for x in uorf_identifier])
    X = X[:ii,:]
    QQ = QQ[:ii]
    n = ii
    # normalize mRNA relative abundances
    QQ /= float(orf_transcript_ests['ALL_READS'])
    synfeats = {}
    for k in keepfeats:
        synfeats[k] = X[:,dfidx[k]]
    mylengths = numpy.abs(X[:,dfidx['ss_end']]-X[:,dfidx['ss_start']])
    synfeats['spacing_of_max_power'] = X[:,dfidx['max_power_freq']]/mylengths
    synfeats['relative_power_of_max_power_freq'] = X[:,dfidx['within_power_of_max_power_freq']]/QQ
    synfeats['relative_power_of_in_frame'] = X[:,dfidx['within_power_of_in_frame']]/QQ
    synfeats['relative_median_power'] = X[:,dfidx['median_power']]/QQ
    #
    synfeats['region_relative_morf_peak_power'] = X[:,dfidx['morf_5putr_peak_power']]/QQ
    #
    synfeats['abs_phase_of_max_power_freq'] = X[:,dfidx['within_phase_of_max_power_freq']]
    synfeats['abs_phase_of_in_frame'] = X[:,dfidx['within_phase_of_in_frame']]
    #
    synfeats['length_of_puorf'] = mylengths
    #
    #
    avg_upstream_reads = X[:,dfidx['morf_5putr_sum_upstream']]/X[:,dfidx['morf_dist_TSS']]/QQ
    avg_upstream_reads[numpy.isnan(avg_upstream_reads)] = 0
    synfeats['relative_avg_upstream_reads'] = avg_upstream_reads
    #
    avg_downstream_reads = X[:,dfidx['morf_5putr_sum_downstream']]/X[:,dfidx['morf_dist_TIS']]/QQ
    avg_downstream_reads[numpy.isnan(avg_downstream_reads)] = 0
    synfeats['relative_avg_downstream_reads'] = avg_downstream_reads
    synfeats['normalized_start_magnitude'] = X[:,dfidx['relative_start_magnitude']]/total_rpf_depth
    # print feat_order
    synfeatorder = sorted(synfeats.keys())
    if len(feat_order)==0:
        feat_order.extend(synfeatorder)
    else:
        synfeatorder = feat_order
    newX = numpy.vstack([synfeats[x] for x in synfeatorder]).T
    # import pdb
    # pdb.set_trace()
    return which_key, newX, X

for kindof in ['']+['p'+str(i)+'-' for i in range(len(SETTINGS['permutation_seeds']))]:
    outdir = 'calculated'+kindof
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    for asample in SETTINGS['SAMPLES']:
        sname = asample['name']
        print asample, sname
        predictions, evidence, orf_transcript_estimate, orf_loading_estimates_lookup = load_a_sample(kindof, sname)
        which_key, newX, X = compute_derived_features(predictions, orf_transcript_estimate, orf_loading_estimates_lookup)
        fout = open(outdir+'/'+sname+'.table','w')
        for x in which_key:
            fout.write(' '.join([str(z) for z in x])+"\n")
        fout.close()
        numpy.save(outdir+'/'+sname+'-feats.npy', newX)
        numpy.save(outdir+'/'+sname+'-design-feats.npy', X)
        sname = asample['name']+'-evidence'
        which_key, newX, X = compute_derived_features(evidence, orf_transcript_estimate, orf_loading_estimates_lookup)
        fout = open(outdir+'/'+sname+'.table','w')
        for x in which_key:
            fout.write(' '.join([str(z) for z in x])+"\n")
        fout.close()
        numpy.save(outdir+'/'+sname+'-feats.npy', newX)
        numpy.save(outdir+'/'+sname+'-design-feats.npy', X)

fout = open('here.forder','w')
for x in feat_order:
    fout.write(x+"\n")
fout.close()

fout = open('here.dorder','w')
for x in designfeats:
    fout.write(x+"\n")
...
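All of this script's inputs are driven by analysis.json. The sketch below reconstructs only the fields the code actually reads (ignore_start_codons, chromosomes, processed_dir, permutation_seeds, and SAMPLES entries with name/mRNA/RPF); every value is a placeholder for illustration and is not taken from the original project.

# Illustrative structure for analysis.json, written as the Python dict it
# deserializes to; all values below are hypothetical placeholders.
example_settings = {
    "ignore_start_codons": ["CTG", "GTG"],      # predictions starting with these codons are dropped
    "chromosomes": ["chrI", "chrII"],           # one .predictions/.evidence file per chromosome
    "processed_dir": "processed/",              # holds the <basename>-quantified.csv abundance tables
    "permutation_seeds": [101, 202, 303],       # each seed adds a 'p<i>-' permuted run
    "SAMPLES": [
        {"name": "wt_rep1",                     # sample label used in output file names
         "mRNA": "fastq/wt_rep1_mRNA.fastq",    # basename resolves the mRNA quantification csv
         "RPF": "fastq/wt_rep1_RPF.fastq"},     # basename resolves the RPF quantification csv
    ],
}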