Dataset schema (column name: type, value statistics):
  blob_id: string (length 40)
  directory_id: string (length 40)
  path: string (length 3 to 616)
  content_id: string (length 40)
  detected_licenses: sequence (length 0 to 112)
  license_type: string (2 classes)
  repo_name: string (length 5 to 115)
  snapshot_id: string (length 40)
  revision_id: string (length 40)
  branch_name: string (777 classes)
  visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
  revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
  committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
  github_id: int64 (4.92k to 681M, nullable)
  star_events_count: int64 (0 to 209k)
  fork_events_count: int64 (0 to 110k)
  gha_license_id: string (22 classes)
  gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
  gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
  gha_language: string (149 classes)
  src_encoding: string (26 classes)
  language: string (1 class)
  is_vendor: bool (2 classes)
  is_generated: bool (2 classes)
  length_bytes: int64 (3 to 10.2M)
  extension: string (188 classes)
  content: string (length 3 to 10.2M)
  authors: sequence (length 1)
  author_id: string (length 1 to 132)
Row 1 (path: /main/views.py, repo: minahosam/e-shopper, branch: refs/heads/main):
  detected_licenses: []; license_type: no_license
  blob_id: ce17cfce8c468bf026821bed8c4ba90149ca668a; directory_id: 0a3cbf51778ed922c8466af0484e588aa3246c10; content_id: 4167fe0f29133e925a23434a25ad87558c61aab2
  snapshot_id: 075f1917984424f3eb9eea56ed1cf49bbfaa0789; revision_id: 2c601e888bd62b803ab2fe6f59607dacb9868f3e
  visit_date: 2023-06-21T17:15:08.454089; revision_date: 2021-08-07T18:45:48; committer_date: 2021-08-07T18:45:48
  github_id: 372,903,653; star_events_count: 0; fork_events_count: 0; gha fields: null
  src_encoding: UTF-8; language: Python; is_vendor: false; is_generated: false; length_bytes: 7,135; extension: py

content of /main/views.py:

from django.shortcuts import render,redirect
from .models import *
from .forms import *
from Profile.models import *
from django.http import JsonResponse
from django.contrib.auth.decorators import login_required
import json
import datetime
from .utils import *
# Create your views here.
def show_all_product(request):
all_brand=brand.objects.all()
all_category=category.objects.all()
all_products=index.objects.all().order_by('-id')[:6]
if request.user.is_authenticated:
cookieContent=cookieCart(request)
item=cookieContent['items']
net_total=cookieContent['net']
total=cookieContent['total']
else:
cookieContent=cookieCart(request)
item=cookieContent['items']
net_total=0
total=cookieContent['total']
return render(request,'main/index.html',{'all_brand':all_brand ,'all_category':all_category,'products':all_products,'total':total})
def product_detail(request,slug):
all_products=index.objects.get(slug=slug)
all_brand=brand.objects.all()
all_category=category.objects.all()
all_reviews=review.objects.all()
if request.method=='POST':
form=reviewForm(request.POST,request.FILES)
if form.is_valid():
form.save()
else:
form=reviewForm()
return render(request,'main/product_details.html',{'products':all_products,'all_brand':all_brand ,'all_category':all_category,'form':form,'revi':all_reviews})
def wish_list(request,slug):
product=index.objects.get(slug=slug)
if request.user in product.add_to_wishlist.all():
product.add_to_wishlist.remove(request.user)
else:
product.add_to_wishlist.add(request.user)
return redirect('main:show')
def wishlist_page(request):
product_wished=index.objects.filter(add_to_wishlist=request.user)
return render(request,'main/wish_page.html',{'wished':product_wished})
def search_by_category(request):
cat=request.GET['category']
print (cat)
all_brand=brand.objects.all()
all_category=category.objects.all()
name_of_category=category.objects.get(category_name=cat)
categore_selected_result=index.objects.filter(product_category=name_of_category.id)
return render(request,'main/search_category.html',{'category':categore_selected_result,'all_brand':all_brand ,'all_category':all_category})
def search_by_brand(request):
brand_=request.GET['brand']
print(brand_)
all_brand=brand.objects.all()
all_category=category.objects.all()
brand_name=brand.objects.get(brand_name = brand_)
brand_selected_result=index.objects.filter(product_brand=brand_name.id)
return render(request,'main/search_brand.html',{'brand':brand_selected_result,'all_brand':all_brand ,'all_category':all_category})
def cart(request):
if request.user.is_authenticated:
cartData=dataCart(request)
item=cartData['items']
net_total=cartData['net']
total=cartData['total']
else:
cookieContent=cookieCart(request)
item=cookieContent['items']
net_total=cookieContent['net']
total=cookieContent['total']
return render(request,'main/cart.html',{'items':item ,'net':net_total,'total':total})
def checkout(request):
if request.user.is_authenticated:
cartData=dataCart(request)
item=cartData['items']
net_total=cartData['net']
total=cartData['total']
coutries=cartData['countries']
states=cartData['state']
shipping_info=shippinginfo.objects.all()
shiped=False
for i in item:
if i.item_order.order_completed ==False:
shiped = True
else:
cookieContent=cookieCart(request)
item=cookieContent['items']
net_total=cookieContent['net']
total=cookieContent['total']
shipping_info=shippinginfo.objects.all()
coutries=country.objects.all()
shiped=False
states=state.objects.filter(state_country__in=coutries)
return render(request,'main/checkout.html',{'items':item , 'net':net_total,'total':total,'countries':coutries,'state':states,'shiped':shiped,'info':shipping_info})
def update_cart(request):
additon=json.loads(request.body)
product=additon['produactId']
product_action=additon['action']
print(product_action,product)
selected_product=index.objects.get(id=product)
order_owner=request.user
requested_user=userprofile.objects.get(usrename=order_owner)
Order , create=order.objects.get_or_create(order_customer=requested_user,order_completed=False)
item , create=orderItem.objects.get_or_create(item_name=selected_product,item_order=Order)
if product_action == 'add':
item.item_quantity = item.item_quantity
elif product_action == 'add2':
item.item_quantity = item.item_quantity + 1
else:
item.item_quantity = item.item_quantity- 1
print('-')
item.save()
print(item.item_quantity)
if item.item_quantity == 0:
item.delete()
if product_action == 'delete':
item.delete()
return JsonResponse('added',safe=False)
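# --- Illustrative sketch added by the editor; not part of the original views.py ---
# update_cart() above reads a JSON body with two keys: 'produactId' (the product id,
# spelled exactly as in the view) and 'action', where 'add2' increments the item
# quantity, 'add' leaves it unchanged, 'delete' removes the item, and any other value
# decrements it. The helper below shows one way to exercise the view from a Django
# test; the '/update_cart/' URL is an assumption, since urls.py is not part of this file.
def _example_update_cart_request(client, product_id, action='add2'):
    """Post the JSON payload that update_cart() expects, using a django.test.Client
    that has already been logged in (the view resolves the cart from request.user)."""
    import json
    payload = {'produactId': product_id, 'action': action}
    return client.post('/update_cart/', data=json.dumps(payload), content_type='application/json')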
def country_name_from_json(request,*args,**kwargs):
selected_country=kwargs.get('country')
states_according_to_country=list(state.objects.filter(state_country__country_name=selected_country).values())
return JsonResponse({'data':states_according_to_country})
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def process_order(request):
transaction_id2=datetime.datetime.now().timestamp()
data=json.loads(request.body)
print(data)
print(transaction_id2)
userc=request.user
print(userc)
if userc.is_authenticated:
user=request.user
print(user)
customer=userprofile.objects.get(username=user)
print(customer)
        Order=order.objects.get(order_customer=customer,order_completed=False)
total=float(data['shippingData']['total'])
print(total)
        Order.transaction_id=transaction_id2
item=orderItem.objects.filter(item_order=Order)
net_total=sum([items.total_selected_item for items in item])
if total == net_total:
Order.order_completed=True
Order.save()
shiiping=shippinginfo.objects.create(
shipping_user=customer,shipping_order=Order,shipping_mail=data['shippingData']['email'],
title=data['shippingData']['title'],shipping_firstname=data['shippingData']['firstname'],shiping_middlename=data['shippingData']['middlename'],
shipping_lastname=data['shippingData']['lastname'],shiping_adress1=data['shippingData']['adress1'],shipping_adress2=data['shippingData']['adress2'],
shipping_zipcode=data['shippingData']['zipcode'],shipping_country=data['shippingData']['country'],shipping_state=data['shippingData']['state'],
shipping_phone=data['shippingData']['phone'],shipping_mobile_number=data['shippingData']['mobile_number'],shipping_fax=data['shippingData']['fax']
)
shiiping.save()
else:
print('user not logged in')
    return JsonResponse('payment submitted.........',safe=False)

Row 1 (continued): authors: ["[email protected]"]; author_id: (empty)

Row 2 (path: /RNASeq.py, repo: maggishaggy/altanalyze, branch: refs/heads/master):
  detected_licenses: ["Apache-2.0"]; license_type: permissive
  blob_id: 764e6a72e60e5d6ef21dd3927b6b6543d48c628a; directory_id: 344cd39f8d8f00c418c300d212c4c7d4344edf27; content_id: 4f53058847bae3a664142be27b3222a3520cf7fb
  snapshot_id: fba602d3562b513cd0dfe83c7baec5cf09d204b2; revision_id: f73b21c519836c5d8bec089260ff2b631bff9fbf
  visit_date: 2020-05-20T08:36:49.084461; revision_date: 2019-04-30T01:00:57; committer_date: 2019-04-30T01:00:57
  github_id: null; star_events_count: 0; fork_events_count: 0; gha fields: null
  src_encoding: UTF-8; language: Python; is_vendor: false; is_generated: false; length_bytes: 306,374; extension: py

content of /RNASeq.py:

###RNASeq
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - [email protected]
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, string, os
from stats_scripts import statistics
import math
import os.path
import unique
import update
import copy
import time
import export
from build_scripts import EnsemblImport; reload(EnsemblImport)
try: from build_scripts import JunctionArrayEnsemblRules
except Exception: pass ### occurs with circular imports
try: from build_scripts import JunctionArray; reload(JunctionArray)
except Exception: pass ### occurs with circular imports
try: from build_scripts import ExonArrayEnsemblRules
except Exception: pass ### occurs with circular imports
import multiprocessing
import logging
import traceback
import warnings
import bisect
import shutil
from visualization_scripts import clustering; reload(clustering)
try:
import scipy
import scipy.cluster.hierarchy as sch
import scipy.spatial.distance as dist
except Exception: pass
try: import numpy
except Exception: pass
LegacyMode = True
try:
from scipy import average as Average
from scipy import stats
except Exception:
try: from statistics import avg as Average
except Exception: pass ### occurs with circular imports
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list_clean=[]
dir_list = unique.read_directory(sub_dir)
for filepath in dir_list:
if 'log.txt' not in filepath and '.log' not in filepath:
dir_list_clean.append(filepath)
return dir_list_clean
def makeUnique(item):
db1={}; list1=[]; k=0
for i in item:
try: db1[i]=[]
except TypeError: db1[tuple(i)]=[]; k=1
for i in db1:
if k==0: list1.append(i)
else: list1.append(list(i))
list1.sort()
return list1
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
######### Below code deals with building the AltDatabase #########
def collapseNoveExonBoundaries(novel_exon_coordinates,dataset_dir):
""" Merge exon predictions based on junction measurments from TopHat. The predicted exons are
bound by the identified splice site and the consensus length of reads in that sample"""
dataset_dir = string.replace(dataset_dir,'exp.','ExpressionInput/novel.')
export_data,status = AppendOrWrite(dataset_dir) ### Export all novel exons
if status == 'not found':
export_data.write('GeneID\tStrand\tExonID\tCoordinates\n')
novel_gene_exon_db={}
for (chr,coord) in novel_exon_coordinates:
key = (chr,coord)
ji,side,coord2 = novel_exon_coordinates[(chr,coord)]
try:
if side == 'left': ### left corresponds to the position of coord
intron = string.split(string.split(ji.ExonRegionID(),'-')[1][:2],'.')[0]
else:
                intron = string.split(string.split(ji.ExonRegionID(),'-')[0][:2],'.')[0]
ls = [coord,coord2]
ls.sort() ### The order of this is variable
if ji.Strand() == '-':
coord2,coord = ls
else: coord,coord2 = ls
if 'I' in intron and ji.Novel() == 'side':
#if 'ENSG00000221983' == ji.GeneID():
try: novel_gene_exon_db[ji.GeneID(),ji.Strand(),intron].append((coord,coord2,ji,key,side))
except Exception: novel_gene_exon_db[ji.GeneID(),ji.Strand(),intron] = [(coord,coord2,ji,key,side)]
except Exception: pass
outdatedExons={} ### merging novel exons, delete one of the two original
for key in novel_gene_exon_db:
firstNovel=True ### First putative novel exon coordinates examined for that gene
novel_gene_exon_db[key].sort()
if key[1]=='-':
novel_gene_exon_db[key].reverse()
for (c1,c2,ji,k,s) in novel_gene_exon_db[key]:
if firstNovel==False:
#print [c1,l2] #abs(c1-l2);sys.exit()
### see if the difference between the start position of the second exon is less than 300 nt away from the end of the last
if abs(c2-l1) < 300 and os!=s: ### 80% of human exons are less than 200nt - PMID: 15217358
proceed = True
#if key[1]=='-':
if c2 in k:
novel_exon_coordinates[k] = ji,s,l1
outdatedExons[ok]=None ### merged out entry
elif l1 in ok:
novel_exon_coordinates[ok] = li,os,c2
outdatedExons[k]=None ### merged out entry
else:
proceed = False ### Hence, the two splice-site ends are pointing to two distinct versus one common exons
"""
if c2 == 18683670 or l1 == 18683670:
print key,abs(c2-l1), c1, c2, l1, l2, li.ExonRegionID(), ji.ExonRegionID();
print k,novel_exon_coordinates[k]
print ok,novel_exon_coordinates[ok]
"""
if proceed:
values = string.join([ji.GeneID(),ji.Strand(),key[2],ji.Chr()+':'+str(l1)+'-'+str(c2)],'\t')+'\n'
export_data.write(values)
### For negative strand genes, c1 is larger than c2 but is the 5' begining of the exon
l1,l2,li,ok,os = c1,c2,ji,k,s ### record the last entry
firstNovel=False
for key in outdatedExons: ### Delete the non-merged entry
del novel_exon_coordinates[key]
export_data.close()
return novel_exon_coordinates
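# --- Illustrative sketch added by the editor; not part of the original RNASeq.py ---
# The merge rule applied in collapseNoveExonBoundaries() above, in isolation: two
# junction-derived boundary pairs for the same gene/strand/intron are collapsed into a
# single exon when the second coordinate of the current pair lies within 300 nt of the
# first coordinate of the previous pair and the two predictions come from opposite
# junction sides. The function and example coordinates below are hypothetical.
def _example_should_merge(c2, l1, current_side, previous_side, max_exon_length=300):
    """Mirror of the 'abs(c2-l1) < 300 and os!=s' test used above."""
    return abs(c2 - l1) < max_exon_length and current_side != previous_side
# e.g. _example_should_merge(18683670, 18683400, 'right', 'left') -> True (270 nt apart)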
def exportNovelExonToBedCoordinates(species,novel_exon_coordinates,chr_status,searchChr=None):
### Export the novel exon coordinates based on those in the junction BED file to examine the differential expression of the predicted novel exon
#bamToBed -i accepted_hits.bam -split| coverageBed -a stdin -b /home/databases/hESC_differentiation_exons.bed > day20_7B__exons-novel.bed
bed_export_path = filepath('AltDatabase/'+species+'/RNASeq/chr/'+species + '_Ensembl_exons'+searchChr+'.bed')
bed_data = open(bed_export_path,'w') ### Appends to existing file
for (chr,coord) in novel_exon_coordinates:
ji,side,coord2 = novel_exon_coordinates[(chr,coord)]
if side == 'left': start,stop = coord,coord2
if side == 'right': start,stop = coord2,coord
try: gene = ji.GeneID()
except Exception: gene = 'NA'
        if gene == None: gene = 'NA'
if gene != 'NA': ### Including these has no benefit for AltAnalyze (just slows down alignment and piles up memory)
if ji.Strand() == '-': stop,start=start,stop
if chr_status == False:
chr = string.replace(chr,'chr','') ### This will thus match up to the BAM files
a = [start,stop]; a.sort(); start,stop = a
bed_values = [chr,str(start),str(stop),gene,'0',str(ji.Strand())]
bed_values = cleanUpLine(string.join(bed_values,'\t'))+'\n'
bed_data.write(bed_values)
bed_data.close()
return bed_export_path
def moveBAMtoBEDFile(species,dataset_name,root_dir):
bed_export_path = filepath('AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_exons.bed')
dataset_name = string.replace(dataset_name,'exp.','')
new_fn = root_dir+'/BAMtoBED/'+species + '_'+dataset_name+'_exons.bed'
new_fn = string.replace(new_fn,'.txt','')
print 'Writing exon-level coordinates to BED file:'
print new_fn
    catFiles(bed_export_path,'chr') ### concatenate the files to the main AltDatabase directory then move
export.customFileMove(bed_export_path,new_fn)
return new_fn
def reformatExonFile(species,type,chr_status):
if type == 'exon':
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt'
export_path = 'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_exons.txt'
### Used by BEDTools to get counts per specific AltAnalyze exon region (should augment with de novo regions identified from junction analyses)
bed_export_path = 'AltDatabase/'+species+'/RNASeq/chr/'+species + '_Ensembl_exons.bed'
bed_data = export.ExportFile(bed_export_path)
else:
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_junction.txt'
export_path = 'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_junctions.txt'
print 'Writing',export_path
export_data = export.ExportFile(export_path)
fn=filepath(filename); x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
x+=1
export_title = ['AltAnalyzeID','exon_id','ensembl_gene_id','transcript_cluster_id','chromosome','strand','probeset_start','probeset_stop']
export_title +=['affy_class','constitutive_probeset','ens_exon_ids','ens_constitutive_status','exon_region','exon-region-start(s)','exon-region-stop(s)','splice_events','splice_junctions']
export_title = string.join(export_title,'\t')+'\n'; export_data.write(export_title)
else:
try: gene, exonid, chr, strand, start, stop, constitutive_call, ens_exon_ids, splice_events, splice_junctions = t
except Exception: print t;kill
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention,
if constitutive_call == 'yes': ens_constitutive_status = '1'
else: ens_constitutive_status = '0'
export_values = [gene+':'+exonid, exonid, gene, '', chr, strand, start, stop, 'known', constitutive_call, ens_exon_ids, ens_constitutive_status]
export_values+= [exonid, start, stop, splice_events, splice_junctions]
export_values = string.join(export_values,'\t')+'\n'; export_data.write(export_values)
if type == 'exon':
if chr_status == False:
chr = string.replace(chr,'chr','') ### This will thus match up to the BAM files
bed_values = [chr,start,stop,gene+':'+exonid+'_'+ens_exon_ids,'0',strand]
bed_values = string.join(bed_values,'\t')+'\n'; bed_data.write(bed_values)
export_data.close()
if type == 'exon': bed_data.close()
def importExonAnnotations(species,type,search_chr):
if 'exon' in type:
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt'
else:
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_junction.txt'
fn=filepath(filename); x=0; exon_annotation_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1
else:
gene, exonid, chr, strand, start, stop, constitutive_call, ens_exon_ids, splice_events, splice_junctions = t; proceed = 'yes'
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if len(search_chr)>0:
if chr != search_chr: proceed = 'no'
if proceed == 'yes':
if type == 'exon': start = int(start); stop = int(stop)
ea = EnsemblImport.ExonAnnotationsSimple(chr, strand, start, stop, gene, ens_exon_ids, constitutive_call, exonid, splice_events, splice_junctions)
if type == 'junction_coordinates':
exon1_start,exon1_stop = string.split(start,'|')
exon2_start,exon2_stop = string.split(stop,'|')
if strand == '-':
exon1_stop,exon1_start = exon1_start,exon1_stop
exon2_stop,exon2_start = exon2_start,exon2_stop
#if gene == 'ENSMUSG00000027340': print chr,int(exon1_stop),int(exon2_start)
exon_annotation_db[chr,int(exon1_stop),int(exon2_start)]=ea
elif type == 'distal-exon':
exon_annotation_db[gene] = exonid
else:
try: exon_annotation_db[gene].append(ea)
except KeyError: exon_annotation_db[gene]=[ea]
return exon_annotation_db
def exportKnownJunctionComparisons(species):
gene_junction_db = JunctionArrayEnsemblRules.importEnsemblUCSCAltJunctions(species,'standard')
gene_intronjunction_db = JunctionArrayEnsemblRules.importEnsemblUCSCAltJunctions(species,'_intronic')
for i in gene_intronjunction_db: gene_junction_db[i]=[]
gene_junction_db2={}
for (gene,critical_exon,incl_junction,excl_junction) in gene_junction_db:
critical_exons = string.split(critical_exon,'|')
for critical_exon in critical_exons:
try: gene_junction_db2[gene,incl_junction,excl_junction].append(critical_exon)
except Exception: gene_junction_db2[gene,incl_junction,excl_junction] = [critical_exon]
gene_junction_db = gene_junction_db2; gene_junction_db2=[]
junction_export = 'AltDatabase/' + species + '/RNASeq/'+ species + '_junction_comps.txt'
fn=filepath(junction_export); data = open(fn,'w')
print "Exporting",junction_export
title = 'gene'+'\t'+'critical_exon'+'\t'+'exclusion_junction_region'+'\t'+'inclusion_junction_region'+'\t'+'exclusion_probeset'+'\t'+'inclusion_probeset'+'\t'+'data_source'+'\n'
data.write(title); temp_list=[]
for (gene,incl_junction,excl_junction) in gene_junction_db:
critical_exons = unique.unique(gene_junction_db[(gene,incl_junction,excl_junction)])
critical_exon = string.join(critical_exons,'|')
temp_list.append(string.join([gene,critical_exon,excl_junction,incl_junction,gene+':'+excl_junction,gene+':'+incl_junction,'AltAnalyze'],'\t')+'\n')
temp_list = unique.unique(temp_list)
for i in temp_list: data.write(i)
data.close()
def getExonAndJunctionSequences(species):
export_exon_filename = 'AltDatabase/'+species+'/RNASeq/'+species+'_Ensembl_exons.txt'
ensembl_exon_db = ExonArrayEnsemblRules.reimportEnsemblProbesetsForSeqExtraction(export_exon_filename,'null',{})
### Import just the probeset region for mRNA alignment analysis
analysis_type = ('region_only','get_sequence'); array_type = 'RNASeq'
dir = 'AltDatabase/'+species+'/SequenceData/chr/'+species; gene_seq_filename = dir+'_gene-seq-2000_flank.fa'
ensembl_exon_db = EnsemblImport.import_sequence_data(gene_seq_filename,ensembl_exon_db,species,analysis_type)
critical_exon_file = 'AltDatabase/'+species+'/'+ array_type + '/' + array_type+'_critical-exon-seq.txt'
getCriticalJunctionSequences(critical_exon_file,species,ensembl_exon_db)
"""
### Import the full Ensembl exon sequence (not just the probeset region) for miRNA binding site analysis
analysis_type = 'get_sequence'; array_type = 'RNASeq'
dir = 'AltDatabase/'+species+'/SequenceData/chr/'+species; gene_seq_filename = dir+'_gene-seq-2000_flank.fa'
ensembl_exon_db = EnsemblImport.import_sequence_data(gene_seq_filename,ensembl_exon_db,species,analysis_type)
"""
critical_exon_file = 'AltDatabase/'+species+'/'+ array_type + '/' + array_type+'_critical-exon-seq.txt'
updateCriticalExonSequences(critical_exon_file, ensembl_exon_db)
def updateCriticalExonSequences(filename,ensembl_exon_db):
exon_seq_db_filename = filename[:-4]+'_updated.txt'
exonseq_data = export.ExportFile(exon_seq_db_filename)
critical_exon_seq_db={}; null_count={}
for gene in ensembl_exon_db:
gene_exon_data={}
for probe_data in ensembl_exon_db[gene]:
exon_id,((probe_start,probe_stop,probeset_id,exon_class,transcript_clust),ed) = probe_data
try: gene_exon_data[probeset_id] = ed.ExonSeq()
except Exception: null_count[gene]=[] ### Occurs for non-chromosomal DNA (could also download this sequence though)
if len(gene_exon_data)>0: critical_exon_seq_db[gene] = gene_exon_data
print len(null_count),'genes not assigned sequenced (e.g.,non-chromosomal)'
ensembl_exon_db=[]
### Export exon sequences
for gene in critical_exon_seq_db:
gene_exon_data = critical_exon_seq_db[gene]
for probeset in gene_exon_data:
critical_exon_seq = gene_exon_data[probeset]
values = [probeset,'',critical_exon_seq]
values = string.join(values,'\t')+'\n'
exonseq_data.write(values)
exonseq_data.close()
print exon_seq_db_filename, 'exported....'
def getCriticalJunctionSequences(filename,species,ensembl_exon_db):
### Assemble and export junction sequences
junction_seq_db_filename = string.replace(filename,'exon-seq','junction-seq')
junctionseq_data = export.ExportFile(junction_seq_db_filename)
critical_exon_seq_db={}; null_count={}
for gene in ensembl_exon_db:
gene_exon_data={}
for probe_data in ensembl_exon_db[gene]:
exon_id,((probe_start,probe_stop,probeset_id,exon_class,transcript_clust),ed) = probe_data
try: gene_exon_data[probeset_id] = ed.ExonSeq()
except Exception: null_count[gene]=[] ### Occurs for non-chromosomal DNA (could also download this sequence though)
if len(gene_exon_data)>0: critical_exon_seq_db[gene] = gene_exon_data
print len(null_count),'genes not assigned sequenced (e.g.,non-chromosomal)'
ensembl_exon_db=[]
junction_annotation_db = importExonAnnotations(species,'junction',[])
for gene in junction_annotation_db:
if gene in critical_exon_seq_db:
gene_exon_data = critical_exon_seq_db[gene]
for jd in junction_annotation_db[gene]:
exon1,exon2=string.split(jd.ExonRegionIDs(),'-')
p1=gene+':'+exon1
p2=gene+':'+exon2
p1_seq=gene_exon_data[p1][-15:]
p2_seq=gene_exon_data[p2][:15]
junction_seq = p1_seq+'|'+p2_seq
junctionseq_data.write(gene+':'+jd.ExonRegionIDs()+'\t'+junction_seq+'\t\n')
junctionseq_data.close()
print junction_seq_db_filename, 'exported....'
def getEnsemblAssociations(species,data_type,test_status,force):
### Get UCSC associations (download databases if necessary)
from build_scripts import UCSCImport
mRNA_Type = 'mrna'; run_from_scratch = 'yes'
export_all_associations = 'no' ### YES only for protein prediction analysis
update.buildUCSCAnnoationFiles(species,mRNA_Type,export_all_associations,run_from_scratch,force)
null = EnsemblImport.getEnsemblAssociations(species,data_type,test_status); null=[]
reformatExonFile(species,'exon',True); reformatExonFile(species,'junction',True)
exportKnownJunctionComparisons(species)
getExonAndJunctionSequences(species)
######### Below code deals with user read alignment as opposed to building the AltDatabase #########
class ExonInfo:
def __init__(self,start,unique_id,annotation):
self.start = start; self.unique_id = unique_id; self.annotation = annotation
def ReadStart(self): return self.start
def UniqueID(self): return self.unique_id
def Annotation(self): return self.annotation
def setExonRegionData(self,rd): self.rd = rd
def ExonRegionData(self): return self.rd
def setExonRegionID(self,region_id): self.region_id = region_id
def ExonRegionID(self): return self.region_id
def setAlignmentRegion(self,region_type): self.region_type = region_type
def AlignmentRegion(self): return self.region_type
def __repr__(self): return "ExonData values"
class JunctionData:
def __init__(self,chr,strand,exon1_stop,exon2_start,junction_id,biotype):
self.chr = chr; self.strand = strand; self._chr = chr
self.exon1_stop = exon1_stop; self.exon2_start = exon2_start
self.junction_id = junction_id; self.biotype = biotype
#self.reads = reads; self.condition = condition
self.left_exon = None; self.right_exon = None; self.jd = None; self.gene_id = None
self.trans_splicing = None
self.splice_events=''
self.splice_junctions=''
self.seq_length=''
self.uid = None
def Chr(self): return self.chr
def Strand(self): return self.strand
def Exon1Stop(self): return self.exon1_stop
def Exon2Start(self): return self.exon2_start
def setExon1Stop(self,exon1_stop): self.exon1_stop = exon1_stop
def setExon2Start(self,exon2_start): self.exon2_start = exon2_start
def setSeqLength(self,seq_length): self.seq_length = seq_length
def SeqLength(self): return self.seq_length
def BioType(self): return self.biotype
def checkExonPosition(self,exon_pos):
if exon_pos == self.Exon1Stop(): return 'left'
else: return 'right'
### These are used to report novel exon boundaries
def setExon1Start(self,exon1_start): self.exon1_start = exon1_start
def setExon2Stop(self,exon2_stop): self.exon2_stop = exon2_stop
def Exon1Start(self): return self.exon1_start
def Exon2Stop(self): return self.exon2_stop
def Reads(self): return self.reads
def JunctionID(self): return self.junction_id
def Condition(self): return self.condition
def setExonAnnotations(self,jd):
self.jd = jd
self.splice_events = jd.AssociatedSplicingEvent()
self.splice_junctions = jd.AssociatedSplicingJunctions()
self.exon_region = jd.ExonRegionIDs()
self.exonid = jd.ExonID()
self.gene_id = jd.GeneID()
self.uid = jd.GeneID()+':'+jd.ExonRegionIDs()
def ExonAnnotations(self): return self.jd
def setLeftExonAnnotations(self,ld): self.gene_id,self.left_exon = ld
def LeftExonAnnotations(self): return self.left_exon
def setRightExonAnnotations(self,rd): self.secondary_geneid,self.right_exon = rd
def RightExonAnnotations(self): return self.right_exon
def setGeneID(self,geneid): self.gene_id = geneid
def GeneID(self): return self.gene_id
def setSecondaryGeneID(self,secondary_geneid): self.secondary_geneid = secondary_geneid
def SecondaryGeneID(self): return self.secondary_geneid
def setTransSplicing(self): self.trans_splicing = 'yes'
def TransSplicing(self): return self.trans_splicing
def SpliceSitesFound(self):
if self.jd != None: sites_found = 'both'
elif self.left_exon != None and self.right_exon != None: sites_found = 'both'
elif self.left_exon != None: sites_found = 'left'
elif self.right_exon != None: sites_found = 'right'
else: sites_found = None
return sites_found
def setConstitutive(self,constitutive): self.constitutive = constitutive
def Constitutive(self): return self.constitutive
def setAssociatedSplicingEvent(self,splice_events): self.splice_events = splice_events
def AssociatedSplicingEvent(self): return self.splice_events
def setAssociatedSplicingJunctions(self,splice_junctions): self.splice_junctions = splice_junctions
def AssociatedSplicingJunctions(self): return self.splice_junctions
def setExonID(self,exonid): self.exonid = exonid
def ExonID(self): return self.exonid
def setExonRegionID(self,exon_region): self.exon_region = exon_region
def ExonRegionID(self): return self.exon_region
def setUniqueID(self,uid): self.uid = uid
def UniqueID(self): return self.uid
def setLeftExonRegionData(self,li): self.li = li
def LeftExonRegionData(self): return self.li
def setRightExonRegionData(self,ri): self.ri = ri
def RightExonRegionData(self): return self.ri
def setNovel(self, side): self.side = side
def Novel(self): return self.side
def __repr__(self): return "JunctionData values"
def checkBEDFileFormat(bed_dir,root_dir):
""" This method checks to see if the BED files (junction or exon) have 'chr' proceeding the chr number.
It also checks to see if some files have two underscores and one has none or if double underscores are missing from all."""
dir_list = read_directory(bed_dir)
x=0
break_now = False
chr_present = False
condition_db={}
for filename in dir_list:
fn=filepath(bed_dir+filename)
#if ('.bed' in fn or '.BED' in fn): delim = 'r'
delim = 'rU'
if '.tab' in string.lower(filename) or '.bed' in string.lower(filename) or '.junction_quantification.txt' in string.lower(filename):
condition_db[filename]=[]
for line in open(fn,delim).xreadlines(): ### changed rU to r to remove \r effectively, rather than read as end-lines
if line[0] == '#': x=0 ### BioScope
elif x == 0: x=1 ###skip the first line
elif x < 10: ### Only check the first 10 lines
if 'chr' in line: ### Need to look at multiple input formats (chr could be in t[0] or t[1])
chr_present = True
x+=1
else:
break_now = True
break
if break_now == True:
break
    ### Check to see if exon.bed and junction.bed file names are proper or faulty (which will result in downstream errors)
double_underscores=[]
no_doubles=[]
for condition in condition_db:
if '__' in condition:
double_underscores.append(condition)
else:
no_doubles.append(condition)
exon_beds=[]
junctions_beds=[]
if len(double_underscores)>0 and len(no_doubles)>0:
### Hence, a problem is likely due to inconsistent naming
        print 'The input files appear to have inconsistent naming. If both exon and junction sample data are present, make sure they are named properly.'
print 'For example: cancer1__exon.bed, cancer1__junction.bed (double underscore required to match these samples up)!'
print 'Exiting AltAnalyze'; forceError
elif len(no_doubles)>0:
for condition in no_doubles:
condition = string.lower(condition)
if 'exon' in condition:
exon_beds.append(condition)
if 'junction' in condition:
junctions_beds.append(condition)
if len(exon_beds)>0 and len(junctions_beds)>0:
            print 'The input files appear to have inconsistent naming. If both exon and junction sample data are present, make sure they are named properly.'
print 'For example: cancer1__exon.bed, cancer1__junction.bed (double underscore required to match these samples up)!'
print 'Exiting AltAnalyze'; forceError
return chr_present
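# --- Illustrative note added by the editor; not part of the original RNASeq.py ---
# Example of the file naming that checkBEDFileFormat() enforces when both exon and
# junction BED files are supplied for the same samples (the sample names are hypothetical):
#   cancer1__exon.bed       cancer1__junction.bed
#   control1__exon.bed      control1__junction.bed
# The text before the double underscore pairs an exon file with its matching junction
# file; mixing double-underscore and plain file names, or supplying exon and junction
# files without any double underscores, triggers the forceError exit above.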
def getStrandMappingData(species):
splicesite_db={}
refExonCoordinateFile = unique.filepath('AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt')
firstLine=True
for line in open(refExonCoordinateFile,'rU').xreadlines():
if firstLine: firstLine=False
else:
line = line.rstrip('\n')
t = string.split(line,'\t'); #'gene', 'exon-id', 'chromosome', 'strand', 'exon-region-start(s)', 'exon-region-stop(s)', 'constitutive_call', 'ens_exon_ids', 'splice_events', 'splice_junctions'
geneID, exon, chr, strand, start, stop = t[:6]
splicesite_db[chr,int(start)]=strand
splicesite_db[chr,int(stop)]=strand
return splicesite_db
def importBEDFile(bed_dir,root_dir,species,normalize_feature_exp,getReads=False,searchChr=None,getBiotype=None,testImport=False,filteredJunctions=None):
dir_list = read_directory(bed_dir)
begin_time = time.time()
if 'chr' not in searchChr:
searchChr = 'chr'+searchChr
condition_count_db={}; neg_count=0; pos_count=0; junction_db={}; biotypes={}; algorithms={}; exon_len_db={}; splicesite_db={}
if testImport == 'yes': print "Reading user RNA-seq input data files"
for filename in dir_list:
count_db={}; rows=0
fn=filepath(bed_dir+filename)
condition = export.findFilename(fn)
if '__' in condition:
### Allow multiple junction files per sample to be combined (e.g. canonical and non-canonical junction alignments)
condition=string.split(condition,'__')[0]+filename[-4:]
if ('.bed' in fn or '.BED' in fn or '.tab' in fn or '.TAB' in fn or '.junction_quantification.txt' in fn) and '._' not in condition:
if ('.bed' in fn or '.BED' in fn): delim = 'r'
else: delim = 'rU'
### The below code removes .txt if still in the filename along with .tab or .bed
if '.tab' in fn: condition = string.replace(condition,'.txt','.tab')
elif '.bed' in fn: condition = string.replace(condition,'.txt','.bed')
if '.TAB' in fn: condition = string.replace(condition,'.txt','.TAB')
elif '.BED' in fn: condition = string.replace(condition,'.txt','.BED')
if testImport == 'yes': print "Reading the bed file", [fn], condition
            ### If the BED was manually created on a Mac, will need 'rU' - test this
for line in open(fn,delim).xreadlines(): break
if len(line)>500: delim = 'rU'
for line in open(fn,delim).xreadlines(): ### changed rU to r to remove \r effectively, rather than read as end-lines
data = cleanUpLine(line)
t = string.split(data,'\t')
rows+=1
if rows==1 or '#' == data[0]:
format_description = data
algorithm = 'Unknown'
if 'TopHat' in format_description: algorithm = 'TopHat'
elif 'HMMSplicer' in format_description: algorithm = 'HMMSplicer'
elif 'SpliceMap junctions' in format_description: algorithm = 'SpliceMap'
elif t[0] == 'E1': algorithm = 'BioScope-junction'
elif '# filterOrphanedMates=' in data or 'alignmentFilteringMode=' in data or '#number_of_mapped_reads=' in data:
algorithm = 'BioScope-exon'
elif '.junction_quantification.txt' in fn:
algorithm = 'TCGA format'
if 'barcode' in t: junction_position = 1
else: junction_position = 0
elif '.tab' in fn and len(t)==9:
try: start = float(t[1]) ### expect this to be a numerical coordinate
except Exception: continue
algorithm = 'STAR'
strand = '-' ### If no strand exists
rows=2 ### allows this first row to be processed
if len(splicesite_db)==0: ### get strand to pos info
splicesite_db = getStrandMappingData(species)
if testImport == 'yes': print condition, algorithm
if rows>1:
try:
if ':' in t[0]:
chr = string.split(t[0],':')[0]
else: chr = t[0]
if 'chr' not in chr:
chr = 'chr'+chr
if searchChr == chr or ('BioScope' in algorithm and searchChr == t[1]): proceed = True
elif searchChr == 'chrMT' and ('BioScope' not in algorithm):
if 'M' in chr and len(chr)<6: proceed = True ### If you don't have the length, any random thing with an M will get included
else: proceed = False
else: proceed = False
except IndexError:
print 'The input file:\n',filename
print 'is not formated as expected (format='+algorithm+').'
print 'search chromosome:',searchChr
print t; force_bad_exit
if proceed:
proceed = False
if '.tab' in fn or '.TAB' in fn:
### Applies to non-BED format Junction and Exon inputs (BioScope)
if 'BioScope' in algorithm:
if algorithm == 'BioScope-exon': ### Not BED format
chr,source,data_type,start,end,reads,strand,null,gene_info=t[:9]
if 'chr' not in chr: chr = 'chr'+chr
if data_type == 'exon': ### Can also be CDS
gene_info,test,rpkm_info,null = string.split(gene_info,';')
symbol = string.split(gene_info,' ')[-1]
#refseq = string.split(transcript_info,' ')[-1]
rpkm = string.split(rpkm_info,' ')[-1]
#if normalize_feature_exp == 'RPKM': reads = rpkm ### The RPKM should be adjusted +1 counts, so don't use this
biotype = 'exon'; biotypes[biotype]=[]
exon1_stop,exon2_start = int(start),int(end); junction_id=''
### Adjust exon positions - not ideal but necessary. Needed as a result of exon regions overlapping by 1nt (due to build process)
exon1_stop+=1; exon2_start-=1
#if float(reads)>4 or getReads:
proceed = True ### Added in version 2.0.9 to remove rare novel isoforms
seq_length = abs(exon1_stop-exon2_start)
if algorithm == 'BioScope-junction':
chr = t[1]; strand = t[2]; exon1_stop = int(t[4]); exon2_start = int(t[8]); count_paired = t[17]; count_single = t[19]; score=t[21]
if 'chr' not in chr: chr = 'chr'+chr
try: exon1_start = int(t[3]); exon2_stop = int(t[9])
except Exception: pass ### If missing, these are not assigned
reads = str(int(float(count_paired))+int(float(count_single))) ### Users will either have paired or single read (this uses either)
biotype = 'junction'; biotypes[biotype]=[]; junction_id=''
if float(reads)>4 or getReads: proceed = True ### Added in version 2.0.9 to remove rare novel isoforms
seq_length = abs(float(exon1_stop-exon2_start))
if 'STAR' in algorithm:
chr = t[0]; exon1_stop = int(t[1])-1; exon2_start = int(t[2])+1; strand=''
if 'chr' not in chr: chr = 'chr'+chr
reads = str(int(t[7])+int(t[6]))
biotype = 'junction'; biotypes[biotype]=[]; junction_id=''
if float(reads)>4 or getReads: proceed = True ### Added in version 2.0.9 to remove rare novel isoforms
if (chr,exon1_stop) in splicesite_db:
strand = splicesite_db[chr,exon1_stop]
elif (chr,exon2_start) in splicesite_db:
strand = splicesite_db[chr,exon2_start]
#else: proceed = False
seq_length = abs(float(exon1_stop-exon2_start))
if strand == '-': ### switch the orientation of the positions
exon1_stop,exon2_start=exon2_start,exon1_stop
exon1_start = exon1_stop; exon2_stop = exon2_start
#if 9996685==exon1_stop and 10002682==exon2_stop:
#print chr, strand, reads, exon1_stop, exon2_start,proceed;sys.exit()
else:
try:
if algorithm == 'TCGA format':
coordinates = string.split(t[junction_position],',')
try: chr,pos1,strand = string.split(coordinates[0],':')
except Exception: print t;sys.exit()
chr,pos2,strand = string.split(coordinates[1],':')
if 'chr' not in chr: chr = 'chr'+chr
pos2 = str(int(pos2)-1) ### This is the bed format conversion with exons of 0 length
exon1_start, exon2_stop = pos1, pos2
reads = t[junction_position+1]
junction_id = t[junction_position]
exon1_len=0; exon2_len=0
else:
### Applies to BED format Junction input
chr, exon1_start, exon2_stop, junction_id, reads, strand, null, null, null, null, lengths, null = t
if 'chr' not in chr: chr = 'chr'+chr
exon1_len,exon2_len=string.split(lengths,',')[:2]; exon1_len = int(exon1_len); exon2_len = int(exon2_len)
exon1_start = int(exon1_start); exon2_stop = int(exon2_stop)
biotype = 'junction'; biotypes[biotype]=[]
if strand == '-':
if (exon1_len+exon2_len)==0: ### Kallisto-Splice directly reports these coordinates
exon1_stop = exon1_start
exon2_start = exon2_stop
else:
exon1_stop = exon1_start+exon1_len; exon2_start=exon2_stop-exon2_len+1
### Exons have the opposite order
a = exon1_start,exon1_stop; b = exon2_start,exon2_stop
exon1_stop,exon1_start = b; exon2_stop,exon2_start = a
else:
if (exon1_len+exon2_len)==0: ### Kallisto-Splice directly reports these coordinates
exon1_stop = exon1_start
exon2_start= exon2_stop
else:
exon1_stop = exon1_start+exon1_len; exon2_start=exon2_stop-exon2_len+1
if float(reads)>4 or getReads: proceed = True
if algorithm == 'HMMSplicer':
if '|junc=' in junction_id: reads = string.split(junction_id,'|junc=')[-1]
else: proceed = False
if algorithm == 'SpliceMap':
if ')' in junction_id and len(junction_id)>1: reads = string.split(junction_id,')')[0][1:]
else: proceed = False
seq_length = abs(float(exon1_stop-exon2_start)) ### Junction distance
except Exception,e:
#print traceback.format_exc();sys.exit()
### Applies to BED format exon input (BEDTools export)
# bamToBed -i accepted_hits.bam -split| coverageBed -a stdin -b /home/nsalomonis/databases/Mm_Ensembl_exons.bed > day0_8B__exons.bed
try: chr, start, end, exon_id, null, strand, reads, bp_coverage, bp_total, percent_coverage = t
except Exception:
                                    print 'The file',fn,'does not appear to be properly formatted as input.'
print t; force_exception
if 'chr' not in chr: chr = 'chr'+chr
algorithm = 'TopHat-exon'; biotype = 'exon'; biotypes[biotype]=[]
exon1_stop,exon2_start = int(start),int(end); junction_id=exon_id; seq_length = float(bp_total)
if seq_length == 0:
seq_length = abs(float(exon1_stop-exon2_start))
### Adjust exon positions - not ideal but necessary. Needed as a result of exon regions overlapping by 1nt (due to build process)
exon1_stop+=1; exon2_start-=1
#if float(reads)>4 or getReads: ### Added in version 2.0.9 to remove rare novel isoforms
proceed = True
#else: proceed = False
if proceed:
if 'chr' not in chr:
chr = 'chr'+chr ### Add the chromosome prefix
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if strand == '+': pos_count+=1
else: neg_count+=1
if getReads and seq_length>0:
if getBiotype == biotype:
if biotype == 'junction':
### We filtered for junctions>4 reads before, now we include all reads for expressed junctions
if (chr,exon1_stop,exon2_start) in filteredJunctions:
count_db[chr,exon1_stop,exon2_start] = reads
try: exon_len_db[chr,exon1_stop,exon2_start] = seq_length
except Exception: exon_len_db[chr,exon1_stop,exon2_start] = []
else:
count_db[chr,exon1_stop,exon2_start] = reads
try: exon_len_db[chr,exon1_stop,exon2_start] = seq_length
except Exception: exon_len_db[chr,exon1_stop,exon2_start] = []
elif seq_length>0:
if (chr,exon1_stop,exon2_start) not in junction_db:
ji = JunctionData(chr,strand,exon1_stop,exon2_start,junction_id,biotype)
junction_db[chr,exon1_stop,exon2_start] = ji
try: ji.setSeqLength(seq_length) ### If RPKM imported or calculated
except Exception: null=[]
try: ji.setExon1Start(exon1_start);ji.setExon2Stop(exon2_stop)
except Exception: null=[]
key = chr,exon1_stop,exon2_start
algorithms[algorithm]=[]
if getReads:
if condition in condition_count_db:
### combine the data from the different files for the same sample junction alignments
count_db1 = condition_count_db[condition]
for key in count_db:
if key not in count_db1: count_db1[key] = count_db[key]
else:
combined_counts = int(count_db1[key])+int(count_db[key])
count_db1[key] = str(combined_counts)
condition_count_db[condition]=count_db1
else:
try: condition_count_db[condition] = count_db
except Exception: null=[] ### Occurs for other text files in the directory that are not used for the analysis
end_time = time.time()
if testImport == 'yes': print 'Read coordinates imported in',int(end_time-begin_time),'seconds'
if getReads:
#print len(exon_len_db), getBiotype, 'read counts present for',algorithm
return condition_count_db,exon_len_db,biotypes,algorithms
else:
if testImport == 'yes':
if 'exon' not in biotypes and 'BioScope' not in algorithm:
print len(junction_db),'junctions present in',algorithm,'format BED files.' # ('+str(pos_count),str(neg_count)+' by strand).'
elif 'exon' in biotypes and 'BioScope' not in algorithm:
print len(junction_db),'sequence identifiers present in input files.'
else: print len(junction_db),'sequence identifiers present in BioScope input files.'
return junction_db,biotypes,algorithms
def importExonCoordinates(probeCoordinateFile,search_chr,getBiotype):
probe_coordinate_db={}
junction_db={}
biotypes={}
x=0
fn=filepath(probeCoordinateFile)
for line in open(fn,'rU').xreadlines(): ### changed rU to r to remove \r effectively, rather than read as end-lines
data = cleanUpLine(line)
if x==0: x=1
else:
t = string.split(data,'\t')
probe_id = t[0]; probeset_id=t[1]; chr=t[2]; strand=t[3]; start=t[4]; end=t[5]
exon1_stop,exon2_start = int(start),int(end)
seq_length = abs(float(exon1_stop-exon2_start))
if 'chr' not in chr:
chr = 'chr'+chr ### Add the chromosome prefix
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if search_chr == chr or search_chr == None:
try: biotype = t[6]
except Exception:
if seq_length>25:biotype = 'junction'
else: biotype = 'exon'
if strand == '-':
exon1_stop,exon2_start = exon2_start, exon1_stop ### this is their actual 5' -> 3' orientation
if biotype == 'junction':
exon1_start,exon2_stop = exon1_stop,exon2_start
else:
exon1_stop+=1; exon2_start-=1
biotypes[biotype]=[]
if getBiotype == biotype or getBiotype == None:
ji = JunctionData(chr,strand,exon1_stop,exon2_start,probe_id,biotype)
junction_db[chr,exon1_stop,exon2_start] = ji
try: ji.setSeqLength(seq_length) ### If RPKM imported or calculated
except Exception: null=[]
try: ji.setExon1Start(exon1_start);ji.setExon2Stop(exon2_stop)
except Exception: null=[]
probe_coordinate_db[probe_id] = chr,exon1_stop,exon2_start ### Import the expression data for the correct chromosomes with these IDs
return probe_coordinate_db, junction_db, biotypes
def importExpressionMatrix(exp_dir,root_dir,species,fl,getReads,search_chr=None,getBiotype=None):
""" Non-RNA-Seq expression data (typically Affymetrix microarray) import and mapping to an external probe-coordinate database """
begin_time = time.time()
condition_count_db={}; neg_count=0; pos_count=0; algorithms={}; exon_len_db={}
probe_coordinate_db, junction_db, biotypes = importExonCoordinates(fl.ExonMapFile(),search_chr,getBiotype)
x=0
fn=filepath(exp_dir)[:-1]
condition = export.findFilename(fn)
    ### If the BED was manually created on a Mac, will need 'rU' - test this
for line in open(fn,'rU').xreadlines(): ### changed rU to r to remove \r effectively, rather than read as end-lines
data = cleanUpLine(line)
t = string.split(data,'\t')
if '#' == data[0]: None
elif x==0:
if 'block' in t:
start_index = 7
else:
start_index = 1
headers = t[start_index:]
x=1
else:
proceed = 'yes' ### restrict by chromosome with minimum line parsing (unless we want counts instead)
probe_id=t[0]
if probe_id in probe_coordinate_db:
key = probe_coordinate_db[probe_id]
if getReads == 'no':
pass
else:
expression_data = t[start_index:]
i=0
for sample in headers:
if sample in condition_count_db:
count_db = condition_count_db[sample]
count_db[key] = expression_data[i]
exon_len_db[key]=[]
else:
count_db={}
count_db[key] = expression_data[i]
condition_count_db[sample] = count_db
exon_len_db[key]=[]
i+=1
algorithms['ProbeData']=[]
end_time = time.time()
if testImport == 'yes': print 'Probe data imported in',int(end_time-begin_time),'seconds'
if getReads == 'yes':
return condition_count_db,exon_len_db,biotypes,algorithms
else:
return junction_db,biotypes,algorithms
def adjustCounts(condition_count_db,exon_len_db):
for key in exon_len_db:
try:
null=exon_len_db[key]
for condition in condition_count_db:
count_db = condition_count_db[condition]
                try: read_count = float(count_db[key])+1 ###This adjustment allows us to obtain more realistic folds where 0 is compared and use log2
except KeyError: read_count = 1 ###Was zero, but needs to be one for more realistic log2 fold calculations
count_db[key] = str(read_count) ### Replace original counts with adjusted counts
except Exception: null=[]
return condition_count_db
def calculateRPKM(condition_count_db,exon_len_db,biotype_to_examine):
"""Determines the total number of reads in a sample and then calculates RPMK relative to a pre-determined junction length (60).
60 was choosen, based on Illumina single-end read lengths of 35 (5 nt allowed overhand on either side of the junction)"""
### Get the total number of mapped reads
mapped_reads={}
for condition in condition_count_db:
mapped_reads[condition]=0
count_db = condition_count_db[condition]
for key in count_db:
read_count = count_db[key]
mapped_reads[condition]+=float(read_count)
### Use the average_total_reads when no counts reported such that 0 counts are comparable
average_total_reads = 0
for i in mapped_reads:
average_total_reads+=mapped_reads[i]
if testImport == 'yes':
print 'condition:',i,'total reads:',mapped_reads[i]
average_total_reads = average_total_reads/len(condition_count_db)
if testImport == 'yes':
print 'average_total_reads:',average_total_reads
k=0
c=math.pow(10.0,9.0)
for key in exon_len_db:
try:
for condition in condition_count_db:
total_mapped_reads = mapped_reads[condition]
                try: read_count = float(condition_count_db[condition][key])+1 ###This adjustment allows us to obtain more realistic folds where 0 is compared and use log2
except KeyError: read_count = 1 ###Was zero, but needs to be one for more realistic log2 fold calculations
if biotype_to_examine == 'junction': region_length = 60.0
else:
try: region_length = exon_len_db[key]
except Exception: continue ### This should only occur during testing (when restricting to one or few chromosomes)
                if read_count == 1: ###This adjustment allows us to obtain more realistic folds where 0 is compared and use log2
rpkm = c*(float(read_count)/(float(average_total_reads)*region_length))
try:
if region_length == 0:
region_length = abs(int(key[2]-key[1]))
rpkm = c*(read_count/(float(total_mapped_reads)*region_length))
except Exception:
print condition, key
                    print 'Error Encountered... Exon or Junction of zero length encountered... RPKM failed... Exiting AltAnalyze.'
                    print 'This error may be due to inconsistent file naming. If both exon and junction sample data are present, make sure they are named properly.'
print 'For example: cancer1__exon.bed, cancer1__junction.bed (double underscore required to match these samples up)!'
print [read_count,total_mapped_reads,region_length];k=1; forceError
                condition_count_db[condition][key] = str(rpkm) ### Replace original counts with RPKM
except Exception:
if k == 1: kill
null=[]
return condition_count_db
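# --- Illustrative sketch added by the editor; not part of the original RNASeq.py ---
# The RPKM formula used above, written out for a single feature. For junctions the
# region length is fixed at 60 nt, as hard-coded in calculateRPKM; the example numbers
# below are hypothetical. Relies on the 'import math' at the top of this module.
def _example_rpkm(read_count, total_mapped_reads, region_length=60.0):
    """Reads per kilobase of feature per million mapped reads; 1e9 = 1,000 (per kb) x 1,000,000 (per million reads)."""
    c = math.pow(10.0, 9.0)
    return c * (float(read_count) / (float(total_mapped_reads) * region_length))
# e.g. _example_rpkm(25, 30e6) -> ~13.9 for a junction with 25 reads in a library of
# 30 million mapped reads.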
def calculateGeneLevelStatistics(steady_state_export,species,expressed_gene_exon_db,normalize_feature_exp,array_names,fl,excludeLowExp=True,exportRPKMs=False):
global UserOptions; UserOptions = fl
exp_file = string.replace(steady_state_export,'-steady-state','')
if normalize_feature_exp == 'RPKM':
exp_dbase, all_exp_features, array_count = importRawCountData(exp_file,expressed_gene_exon_db,excludeLowExp=excludeLowExp)
steady_state_db = obtainGeneCounts(expressed_gene_exon_db,species,exp_dbase,array_count,normalize_feature_exp,excludeLowExp=excludeLowExp); exp_dbase=[]
exportGeneCounts(steady_state_export,array_names,steady_state_db)
steady_state_db = calculateGeneRPKM(steady_state_db)
if exportRPKMs:
exportGeneCounts(steady_state_export,array_names,steady_state_db,dataType='RPKMs')
else:
exp_dbase, all_exp_features, array_count = importNormalizedCountData(exp_file,expressed_gene_exon_db)
steady_state_db = obtainGeneCounts(expressed_gene_exon_db,species,exp_dbase,array_count,normalize_feature_exp); exp_dbase=[]
exportGeneCounts(steady_state_export,array_names,steady_state_db)
return steady_state_db, all_exp_features
def exportGeneCounts(steady_state_export,headers,gene_count_db,dataType='counts'):
### In addition to RPKM gene-level data, export gene level counts and lengths (should be able to calculate gene RPKMs from this file)
if dataType=='counts':
export_path = string.replace(steady_state_export,'exp.','counts.')
else:
export_path = steady_state_export
export_data = export.ExportFile(export_path)
title = string.join(['Ensembl']+headers,'\t')+'\n'
export_data.write(title)
for gene in gene_count_db:
sample_counts=[]
for count_data in gene_count_db[gene]:
try: read_count,region_length = count_data
except Exception: read_count = count_data
sample_counts.append(str(read_count))
sample_counts = string.join([gene]+sample_counts,'\t')+'\n'
export_data.write(sample_counts)
export_data.close()
def importGeneCounts(filename,import_type):
### Import non-normalized original counts and return the max value
counts_filename = string.replace(filename,'exp.','counts.')
status = verifyFile(counts_filename)
if status == 'not found': ### Occurs for non-normalized counts
counts_filename = filename
fn=filepath(counts_filename); x=0; count_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: array_names = t[1:]; x=1
else:
gene = t[0]
if import_type == 'max':
count_db[gene] = str(max(map(float,t[1:])))
else:
count_db[gene] = map(float,t[1:])
return count_db,array_names
def calculateGeneRPKM(gene_count_db):
"""Determines the total number of reads in a sample and then calculates RPMK relative to a pre-determined junction length (60).
60 was choosen, based on Illumina single-end read lengths of 35 (5 nt allowed overhand on either side of the junction)"""
### Get the total number of mapped reads (relative to all gene aligned rather than genome aligned exon reads)
mapped_reads={}
for gene in gene_count_db:
index=0
for (read_count,total_len) in gene_count_db[gene]:
try: mapped_reads[index]+=float(read_count)
except Exception: mapped_reads[index]=float(read_count)
index+=1
### Use the average_total_reads when no counts reported such that 0 counts are comparable
average_total_reads = 0
for i in mapped_reads: average_total_reads+=mapped_reads[i]
average_total_reads = average_total_reads/(index+1) ###
c=math.pow(10.0,9.0)
for gene in gene_count_db:
index=0; rpkms = []
for (read_count,region_length) in gene_count_db[gene]:
total_mapped_reads = mapped_reads[index]
#print [read_count],[region_length],[total_mapped_reads]
#if gene == 'ENSMUSG00000028186': print [read_count, index, total_mapped_reads,average_total_reads,region_length]
            if read_count == 0: read_count=1; rpkm = c*(float(read_count)/(float(average_total_reads)*region_length)) ###This adjustment allows us to obtain more realistic folds where 0 is compared and use log2
else:
try: rpkm = c*(float(read_count+1)/(float(total_mapped_reads)*region_length)) ### read count is incremented +1 (see next line)
                except Exception: read_count=1; rpkm = c*(float(read_count)/(float(average_total_reads)*region_length)) ###This adjustment allows us to obtain more realistic folds where 0 is compared and use log2
#if gene == 'ENSMUSG00000028186': print rpkm,read_count,index,total_mapped_reads,average_total_reads,region_length
#if gene == 'ENSMUSG00000026049': print gene_count_db[gene], mapped_reads[index], rpkm
rpkms.append(rpkm)
index+=1
        gene_count_db[gene] = rpkms ### Replace original counts with RPKM
return gene_count_db
def deleteOldAnnotations(species,root_dir,dataset_name):
db_dir = root_dir+'AltDatabase/'+species
try:
status = export.deleteFolder(db_dir)
if status == 'success':
print "...Previous experiment database deleted"
except Exception: null=[]
count_dir = root_dir+'ExpressionInput/Counts'
try: status = export.deleteFolder(count_dir)
except Exception: pass
if 'exp.' not in dataset_name: dataset_name = 'exp.'+dataset_name
if '.txt' not in dataset_name: dataset_name+='.txt'
export_path = root_dir+'ExpressionInput/'+dataset_name
try: os.remove(filepath(export_path))
except Exception: null=[]
try: os.remove(filepath(string.replace(export_path,'exp.','counts.')))
except Exception: null=[]
try: os.remove(filepath(string.replace(export_path,'exp.','novel.')))
except Exception: null=[]
from copy_reg import pickle
from types import MethodType
def _pickle_method(method):
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
def call_it(instance, name, args=(), kwargs=None):
"indirect caller for instance methods and multiprocessing"
if kwargs is None:
kwargs = {}
return getattr(instance, name)(*args, **kwargs)
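### The three helpers above exist because Python 2's pickle cannot serialize bound methods, which multiprocessing
### requires when an instance method (such as a class __call__) is dispatched to worker processes. They are
### typically wired up with copy_reg, e.g. pickle(MethodType, _pickle_method, _unpickle_method); the exact
### registration call is assumed, as it is not visible in this section. call_it offers an alternative,
### pickle-free indirection for Pool-based calls.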
def alignExonsAndJunctionsToEnsembl(species,exp_file_location_db,dataset_name,Multi=None):
fl = exp_file_location_db[dataset_name]
try: multiThreading = fl.multiThreading()
except Exception: multiThreading = True
print 'multiThreading:',multiThreading
normalize_feature_exp = fl.FeatureNormalization()
testImport='no'
if 'demo_data' in fl.ExpFile():
### If the input files are in the AltAnalyze test directory, only analyze select chromosomes
print 'Running AltAnalyze in TEST MODE... restricting to select chromosomes only!!!!!'
testImport='yes'
rnaseq_begin_time = time.time()
p = AlignExonsAndJunctionsToEnsembl(species,exp_file_location_db,dataset_name,testImport)
chromosomes = p.getChromosomes()
### The following files need to be produced from chromosome specific sets later
countsFile = p.countsFile()
exonFile = p.exonFile()
junctionFile = p.junctionFile()
junctionCompFile = p.junctionCompFile()
novelJunctionAnnotations = p.novelJunctionAnnotations()
#chromosomes = ['chrMT']
#p('chrY'); p('chr1'); p('chr2')
#chromosomes = ['chr8','chr17']
multiprocessing_pipe = True
if 'exp.' not in dataset_name:
dataset_name = 'exp.'+dataset_name
if '.txt' not in dataset_name:
dataset_name+='.txt'
try:
mlp=Multi
pool_size = mlp.cpu_count()
print 'Using %d processes' % pool_size
if multiprocessing_pipe and multiThreading:
### This is like pool, but less efficient (needed to get print outs)
s = pool_size; b=0
chr_blocks=[]
while s<len(chromosomes):
chr_blocks.append(chromosomes[b:s])
b+=pool_size; s+=pool_size
chr_blocks.append(chromosomes[b:s])
queue = mlp.Queue()
results=[]
#parent_conn, child_conn=multiprocessing.Pipe()
for chromosomes in chr_blocks:
procs=list()
#print 'Block size:',len(chromosomes)
for search_chr in chromosomes:
proc = mlp.Process(target=p, args=(queue,search_chr)) ### passing sys.stdout unfortunately doesn't work to pass the Tk string
procs.append(proc)
proc.start()
for _ in procs:
val = queue.get()
if p.AnalysisMode() == 'GUI': print '*',
results.append(val)
for proc in procs:
proc.join()
elif multiThreading:
pool = mlp.Pool(processes=pool_size)
chr_vars=[]
for search_chr in chromosomes:
chr_vars.append(([],search_chr)) ### As an alternative to the pipe version above, pass an empty list rather than a queue
results = pool.map(p, chr_vars) ### worker jobs initiated in tandem
try:pool.close(); pool.join(); pool = None
except Exception: pass
else:
forceThreadingError ### intentionally undefined name: raises NameError so the single-processor fallback below is used
print 'Read exon and junction mapping complete'
except Exception,e:
#print e
print 'Proceeding with the single-processor version of the alignment...'
try: proc.close; proc.join; proc = None
except Exception: pass
try: pool.close(); pool.join(); pool = None
except Exception: pass
results=[] ### For single-thread compatible versions of Python
for search_chr in chromosomes:
result = p([],search_chr)
results.append(result)
results_organized=[]
for result_set in results:
if len(result_set[0])>0: ### Sometimes chromosomes are missing
biotypes = result_set[0]
results_organized.append(list(result_set[1:]))
pooled_results = [sum(value) for value in zip(*results_organized)] # combine these counts
pooled_results = [biotypes]+pooled_results
p.setCountsOverview(pooled_results) # store as retrievable objects
catFiles(countsFile,'Counts')
catFiles(junctionFile,'junctions')
catFiles(exonFile,'exons')
catFiles(junctionCompFile,'comps')
catFiles(novelJunctionAnnotations,'denovo')
if normalize_feature_exp == 'RPKM':
fastRPKMCalculate(countsFile)
rnaseq_end_time = time.time()
print '...RNA-seq import completed in',int(rnaseq_end_time-rnaseq_begin_time),'seconds\n'
biotypes = p.outputResults()
return biotypes
def alignCoordinatesToGeneExternal(species,coordinates_to_annotate):
chr_strand_gene_dbs,location_gene_db,chromosomes,gene_location_db = getChromosomeStrandCoordinates(species,'no')
read_aligned_to_gene=0
for (chr,strand) in coordinates_to_annotate:
if (chr,strand) in chr_strand_gene_dbs:
chr_gene_locations = chr_strand_gene_dbs[chr,strand]
chr_reads = coordinates_to_annotate[chr,strand]
chr_gene_locations.sort(); chr_reads.sort()
### Set GeneID for each coordinate object (primary and secondary GeneIDs)
read_aligned_to_gene=geneAlign(chr,chr_gene_locations,location_gene_db,chr_reads,'no',read_aligned_to_gene)
### Gene objects will be updated
def catFiles(outFileDir,folder):
""" Concatenate all the chromosomal files but retain only the first header """
root_dir = export.findParentDir(outFileDir)+folder+'/'
dir_list = read_directory(root_dir)
firstFile=True
with open(filepath(outFileDir), 'w') as outfile:
for fname in dir_list:
chr_file = root_dir+fname
header=True
with open(filepath(chr_file)) as infile:
for line in infile:
if header:
header=False
if firstFile:
outfile.write(line)
firstFile=False
else: outfile.write(line)
export.deleteFolder(root_dir)
def error(msg, *args):
return multiprocessing.get_logger().error(msg, *args)
class AlignExonsAndJunctionsToEnsembl:
def setCountsOverview(self, overview):
self.biotypes_store, self.known_count, self.novel_junction_count, self.trans_splicing_reads, self.junctions_without_exon_gene_alignments, self.exons_without_gene_alignment_count = overview
def getChromosomes(self):
chr_list=list()
for c in self.chromosomes:
### Sort chromosome by int number
ci=string.replace(c,'chr','')
try: ci = int(ci)
except Exception: pass
chr_list.append((ci,c))
chr_list.sort()
chr_list2=list()
for (i,c) in chr_list: chr_list2.append(c) ### sorted
return chr_list2
def countsFile(self):
return string.replace(self.expfile,'exp.','counts.')
def junctionFile(self):
junction_file = self.root_dir+'AltDatabase/'+self.species+'/RNASeq/'+self.species + '_Ensembl_junctions.txt'
return junction_file
def exonFile(self):
exon_file = self.root_dir+'AltDatabase/'+self.species+'/RNASeq/'+self.species + '_Ensembl_exons.txt'
return exon_file
def junctionCompFile(self):
junction_comp_file = self.root_dir+'AltDatabase/'+self.species+'/RNASeq/'+self.species + '_junction_comps_updated.txt'
return junction_comp_file
def novelJunctionAnnotations(self):
junction_annotation_file = self.root_dir+'AltDatabase/ensembl/'+self.species+'/'+self.species + '_alternative_junctions_de-novo.txt'
return junction_annotation_file
def AnalysisMode(self): return self.analysisMode
def __init__(self,species,exp_file_location_db,dataset_name,testImport):
self.species = species; self.dataset_name = dataset_name
self.testImport = testImport
fl = exp_file_location_db[dataset_name]
bed_dir=fl.BEDFileDir()
root_dir=fl.RootDir()
#self.stdout = fl.STDOUT()
try: platformType = fl.PlatformType()
except Exception: platformType = 'RNASeq'
try: analysisMode = fl.AnalysisMode()
except Exception: analysisMode = 'GUI'
### This occurs when run using the BAMtoBED pipeline in the GUI
if 'exp.' not in dataset_name:
dataset_name = 'exp.'+dataset_name
if '.txt' not in dataset_name:
dataset_name+='.txt'
self.dataset_name = dataset_name
### Import experimentally identified junction splice-sites
normalize_feature_exp = fl.FeatureNormalization()
if platformType == 'RNASeq':
chr_status = checkBEDFileFormat(bed_dir,root_dir) ### If false, need to remove 'chr' from the search_chr
else:
chr_status = True
#self.fl = fl # Can not pass this object in pool or it breaks
self.platformType = platformType
self.analysisMode = analysisMode
self.root_dir = root_dir
self.normalize_feature_exp = normalize_feature_exp
self.bed_dir = bed_dir
self.chr_status = chr_status
self.exonBedBuildStatus = fl.ExonBedBuildStatus()
self.expfile = root_dir+'ExpressionInput/'+dataset_name
if testImport == 'yes':
print 'Chromosome annotation detected =',chr_status
#if self.exonBedBuildStatus == 'yes':
reformatExonFile(species,'exon',chr_status) ### exports BED format exons for exon expression extraction
"""
Strategies to reduce memory in RNASeq:
1) (done)Delete old AltDatabase-local version if it exists before starting
2) (done)Check to see if a file exists before writing it and if so append rather than create
3) (done)Get counts last and normalize last, for exons and junctions separately.
4) (done)Delete objects explicitly before importing any new data (define a new function that just does this).
5) (done)Get all chromosomes first then parse exon and junction coordinate data on a per known chromosome basis.
6) (done)Prior to deleting all junction/exon object info for each chromosome, save the coordinate(key)-to-annotation information for the read count export file."""
### Delete any existing annotation databases that currently exist (redundant with below)
deleteOldAnnotations(species,root_dir,dataset_name)
###Define variables to report once reads for all chromosomes have been aligned
#global self.known_count; global self.novel_junction_count; global self.one_found; global self.not_found; global self.both_found; global self.trans_splicing_reads
#global self.junctions_without_exon_gene_alignments; global self.exons_without_gene_alignment_count; global self.junction_simple_db; global self.chr_strand_gene_dbs
self.known_count=0; self.novel_junction_count=0; self.one_found=0; self.not_found=0; self.both_found=0; self.trans_splicing_reads=0
self.junctions_without_exon_gene_alignments=0; self.exons_without_gene_alignment_count=0; self.junction_simple_db={}
###Begin Chromosome specific read to exon alignments
self.chr_strand_gene_dbs,self.location_gene_db,chromosomes,self.gene_location_db = getChromosomeStrandCoordinates(species,testImport)
self.chromosomes = chromosomes
print "Processing exon/junction coordinates sequentially by chromosome"
print "Note: this step is time intensive (can be hours) and no print statements may post for a while"
def outputResults(self):
exportDatasetLinkedGenes(self.species,self.gene_location_db,self.root_dir) ### Add an entry for each gene ID so constitutive expression is included for RPKM-normalized data
chr_gene_locations=[]; self.location_gene_db=[]; self.chr_strand_gene_dbs=[]
#print 'user coordinates imported/processed'
#print 'Importing read counts from coordinate data...'
biotypes = self.biotypes_store
### Output summary statistics
if self.normalize_feature_exp != 'none':
print self.normalize_feature_exp, 'normalization complete'
if 'junction' in biotypes:
print 'Imported Junction Statistics:'
print ' ',self.known_count, 'junctions found in Ensembl/UCSC and',self.novel_junction_count,'are novel'
print ' ',self.trans_splicing_reads,'trans-splicing junctions found (two aligning Ensembl genes)'
print ' ',self.junctions_without_exon_gene_alignments, 'junctions where neither splice-site aligned to a gene'
if (float(self.known_count)*10)<float(self.novel_junction_count):
print '\nWARNING!!!!! Few junctions aligned to known exons. Ensure that the AltAnalyze Ensembl database\nversion matches the genome build aligned to!\n'
if 'exon' in biotypes:
print 'Imported Exon Statistics:'
print ' ',self.exons_without_gene_alignment_count, 'exons where neither end aligned to a gene'
print 'User databases and read counts written to:', self.root_dir[:-1]+'ExpressionInput'
### END CHROMOSOME SPECIFIC ANALYSES
if self.exonBedBuildStatus == 'yes':
bedfile = moveBAMtoBEDFile(self.species,self.dataset_name,self.root_dir)
print 'Exon BED file updated with novel exon predictions from junction file'
return bedfile
clearObjectsFromMemory(self.junction_simple_db); self.junction_simple_db=[]
return biotypes
def test(self, search_chr):
print search_chr
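### Overview of __call__ below (one chromosome per invocation): import BED junction/exon coordinates, match
### junctions against known Ensembl/UCSC splice sites, assign novel junctions and exons to overlapping genes
### (including trans-splicing detection), infer reciprocal junction comparisons, export chromosome-specific
### exon/junction annotation and count files, and push summary statistics back through the multiprocessing
### queue (or return them directly when queue is a plain list).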
def __call__(self, queue, search_chr):
try:
#sys.stdout = self.stdout
platformType = self.platformType
testImport = self.testImport
species = self.species
dataset_name = self.dataset_name
platformType = self.platformType
analysisMode = self.analysisMode
root_dir = self.root_dir
normalize_feature_exp = self.normalize_feature_exp
bed_dir = self.bed_dir
chr_status = self.chr_status
junction_annotations={}
if chr_status == False:
searchchr = string.replace(search_chr,'chr','')
else:
searchchr = search_chr
if platformType == 'RNASeq':
junction_db,biotypes,algorithms = importBEDFile(bed_dir,root_dir,species,normalize_feature_exp,searchChr=searchchr,testImport=testImport)
else:
normalize_feature_exp = 'quantile'
junction_db,biotypes,algorithms = importExpressionMatrix(bed_dir,root_dir,species,fl,'no',search_chr=searchchr)
self.biotypes_store = biotypes
if len(junction_db)>0:
### Determine which kind of data is being imported, junctions, exons or both
unmapped_exon_db={}
if 'junction' in biotypes:
### Get all known junction splice-sites
ens_junction_coord_db = importExonAnnotations(species,'junction_coordinates',search_chr)
if testImport == 'yes':
print len(ens_junction_coord_db),'Ensembl/UCSC junctions imported'
### Identify known junction sites found in the experimental dataset (perfect match)
novel_junction_db={}; novel_exon_db={}
for key in junction_db:
ji=junction_db[key]
if ji.BioType()=='junction':
if key in ens_junction_coord_db:
jd=ens_junction_coord_db[key]
ji.setExonAnnotations(jd)
self.known_count+=1
else:
novel_junction_db[key]=junction_db[key]; self.novel_junction_count+=1
#if 75953254 in key: print key; sys.exit()
else:
unmapped_exon_db[key]=junction_db[key]
ens_exon_db = importExonAnnotations(species,'exon',search_chr)
if 'junction' in biotypes:
if testImport == 'yes':
print self.known_count, 'junctions found in Ensembl/UCSC and',len(novel_junction_db),'are novel.'
### Separate each junction into a 5' and 3' splice site (exon1_coord_db and exon2_coord_db)
exon1_coord_db={}; exon2_coord_db={}
for (chr,exon1_stop,exon2_start) in ens_junction_coord_db:
jd = ens_junction_coord_db[(chr,exon1_stop,exon2_start)]
exon1_coord_db[chr,exon1_stop] = jd.GeneID(),string.split(jd.ExonRegionIDs(),'-')[0]
exon2_coord_db[chr,exon2_start] = jd.GeneID(),string.split(jd.ExonRegionIDs(),'-')[1]
clearObjectsFromMemory(ens_junction_coord_db); ens_junction_coord_db=[] ### Clear object from memory
### Get and re-format individual exon info
exon_region_db={}
#if 'exon' not in biotypes:
for gene in ens_exon_db:
for rd in ens_exon_db[gene]:
exon_region_db[gene,rd.ExonRegionIDs()]=rd
### Add the exon annotations from the known junctions to the exons to export dictionary
exons_to_export={}
for key in junction_db:
ji=junction_db[key]
if ji.ExonAnnotations() != None:
jd = ji.ExonAnnotations()
exon1, exon2 = string.split(jd.ExonRegionIDs(),'-')
key1 = jd.GeneID(),exon1; key2 = jd.GeneID(),exon2
exons_to_export[key1] = exon_region_db[key1]
exons_to_export[key2] = exon_region_db[key2]
### For novel experimental junctions, identify those with at least one matching known 5' or 3' site
exons_not_identified = {}; novel_exon_coordinates={}
for (chr,exon1_stop,exon2_start) in novel_junction_db:
ji = novel_junction_db[(chr,exon1_stop,exon2_start)]
coord = [exon1_stop,exon2_start]; coord.sort()
if (chr,exon1_stop) in exon1_coord_db and (chr,exon2_start) in exon2_coord_db:
### Assign exon annotations to junctions where both splice-sites are known in Ensembl/UCSC
### Store the exon objects, genes and regions (le is a tuple of gene and exon region ID)
### Do this later for the below un-assigned exons
le=exon1_coord_db[(chr,exon1_stop)]; ji.setLeftExonAnnotations(le); ji.setLeftExonRegionData(exon_region_db[le])
re=exon2_coord_db[(chr,exon2_start)]; ji.setRightExonAnnotations(re); ji.setRightExonRegionData(exon_region_db[re])
if le[0] != re[0]: ### Indicates Trans-splicing (e.g., chr7:52,677,568-52,711,750 mouse mm9)
ji.setTransSplicing(); #print exon1_stop,le,exon2_start,re,ji.Chr(),ji.Strand()
self.both_found+=1; #print 'five',(chr,exon1_stop,exon2_start),exon1_coord_db[(chr,exon1_stop)]
else:
if (chr,exon1_stop) in exon1_coord_db: ### hence, exon1_stop is known, so report the coordinates of exon2 as novel
le=exon1_coord_db[(chr,exon1_stop)]; ji.setLeftExonAnnotations(le)
self.one_found+=1; #print 'three',(chr,exon1_stop,exon2_start),exon1_coord_db[(chr,exon1_stop)]
novel_exon_coordinates[ji.Chr(),exon2_start] = ji,'left',ji.Exon2Stop() ### Employ this strategy to avoid duplicate exons with differing lengths (mainly an issue when analyzing exon-only results)
ji.setNovel('side')
elif (chr,exon2_start) in exon2_coord_db: ### hence, exon2_start is known, so report the coordinates of exon1 as novel
re=exon2_coord_db[(chr,exon2_start)]; ji.setRightExonAnnotations(re) ### In very rare cases, a gene can be assigned here, even though the splice-site is on the opposite strand (not worthwhile filtering out)
self.one_found+=1; #print 'three',(chr,exon1_stop,exon2_start),exon1_coord_db[(chr,exon1_stop)]
novel_exon_coordinates[ji.Chr(),exon1_stop] = ji,'right',ji.Exon1Start()
ji.setNovel('side')
else:
self.not_found+=1; #if self.not_found < 10: print (chr,exon1_stop,exon2_start)
novel_exon_coordinates[ji.Chr(),exon1_stop] = ji,'right',ji.Exon1Start()
novel_exon_coordinates[ji.Chr(),exon2_start] = ji,'left',ji.Exon2Stop()
ji.setNovel('both')
### We examine reads where one splice-site aligns to a known site but the other does not, to determine if trans-splicing occurs
try: exons_not_identified[chr,ji.Strand()].append((coord,ji))
except KeyError: exons_not_identified[chr,ji.Strand()] = [(coord,ji)]
"""
if fl.ExonBedBuildStatus() == 'no':
exportNovelJunctions(species,novel_junction_db,condition_count_db,root_dir,dataset_name,'junction') ### Includes known exons
"""
#print self.both_found, ' where both and', self.one_found, 'where one splice-site are known out of',self.both_found+self.one_found+self.not_found
#print 'Novel junctions where both splice-sites are known:',self.both_found
#print 'Novel junctions where one splice-site is known:',self.one_found
#print 'Novel junctions where the splice-sites are not known:',self.not_found
clearObjectsFromMemory(exon_region_db); exon_region_db=[] ### Clear memory of this object
read_aligned_to_gene=0
for (chr,strand) in exons_not_identified:
if (chr,strand) in self.chr_strand_gene_dbs:
chr_gene_locations = self.chr_strand_gene_dbs[chr,strand]
chr_reads = exons_not_identified[chr,strand]
chr_gene_locations.sort(); chr_reads.sort()
### Set GeneID for each coordinate object (primary and secondary GeneIDs)
read_aligned_to_gene=geneAlign(chr,chr_gene_locations,self.location_gene_db,chr_reads,'no',read_aligned_to_gene)
#print read_aligned_to_gene, 'novel junctions aligned to Ensembl genes out of',self.one_found+self.not_found
clearObjectsFromMemory(exons_not_identified); exons_not_identified=[] ## Clear memory of this object
for key in novel_junction_db:
(chr,exon1_stop,exon2_start) = key
ji=novel_junction_db[key]
if ji.GeneID() == None:
try:
if ji.SecondaryGeneID() != None:
### Occurs if mapping is to the 5'UTR of a gene for the left splice-site (novel alternative promoter)
ji.setGeneID(ji.SecondaryGeneID()); ji.setSecondaryGeneID(''); #print key, ji.GeneID(), ji.Strand(), ji.SecondaryGeneID()
except Exception: null=[]
if ji.GeneID() != None:
geneid = ji.GeneID()
proceed = 'no'
if ji.SpliceSitesFound() == None: proceed = 'yes'; coordinates = [exon1_stop,exon2_start]
elif ji.SpliceSitesFound() == 'left': proceed = 'yes'; coordinates = [exon1_stop,exon2_start]
elif ji.SpliceSitesFound() == 'right': proceed = 'yes'; coordinates = [exon1_stop,exon2_start]
if proceed == 'yes':
for coordinate in coordinates:
if ji.TransSplicing() == 'yes':
#print ji.Chr(),ji.GeneID(), ji.SecondaryGeneID(), ji.Exon1Stop(), ji.Exon2Start()
self.trans_splicing_reads+=1
if ji.checkExonPosition(coordinate) == 'right': geneid = ji.SecondaryGeneID()
if abs(exon2_start-exon1_stop)==1: eventType = 'novel-exon-intron' ### Indicates intron-exon boundary (intron retention)
else: eventType = 'novel'
exon_data = (coordinate,ji.Chr()+'-'+str(coordinate),eventType)
try: novel_exon_db[geneid].append(exon_data)
except KeyError: novel_exon_db[geneid] = [exon_data]
else:
### write these out
self.junctions_without_exon_gene_alignments+=1
### Remove redundant exon entries and store objects
for key in novel_exon_db:
exon_data_objects=[]
exon_data_list = unique.unique(novel_exon_db[key])
exon_data_list.sort()
for e in exon_data_list:
ed = ExonInfo(e[0],e[1],e[2])
exon_data_objects.append(ed)
novel_exon_db[key] = exon_data_objects
#print self.trans_splicing_reads,'trans-splicing junctions found (two aligning Ensembl genes).'
#print self.junctions_without_exon_gene_alignments, 'junctions where neither splice-site aligned to a gene'
#if 'X' in search_chr: print len(ens_exon_db),len(ens_exon_db['ENSMUSG00000044424'])
alignReadsToExons(novel_exon_db,ens_exon_db,testImport=testImport)
### Link exon annotations up with novel junctions
junction_region_db,exons_to_export = annotateNovelJunctions(novel_junction_db,novel_exon_db,exons_to_export)
### Add the exon region data from known Ensembl/UCSC matched junctions to junction_region_db for reciprocal junction analysis
for key in junction_db:
ji=junction_db[key]; jd = ji.ExonAnnotations()
try:
uid = jd.GeneID()+':'+jd.ExonRegionIDs(); ji.setUniqueID(uid)
try: junction_region_db[jd.GeneID()].append((formatID(uid),jd.ExonRegionIDs()))
except KeyError: junction_region_db[jd.GeneID()] = [(formatID(uid),jd.ExonRegionIDs())]
except AttributeError: null=[] ### Occurs since not all entries in the dictionary are perfect junction matches
try: novel_exon_coordinates = collapseNoveExonBoundaries(novel_exon_coordinates,root_dir+dataset_name) ### Joins inferred novel exon-IDs (5' and 3' splice sites) from adjacent and close junction predictions
except Exception: pass ### No errors encountered before
#if self.exonBedBuildStatus == 'yes':
### Append to the exported BED format exon coordinate file
bedfile = exportNovelExonToBedCoordinates(species,novel_exon_coordinates,chr_status,searchChr=searchchr)
### Identify reciprocal junctions and retrieve splice-event annotations for exons and inclusion junctions
junction_annotations,critical_exon_annotations = JunctionArray.inferJunctionComps(species,('RNASeq',junction_region_db,root_dir),searchChr=searchchr)
clearObjectsFromMemory(junction_region_db); junction_region_db=[]
### Reformat these dictionaries to combine annotations from multiple reciprocal junctions
junction_annotations = combineExonAnnotations(junction_annotations)
critical_exon_annotations = combineExonAnnotations(critical_exon_annotations)
if 'exon' in biotypes:
if testImport == 'yes':
print len(unmapped_exon_db),'exon genomic locations imported.'
### Create a new dictionary keyed by chromosome and strand
exons_not_aligned={}
for (chr,exon1_stop,exon2_start) in unmapped_exon_db:
ji = unmapped_exon_db[(chr,exon1_stop,exon2_start)]
coord = [exon1_stop,exon2_start]; coord.sort()
try: exons_not_aligned[chr,ji.Strand()].append((coord,ji))
except KeyError: exons_not_aligned[chr,ji.Strand()] = [(coord,ji)]
read_aligned_to_gene=0
for (chr,strand) in exons_not_aligned:
if (chr,strand) in self.chr_strand_gene_dbs:
chr_gene_locations = self.chr_strand_gene_dbs[chr,strand]
chr_reads = exons_not_aligned[chr,strand]
chr_gene_locations.sort(); chr_reads.sort()
read_aligned_to_gene=geneAlign(chr,chr_gene_locations,self.location_gene_db,chr_reads,'no',read_aligned_to_gene)
#print read_aligned_to_gene, 'exons aligned to Ensembl genes out of',self.one_found+self.not_found
align_exon_db={}; exons_without_gene_alignments={}; multigene_exon=0
for key in unmapped_exon_db:
(chr,exon1_stop,exon2_start) = key
ji=unmapped_exon_db[key]
if ji.GeneID() == None:
try:
if ji.SecondaryGeneID() != None:
### Occurs if mapping outside known exon boundaries for one side of the exon
ji.setGeneID(ji.SecondaryGeneID()); ji.setSecondaryGeneID(''); #print key, ji.GeneID(), ji.Strand(), ji.SecondaryGeneID()
except Exception: null=[]
else:
if 'ENS' in ji.JunctionID():
if ji.GeneID() not in ji.JunctionID(): ### Hence, there were probably two overlapping Ensembl genes and the wrong one was assigned based on the initial annotations
original_geneid = string.split(ji.JunctionID(),':')[0]
if original_geneid in ens_exon_db: ji.setGeneID(original_geneid) #check if in ens_exon_db (since chromosome specific)
if ji.GeneID() != None:
geneid = ji.GeneID()
coordinates = [exon1_stop,exon2_start]
for coordinate in coordinates:
if ji.TransSplicing() != 'yes': ### This shouldn't occur for exons
exon_data = (coordinate,ji.Chr()+'-'+str(coordinate),'novel')
try: align_exon_db[geneid].append(exon_data)
except KeyError: align_exon_db[geneid] = [exon_data]
else:
multigene_exon+=1 ### Shouldn't occur due to a fix in the gene-alignment method, which will find the correct gene on the 2nd iteration
else: exons_without_gene_alignments[key]=ji; self.exons_without_gene_alignment_count+=1
### Remove redundant exon entries and store objects (this step may be unnecessary)
for key in align_exon_db:
exon_data_objects=[]
exon_data_list = unique.unique(align_exon_db[key])
exon_data_list.sort()
for e in exon_data_list:
ed = ExonInfo(e[0],e[1],e[2])
exon_data_objects.append(ed)
align_exon_db[key] = exon_data_objects
#print self.exons_without_gene_alignment_count, 'exons where neither aligned to a gene'
#if self.exons_without_gene_alignment_count>3000: print 'NOTE: Poor mapping of these exons may be due to an older build of\nEnsembl than the current version. Update BAMtoBED mappings to correct.'
begin_time = time.time()
alignReadsToExons(align_exon_db,ens_exon_db)
end_time = time.time()
if testImport == 'yes':
print 'Exon sequences aligned to exon regions in',int(end_time-begin_time),'seconds'
### Combine the start and end region alignments into a single exon annotation entry
combineDetectedExons(unmapped_exon_db,align_exon_db,novel_exon_db)
clearObjectsFromMemory(unmapped_exon_db); clearObjectsFromMemory(align_exon_db); clearObjectsFromMemory(novel_exon_db)
unmapped_exon_db=[]; align_exon_db=[]; novel_exon_db=[]
"""
if fl.ExonBedBuildStatus() == 'no':
exportNovelJunctions(species,exons_without_gene_alignments,condition_count_db,root_dir,dataset_name,'exon') ### Includes known exons
"""
clearObjectsFromMemory(exons_without_gene_alignments); exons_without_gene_alignments=[]
### Export both exon and junction annotations
if 'junction' in biotypes:
### Export the novel user exon annotations
exportDatasetLinkedExons(species,exons_to_export,critical_exon_annotations,root_dir,testImport=testImport,searchChr=searchchr)
### Export the novel user exon-junction annotations (original junction_db objects updated by above processing)
exportDatasetLinkedJunctions(species,junction_db,junction_annotations,root_dir,testImport=testImport,searchChr=searchchr)
### Clear memory once results are exported (don't want to delete actively used objects)
if 'junction' in biotypes:
clearObjectsFromMemory(exons_to_export); clearObjectsFromMemory(critical_exon_annotations)
clearObjectsFromMemory(novel_junction_db); novel_junction_db=[]
clearObjectsFromMemory(novel_exon_coordinates); novel_exon_coordinates=[]
exons_to_export=[]; critical_exon_annotations=[]
clearObjectsFromMemory(exon1_coord_db); clearObjectsFromMemory(exon2_coord_db)
exon1_coord_db=[]; exon2_coord_db=[]
if 'exon' in biotypes:
clearObjectsFromMemory(exons_not_aligned); exons_not_aligned=[]
clearObjectsFromMemory(ens_exon_db); ens_exon_db=[]
### Add chromosome-specific junction_db data to a simple whole genome dictionary
for key in junction_db:
ji = junction_db[key]
if ji.GeneID()!=None and ji.UniqueID()!=None: self.junction_simple_db[key]=ji.UniqueID()
#returnLargeGlobalVars()
clearObjectsFromMemory(junction_db); clearObjectsFromMemory(junction_annotations)
junction_db=[]; junction_annotations=[]; chr_reads=[]
for biotype in biotypes:
### Import Read Counts (do this last to conserve memory)
if platformType == 'RNASeq':
condition_count_db,exon_len_db,biotypes2,algorithms = importBEDFile(bed_dir,root_dir,species,normalize_feature_exp,getReads=True,searchChr=searchchr,getBiotype=biotype,testImport=testImport,filteredJunctions=self.junction_simple_db)
else:
condition_count_db,exon_len_db,biotypes2,algorithms = importExpressionMatrix(bed_dir,root_dir,species,fl,'yes',getBiotype=biotype)
###First export original counts, rather than quantile normalized or RPKM
self.exportJunctionCounts(species,self.junction_simple_db,exon_len_db,condition_count_db,root_dir,dataset_name,biotype,'counts',searchChr=searchchr)
clearObjectsFromMemory(condition_count_db); clearObjectsFromMemory(exon_len_db); condition_count_db=[]; exon_len_db=[]
if analysisMode == 'commandline':
print 'finished parsing data for chromosome:',search_chr ### Unix platforms are not displaying the progress in real-time
else:
pass #print "*",
try: queue.put([self.biotypes_store, self.known_count, self.novel_junction_count, self.trans_splicing_reads, self.junctions_without_exon_gene_alignments, self.exons_without_gene_alignment_count])
except Exception:
### If queue is not a multiprocessing object
queue = [self.biotypes_store, self.known_count, self.novel_junction_count, self.trans_splicing_reads, self.junctions_without_exon_gene_alignments, self.exons_without_gene_alignment_count]
return queue
except Exception:
print traceback.format_exc()
error(traceback.format_exc())
multiprocessing.log_to_stderr().setLevel(logging.DEBUG)
raise
def exportJunctionCounts(self,species,junction_simple_db,exon_len_db,condition_count_db,root_dir,dataset_name,biotype,count_type,searchChr=None):
if 'exp.' not in dataset_name: dataset_name = 'exp.'+dataset_name
if '.txt' not in dataset_name: dataset_name+='.txt'
export_path = root_dir+'ExpressionInput/'+dataset_name
if count_type == 'counts':
export_path = string.replace(export_path,'exp.','counts.') ### separately export counts
if searchChr !=None:
export_path = string.replace(export_path,'ExpressionInput','ExpressionInput/Counts')
export_path = string.replace(export_path,'.txt','.'+searchChr+'.txt')
self.countsFile = export_path
if self.testImport == 'yes':
print 'Writing',export_path
export_data,status = AppendOrWrite(export_path)
if status == 'not found':
title = ['AltAnalyze_ID']
for condition in condition_count_db: title.append(condition)
export_data.write(string.join(title,'\t')+'\n')
for key in self.junction_simple_db:
chr,exon1_stop,exon2_start = key
if biotype == 'junction':
coordinates = chr+':'+str(exon1_stop)+'-'+str(exon2_start)
elif biotype == 'exon':
coordinates = chr+':'+str(exon1_stop-1)+'-'+str(exon2_start+1)
try:
null=exon_len_db[key]
if count_type == 'counts': values = [self.junction_simple_db[key]+'='+coordinates]
else: values = [self.junction_simple_db[key]]
for condition in condition_count_db: ###Memory crash here
count_db = condition_count_db[condition]
try: read_count = count_db[key]
except KeyError: read_count = '0'
values.append(read_count)
export_data.write(string.join(values,'\t')+'\n')
except Exception: null=[]
export_data.close()
def countsDir(self):
return self.countsFile
def calculateRPKMsFromGeneCounts(filename,species,AdjustExpression):
""" Manual way of calculating gene RPKMs from gene counts only """
gene_lengths = getGeneExonLengths(species)
fastRPKMCalculate(filename,GeneLengths=gene_lengths,AdjustExpression=AdjustExpression)
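### For reference, fastRPKMCalculate (below) makes two passes over the counts file: the first pass totals reads
### per sample separately for junctions ('-' in the feature ID) and exons; the second pass writes
### RPKM = 1e9 * (count + offset) / (total_reads * length), where length comes from the feature coordinates
### (or from GeneLengths for gene-level counts) and is fixed at 60 for junctions, and a count of 1 is normalized
### against the average total reads so zero-count comparisons remain meaningful on a log2 scale.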
def fastRPKMCalculate(counts_file,GeneLengths=None,AdjustExpression=True):
export_path = string.replace(counts_file,'counts.','exp.')
export_data = export.ExportFile(export_path) ### Write this new file
fn=filepath(counts_file); header=True
exon_sum_array=[]; junction_sum_array=[]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if header:
samples = t[1:]
header=False
exon_sum_array=[0]*len(samples)
junction_sum_array=[0]*len(samples)
else:
try: values = map(float,t[1:])
except Exception:
print traceback.format_exc()
print t
badCountsLine ### intentionally undefined name: raises NameError to halt on a malformed counts line
### get the total reads/sample
if '-' in string.split(t[0],'=')[0]:
junction_sum_array = [sum(value) for value in zip(*[junction_sum_array,values])]
else:
exon_sum_array = [sum(value) for value in zip(*[exon_sum_array,values])]
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=RuntimeWarning) ### hides warnings associated with Scipy for n=1 sample comparisons
jatr=Average(junction_sum_array) # Average of the total mapped junction reads
eatr=Average(exon_sum_array) # Average of the total mapped exon reads
if AdjustExpression:
offset = 1
else:
offset = 0
header=True
c=math.pow(10.0,9.0)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if header:
export_data.write(line) ### Write header
header=False
else:
try:
exon_id,coordinates = string.split(t[0],'=')
coordinates = string.split(coordinates,':')[1]
coordinates = string.split(coordinates,'-')
l=abs(int(coordinates[1])-int(coordinates[0])) ### feature (exon region) length
except Exception: ### Manual way of calculating gene RPKMs from gene counts only
exon_id = t[0]
try: l = GeneLengths[exon_id]
except Exception: continue #Occurs when Ensembl genes supplied from an external analysis
try: read_counts = map(lambda x: int(x)+offset, t[1:])
except Exception: read_counts = map(lambda x: int(float(x))+offset, t[1:])
if '-' in exon_id:
count_stats = zip(read_counts,junction_sum_array)
atr = jatr
l=60 ### fixed junction length (see calculateGeneRPKM)
else:
count_stats = zip(read_counts,exon_sum_array)
atr = eatr
values=[]
#rpkm = map(lambda (r,t): c*(r/(t*l)), count_stats) ### Efficient way to convert to rpkm, but doesn't work for 0 counts
for (r,t) in count_stats:
if r == 1: ### This adjustment yields more realistic folds when zero counts are compared and log2 is used
t = atr
try:
rpkm = str(c*(r/(t*l)))
#print c,r,t,l,exon_id,rpkm;sys.exit()
values.append(rpkm)
except Exception,e:
print e
print t[0]
print 'Error Encountered... Exon or Junction of zero length encountered... RPKM failed... Exiting AltAnalyze.'
print 'This error may be due to inconsistent file naming. If both exon and junction sample data are present, make sure they are named properly.'
print 'For example: cancer1__exon.bed, cancer1__junction.bed (double underscore required to match these samples up)!'
print [r,t,l];k=1; forceError ### intentionally undefined name: raises NameError to abort
values = string.join([exon_id]+values,'\t')+'\n'
export_data.write(values)
export_data.close()
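### Usage sketch (hypothetical file name): fastRPKMCalculate('counts.MyDataset.txt') writes the RPKM values to a
### sibling 'exp.MyDataset.txt', since the export path is derived by replacing the 'counts.' prefix with 'exp.'.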
def mergeCountFiles(counts_file1,counts_file2):
### Used internally to merge count files that are very large and too time-consuming to regenerate
export_path = string.replace(counts_file2,'counts.','temp-counts.')
export_data = export.ExportFile(export_path) ### Write this new file
fn=filepath(counts_file1); header=True
count_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if header:
samples = t[1:]
header=False
si = samples.index('H9.102.2.5.bed')+1
else:
try: value = t[si]
except Exception: print t; sys.exit()
### get the total reads/sample
count_db[t[0]] = value
fn=filepath(counts_file2); header=True
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if header:
samples = t[1:]
header=False
si = samples.index('H9.102.2.5.bed')+1
export_data.write(line)
else:
try: t[si] = count_db[t[0]]
except Exception: pass ### keep the current value
export_data.write(string.join(t,'\t')+'\n')
export_data.close()
def getGeneExonLengths(species):
gene_lengths={}
filename = 'AltDatabase/'+species+'/RNASeq/'+species+'_Ensembl_exons.txt'
fn=filepath(filename)
firstLine=True
for line in open(fn,'rU').xreadlines():
line = line.rstrip('\n')
if firstLine:
firstLine=False
else:
t = string.split(line,'\t')
geneID = t[2]; start = int(t[6]); end = int(t[7]); exonID = t[1]
if 'E' in exonID:
try: gene_lengths[geneID]+=abs(end-start)
except Exception: gene_lengths[geneID]=abs(end-start)
return gene_lengths
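### For reference, the gene length returned above is the sum of all annotated exon-region ('E') lengths for the
### gene; calculateRPKMsFromGeneCounts passes this dictionary to fastRPKMCalculate when only gene-level counts
### (with no per-feature coordinates) are available.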
def importRawCountData(filename,expressed_gene_exon_db,excludeLowExp=True):
""" Identifies exons or junctions to evaluate gene-level expression. This function, as it is currently written:
1) examines the RPKM and original read counts associated with all exons
2) removes exons/junctions that do not meet their respective RPKM AND read count cutoffs
3) returns ONLY those exons and genes deemed expressed, whether constitutive selected or all exons
"""
### Get expression values for exon/junctions to analyze
seq_ids_to_import={}
for gene in expressed_gene_exon_db:
for exonid in expressed_gene_exon_db[gene]: seq_ids_to_import[exonid]=[]
### Define thresholds
exon_exp_threshold = UserOptions.ExonExpThreshold()
junction_exp_threshold = UserOptions.JunctionExpThreshold()
exon_rpkm_threshold = UserOptions.ExonRPKMThreshold()
gene_rpkm_threshold = UserOptions.RPKMThreshold()
gene_exp_threshold = UserOptions.GeneExpThreshold()
### Import RPKM normalized expression values
fn=filepath(filename); x=0; rpkm_dbase={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: array_names = t[1:]; x=1
else:
exon_id=t[0]
max_count=max(map(float,t[1:]))
if max_count>=exon_rpkm_threshold or excludeLowExp==False: rpkm_dbase[exon_id]=[] ### Only retain exons/junctions meeting the RPKM threshold
### Import non-normalized original counts
counts_filename = string.replace(filename,'exp.','counts.')
fn=filepath(counts_filename); x=0; exp_dbase={}
all_exp_features={} ### Don't filter for only gene-expression reporting
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: array_names = t[1:]; x=1
else:
exon_id,coordinates = string.split(t[0],'=')
coordinates = string.split(coordinates,':')[1]
coordinates = string.split(coordinates,'-')
length=abs(int(coordinates[1])-int(coordinates[0]))
max_count=max(map(float,t[1:])); proceed = 'no'
if '-' in exon_id:
length = 60.0
if max_count>=junction_exp_threshold or excludeLowExp==False:
### Only considered when exon data is not present in the analysis
proceed = 'yes'
elif max_count>=exon_exp_threshold or excludeLowExp==False: proceed = 'yes'
if proceed == 'yes' and exon_id in rpkm_dbase: ### Ensures that the maximum sample (not group) user defined count threshold is achieved at the exon or junction-level
all_exp_features[exon_id]=None
if exon_id in seq_ids_to_import:### Forces an error if not in the steady-state pre-determined set (CS or all-exons) - INCLUDE HERE TO FILTER ALL FEATURES
exp_dbase[exon_id] = t[1:],length ### Include sequence length for normalization
for exon in exp_dbase: array_count = len(exp_dbase[exon][0]); break
try:null=array_count
except Exception:
print 'No exons or junctions considered expressed (based on user thresholds). Exiting analysis.'; force_exit ### intentionally undefined name: raises NameError to halt
return exp_dbase, all_exp_features, array_count
def importNormalizedCountData(filename,expressed_gene_exon_db):
### Get expression values for exon/junctions to analyze
seq_ids_to_import={}
for gene in expressed_gene_exon_db:
for exonid in expressed_gene_exon_db[gene]: seq_ids_to_import[exonid]=[]
### Define thresholds
exon_exp_threshold = UserOptions.ExonExpThreshold()
junction_exp_threshold = UserOptions.JunctionExpThreshold()
exon_rpkm_threshold = UserOptions.ExonRPKMThreshold()
gene_rpkm_threshold = UserOptions.RPKMThreshold()
gene_exp_threshold = UserOptions.GeneExpThreshold()
### Import non-normalized original counts
fn=filepath(filename); x=0; exp_dbase={}
all_exp_features={} ### Don't filter for only gene-expression reporting
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: array_names = t[1:]; x=1
else:
exon_id=t[0]; proceed = 'no'
max_count=max(map(float,t[1:]))
if '-' in exon_id:
if max_count>=junction_exp_threshold: proceed = 'yes'
elif max_count>=exon_exp_threshold: proceed = 'yes'
if proceed == 'yes': ### Ensures that the maximum sample (not group) user defined count threshold is achieved at the exon or junction-level
all_exp_features[exon_id]=None
if exon_id in seq_ids_to_import: ### If a "constitutive" or exon-level feature (filter missing prior to 2.0.8 - bug)
exp_dbase[exon_id] = t[1:],0 ### Add the zero just to comply with the raw count input format (indicates exon length)
for exon in exp_dbase: array_count = len(exp_dbase[exon][0]); break
return exp_dbase, all_exp_features, array_count
def obtainGeneCounts(expressed_gene_exon_db,species,exp_dbase,array_count,normalize_feature_exp,excludeLowExp=True):
###Calculate avg expression for each sample for each exon (using constitutive or all exon values)
if excludeLowExp == False:
gene_lengths = getGeneExonLengths(species)
steady_state_db={}
for gene in expressed_gene_exon_db:
x = 0; gene_sum=0
exon_list = expressed_gene_exon_db[gene]
while x < array_count:
exp_list=[]; len_list=[]
for exon in exon_list:
try:
exp_val = exp_dbase[exon][0][x]
if normalize_feature_exp == 'RPKM':
### Decided to include all exons, expressed or not, to prevent long but lowly expressed exons from biasing the expression call
#if float(exp_val) != 0: ### Here, we use the original raw count data, whereas above is the adjusted quantile or raw count data
exp_list.append(exp_val); len_list.append(exp_dbase[exon][1]) ### This is for RNASeq -> don't include undetected exons - made in v.204
else: exp_list.append(exp_val) #elif float(exp_val) != 1:
except KeyError: null =[] ###occurs if the expression exon list is missing some of these exons
try:
if len(exp_list)==0:
for exon in exon_list:
try:
exp_list.append(exp_dbase[exon][0][x]); len_list.append(exp_dbase[exon][1])
#kill
except KeyError: null=[] ### Gene entries will cause this error, since they are in the database but not in the count file
if normalize_feature_exp == 'RPKM':
sum_const_exp=sum(map(float,exp_list)); gene_sum+=sum_const_exp
sum_length=sum(len_list) ### can have different lengths for each sample, since only expressed exons are considered
if excludeLowExp == False:
sum_length = gene_lengths[gene] ### Uses the all annotated exon lengths
### Append only one (summed) expression value for each array in this loop
try: steady_state_db[gene].append((sum_const_exp,sum_length))
except KeyError: steady_state_db[gene] = [(sum_const_exp,sum_length)]
else:
avg_const_exp=Average(exp_list)
if avg_const_exp != 1: gene_sum+=avg_const_exp
### Append only one avg-expression value for each array in this loop
try: steady_state_db[gene].append(avg_const_exp)
except KeyError: steady_state_db[gene] = [avg_const_exp]
except Exception: null=[] ### Occurs when processing a truncated dataset (for testing usually) - no values for the gene should be included
x += 1
if gene_sum==0:
try:
del steady_state_db[gene] ### Hence, this gene showed no evidence of expression (most critical for RNA-Seq)
except Exception: null=[] ### Error occurs when a gene is added to the database from self.location_gene_db, but is not expressed
return steady_state_db
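### For reference: with RPKM normalization, steady_state_db maps each gene to one (summed_counts, summed_length)
### tuple per sample, which calculateGeneRPKM (defined earlier in this module) converts to gene-level RPKMs;
### for other normalizations the stored per-sample value is simply the average feature expression.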
def returnLargeGlobalVars():
### Prints all large global variables retained in memory (taking up space)
all = [var for var in globals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(globals()[var])>1:
print var, len(globals()[var])
except Exception: null=[]
def clearObjectsFromMemory(db_to_clear):
db_keys={}
for key in db_to_clear: db_keys[key]=[]
for key in db_keys:
try: del db_to_clear[key]
except Exception:
try:
for i in key: del i ### For lists of tuples
except Exception: del key ### For plain lists
def verifyFile(filename):
status = 'not found'
try:
fn=filepath(filename)
for line in open(fn,'rU').xreadlines(): status = 'found';break
except Exception: status = 'not found'
return status
def AppendOrWrite(export_path):
export_path = filepath(export_path)
status = verifyFile(export_path)
if status == 'not found':
export_data = export.ExportFile(export_path) ### Write this new file
else:
export_data = open(export_path,'a') ### Appends to existing file
return export_data, status
def quantileNormalizationSimple(condition_count_db):
### Basic quantile normalization method (average ranked expression values)
### Get all junction or exon entries
key_db={}
for condition in condition_count_db:
count_db = condition_count_db[condition]
for key in count_db: key_db[key]=[]
condition_unnormalized_db={}
for key in key_db:
### Only look at the specific biotype of interest for each normalization
for condition in condition_count_db:
count_db = condition_count_db[condition]
try:
count = float(count_db[key])+1 ### This adjustment yields more realistic folds when zero counts are compared and log2 is used
count_db[key] = [] ### Set equal to null as a temporary measure to save memory
except KeyError: count = 1.00 ###Was zero, but needs to be one for more realistic log2 fold calculations
### store the minimal information to recover the original count and ID data prior to quantile normalization
try: condition_unnormalized_db[condition].append([count,key])
except Exception: condition_unnormalized_db[condition]=[[count,key]]
quantile_normalize_db={}; key_db={}
for condition in condition_unnormalized_db:
condition_unnormalized_db[condition].sort() ### Sort lists by count number
rank=0 ### thus, the ID is the rank order of counts
for (count,key) in condition_unnormalized_db[condition]:
try: quantile_normalize_db[rank].append(count)
except KeyError: quantile_normalize_db[rank] = [count]
rank+=1
### Get the average value for each index
for rank in quantile_normalize_db:
quantile_normalize_db[rank] = Average(quantile_normalize_db[rank])
for condition in condition_unnormalized_db:
rank=0
count_db = condition_count_db[condition]
for (count,key) in condition_unnormalized_db[condition]:
avg_count = quantile_normalize_db[rank]
rank+=1
count_db[key] = str(avg_count) ### re-set this value to the normalized value
try:
clearObjectsFromMemory(condition_unnormalized_db); condition_unnormalized_db = []
clearObjectsFromMemory(quantile_normalize_db); quantile_normalize_db = []
except Exception: None
return condition_count_db
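### Illustrative example with hypothetical counts: given two conditions {A:5, B:2, C:9} and {A:4, B:1, C:8},
### the +1 offset gives (6,3,10) and (5,2,9); sorting each condition and averaging by rank yields 2.5, 5.5 and
### 9.5, and each feature is re-assigned the averaged value for its rank within its own condition
### (B -> 2.5, A -> 5.5, C -> 9.5 in both conditions).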
def combineExonAnnotations(db):
for i in db:
list1=[]; list2=[]
for (junctions,splice_event) in db[i]:
list1.append(junctions); list2.append(splice_event)
junctions = EnsemblImport.combineAnnotations(list1)
splice_event = EnsemblImport.combineAnnotations(list2)
db[i] = junctions,splice_event
return db
def formatID(id):
### JunctionArray methods handle IDs containing ":" differently than those that lack it
return string.replace(id,':','@')
def filterChromosomes(chromosome_names):
### If the transcriptome was aligned only to the Ensembl reference, many chromosome names are not real chromosomes
updated_chromosomes=[]
chr_count=0
for chr in chromosome_names:
if 'chr' in chr and len(chr)<7:
chr_count+=1
updated_chromosomes.append(chr)
if chr_count>1:
return updated_chromosomes
else:
return chromosome_names
def getChromosomeStrandCoordinates(species,testImport):
### For novel junctions with no known-splice site, map to genes
gene_location_db = EnsemblImport.getEnsemblGeneLocations(species,'RNASeq','key_by_array')
chr_strand_gene_db = {}; location_gene_db = {}; chromosome_names={}; all_chromosomes={}
for gene in gene_location_db:
chr,strand,start,end = gene_location_db[gene]
location_gene_db[chr,int(start),int(end)] = gene,strand
try: chr_strand_gene_db[chr,strand].append((int(start),int(end)))
except KeyError: chr_strand_gene_db[chr,strand] = [(int(start),int(end))]
if testImport == 'yes':
if chr=='chr1': chromosome_names[chr]=[]
#if chr=='chr19': chromosome_names[chr]=[] ### Gene rich chromosome
#if chr=='chrMT': chromosome_names[chr]=[] ### Gene rich chromosome
elif len(chr)<7: chromosome_names[chr]=[]
all_chromosomes[chr]=[]
#chromosome_names = filterChromosomes(chromosome_names)
### Some organisms aren't organized into classical chromosomes (why I don't know)
if len(chromosome_names)<10 and len(all_chromosomes)>9 and testImport=='no': chromosome_names = all_chromosomes
return chr_strand_gene_db,location_gene_db,chromosome_names,gene_location_db
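### For reference, the (chr,strand)-keyed coordinate lists returned above are what geneAlign uses elsewhere in
### this module to assign novel junctions/exons to overlapping Ensembl genes; in test mode only chr1 is retained,
### and for assemblies lacking classical chromosome names all contigs are used instead.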
def exportDatasetLinkedExons(species,exons_to_export,critical_exon_annotations,root_dir,testImport=None,searchChr=None):
export_path = root_dir+'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_exons.txt'
if searchChr != None:
export_path = string.replace(export_path,'RNASeq/'+species,'RNASeq/exons/'+species)
export_path = string.replace(export_path,'.txt','.'+searchChr+'.txt')
if testImport == 'yes': print 'Writing',export_path
export_data,status = AppendOrWrite(export_path)
if status == 'not found':
export_title = ['AltAnalyzeID','exon_id','ensembl_gene_id','transcript_cluster_id','chromosome','strand','probeset_start','probeset_stop']
export_title +=['class','constitutive_probeset','ens_exon_ids','ens_constitutive_status','exon_region','exon-region-start(s)','exon-region-stop(s)','splice_events','splice_junctions']
export_title = string.join(export_title,'\t')+'\n'; export_data.write(export_title)
### We stored these in a dictionary to make sure each exon is written only once and so we can organize by gene
exons_to_export_list=[]
for key in exons_to_export:
ed = exons_to_export[key]
exons_to_export_list.append((key,ed))
exons_to_export_list.sort()
for (key,ed) in exons_to_export_list:
constitutive_call = 'no'; ens_constitutive_status = '0'
try:
red = ed.ExonRegionData()
exon_region = ed.ExonRegionID()
start = str(ed.ReadStart()); stop = start
if '-' not in exon_region and '_' not in exon_region: annotation = 'known'
else: annotation = 'novel'
except Exception:
red = ed ### For annotated exons, no difference in the annotations
exon_region = ed.ExonRegionIDs()
start = str(red.ExonStart()); stop = str(red.ExonStop())
constitutive_call = red.Constitutive()
if constitutive_call == 'yes': ens_constitutive_status = '1'
annotation = 'known'
uid = red.GeneID()+':'+exon_region
splice_events = red.AssociatedSplicingEvent(); splice_junctions = red.AssociatedSplicingJunctions()
if uid in critical_exon_annotations:
splice_junctions,splice_events = critical_exon_annotations[uid]
export_values = [uid, exon_region, red.GeneID(), '', red.Chr(), red.Strand(), start, stop, annotation, constitutive_call, red.ExonID(), ens_constitutive_status]
export_values+= [exon_region, str(red.ExonStart()), str(red.ExonStop()), splice_events, splice_junctions]
export_values = string.join(export_values,'\t')+'\n'; export_data.write(export_values)
export_data.close()
def exportNovelJunctions(species,novel_junction_db,condition_count_db,root_dir,dataset_name,biotype):
if 'exp.' not in dataset_name: dataset_name = 'exp.'+dataset_name
if '.txt' not in dataset_name: dataset_name+='.txt'
dataset_name = string.replace(dataset_name,'exp','novel')
dataset_name = string.replace(dataset_name,'.txt','.'+biotype+'.txt')
export_path = root_dir+'ExpressionInput/'+dataset_name
export_data,status = AppendOrWrite(export_path)
if status == 'not found':
title = ['chr','strand','start','stop','start Ensembl','end Ensembl','known start', 'known end']
for condition in condition_count_db: title.append(condition)
export_data.write(string.join(title,'\t')+'\n')
for key in novel_junction_db:
ji = novel_junction_db[key]
try: gene1 = str(ji.GeneID())
except Exception: gene1=''
try: gene2 = str(ji.SecondaryGeneID())
except Exception: gene2 = 'None'
try: le = str(ji.LeftExonAnnotations())
except Exception: le = ''
try: re = str(ji.RightExonAnnotations())
except Exception: re = ''
if biotype == 'junction':
values = [ji.Chr(), ji.Strand(), str(ji.Exon1Stop()), str(ji.Exon2Start())]
elif biotype == 'exon':
values = [ji.Chr(), ji.Strand(), str(ji.Exon1Stop()-1), str(ji.Exon2Start()+1)] ### correct for initial adjustment
values += [gene1,gene2,le,re]
for condition in condition_count_db:
count_db = condition_count_db[condition]
try: read_count = count_db[key]
except KeyError: read_count = '0'
values.append(read_count)
export_data.write(string.join(values,'\t')+'\n')
export_data.close()
def exportDatasetLinkedGenes(species,gene_location_db,root_dir):
"""Include an entry for gene IDs to include constitutive expression for RPKM normalized data"""
export_path = root_dir+'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_junctions.txt'
export_data,status = AppendOrWrite(export_path)
for gene in gene_location_db:
chr,strand,start,end = gene_location_db[gene]
export_values = [gene, 'E0.1',gene, '', chr, strand, str(start), str(end), 'known', 'yes', gene, '1']
export_values+= ['E0.1', str(start), str(end), '', '']
export_values = string.join(export_values,'\t')+'\n'; export_data.write(export_values)
export_data.close()
def exportDatasetLinkedJunctions(species,junction_db,junction_annotations,root_dir,testImport=False,searchChr=None):
export_path = root_dir+'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_junctions.txt'
if searchChr != None:
export_path = string.replace(export_path,'RNASeq/'+species,'RNASeq/junctions/'+species)
export_path = string.replace(export_path,'.txt','.'+searchChr+'.txt')
if testImport == 'yes': print 'Writing',export_path
export_data,status = AppendOrWrite(export_path)
if status == 'not found':
export_title = ['AltAnalyzeID','exon_id','ensembl_gene_id','transcript_cluster_id','chromosome','strand','probeset_start','probeset_stop']
export_title +=['class','constitutive_probeset','ens_exon_ids','ens_constitutive_status','exon_region','exon-region-start(s)','exon-region-stop(s)','splice_events','splice_junctions']
export_title = string.join(export_title,'\t')+'\n'; export_data.write(export_title)
for key in junction_db:
(chr,exon1_stop,exon2_start) = key
ji=junction_db[key]
#print key, ji.UniqueID(), ji.GeneID()
if ji.GeneID()!=None and ji.UniqueID()!=None:
if ji.UniqueID() in junction_annotations: ### Obtained from JunctionArray.inferJunctionComps()
junctions,splice_events = junction_annotations[ji.UniqueID()]
if ji.TransSplicing() == 'yes':
if len(splice_events)>0: splice_events+= '|trans-splicing'
else: splice_events = 'trans-splicing'
ji.setAssociatedSplicingEvent(splice_events); ji.setAssociatedSplicingJunctions(junctions)
elif ji.TransSplicing() == 'yes':
ji.setAssociatedSplicingEvent('trans-splicing')
try:
try: constitutive_call = ji.Constitutive()
except Exception:
jd = ji.ExonAnnotations()
constitutive_call = jd.Constitutive()
if constitutive_call == 'yes': ens_constitutive_status = '1'
else: ens_constitutive_status = '0'
annotation = 'known'
except Exception:
constitutive_call = 'no'; ens_constitutive_status = '0'; annotation = 'novel'
if 'I' in ji.ExonRegionID() or 'U' in ji.ExonRegionID() or '_' in ji.ExonRegionID():
annotation = 'novel' ### Not previously indicated well (as I remember) for exon-level reads - so do this
export_values = [ji.UniqueID(), ji.ExonRegionID(), ji.GeneID(), '', ji.Chr(), ji.Strand(), str(ji.Exon1Stop()), str(ji.Exon2Start()), annotation, constitutive_call, ji.ExonID(), ens_constitutive_status]
export_values+= [ji.ExonRegionID(), str(ji.Exon1Stop()), str(ji.Exon2Start()), ji.AssociatedSplicingEvent(), ji.AssociatedSplicingJunctions()]
export_values = string.join(export_values,'\t')+'\n'; export_data.write(export_values)
export_data.close()
def combineDetectedExons(unmapped_exon_db,align_exon_db,novel_exon_db):
### Used for exon alignments (both start position and end position aligned to exon/intron/UTR regions)
### Reformat align_exon_db to easily lookup exon data
aligned_exon_lookup_db={}
for gene in align_exon_db:
for ed in align_exon_db[gene]:
aligned_exon_lookup_db[gene,ed.ReadStart()]=ed
#if gene == 'ENSMUSG00000064181': print ed.ReadStart(),ed.ExonRegionID()
### Reformat novel_exon_db to easily lookup exon data - created from junction analysis (rename above exons to match novel junctions)
novel_exon_lookup_db={}
for gene in novel_exon_db:
for ed in novel_exon_db[gene]:
try:
### Only store exons that are found in the novel exon file
null = aligned_exon_lookup_db[gene,ed.ReadStart()+1] ### offset introduced on import
novel_exon_lookup_db[gene,ed.ReadStart()+1]=ed
except Exception: null=[]
try:
### Only store exons that are found in the novel exon file
null = aligned_exon_lookup_db[gene,ed.ReadStart()-1] ### offset introduced on import
novel_exon_lookup_db[gene,ed.ReadStart()-1]=ed
except Exception: null=[]
### Look up the proper exon region ID and gene ID to format the unique ID and export coordinates
x = 0
for key in unmapped_exon_db:
(chr,exon1_stop,exon2_start) = key
ji=unmapped_exon_db[key]
proceed = 'no'
if ji.GeneID() != None:
e1 = (ji.GeneID(),exon1_stop)
e2 = (ji.GeneID(),exon2_start)
exon_info=[]; override_annotation = None; found=[]
try: null = aligned_exon_lookup_db[e1]; found.append(1)
except Exception: null=[]
try: null = aligned_exon_lookup_db[e2]; found.append(2)
except Exception: null=[]
try: null = novel_exon_lookup_db[e1]; override_annotation = 1
except Exception:
try: null = novel_exon_lookup_db[e2]; override_annotation = 2
except Exception: null=[]
if len(found)>0:
### Below is not the simplest way to do this, but should be the fastest
if 1 in found: exon_info.append(aligned_exon_lookup_db[e1])
if 2 in found: exon_info.append(aligned_exon_lookup_db[e2])
if len(exon_info) == 2: ed1,ed2 = exon_info
else:
ed1 = exon_info[0]; ed2 = ed1; x+=1 ### if only one splice site aligned to a gene region (shouldn't occur)
if x == 2: null=[]; #print 'SOME EXONS FOUND WITH ONLY ONE ALIGNING POSITION...',key,ji.GeneID(),ed1.ExonRegionID(),e1,e2
try: red1 = ed1.ExonRegionData(); red2 = ed2.ExonRegionData()
except Exception:
"""
print [ji.GeneID(), ji.Chr(), key]
print e1, e2
try: print ed1.ExonRegionData()
except Exception: 'ed1 failed'
try: print ed2.ExonRegionData()
except Exception: 'ed2 failed'
"""
continue
region1 = ed1.ExonRegionID(); region2 = ed2.ExonRegionID()
#print region1,region2,ji.GeneID(),ji.Chr(),ji.Strand()
try: splice_junctions = EnsemblImport.combineAnnotations([red1.AssociatedSplicingJunctions(),red2.AssociatedSplicingJunctions()])
except Exception: print red1, red2;sys.exit()
splice_events = EnsemblImport.combineAnnotations([red1.AssociatedSplicingEvent(),red2.AssociatedSplicingEvent()])
ji.setAssociatedSplicingJunctions(splice_junctions)
ji.setAssociatedSplicingEvent(splice_events)
ens_exon_ids = EnsemblImport.combineAnnotations([red1.ExonID(),red2.ExonID()])
ji.setExonID(ens_exon_ids)
if red1.Constitutive() == 'yes' or red2.Constitutive() == 'yes': constitutive_call = 'yes'
else: constitutive_call = 'no'
ji.setConstitutive(constitutive_call)
report_both_regions = 'no'
try:
### If the annotations are from a BED file produced by AltAnalyze, novel alternative splice sites may be present
### if the below variable is not created, then this exon may over-ride the annotated exon region (e.g., E15.1 is over-written by E15.1_1234;E15.1_1256)
if 'ENS' in ji.JunctionID() and ':' not in ji.JunctionID(): report_both_regions = 'yes'
except Exception: null=[]
try:
### If the annotations are from a BED file produced by AltAnalyze, it is possible for a known exon to share a splice-site coordinate
### with a novel junction exon. This will cause both to have the same override_annotation. Prevent this with the second override below
if 'ENS' in ji.JunctionID() and ':' in ji.JunctionID(): override_annotation = None
except Exception: null=[]
if override_annotation != None:
if '_' in region1: region1 = string.split(region1,'_')[0]+'_'+str(int(string.split(region1,'_')[-1])-1)
if '_' in region2: region2 = string.split(region2,'_')[0]+'_'+str(int(string.split(region2,'_')[-1])+1)
if override_annotation == 1: region_id = region1 ### This forces a TopHat exon to be named for the splice-site position
else: region_id = region2
else:
if report_both_regions == 'no':
### Don't include specific start and end coordinates if inside a known exon
if ed1.AlignmentRegion() == 'exon': region1 = string.split(region1,'_')[0]
if ed2.AlignmentRegion() == 'exon': region2 = string.split(region2,'_')[0]
if ed1.AlignmentRegion() == 'full-intron' and ed2.AlignmentRegion() == 'full-intron':
region1 = string.split(region1,'_')[0]; region2 = string.split(region2,'_')[0]
### The adjustments below need to compensate for adjustments made upon import
if '_' in region1: region1 = string.split(region1,'_')[0]+'_'+str(int(string.split(region1,'_')[-1])-1)
if '_' in region2: region2 = string.split(region2,'_')[0]+'_'+str(int(string.split(region2,'_')[-1])+1)
ji.setExon1Stop(ji.Exon1Stop()-1); ji.setExon2Start(ji.Exon2Start()+1)
if override_annotation != None: null=[] ### It is already assigned above
elif region1 == region2: region_id = region1
elif ji.Strand() == '+': region_id = region1+';'+region2
else: region_id = region2+';'+region1 ### start and stop or genomically assigned
uid = ji.GeneID()+':'+region_id
#try: exon_region_db[ji.GeneID()].append((formatID(uid),region_id))
#except KeyError: exon_region_db[ji.GeneID()]=[(formatID(uid),region_id)]
ji.setExonRegionID(region_id)
ji.setUniqueID(uid) ### hgu133
### Export format for new exons to add to the existing critical exon database (those in exon_region_db are combined with analyzed junctions)
#exons_to_export[ji.GeneID(),region_id] = ji
else:
#print key, ji.GeneID(), ji.JunctionID(); sys.exit()
null=[] ### Occurs because two genes are overlapping
#return exons_to_export
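### A minimal sketch (not called by the pipeline) of how the exon-region portion of a UID is
### composed above; the gene and region names in the docstring are hypothetical examples.
def _exampleExonRegionUID(geneID, region1, region2, strand):
    """ e.g., _exampleExonRegionUID('ENSG00000000001','E15.1','E15.2','+') -> 'ENSG00000000001:E15.1;E15.2' """
    if region1 == region2: region_id = region1 ### both splice sites fall in the same exon region
    elif strand == '+': region_id = region1+';'+region2 ### genomic order on the + strand
    else: region_id = region2+';'+region1 ### reversed order on the - strand
    return geneID+':'+region_id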
def annotateNovelJunctions(novel_junction_db,novel_exon_db,exons_to_export):
### Reformat novel_exon_db to easily lookup exon data
novel_exon_lookup_db={}
for gene in novel_exon_db:
for ed in novel_exon_db[gene]:
novel_exon_lookup_db[gene,ed.ReadStart()]=ed
### Look up the proper exon region ID and gene ID to format the unique ID and export coordinates
junction_region_db={}
unknown_gene_junctions={}
for key in novel_junction_db:
(chr,exon1_stop,exon2_start) = key
ji=novel_junction_db[key]
proceed = 'no'
if ji.GeneID() != None:
if ji.SpliceSitesFound() != 'both':
e1 = (ji.GeneID(),exon1_stop)
if ji.TransSplicing() == 'yes':
e2 = (ji.SecondaryGeneID(),exon2_start)
else: e2 = (ji.GeneID(),exon2_start)
if e1 in novel_exon_lookup_db and e2 in novel_exon_lookup_db:
proceed = 'yes'
try: ed1 = novel_exon_lookup_db[e1]; red1 = ed1.ExonRegionData(); gene1 = e1[0]
except Exception:
print chr, key, e1; kill
ed2 = novel_exon_lookup_db[e2]; red2 = ed2.ExonRegionData(); gene2 = e2[0]
### If the splice-site matched a known junction splice site, use it instead of the one identified by exon-region location overlap
if ji.LeftExonAnnotations() != None: region1 = ji.LeftExonAnnotations()
else: region1 = ed1.ExonRegionID(); exons_to_export[gene1,region1] = ed1
if ji.RightExonAnnotations() != None: region2 = ji.RightExonAnnotations()
else: region2 = ed2.ExonRegionID(); exons_to_export[gene2,region2] = ed2
#print region1,region2,ji.GeneID(),ji.Chr(),ji.Strand(), ji.LeftExonAnnotations(), ji.RightExonAnnotations()
else:
proceed = 'yes'
region1 = ji.LeftExonAnnotations()
region2 = ji.RightExonAnnotations()
red1 = ji.LeftExonRegionData()
red2 = ji.RightExonRegionData()
### Store the individual exons for export
gene1 = ji.GeneID()
if ji.TransSplicing() == 'yes': gene2 = ji.SecondaryGeneID()
else: gene2 = ji.GeneID()
exons_to_export[gene1,region1] = red1
exons_to_export[gene2,region2] = red2
if proceed == 'yes':
try: splice_junctions = EnsemblImport.combineAnnotations([red1.AssociatedSplicingJunctions(),red2.AssociatedSplicingJunctions()])
except Exception: print red1, red2;sys.exit()
splice_events = EnsemblImport.combineAnnotations([red1.AssociatedSplicingEvent(),red2.AssociatedSplicingEvent()])
ji.setAssociatedSplicingJunctions(splice_junctions)
ji.setAssociatedSplicingEvent(splice_events)
ens_exon_ids = EnsemblImport.combineAnnotations([red1.ExonID(),red2.ExonID()])
ji.setExonID(ens_exon_ids)
if ji.TransSplicing() == 'yes':
uid = ji.GeneID()+':'+region1+'-'+ji.SecondaryGeneID()+':'+region2
region_id = uid
### When trans-splicing occurs, add the data twice to junction_region_db for the two different genes
### in JunctionArray.inferJunctionComps, establish two separate gene junctions with a unique ID for the non-gene exon
try: junction_region_db[ji.GeneID()].append((formatID(uid),region1+'-'+'U1000.1_'+str(ji.Exon2Start())))
except KeyError: junction_region_db[ji.GeneID()]=[(formatID(uid),region1+'-'+'U1000.1_'+str(ji.Exon2Start()))]
try: junction_region_db[ji.SecondaryGeneID()].append((formatID(uid),'U0.1_'+str(ji.Exon1Stop())+'-'+region2))
except KeyError: junction_region_db[ji.SecondaryGeneID()]=[(formatID(uid),'U0.1_'+str(ji.Exon1Stop())+'-'+region2)]
else:
uid = ji.GeneID()+':'+region1+'-'+region2
region_id = region1+'-'+region2
try: junction_region_db[ji.GeneID()].append((formatID(uid),region_id))
except KeyError: junction_region_db[ji.GeneID()]=[(formatID(uid),region_id)]
ji.setExonRegionID(region_id)
ji.setUniqueID(uid)
else:
unknown_gene_junctions[key]=[]
return junction_region_db,exons_to_export
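### Illustrative comment (IDs hypothetical): the junction UIDs assembled above take two forms.
### Standard junction within one gene:
###   uid = 'ENSG00000000001:E2.1-E3.1'
### Trans-splicing between two genes (each gene also receives a virtual U-exon entry):
###   uid = 'ENSG00000000001:E2.1-ENSG00000000002:E5.1'
###   junction_region_db[gene1] gets (formatID(uid), 'E2.1-U1000.1_<exon2 start>')
###   junction_region_db[gene2] gets (formatID(uid), 'U0.1_<exon1 stop>-E5.1')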
def alignReadsToExons(novel_exon_db,ens_exon_db,testImport=False):
### Simple method for aligning a single coordinate to an exon/intron region of an already matched gene
examined_exons=0; aligned_exons=0
for gene in ens_exon_db: #novel_exon_db
try:
region_numbers=[]; region_starts=[]; region_stops=[]
for ed in novel_exon_db[gene]:
examined_exons+=1; aligned_status=0; index=-1
for rd in ens_exon_db[gene]:
index+=1 ### keep track of exon/intron we are in
region_numbers.append(int(string.split(rd.ExonRegionIDs()[1:],'.')[0]))
if rd.Strand() == '-': region_starts.append(rd.ExonStop()); region_stops.append(rd.ExonStart())
else: region_starts.append(rd.ExonStart()); region_stops.append(rd.ExonStop())
#print [rd.ExonStart(),rd.ExonStop(), rd.Strand()]
#print [ed.ReadStart(),rd.ExonStart(),rd.ExonStop()]
if ed.ReadStart()>=rd.ExonStart() and ed.ReadStart()<=rd.ExonStop():
ed.setAlignmentRegion('exon')
if 'I' in rd.ExonRegionIDs(): ### In an annotated intron
ed.setAlignmentRegion('intron')
ord = rd; updated = None
try: ### If the splice site is a novel 3' splice site then annotate as the 3' exon (less than 50nt away)
nrd = ens_exon_db[gene][index+1]
if (abs(ed.ReadStart()-nrd.ExonStart())<3) or (abs(ed.ReadStart()-nrd.ExonStop())<3):
ed.setAlignmentRegion('full-intron') ### this is the start/end of intron coordinates
elif (abs(ed.ReadStart()-nrd.ExonStart())<50) or (abs(ed.ReadStart()-nrd.ExonStop())<50): rd = nrd; updated = 1
except Exception: null=[]
try:
prd = ens_exon_db[gene][index-1]
if (abs(ed.ReadStart()-prd.ExonStart())<3) or (abs(ed.ReadStart()-prd.ExonStop())<3):
ed.setAlignmentRegion('full-intron')### this is the start/end of intron coordinates
elif (abs(ed.ReadStart()-prd.ExonStart())<50) or (abs(ed.ReadStart()-prd.ExonStop())<50):
if updated==1: rd = ord; ### Hence the intron is too small to discriminate between alt5' and alt3' exons
else: rd = prd
except Exception: null=[]
ed.setExonRegionData(rd); aligned_exons+=1; aligned_status=1
if rd.ExonStop()==ed.ReadStart():
ed.setExonRegionID(rd.ExonRegionIDs())
elif rd.ExonStart()==ed.ReadStart():
ed.setExonRegionID(rd.ExonRegionIDs())
elif 'exon-intron' in ed.Annotation(): ### intron retention
ed.setExonRegionID(rd.ExonRegionIDs()) ### Hence there is a 1nt difference between the read and the annotated exon boundary
else:
ed.setExonRegionID(rd.ExonRegionIDs()+'_'+str(ed.ReadStart()))
break
if aligned_status == 0: ### non-exon/intron aligning sequences
region_numbers.sort(); region_starts.sort(); region_stops.sort()
if (rd.Strand() == '+' and ed.ReadStart()>=rd.ExonStop()) or (rd.Strand() == '-' and rd.ExonStop()>=ed.ReadStart()):
### Applicable to 3'UTR (or other trans-splicing) aligning reads
utr_id = 'U'+str(region_numbers[-1])+'.1_'+str(ed.ReadStart())
ud = EnsemblImport.ExonAnnotationsSimple(rd.Chr(),rd.Strand(),region_stops[-1],region_stops[-1],gene,'','no',utr_id,'','')
ed.setExonRegionID(utr_id)
else:
### Applicable to 5'UTR (or other trans-splicing) aligning reads
utr_id = 'U0.1'+'_'+str(ed.ReadStart())
ud = EnsemblImport.ExonAnnotationsSimple(rd.Chr(),rd.Strand(),region_starts[0],region_starts[0],gene,'','no',utr_id,'','')
ed.setExonRegionID(utr_id)
ed.setExonRegionData(ud)
ed.setAlignmentRegion('UTR')
except Exception: null=[]
if testImport == 'yes': print aligned_exons, 'splice sites aligned to exon region out of', examined_exons
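### Illustrative comment (coordinates hypothetical): splice sites that fail to align to any
### annotated exon/intron region above are named as UTR-style regions:
###   5' case: utr_id = 'U0.1_'+str(read_start)                            e.g., 'U0.1_1000123'
###   3' case: utr_id = 'U'+str(last_region_number)+'.1_'+str(read_start)  e.g., 'U12.1_1009876'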
def geneAlign(chr,chr_gene_locations,location_gene_db,chr_reads,switch_coord,read_aligned_to_gene):
""" This function aligns the start or end position for each feature (junction or exon) to a gene, in two
steps by calling this function twice. In the second iteration, the coordinates are reversed """
index = 0 ### Don't examine genes already looked at
genes_assigned = 0; trans_splicing=[]
for (coord,ji) in chr_reads: ### junction coordinates or exon coordinates with gene object
if index >5: index -=5 ### It is possible for some genes to overlap, so set back the index of genomically ranked genes each time
gene_id_obtained = 'no'
if switch_coord == 'no': rs,re=coord ### reverse the coordinates for the second iteration
else: re,rs=coord ### first-iteration coordinates (start and end)
while index < len(chr_gene_locations):
cs,ce = chr_gene_locations[index]
#print [re,rs,cs,ce, ji.Chromosome()];sys.exit()
### Determine if the first listed coordinate lies within the gene
if cs <= rs and ce >= rs:
### Yes, it does
gene,strand = location_gene_db[chr,cs,ce]
if switch_coord == 'yes': ### Only applies to coordinates, where the end-position didn't lie in the same gene as the start-position
if cs <= re and ce >= re:
### This occurs when the first iteration detects a partial overlap, but the gene containing both coordinates is downstream
### Hence, not trans-splicing
ji.setGeneID(gene)
break
first_geneid = ji.GeneID() ### see what gene was assigned in the first iteration (start position only)
#print ['trans',coord, first_geneid, gene] ### Note: in rare cases, an exon can overlap with two genes (bad Ensembl annotations?)
ji.setTransSplicing()
side = ji.checkExonPosition(rs)
if side == 'left':
ji.setGeneID(gene)
ji.setSecondaryGeneID(first_geneid)
else:
ji.setSecondaryGeneID(gene)
#if ji.GeneID() == None: print 'B',coord, ji.GeneID(), secondaryGeneID()
#print ji.GeneID(), ji.SecondaryGeneID();kill
genes_assigned+=1; gene_id_obtained = 'yes'
### Check to see if this gene represents a multi-gene spanning region (overlaps with multiple gene loci)
try:
### This code was used to check whether the gene is multi-spanning. The < sign appears to be wrong; regardless, never advance to the next gene until the next read has passed it
#cs2,ce2 = chr_gene_locations[index+1]
#if cs2 < ce: index+=1 ### Continue analysis (if above is correct, the gene will have already been assigned)
#else: break
break
except Exception: break
else:
### First iteration, store the identified gene ID (only looking at the start position)
ji.setGeneID(gene); gene_id_obtained = 'yes'
#print gene, rs, re, cs, ce
### Check the end position to ensure it also lies within the gene region
if cs <= re and ce >= re:
genes_assigned+=1
else:
### Hence, the end lies outside the gene region
trans_splicing.append((coord,ji))
### Check to see if this gene represents a multi-gene spanning region (overlaps with multiple gene loci)
try:
### This code was used to check whether the gene is multi-spanning. The < sign appears to be wrong; regardless, never advance to the next gene until the next read has passed it
#cs2,ce2 = chr_gene_locations[index+1]
#if cs2 < ce: index+=1 ### Continue analysis (if above is correct, the gene will have already been assigned)
#else: break
break
except Exception: break
else:
if rs < ce and re < ce: break
elif switch_coord == 'no' and cs <= re and ce >= re:
### This can occur if the left junction splice site is in an exon and the other is the UTR as opposed to another gene
gene,strand = location_gene_db[chr,cs,ce]
ji.setSecondaryGeneID(gene); gene_id_obtained = 'yes'
#print gene, coord, ji.Strand(), ji.GeneID()
index+=1
if gene_id_obtained == 'no':
### These often appear to be genes predicted by tBLASTn at UCSC but not by Ensembl (e.g., chr17:27,089,652-27,092,318 mouse mm9)
null=[]
#ji.setGeneID(None) ### This is not necessary, since if one exon does not align to a gene it is still a valid alignment
#print chr,coord
read_aligned_to_gene += genes_assigned
#print genes_assigned, chr, 'Gene IDs assigned out of', len(chr_reads)
#print len(trans_splicing),'reads with evidence of trans-splicing'
### For any coordinate-pair where the end-position doesn't lie within the same gene as the start, re-run for those to see which gene they are in
if switch_coord == 'no' and len(trans_splicing)>0:
read_aligned_to_gene = geneAlign(chr,chr_gene_locations,location_gene_db,trans_splicing,'yes',read_aligned_to_gene)
return read_aligned_to_gene
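### Minimal usage sketch (variable names hypothetical), mirroring the two-pass strategy above:
# aligned = 0
# aligned = geneAlign('chr1',chr1_gene_locations,location_gene_db,chr1_reads,'no',aligned)
### The function re-calls itself with switch_coord='yes' for coordinate pairs whose end position
### fell outside the gene assigned to the start position (trans-splicing candidates).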
def getNovelExonCoordinates(species,root_dir):
""" Currently, any novel exon determined during initial RNA-Seq read annotation with defined start and end coordinates, only has
the exon-end coordinate, not start, in it's name. However, the start and stop are indicated in the counts.Experiment.txt file.
To get this, we parse that file and only store exons with an I or U in them and then correct for this in the matching function below """
exp_dir = root_dir+'/ExpressionInput/'
dir_list = read_directory(exp_dir)
counts_file = None
for file in dir_list:
if 'counts.' in file and 'steady' not in file:
counts_file = file
### Example
#ENSG00000137076:I17.1_35718353=chr9:35718353-35718403 (novel exon coordinates - just sorted, not necessarily in the correct order)
#ENSG00000137076:E17.1-I17.1_35718403=chr9:35718809-35718403 (5' supporting junction)
#ENSG00000137076:I17.1_35718353-E18.1=chr9:35718353-35717783 (3' supporting junction)
#here, once we see that I17.1_35718353 is the exon ID, we know we need to get the junction with -I17.1_35718403 (always the second value)
if counts_file!=None:
fn=filepath(exp_dir+counts_file)
print 'Reading counts file'
novel_exon_db = parseCountFile(fn,'exons',{}) ### Get novel exons
print 'Reading counts file'
novel_exon_db = parseCountFile(fn,'junctions',novel_exon_db) ### Get novel exons
return novel_exon_db
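### A minimal parsing sketch (not used by the pipeline) for the counts-file keys documented
### above; the example string comes from the comments in getNovelExonCoordinates.
def _exampleParseCountsKey(key):
    """ e.g., _exampleParseCountsKey('ENSG00000137076:I17.1_35718353=chr9:35718353-35718403')
    returns ('ENSG00000137076', 'I17.1_35718353', 'chr9', 35718353, 35718403) """
    uid, coordinates = string.split(key,'=')
    gene, exon_id = string.split(uid,':')
    chr, span = string.split(coordinates,':')
    start, end = map(int, string.split(span,'-'))
    return gene, exon_id, chr, start, end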
def getMaxCounts(fn,cutoff,filterExport=False,filterExportDir=False):
firstLine=True
expressed_uids={}
if filterExport != False:
eo=export.ExportFile(filterExportDir)
for line in open(fn,'rU').xreadlines():
Line = cleanUpLine(line)
t = string.split(Line,'\t')
key = t[0]
if firstLine:
firstLine = False
if filterExport != False:
eo.write(line)
else:
if filterExport != False:
if key in filterExport:
eo.write(line)
else:
try: uid, coordinates = string.split(key,'=')
except Exception: uid = key
try: maxExp = max(map(lambda x: float(x), t[1:])); #print maxExp;sys.exit()
except Exception:
#print t[1:];sys.exit()
if 'NA' in t[1:]:
tn = [0 if x=='NA' else x for x in t[1:]] ### Replace NAs
maxExp = max(map(lambda x: float(x), tn))
elif '' in t[1:]:
tn = [0 if x=='' else x for x in t[1:]] ### Replace blanks
maxExp = max(map(lambda x: float(x), tn))
else:
maxExp=cutoff+1
#gene = string.split(uid,':')[0]
if maxExp > cutoff:
expressed_uids[uid] = []
return expressed_uids
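### Minimal usage sketch (file name and cutoff are hypothetical):
# counts_fn = filepath('ExpressionInput/counts.MyExperiment.txt')
# expressed = getMaxCounts(counts_fn,5) ### keep row IDs whose maximum value exceeds 5
### Returned keys are the UID portion of each row ID (any '=coordinates' suffix is stripped).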
def importBiologicalRelationships(species):
### Combine non-coding Ensembl gene annotations with UniProt functional annotations
import ExpressionBuilder
custom_annotation_dbase={}
try: coding_db = ExpressionBuilder.importTranscriptBiotypeAnnotations(species)
except Exception: coding_db = {}
try: gene_to_symbol_db = ExpressionBuilder.importGeneAnnotations(species)
except Exception: gene_to_symbol_db = {}
for gene in coding_db:
#coding_type = string.split(coding_db[gene][-1],'|')
coding_type = coding_db[gene][-1]
if 'protein_coding' in coding_type:
coding_type = 'protein_coding'
else:
coding_type = 'ncRNA'
if gene in gene_to_symbol_db:
symbol = string.lower(gene_to_symbol_db[gene][0])
### The genes below cause issues with many single-cell datasets because they tend to be highly correlated
if 'rpl'==symbol[:3] or 'rps'==symbol[:3] or 'mt-'==symbol[:3] or '.' in symbol or 'gm'==symbol[:2]:
coding_type = 'ncRNA'
try: gene_db = custom_annotation_dbase[coding_type]; gene_db[gene]=[]
except Exception: custom_annotation_dbase[coding_type] = {gene:[]}
filename = 'AltDatabase/uniprot/'+species+'/custom_annotations.txt'
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
ens_gene,compartment,custom_class = t[:3]
if 'GPCR' in custom_class:
custom_class = ['GPCR']
else:
custom_class = string.split(custom_class,'|')
custom_class = string.split(compartment,'|')+custom_class
for cc in custom_class:
try: gene_db = custom_annotation_dbase[cc]; gene_db[ens_gene]=[]
except Exception: custom_annotation_dbase[cc] = {ens_gene:[]}
#custom_annotation_dbase={}
try:
filename = 'AltDatabase/goelite/'+species+'/gene-mapp/Ensembl-BioMarkers.txt'
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
gene,null,celltype = t[:3]
try: gene_db = custom_annotation_dbase['BioMarker']; gene_db[gene]=[]
except Exception: custom_annotation_dbase['BioMarker'] = {gene:[]}
#print len(custom_annotation_dbase), 'gene classes imported'
except Exception: pass
return custom_annotation_dbase
def importGeneSets(geneSetType,filterType=None,geneAnnotations=None,speciesName=None):
try: speciesName = species
except: pass
gene_db={}
if 'Ontology' in geneSetType:
filename = 'AltDatabase/goelite/'+speciesName+'/nested/Ensembl_to_Nested-GO.txt'
ontology=True
else:
filename = 'AltDatabase/goelite/'+speciesName+'/gene-mapp/Ensembl-'+geneSetType+'.txt'
ontology=False
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if ontology:
gene,category = t
else: gene,null,category = t[:3]
if filterType==None:
try: gene_db[gene].append(category)
except Exception: gene_db[gene] = [category]
elif filterType in category:
if gene in geneAnnotations:
gene = geneAnnotations[gene][0]
gene_db[gene]=[]
return gene_db
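### Minimal usage sketch (gene-set name and species are hypothetical examples):
# biomarker_genes = importGeneSets('BioMarkers',speciesName='Hs') ### gene -> [categories]
# go_terms = importGeneSets('Ontology',speciesName='Hs') ### nested Gene Ontology lookup
### Passing filterType restricts the result to genes annotated with that category and, when
### geneAnnotations is supplied, re-keys them by gene symbol.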
def singleCellRNASeqWorkflow(Species, platform, expFile, mlp, exp_threshold=0, rpkm_threshold=5, drivers=False, parameters = None, reportOnly=False):
global species
global rho_cutoff
species = Species
removeOutliers = False
if parameters != None:
rpkm_threshold = parameters.ExpressionCutoff()
exp_threshold = parameters.CountsCutoff()
rho_cutoff = parameters.RhoCutoff()
restrictBy = parameters.RestrictBy()
try: removeOutliers = parameters.RemoveOutliers()
except Exception: pass
if platform == 'exons' or platform == 'PSI':
rpkm_threshold=0
exp_threshold=0
else:
rho_cutoff = 0.4
restrictBy = 'protein_coding'
onlyIncludeDrivers=True
if platform != 'exons' and platform != 'PSI':
platform = checkExpressionFileFormat(expFile,platform)
if platform != 'RNASeq':
if rpkm_threshold>1.9999:
rpkm_threshold = math.log(rpkm_threshold,2) ### log2 transform
if removeOutliers:
### Remove samples with low relative number of genes expressed
try:
print '***Removing outlier samples***'
from import_scripts import sampleIndexSelection
reload(sampleIndexSelection)
output_file = expFile[:-4]+'-OutliersRemoved.txt'
sampleIndexSelection.statisticallyFilterFile(expFile,output_file,rpkm_threshold)
if 'exp.' in expFile:
### move the original groups and comps files
groups_file = string.replace(expFile,'exp.','groups.')
groups_file = string.replace(groups_file,'-steady-state','')
groups_filtered_file = groups_file[:-4]+'-OutliersRemoved.txt'
#comps_file = string.replace(groups_file,'groups.','comps.')
#comps_filtered_file = string.replace(groups_filtered_file,'groups.','comps.')
#counts_file = string.replace(expFile,'exp.','counts.')
#counts_filtered_file = string.replace(output_file,'exp.','counts.')
try: shutil.copyfile(groups_file,groups_filtered_file) ### if present copy over
except Exception: pass
try: shutil.copyfile(comps_file,comps_filtered_file) ### if present copy over
except Exception: pass
#try: shutil.copyfile(counts_file,counts_filtered_file) ### if present copy over
#except Exception: pass
expFile = output_file
print ''
except Exception:
print '***Filtering FAILED***'
print traceback.format_exc()
expressed_uids_rpkm = getMaxCounts(expFile,rpkm_threshold)
try: expressed_uids_counts = getMaxCounts(string.replace(expFile,'exp.','counts.'),exp_threshold)
except Exception: expressed_uids_counts=expressed_uids_rpkm
if len(expressed_uids_counts) > 0:
try: expressed_uids = expressed_uids_rpkm.viewkeys() & expressed_uids_counts.viewkeys() ### common
except Exception: expressed_uids = getOverlappingKeys(expressed_uids_rpkm,expressed_uids_counts)
else:
expressed_uids = expressed_uids_rpkm
if reportOnly:
print '.',
else:
print 'Genes filtered by counts:',len(expressed_uids_counts)
print 'Genes filtered by expression:',len(expressed_uids_rpkm),len(expressed_uids)
#expressed_uids = filterByProteinAnnotation(species,expressed_uids)
print len(expressed_uids), 'expressed genes by RPKM/TPM (%d) and counts (%d)' % (rpkm_threshold,exp_threshold)
from import_scripts import OBO_import; import ExpressionBuilder
gene_to_symbol_db = ExpressionBuilder.importGeneAnnotations(species)
try: biological_categories = importBiologicalRelationships(species)
except Exception:
restrictBy = None
biological_categories={}
print 'Missing annotation file in:','AltDatabase/uniprot/'+species+'/custom_annotations.txt !!!!!'
if restrictBy !=None:
if reportOnly:
print '.',
else:
print 'Attempting to restrict analysis to protein coding genes only (flag --RestrictBy protein_coding)'
genes = biological_categories['protein_coding']
genes_temp=dict(genes)
for gene in genes_temp:
if gene in gene_to_symbol_db:
genes[gene_to_symbol_db[gene][0]]=[] ### add symbols
genes_temp={}
else:
genes = {}
for i in expressed_uids: genes[i]=[]
"""
genes.update(biological_categories['BioMarker'])
genes.update(biological_categories['transcription regulator'])
genes.update(biological_categories['splicing regulator'])
genes.update(biological_categories['kinase'])
genes.update(biological_categories['GPCR'])
"""
expressed_uids_db={}; guide_genes={}
for id in expressed_uids: expressed_uids_db[id]=[]
if platform == 'exons' or platform == 'PSI': ### For splicing-index value filtering
expressed_uids=[]
for uid in expressed_uids_db:
geneID = string.split(uid,':')[0]
geneID = string.split(geneID,' ')[-1]
if geneID in genes: expressed_uids.append(uid)
else:
try: expressed_uids = genes.viewkeys() & expressed_uids_db.viewkeys() ### common
except Exception: expressed_uids = getOverlappingKeys(genes,expressed_uids_db)
#print len(expressed_uids)
expressed_uids_db2={}
for id in expressed_uids: expressed_uids_db2[id]=[]
if drivers != False:
guide_genes = getDrivers(drivers)
if onlyIncludeDrivers:
try: expressed_uids = guide_genes.viewkeys() & expressed_uids_db2.viewkeys() ### common
except Exception: expressed_uids = getOverlappingKeys(guide_genes,expressed_uids_db2)
if len(expressed_uids)<100:
print '\nNOTE: The input IDs do not sufficiently map to annotated protein coding genes...',
print 'skipping protein coding annotation filtering.'
expressed_uids=[]
for uid in expressed_uids_db:
expressed_uids.append(uid)
if reportOnly:
print '.',
else:
print len(expressed_uids), 'expressed IDs being further analyzed'
print_out,n = findCommonExpressionProfiles(expFile,species,platform,expressed_uids,guide_genes,mlp,parameters=parameters,reportOnly=reportOnly)
return print_out,n
def getOverlappingKeys(db1,db2):
db3=[]
for key in db1:
if key in db2:
db3.append(key)
return db3
def getDrivers(filename):
fn = filepath(filename)
firstLine=True
drivers={}
for line in open(fn,'rU').xreadlines():
line = line.rstrip()
t = string.split(line,'\t')
if firstLine: firstLine = False
else:
gene = t[0]
drivers[gene]=[]
print 'Imported %d guide genes' % len(drivers)
return drivers
def filterByProteinAnnotation(species,expressed_uids):
import ExpressionBuilder
custom_annotation_dbase = ExpressionBuilder.importTranscriptBiotypeAnnotations(species)
expressed_uids_protein=[]
for gene in expressed_uids:
if gene in custom_annotation_dbase:
compartment,custom_class = custom_annotation_dbase[gene]
if 'protein_coding' in custom_class:
expressed_uids_protein.append(gene)
if len(expressed_uids_protein)>10:
return expressed_uids_protein
else:
return expressed_uids
def CoeffVar(expFile,platform,expressed_uids,fold=2,samplesDiffering=2,guideGenes=[]):
firstLine=True
expressed_values={}
expressed_values_filtered={}
cv_list=[]
for line in open(expFile,'rU').xreadlines():
key = string.split(line,'\t')[0]
t = string.split(line,'\t')
if firstLine:
headers = line
firstLine = False
else:
try: uid, coordinates = string.split(key,'=')
except Exception: uid = key
values = map(lambda x: float(x), t[1:])
#gene = string.split(uid,':')[0]
if uid in expressed_uids:
vs = list(values); vs.sort()
cv = statistics.stdev(values)/statistics.avg(values)
if samplesDiffering<1: samplesDiffering=1
if platform == 'RNASeq':
if (vs[-1*samplesDiffering]/vs[samplesDiffering])>fold: ### Ensures that at least 4 samples are significantly different in the set
expressed_values[uid] = values
cv_list.append((cv,uid))
else:
if (vs[-1*samplesDiffering]-vs[samplesDiffering])>fold: ### Ensures that at least 4 samples are significantly different in the set
expressed_values[uid] = values
cv_list.append((cv,uid))
if uid in guideGenes:
expressed_values[uid] = values
cv_list.append((10000,uid)) ### Very high CV
cv_list.sort()
cv_list.reverse()
x=0
for (cv,uid) in cv_list:
x+=1
"""
if uid == 'ENSMUSG00000003882':
print x, 'ilr7'
"""
for (cv,uid) in cv_list[:5000]:
expressed_values_filtered[uid] = expressed_values[uid]
return expressed_values_filtered, fold, samplesDiffering, headers
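### The coefficient of variation used above is simply stdev/avg; a quick numeric sketch:
# values = [2.0,4.0,6.0,8.0] ### hypothetical expression values for one gene
# cv = statistics.stdev(values)/statistics.avg(values) ### ~0.45-0.52 depending on the stdev definition
### Rows are then ranked by CV (guide genes are pinned to the top with an artificial CV of 10000)
### and only the 5000 most variable rows are retained.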
def determinePattern(vs):
max_vs = max(vs)
min_vs = min(vs)
lower_max = max_vs - (max_vs*0.01)
upper_min = abs(max_vs)*0.01
s = bisect.bisect_right(vs,upper_min) ### index of the first value above the lower bound (~1% of the max)
e = bisect.bisect_left(vs,lower_max) ### index of the last value below the upper bound (~99% of the max)
#print vs
#print max_vs, min_vs
#print lower_max, upper_min
#print s, e
avg = statistics.avg(vs[s:e+1])
m = bisect.bisect_left(vs,avg)
ratio = vs[m]/vs[((e-s)/2)+s-2] ### If the ratio is close to 1, a sigmoidal or linear pattern likely exists
print ratio
#sys.exit()
return ratio
def checkExpressionFileFormat(expFile,platform):
firstLine=True
inputMax=0; inputMin=10000
expressed_values={}
rows=0
for line in open(expFile,'rU').xreadlines():
key = string.split(line,'\t')[0]
t = string.split(line,'\t')
if firstLine:
headers = line
firstLine = False
else:
try: uid, coordinates = string.split(key,'=')
except Exception: uid = key
try: values = map(lambda x: float(x), t[1:])
except Exception:
values=[]
for value in t[1:]:
try: values.append(float(value))
except Exception:pass
try:
if max(values)>inputMax: inputMax = max(values)
except Exception:
pass
if inputMax>100:
break
if inputMax>100: ### Thus, not log values
platform = 'RNASeq'
else:
platform = "3'array"
return platform
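### Sketch of the heuristic above: raw counts/TPM values routinely exceed 100, whereas
### log2-scale expression rarely does (math.log(100,2) is only ~6.64), so any value > 100
### is taken as evidence that the file is not log-transformed ('RNASeq', log2-transformed
### downstream); otherwise the values are treated as already-log2 "3'array"-style data.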
def optimizeNumberOfGenesForDiscovery(expFile,platform,expressed_uids,fold=2,samplesDiffering=2,guideGenes=[],reportOnly=False):
firstLine=True
expressed_values={}
for line in open(expFile,'rU').xreadlines():
key = string.split(line,'\t')[0]
t = string.split(line,'\t')
if firstLine:
headers = line
firstLine = False
else:
try: uid, coordinates = string.split(key,'=')
except Exception: uid = key
try: values = map(lambda x: float(x), t[1:])
except Exception:
values = t[1:]
if 'NA' in values:
values = [0 if x=='NA' else x for x in values] ### Replace NAs
values = map(lambda x: float(x), values)
else:
values=[]
for value in t[1:]:
try: values.append(float(value))
except Exception: values.append(-9999)
values = numpy.ma.masked_values(values, -9999.)
#gene = string.split(uid,':')[0]
#if uid == 'ENSMUSG00000041515': print 'IRF8'
if uid in expressed_uids:
#slope_exp_ratio = determinePattern(vs)
#if slope_exp_ratio<2 and slope_exp_ratio>0.5:
if platform == 'RNASeq':
try: values = map(lambda x: math.log(x+1,2),values)
except Exception:
if 'NA' in values:
values = [0 if x=='NA' else x for x in values] ### Replace NAs
values = map(lambda x: math.log(x+1,2),values)
elif '' in values:
values = [0 if x=='' else x for x in values] ### Replace blanks
values = map(lambda x: math.log(x+1,2),values)
vs = list(values); vs.sort()
if (vs[-1*samplesDiffering]-vs[samplesDiffering-1])>math.log(fold,2): ### Ensures that at least 4 samples are significantly different in the set
if reportOnly==False:
expressed_values[uid] = values
else:
expressed_values[uid]=[] ### Don't store the values - datasets can contain tens of thousands of rows
else:
vs = list(values); vs.sort()
if (vs[-1*samplesDiffering]-vs[samplesDiffering-1])>math.log(fold,2): ### Ensures that at least 4 samples are significantly different in the set
if reportOnly==False:
expressed_values[uid] = values
else:
expressed_values[uid]=[]
if uid in guideGenes:
expressed_values[uid] = values
#if uid == 'ENSMUSG00000062825': print (vs[-1*samplesDiffering]-vs[samplesDiffering]),math.log(fold,2);sys.exit()
if reportOnly:
print '.',
else:
print len(expressed_uids),'genes examined and', len(expressed_values),'genes expressed for a fold cutoff of', fold
if len(expressed_uids)==0 or len(expressed_values)==0:
print options_result_in_no_genes
elif len(expressed_uids) < 50 and len(expressed_values)>0:
return expressed_values, fold, samplesDiffering, headers
elif len(expressed_values)>15000:
if platform == 'exons' or platform == 'PSI':
fold+=0.1
else:
fold+=1
samplesDiffering+=1
expressed_values, fold, samplesDiffering, headers = optimizeNumberOfGenesForDiscovery(expFile,platform,expressed_uids,fold=fold,samplesDiffering=samplesDiffering,guideGenes=guideGenes,reportOnly=reportOnly)
elif fold == 1.2 and samplesDiffering == 1:
return expressed_values, fold, samplesDiffering, headers
elif len(expressed_values)<50:
fold-=0.2
samplesDiffering-=1
if samplesDiffering<1: samplesDiffering = 1
if fold < 1.1: fold = 1.2
expressed_values, fold, samplesDiffering, headers = optimizeNumberOfGenesForDiscovery(expFile,platform,expressed_uids,fold=fold,samplesDiffering=samplesDiffering,guideGenes=guideGenes,reportOnly=reportOnly)
else:
return expressed_values, fold, samplesDiffering, headers
return expressed_values, fold, samplesDiffering, headers
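### Sketch of the adaptive search above (counts hypothetical): starting from the supplied fold
### and samplesDiffering, the function recurses until the retained gene count is workable:
###   >15,000 genes pass -> tighten: fold+1 (or +0.1 for exons/PSI) and samplesDiffering+1
###   <50 genes pass     -> relax:   fold-0.2 and samplesDiffering-1 (floors of 1.2 and 1)
###   otherwise          -> return the current gene set with the final fold and samplesDiffering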
def intraCorrelation(expressed_values,mlp):
if mlp.cpu_count() < 3:
processors = mlp.cpu_count()
else: processors = 8
pool = mlp.Pool(processes=processors)
si = (len(expressed_values)/processors)
s = si; b=0
db_ls=[]
if len(expressed_values)<10: forceError ### otherwise si will be zero, causing an infinite loop
while s<len(expressed_values):
db_ls.append(dict(expressed_values.items()[b:s]))
b+=si; s+=si
db_ls.append(dict(expressed_values.items()[b:s]))
### Create an instance of MultiCorrelatePatterns (store the variables to save memory)
workerMulti = MultiCorrelatePatterns(expressed_values)
results = pool.map(workerMulti,db_ls)
#for i in db_ls: workerMulti(i)
pool.close(); pool.join(); pool = None
correlated_genes={}
for a in results:
for k in a: correlated_genes[k] = a[k]
return correlated_genes
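### Minimal usage sketch; mlp is assumed to be the multiprocessing module (or any object
### exposing cpu_count() and Pool()), as passed in by the callers of this workflow:
# import multiprocessing as mlp
# correlated_genes = intraCorrelation(expressed_values,mlp)
### The expression dictionary is split into one chunk per worker and each worker correlates
### its chunk against the full expressed_values matrix via MultiCorrelatePatterns.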
def findCommonExpressionProfiles(expFile,species,platform,expressed_uids,guide_genes,mlp,fold=2,samplesDiffering=2,parameters=None,reportOnly=False):
use_CV=False
global rho_cutoff
row_metric = 'correlation'; row_method = 'average'
column_metric = 'cosine'; column_method = 'hopach'
original_column_metric = column_metric
original_column_method = column_method
color_gradient = 'yellow_black_blue'; transpose = False; graphic_links=[]
if parameters != None:
try: excludeGuides = parameters.ExcludeGuides() ### Remove signatures
except Exception: excludeGuides = None
fold = parameters.FoldDiff()
samplesDiffering = parameters.SamplesDiffering()
amplifyGenes = parameters.amplifyGenes()
if 'Guide' in parameters.GeneSelection():
amplifyGenes = False ### This occurs when running ICGS with the BOTH option, in which Guide3 genes are retained - ignore these
parameters.setGeneSelection('')
parameters.setClusterGOElite('')
excludeCellCycle = parameters.ExcludeCellCycle()
from visualization_scripts import clustering
row_metric = 'correlation'; row_method = 'average'
column_metric = parameters.ColumnMetric(); column_method = parameters.ColumnMethod()
original_column_metric = column_metric
original_column_method = column_method
color_gradient = 'yellow_black_blue'; graphic_links=[]
if platform == 'exons' or platform =='PSI': color_gradient = 'yellow_black_blue'
guide_genes = parameters.JustShowTheseIDs()
cell_cycle_id_list = []
else:
amplifyGenes = False
excludeCellCycle = False
if platform != 'exons'and platform !='PSI':
platform = checkExpressionFileFormat(expFile,platform)
else:
if LegacyMode: pass
else:
fold = math.pow(2,0.5)
fold = 1.25
#"""
if use_CV:
expressed_values, fold, samplesDiffering, headers = CoeffVar(expFile,platform,expressed_uids,fold=2,samplesDiffering=2,guideGenes=guide_genes)
else:
if reportOnly:
print '.',
else:
print 'Finding an optimal number of genes based on differing thresholds to include for clustering...'
#fold=1; samplesDiffering=1
expressed_values, fold, samplesDiffering, headers = optimizeNumberOfGenesForDiscovery(expFile,platform,expressed_uids,fold=fold,samplesDiffering=samplesDiffering,guideGenes=guide_genes,reportOnly=reportOnly) #fold=2,samplesDiffering=2
if reportOnly:
print '.',
else:
print 'Evaluating',len(expressed_values),'genes, differentially expressed',fold,'fold for at least',samplesDiffering*2,'samples'
#sys.exit()
from import_scripts import OBO_import; import ExpressionBuilder
gene_to_symbol_db = ExpressionBuilder.importGeneAnnotations(species)
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol_db)
areYouSure=False
if (excludeCellCycle == 'strict' or excludeCellCycle == True) and areYouSure:
cc_param = copy.deepcopy(parameters)
cc_param.setPathwaySelect('cell cycle')
cc_param.setGeneSet('GeneOntology')
cc_param.setGeneSelection('amplify')
transpose = cc_param
filtered_file = export.findParentDir(expFile)+'/amplify/'+export.findFilename(expFile)
writeFilteredFile(filtered_file,platform,headers,{},expressed_values,[])
if len(expressed_values)<1000:
row_method = 'hopach'; row_metric = 'correlation'
if column_method != 'hopach': row_method = 'average' ### needed due to PC errors
if len(headers)>7000: ### For ultra-large datasets
column_method = 'average'
cc_graphic_links = clustering.runHCexplicit(filtered_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=False, Normalize=True, JustShowTheseIDs=guide_genes)
cell_cycle_id_list = genericRowIDImport(string.replace(cc_graphic_links[0][-1],'.png','.txt'))
expressed_values2 = {}
for id in expressed_values:
try: symbolID = gene_to_symbol_db[id][0]
except Exception: symbolID = id
if id not in cell_cycle_id_list and symbolID not in cell_cycle_id_list:
expressed_values2[id]=expressed_values[id]
print len(expressed_values)-len(expressed_values2),'cell-cycle associated genes removed for cluster discovery'
expressed_values = expressed_values2
if reportOnly==False:
print 'amplifyGenes:',amplifyGenes
### Write out filtered list to amplify and to filtered.YourExperiment.txt
filtered_file = export.findParentDir(expFile)+'/amplify/'+export.findFilename(expFile)
groups_file = string.replace(expFile,'exp.','groups.')
groups_filtered_file = string.replace(filtered_file,'exp.','groups.')
groups_file = string.replace(groups_file,'-steady-state','')
groups_filtered_file = string.replace(groups_filtered_file,'-steady-state','')
if reportOnly==False:
try: export.customFileCopy(groups_file,groups_filtered_file) ### if present copy over
except Exception: pass
writeFilteredFile(filtered_file,platform,headers,{},expressed_values,[])
filtered_file_new = string.replace(expFile,'exp.','filteredExp.')
try: export.customFileCopy(filtered_file,filtered_file_new) ### if present copy over
except Exception: pass
else:
filtered_file = writeFilteredFileReimport(expFile,platform,headers,expressed_values) ### expressed_values just contains the UID
print_out = '%d genes, differentially expressed %d fold for at least %d samples' % (len(expressed_values), fold, samplesDiffering*2)
return print_out, filtered_file
if len(expressed_values)<1400 and column_method == 'hopach':
row_method = 'hopach'; row_metric = 'correlation'
else:
row_method = 'weighted'; row_metric = 'cosine'
if amplifyGenes:
transpose = parameters
try:
if len(parameters.GeneSelection())>0:
parameters.setGeneSelection(parameters.GeneSelection()+' amplify')
print 'Finding correlated genes to the input geneset(s)...'
else:
print 'Finding intra-correlated genes from the input geneset(s)...'
parameters.setGeneSelection(parameters.GeneSelection()+' IntraCorrelatedOnly amplify')
except Exception:
parameters.setGeneSelection(parameters.GeneSelection()+' IntraCorrelatedOnly amplify')
print 'Finding intra-correlated genes from the input geneset(s)...'
if column_method != 'hopach': row_method = 'average' ### needed due to PC errors
graphic_links = clustering.runHCexplicit(filtered_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=False, Normalize=True, JustShowTheseIDs=guide_genes)
#return graphic_links
from visualization_scripts import clustering
matrix, column_header, row_header, dataset_name, group_db = clustering.importData(graphic_links[-1][-1][:-4]+'.txt')
headers = ['UID']+column_header
expressed_values2={}
for i in row_header: ### Filter the expressed values for the intra-correlated queried gene set and replace
try: expressed_values2[i]=expressed_values[i]
except Exception:
try:
e = symbol_to_gene[i][0]
expressed_values2[e]=expressed_values[e]
except Exception:
pass
expressed_values = expressed_values2
print 'Looking for common gene expression profiles for class assignment...',
begin_time = time.time()
useNumpyCorr=True
negative_rho = rho_cutoff*-1
#results_file = string.replace(expFile[:-4]+'-CORRELATED-FEATURES.txt','exp.','/SamplePrediction/')
#eo = export.ExportFile(results_file[:-4]+'-genes.txt')
if useNumpyCorr:
row_ids=[]
x = []
for id in expressed_values:
row_ids.append(id)
x.append(expressed_values[id])
#if id== 'Bcl2l11': print expressed_values[id];sys.exit()
D1 = numpy.corrcoef(x)
print 'initial correlations obtained'
i=0
correlated_genes={}
if 'exons' == platform or 'PSI' == platform:
for score_ls in D1:
proceed = True
correlated = []
geneID = row_ids[i]
refgene = string.split(geneID,':')[0]
k=0
if excludeGuides!=None:
if geneID in excludeGuides: ### skip this main event
proceed=False
continue
for v in score_ls:
if v>rho_cutoff:# or v<negative_rho:
if refgene not in row_ids[k]:
correlated.append((v,row_ids[k]))
if excludeGuides!=None:
if row_ids[k] in excludeGuides: ### skip this main event
proceed=False
break
k+=1
correlated.sort()
if LegacyMode == False:
correlated.reverse()
if proceed:
correlated = map(lambda x:x[1],correlated)
correlated_genes[geneID] = correlated
i+=1
else:
for score_ls in D1:
correlated = []
geneID = row_ids[i]
k=0; temp=[]
for v in score_ls:
if v>rho_cutoff:# or v<negative_rho:
#scores.append((v,row_ids[k]))
correlated.append((v,row_ids[k]))
#temp.append((geneID,row_ids[k],str(v)))
k+=1
correlated.sort()
if LegacyMode == False:
correlated.reverse()
correlated = map(lambda x:x[1],correlated)
if len(correlated)>0:
correlated_genes[geneID] = correlated
#for (a,b,c) in temp: eo.write(a+'\t'+b+'\t'+c+'\n')
i+=1
else:
### Find common patterns now
performAllPairwiseComparisons = True
if performAllPairwiseComparisons:
correlated_genes = intraCorrelation(expressed_values,mlp)
print len(correlated_genes), 'highly correlated genes found for downstream clustering.'
else: correlated_genes={}
atleast_10={}
if len(correlated_genes)<70: connections = 0
elif len(correlated_genes)<110: connections = 4
else: connections = 5
numb_corr=[]
for i in correlated_genes:
if len(correlated_genes[i])>connections:
numb_corr.append([len(correlated_genes[i]),i])
atleast_10[i]=correlated_genes[i] ### if at least 10 genes are part of this pattern
x=0
for k in correlated_genes[i]:
if x<30: ### cap it at 30
try: atleast_10[k]=correlated_genes[k] ### add all correlated keys and values
except Exception: pass
x+=1
if len(atleast_10)<30:
print 'Initial correlated set too small, getting anything correlated'
for i in correlated_genes:
if len(correlated_genes[i])>0:
numb_corr.append([len(correlated_genes[i]),i])
try: atleast_10[i]=correlated_genes[i] ### if at least 10 genes are part of this pattern
except Exception: pass
for k in correlated_genes[i]:
try: atleast_10[k]=correlated_genes[k] ### add all correlated keys and values
except Exception: pass
if len(atleast_10) == 0:
atleast_10 = expressed_values
#eo.close()
print len(atleast_10), 'genes correlated to multiple other members (initial filtering)'
### go through the list from the most linked to the least linked genes, only reporting the most linked partners
if len(atleast_10)>5000:
print_out=""
return print_out,atleast_10
removeOutlierDrivenCorrelations=True
exclude_corr=[]
numb_corr.sort(); numb_corr.reverse()
numb_corr2=[]
#print len(numb_corr)
if removeOutlierDrivenCorrelations and samplesDiffering != 1:
for key in numb_corr: ### key gene
associations,gene = key
temp_corr_matrix_db={}; rows=[]; temp_corr_matrix=[]
gene_exp_vals = list(expressed_values[gene]) ### copy the list
max_index = gene_exp_vals.index(max(gene_exp_vals))
del gene_exp_vals[max_index]
#temp_corr_matrix.append(exp_vals); rows.append(gene)
#if 'ENSG00000016082' in correlated_genes[gene] or 'ENSG00000016082' == gene: print gene_to_symbol_db[gene],associations
if gene not in exclude_corr:
#print len(correlated_genes[gene])
for k in correlated_genes[gene]:
exp_vals = list(expressed_values[k]) ### copy the list
#print exp_vals
del exp_vals[max_index]
#temp_corr_matrix.append(exp_vals); rows.append(gene)
#print exp_vals,'\n'
temp_corr_matrix_db[k]=exp_vals
temp_corr_matrix.append(exp_vals); rows.append(gene)
correlated_hits = pearsonCorrelations(gene_exp_vals,temp_corr_matrix_db)
try: avg_corr = numpyCorrelationMatrix(temp_corr_matrix,rows,gene)
except Exception: avg_corr = 0
#if gene_to_symbol_db[gene][0] == 'ISL1' or gene_to_symbol_db[gene][0] == 'CD10' or gene_to_symbol_db[gene][0] == 'POU3F2':
if len(correlated_hits)>0:
if LegacyMode:
if (float(len(correlated_hits))+1)/len(correlated_genes[gene])<0.5 or avg_corr<rho_cutoff: ### compare to the below
pass
else:
numb_corr2.append([len(correlated_hits),gene])
else:
if (float(len(correlated_hits))+1)/len(correlated_genes[gene])<0.5 or avg_corr<(rho_cutoff-0.1):
#exclude_corr.append(key)
#if gene == 'XXX': print len(correlated_hits),len(correlated_genes[gene]), avg_corr, rho_cutoff-0.1
pass
else:
numb_corr2.append([len(correlated_hits),gene])
#print (float(len(correlated_hits))+1)/len(correlated_genes[gene]), len(correlated_genes[gene]), key
numb_corr = numb_corr2
numb_corr.sort(); numb_corr.reverse()
#print len(numb_corr)
exclude_corr={}; new_filtered_set={}
limit=0
for key in numb_corr: ### key gene
associations,gene = key
#if 'ENSG00000016082' in correlated_genes[gene] or 'ENSG00000016082' == gene: print gene_to_symbol_db[gene],associations
if gene not in exclude_corr:
for k in correlated_genes[gene]:
exclude_corr[k]=[]
new_filtered_set[k]=[]
new_filtered_set[gene]=[]
limit+=1
#print key
#if limit==1: break
atleast_10 = new_filtered_set
addMultipleDrivers=True
if len(guide_genes)>0 and addMultipleDrivers: ### Artificially weight the correlated genes with known biological drivers
for gene in guide_genes:
y=1
while y<2:
if y==1:
try: atleast_10[gene]=expressed_values[gene]
except Exception: break
else:
try: atleast_10[gene+'-'+str(y)]=expressed_values[gene]
except Exception: break
expressed_values[gene+'-'+str(y)]=expressed_values[gene] ### Add this new ID to the database
#print gene+'-'+str(y)
y+=1
#atleast_10 = expressed_values
results_file = string.replace(expFile[:-4]+'-CORRELATED-FEATURES.txt','exp.','/SamplePrediction/')
writeFilteredFile(results_file,platform,headers,gene_to_symbol_db,expressed_values,atleast_10)
print len(atleast_10),'final correlated genes'
end_time = time.time()
print 'Initial clustering completed in',int(end_time-begin_time),'seconds'
results_file = string.replace(expFile[:-4]+'-CORRELATED-FEATURES.txt','exp.','/SamplePrediction/')
if len(atleast_10)<1200 and column_method == 'hopach':
row_method = 'hopach'; row_metric = 'correlation'
else:
if LegacyMode:
row_method = 'average'; row_metric = 'euclidean'
else:
row_method = 'weighted'; row_metric = 'cosine'
#print row_method, row_metric
correlateByArrayDirectly = False
if correlateByArrayDirectly:
from visualization_scripts import clustering
matrix, column_header, row_header, dataset_name, group_db = clustering.importData(results_file)
new_column_header = map(lambda x: int(x[5:]),column_header)
matrix = [new_column_header]+matrix
matrix = zip(*matrix) ### transpose
exp_sample_db={}
for sample_data in matrix:
exp_sample_db[sample_data[0]] = sample_data[1:]
correlated_arrays = intraCorrelation(exp_sample_db,mlp)
print len(correlated_arrays), 'highly correlated arrays from gene subsets.'
mimum_corr_arrays={}
for i in correlated_arrays:
if len(correlated_arrays[i])>1:
linked_lists=correlated_arrays[i]+[i]
for k in correlated_arrays[i]:
linked_lists+=correlated_arrays[k]
linked_lists = unique.unique(linked_lists)
linked_lists.sort()
# print len(linked_lists), linked_lists
else:
try:
from visualization_scripts import clustering
if platform == 'exons': color_gradient = 'yellow_black_blue'
transpose = False
if column_method != 'hopach': row_method = 'average' ### needed due to PC errors (possibly outside of LegacyMode)
graphic_links = clustering.runHCexplicit(results_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=False, Normalize=True, JustShowTheseIDs=guide_genes)
if len(graphic_links)==0:
graphic_links = clustering.runHCexplicit(results_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=False, Normalize=True, JustShowTheseIDs=guide_genes)
cluster_file = string.replace(graphic_links[0][1],'.png','.txt')
except Exception: pass
#exportGroupsFromClusters(cluster_file,expFile,platform)
#"""
#filtered_file = export.findParentDir(expFile)+'/amplify/'+export.findFilename(expFile)
#graphic_links = [(1,'/Users/saljh8/Desktop/Grimes/KashishNormalization/test/ExpressionInput/SamplePrediction/DataPlots/Clustering-CombinedSingleCell_March_15_2015-CORRELATED-FEATURES-hierarchical_cosine_euclidean.txt')]
try: graphic_links,new_results_file = correlateClusteredGenes(platform,graphic_links[-1][-1][:-4]+'.txt',numSamplesClustered=samplesDiffering,excludeCellCycle=excludeCellCycle,graphics=graphic_links,ColumnMethod=column_method)
except Exception: print traceback.format_exc()
row_metric = 'correlation'; row_method = 'hopach'
#column_metric = 'cosine'
#if LegacyMode: column_method = 'hopach'
cellCycleRemove1=[]; cellCycleRemove2=[]
try:
newDriverGenes1, cellCycleRemove1 = correlateClusteredGenes(platform,graphic_links[-1][-1][:-4]+'.txt',stringency='strict',numSamplesClustered=samplesDiffering,excludeCellCycle=excludeCellCycle,ColumnMethod=column_method)
newDriverGenes1_str = 'Guide1 '+string.join(newDriverGenes1.keys(),' ')+' amplify positive'
parameters.setGeneSelection(newDriverGenes1_str) ### force correlation to these targetGenes
parameters.setGeneSet('None Selected') ### silence this
parameters.setPathwaySelect('None Selected')
if column_method != 'hopach': row_method = 'average' ### needed due to PC errors
graphic_links = clustering.runHCexplicit(filtered_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, parameters, display=False, Normalize=True)
newDriverGenes2, cellCycleRemove2 = correlateClusteredGenes(platform,graphic_links[-1][-1][:-4]+'.txt',stringency='strict',numSamplesClustered=samplesDiffering,excludeCellCycle=excludeCellCycle,ColumnMethod=column_method)
newDriverGenes2_str = 'Guide2 '+string.join(newDriverGenes2.keys(),' ')+' amplify positive'
parameters.setGeneSelection(newDriverGenes2_str) ### force correlation to these targetGenes
parameters.setGeneSet('None Selected') ### silence this
parameters.setPathwaySelect('None Selected')
graphic_links = clustering.runHCexplicit(filtered_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, parameters, display=False, Normalize=True)
newDriverGenes3 = unique.unique(newDriverGenes1.keys()+newDriverGenes2.keys())
cellCycleRemove=cellCycleRemove1+cellCycleRemove2 ### It is possible for a cell cycle guide-gene to be reported in both guide1 and 2, but only as cell cycle associated in one of them
newDriverGenes3_filtered=[]
for i in newDriverGenes3:
if not i in cellCycleRemove:
newDriverGenes3_filtered.append(i)
newDriverGenes3_str = 'Guide3 '+string.join(newDriverGenes3_filtered,' ')+' amplify positive'
parameters.setGeneSelection(newDriverGenes3_str)
try:
parameters.setClusterGOElite('BioMarkers')
"""
if species == 'Mm' or species == 'Hs' or species == 'Rn':
parameters.setClusterGOElite('BioMarkers')
else:
parameters.setClusterGOElite('GeneOntology')
"""
except Exception, e:
print e
graphic_links = clustering.runHCexplicit(filtered_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, parameters, display=False, Normalize=True)
except Exception:
print traceback.format_exc()
try: copyICGSfiles(expFile,graphic_links)
except Exception: pass
return graphic_links,len(atleast_10)
def copyICGSfiles(expFile,graphic_links):
if 'ExpressionInput' in expFile:
root_dir = string.split(expFile,'ExpressionInput')[0]
else:
root_dir = string.split(expFile,'AltResults')[0]
destination_folder = root_dir+'/ICGS'
try: os.mkdir(destination_folder)
except Exception: pass
for (order,png) in graphic_links:
file = export.findFilename(png)
txt = string.replace(file,'.png','.txt')
pdf = string.replace(file,'.png','.pdf')
dest_png = destination_folder+'/'+file
dest_txt = destination_folder+'/'+txt
dest_pdf = destination_folder+'/'+pdf
shutil.copy(png, dest_png)
shutil.copy(png[:-4]+'.txt', dest_txt)
shutil.copy(png[:-4]+'.pdf', dest_pdf)
def pearsonCorrelations(ref_gene_exp,exp_value_db):
correlated=[]
for gene in exp_value_db:
rho,p = stats.pearsonr(ref_gene_exp,exp_value_db[gene])
if rho>rho_cutoff or rho<(rho_cutoff*-1):
if rho!= 1:
correlated.append(gene)
#print len(exp_value_db),len(correlated);sys.exit()
return correlated
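### Minimal sketch of the filter above (values hypothetical): stats.pearsonr returns
### (rho, p-value) and genes are kept when |rho| exceeds the global rho_cutoff, excluding
### the trivial self-correlation of exactly 1:
# rho,p = stats.pearsonr([1.0,2.0,3.0,4.0],[2.1,3.9,6.2,8.0]) ### rho ~0.99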
def numpyCorrelationMatrix(x,rows,gene):
D1 = numpy.corrcoef(x)
gene_correlations={}
i=0
scores = []
for score_ls in D1:
for v in score_ls:
scores.append(v)
return numpy.average(scores)
def numpyCorrelationMatrixCount(x,rows,cutoff=0.4,geneTypeReport=None):
### Find which genes are most correlated
D1 = numpy.corrcoef(x)
gene_correlation_counts={}
i=0
for score_ls in D1:
correlated_genes=[]
geneID = rows[i]
k=0; genes_to_report=[]
for rho in score_ls:
if rho>cutoff:
correlated_genes.append(rows[k])
if rows[k] in geneTypeReport:
genes_to_report.append(rows[k])
k+=1
gene_correlation_counts[geneID]=len(correlated_genes),genes_to_report
i+=1
return gene_correlation_counts
def numpyCorrelationMatrixGene(x,rows,gene):
D1 = numpy.corrcoef(x)
gene_correlations={}
i=0
for score_ls in D1:
scores = []
geneID = rows[i]
k=0
for v in score_ls:
scores.append((v,rows[k]))
k+=1
scores.sort()
gene_correlations[geneID] = scores
i+=1
correlated_genes={}
rho_values = map(lambda (r,g): r,gene_correlations[gene])
genes = map(lambda (r,g): g,gene_correlations[gene])
s1 = bisect.bisect_right(rho_values,rho_cutoff)
s2 = bisect.bisect_left(rho_values,-1*rho_cutoff)
correlated = genes[:s2] ### for the right bisect, remove self correlations with -1
correlated = genes[s1:] ### for the left bisect, remove self correlations with -1
#print len(rows), len(correlated);sys.exit()
return len(correlated)/len(rows)
def numpyCorrelationMatrixGeneAlt(x,rows,genes,gene_to_symbol,rho_cutoff):
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=RuntimeWarning) ### hides import warnings
D1 = numpy.ma.corrcoef(x)
i=0
gene_correlations={}
for score_ls in D1:
scores = []
try: symbol = gene_to_symbol[rows[i]][0]
except Exception: symbol = '$'
if rows[i] in genes or symbol in genes:
k=0
for v in score_ls:
if str(v)!='nan':
if v > rho_cutoff:
uid = rows[k]
if uid in gene_to_symbol: uid = gene_to_symbol[uid][0]
scores.append((v,uid))
k+=1
scores.sort()
scores.reverse()
scores = map(lambda x: x[1], scores[:140]) ### grab the top 140 correlated gene symbols only
if len(symbol)==1: symbol = rows[i]
gene_correlations[symbol] = scores
i+=1
return gene_correlations
def genericRowIDImport(filename):
id_list=[]
for line in open(filename,'rU').xreadlines():
uid = string.split(line,'\t')[0]
if ' ' in uid:
for id in string.split(uid,' '):
id_list.append(id)
else:
id_list.append(uid)
return id_list
def writeFilteredFileReimport(expFile,platform,headers,expressed_values):
filtered_file=expFile[:-4]+'-VarGenes.txt'
groups_file = string.replace(expFile,'exp.','groups.')
filtered_groups = string.replace(filtered_file,'exp.','groups.')
try: shutil.copy(groups_file,filtered_groups)
except: pass
eo = export.ExportFile(filtered_file)
eo.write(headers)
for line in open(expFile,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
uid = t[0]
if uid in expressed_values:
if platform=='RNASeq': ### set to RNASeq when non-log2 data detected
values = t[1:]
try: values = map(lambda x: math.log(float(x)+1,2),values)
except Exception:
if 'NA' in values:
values = [0 if x=='NA' else x for x in values] ### Replace NAs
values = map(lambda x: math.log(x+1,2),values)
elif '' in values:
values = [0 if x=='' else x for x in values] ### Replace blanks
values = map(lambda x: math.log(x+1,2),values)
values = map(str,values)
eo.write(string.join([uid]+values,'\t')+'\n')
else:
eo.write(line)
eo.close()
return filtered_file
def writeFilteredFile(results_file,platform,headers,gene_to_symbol_db,expressed_values,atleast_10,excludeGenes=[]):
eo = export.ExportFile(results_file)
try: headers = string.replace(headers,'row_clusters-flat','UID')
except Exception:
headers = string.join(headers,'\t')+'\n'
headers = string.replace(headers,'row_clusters-flat','UID')
eo.write(headers)
keep=[]; sort_genes=False
e=0
if len(atleast_10)==0:
atleast_10 = expressed_values
sort_genes = True
for i in atleast_10:
if i in gene_to_symbol_db:
symbol = gene_to_symbol_db[i][0]
else: symbol = i
if i not in excludeGenes and symbol not in excludeGenes:
if i not in keep:
keep.append((symbol,i))
if sort_genes:
keep.sort(); keep.reverse()
for (symbol,i) in keep:
"""
if platform == 'RNASeq':
values = map(lambda x: logTransform(x), expressed_values[i])
else:
"""
values = map(str,expressed_values[i])
eo.write(string.join([symbol]+values,'\t')+'\n')
e+=1
eo.close()
def remoteGetDriverGenes(Species,platform,results_file,numSamplesClustered=3,excludeCellCycle=False,ColumnMethod='hopach'):
global species
species = Species
guideGenes, cellCycleRemove = correlateClusteredGenes(platform,results_file,stringency='strict',excludeCellCycle=excludeCellCycle,ColumnMethod=ColumnMethod)
guideGenes = string.join(guideGenes.keys(),' ')+' amplify positive'
return guideGenes
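### Format note (gene names below are illustrative): remoteGetDriverGenes returns the selected guide genes as a
### single space-delimited string with ICGS directives appended, e.g. 'Sox2 Pou5f1 Nanog amplify positive'.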
def correlateClusteredGenes(platform,results_file,stringency='medium',numSamplesClustered=3,
excludeCellCycle=False,graphics=[],ColumnMethod='hopach',rhoCutOff=0.2, transpose=False,
includeMoreCells=False):
if numSamplesClustered<1: numSamplesClustered=1
### Get all highly variably but low complexity differences, typically one or two samples that are really different
if stringency == 'medium':
new_results_file = string.replace(results_file,'.txt','-filtered.txt')
new_results_file = string.replace(new_results_file,'.cdt','-filtered.txt')
eo = export.ExportFile(new_results_file)
medVarHighComplexity=[]; medVarLowComplexity=[]; highVarHighComplexity=[]; highVarLowComplexity=[]
if transpose==False or includeMoreCells:
medVarLowComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.3,hits_cutoff=3,hits_to_report=6,transpose=transpose)
medVarHighComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.1,hits_cutoff=3,hits_to_report=6,transpose=transpose) #hits_cutoff=6
highVarLowComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.5,hits_cutoff=1,hits_to_report=4,transpose=transpose)
highVarHighComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.2,hits_cutoff=1,hits_to_report=6,filter=True,numSamplesClustered=numSamplesClustered,transpose=transpose)
else:
highVarLowComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.5,hits_cutoff=1,hits_to_report=4,transpose=transpose)
#combined_results = dict(medVarLowComplexity.items() + medVarLowComplexity.items() + highVarLowComplexity.items() + highVarHighComplexity.items())
combined_results={}
for i in medVarLowComplexity: combined_results[i]=[]
for i in medVarHighComplexity: combined_results[i]=[]
for i in highVarLowComplexity: combined_results[i]=[]
for i in highVarHighComplexity: combined_results[i]=[]
#combined_results = highVarHighComplexity
if stringency == 'strict':
medVarLowComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.3,hits_cutoff=4,hits_to_report=50,filter=True,numSamplesClustered=numSamplesClustered)
medVarHighComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.1,hits_cutoff=4,hits_to_report=50,filter=True,numSamplesClustered=numSamplesClustered) #hits_cutoff=6
highVarLowComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.5,hits_cutoff=3,hits_to_report=50,filter=True,numSamplesClustered=numSamplesClustered)
highVarHighComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.3,hits_cutoff=3,hits_to_report=50,filter=True,numSamplesClustered=numSamplesClustered)
#combined_results = dict(medVarLowComplexity.items() + medVarLowComplexity.items() + highVarLowComplexity.items() + highVarHighComplexity.items())
combined_results={}
for i in medVarLowComplexity: combined_results[i]=[]
for i in medVarHighComplexity: combined_results[i]=[]
for i in highVarLowComplexity: combined_results[i]=[]
for i in highVarHighComplexity: combined_results[i]=[]
guideGenes, addition_cell_cycle_associated = correlateClusteredGenesParameters(results_file,rho_cutoff=rhoCutOff,hits_cutoff=0,hits_to_report=1,geneFilter=combined_results,excludeCellCycle=excludeCellCycle)
if guideGenes == 'TooFewBlocks':
guideGenes, addition_cell_cycle_associated = correlateClusteredGenesParameters(results_file,rho_cutoff=rhoCutOff+0.1,hits_cutoff=0,hits_to_report=1,geneFilter=combined_results,excludeCellCycle=excludeCellCycle)
if guideGenes == 'TooFewBlocks':
guideGenes, addition_cell_cycle_associated = correlateClusteredGenesParameters(results_file,rho_cutoff=rhoCutOff+0.2,hits_cutoff=0,hits_to_report=1,geneFilter=combined_results,excludeCellCycle=excludeCellCycle,forceOutput=True)
if len(guideGenes)>200:
print 'Too many guides selected (>200)... performing more stringent filtering...'
guideGenes, addition_cell_cycle_associated = correlateClusteredGenesParameters(results_file,rho_cutoff=0.1,hits_cutoff=0,hits_to_report=1,geneFilter=combined_results,excludeCellCycle=excludeCellCycle,restrictTFs=True)
return guideGenes, addition_cell_cycle_associated
#B4galt6, Prom1
for tuple_ls in combined_results:
data_length = len(tuple_ls);break
if data_length == len(column_header):
eo.write(string.join(column_header,'\t')+'\n')
else:
eo.write(string.join(['UID']+column_header,'\t')+'\n')
#combined_results = highVarHighComplexity
for tuple_ls in combined_results:
eo.write(string.join(list(tuple_ls),'\t')+'\n')
eo.close()
cluster = True
if cluster == True and transpose==False:
from visualization_scripts import clustering
if ColumnMethod == 'hopach':
row_method = 'hopach'
column_method = 'hopach'
else:
column_method = ColumnMethod
row_method = 'average'
row_metric = 'correlation'
column_metric = 'cosine'
color_gradient = 'yellow_black_blue'
if platform == 'exons': color_gradient = 'yellow_black_blue'
transpose = False
try:
len(guide_genes)
except Exception:
guide_genes = []
graphics = clustering.runHCexplicit(new_results_file, graphics, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=False, Normalize=True, JustShowTheseIDs=guide_genes)
cluster_file = string.replace(graphics[0][1],'.png','.txt')
#exportGroupsFromClusters(cluster_file,expFile,platform)
return graphics, new_results_file
def exportReDefinedClusterBlocks(results_file,block_db,rho_cutoff):
### Re-import the matrix to get the column cluster IDs
matrix, column_header, row_header, dataset_name, group_db, priorColumnClusters, priorRowClusters = clustering.remoteImportData(results_file)
new_block_db = {}
centroid_blocks=[]
centroids = []
for block in block_db:
if len(block_db[block])>3:
            new_block_db[block] = block_db[block] ### Keep track of the row_header indexes associated with each block
data = map(lambda x: matrix[x],block_db[block])
### Compute an expression centroid from the block (cluster)
centroid = [float(sum(col))/len(col) for col in zip(*data)]
centroids.append(centroid)
centroid_blocks.append(block)
### Compare block centroids
D1 = numpy.corrcoef(centroids)
i=0
correlated_blocks=[]
for score_ls in D1:
scores = []
block = centroid_blocks[i]
k=0
for v in score_ls:
if str(v)!='nan' and v>0.6:
if block !=centroid_blocks[k]:
blocks = [block,centroid_blocks[k]]
blocks.sort()
if blocks not in correlated_blocks:
correlated_blocks.append(blocks)
k+=1
i+=1
newBlock=0
existing=[]
updated_blocks={}
correlated_blocks.sort()
print correlated_blocks
### Build a tree of related blocks (based on the code in junctionGraph)
for (block1,block2) in correlated_blocks:
if block1 not in existing and block2 not in existing:
newBlock=newBlock+1
updated_blocks[newBlock]=[block1,]
updated_blocks[newBlock].append(block2)
existing.append(block1)
existing.append(block2)
elif block1 in existing and block2 not in existing:
for i in updated_blocks:
if block1 in updated_blocks[i]:
updated_blocks[i].append(block2)
existing.append(block2)
elif block2 in existing and block1 not in existing:
for i in updated_blocks:
if block2 in updated_blocks[i]:
updated_blocks[i].append(block1)
existing.append(block1)
elif block1 in existing and block2 in existing:
for i in updated_blocks:
if block1 in updated_blocks[i]:
b1=i
if block2 in updated_blocks[i]:
b2=i
if b1!=b2:
for b in updated_blocks[b2]:
if b not in updated_blocks[b1]:
updated_blocks[b1].append(b)
del updated_blocks[b2]
### Add blocks not correlated to other blocks (not in correlated_blocks)
#print len(existing),len(centroid_blocks)
print updated_blocks
for block in centroid_blocks:
if block not in existing:
newBlock+=1
updated_blocks[newBlock]=[block]
import collections
row_order = collections.OrderedDict()
for newBlock in updated_blocks:
events_in_block=0
for block in updated_blocks[newBlock]:
for i in new_block_db[block]:
events_in_block+=1
if events_in_block>5:
for block in updated_blocks[newBlock]:
for i in new_block_db[block]:
row_order[i] = newBlock ### i is a row_header index - row_header[i] is a UID
#if newBlock==3:
#if row_header[i]=='TAF2&ENSG00000064313&E9.1-I9.1_120807184__ENSG00000064313&E9.1-E10.1':
#print row_header[i]
print updated_blocks
    ### Non-clustered block results - Typically not used but good to refer back to when testing
original_block_order = collections.OrderedDict()
for block in new_block_db:
for i in new_block_db[block]:
original_block_order[i]=block
#row_order = original_block_order
### Export the results
row_header.reverse() ### Reverse order is the default
priorColumnClusters = map(str,priorColumnClusters)
new_results_file = results_file[:-4]+'-BlockIDs.txt'
eo = export.ExportFile(new_results_file)
eo.write(string.join(['UID','row_clusters-flat']+column_header,'\t')+'\n')
eo.write(string.join(['column_clusters-flat','']+priorColumnClusters,'\t')+'\n')
for i in row_order:
cluster_number = str(row_order[i])
uid = row_header[i]
values = map(str,matrix[i])
eo.write(string.join([uid,cluster_number]+values,'\t')+'\n')
eo.close()
print 'Filtered, grouped expression clusters exported to:',new_results_file
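### Merging logic note (block IDs below are illustrative): blocks whose expression centroids correlate at rho > 0.6
### are unioned in a connected-components style pass over the sorted block pairs, e.g. the pairs [(1,2),(2,5),(3,4)]
### collapse into the merged groups {1: [1,2,5], 2: [3,4]} before uncorrelated blocks are appended as new groups.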
def correlateClusteredGenesParameters(results_file,rho_cutoff=0.3,hits_cutoff=4,hits_to_report=5,
filter=False,geneFilter=None,numSamplesClustered=3,excludeCellCycle=False,restrictTFs=False,
forceOutput=False,ReDefinedClusterBlocks=False,transpose=False):
from visualization_scripts import clustering
addition_cell_cycle_associated=[]
if geneFilter != None:
geneFilter_db={}
for i in geneFilter:
geneFilter_db[i[0]]=[]
geneFilter=geneFilter_db
matrix, column_header, row_header, dataset_name, group_db = clustering.importData(results_file,geneFilter=geneFilter)
if transpose: ### If performing reduce cluster heterogeneity on cells rather than on genes
#print 'Transposing matrix'
        matrix = map(numpy.array, zip(*matrix)) ### converts the transposed row tuples to numpy arrays
column_header, row_header = row_header, column_header
Platform = None
for i in row_header:
if 'ENS' in i and '-' in i and ':' in i: Platform = 'exons'
#print hits_to_report
if hits_to_report == 1:
### Select the best gene using correlation counts and TFs
try:
from import_scripts import OBO_import; import ExpressionBuilder
gene_to_symbol_db = ExpressionBuilder.importGeneAnnotations(species)
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol_db)
try: TFs = importGeneSets('Biotypes',filterType='transcription regulator',geneAnnotations=gene_to_symbol_db)
except Exception: TFs = importGeneSets('BioTypes',filterType='transcription regulator',geneAnnotations=gene_to_symbol_db)
if excludeCellCycle == True or excludeCellCycle == 'strict':
try: cell_cycle = importGeneSets('KEGG',filterType='Cell cycle:',geneAnnotations=gene_to_symbol_db)
except Exception:
cell_cycle = {}
try: cell_cycle_go = importGeneSets('GeneOntology',filterType='GO:0022402',geneAnnotations=gene_to_symbol_db)
except Exception: cell_cycle_go={}
for i in cell_cycle_go:
cell_cycle[i]=[]
print len(cell_cycle),'cell cycle genes being considered.'
else:
cell_cycle={}
except Exception:
print traceback.format_exc()
symbol_to_gene={}; TFs={}; cell_cycle={}
gene_corr_counts = numpyCorrelationMatrixCount(matrix,row_header,cutoff=0.4,geneTypeReport=TFs)
#try: column_header = map(lambda x: string.split(x,':')[1],column_header[1:])
#except Exception: column_header = column_header[1:]
i=0
block=0
if ReDefinedClusterBlocks:
import collections
block_db=collections.OrderedDict() ### seems benign but could alter legacy results
else:
block_db={}
for row in matrix:
if i!=0:
rho,p = stats.pearsonr(row,matrix[i-1]) ### correlate to the last ordered row
#if row_header[i] == 'Pax6': print [block],row_header[i-1],rho,rho_cutoff
"""
try:
if row_header[i] in guide_genes: print row_header[i], rho
if row_header[i-1] in guide_genes: print row_header[i-1], rho
if row_header[i+1] in guide_genes: print row_header[i+1], rho
except Exception:
pass
"""
#if hits_to_report == 1: print [block],row_header[i], row_header[i-1],rho,rho_cutoff
#print rho
if rho>0.95:
pass ### don't store this
elif rho>rho_cutoff:
try:
block_db[block].append(i) ### store the row index
except Exception:
block_db[block] = [i] ### store the row index
else:
block+=1
block_db[block] = [i] ### store the row index
else:
block_db[block] = [i] ### store the row index
i+=1
if ReDefinedClusterBlocks:
### Produces a filtered-down and centroid organized heatmap text file
exportReDefinedClusterBlocks(results_file,block_db,rho_cutoff)
if hits_to_report == 1:
if len(block_db)<4 and forceOutput==False:
return 'TooFewBlocks', None
guideGenes={}
### Select the top TFs or non-TFs with the most gene correlations
for b in block_db:
corr_counts_gene = []; cell_cycle_count=[]
#print len(block_db), b, map(lambda i: row_header[i],block_db[b])
for (gene,i) in map(lambda i: (row_header[i],i),block_db[b]):
corr_counts_gene.append((len(gene_corr_counts[gene][1]),gene_corr_counts[gene][0],gene))
if gene in cell_cycle:
cell_cycle_count.append(gene)
corr_counts_gene.sort(); tfs=[]
#print b, corr_counts_gene, '***',len(cell_cycle_count)
if (len(cell_cycle_count)>1) or (len(corr_counts_gene)<4 and (len(cell_cycle_count)>0)): pass
else:
tf_count=0
for (r,t, gene) in corr_counts_gene:
if gene in TFs:
if gene not in cell_cycle:
if restrictTFs==True and tf_count==0: pass
else:
guideGenes[gene]=[]
tf_count+=1
if len(tfs)==0:
gene = corr_counts_gene[-1][-1]
if gene in cell_cycle and LegacyMode: pass
else:
guideGenes[gene]=[]
#block_db[b]= [corr_counts_gene[-1][-1]] ### save just the selected gene indexes
### Additional filter to remove guides that will bring in cell cycle genes (the more guides the more likely)
if excludeCellCycle == 'strict':
#print 'guides',len(guideGenes)
guideCorrelated = numpyCorrelationMatrixGeneAlt(matrix,row_header,guideGenes,gene_to_symbol_db,rho_cutoff)
guideGenes={}
for gene in guideCorrelated:
cell_cycle_count=[]
for corr_gene in guideCorrelated[gene]:
if corr_gene in cell_cycle: cell_cycle_count.append(corr_gene)
#print gene, len(cell_cycle_count),len(guideCorrelated[gene])
if (float(len(cell_cycle_count))/len(guideCorrelated[gene]))>.15 or (len(guideCorrelated[gene])<4 and (len(cell_cycle_count)>0)):
print gene, cell_cycle_count
addition_cell_cycle_associated.append(gene)
pass
else:
guideGenes[gene]=[]
print 'additional Cell Cycle guide genes removed:',addition_cell_cycle_associated
print len(guideGenes), 'novel guide genes discovered:', guideGenes.keys()
return guideGenes,addition_cell_cycle_associated
def greaterThan(x,results_file,numSamplesClustered):
if 'alt_junctions' not in results_file and Platform == None:
if x>(numSamplesClustered-1): return 1
else: return 0
else:
return 1
max_block_size=0
### Sometimes the hits_cutoff is too stringent so take the largest size instead
for block in block_db:
indexes = len(block_db[block])
if indexes>max_block_size: max_block_size=indexes
max_block_size-=1
retained_ids={}; final_rows = {}
for block in block_db:
indexes = block_db[block]
#print [block], len(indexes),hits_cutoff,max_block_size
if len(indexes)>hits_cutoff or len(indexes)>max_block_size: ###Increasing this helps get rid of homogenous clusters of little significance
#if statistics.avg(matrix[indexes[0]][1:]) < -2: print statistics.avg(matrix[indexes[0]][1:]), len(indexes)
gene_names = map(lambda i: row_header[i], indexes)
#if 'Pax6' in gene_names or 'WNT8A' in gene_names: print '******',hits_to_report, gene_names
indexes = indexes[:hits_to_report]
if filter:
new_indexes = []
for index in indexes:
vs = list(matrix[index])
a = map(lambda x: greaterThan(x,results_file,numSamplesClustered),vs)
b=[1]*numSamplesClustered
c = [(i, i+len(b)) for i in range(len(a)) if a[i:i+len(b)] == b]
if len(c)>0: #http://stackoverflow.com/questions/10459493/find-indexes-of-sequence-in-list-in-python
new_indexes.append(index)
"""
vs.sort()
try:
if abs(vs[-5]-vs[5])>6: new_indexes.append(index)
except Exception:
if abs(vs[-1]-vs[1])>6: new_indexes.append(index)"""
indexes = new_indexes
#if block == 1: print map(lambda i:row_header[i],indexes)
#print indexes;sys.exit()
for ls in map(lambda i: [row_header[i]]+map(str,(matrix[i])), indexes):
final_rows[tuple(ls)]=[]
for i in indexes:
retained_ids[row_header[i]]=[]
if len(final_rows)==0:
for block in block_db:
indexes = block_db[block]
if len(indexes)>hits_cutoff or len(indexes)>max_block_size:
indexes = indexes[:hits_to_report]
for ls in map(lambda i: [row_header[i]]+map(str,(matrix[i])), indexes):
final_rows[tuple(ls)]=[]
if len(final_rows)==0:
for block in block_db:
indexes = block_db[block]
for ls in map(lambda i: [row_header[i]]+map(str,(matrix[i])), indexes):
final_rows[tuple(ls)]=[]
#print 'block length:',len(block_db), 'genes retained:',len(retained_ids)
return final_rows, column_header
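### Segmentation note (rho values below are illustrative): the loop above starts a new block whenever the Pearson
### rho between adjacent ordered rows falls to or below rho_cutoff, and skips near-duplicate rows (rho > 0.95).
### With rho_cutoff=0.3 and adjacent-row rhos of [0.8, 0.2, 0.6], rows 0-1 form the first block and rows 2-3 the second.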
def exportGroupsFromClusters(cluster_file,expFile,platform,suffix=None):
lineNum=1
for line in open(cluster_file,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if lineNum==1: names = t[2:]; lineNum+=1
elif lineNum==2: clusters = t[2:]; lineNum+=1
else: break
unique_clusters=[] ### Export groups
new_groups_dir = string.replace(expFile,'exp.','groups.')
new_comps_dir = string.replace(expFile,'exp.','comps.')
if suffix != None:
new_groups_dir = new_groups_dir[:-4]+'-'+suffix+'.txt' ###Usually end in ICGS
new_comps_dir = new_comps_dir[:-4]+'-'+suffix+'.txt'
out_obj = export.ExportFile(new_groups_dir)
cluster_number=0
cluster_db={}
for name in names:
cluster = clusters[names.index(name)]
if platform == 'RNASeq':
if 'junction_quantification' not in name and '.bed' not in name:
name = name+'.bed'
elif 'junction_quantification.txt' not in name and '.txt' not in name and '.bed' not in name:
name = name+'.txt'
if ':' in name:
group,name = string.split(name,':')
if group in cluster_db:
clust_num=cluster_db[group]
else:
cluster_number+=1
cluster_db[group] = cluster_number
clust_num = cluster_number
if cluster=='NA': cluster = group
else:
clust_num = cluster
out_obj.write(name+'\t'+str(clust_num)+'\t'+cluster+'\n')
clust_num = str(clust_num)
if clust_num not in unique_clusters: unique_clusters.append(clust_num)
out_obj.close()
comps=[] #Export comps
out_obj = export.ExportFile(new_comps_dir)
""" ### All possible pairwise group comparisons
for c1 in unique_clusters:
for c2 in unique_clusters:
temp=[int(c2),int(c1)]; temp.sort(); temp.reverse()
if c1 != c2 and temp not in comps:
out_obj.write(str(temp[0])+'\t'+str(temp[1])+'\n')
comps.append(temp)
"""
### Simple method comparing each subsequent ordered cluster (HOPACH orders based on relative similarity)
last_cluster = None
for c1 in unique_clusters:
if last_cluster !=None:
out_obj.write(c1+'\t'+last_cluster+'\n')
last_cluster=c1
out_obj.close()
return new_groups_dir
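### Output format note (cluster labels below are illustrative): the groups file has one 'sample<TAB>group number
### <TAB>group label' row per sample, and the comps file compares each cluster to the previous one in HOPACH order,
### e.g. clusters ['1','2','3'] yield the comparison rows '2\t1' and '3\t2'.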
def logTransform(value):
try: v = math.log(value,2)
except Exception: v = math.log(0.001,2)
return str(v)
class MultiCorrelatePatterns():
def __init__(self,expressed_values):
self.expressed_values = expressed_values
def __call__(self,features_to_correlate):
from scipy import stats
correlated_genes={}
for uid in features_to_correlate:
ref_values = self.expressed_values[uid]
for uid2 in self.expressed_values:
values = self.expressed_values[uid2]
rho,p = stats.pearsonr(values,ref_values)
if rho>rho_cutoff or rho<-1*rho_cutoff:
if uid!=uid2 and rho != 1.0:
try: correlated_genes[uid].append(uid2)
                        except Exception: correlated_genes[uid] = [uid2] ### seed the list with the correlated feature (the original seeded it with uid itself)
return correlated_genes
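### Usage sketch (hedged; the pool size and chunk names are hypothetical): MultiCorrelatePatterns is a picklable
### callable intended to be mapped over chunks of feature IDs; rho_cutoff must exist as a module-level global.
#   import multiprocessing
#   pool = multiprocessing.Pool(processes=4)
#   chunk_results = pool.map(MultiCorrelatePatterns(expressed_values),[chunk1,chunk2])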
def parseCountFile(fn,parseFeature,search_exon_db):
novel_exon_db={}; firstLine=True
unique_genes={}
for line in open(fn,'rU').xreadlines():
key = string.split(line,'\t')[0]
#t = string.split(line,'\t')
if firstLine: firstLine = False
else:
#uid, coordinates = string.split(key,'=')
#values = map(lambda x: float(x), t[1:])
#gene = string.split(uid,':')[0]
#if max(values)>5: unique_genes[gene] = []
if '_' in key: ### Only look at novel exons
#ENSG00000112695:I2.1_75953139=chr6:75953139-75953254
uid, coordinates = string.split(key,'=')
gene = string.split(uid,':')[0]
if parseFeature == 'exons':
if '-' not in uid:
chr,coordinates = string.split(coordinates,':') ### Exclude the chromosome
coord1,coord2 = string.split(coordinates,'-')
intron = string.split(uid,'_')[0]
intron = string.split(intron,':')[1]
first = intron+'_'+coord1
second = intron+'_'+coord2
proceed = True
if first in uid: search_uid = second ### if the first ID is already the one looked for, store the second with the exon ID
elif second in uid: search_uid = first
else:
proceed = False
#print uid, first, second; sys.exit()
#example: ENSG00000160785:E2.15_156170151;E2.16_156170178=chr1:156170151-156170178
if proceed:
try: novel_exon_db[gene].append((uid,search_uid))
except Exception: novel_exon_db[gene] = [(uid,search_uid)]
elif '-' in uid and 'I' in uid: ### get junctions
if gene in search_exon_db:
for (u,search_uid) in search_exon_db[gene]:
#if gene == 'ENSG00000137076': print u,search_uid,uid
if search_uid in uid:
novel_exon_db[uid] = u ### Relate the currently examined novel exon ID to the junction not current associated
#if gene == 'ENSG00000137076': print u, uid
#print uid;sys.exit()
#print len(unique_genes); sys.exit()
return novel_exon_db
def getJunctionType(species,fn):
root_dir = string.split(fn,'ExpressionInput')[0]
fn = filepath(root_dir+'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_junctions.txt')
firstLine=True
junction_type_db={}; type_db={}
for line in open(fn,'rU').xreadlines():
t = string.split(line,'\t')
if firstLine: firstLine = False
else:
id=t[0]; junction_type = t[8]
if '-' in id:
if 'trans-splicing' in line:
junction_type = 'trans-splicing'
junction_type_db[id] = junction_type
try: type_db[junction_type]+=1
except Exception: type_db[junction_type]=1
print 'Breakdown of event types'
for type in type_db:
print type, type_db[type]
return junction_type_db
def maxCount(ls):
c=0
for i in ls:
if i>0.5: c+=1
return c
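### Example (illustrative): maxCount counts values above the 0.5 read threshold, e.g. maxCount([0.2,0.7,1.4]) returns 2.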
def getHighExpNovelExons(species,fn):
""" Idea - if the ranking of exons based on expression changes from one condition to another, alternative splicing is occuring """
junction_type_db = getJunctionType(species,fn)
### Possible issue detected with novel exon reads: ['ENSG00000121577'] ['119364543'] cardiac
exon_max_exp_db={}; uid_key_db={}; firstLine=True
novel_intronic_junctions = {}
novel_intronic_exons = {}
cutoff = 0.2
read_threshold = 0.5
expressed_junction_types={}
features_to_export={}
exon_coord_db={}
for line in open(fn,'rU').xreadlines():
t = string.split(line,'\t')
if firstLine: firstLine = False
else:
key=t[0]
#ENSG00000112695:I2.1_75953139=chr6:75953139-75953254
try: uid, coordinates = string.split(key,'=')
except Exception: uid = key
gene = string.split(uid,':')[0]
values = map(lambda x: float(x), t[1:])
max_read_counts = max(values)
try: exon_max_exp_db[gene].append((max_read_counts,uid))
except Exception: exon_max_exp_db[gene] = [(max_read_counts,uid)]
uid_key_db[uid] = key ### retain the coordinate info
if '-' in uid and (':E' in uid or '-E' in uid):
junction_type = junction_type_db[uid]
if max_read_counts>read_threshold:
samples_expressed = maxCount(values)
if samples_expressed>2:
try: expressed_junction_types[junction_type]+=1
except Exception: expressed_junction_types[junction_type]=1
                        if junction_type == 'trans-splicing' and '_' not in uid:
                            try: expressed_junction_types['known trans-splicing']+=1
                            except Exception: expressed_junction_types['known trans-splicing']=1
elif junction_type == 'novel' and '_' not in uid:
try: expressed_junction_types['novel but known sites']+=1
except Exception: expressed_junction_types['novel but known sites']=1
elif junction_type == 'novel' and 'I' not in uid:
                            try: expressed_junction_types['novel but within 50nt of known splice sites']+=1
                            except Exception: expressed_junction_types['novel but within 50nt of known splice sites']=1
elif 'I' in uid and '_' in uid and junction_type!='trans-splicing':
#print uid;sys.exit()
try: expressed_junction_types['novel intronic junctions']+=1
except Exception: expressed_junction_types['novel intronic junctions']=1
coord = string.split(uid,'_')[-1]
if '-' in coord:
coord = string.split(coord,'-')[0]
                            try: novel_intronic_junctions[gene].append(coord)
                            except Exception: novel_intronic_junctions[gene]=[coord] ### accumulate all junction coordinates per gene (the original pattern overwrote prior entries)
elif ('I' in uid or 'U' in uid) and '_' in uid and max_read_counts>read_threshold:
if '-' not in uid:
samples_expressed = maxCount(values)
if samples_expressed>2:
try: expressed_junction_types['novel intronic exon']+=1
except Exception: expressed_junction_types['novel intronic exon']=1
coord = string.split(uid,'_')[-1]
#print uid, coord;sys.exit()
#if 'ENSG00000269897' in uid: print [gene,coord]
try: novel_intronic_exons[gene].append(coord)
except Exception: novel_intronic_exons[gene]=[coord]
exon_coord_db[gene,coord]=uid
print 'Expressed (count>%s for at least 3 samples) junctions' % read_threshold
for junction_type in expressed_junction_types:
print junction_type, expressed_junction_types[junction_type]
expressed_junction_types={}
#print len(novel_intronic_junctions)
#print len(novel_intronic_exons)
for gene in novel_intronic_junctions:
if gene in novel_intronic_exons:
for coord in novel_intronic_junctions[gene]:
if coord in novel_intronic_exons[gene]:
try: expressed_junction_types['confirmed novel intronic exons']+=1
except Exception: expressed_junction_types['confirmed novel intronic exons']=1
uid = exon_coord_db[gene,coord]
features_to_export[uid]=[]
#else: print [gene], novel_intronic_junctions[gene]; sys.exit()
for junction_type in expressed_junction_types:
print junction_type, expressed_junction_types[junction_type]
out_file = string.replace(fn,'.txt','-highExp.txt')
print 'Exporting the highest expressed exons to:', out_file
out_obj = export.ExportFile(out_file)
### Compare the relative expression of junctions and exons separately for each gene (junctions are more comparable)
for gene in exon_max_exp_db:
junction_set=[]; exon_set=[]; junction_exp=[]; exon_exp=[]
exon_max_exp_db[gene].sort()
exon_max_exp_db[gene].reverse()
for (exp,uid) in exon_max_exp_db[gene]:
if '-' in uid: junction_set.append((exp,uid)); junction_exp.append(exp)
else: exon_set.append((exp,uid)); exon_exp.append(exp)
if len(junction_set)>0:
maxJunctionExp = junction_set[0][0]
try: lower25th,median_val,upper75th,int_qrt_range = statistics.iqr(junction_exp)
except Exception: print junction_exp;sys.exit()
if int_qrt_range>0:
maxJunctionExp = int_qrt_range
junction_percent_exp = map(lambda x: (x[1],expThreshold(x[0]/maxJunctionExp,cutoff)), junction_set)
high_exp_junctions = []
for (uid,p) in junction_percent_exp: ### ID and percentage of expression
if p!='NA':
if uid in features_to_export: ### novel exons only right now
out_obj.write(uid_key_db[uid]+'\t'+p+'\n') ### write out the original ID with coordinates
if len(exon_set)>0:
maxExonExp = exon_set[0][0]
lower25th,median_val,upper75th,int_qrt_range = statistics.iqr(exon_exp)
if int_qrt_range>0:
maxExonExp = int_qrt_range
exon_percent_exp = map(lambda x: (x[1],expThreshold(x[0]/maxExonExp,cutoff)), exon_set)
high_exp_exons = []
for (uid,p) in exon_percent_exp: ### ID and percentage of expression
if p!='NA':
if uid in features_to_export:
out_obj.write(uid_key_db[uid]+'\t'+p+'\n')
out_obj.close()
def expThreshold(ratio,cutoff):
#print [ratio,cutoff]
if ratio>cutoff: return str(ratio)
else: return 'NA'
def compareExonAndJunctionResults(species,array_type,summary_results_db,root_dir):
results_dir = root_dir +'AltResults/AlternativeOutput/'
dir_list = read_directory(results_dir)
filtered_dir_db={}
#"""
try: novel_exon_junction_db = getNovelExonCoordinates(species,root_dir)
except Exception:
#print traceback.format_exc()
print 'No counts file found.'
novel_exon_junction_db={} ### only relevant to RNA-Seq analyses
for comparison_file in summary_results_db:
for results_file in dir_list:
if (comparison_file in results_file and '-exon-inclusion-results.txt' in results_file) and ('comparison' not in results_file):
try: filtered_dir_db[comparison_file].append(results_file)
except Exception: filtered_dir_db[comparison_file] = [results_file]
try: os.remove(string.split(results_dir,'AltResults')[0]+'AltResults/Clustering/Combined-junction-exon-evidence.txt')
except Exception: pass
for comparison_file in filtered_dir_db:
alt_result_files = filtered_dir_db[comparison_file]
#print alt_result_files, comparison_file
importAltAnalyzeExonResults(alt_result_files,novel_exon_junction_db,results_dir)
#"""
### Build combined clusters of high-confidence exons
graphics2=[]; graphics=[]
import ExpressionBuilder
try:
input_dir = string.split(results_dir,'AltResults')[0]+'GO-Elite/AltExonConfirmed/'
cluster_file, rows_in_file = ExpressionBuilder.buildAltExonClusterInputs(input_dir,species,array_type,dataType='AltExonConfirmed')
if rows_in_file > 5000: useHOPACH = False
else: useHOPACH = True
if rows_in_file < 12000:
graphics = ExpressionBuilder.exportHeatmap(cluster_file,useHOPACH=useHOPACH)
except Exception: pass
try:
input_dir = string.split(results_dir,'AltResults')[0]+'GO-Elite/AltExon/'
cluster_file, rows_in_file = ExpressionBuilder.buildAltExonClusterInputs(input_dir,species,array_type,dataType='AltExon')
if rows_in_file > 5000: useHOPACH = False
else: useHOPACH = True
if rows_in_file < 12000:
graphics2 = ExpressionBuilder.exportHeatmap(cluster_file,useHOPACH=useHOPACH)
except Exception: pass
return graphics+graphics2
class SplicingData:
def __init__(self,score,symbol,description,exonid,probesets,direction,splicing_event,external_exon,genomic_loc,gene_exp,protein_annot,domain_inferred,domain_overlap,method,dataset):
self.score = score; self.dataset = dataset
self.symbol = symbol;
self.description=description;self.exonid=exonid;self.probesets=probesets;self.direction=direction
self.splicing_event=splicing_event;self.external_exon=external_exon;self.genomic_loc=genomic_loc;
self.gene_exp=gene_exp;self.protein_annot=protein_annot;self.domain_inferred=domain_inferred
self.domain_overlap=domain_overlap;self.method=method
def Score(self): return self.score
def setScore(self,score): self.score = score
def GeneExpression(self): return self.gene_exp
def Dataset(self): return self.dataset
def Symbol(self): return self.symbol
def Description(self): return self.description
def ExonID(self): return self.exonid
def appendExonID(self,exonid): self.exonid+='|'+exonid
def Probesets(self): return self.probesets
def ProbesetDisplay(self):
if len(self.Probesets()[1])>0:
return string.join(self.Probesets(),'-')
else:
return self.Probesets()[0]
def ProbesetsSorted(self):
### Don't sort the original list
a = [self.probesets[0],self.probesets[1]]
a.sort()
return a
def Direction(self): return self.direction
def setDirection(self,direction): self.direction = direction
def SplicingEvent(self): return self.splicing_event
def ProteinAnnotation(self): return self.protein_annot
def DomainInferred(self): return self.domain_inferred
def DomainOverlap(self): return self.domain_overlap
def Method(self): return self.method
def setEvidence(self,evidence): self.evidence = evidence
def Evidence(self): return self.evidence
def GenomicLocation(self): return self.genomic_loc
def setExonExpStatus(self, exon_expressed): self.exon_expressed = exon_expressed
def ExonExpStatus(self): return self.exon_expressed
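### Container note: SplicingData simply bundles one alternative-exon result row; instances are built in
### importAltAnalyzeExonResults below, annotated via setExonExpStatus()/setEvidence(), and read back through the
### getters when the comparison-evidence and combined junction-exon tables are written.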
def importAltAnalyzeExonResults(dir_list,novel_exon_junction_db,results_dir):
regulated_critical_exons={}; converted_db={}
includeExonJunctionComps=True ### Allow ASPIRE comparisons with the inclusion feature as an exon to count for additive reciprocal evidence
print "Reading AltAnalyze results file"
root_dir = string.split(results_dir,'AltResults')[0]
for filename in dir_list:
x=0; regulated_critical_exon_temp={}
fn=filepath(results_dir+filename)
new_filename = string.join(string.split(filename,'-')[:-5],'-')
if '_vs_' in filename and '_vs_' in new_filename: export_filename = new_filename
else: export_filename = string.join(string.split(filename,'-')[:-5],'-')
export_path = results_dir+export_filename+'-comparison-evidence.txt'
try: os.remove(filepath(export_path)) ### If we don't do this, the old results get added to the new
except Exception: null=[]
if 'AltMouse' in filename:
altmouse_ensembl_db = importAltMouseEnsembl()
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1; #print t[12],t[13],t[22],t[23]
else:
converted = False ### Indicates both junction sides were regulated
geneid = t[0]; exonid = t[4]; probeset1 = t[6]; probeset2 = ''; score = t[1][:4]; symbol = t[2]; description = t[3]; regions = t[-4]; direction = t[5]
genomic_loc = t[-1]; splicing_event = t[-3]; external_exon = t[-6]; gene_exp_fold = t[-8]; protein_annot = t[14]; domain_inferred = t[15]; domain_overlap = t[17]
expressed_exon = 'NA'
if 'RNASeq' in filename: expressed_exon = 'no' ### Set by default
if ':' in geneid: geneid = string.split(geneid,':')[0] ### User reported that gene:gene was appearing and not sure exactly where or why but added this to address it
if 'FIRMA' in fn: method = 'FIRMA'
elif 'splicing-index' in fn: method = 'splicing-index'
if 'ASPIRE' in filename or 'linearregres' in filename:
f1=float(t[12]); f2=float(t[13]); probeset1 = t[8]; probeset2 = t[10]; direction = t[6]; exonid2 = t[5]; splicing_event = t[-4]
protein_annot = t[19]; domain_inferred = t[20]; domain_overlap = t[24]; method = 'linearregres'; regions = t[-5]
exon1_exp=float(t[-15]); exon2_exp=float(t[-14]); fold1=float(t[12]); fold2=float(t[13])
if fold1<0: fold1 = 1 ### don't factor in negative changes
if fold2<0: fold2 = 1 ### don't factor in negative changes
"""
if 'RNASeq' not in filename:
exon1_exp = math.pow(2,exon1_exp)
exon2_exp = math.log(2,exon2_exp)
m1 = exon1_exp*fold1
m2 = exon2_exp*fold2
max_exp = max([m1,m2])
min_exp = min([m1,m2])
percent_exon_expression = str(min_exp/max_exp)
"""
if 'ASPIRE' in filename: method = 'ASPIRE'; score = t[1][:5]
if '-' not in exonid and includeExonJunctionComps == False:
exonid=None ### Occurs when the inclusion just in an exon (possibly won't indicate confirmation so exclude)
else: exonid = exonid+' vs. '+exonid2
if 'AltMouse' in filename:
try: geneid = altmouse_ensembl_db[geneid]
except Exception: geneid = geneid
if 'RNASeq' not in filename and 'junction' not in filename: regions = string.replace(regions,'-','.')
else:
if 'RNASeq' in filename and '-' not in exonid:
fold = float(t[10]); exon_exp = float(t[18]); gene_exp = float(t[19])
if fold < 0: fold = -1.0/fold
GE_fold = float(gene_exp_fold)
if GE_fold < 0: GE_fold = -1.0/float(gene_exp_fold)
exon_psi1 = abs(exon_exp)/(abs(gene_exp))
exon_psi2 = (abs(exon_exp)*fold)/(abs(gene_exp)*GE_fold)
max_incl_exon_exp = max([exon_psi1,exon_psi2])
#if max_incl_exon_exp>0.20: expressed_exon = 'yes'
expressed_exon = max_incl_exon_exp
#if 'I2.1_75953139' in probeset1:
#print [exon_exp,gene_exp,exon_exp*fold,gene_exp*GE_fold]
#print exon_psi1, exon_psi2;sys.exit()
probesets = [probeset1,probeset2]
if (method == 'splicing-index' or method == 'FIRMA') and ('-' in exonid) or exonid == None:
pass #exclude junction IDs
else:
regions = string.replace(regions,';','|')
regions = string.replace(regions,'-','|')
regions = string.split(regions,'|')
for region in regions:
if len(region) == 0:
try: region = t[17]+t[18] ### For junction introns where no region ID exists
except Exception: null=[]
if ':' in region: region = string.split(region,':')[-1] ### User reported that gene:gene was appearing and not sure exactly where or why but added this to address it
if probeset1 in novel_exon_junction_db:
uid = novel_exon_junction_db[probeset1] ### convert the uid (alternative exon) to the annotated ID for the novel exon
converted_db[uid] = probeset1
else:
uid = geneid+':'+region
ss = SplicingData(score,symbol,description,exonid,probesets,direction,splicing_event,external_exon,genomic_loc,gene_exp_fold,protein_annot,domain_inferred,domain_overlap,method,filename)
ss.setExonExpStatus(str(expressed_exon))
try: regulated_critical_exon_temp[uid].append(ss)
except Exception: regulated_critical_exon_temp[uid] = [ss]
#print filename, len(regulated_critical_exon_temp)
for uid in regulated_critical_exon_temp:
report=None
if len(regulated_critical_exon_temp[uid])>1:
### We are only reporting one here and that's OK, since we are only reporting the top scores... won't include all inclusion junctions.
scores=[]
for ss in regulated_critical_exon_temp[uid]: scores.append((float(ss.Score()),ss))
scores.sort()
if (scores[0][0]*scores[-1][0])<0:
ss1 = scores[0][1]; ss2 = scores[-1][1]
                    if ss1.ProbesetsSorted() == ss2.ProbesetsSorted(): ss1.setDirection('mutual') ### same exons, hence, mutually exclusive event (or similar)
else: ss1.setDirection('both') ### opposite directions in the same comparison-file, hence, conflicting data
report=[ss1]
else:
if abs(scores[0][0])>abs(scores[-1][0]): report=[scores[0][1]]
else: report=[scores[-1][1]]
else:
report=regulated_critical_exon_temp[uid]
### Combine data from different analysis files
try: regulated_critical_exons[uid]+=report
except Exception: regulated_critical_exons[uid]=report
"""if 'ENSG00000204120' in uid:
print uid,
for i in regulated_critical_exon_temp[uid]:
print i.Probesets(),
print ''
"""
try: report[0].setEvidence(len(regulated_critical_exon_temp[uid])) ###set the number of exons demonstrating regulation of this exons
except Exception: null=[]
clearObjectsFromMemory(regulated_critical_exon_temp)
export_data,status = AppendOrWrite(export_path)
if status == 'not found':
header = string.join(['uid','source-IDs','symbol','description','exonids','independent confirmation','score','regulation direction','alternative exon annotations','associated isoforms','inferred regulated domains','overlapping domains','method','supporting evidence score','novel exon: high-confidence','percent exon expression of gene','differential gene-expression','genomic location'],'\t')+'\n'
export_data.write(header)
combined_export_path = string.split(results_dir,'AltResults')[0]+'AltResults/Clustering/Combined-junction-exon-evidence.txt'
combined_export_data, status= AppendOrWrite(combined_export_path)
if status == 'not found':
header = string.join(['uid','source-IDs','symbol','description','exonids','independent confirmation','score','regulation direction','alternative exon annotations','associated isoforms','inferred regulated domains','overlapping domains','method','supporting evidence score','novel exon: high-confidence','percent exon expression of gene','differential gene-expression','genomic location','comparison'],'\t')+'\n'
combined_export_data.write(header)
print len(regulated_critical_exons), 'regulated exon IDs imported.\n'
print 'writing:',export_path; n=0
# print [len(converted_db)]
### Check for alternative 3' or alternative 5' exon regions that were not matched to the right reciprocal junctions (occurs because only one of the exon regions is called alternative)
regulated_critical_exons_copy={}
for uid in regulated_critical_exons:
regulated_critical_exons_copy[uid]=regulated_critical_exons[uid]
u=0
### This is most applicable to RNA-Seq since the junction IDs correspond to the Exon Regions not the probeset Exon IDs
for uid in regulated_critical_exons_copy: ### Look through the copied version since we can't delete entries while iterating through
ls = regulated_critical_exons_copy[uid]
u+=1
#if u<20: print uid
for jd in ls:
if jd.Method() != 'splicing-index' and jd.Method() != 'FIRMA':
try: ### Applicable to RNA-Seq
gene,exonsEx = string.split(jd.Probesets()[1],':') ### Exclusion probeset will have the exon not annotated as the critical exon (although it should be as well)
gene,exonsIn = string.split(jd.Probesets()[0],':')
except Exception:
gene, ce = string.split(uid,':')
exonsIn, exonsEx = string.split(jd.ExonID(),'vs.')
if gene !=None:
critical_exon = None
five_prime,three_prime = string.split(exonsEx,'-')
try: five_primeIn,three_primeIn = string.split(exonsIn,'-')
                    except Exception: five_primeIn = exonsIn; three_primeIn = exonsIn ### Should only occur during testing when an exon rather than a junction ID is considered
#if gene == 'ENSG00000133083': print five_prime,three_prime, five_primeIn,three_primeIn
if five_primeIn == five_prime: ### Hence, the exclusion 3' exon should be added
critical_exon = gene+':'+three_prime
exonid = three_prime
elif three_primeIn == three_prime: ### Hence, the exclusion 3' exon should be added
critical_exon = gene+':'+five_prime
exonid = five_prime
else:
if ('5' in jd.SplicingEvent()) or ('five' in jd.SplicingEvent()):
critical_exon = gene+':'+five_prime
exonid = five_prime
elif ('3' in jd.SplicingEvent()) or ('three' in jd.SplicingEvent()):
critical_exon = gene+':'+three_prime
exonid = three_prime
elif ('alt-N-term' in jd.SplicingEvent()) or ('altPromoter' in jd.SplicingEvent()):
critical_exon = gene+':'+five_prime
exonid = five_prime
elif ('alt-C-term' in jd.SplicingEvent()):
critical_exon = gene+':'+three_prime
exonid = three_prime
#print critical_exon, uid, jd.ExonID(),jd.SplicingEvent(); sys.exit()
if critical_exon != None:
if critical_exon in regulated_critical_exons:
#print uid, critical_exon; sys.exit()
if len(regulated_critical_exons[critical_exon]) == 1:
if len(ls)==1 and uid in regulated_critical_exons: ### Can be deleted by this method
                                    if 'vs.' not in regulated_critical_exons[critical_exon][0].ExonID(): ### the original repeated this identical test twice; a check on the uid entry may have been intended
regulated_critical_exons[uid].append(regulated_critical_exons[critical_exon][0])
del regulated_critical_exons[critical_exon]
                            elif uid in regulated_critical_exons: ### If two entries already exist
ed = regulated_critical_exons[uid][1]
ed2 = regulated_critical_exons[critical_exon][0]
if 'vs.' not in ed.ExonID() and 'vs.' not in ed2.ExonID():
if ed.Direction() != ed2.Direction(): ### should be opposite directions
ed.appendExonID(exonid)
ed.setEvidence(ed.Evidence()+1)
ed.setScore(ed.Score()+'|'+ed2.Score())
del regulated_critical_exons[critical_exon]
firstEntry=True
for uid in regulated_critical_exons:
if uid in converted_db:
converted = True
else: converted = False
#if 'ENSG00000133083' in uid: print [uid]
exon_level_confirmation = 'no'
ls = regulated_critical_exons[uid]
jd = regulated_critical_exons[uid][0] ### We are only reporting one here and that's OK, since we are only reporting the top scores... won't include all inclusion junctions.
if len(ls)>1:
methods = []; scores = []; direction = []; exonids = []; probesets = []; evidence = 0; genomic_location = []
junctionids=[]
junction_data_found = 'no'; exon_data_found = 'no'
for jd in ls:
if jd.Method() == 'ASPIRE' or jd.Method() == 'linearregres':
junction_data_found = 'yes'
methods.append(jd.Method())
scores.append(jd.Score())
direction.append(jd.Direction())
exonids.append(jd.ExonID())
junctionids.append(jd.ExonID())
probesets.append(jd.ProbesetDisplay())
evidence+=jd.Evidence()
genomic_location.append(jd.GenomicLocation())
                    ### Preferentially obtain isoform annotations from the reciprocal analysis, which is likely more accurate
isoform_annotations = [jd.ProteinAnnotation(), jd.DomainInferred(), jd.DomainOverlap()]
for ed in ls:
if ed.Method() == 'splicing-index' or ed.Method() == 'FIRMA':
exon_data_found = 'yes' ### pick one of them
methods.append(ed.Method())
scores.append(ed.Score())
direction.append(ed.Direction())
exonids.append(ed.ExonID())
probesets.append(ed.ProbesetDisplay())
evidence+=ed.Evidence()
genomic_location.append(ed.GenomicLocation())
#isoform_annotations = [ed.ProteinAnnotation(), ed.DomainInferred(), ed.DomainOverlap()]
if junction_data_found == 'yes' and exon_data_found == 'yes':
exon_level_confirmation = 'yes'
for junctions in junctionids:
if 'vs.' in junctions:
j1 = string.split(junctions,' vs. ')[0] ### inclusion exon or junction
if '-' not in j1: ### not a junction, hence, may not be sufficient to use for confirmation (see below)
if 'I' in j1: ### intron feature
if '_' in j1: ### novel predicted exon
exon_level_confirmation = 'no'
else:
exon_level_confirmation = 'yes'
else:
if '_' in j1:
exon_level_confirmation = 'no'
else:
exon_level_confirmation = 'partial'
method = string.join(methods,'|')
unique_direction = unique.unique(direction)
genomic_location = unique.unique(genomic_location)
if len(unique_direction) == 1: direction = unique_direction[0]
else: direction = string.join(direction,'|')
score = string.join(scores,'|')
probesets = string.join(probesets,'|')
exonids_unique = unique.unique(exonids)
if len(exonids_unique) == 1: exonids = exonids_unique[0]
else: exonids = string.join(exonids,'|')
if len(genomic_location) == 1: genomic_location = genomic_location[0]
else: genomic_location = string.join(genomic_location,'|')
evidence = str(evidence)
if 'mutual' in direction: direction = 'mutual'
if len(ls) == 1:
probesets = jd.ProbesetDisplay()
direction = jd.Direction()
score = jd.Score()
method = jd.Method()
exonids = jd.ExonID()
evidence = jd.Evidence()
genomic_location = jd.GenomicLocation()
isoform_annotations = [jd.ProteinAnnotation(), jd.DomainInferred(), jd.DomainOverlap()]
try:
#if int(evidence)>4 and 'I' in uid: novel_exon = 'yes' ### high-evidence novel exon
#else: novel_exon = 'no'
if converted == True:
novel_exon = 'yes'
splicing_event = 'cassette-exon'
else:
novel_exon = 'no'
splicing_event = jd.SplicingEvent()
values = [uid, probesets, jd.Symbol(), jd.Description(), exonids, exon_level_confirmation, score, direction, splicing_event]
values += isoform_annotations+[method, str(evidence),novel_exon,jd.ExonExpStatus(),jd.GeneExpression(),genomic_location]
values = string.join(values,'\t')+'\n'
#if 'yes' in exon_level_confirmation:
export_data.write(values); n+=1
if exon_level_confirmation != 'no' and ('|' not in direction):
geneID = string.split(uid,':')[0]
try: relative_exon_exp = float(jd.ExonExpStatus())
except Exception: relative_exon_exp = 1
if firstEntry:
### Also export high-confidence predictions for GO-Elite
elite_export_path = string.split(results_dir,'AltResults')[0]+'GO-Elite/AltExonConfirmed/'+export_filename+'-junction-exon-evidence.txt'
elite_export_data = export.ExportFile(elite_export_path)
elite_export_data.write('GeneID\tEn\tExonID\tScores\tGenomicLocation\n')
firstEntry = False
if relative_exon_exp>0.10:
elite_export_data.write(string.join([geneID,'En',uid,score,genomic_location],'\t')+'\n')
#if 'DNA' in isoform_annotations[-1]:
                    if '2moter' not in jd.SplicingEvent() and '2lt-N' not in jd.SplicingEvent(): ### note: these literals appear to never match any splicing-event annotation (e.g. 'altPromoter','alt-N-term'), so this filter is effectively a no-op
values = [uid, probesets, jd.Symbol(), jd.Description(), exonids, exon_level_confirmation, score, direction, splicing_event]
values += isoform_annotations+[method, str(evidence),novel_exon,jd.ExonExpStatus(),jd.GeneExpression(),genomic_location,export_filename]
values = string.join(values,'\t')+'\n'
combined_export_data.write(values)
except Exception, e:
#print traceback.format_exc();sys.exit()
pass ### Unknown error - not evaluated in 2.0.8 - isoform_annotations not referenced
print n,'exon IDs written to file.'
export_data.close()
try: elite_export_data.close()
except Exception: pass
clearObjectsFromMemory(regulated_critical_exons)
clearObjectsFromMemory(regulated_critical_exons_copy)
#print '!!!!Within comparison evidence'
#returnLargeGlobalVars()
def FeatureCounts(bed_ref, bam_file):
output = bam_file[:-4]+'__FeatureCounts.bed'
import subprocess
#if '/bin' in kallisto_dir: kallisto_file = kallisto_dir +'/apt-probeset-summarize' ### if the user selects an APT directory
kallisto_dir= 'AltDatabase/subreads/'
if os.name == 'nt':
featurecounts_file = kallisto_dir + 'PC/featureCounts.exe'; plat = 'Windows'
elif 'darwin' in sys.platform:
featurecounts_file = kallisto_dir + 'Mac/featureCounts'; plat = 'MacOSX'
elif 'linux' in sys.platform:
featurecounts_file = kallisto_dir + '/Linux/featureCounts'; plat = 'linux'
print 'Using',featurecounts_file
    featurecounts_file = filepath(featurecounts_file)
    featurecounts_root = string.split(featurecounts_file,'bin/featureCounts')[0]
    print [featurecounts_file,"-a",bed_ref,"-F","SAF","-o",output,bam_file]
    retcode = subprocess.call([featurecounts_file,"-a",bed_ref, "-F", "SAF", "-o", output, bam_file])
def filterFASTAFiles(fasta_files):
filter_fasta_files=[]
filter_dir = export.findParentDir(fasta_files[0])+'/filtered_fasta'
try: os.mkdir(filter_dir)
except Exception: pass
for file in fasta_files:
if 'filtered.fa' in file:
filter_fasta_files.append(file)
else:
filtered_fasta = file[:-3]+'-filtered.fa'
filter_fasta_files.append(filtered_fasta)
filename = export.findFilename(file)
eo=export.ExportFile(filtered_fasta)
for line in open(file,'rU').xreadlines():
if '>'==line[0]:
skip=False
### Exclude non-standard chromosomal transcripts
if 'PATCH' in line or '_1_' in line or '_1:' in line or ':HSCHR' in line or 'putative' in line or 'supercontig' in line or 'NOVEL_TEST' in line:
skip=True
else:
eo.write(line)
elif skip==False:
eo.write(line)
eo.close()
shutil.move(file,filter_dir+'/'+filename)
return filter_fasta_files
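### Behavior note: filterFASTAFiles writes '<name>-filtered.fa' copies that exclude non-standard chromosome
### transcripts (PATCH/HSCHR/supercontig style headers), moves the originals into a 'filtered_fasta' subfolder,
### and passes through files already ending in 'filtered.fa' untouched.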
def getCoordinateFile(species):
geneCoordFile = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt'
geneCoordFile = unique.filepath(geneCoordFile)
status = verifyFile(geneCoordFile)
if status == 'not found':
try:
from build_scripts import EnsemblSQL
ensembl_version = string.replace(unique.getCurrentGeneDatabaseVersion(),'EnsMart','')
configType = 'Advanced'; analysisType = 'AltAnalyzeDBs'; externalDBName = ''; force = 'no'
EnsemblSQL.buildEnsemblRelationalTablesFromSQL(species,configType,analysisType,externalDBName,ensembl_version,force,buildCommand='exon')
except Exception:
#print traceback.format_exc()
print 'Failed to export a transcript-exon coordinate file (similar to a GTF)!!!!\n...Proceeding with standard Kallisto (no-splicing).'
geneCoordFile=None
return geneCoordFile
def runKallisto(species,dataset_name,root_dir,fastq_folder,mlp,returnSampleNames=False,customFASTA=None,log_output=True):
#print 'Running Kallisto...please be patient'
import subprocess
n_threads = mlp.cpu_count()
print 'Number of threads =',n_threads
#n_threads = 1
kallisto_dir_objects = os.listdir(unique.filepath('AltDatabase/kallisto'))
### Determine version
version = '0.43.1'
for subdir in kallisto_dir_objects:
if subdir.count('.')>1: version = subdir
kallisto_dir= 'AltDatabase/kallisto/'+version+'/'
if os.name == 'nt':
kallisto_file = kallisto_dir + 'PC/bin/kallisto.exe'; plat = 'Windows'
elif 'darwin' in sys.platform:
kallisto_file = kallisto_dir + 'Mac/bin/kallisto'; plat = 'MacOSX'
elif 'linux' in sys.platform:
kallisto_file = kallisto_dir + '/Linux/bin/kallisto'; plat = 'linux'
print 'Using',kallisto_file
kallisto_file = filepath(kallisto_file)
kallisto_root = string.split(kallisto_file,'bin/kallisto')[0]
fn = filepath(kallisto_file)
try: os.chmod(fn,0777) ### It's rare, but this can be a write issue
except: pass
output_dir=root_dir+'/ExpressionInput/kallisto/'
try: os.mkdir(root_dir+'/ExpressionInput')
except Exception: pass
try: os.mkdir(root_dir+'/ExpressionInput/kallisto')
except Exception: pass
fastq_folder += '/'
dir_list = read_directory(fastq_folder)
fastq_paths = []
for file in dir_list:
file_lower = string.lower(file)
if 'fastq' in file_lower and '._' not in file[:4]: ### Hidden files
fastq_paths.append(fastq_folder+file)
fastq_paths,paired = findPairs(fastq_paths)
### Check to see if Kallisto files already exist and use these if so (could be problematic but allows for outside quantification)
kallisto_tsv_paths=[]
dir_list = read_directory(output_dir)
for folder in dir_list:
kallisto_outdir = output_dir+folder+'/abundance.tsv'
status = os.path.isfile(kallisto_outdir)
if status:
            kallisto_tsv_paths.append(kallisto_outdir) ### record the existing abundance.tsv path (only the count of these is used below)
if returnSampleNames:
return fastq_paths
    ### Store/retrieve the Kallisto index in the Ensembl-specific SequenceData location
kallisto_index_root = 'AltDatabase/'+species+'/SequenceData/'
try: os.mkdir(filepath(kallisto_index_root))
except Exception: pass
indexFile = filepath(kallisto_index_root+species)
#indexFile = filepath(kallisto_index_root + 'Hs_intron')
indexStatus = os.path.isfile(indexFile)
if indexStatus == False or customFASTA!=None:
try: fasta_files = getFASTAFile(species)
except Exception: fasta_files = []
index_file = filepath(kallisto_index_root+species)
if len(fasta_files)==0 and customFASTA==None:
###download Ensembl fasta file to the above directory
from build_scripts import EnsemblSQL
ensembl_version = string.replace(unique.getCurrentGeneDatabaseVersion(),'EnsMart','')
try:
EnsemblSQL.getEnsemblTranscriptSequences(ensembl_version,species,restrictTo='cDNA')
fasta_files = getFASTAFile(species)
except Exception: pass
elif customFASTA!=None: ### Custom FASTA file supplied by the user
fasta_files = [customFASTA]
indexFile = filepath(kallisto_index_root+species+'-custom')
try: os.remove(indexFile) ### erase any pre-existing custom index
except Exception: pass
if len(fasta_files)>0:
print 'Building kallisto index file...'
arguments = [kallisto_file, "index","-i", indexFile]
fasta_files = filterFASTAFiles(fasta_files)
for fasta_file in fasta_files:
arguments.append(fasta_file)
try:
retcode = subprocess.call(arguments)
except Exception:
print traceback.format_exc()
if customFASTA!=None:
reimportExistingKallistoOutput = False
elif len(kallisto_tsv_paths) == len(fastq_paths):
reimportExistingKallistoOutput = True
elif len(kallisto_tsv_paths) > len(fastq_paths):
reimportExistingKallistoOutput = True ### If working with a directory of kallisto results
else:
reimportExistingKallistoOutput = False
if reimportExistingKallistoOutput:
print 'NOTE: Re-import PREVIOUSLY GENERATED kallisto output:',reimportExistingKallistoOutput
print '...To force re-analysis of FASTQ files, delete the folder "kallisto" in "ExpressionInput"'
### Just get the existing Kallisto output folders
fastq_paths = read_directory(output_dir)
kallisto_folders=[]
try:
import collections
expMatrix = collections.OrderedDict()
countMatrix = collections.OrderedDict()
countSampleMatrix = collections.OrderedDict()
sample_total_counts = collections.OrderedDict()
except Exception:
try:
import ordereddict
expMatrix = ordereddict.OrderedDict()
countMatrix = ordereddict.OrderedDict()
countSampleMatrix = ordereddict.OrderedDict()
sample_total_counts = ordereddict.OrderedDict()
except Exception:
expMatrix={}
countMatrix={}
countSampleMatrix={}
sample_total_counts={}
headers=['UID']
### Verify, import, create and/or ignore the transcript exon coordinate file for BAM file creation
geneCoordFile = getCoordinateFile(species)
for n in fastq_paths:
output_path = output_dir+n
kallisto_folders.append(output_path)
if reimportExistingKallistoOutput == False:
begin_time = time.time()
if geneCoordFile != None: ### For BAM and BED file generation
print 'Running kallisto on:',n,'...',
p=fastq_paths[n]
b=[" > "+n+'.sam']
bedFile = root_dir+ '/' + n + '__junction.bed'
kallisto_out = open(root_dir+ '/' + n + '.bam', 'ab')
if log_output:
err_out = open(output_dir + '/log.txt', 'a')
err_out.seek(0, 2) # Subprocess doesn't move the file pointer when appending!
else:
err_out = None
kallisto_out.seek(0, 2) # Subprocess doesn't move the file pointer when appending!
if paired == 'paired':
s=[]
else:
s=["--single","-l","200","-s","20"]
#geneCoordFile=None - force to run simple Kallisto
if geneCoordFile==None:
try: ### Without BAM and BED file generation
retcode = subprocess.call([kallisto_file, "quant","-i", indexFile, "-o", output_path]+s+p)
except Exception:
print traceback.format_exc()
else: ### Attempt to export BAM and BED files with Kallisto quantification
kallisto_command = [kallisto_file, "quant", "-i", indexFile, "-o", output_path,
"-g", geneCoordFile, "-j", bedFile, "--threads="+str(n_threads), "--sortedbam"] + s +p
kallisto_process = subprocess.Popen(kallisto_command, stdout=kallisto_out, stderr=err_out)
kallisto_process.communicate()
retcode = kallisto_process.returncode
if os.name == 'nt':
try:
sam_process = subprocess.Popen('AltDatabase\samtools\samtools.exe index ' + root_dir+ '/' + n + '.bam')
sam_process.communicate()
retcode_sam = sam_process.returncode
except: pass
#retcode = subprocess.call([kallisto_file, "quant","-i", indexFile, "-o", output_path,"--pseudobam"]+p+b)
#retcode = subprocess.call([kallisto_file, "quant","-i", indexFile, "-o", output_path]+p)
"""except Exception:
print traceback.format_exc()
kill
retcode = subprocess.call(['kallisto', "quant","-i", indexFile, "-o", output_path]+p)"""
if retcode == 0: print 'completed in', int(time.time()-begin_time), 'seconds'
else: print 'kallisto failed due to an unknown error (report to altanalyze.org help).'
#"""
input_path = output_path+'/abundance.txt'
try:
try: expMatrix,countMatrix,countSampleMatrix=importTPMs(n,input_path,expMatrix,countMatrix,countSampleMatrix)
except Exception:
input_path = output_path+'/abundance.tsv'
expMatrix,countMatrix,countSampleMatrix=importTPMs(n,input_path,expMatrix,countMatrix,countSampleMatrix)
headers.append(n)
sample_total_counts = importTotalReadCounts(n,output_path+'/run_info.json',sample_total_counts)
        except Exception:
            print traceback.format_exc()
            print n, 'TPM expression import failed'
            sys.exit()
if paired == 'paired':
print '\n...Make sure the paired-end samples were correctly assigned:'
print fastq_paths
for i in fastq_paths:
print 'Common name:',i,
for x in fastq_paths[i]:
print export.findParentDir(x),
print '\n'
### Summarize alignment information
for sample in countSampleMatrix:
try: estCounts = int(float(countSampleMatrix[sample]))
except Exception: estCounts='NA'
try: totalCounts = sample_total_counts[sample]
except Exception: totalCounts = 'NA'
try: aligned = str(100*estCounts/float(totalCounts))
except Exception: aligned = 'NA'
try: aligned = string.split(aligned,'.')[0]+'.'+string.split(aligned,'.')[1][:2]
except Exception: aligned = 'NA'
countSampleMatrix[sample] = [str(estCounts),totalCounts,aligned]
dataset_name = string.replace(dataset_name,'exp.','')
dataset_name = string.replace(dataset_name,'.txt','')
to = export.ExportFile(root_dir+'/ExpressionInput/transcript.'+dataset_name+'.txt')
ico = export.ExportFile(root_dir+'/ExpressionInput/isoCounts.'+dataset_name+'.txt')
go = export.ExportFile(root_dir+'/ExpressionInput/exp.'+dataset_name+'.txt')
co = export.ExportFile(root_dir+'/ExpressionInput/counts.'+dataset_name+'.txt')
so = export.ExportFile(root_dir+'/ExpressionInput/summary.'+dataset_name+'.txt')
exportMatrix(to,headers,expMatrix) ### Export transcript expression matrix
exportMatrix(ico,headers,countMatrix,counts=True) ### Export transcript count matrix
try:
geneMatrix = calculateGeneTPMs(species,expMatrix) ### calculate combined gene level TPMs
countsGeneMatrix = calculateGeneTPMs(species,countMatrix) ### calculate combined gene level TPMs
exportMatrix(go,headers,geneMatrix) ### export gene expression matrix
exportMatrix(co,headers,countsGeneMatrix,counts=True) ### export gene expression matrix
except Exception:
print 'AltAnalyze was unable to summarize gene TPMs from transcripts, proceeding with transcripts.'
export.copyFile(root_dir+'/ExpressionInput/transcript.'+dataset_name+'.txt',root_dir+'/ExpressionInput/exp.'+dataset_name+'.txt')
exportMatrix(so,['SampleID','Estimated Counts','Total Fragments','Percent Aligned'],countSampleMatrix) ### export gene expression matrix
### Copy results to the Kallisto_Results directory
try: os.mkdir(root_dir+'/ExpressionInput/Kallisto_Results')
except: pass
try:
tf = root_dir+'/ExpressionInput/transcript.'+dataset_name+'.txt'
shutil.copyfile(tf,string.replace(tf,'ExpressionInput','ExpressionInput/Kallisto_Results'))
tf = root_dir+'/ExpressionInput/isoCounts.'+dataset_name+'.txt'
shutil.copyfile(tf,string.replace(tf,'ExpressionInput','ExpressionInput/Kallisto_Results'))
tf = root_dir+'/ExpressionInput/exp.'+dataset_name+'.txt'
shutil.copyfile(tf,string.replace(tf,'ExpressionInput','ExpressionInput/Kallisto_Results'))
tf = root_dir+'/ExpressionInput/counts.'+dataset_name+'.txt'
shutil.copyfile(tf,string.replace(tf,'ExpressionInput','ExpressionInput/Kallisto_Results'))
tf = root_dir+'/ExpressionInput/summary.'+dataset_name+'.txt'
shutil.copyfile(tf,string.replace(tf,'ExpressionInput','ExpressionInput/Kallisto_Results'))
except:
print traceback.format_exc()
pass
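### calculateGeneTPMs collapses the transcript-level matrix to gene level: transcripts are
### mapped to genes using the Ensembl-EnsTranscript association file (downloaded on demand
### if missing) and the per-sample values of all transcripts of a gene are summed.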
def calculateGeneTPMs(species,expMatrix):
import gene_associations
try:
gene_to_transcript_db = gene_associations.getGeneToUid(species,('hide','Ensembl-EnsTranscript'))
if len(gene_to_transcript_db)<10:
raise ValueError('Ensembl-EnsTranscript file missing, forcing download of this file')
except Exception:
try:
print 'Missing transcript-to-gene associations... downloading from Ensembl.'
from build_scripts import EnsemblSQL
db_version = unique.getCurrentGeneDatabaseVersion()
EnsemblSQL.getGeneTranscriptOnly(species,'Basic',db_version,'yes')
gene_to_transcript_db = gene_associations.getGeneToUid(species,('hide','Ensembl-EnsTranscript'))
except Exception:
from build_scripts import GeneSetDownloader
print 'Ensembl-EnsTranscripts required for gene conversion... downloading from the web...'
GeneSetDownloader.remoteDownloadEnsemblTranscriptAssocations(species)
gene_to_transcript_db = gene_associations.getGeneToUid(species,('hide','Ensembl-EnsTranscript'))
if len(gene_to_transcript_db)<10:
print 'NOTE: No valid Ensembl-EnsTranscripts available, proceeding with the analysis of transcripts rather than genes...'
from import_scripts import OBO_import
transcript_to_gene_db = OBO_import.swapKeyValues(gene_to_transcript_db)
gene_matrix = {}
present_gene_transcripts={}
for transcript in expMatrix:
if '.' in transcript:
transcript_alt = string.split(transcript,'.')[0]
else:
transcript_alt = transcript
if transcript_alt in transcript_to_gene_db:
gene = transcript_to_gene_db[transcript_alt][0]
try: present_gene_transcripts[gene].append(transcript)
except Exception: present_gene_transcripts[gene] = [transcript]
else: pass ### could keep track of the missing transcripts
for gene in present_gene_transcripts:
gene_values = []
for transcript in present_gene_transcripts[gene]:
gene_values.append(map(float,expMatrix[transcript]))
        gene_tpms = [sum(value) for value in zip(*gene_values)] ### sum of all transcript TPMs per sample
gene_tpms = map(str,gene_tpms)
gene_matrix[gene] = gene_tpms
if len(gene_matrix)>0:
return gene_matrix
else:
print "NOTE: No valid transcript-gene associations available... proceeding with Transcript IDs rather than gene."
return expMatrix
def exportMatrix(eo,headers,matrix,counts=False):
eo.write(string.join(headers,'\t')+'\n')
for gene in matrix:
values = matrix[gene]
if counts:
values = map(str,map(int,map(float,values)))
eo.write(string.join([gene]+values,'\t')+'\n')
eo.close()
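### importTPMs parses one kallisto abundance file; each data row is expected to hold five
### tab-separated fields (target_id, length, eff_length, est_counts, tpm), matching the
### unpacking below. TPM and estimated-count values are appended per transcript, and the
### total estimated counts are accumulated per sample.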
def importTPMs(sample,input_path,expMatrix,countMatrix,countSampleMatrix):
firstLine=True
for line in open(input_path,'rU').xreadlines():
data = cleanUpLine(line)
if firstLine:
firstLine=False
header = string.split(data,'\t')
else:
target_id,length,eff_length,est_counts,tpm = string.split(data,'\t')
try: float(est_counts);
except Exception: ### nan instead of float found due to lack of alignment
est_counts = '0.0'
tpm = '0.0'
if '.' in target_id:
target_id = string.split(target_id,'.')[0] ### Ensembl isoform IDs in more recent Ensembl builds
try: expMatrix[target_id].append(tpm)
except Exception: expMatrix[target_id]=[tpm]
try: countSampleMatrix[sample]+=float(est_counts)
except Exception: countSampleMatrix[sample]=float(est_counts)
try: countMatrix[target_id].append(est_counts)
except Exception: countMatrix[target_id]=[est_counts]
return expMatrix,countMatrix,countSampleMatrix
def importTotalReadCounts(sample,input_path,sample_total_counts):
### Import from Kallisto Json file
for line in open(input_path,'rU').xreadlines():
data = cleanUpLine(line)
if "n_processed: " in data:
total = string.split(data,"n_processed: ")[1]
total = string.split(total,',')[0]
sample_total_counts[sample]=total
return sample_total_counts
def findPairs(fastq_paths):
#fastq_paths = ['/Volumes/test/run0718_lane12_read1_index701=Kopan_RBP_02_14999.fastq.gz','/Volumes/run0718_lane12_read2_index701=Kopan_RBP_02_14999.fastq.gz']
import export
read_notation=0
under_suffix_notation=0
suffix_notation=0
equal_notation=0
suffix_db={}
for i in fastq_paths:
        if 'read1' in i or 'read2' in i or 'pair1' in i or 'pair2' in i or 'R1' in i or 'R2' in i:
read_notation+=1
f = export.findFilename(i)
if 'fastq' in f:
name = string.split(f,'fastq')[0]
elif 'FASTQ' in f:
name = string.split(f,'FASTQ')[0]
elif 'fq' in f:
name = string.split(f,'fq')[0]
if '_1.' in name or '_2.' in name:
under_suffix_notation+=1
elif '1.' in name or '2.' in name:
suffix_notation+=1
suffix_db[name[-2:]]=[]
if '=' in name:
equal_notation+=1
if read_notation==0 and suffix_notation==0 and under_suffix_notation==0:
new_names={}
for i in fastq_paths:
if '/' in i or '\\' in i:
n = export.findFilename(i)
if '=' in n:
n = string.split(n,'=')[1]
new_names[n] = [i]
### likely single-end samples
return new_names, 'single'
else:
new_names={}
paired = 'paired'
if equal_notation==len(fastq_paths):
for i in fastq_paths:
name = string.split(i,'=')[-1]
name = string.replace(name,'.fastq.gz','')
name = string.replace(name,'.fastq','')
name = string.replace(name,'.FASTQ.gz','')
name = string.replace(name,'.FASTQ','')
name = string.replace(name,'.fq.gz','')
name = string.replace(name,'.fq','')
if '/' in name or '\\' in name:
name = export.findFilename(name)
if '=' in name:
name = string.split(name,'=')[1]
try: new_names[name].append(i)
except Exception: new_names[name]=[i]
else:
for i in fastq_paths:
if suffix_notation == len(fastq_paths) and len(suffix_db)==2: ### requires that files end in both .1 and .2
pairs = ['1.','2.']
else:
pairs = ['-read1','-read2','-pair1','-pair2','_read1','_read2','_pair1','_pair2','read1','read2','pair1','pair2','_1.','_2.','_R1','_R2','-R1','-R2','R1','R2']
n=str(i)
n = string.replace(n,'fastq.gz','')
n = string.replace(n,'fastq','')
for p in pairs: n = string.replace(n,p,'')
if '/' in n or '\\' in n:
n = export.findFilename(n)
if '=' in n:
n = string.split(n,'=')[1]
if n[-1]=='.':
                n = n[:-1] ###remove the trailing period
try: new_names[n].append(i)
except Exception: new_names[n]=[i]
for i in new_names:
if len(new_names[i])>1:
pass
else:
paired = 'single'
new_names = checkForMultipleLanes(new_names)
return new_names, paired
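### Illustrative example for checkForMultipleLanes (sample names are placeholders): keys
### such as 'Sample1_L001_001' and 'Sample1_L002_001' are treated as lane-split runs of the
### same library and collapsed into a single 'Sample1' entry whose FASTQ lists are merged.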
def checkForMultipleLanes(new_names):
""" This function further aggregates samples run across multiple flowcells """
read_count = 0
lane_count = 0
updated_names={}
for sample in new_names:
reads = new_names[sample]
count=0
for read in reads:
read_count+=1
            if '_L00' in read and '_001' in read:
### assumes no more than 9 lanes/sample
count+=1
if len(reads) == count: ### Multiple lanes run per sample
lane_count+=count
if lane_count==read_count:
for sample in new_names:
sample_v1 = string.replace(sample,'_001','')
sample_v1 = string.split(sample_v1,'_L00')
if len(sample_v1[-1])==1: ### lane number
sample_v1 = sample_v1[0]
if sample_v1 in updated_names:
updated_names[sample_v1]+=new_names[sample]
else:
updated_names[sample_v1]=new_names[sample]
if len(updated_names)==0:
updated_names = new_names
return updated_names
def getFASTAFile(species):
fasta_folder = 'AltDatabase/'+species+'/SequenceData/'
fasta_files=[]
dir_list = read_directory(filepath(fasta_folder))
for file in dir_list:
if '.fa' in file:
fasta_files.append(filepath(fasta_folder)+file)
return fasta_files
if __name__ == '__main__':
samplesDiffering = 3
column_method = 'hopach'
species = 'Hs'
excludeCellCycle = False
platform = 'RNASeq'; graphic_links=[('','/Volumes/HomeBackup/CCHMC/PBMC-10X/ExpressionInput/SamplePrediction/DataPlots/Clustering-33k_CPTT_matrix-CORRELATED-FEATURES-iterFilt-hierarchical_cosine_cosine.txt')]
"""
graphic_links,new_results_file = correlateClusteredGenes(platform,graphic_links[-1][-1][:-4]+'.txt',
numSamplesClustered=samplesDiffering,excludeCellCycle=excludeCellCycle,graphics=graphic_links,
ColumnMethod=column_method, transpose=True, includeMoreCells=True)
"""
import UI; import multiprocessing as mlp
#runKallisto('Mm','BoneMarrow','/Users/saljh8/Desktop/dataAnalysis/SalomonisLab/altanalyze/Mm-FASTQ','/Users/saljh8/Desktop/dataAnalysis/SalomonisLab/altanalyze/Mm-FASTQ',mlp);sys.exit()
runKallisto('Hs','BreastCancer','/Users/saljh8/Desktop/dataAnalysis/SalomonisLab/BreastCancerDemo/FASTQs/input','/Users/saljh8/Desktop/dataAnalysis/SalomonisLab/BreastCancerDemo/FASTQs/input',mlp);sys.exit()
results_file = '/Users/saljh8/Desktop/dataAnalysis/SalomonisLab/l/July-2017/PSI/test/Clustering-exp.round2-Guide3-hierarchical_cosine_correlation.txt'
#correlateClusteredGenesParameters(results_file,rho_cutoff=0.3,hits_cutoff=4,hits_to_report=50,ReDefinedClusterBlocks=True,filter=True)
#sys.exit()
#correlateClusteredGenes('exons',results_file,stringency='strict',rhoCutOff=0.6);sys.exit()
#sys.exit()
species='Hs'; platform = "3'array"; vendor = "3'array"
#FeatureCounts('/Users/saljh8/Downloads/subread-1.5.2-MaxOSX-x86_64/annotation/mm10_AltAnalyze.txt', '/Users/saljh8/Desktop/Grimes/GEC14074/Grimes_092914_Cell12.bam')
#sys.exit()
import UI; import multiprocessing as mlp
gsp = UI.GeneSelectionParameters(species,platform,vendor)
gsp.setGeneSet('None Selected')
gsp.setPathwaySelect('')
gsp.setGeneSelection('')
gsp.setJustShowTheseIDs('')
gsp.setNormalize('median')
gsp.setSampleDiscoveryParameters(1,50,4,4,
True,'gene','protein_coding',False,'cosine','hopach',0.4)
#expFile = '/Users/saljh8/Desktop/Grimes/KashishNormalization/test/Original/ExpressionInput/exp.CombinedSingleCell_March_15_2015.txt'
expFile = '/Volumes/My Passport/salomonis2/SRP042161_GBM-single-cell/bams/ExpressionInput/exp.GBM_scRNA-Seq-steady-state.txt'
#singleCellRNASeqWorkflow('Hs', "RNASeq", expFile, mlp, parameters=gsp);sys.exit()
filename = '/Users/saljh8/Desktop/dataAnalysis/Collaborative/Grimes/Trumpp-HSC-2017/counts.rawTrumpp.txt'
filename = '/Volumes/salomonis2/Erica-data/GSE98451/counts.GSE98451_uterus_single_cell_RNA-Seq_counts-Ensembl.txt'
#fastRPKMCalculate(filename);sys.exit()
#calculateRPKMsFromGeneCounts(filename,'Mm',AdjustExpression=False);sys.exit()
#copyICGSfiles('','');sys.exit()
import multiprocessing as mlp
import UI
species='Mm'; platform = "3'array"; vendor = 'Ensembl'
gsp = UI.GeneSelectionParameters(species,platform,vendor)
gsp.setGeneSet('None Selected')
gsp.setPathwaySelect('')
gsp.setGeneSelection('')
gsp.setJustShowTheseIDs('')
gsp.setNormalize('median')
gsp.setSampleDiscoveryParameters(0,0,1.5,3,
False,'PSI','protein_coding',False,'cosine','hopach',0.35)
#gsp.setSampleDiscoveryParameters(1,1,4,3, True,'Gene','protein_coding',False,'cosine','hopach',0.5)
filename = '/Volumes/SEQ-DATA/AML_junction/AltResults/AlternativeOutput/Hs_RNASeq_top_alt_junctions-PSI-clust.txt'
#fastRPKMCalculate(filename);sys.exit()
results_file = '/Volumes/SEQ-DATA/Grimes/14018_gmp-pro/ExpressionInput/DataPlots/400 fold for at least 4 samples/Clustering-myeloblast-steady-state-correlated-features-hierarchical_euclidean_cosine-hopach.txt'
guideGeneFile = '/Volumes/SEQ-DATA/Grimes/14018_gmp-pro/ExpressionInput/drivingTFs-symbol.txt'
expFile = '/Users/saljh8/Desktop/Grimes/KashishNormalization/3-25-2015/ExpressionInput/exp.CombinedSingleCell_March_15_2015.txt'
expFile = '/Users/saljh8/Desktop/dataAnalysis/Mm_Kiddney_tubual/ExpressionInput/exp.E15.5_Adult_IRI Data-output.txt'
expFile = '/Users/saljh8/Desktop/PCBC_MetaData_Comparisons/temp/C4Meth450-filtered-SC-3_regulated.txt'
expFile = '/Volumes/SEQ-DATA/Grimeslab/TopHat/AltResults/AlternativeOutput/Mm_RNASeq_top_alt_junctions-PSI-clust-filter.txt'
expFile = '/Users/saljh8/Documents/L_TargetPSIFiles/exp.TArget_psi_noif_uncorr_03-50missing-12high.txt'
expFile = '/Volumes/BOZEMAN2015/Hs_RNASeq_top_alt_junctions-PSI-clust-filter.txt'
singleCellRNASeqWorkflow('Hs', "exons", expFile, mlp, exp_threshold=0, rpkm_threshold=0, parameters=gsp);sys.exit()
#expFile = '/Users/saljh8/Desktop/Grimes/AltSplice/Gmp-cluster-filter.txt'
#singleCellRNASeqWorkflow('Mm', "exons", expFile, mlp, exp_threshold=0, rpkm_threshold=0, parameters=gsp);sys.exit()
#expFile = '/Users/saljh8/Downloads/methylation/ExpressionInput/exp.female-steady-state.txt'
#singleCellRNASeqWorkflow('Hs', 'RNASeq', expFile, mlp, exp_threshold=50, rpkm_threshold=5) # drivers=guideGeneFile)
#sys.exit()
#correlateClusteredGenes(results_file);sys.exit()
#reformatExonFile('Hs','exon',True);sys.exit()
filename = '/Volumes/Time Machine Backups/dataAnalysis/PCBC_Sep2013/C4-reference/ExpressionInput/counts.C4.txt'
#fastRPKMCalculate(filename);sys.exit()
file1 = '/Volumes/My Passport/dataAnalysis/CardiacRNASeq/BedFiles/ExpressionInput/exp.CardiacRNASeq.txt'
file2 = '/Volumes/Time Machine Backups/dataAnalysis/PCBC_Sep2013/C4-reference/ReferenceComps/ExpressionInput/counts.C4.txt'
#getHighExpNovelExons('Hs',file1);sys.exit()
#mergeCountFiles(file1,file2); sys.exit()
import UI
test_status = 'yes'
data_type = 'ncRNA'
data_type = 'mRNA'
array_type = 'RNASeq'
array_type = 'junction'
species = 'Hs' ### edit this
summary_results_db = {}
root_dir = '/Volumes/Time Machine Backups/dataAnalysis/Human Blood/Exon/Multiple Sclerosis/Untreated_MS-analysis/'
#root_dir = '/Volumes/Time Machine Backups/dataAnalysis/Human Blood/Exon/Multiple Sclerosis/2-3rds_training-untreated/'
root_dir = '/Volumes/SEQ-DATA/Grimes/14018_gmp-pro/400-original/'
#root_dir = '/Volumes/My Passport/dataAnalysis/PCBC_Dec2013/All/bedFiles/'
root_dir = '/Users/saljh8/Desktop/dataAnalysis/HTA2.0 Files/'
#summary_results_db['Hs_Junction_d14_vs_d7.p5_average-ASPIRE-exon-inclusion-results.txt'] = [] ### edit this
#summary_results_db['Hs_Junction_d14_vs_d7.p5_average-splicing-index-exon-inclusion-results.txt'] = [] ### edit this
results_dir = root_dir +'AltResults/AlternativeOutput/'
dir_list = read_directory(results_dir)
for i in dir_list:
if '_average' in i:
comparison, end = string.split(i,'_average')
if '-exon-inclusion-results.txt' in i: summary_results_db[comparison]=[]
compareExonAndJunctionResults(species,array_type,summary_results_db,root_dir); sys.exit()
fl = UI.ExpressionFileLocationData('','','',''); fl.setCELFileDir(loc); fl.setRootDir(loc)
exp_file_location_db={}; exp_file_location_db['test']=fl
alignJunctionsToEnsembl(species,exp_file_location_db,'test'); sys.exit()
getEnsemblAssociations(species,data_type,test_status,'yes'); sys.exit()
| [
"[email protected]"
] | |
4bbb61036ed3e1205a84859392f799268266563b | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_199/3230.py | 8f3a1c414936539dd2e442282a4b352c344049da | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,128 | py | '''
Created on Apr 8, 2016
@author: Thomas
'''
import re
import sys
def flip_stack(s):
'''flip the provided stack/substack of pancakes'''
# replace with temporary character
s = s.replace('+', 't')
# switch - for +
s = s.replace('-', '+')
# switch + for -
s = s.replace('t', '-')
return s
def flip(stack, k):
start = stack.find("-")
end = (start + k)
past_end = end - (len(stack) - 1)
if past_end > 0:
start -= past_end
end -= past_end
s_sub = stack[start:end]
stack = stack[:start] + flip_stack(s_sub) + stack[end:]
return stack
def flip_decision(stack, k, num_flips=0):
'''decide what to flip, do the flip, and continue until all happy faces'''
print stack
if "-" in stack:
# Not all Happy Face Pancakes
if ('-' * k) in stack:
num_occ = stack.count('-' * k)
stack = stack.replace(('-' * k), ('+' * k))
num_flips += num_occ
elif stack.find("-") >= 0:
print "pre" + stack
stack = flip(stack, k)
num_flips += 1
print "pos" + stack
if num_flips > len(stack):
return "IMPOSSIBLE"
return flip_decision(stack, k, num_flips)
else:
return num_flips
if __name__ == '__main__':
out = {}
with open("A-small-attempt2.in", 'rb') as f:
lines = f.readlines()[1:]
for idx,line in enumerate(lines):
line = line.rstrip()
pancakes = re.search("[+-]+", line).group(0)
k = int(re.search("[0-9]+", line).group(0))
print line + str("||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
num_flips = flip_decision(pancakes, k)
out[idx+1] = num_flips
with open("output.out", 'w') as f:
f.write("")
for key, val in out.iteritems():
line = "Case #" + str(key) + ": " + str(val) + "\n"
f.write(line) | [
"[email protected]"
] | |
1e5e01255835cf813cfcd8b8b7518254fa4f2372 | e9173667eec2576782863a51ee63672f9b419297 | /p56.py | e81a5ce0a7732cbeb84457ad982a93ae8b778e8b | [] | no_license | sabareesh123/pythonprogamming | d41c23ddae183ded09eafde445273126c6b56fcf | 004f248aa2e25f2855d6ccafbb9244447bfb5873 | refs/heads/master | 2020-05-30T06:28:54.901030 | 2019-08-06T11:50:10 | 2019-08-06T11:50:10 | 189,580,451 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | #B
q=input()
count3=0
for j1 in q:
if (j1.isdigit() or j1.isalpha()):
count3+=1
if count3!=0:
print("Yes")
else:
print("No")
| [
"[email protected]"
] | |
50014adc7f08c346171e0509cbe789c8a4a21a53 | 098662ca9c95151e669753e246d7c158dccad201 | /temp/playAIvsAI100.py | 98a645521ab3b1639d89409f7460d2bd114f93f7 | [] | no_license | ava9/CS6700 | 92dd92081614c3596b880de204e72d3098d85f2f | bcad9094a48784635ae8e6081cea4267e3729df0 | refs/heads/master | 2021-03-27T18:13:43.680760 | 2017-05-22T16:19:07 | 2017-05-22T16:19:07 | 84,686,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,900 | py | from board import board
from human import humanInput
from decentAI import decentAI
from randomAI import randomAI
from inorderAI import inorderAI
from minimaxAI100 import minimaxAI100
from miniAIb100 import miniAIb100
from uctAI import uctAI
import random
class play:
p1 = 1
p2 = -1
current = p1
win = 0
userInput = humanInput()
b = board(7, 6)
def __init__(self):
self.current = self.p1
self.win = 0
def begin(self, whoGoesFirst):
#print "Would you like to go first? Enter: [y/n]"
#note that if user enters anything other than "n", user goes first
if (whoGoesFirst == 1):
valid = True
self.current = self.p2
ai = True
if (ai == True):
opp = minimaxAI100() #1
opp2 = miniAIb100() #-1
depth = 4
depth2 = 5
while(self.win == 0):
self.b.update()
if self.b.boardFull() == True:
break
if (ai == True):
if (self.current < 0):
#print "--------AI 2's Move-------"
# 1
self.b.move(self.current, opp2.chooseMove(self.b, self.current, depth2))
elif (self.current > 0):
self.b.move(self.current, opp.chooseMove(self.b, self.current, depth))
valid = True
#print "------AI 1's Move------"
# -1
elif not ai:
valid = True
self.win = self.b.winner(self.current)
if (valid == False):
continue
else:
self.current = self.current * -1
self.b.update()
# update print statement to print ai/user won
#print opp.uctTree
#opp.writeTree()
#print"The winner is "
print self.win
# playAgain = True
# count = 0
# while(playAgain == True):
# count = count + 1
p = play()
# if (count <=50):
p.begin(0)
# else:
# p.begin(1)
# #print "Would you like to play again? Enter: [y/n]"
# #note that if user enters anything other than "n", user plays again
# #if (raw_input() == "n"):
# #playAgain = False
# if (count > 100):
# playAgain = False
# else:
p.b.setUp()
| [
"[email protected]"
] | |
f696ff8d7d9240fa81168d2453e6f4cc46a5e659 | 555b9f764d9bca5232360979460bc35c2f5ad424 | /google/ads/google_ads/v2/proto/services/ad_group_ad_asset_view_service_pb2_grpc.py | 1f8bcffe7cd224c6699d2bc6688076e783c5247a | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | juanmacugat/google-ads-python | b50256163782bc0223bcd8b29f789d74f4cfad05 | 0fc8a7dbf31d9e8e2a4364df93bec5f6b7edd50a | refs/heads/master | 2021-02-18T17:00:22.067673 | 2020-03-05T16:13:57 | 2020-03-05T16:13:57 | 245,215,877 | 1 | 0 | Apache-2.0 | 2020-03-05T16:39:34 | 2020-03-05T16:39:33 | null | UTF-8 | Python | false | false | 2,412 | py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v2.proto.resources import ad_group_ad_asset_view_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_ad__group__ad__asset__view__pb2
from google.ads.google_ads.v2.proto.services import ad_group_ad_asset_view_service_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_ad__group__ad__asset__view__service__pb2
class AdGroupAdAssetViewServiceStub(object):
"""Proto file describing the ad group ad asset view service.
Service to fetch ad group ad asset views.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetAdGroupAdAssetView = channel.unary_unary(
'/google.ads.googleads.v2.services.AdGroupAdAssetViewService/GetAdGroupAdAssetView',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_ad__group__ad__asset__view__service__pb2.GetAdGroupAdAssetViewRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_ad__group__ad__asset__view__pb2.AdGroupAdAssetView.FromString,
)
class AdGroupAdAssetViewServiceServicer(object):
"""Proto file describing the ad group ad asset view service.
Service to fetch ad group ad asset views.
"""
def GetAdGroupAdAssetView(self, request, context):
"""Returns the requested ad group ad asset view in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_AdGroupAdAssetViewServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetAdGroupAdAssetView': grpc.unary_unary_rpc_method_handler(
servicer.GetAdGroupAdAssetView,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_ad__group__ad__asset__view__service__pb2.GetAdGroupAdAssetViewRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_ad__group__ad__asset__view__pb2.AdGroupAdAssetView.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v2.services.AdGroupAdAssetViewService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| [
"[email protected]"
] | |
31504b49f2f0d932d6d843c066bd85325a8a5feb | 00eb801cfd1e4b93f2db564ac8d0b30bdefca90b | /githubapi.py | 67ab73ed8cef04090d15749fbd4f7f7f9753e27c | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | cclauss/repo-tools | 048f376f17f49ebbb028a5ddcb354ea3fb2e3db4 | 3fb40a6da5191fbdda91f3a6a4b1b0b91d7cf18f | refs/heads/master | 2020-04-16T11:34:21.676617 | 2019-01-03T22:01:19 | 2019-01-03T22:01:19 | 165,541,674 | 0 | 0 | Apache-2.0 | 2019-01-13T18:49:20 | 2019-01-13T18:49:20 | null | UTF-8 | Python | false | false | 5,651 | py | """Access to information using the GitHub API."""
from __future__ import print_function
import operator
import pprint
import dateutil.parser
from urlobject import URLObject
from helpers import paginated_get, requests
from models import PullRequestBase
class JsonAttributeHelper(object):
@classmethod
def from_json(cls, issues_data):
for issue_data in issues_data:
if not cls.want_this_json_object(issue_data):
continue
yield cls(issue_data)
@classmethod
def want_this_json_object(cls, obj):
return True
def attribute_lookup(self, name, field_map, mapped_fields=None):
obj = None
for field_names, value in field_map:
if name in field_names:
obj = value
break
if obj is not None:
if mapped_fields:
name = mapped_fields.get(name, name)
val = self.deep_getitem(obj, name)
if name.endswith('_at') and val is not None:
val = dateutil.parser.parse(val)
return val
raise AttributeError("Nope: don't have {!r} attribute on {}".format(name, self.__class__.__name__))
def deep_getitem(self, val, key):
for k in key.split("."):
if val is None:
break
val = val[k]
return val
class PullRequest(JsonAttributeHelper, PullRequestBase):
def __init__(self, issue_data):
self._issue = issue_data
if 0:
print("---< Issue >---------------------------------")
pprint.pprint(issue_data)
self._pull = None
self.labels = [self.short_label(l['name']) for l in self.labels]
@classmethod
def want_this_json_object(cls, obj):
pr_url = obj.get('pull_request', {}).get('url')
return bool(pr_url)
ISSUE_FIELDS = {
'assignee_login',
'closed_at',
'comments',
'comments_url',
'created_at',
'labels',
'number',
'pull_request_url',
'pull_request_html_url',
'state',
'title',
'updated_at',
'user_html_url',
'user_login',
}
PULL_FIELDS = {
'additions',
'base_ref',
'changed_files',
'commits',
'deletions',
'merged_at',
}
MAPPED_FIELDS = {
'assignee_login': 'assignee.login',
'base_ref': 'base.ref',
'pull_request_url': 'pull_request.url',
'pull_request_html_url': 'pull_request.html_url',
'user_login': 'user.login',
'user_html_url': 'user.html_url',
}
def __getattr__(self, name):
return self.attribute_lookup(
name,
[(self.ISSUE_FIELDS, self._issue), (self.PULL_FIELDS, self._pull)],
self.MAPPED_FIELDS
)
def load_pull_details(self, pulls=None):
"""Get pull request details also.
`pulls` is a dictionary of pull requests, to perhaps avoid making
another request.
"""
if pulls:
self._pull = pulls.get(self.number)
if not self._pull:
self._pull = requests.get(self.pull_request_url).json()
if 0:
print("---< Pull Request >--------------------------")
pprint.pprint(self._pull)
class Comment(JsonAttributeHelper):
def __init__(self, obj):
self._comment = obj
FIELDS = {
'body',
'created_at',
'user_login',
}
def __getattr__(self, name):
return self.attribute_lookup(
name,
[(self.FIELDS, self._comment)],
{'user_login': 'user.login'},
)
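# Illustrative use of get_pulls() (not part of the original module; "org/repo" is a
# placeholder owner/repo string):
#   for pull in get_pulls("org/repo", state="open", pull_details="list"):
#       print(pull.number, pull.title)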
def get_pulls(owner_repo, labels=None, state="open", since=None, org=False, pull_details=None):
"""
Get a bunch of pull requests (actually issues).
`pull_details` indicates how much information you want from the associated
pull request document. None means just issue information is enough. "list"
means the information available when listing pull requests is enough. "all"
means you need all the details. See the GitHub API docs for the difference:
https://developer.github.com/v3/pulls/
"""
url = URLObject("https://api.github.com/repos/{}/issues".format(owner_repo))
if labels:
url = url.set_query_param('labels', ",".join(labels))
if since:
url = url.set_query_param('since', since.isoformat())
if state:
url = url.set_query_param('state', state)
url = url.set_query_param('sort', 'updated')
issues = PullRequest.from_json(paginated_get(url))
if org:
issues = sorted(issues, key=operator.attrgetter("org"))
pulls = None
if pull_details == "list":
issues = list(issues)
if issues:
# Request a bunch of pull details up front, for joining to. We can't
# ask for exactly the ones we need, so make a guess.
limit = int(len(issues) * 1.5)
pull_url = URLObject("https://api.github.com/repos/{}/pulls".format(owner_repo))
if state:
pull_url = pull_url.set_query_param('state', state)
pulls = { pr['number']: pr for pr in paginated_get(pull_url, limit=limit) }
for issue in issues:
if pull_details:
issue.load_pull_details(pulls=pulls)
issue.id = "{}.{}".format(owner_repo, issue.number)
yield issue
def get_comments(pull):
url = URLObject(pull.comments_url).set_query_param("sort", "created").set_query_param("direction", "desc")
comments = Comment.from_json(paginated_get(url))
return comments
| [
"[email protected]"
] | |
50a644d7e27f7cc2ea8d42c87fccae68515309ce | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_9/jnrkhy001/question2.py | d1392f0535dfa0b41ee30c71baaaf0b3483de830 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | # Khyati Jinerdeb
# Assignment 9
# Date: 17.05.2014
# to program a text file to make them of same length
def readF(filename):
newLines = [] #To create a list
file = open(filename,"r") #To open the file
lines = file.readlines() #To read lines into a string
file.close() #To close the file
for line in lines:
newLines.append(line.replace('\n','')) #To replace all the command for empty lines by an empty string(to remove all \n)
return newLines #To add it to the list also
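#setLines greedily packs the words of the cleaned lines into output lines that stay within
#the width w (each word is stored with a trailing space); an empty input line forces a new output line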
def setLines(l,w):
s = ''
tmpL = ['']
i = 0
for line in l:
if len(line) > 1:
words = line.split(' ')
for word in words:
if (len(tmpL[i])+len(word)) <= w :
tmpL[i] += word+" "
elif word == 'a':
tmpL[i] += word
else:
tmpL.append(word+" ")
i += 1
else:
tmpL.append('\n')
i += 1
return tmpL
def writeF(filename,lines):
#write contents of outList to output file
f = open(filename,'w')
for line in lines:
print(line,file=f)
f.close()
def main():
inputF = input("Enter the input filename:\n")
outputF = input("Enter the output filename:\n")
wid = eval(input("Enter the line width:\n"))
lines = readF(inputF)
lines = setLines(lines,wid)
writeF(outputF,lines)
main()
| [
"[email protected]"
] | |
4fae9ac1bbf6df3f073e7624c668d4427c7807a7 | 9cff940d26e8c7ca7431c6d6516072c65cefa00c | /testRNN.py | 16627f8f32b001e61b9a87911e6d7579e7942cfb | [] | no_license | sumitparw/NLP_Sentiment-Analysis-using-lstm | 4a90dd842e24592b432ef47113fa1f17a2c0f2cf | 6c1cc7717999cb16089376fe27a1e48e5b8ce2c7 | refs/heads/master | 2020-12-26T22:44:08.935738 | 2020-02-01T20:21:53 | 2020-02-01T20:21:53 | 237,672,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,795 | py | import nltk
import random
import pandas as pd
from nltk.tokenize import word_tokenize
import string
import re
import numpy as np
import keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import accuracy_score,f1_score
from keras.models import Sequential
from keras.layers import Dense,Embedding,LSTM,Dropout
from keras.optimizers import Adam
class rnn():
word_dict=dict()
max_cap=80
def assign_label(self,x):
if x[2] < 3.0 : return "negative"
elif x[2] > 3.0 : return "positive"
else: return "neutral"
def clean_document(self,doco):
punctuation = string.punctuation + '\n\n';
punc_replace = ''.join([' ' for s in punctuation]);
doco_clean = doco.replace('-', ' ');
doco_alphas = re.sub(r'\W +', '', doco_clean)
trans_table = str.maketrans(punctuation, punc_replace);
doco_clean = ' '.join([word.translate(trans_table) for word in doco_alphas.split(' ')]);
doco_clean = doco_clean.split(' ');
doco_clean = [word.lower() for word in doco_clean if len(word) > 0];
return doco_clean;
def return_train_test_data_rnn(self,file_path):
df = pd.read_csv(file_path,header=None)
df = df[df.columns[2:4]]
df[2] = df.apply(self.assign_label, axis=1)
inx = df[df[2]=='neutral'].index
df.drop(inx,inplace=True)
df[2] = df[2].map({'negative': 0, 'positive': 1})
reviews = np.array(df[3].to_list())
labels = np.array(df[2].to_list())
review_cleans = [self.clean_document(doc) for doc in reviews];
sentences = [' '.join(r) for r in review_cleans]
tokenizer = Tokenizer();
tokenizer.fit_on_texts(sentences);
text_sequences = np.array(tokenizer.texts_to_sequences(sentences));
sequence_dict = tokenizer.word_index;
self.word_dict = dict((num, val) for (val, num) in sequence_dict.items());
reviews_encoded = [];
for i, review in enumerate(review_cleans):
reviews_encoded.append([sequence_dict[x] for x in review]);
lengths = [len(x) for x in reviews_encoded];
with plt.xkcd():
plt.hist(lengths, bins=range(100))
max_cap = 80;
X = pad_sequences(reviews_encoded, maxlen=max_cap, truncating='post')
Y = np.array([[0,1] if label == 0 else [1,0] for label in labels])
np.random.seed(1024);
random_posits = np.arange(len(X))
np.random.shuffle(random_posits);
# Shuffle X and Y
X = X[random_posits];
Y = Y[random_posits];
# Divide the reviews into Training, Dev, and Test data.
train_cap = int(0.70 * len(X));
dev_cap = int(0.85 * len(X));
X_train, Y_train = X[:train_cap], Y[:train_cap];
X_dev, Y_dev = X[train_cap:dev_cap], Y[train_cap:dev_cap];
X_test, Y_test = X[dev_cap:], Y[dev_cap:]
return X_train,Y_train,X_dev,Y_dev,X_test,Y_test
def build_model(self):
model = Sequential();
model.add(Embedding(len(self.word_dict), self.max_cap, input_length=self.max_cap));
model.add(LSTM(80, return_sequences=True, recurrent_dropout=0.2));
model.add(Dropout(0.2))
model.add(LSTM(80, recurrent_dropout=0.2));
model.add(Dense(80, activation='relu'));
model.add(Dense(2, activation='softmax'));
print(model.summary());
return model
def train_model(self,X_train,Y_train,X_dev,Y_dev):
model= self.build_model()
optimizer = Adam(lr=0.01, decay=0.001);
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# fit model
model.fit(X_train, Y_train, batch_size=600, epochs=1, validation_data=(X_dev, Y_dev))
return model
def predict(self,X_test,model):
predictions = model.predict_classes(X_test)
return predictions
def accuracy(self,predictions,X_test,Y_test,model):
# Convert Y_test to the same format as predictions
actuals = [0 if y[0] == 1 else 1 for y in Y_test]
        print("f1_score:", f1_score(actuals, predictions))
# Use SkLearn's Metrics module
return accuracy_score(predictions, actuals)
| [
"[email protected]"
] | |
8fa99a71c36d7d8bfe0c03af05d83e0f8ab3dbb3 | 56cdf15ecf8621a7d64eee1fcac8c05a7bb227b4 | /setup.py | ca019200306a195b1dbfad27c4b3d04011638dde | [
"Apache-2.0"
] | permissive | pombredanne/google-resumable-media-python | 076ec91e0b81999c0571009d30eb4649f4be3e39 | c158f0f2e43d2730350bd1fbcce4ddde35c4aa96 | refs/heads/master | 2021-07-11T04:37:58.824232 | 2017-10-12T16:44:05 | 2017-10-13T17:17:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,069 | py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import setuptools
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj:
README = file_obj.read()
REQUIREMENTS = [
'six',
]
EXTRAS_REQUIRE = {
'requests': [
'requests >= 2.18.0, < 3.0.0dev',
],
}
setuptools.setup(
name='google-resumable-media',
version='0.3.0',
description='Utilities for Google Media Downloads and Resumable Uploads',
author='Google Cloud Platform',
author_email='[email protected]',
long_description=README,
namespace_packages=['google'],
scripts=[],
url='https://github.com/GoogleCloudPlatform/google-resumable-media-python',
packages=setuptools.find_packages(exclude=('tests*',)),
license='Apache 2.0',
platforms='Posix; MacOS X; Windows',
include_package_data=True,
zip_safe=False,
install_requires=REQUIREMENTS,
extras_require=EXTRAS_REQUIRE,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet',
],
)
| [
"[email protected]"
] | |
1009696b09ebe1c6c83517db0ed3e096f49cd272 | 697c7514abc80e53dab70f22177c649d499500ce | /0015/A0015.py | e5f4ea89406e03fafc27a45f034c554f39ce471a | [] | no_license | aliceqin12/ShowMeTheCode | c2fbcf82090c7ccea47936f73c35efbfe927fc28 | 4d52e5b331912a6cc0f2dd842939067d0d2507d9 | refs/heads/master | 2021-07-01T19:24:04.602407 | 2017-09-20T02:09:59 | 2017-09-20T02:09:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | import json
import xlrd
import xlwt
def writeCityDataToExcel(city_data):
wb = xlwt.Workbook()
table = wb.add_sheet('city')
i = 0
for number in city_data:
city_name = city_data.get(number)
table.write(i, 0, number)
table.write(i, 1, city_name)
i += 1
wb.save('city.xls')
if __name__ == '__main__':
filename = 'city.txt'
with open(filename, 'r', encoding='utf-8') as f:
city_data = json.load(f)
writeCityDataToExcel(city_data) | [
"[email protected]"
] | |
fc78ea23f77df88d8c88ccf06ee7ba2efa3455b1 | d01d4fe61ff5161cfc00ff85fc0abc616b82f78e | /Programs/Oops/destuctor2.py | 70cf14616b802b7289e2df46787f867e5a102481 | [] | no_license | Susama91/Project | 8f14feadea104b6e258f9a3c4678e67da65c24ba | a580c29bf92403fc84c99514e918d8994126f7b1 | refs/heads/master | 2020-05-15T01:56:11.035480 | 2019-04-19T07:13:43 | 2019-04-19T07:13:43 | 182,039,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | class x:
def __init__(self):
print("in constructor of x")
def m1(self):
print("in m1 of x")
def __del__(self):
print("in destructor of x")
x1=x()
print(x1)
x2=x1
print(x2)
x3=x2
print(x3)
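# Rebinding x1, x2 and x3 below gradually drops the references to the first instance;
# its __del__ (the "in destructor of x" message) fires only when the last name still
# pointing at it, x3, is reassigned.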
x1=x()
print(x1)
x2=x()
print(x2)
x3=x()
print(x3)
| [
"[email protected]"
] | |
52e02810f632692cc57531ee6f8a11cd0b629405 | 8a45adaed54a171a508da5bd855d20ee727846f0 | /userauth/migrations/0004_auto_20191205_2057.py | 1f51eef4d9fdae5491ec712f3f3bc6e3f23e967a | [] | no_license | Aksa123/ecommerce | 4b73571eb92ec3b36a3321cd368fbe40874b68bc | 5de73daa318ab90cdf864600de6644266dc56ed5 | refs/heads/master | 2020-09-28T13:20:45.848574 | 2019-12-09T05:04:02 | 2019-12-09T05:04:02 | 226,787,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | # Generated by Django 2.2.7 on 2019-12-05 13:57
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('userauth', '0003_auto_20191205_2051'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='birthday',
field=models.DateField(default=datetime.datetime(2019, 12, 5, 13, 57, 55, 294801, tzinfo=utc)),
),
]
| [
"="
] | = |
95faccea24e6b2ab12d71dc79deb1d28e75712e8 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/tensorflow/python/keras/applications/xception.py | d4a2c3f668cfdab2d8c3b86a1e4fca1705ffeb94 | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:5fc4a23b6bf0c06ea81c0d7f377ba3b35b99e19b6f9f35c06bdc98df2ce6e102
size 1649
| [
"github@cuba12345"
] | github@cuba12345 |
6c2b89f0c4abae0a19d4ed88a0ec61d2b0381a44 | a3c16ce3fedb4c2b0b4fbe002738b423e58f3c2e | /venv/Scripts/temboo/Library/Google/Directions/GetBicyclingDirections.py | cde366c29ff3ab8d08f58f44735e9d9e575eb3cd | [] | no_license | DevHyperCoder/News_Manager | 45b05061db5be8bb32f1485ff5480d4aa6145b3f | 88b54c3d1995b8f015dc03ac30657e6f9777f3aa | refs/heads/master | 2020-08-21T14:22:57.248732 | 2019-10-19T09:14:45 | 2019-10-19T09:14:45 | 216,178,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,411 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# GetBicyclingDirections
# Generate biking directions between two locations, denoted by address or latitude/longitude coordinates.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetBicyclingDirections(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetBicyclingDirections Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetBicyclingDirections, self).__init__(temboo_session, '/Library/Google/Directions/GetBicyclingDirections')
def new_input_set(self):
return GetBicyclingDirectionsInputSet()
def _make_result_set(self, result, path):
return GetBicyclingDirectionsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetBicyclingDirectionsChoreographyExecution(session, exec_id, path)
class GetBicyclingDirectionsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetBicyclingDirections
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Google.)
"""
super(GetBicyclingDirectionsInputSet, self)._set_input('APIKey', value)
def set_Alternatives(self, value):
"""
Set the value of the Alternatives input for this Choreo. ((optional, string) If set to true, additional routes will be returned.)
"""
super(GetBicyclingDirectionsInputSet, self)._set_input('Alternatives', value)
def set_Destination(self, value):
"""
Set the value of the Destination input for this Choreo. ((required, string) Enter the address or latitude/longitude coordinates from which directions will be generated (i.e."104 Franklin St, New York, NY" or "40.7160,-74.0037").)
"""
super(GetBicyclingDirectionsInputSet, self)._set_input('Destination', value)
def set_Origin(self, value):
"""
Set the value of the Origin input for this Choreo. ((required, string) Enter the address or latitude/longitude coordinates from which directions will be computed (i.e."104 Franklin St, New York, NY" or "40.7160,-74.0037").)
"""
super(GetBicyclingDirectionsInputSet, self)._set_input('Origin', value)
def set_Region(self, value):
"""
Set the value of the Region input for this Choreo. ((optional, string) Enter the region code for the directions, specified as a ccTLD two-character value.)
"""
super(GetBicyclingDirectionsInputSet, self)._set_input('Region', value)
def set_Sensor(self, value):
"""
Set the value of the Sensor input for this Choreo. ((optional, boolean) Indicates whether or not the directions request is from a device with a location sensor. Value must be either 1 or 0. Defaults to 0 (false).)
"""
super(GetBicyclingDirectionsInputSet, self)._set_input('Sensor', value)
def set_Units(self, value):
"""
Set the value of the Units input for this Choreo. ((optional, string) Specify the units to be used when displaying results. Options include, metric, or imperial.)
"""
super(GetBicyclingDirectionsInputSet, self)._set_input('Units', value)
def set_Waypoints(self, value):
"""
Set the value of the Waypoints input for this Choreo. ((optional, string) Specify route waypoints, either by address, or latitude/longitude coordinates.)
"""
super(GetBicyclingDirectionsInputSet, self)._set_input('Waypoints', value)
class GetBicyclingDirectionsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetBicyclingDirections Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
"""
return self._output.get('Response', None)
class GetBicyclingDirectionsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetBicyclingDirectionsResultSet(response, path)
| [
"[email protected]"
] | |
24fcdb8ccf0280fb8ba2ed75a3851675935d6a4f | 313bb88c43d74995e7426f9482c6c8e670fdb63c | /11_OOP/smartwatch.py | bb0fa9b462d04264234a57f615c8c9b8574a8d84 | [] | no_license | martakedzior/python-course | 8e93fcea3e9e1cb51920cb1fcf3ffbb310d1d654 | 3af2296c2092023d91ef5ff3b4ef9ea27ec2f227 | refs/heads/main | 2023-05-06T07:26:58.452520 | 2021-05-26T16:50:26 | 2021-05-26T16:50:26 | 339,822,876 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | class UsefulStuff:
def __init__(self, name):
print(name, 'is used to make life easier!')
class Watch(UsefulStuff):
def __init__(self, watch_name):
print(watch_name, "is small and convenient")
super().__init__(watch_name)
class Phone(UsefulStuff):
def __init__(self, phone_name):
print(phone_name, "can make a call")
super().__init__(phone_name)
class SmartWatch(Watch, Phone):
def __init__(self):
print('Smartwatch is great!')
super().__init__('Smartwatch')
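# The method resolution order here is SmartWatch -> Watch -> Phone -> UsefulStuff, so the
# chain of cooperative super().__init__ calls prints the smartwatch, watch, phone and
# "useful stuff" messages in that order when SmartWatch() below is constructed.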
sw = SmartWatch() | [
"[email protected]"
] | |
ee2492091381225d0905da86eba4bf1a846bc850 | eea70db78a214217ba41801d870aba127ba56c56 | /Code/Untested SarsaZero and SarsaLambda/ARL_package/CodeFramework/PlotAgent.py | 7ef19024638e26d23ff9fb33f386856cef932710 | [] | no_license | 356255531/poppyProject | 191b9a9e29817e3d6ce8c85dd5c0702982dd7157 | 678044afffa6390fac8cb402099bd32ae72d8a33 | refs/heads/master | 2021-01-21T14:32:47.373344 | 2016-07-12T19:42:25 | 2016-07-12T19:42:25 | 58,334,432 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,276 | py | import numpy as np
from numpy.linalg import norm
import matplotlib.pyplot as plt
from copy import deepcopy
class PlotAgent(object):
def __init__(self, dimension):
"""
		Diagram:
		Horizontal: Episode Number
		Vertical:
		1. Step Number
		2. Total Reward
		3. If reach center
		4. Q function difference every 100 episodes
		Graph:
		Policy after 100 Episodes
"""
self.dimension = dimension
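	# get_qFun_diff returns, for each pair of consecutive Q-function snapshots in
	# qFuncHistory, the Euclidean (L2) distance over all state-action values, which
	# plot_diag later plots as a convergence indicator.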
def get_qFun_diff(self, qFuncHistory):
qFunDiff = []
qFuncPre = []
qFuncCurrent = []
for i in qFuncHistory:
qFuncPre = qFuncCurrent
qFuncCurrent = i
if len(list(qFuncPre)) == 0:
continue
temp = 0
for x, y in zip(qFuncPre.values(), qFuncCurrent.values()):
temp += (np.array(x.values(), dtype=float) - np.array(y.values(), dtype=float)) ** 2
qFunDiff.append(np.sqrt(sum(temp)))
qFuncPre = qFuncCurrent
return qFunDiff
def plot_policy_graph(self, policyHistory):
for singlePolicy in reversed(policyHistory):
soaList = []
for state in singlePolicy.keys():
if state == (0, 0):
continue
action = singlePolicy[state]
x, y = state
m, n = action
soaList.append([x, y, m, n])
X,Y,U,V = zip(*soaList)
plt.figure()
ax = plt.gca()
ax.quiver(X,Y,U,V,angles='xy',scale_units='xy',scale=1)
ax.set_xlim([-list(self.dimension)[0] // 2,
list(self.dimension)[0] // 2 + 1])
ax.set_ylim([-list(self.dimension)[1] // 2,
list(self.dimension)[1] // 2 + 1])
break
plt.draw()
plt.show()
def plot_diag(self, diagInfo, qFuncDiff):
stepNumTrue = []
totalReward = []
ifReachCenter = []
for i in diagInfo:
stepNumTrue.append(list(i)[0])
totalReward.append(list(i)[1])
ifReachCenter.append(list(i)[2])
stepNumFalse = deepcopy(stepNumTrue)
for i in xrange(len(ifReachCenter)):
if ifReachCenter[i]:
stepNumFalse[i] = 0
else:
stepNumTrue[i] = 0
length = np.arange(1, len(diagInfo) + 1)
plt.subplot(3, 1, 1)
plt.plot(length, stepNumFalse, 'r')
plt.plot(length, stepNumTrue, 'b')
plt.title('How many steps does learning algorithm need to reach Terminal or failed')
plt.ylabel('Step Number')
# plt.subplot(4, 1, 2)
# plt.plot(length, ifReachCenter, 'r.-')
# plt.title('If the agent reach the goal state')
# plt.ylabel('1 for Reaching')
# plt.xlabel('Episoid')
# # ifReachCenter = np.array(ifReachCenter, dtype=bool) * 1
# # ifReachCenter = np.array(ifReachCenter, dtype=int)
plt.subplot(3, 1, 2)
plt.plot(length, totalReward, 'k')
plt.title('How much is the total reward in one episoid')
plt.ylabel('Reward')
length = np.arange(1, len(qFuncDiff) + 1)
plt.subplot(3, 1, 3)
plt.plot(length, qFuncDiff, 'g-')
plt.title('How big is the difference of Q function in every 10 episoids')
plt.ylabel('Difference')
plt.show()
def plot(self, diagInfo, qFuncHistory, policyHistory):
qFuncDiff = self.get_qFun_diff(qFuncHistory)
self.plot_diag(diagInfo, qFuncDiff)
self.plot_policy_graph(policyHistory)
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
soa =np.array( [ [0,0,3,2], [0,0,1,1],[0,0,9,9]])
X,Y,U,V = zip(*soa)
plt.figure()
ax = plt.gca()
ax.quiver(X,Y,U,V,angles='xy',scale_units='xy',scale=1)
ax.set_xlim([-1,10])
ax.set_ylim([-1,10])
plt.draw()
plt.show() | [
"[email protected]"
] | |
4a3a05b184e11d4858f1f956115c5dd9c78fc203 | 3d8027f2ef3f723e13b31e056d0c03da4ed74aa8 | /08-09-20(Day-14)/EmailSend/FirstApp/migrations/0002_auto_20200909_1645.py | 7a1f067a5b808a0614207c3246015e92d575d536 | [] | no_license | satyavani462/Django-Batch5 | 2efbc99223008954896667dee46d2606b6559c82 | 1b975bc21e7fdeed11bef7505d22d4fed126656c | refs/heads/master | 2022-12-08T19:57:33.996903 | 2020-09-10T14:23:15 | 2020-09-10T14:23:15 | 294,688,262 | 1 | 0 | null | 2020-09-11T12:22:16 | 2020-09-11T12:22:15 | null | UTF-8 | Python | false | false | 563 | py | # Generated by Django 3.0.8 on 2020-09-09 11:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='emailuser',
name='email',
field=models.EmailField(max_length=50, unique=True),
),
migrations.AlterField(
model_name='emailuser',
name='username',
field=models.CharField(max_length=50, unique=True),
),
]
| [
"[email protected]"
] | |
3ac8aea314fa6a6da561563e62a994edd3cbe06d | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/GZip/Scripts/UnzipGZFile/UnzipGZFile.py | 2e9d7d2a0e3b9413aa048c73acff6dc6785cf495 | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 2,896 | py | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import gzip
import re
import shutil
from os.path import isfile
ESCAPE_CHARACTERS = r'[/\<>"|?*]'
def escape_illegal_characters_in_file_name(file_name: str) -> str:
if file_name:
file_name = re.sub(ESCAPE_CHARACTERS, '-', file_name)
file_name = re.sub(r'-+', '-', file_name) # prevent more than one consecutive dash in the file name
return file_name
def gzip_file(fileEntryID: str):
entry_ids = argToList(fileEntryID)
file_names = list()
for entry_id in entry_ids:
res = demisto.executeCommand('getFilePath', {'id': entry_id})
if is_error(res):
raise DemistoException(
'Failed to get the file path for entry: ' + entry_id + ' the error message was ' + get_error(res))
filePath = res[0]['Contents']['path']
fileCurrentName = escape_illegal_characters_in_file_name(res[0]['Contents']['name'])
if not isfile(filePath): # in case that the user will send a directory
raise DemistoException(entry_id + ' is not a file. Please recheck your input.')
# Handling duplicate names.
if fileCurrentName in file_names:
name, ext = os.path.splitext(fileCurrentName)
i = 0
while fileCurrentName in file_names:
i += 1
fileCurrentName = f'{name} {i}{ext}'
# copying the file to current location
shutil.copy(filePath, fileCurrentName)
file_names.append(fileCurrentName)
unzippedgzFileNames = []
for file_name in file_names:
with gzip.open(file_name, 'r') as f_in, open(file_name[:-3], 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
with open(file_name[:-3], 'rb') as f:
file_data = f.read()
demisto.results(fileResult(file_name[:-3], file_data))
unzippedgzFileNames.append(file_name[:-3])
readable_output = tableToMarkdown(name="Unzipped GZ Files",
t=[{'Unzipped GZ File Names': unzippedgzFileNames, 'Original File Names': file_names}],
removeNull=True)
return CommandResults(
outputs_prefix="UnzipGZFile.UnzippedGZFiles",
outputs_key_field="UnzippedGZFiles",
outputs=unzippedgzFileNames,
readable_output=readable_output,
raw_response={'UnzippedGZFiles': unzippedgzFileNames},
)
def main():
try:
args = demisto.args()
entryID = args.get('entryID')
if not entryID:
raise DemistoException('You must set an entryID when using the unzip GZ script')
result = gzip_file(fileEntryID=entryID)
return_results(result)
except Exception as exc:
return_error(exc)
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| [
"[email protected]"
] | |
2d25dae11da6ffe93923bd7a829eedcbff38f671 | 79bc9a420df5c706b2ae06f4b75bf2bd2ba9646e | /emission/analysis/intake/segmentation/vij_implementation_unused.py | 53f57449538ebd08f98cbd7c85c09b44be874c46 | [
"BSD-3-Clause"
] | permissive | Andrew-Tan/e-mission-server | 7022786a13b4be87be62cfc2cc6d82543d063e5d | 91d59bee86e63d803e401f10f4b6a2502effedda | refs/heads/master | 2021-01-16T18:25:17.860723 | 2017-11-21T19:24:40 | 2017-11-21T19:24:40 | 100,073,534 | 0 | 0 | BSD-3-Clause | 2018-05-05T18:26:36 | 2017-08-11T22:13:44 | Jupyter Notebook | UTF-8 | Python | false | false | 25,332 | py | import urllib2
import csv
import math
import numpy
import datetime, pytz
from os import remove
from pymongo import MongoClient
import sys
print "old pythonpath = %s" % sys.path
sys.path.extend(['', '/home/ubuntu/anaconda/lib/python27.zip',
'/home/ubuntu/anaconda/lib/python2.7',
'/home/ubuntu/anaconda/lib/python2.7/plat-linux2',
'/home/ubuntu/anaconda/lib/python2.7/lib-tk',
'/home/ubuntu/anaconda/lib/python2.7/lib-old',
'/home/ubuntu/anaconda/lib/python2.7/lib-dynload',
'/home/ubuntu/anaconda/lib/python2.7/site-packages',
'/home/ubuntu/anaconda/lib/python2.7/site-packages/PIL',
'/home/ubuntu/anaconda/lib/python2.7/site-packages/setuptools-2.2-py2.7.egg'])
print "new pythonpath = %s" % sys.path
import numpy
# Procedure that takes as input strings denoting the tester name, test phone number, date and time,
# and a temporary file path name. Output is a list of lists containing GPS data collected over
# the last 24 hours for that tester and phone.
def getNewGPSData(testerName, phoneNum, lastUpdate, gpsFilePath):
url = 'http://' + phoneNum + 'gp.appspot.com/gaeandroid?query=1'
data = urllib2.urlopen(url)
localFile = open(gpsFilePath, 'w')
localFile.write(data.read())
localFile.close()
year = int(lastUpdate[0:4])
month = int(lastUpdate[4:6])
day = int(lastUpdate[6:8])
hours = int(lastUpdate[9:11])
minutes = int(lastUpdate[11:13])
seconds = int(lastUpdate[13:15])
endTime = 1000 * int(datetime.datetime(year, month, day, hours, minutes, seconds).strftime('%s'))
startTime = endTime - (24 * 60 * 60 * 1000)
gpsData = []
with open(gpsFilePath, 'rU') as csvfile:
for row in csv.reader(csvfile, delimiter = '\t'):
try:
if int(row[1]) >= startTime and int(row[1]) <= endTime:
tList = []
for element in row:
try:
tList.append(float(element))
except:
tList.append(element)
gpsData.append(tList)
except:
pass
gpsData = sorted(gpsData, key = lambda x: int(x[1]))
remove(gpsFilePath)
return gpsData
# Function that uses the haversine formula to calculate the 'great-circle' distance in meters
# between two points whose latitutde and longitude are known
def calDistance(point1, point2):
earthRadius = 6371000
dLat = math.radians(point1[0]-point2[0])
dLon = math.radians(point1[1]-point2[1])
lat1 = math.radians(point1[0])
lat2 = math.radians(point2[0])
a = (math.sin(dLat/2) ** 2) + ((math.sin(dLon/2) ** 2) * math.cos(lat1) * math.cos(lat2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = earthRadius * c
return d
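# Rough sanity check (coordinates invented for illustration): calDistance([37.8716, -122.2727],
# [37.8719, -122.2585]) returns the separation in meters, roughly 1.2-1.3 km for these two points.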
# Function that takes as input a point and a list of points, where a point is itself a list containing
# the elements in the row in the input file corresponding to that point. The function outputs the maximum
# distance, in meters, from the 95% CI around that point to the 95% CI around any point in the list of points
def calDistanceToPoint(point, points):
maxDistance = 0
for i in range(0, len(points)):
dist = calDistance(point[2:4], points[i][2:4]) - point[4] - points[i][4]
if dist > maxDistance:
maxDistance = dist
return maxDistance
# Function that takes as input two lists of points, where a point is itself a list containing
# the elements in the row in the input file corresponding to that point. The function outputs the
# distance, in meters, between the median points in the two lists
def calDistanceBetweenPoints(points1, points2):
latLon1, latLon2 = numpy.zeros(shape = (len(points1), 2)), numpy.zeros(shape = (len(points2), 2))
for i in range(0, len(points1)):
latLon1[i, 0] = points1[i][2]
latLon1[i, 1] = points1[i][3]
for i in range(0, len(points2)):
latLon2[i, 0] = points2[i][2]
latLon2[i, 1] = points2[i][3]
point1 = [numpy.median(latLon1[:, 0]), numpy.median(latLon1[:, 1])]
point2 = [numpy.median(latLon2[:, 0]), numpy.median(latLon2[:, 1])]
return calDistance(point1, point2)
# Procedure that takes as input the start and end points to an event, the list of events and holes,
# the list comprising the raw GPS data and the threshold for labelling a gap in the data a hole,
# and infers holes in the data and splits the event accordingly into multiple events
def inferHoles(eventStart, eventEnd, events, holes, gpsTraces, minSamplingRate):
j = eventStart + 1
while j <= eventEnd:
while (j < eventEnd and
gpsTraces[j][1] - gpsTraces[j - 1][1] < minSamplingRate):
j += 1
if gpsTraces[j][1] - gpsTraces[j - 1][1] >= minSamplingRate:
holes.append([j - 1, j])
if j - 1 > eventStart:
events.append([eventStart, j - 1])
else:
events.append([eventStart, j])
eventStart, j = j, j + 1
# Method that takes as input the list containing GPS data, called gpsTraces, and two empty lists,
# called trips and activities.
#
# Each element of trips is a tuple and corresponds to a particular trip. The elements of the tuple are the
# indices of the corresponding GPS data points in gpsTraces for where the trip began and ended, respectively.
# Similarly, each element of activities is a tuple and corresponds to a particular activity. The elements
# of the tuple are the indices of the corresponding GPS data point in gpsTraces for where the activity began
# and ended, respectively.
#
# An activity is defined as a set of GPS points over a minimum duration of minDuration milliseconds that fall within
# a circle of radius maxRadius meters. The minimum interval between successive activities must be at least
# minInterval milliseconds, for them to be recorded as separate activities.
#
# GPS traces whose accuracy is above gpsAccuracyThreshold meters are ignored.
def inferTripActivity(gpsTraces, minDuration, maxRadius, minSeparationDistance,
minSeparationTime, minSamplingRate, gpsAccuracyThreshold):
trips, activities, holes = [], [], []
# Infer activities
i = 0
while i < len(gpsTraces) - 1:
        # Skip over any low-accuracy points at the beginning
while i < len(gpsTraces) - 1 and gpsTraces[i][4] >= gpsAccuracyThreshold:
i += 1
# Create a collection of successive points that lie within a circle of radius maxRadius meters, such that no
# two consecutive points in space are separated by more than minSamplingRate milliseconds
j = i + 1
points = [gpsTraces[i]]
while (j < len(gpsTraces) and gpsTraces[j][4] < gpsAccuracyThreshold
and gpsTraces[j][1] - gpsTraces[j-1][1] < minSamplingRate
and calDistanceToPoint(gpsTraces[j], points) < maxRadius):
points.append(gpsTraces[j])
j += 1
        # Check for low-accuracy points
k = j
while k < len(gpsTraces) and gpsTraces[k][4] >= gpsAccuracyThreshold:
k += 1
if k > j:
if k < len(gpsTraces):
if calDistanceToPoint(gpsTraces[k], points) < maxRadius:
j = k + 1
# Check if the duration over which these points were collected exceeds minDuration milliseconds
if gpsTraces[j-1][1] - gpsTraces[i][1] > minDuration:
# Check if the activity is separated in space from previous activity by at least minSeparationDistance meters
# and separated in time by minSeparationTime milliseconds
if (len(activities) > 0 and gpsTraces[j-1][1] - gpsTraces[activities[-1][1]][1] < minSeparationTime
and calDistanceBetweenPoints(gpsTraces[activities[-1][0]:activities[-1][1]],
gpsTraces[i:j-1]) < minSeparationDistance):
activities[-1][-1] = j-1
else:
activities.append([i, j-1])
i = j - 1
else:
i += 1
if k == len(gpsTraces):
break
# Impute trips and identify holes in data
numActivities, newActivities = len(activities), []
if numActivities != 0:
# Check if the GPS log begins with a trip
if activities[0][0] != 0:
inferHoles(0, activities[0][0], trips, holes, gpsTraces, minSamplingRate)
# Interpolate trips from activities and identify holes in activities
if numActivities > 1:
for i in range(0, numActivities - 1):
inferHoles(activities[i][0], activities[i][1], newActivities, holes, gpsTraces, minSamplingRate)
inferHoles(activities[i][1], activities[i + 1][0], trips, holes, gpsTraces, minSamplingRate)
# Identify holes in the last activity
inferHoles(activities[-1][0], activities[-1][1], newActivities, holes, gpsTraces, minSamplingRate)
# Check if the GPS log ends with a trip
if activities[-1][-1] < len(gpsTraces) - 2:
inferHoles(activities[-1][1], len(gpsTraces) - 2, trips, holes, gpsTraces, minSamplingRate)
# If the data comprises a single trip
else:
trips.append([0, len(gpsTraces)-1])
return trips, newActivities, holes
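# Sketch of how this routine is driven by collect_vij() further below; the threshold values are the
# ones defined there (milliseconds and meters) and only the `traces` variable name is hypothetical:
#   trips, activities, holes = inferTripActivity(traces, minDuration=360000, maxRadius=50,
#                                                minSeparationDistance=100, minSeparationTime=360000,
#                                                minSamplingRate=300000, gpsAccuracyThreshold=200)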
# Functions that calculate the four features of a GPS point: distance to next point (in meters),
# time interval (seconds), speed (mph) and acceleration (mph2)
def lengthPoint(gpsTraces, j):
return calDistance(gpsTraces[j][2:4], gpsTraces[j+1][2:4])
def timePoint(gpsTraces, j):
return (gpsTraces[j+1][1] - gpsTraces[j][1]) / 1000.0
def speedPoint(gpsTraces, j):
return 2.23694 * (float(lengthPoint(gpsTraces, j)) / timePoint(gpsTraces, j))
def accelerationPoint(gpsTraces, j):
return abs(speedPoint(gpsTraces, j + 1) - speedPoint(gpsTraces, j)) / (timePoint(gpsTraces,j) / 3600.0)
# Method that takes as input the list containing GPS data, called gpsTraces, and a tuple containing the
# indices of the start and end point of a trip, called trip.
#
# The trips are decomposed into their mode chains.
def inferModeChain(gpsTraces, trip, maxWalkSpeed, maxWalkAcceleration,
minSegmentDuration, minSegmentLength, gpsAccuracyThreshold):
# Step 1: Label GPS points as walk points or non-walk points
walkDummy = {}
i = trip[0]
while i < trip[1]:
start, end = i, i
while end < trip[1] and (gpsTraces[end][4] > gpsAccuracyThreshold
or gpsTraces[end + 1][4] > gpsAccuracyThreshold
or gpsTraces[end + 2][4] > gpsAccuracyThreshold):
end += 1
if start == end:
if speedPoint(gpsTraces, i) < maxWalkSpeed and accelerationPoint(gpsTraces, i) < maxWalkAcceleration:
walkDummy[i] = 1
else:
walkDummy[i] = 0
i += 1
else:
distance = calDistance(gpsTraces[start][2:4], gpsTraces[end][2:4])
time = (gpsTraces[end][1] - gpsTraces[start][1]) / 1000.0
speed = 2.23694 * (float(distance) / time)
dummy = int(speed < maxWalkSpeed)
while i < end:
walkDummy[i] = dummy
i += 1
#print walkDummy
#print
# Step 2: Identify walk and non-walk segments as consecutive walk or non-walk points
modeChains = []
beginSegment = trip[0]
currentPoint = trip[0] + 1
while currentPoint < trip[1]:
if walkDummy[currentPoint] != walkDummy[beginSegment]:
modeChains.append([beginSegment, currentPoint, int(walkDummy[beginSegment] != 0)])
beginSegment = currentPoint
currentPoint += 1
modeChains.append([beginSegment, currentPoint, int(walkDummy[beginSegment] != 0)])
#print modeChains
#print
# Step 3: If the time span of a segment is greater than minSegmentDuration milliseconds, label it
# as certain. If it is less than minSegmentDuration milliseconds, and its backward segment is certain,
# merge it with the backward segment. If no certain backward segment exists, label the segment as
# uncertain, and save it as an independent segment.
newModeChains = []
for i in range(0, len(modeChains)):
if gpsTraces[modeChains[i][1]][1] - gpsTraces[modeChains[i][0]][1] >= minSegmentDuration:
modeChains[i].append(1)
newModeChains.append(modeChains[i])
elif newModeChains and newModeChains[-1][-1] == 1:
newModeChains[-1][1] = modeChains[i][1]
else:
modeChains[i].append(0)
newModeChains.append(modeChains[i])
modeChains = newModeChains
#print modeChains
#print
# Step 4: Merge consecutive uncertain segments into a single certain segment. Calculate average
# speed over segment and compare it against maxWalkSpeed to determine whether walk or non-walk.
# Check if this segment exceeds minSegmentDuration milliseconds. If it doesn't, and there exists
# a certain forward segment, merge the new segment with this forward segment.
newModeChains, i = [modeChains[0][0:-1]], 1
while i < len(modeChains) and modeChains[i][-1] == 0:
i += 1
if i > 1:
newModeChains[0][1] = modeChains[i-1][1]
distance = calDistance(gpsTraces[newModeChains[0][0]][2:4], gpsTraces[newModeChains[0][1]][2:4])
time = (gpsTraces[newModeChains[0][1]][1] - gpsTraces[newModeChains[0][0]][1]) / 1000.0
speed = 2.23694 * (float(distance) / time)
newModeChains[0][-1] = int(speed < maxWalkSpeed)
if i < len(modeChains) and modeChains[0][-1] == 0:
time = (gpsTraces[newModeChains[0][1]][1] - gpsTraces[newModeChains[0][0]][1])
if time < minSegmentDuration:
modeChains[i][0] = trip[0]
newModeChains = []
while i < len(modeChains):
newModeChains.append(modeChains[i][:-1])
i += 1
modeChains = newModeChains
#print modeChains
#print
# Step 5: Merge consecutive walk segments and consecutive non-walk segments
newModeChains = [modeChains[0]]
for i in range(1, len(modeChains)):
if modeChains[i][2] == newModeChains[-1][2]:
newModeChains[-1][1] = modeChains[i][1]
else:
newModeChains.append(modeChains[i])
modeChains = newModeChains
return modeChains
# Method for generating a list of dictionary elements, where each element corresponds to an inferred event in the
# last 24 hours for each of the system users
def collect_vij():
gmtConversion = datetime.datetime.now(pytz.timezone('America/Los_Angeles')).strftime('%z')
testers = [{'name': 'Andrew', 'ph': '5107259365'},
{'name': 'Caroline', 'ph': '5107250774'},
{'name': 'Rory', 'ph': '5107250619'},
{'name': 'Sreeta', 'ph': '5107250786'},
{'name': 'Ziheng', 'ph': '5107250744'},
{'name': 'Vij', 'ph': '5107250740'}]
db = MongoClient('54.218.218.130').Test_database
Test_Trips=db.Test_Trips
Test_Sections=db.Test_Sections
lastUpdate = datetime.datetime.now().strftime('%Y%m%dT%H%M%S') + gmtConversion
data = []
for tester in testers:
try:
rawDataFileName = tester['ph'] + '_' + tester['name'] + '_' + lastUpdate + '.txt'
gpsTraces = getNewGPSData(tester['name'], tester['ph'], lastUpdate, rawDataFileName)
# print(gpsTraces)
minDuration, maxRadius, minSamplingRate, gpsAccuracyThreshold = 360000, 50, 300000, 200
minSeparationDistance, minSeparationTime = 100, 360000
maxWalkSpeed, maxWalkAcceleration, minSegmentDuration, minSegmentLength = 3.10, 1620, 90000, 200
trips, activities, holes = inferTripActivity(gpsTraces, minDuration, maxRadius, minSeparationDistance,
minSeparationTime, minSamplingRate, gpsAccuracyThreshold)
while (trips and activities) or (activities and holes) or (holes and trips):
event = {}
user_id=tester['name']
trip_id=datetime.datetime.fromtimestamp(int(gpsTraces[trips[0][0]][1]/1000)).strftime('%Y%m%dT%H%M%S') + gmtConversion
eventID = user_id + trip_id
if ((trips and activities and holes and trips[0][0] < activities[0][0] and trips[0][0] < holes[0][0])
or (trips and not activities and holes and trips[0][0] < holes[0][0])
or (trips and activities and not holes and trips[0][0] < activities[0][0])
or (trips and not activities and not holes)):
modeChain = inferModeChain(gpsTraces, trips[0], maxWalkSpeed, maxWalkAcceleration,
minSegmentDuration, minSegmentLength, gpsAccuracyThreshold)
segmentID, segments = 0, []
for mode in modeChain:
trackPoints = []
for i in range(mode[0], mode[1]):
trackPoint = {'Location': {'type': 'Point',
'coordinates': [gpsTraces[i][3], gpsTraces[i][2]]},
'Time': (datetime.datetime.fromtimestamp(int(gpsTraces[i][1]/1000)).strftime('%Y%m%dT%H%M%S')
+ gmtConversion)}
trackPoints.append(trackPoint)
if Test_Sections.find({"$and":[ {"user_id":user_id},{"trip_id": trip_id},{"section_id": segmentID}]}).count()==0:
sections_todo = {'source':'ITS Berkeley',
'trip_id':trip_id,
'user_id':user_id,
'_id':user_id + datetime.datetime.fromtimestamp(int(gpsTraces[mode[0]][1]/1000)).strftime('%Y%m%dT%H%M%S')
+ gmtConversion,
'section_id': segmentID,
'type':'move',
'mode': ((mode[-1] == 0) * 'Non-walk') + ((mode[-1] == 1) * 'Walk'),
'confirmed Mode': '',
'group':'',
'manual':False,
'section_start_time': (datetime.datetime.fromtimestamp(int(gpsTraces[mode[0]][1]/1000)).strftime('%Y%m%dT%H%M%S')
+ gmtConversion),
'section_end_time': (datetime.datetime.fromtimestamp(int(gpsTraces[mode[1]][1]/1000)).strftime('%Y%m%dT%H%M%S')
+ gmtConversion),
'track_points':trackPoints}
Test_Sections.insert(sections_todo)
segments.append(sections_todo)
segmentID += 1
if Test_Trips.find({"$and":[ {"user_id":user_id},{"trip_id": trip_id}]}).count()==0:
trips_todo = {'source': 'ITS Berkeley',
'user_id': user_id,
'trip_id':trip_id,
'_id': eventID,
'type': 'move',
'trip_start_time': (datetime.datetime.fromtimestamp(int(gpsTraces[trips[0][0]][1]/1000)).strftime('%Y%m%dT%H%M%S')
+ gmtConversion),
'trip_end_Time': (datetime.datetime.fromtimestamp(int(gpsTraces[trips[0][1]][1]/1000)).strftime('%Y%m%dT%H%M%S')
+ gmtConversion),
'sections': [sections['section_id'] for sections in Test_Sections.find({"$and":[{"user_id":user_id}, {"trip_id":trip_id}]})],
'last_update_time': lastUpdate}
Test_Trips.insert(trips_todo)
data.append(trips_todo)
trips = trips[1:]
elif ((activities and trips and holes and activities[0][0] < trips[0][0] and activities[0][0] < holes[0][0])
or (activities and not trips and holes and activities[0][0] < holes[0][0])
or (activities and trips and not holes and activities[0][0] < trips[0][0])
or (activities and not trips and not holes)):
trackPoints = []
for i in range(activities[0][0], activities[0][1]):
trackPoint = {'Location': {'type': 'Point',
'coordinates': [gpsTraces[i][3], gpsTraces[i][2]]},
'Time': (datetime.datetime.fromtimestamp(int(gpsTraces[i][1]/1000)).strftime('%Y%m%dT%H%M%S')
+ gmtConversion)}
trackPoints.append(trackPoint)
if Test_Sections.find({"$and":[ {"user_id":user_id},{"trip_id": trip_id}]}).count()==0:
sections_todo = {'source':'ITS Berkeley',
'trip_id':trip_id,
'user_id':user_id,
'_id':eventID,
'section_id': 0,
'type':'place',
'section_start_time': (datetime.datetime.fromtimestamp(int(gpsTraces[activities[0][0]][1]/1000)).strftime('%Y%m%dT%H%M%S')
+ gmtConversion),
'section_end_time': (datetime.datetime.fromtimestamp(int(gpsTraces[activities[0][1]][1]/1000)).strftime('%Y%m%dT%H%M%S')
+ gmtConversion),
'track_points' : trackPoints}
Test_Sections.insert(sections_todo)
if Test_Trips.find({"$and":[ {"user_id":user_id},{"trip_id": trip_id}]}).count()==0:
trips_todo = {'source': 'ITS Berkeley',
'user_id': user_id,
'trip_id': trip_id,
'_id': eventID,
'type':'place',
'trip_start_time': (datetime.datetime.fromtimestamp(int(gpsTraces[activities[0][0]][1]/1000)).strftime('%Y%m%dT%H%M%S')
+ gmtConversion),
'trip_end_time': (datetime.datetime.fromtimestamp(int(gpsTraces[activities[0][1]][1]/1000)).strftime('%Y%m%dT%H%M%S')
+ gmtConversion),
'sections': [sections['section_id'] for sections in Test_Sections.find({"$and":[{"user_id":user_id}, {"trip_id":trip_id}]})],
'last_update_time': lastUpdate}
Test_Trips.insert(trips_todo)
data.append(event)
activities = activities[1:]
elif holes:
if Test_Trips.find({"$and":[ {"user_id":user_id},{"trip_id": trip_id}]}).count()==0:
trips_todo = {'source': 'ITS Berkeley',
'user_id': user_id,
'trip_id': trip_id,
'_id':eventID,
'type': 'hole',
'trip_start_time': (datetime.datetime.fromtimestamp(int(gpsTraces[holes[0][0]][1]/1000)).strftime('%Y%m%dT%H%M%S')
+ gmtConversion),
'trip_end_time': (datetime.datetime.fromtimestamp(int(gpsTraces[holes[0][1]][1]/1000)).strftime('%Y%m%dT%H%M%S')
+ gmtConversion),
'last_update_time': lastUpdate}
Test_Trips.insert(trips_todo)
data.append(event)
holes = holes[1:]
remove(rawDataFileName)
except:
pass
return data
# This is pretty sucky because we really just want to edit the PYTHONPATH in a separate file.
# But I don't have much patience left right now
if __name__ == "__main__":
collect_vij()
# Tester personal details, change as appropriate
# Difference in hours between local time and UTC time, remember to change for daylight savings
# Generate list of events
# data = generateEvents(testers, gmtConversion)
| [
"[email protected]"
] | |
3f08c6f4b90708762d29d9cca893e5352aefebb7 | 374d62b3aa78a2aa98077b28a1d78271d1e67a4a | /mike/db.py | 0ef6bbc8339f00f6587fdff6ac35f25ba5778500 | [
"MIT"
] | permissive | emre/mike | b15fc3ea34072db9fa2d71b81828dda160803519 | d682fa3385568d4f3d37b8e4e5578cc729c63dcc | refs/heads/master | 2020-04-22T16:54:51.540783 | 2019-03-17T21:04:35 | 2019-03-17T21:04:35 | 170,524,004 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,910 | py | import os
import dataset
class Database:
"""The wrapper class for database operations.
"""
def __init__(self, connection_uri=None):
self.connection = dataset.connect(
connection_uri or os.getenv("MIKE_DB"))
@property
def subscriptions(self):
"""Returns the dataset table object."""
return self.connection["subscriptions"]
def subscribe(self, player_account, discord_account, discord_backend_id):
"""Subscribe to a player account from a discord account.
:param player_account: Account name in STEEM blockchain
:param discord_account: Discord ID. (foo#N)
:return: None
"""
self.subscriptions.insert(dict(
player_account=player_account,
discord_account=discord_account,
discord_backend_id=discord_backend_id,
))
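    # Hedged usage sketch (connection URI, account names and backend id are made up):
    #   db = Database("sqlite:///mike.db")
    #   if not db.subscription_exists("player1", "user#1234"):
    #       db.subscribe("player1", "user#1234", discord_backend_id=42)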
def unsubscribe(self, player_account, discord_account):
"""Unsubscribe from a discord account to a discord account.
:param player_account: Account name in STEEM blockchain
:param discord_account: Discord ID. (foo#N)
:return: None
"""
self.subscriptions.delete(
player_account=player_account,
discord_account=discord_account,
)
def subscription_exists(self, player_account, discord_account):
"""Check if a subscription is already exists.
:param player_account: Account name in STEEM blockchain
:param discord_account: Discord ID. (foo#N)
:return (boolean): True or False based on the existence
"""
if self.subscriptions.find_one(
player_account=player_account,
discord_account=discord_account):
return True
return False
def active_subscription_count(self, discord_account):
"""Return the active subscription count for a discord account.
:param discord_account: Discord ID. (foo#N)
        :return (int): The number of active subscriptions for the account.
"""
return len(list(
self.subscriptions.find(discord_account=discord_account)))
def all_subscriptions(self):
"""Returns all subscriptions globally."""
return list(self.subscriptions.find())
def subscriptions_by_user(self, discord_account):
"""Return all subscriptions of a particular user.
:param discord_account: Discord ID. (foo#N)
"""
return list(self.subscriptions.find(discord_account=discord_account))
def subscriptions_by_player_account(self, player_account):
"""Return all subscriptions of a player account.
:param player_account: Account name
"""
return list(self.subscriptions.find(player_account=player_account))
def registered_targets(self):
targets = list(self.subscriptions.find())
return [t["player_account"] for t in targets] | [
"[email protected]"
] | |
5836243f7b145db5656f8e58b2df169ceefab64f | ab79f8297105a7d412303a8b33eaa25038f38c0b | /education/timetable/report/__init__.py | 634e31b396b90a640640fde891364d7a03dcf01d | [] | no_license | adahra/addons | 41a23cbea1e35079f7a9864ade3c32851ee2fb09 | c5a5678379649ccdf57a9d55b09b30436428b430 | refs/heads/master | 2022-06-17T21:22:22.306787 | 2020-05-15T10:51:14 | 2020-05-15T10:51:14 | 264,167,002 | 1 | 0 | null | 2020-05-15T10:39:26 | 2020-05-15T10:39:26 | null | UTF-8 | Python | false | false | 1,238 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2011-2012 Serpent Consulting Services (<http://www.serpentcs.com>)
# Copyright (C) 2013-2014 Serpent Consulting Services (<http://www.serpentcs.com>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import timetable_info
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | [
"prog1@381544ba-743e-41a5-bf0d-221725b9d5af"
] | prog1@381544ba-743e-41a5-bf0d-221725b9d5af |
7a2d05ce4585126a339e5fe6678268ec288490f0 | 25b914aecd6b0cb49294fdc4f2efcfdf5803cc36 | /homeassistant/components/smarttub/binary_sensor.py | f5af165525520438b3dbdab8a7cca6a8bc6deacf | [
"Apache-2.0"
] | permissive | jason0x43/home-assistant | 9114decaa8f7c2f1582f84e79dc06736b402b008 | 8bf6aba1cf44ee841de063755c935ea78040f399 | refs/heads/dev | 2023-03-04T01:14:10.257593 | 2022-01-01T12:11:56 | 2022-01-01T12:11:56 | 230,622,861 | 1 | 1 | Apache-2.0 | 2023-02-22T06:15:07 | 2019-12-28T14:45:43 | Python | UTF-8 | Python | false | false | 5,737 | py | """Platform for binary sensor integration."""
from __future__ import annotations
from smarttub import SpaError, SpaReminder
import voluptuous as vol
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.helpers import entity_platform
from .const import ATTR_ERRORS, ATTR_REMINDERS, DOMAIN, SMARTTUB_CONTROLLER
from .entity import SmartTubEntity, SmartTubSensorBase
# whether the reminder has been snoozed (bool)
ATTR_REMINDER_SNOOZED = "snoozed"
ATTR_ERROR_CODE = "error_code"
ATTR_ERROR_TITLE = "error_title"
ATTR_ERROR_DESCRIPTION = "error_description"
ATTR_ERROR_TYPE = "error_type"
ATTR_CREATED_AT = "created_at"
ATTR_UPDATED_AT = "updated_at"
# how many days to snooze the reminder for
ATTR_REMINDER_DAYS = "days"
RESET_REMINDER_SCHEMA = {
vol.Required(ATTR_REMINDER_DAYS): vol.All(
vol.Coerce(int), vol.Range(min=30, max=365)
)
}
SNOOZE_REMINDER_SCHEMA = {
vol.Required(ATTR_REMINDER_DAYS): vol.All(
vol.Coerce(int), vol.Range(min=10, max=120)
)
}
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up binary sensor entities for the binary sensors in the tub."""
controller = hass.data[DOMAIN][entry.entry_id][SMARTTUB_CONTROLLER]
entities = []
for spa in controller.spas:
entities.append(SmartTubOnline(controller.coordinator, spa))
entities.append(SmartTubError(controller.coordinator, spa))
entities.extend(
SmartTubReminder(controller.coordinator, spa, reminder)
for reminder in controller.coordinator.data[spa.id][ATTR_REMINDERS].values()
)
async_add_entities(entities)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
"snooze_reminder",
SNOOZE_REMINDER_SCHEMA,
"async_snooze",
)
platform.async_register_entity_service(
"reset_reminder",
RESET_REMINDER_SCHEMA,
"async_reset",
)
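# The two entity services registered above simply forward to the reminder entity's methods: a
# hypothetical call to the smarttub.snooze_reminder service with {"days": 30} ends up invoking
# SmartTubReminder.async_snooze(30) on the targeted entity (the value 30 is only an example).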
class SmartTubOnline(SmartTubSensorBase, BinarySensorEntity):
"""A binary sensor indicating whether the spa is currently online (connected to the cloud)."""
_attr_device_class = BinarySensorDeviceClass.CONNECTIVITY
def __init__(self, coordinator, spa):
"""Initialize the entity."""
super().__init__(coordinator, spa, "Online", "online")
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry.
This seems to be very noisy and not generally useful, so disable by default.
"""
return False
@property
def is_on(self) -> bool:
"""Return true if the binary sensor is on."""
return self._state is True
class SmartTubReminder(SmartTubEntity, BinarySensorEntity):
"""Reminders for maintenance actions."""
_attr_device_class = BinarySensorDeviceClass.PROBLEM
def __init__(self, coordinator, spa, reminder):
"""Initialize the entity."""
super().__init__(
coordinator,
spa,
f"{reminder.name.title()} Reminder",
)
self.reminder_id = reminder.id
@property
def unique_id(self):
"""Return a unique id for this sensor."""
return f"{self.spa.id}-reminder-{self.reminder_id}"
@property
def reminder(self) -> SpaReminder:
"""Return the underlying SpaReminder object for this entity."""
return self.coordinator.data[self.spa.id][ATTR_REMINDERS][self.reminder_id]
@property
def is_on(self) -> bool:
"""Return whether the specified maintenance action needs to be taken."""
return self.reminder.remaining_days == 0
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_REMINDER_SNOOZED: self.reminder.snoozed,
ATTR_REMINDER_DAYS: self.reminder.remaining_days,
}
async def async_snooze(self, days):
"""Snooze this reminder for the specified number of days."""
await self.reminder.snooze(days)
await self.coordinator.async_request_refresh()
async def async_reset(self, days):
"""Dismiss this reminder, and reset it to the specified number of days."""
await self.reminder.reset(days)
await self.coordinator.async_request_refresh()
class SmartTubError(SmartTubEntity, BinarySensorEntity):
"""Indicates whether an error code is present.
There may be 0 or more errors. If there are >0, we show the first one.
"""
_attr_device_class = BinarySensorDeviceClass.PROBLEM
def __init__(self, coordinator, spa):
"""Initialize the entity."""
super().__init__(
coordinator,
spa,
"Error",
)
@property
def error(self) -> SpaError | None:
"""Return the underlying SpaError object for this entity."""
errors = self.coordinator.data[self.spa.id][ATTR_ERRORS]
if len(errors) == 0:
return None
return errors[0]
@property
def is_on(self) -> bool:
"""Return true if an error is signaled."""
return self.error is not None
@property
def extra_state_attributes(self):
"""Return the state attributes."""
if (error := self.error) is None:
return {}
return {
ATTR_ERROR_CODE: error.code,
ATTR_ERROR_TITLE: error.title,
ATTR_ERROR_DESCRIPTION: error.description,
ATTR_ERROR_TYPE: error.error_type,
ATTR_CREATED_AT: error.created_at.isoformat(),
ATTR_UPDATED_AT: error.updated_at.isoformat(),
}
| [
"[email protected]"
] | |
843108678682e8392270e75b6a3dcf8f91e7a60d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02686/s152409115.py | 5134590d61085ac150e14818c0e30a5374b13fcf | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | import sys
N = int(sys.stdin.readline().strip())
S = []
for _ in range(N):
s_i = sys.stdin.readline().strip()
S.append(s_i)
# Using (count of left brackets - count of right brackets) as a running height, record for each
# string the pair (minimum height reached, final height).
# ex)
# ")": (-1, -1)
# "(": (1, 1)
# "()": (1, 0)
# ")()(": (-1, 0)
# "))))(((((: (-4, 1)
plus_seqs = []
minus_seqs = []
for s_i in S:
h = 0
min_h = float("inf")
for bracket in s_i:
if bracket == "(":
h += 1
else:
h -= 1
min_h = min(min_h, h)
if h >= 0:
plus_seqs.append((min_h, h))
else:
# minus_seqs.append((-1 * min_h, -1 * h))
minus_seqs.append((min_h - h, -1 * h))
# print(plus_seqs)
# print(minus_seqs)
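# Greedy feasibility check: strings whose net height is non-negative are consumed first, in
# decreasing order of their minimum prefix height; strings with negative net height are checked
# symmetrically as if read from the right end, and finally the two totals must match. For example,
# with S = ["(", ")"] both passes stay at or above zero and end at height 1, so the answer is "Yes"
# (corresponding to the ordering "(" + ")").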
hight = 0
for (min_h, h) in sorted(plus_seqs, reverse=True):
if hight + min_h < 0:
print("No")
sys.exit()
hight += h
hight2 = 0
for (min_h, h) in sorted(minus_seqs, reverse=True):
if hight2 + min_h < 0:
print("No")
sys.exit()
hight2 += h
# print(hight, hight2)
if hight == hight2:
print("Yes")
else:
print("No") | [
"[email protected]"
] | |
463be4007737de91d1de827ce584c2849f45f000 | 20c75b34256a9e2c6d1ac18ac14e923778846660 | /Interview_Preperation_Kit/WarmUpChallenges/CountingValleys.py | d33e153fa0e6134089ebbf856bb74ce0af0de2ab | [] | no_license | ktyagi12/HackerRank | 124303551dfe5b231654b5e96644ac43a775e31d | d10fbf50bc549297492618bb1896eca2e0cf3184 | refs/heads/master | 2020-09-06T19:33:27.422287 | 2020-01-11T12:02:36 | 2020-01-11T12:02:36 | 220,526,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | #Problem available at: https://www.hackerrank.com/challenges/counting-valleys/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=warmup
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the countingValleys function below.
def countingValleys(n, s):
sea_level = 0
valley = 0
step = 0
while(step<n):
if(s[step] == 'U'):
sea_level= sea_level+1
if(s[step] == 'D'):
sea_level = sea_level -1
if(sea_level==0 and s[step]=='U'):
valley = valley+1
step+= 1
return valley
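# Worked example (the HackerRank sample case): countingValleys(8, "UDDDUDUU") returns 1 -- the hiker
# dips below sea level once and the valley closes on the final "U" step back up to sea level.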
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
s = input()
result = countingValleys(n, s)
fptr.write(str(result) + '\n')
fptr.close()
| [
"[email protected]"
] | |
d1291af4db5155e59a61c437de7fcdb6c98f9866 | 841ad26ec31cd4339792c46513109d76c58161e9 | /aula05/exercicio 8.py | f4c1ded6d79d92464638baf72349f0a97f01db8d | [] | no_license | Elvis-Lopes/Ci-ncias-de-Dados-Uni9 | ab5537bfc0f570d639e9763bb80b9654838e76d2 | 2939216c6adef7c64c8a7045b99c117753baaae8 | refs/heads/master | 2021-02-11T12:54:56.248564 | 2020-03-30T23:24:03 | 2020-03-30T23:24:03 | 244,492,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | listaNumerica = []
n = float(input('Digite um numero: '))
listaNumerica.append(n)
while n != 0:
n = float(input('Digite um numero: '))
listaNumerica.append(n)
print(listaNumerica)
| [
"[email protected]"
] | |
a9e0812a200d12dea201acea2f097974ca462be5 | b5e9349b073d90ee1188e3fc0f844eaefff68640 | /travello/views.py | 28028cf0fe7217f501e771ab2c103086a692ead9 | [] | no_license | tamanna090903/travello | 7e33162c450dd6bf8429036e40a5b631a14084cc | 6d207597ade49354516bc09fa2e5e77624f3a8f3 | refs/heads/master | 2020-06-24T17:32:37.622240 | 2019-07-28T16:52:37 | 2019-07-28T16:52:37 | 199,031,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | from django.shortcuts import render
from .models import Destination
# Create your views here.
def index(request):
dests = Destination.objects.all()
return render(request ,"index.html", {'dests': dests})
| [
"[email protected]"
] | |
afa043d0d46daf0c393a951b77ce58cfe19f86d3 | 215fa1a675e15117f6579a96974e187952f0a0b1 | /gevent/thread.py | 8de1c6b35ad1f42f5977144ae6beff0d154c30ac | [
"MIT"
] | permissive | easel/gevent | 1d04b36deb871a2cc4578f3d533de0205abf2ccd | dcb431e55037192a0461ef8067d8f087a3e084d7 | refs/heads/master | 2021-01-01T18:33:59.774821 | 2012-06-06T15:07:40 | 2012-06-06T15:07:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,735 | py | """Implementation of the standard :mod:`thread` module that spawns greenlets.
.. note::
This module is a helper for :mod:`gevent.monkey` and is not intended to be
used directly. For spawning greenlets in your applications, prefer
:class:`Greenlet` class.
"""
import sys
__implements__ = ['allocate_lock',
'get_ident',
'exit',
'LockType',
'stack_size',
'start_new_thread',
'_local']
__imports__ = ['error']
if sys.version_info[0] <= 2:
__target__ = 'thread'
else:
__target__ = '_thread'
__thread__ = __import__(__target__)
error = __thread__.error
from gevent.hub import getcurrent, GreenletExit
from gevent.greenlet import Greenlet
from gevent.lock import Semaphore as LockType
from gevent.local import local as _local
def get_ident(gr=None):
if gr is None:
return id(getcurrent())
else:
return id(gr)
def start_new_thread(function, args=(), kwargs={}):
greenlet = Greenlet.spawn(function, *args, **kwargs)
return get_ident(greenlet)
def allocate_lock():
return LockType(1)
def exit():
raise GreenletExit
if hasattr(__thread__, 'stack_size'):
_original_stack_size = __thread__.stack_size
def stack_size(size=None):
if size is None:
return _original_stack_size()
if size > _original_stack_size():
return _original_stack_size(size)
else:
pass
# not going to decrease stack_size, because otherwise other greenlets in this thread will suffer
else:
__implements__.remove('stack_size')
__all__ = __implements__ + __imports__
__all__.remove('_local')
# XXX interrupt_main
| [
"[email protected]"
] | |
560169b217749e498eb42da49f860f132a024c60 | 1cfcfa686489885843b9a142c8ba980ebd5d5ffd | /tests/layers/test_pixelcnn.py | 374041d91034cbd848b0e23174efc96299c2c408 | [
"MIT"
] | permissive | qyz-thu/gnn_vae | 9d2d8e984a96d0f22f74362889fdd1c0613df46d | 278aeb7038216812a94c7f7acd2ca425696f986b | refs/heads/master | 2023-02-05T20:07:24.097968 | 2020-12-18T06:34:20 | 2020-12-18T06:34:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,981 | py | import unittest
from itertools import product
from typing import *
import numpy as np
import pytest
import tensorkit as tk
from tensorkit import tensor as T
from tensorkit.tensor import Tensor
from tests.helper import *
from tests.ops import *
def make_causal_test_input(size: List[int],
pos: List[int],
single_point: bool = True,
) -> np.ndarray:
ret = np.zeros(size, dtype=np.float32)
if single_point:
tmp = ret
for p in pos[:-1]:
tmp = tmp[p]
tmp[pos[-1]] = 1.
else:
tmp = ret
for p in pos[:-1]:
tmp[(p+1):] = 1.
tmp = tmp[p]
tmp[pos[-1]:] = 1.
return np.reshape(ret, make_conv_shape([1], 1, size))
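# Small concrete case (values assumed, layout delegated to make_conv_shape): for size=[4] and pos=[1]
# this helper returns [0, 1, 0, 0] when single_point=True and [0, 1, 1, 1] when single_point=False,
# reshaped into the backend's convolution layout with one batch and one channel dimension.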
def make_causal_mask(size: List[int], pos: List[int]) -> np.ndarray:
ret = make_causal_test_input(size, pos, single_point=False)
r_shape = ret.shape
ret = ret.reshape(size)
tmp = ret
for p in pos[:-1]:
tmp = tmp[p]
tmp[pos[-1]] = 0.
return ret.reshape(r_shape)
def iter_causal_test_pos(size: List[int]):
return list(product(*([0, s // 2, s - 1] for s in size)))
def ensure_stacks_causality(ctx,
outputs,
size: List[int],
pos: List[int]):
ctx.assertEqual(len(outputs), len(size))
spatial_ndims = len(outputs)
for i in range(spatial_ndims):
output = outputs[i]
if isinstance(output, T.Tensor):
output = T.to_numpy(output)
output = output.reshape(size)
this_pos = list(pos)
this_pos[i] += 1
k = i
while k > 0 and this_pos[k] >= size[k]:
this_pos[k - 1] += 1
this_pos[k] = 0
k -= 1
for j in range(i + 1, spatial_ndims):
this_pos[j] = 0
if this_pos[0] >= size[0]:
mask = np.zeros(size, dtype=np.float32)
else:
mask = make_causal_test_input(size, this_pos, single_point=False)
is_wrong = np.any(
np.logical_and(
np.abs(output) > 1e-6,
np.logical_not(mask.astype(np.bool))
)
)
ctx.assertFalse(
is_wrong,
msg=f'stack.id={i}, pos={pos}, output={output}, mask={mask}'
)
def ensure_full_receptive_field(ctx,
output,
size: List[int],
pos: List[int]):
if isinstance(output, T.Tensor):
output = T.to_numpy(output)
output_true = (np.abs(output.reshape(size)) >= 1e-6).astype(np.int32)
mask = make_causal_mask(size, pos).astype(np.int32)
ctx.assertTrue(
np.all(
np.logical_not(
np.logical_xor(
mask.astype(np.bool),
output_true.astype(np.bool)
)
)
),
msg=f'pos={pos}, output_true={output_true}, mask={mask}'
)
class _MyAddContext(tk.layers.BaseLayer):
def forward(self, input: Tensor, context: List[Tensor]) -> Tensor:
if len(context) == 0:
return input
elif len(context) == 1:
return input + context[0]
else:
raise ValueError('Expected context to have 0 or 1 element.')
class PixelCNNTestCase(TestCase):
def test_causality_and_receptive_field(self):
for size in [[12], [12, 11], [12, 11, 10]]:
spatial_ndims = len(size)
for kernel_size in [3, 5, [5, 3, 5][:spatial_ndims]]:
# ---- construct the layers ----
# the input layer
input_layer_cls = getattr(
tk.layers, f'PixelCNNInput{spatial_ndims}d')
input_layer = input_layer_cls(
1, 1, kernel_size=kernel_size, edge_bias=False,
weight_init=tk.init.ones,
)
input_layer = tk.layers.jit_compile(input_layer)
with pytest.raises(Exception,
match='`input` is expected to be .*d'):
_ = input_layer(T.zeros([1] * (spatial_ndims + 1)))
with pytest.raises(Exception,
match='`input` is expected to be .*d'):
_ = input_layer(T.zeros([1] * (spatial_ndims + 3)))
# `add_ones_channnel = True`
input_layer2 = input_layer_cls(
1, 1, kernel_size=kernel_size, weight_init=tk.init.ones)
# the pixelcnn resblock
resblock_layer_cls = getattr(
tk.layers, f'PixelCNNResBlock{spatial_ndims}d')
with pytest.raises(ValueError,
match=r'`kernel_size` is required to be at '
r'least 3'):
_ = resblock_layer_cls(1, 1, kernel_size=1)
with pytest.raises(ValueError,
match=r'`kernel_size` is required to be odd'):
_ = resblock_layer_cls(1, 1, kernel_size=[4, 3, 5][:spatial_ndims])
resblock_layer = resblock_layer_cls(
1, 1, kernel_size=kernel_size, weight_init=tk.init.ones
)
resblock_layer = tk.layers.jit_compile(resblock_layer)
with pytest.raises(Exception):
_ = resblock_layer([T.zeros([])] * (spatial_ndims - 1))
with pytest.raises(Exception):
_ = resblock_layer([T.zeros([])] * (spatial_ndims + 1))
# the down-sampling and up-sampling layer
down_sample_cls = getattr(tk.layers, f'PixelCNNConv{spatial_ndims}d')
down_sample_layer = down_sample_cls(1, 1, kernel_size, stride=2)
down_sample_layer = tk.layers.jit_compile(down_sample_layer)
down_sample_output_size = T.shape(down_sample_layer(
[T.zeros(make_conv_shape([1], 1, size))] * spatial_ndims)[0])
up_sample_cls = getattr(tk.layers, f'PixelCNNConvTranspose{spatial_ndims}d')
up_sample_layer = up_sample_cls(
1, 1, kernel_size, stride=2,
output_padding=tk.layers.get_deconv_output_padding(
input_size=[down_sample_output_size[a]
for a in get_spatial_axis(spatial_ndims)],
output_size=size,
kernel_size=kernel_size,
stride=2,
                        padding='half', # sum of both sides == (kernel_size - 1) * dilation
)
)
up_sample_layer = tk.layers.jit_compile(up_sample_layer)
# the output layer
output_layer_cls = getattr(
tk.layers, f'PixelCNNOutput{spatial_ndims}d')
output_layer = output_layer_cls()
output_layer = tk.layers.jit_compile(output_layer)
with pytest.raises(Exception,
match=r'`len\(inputs\)` is expected to be .*'):
_ = output_layer([T.zeros([])] * (spatial_ndims - 1))
with pytest.raises(Exception,
match=r'`len\(inputs\)` is expected to be .*'):
_ = output_layer([T.zeros([])] * (spatial_ndims + 1))
# ---- test the causality ----
for pos, single_point in product(
iter_causal_test_pos(size),
(True, False)
):
x = make_causal_test_input(
size, pos, single_point=single_point)
x_t = T.as_tensor(x)
# check the input layer output
outputs = input_layer(x_t)
ensure_stacks_causality(self, outputs, size, pos)
# check the final output
assert_allclose(output_layer(outputs), outputs[-1])
# check the resblock output
resblock_outputs = resblock_layer(outputs)
ensure_stacks_causality(self, resblock_outputs, size, pos)
outputs2 = resblock_outputs
for i in range(4):
outputs2 = resblock_layer(outputs2)
ensure_full_receptive_field(self, outputs2[-1], size, pos)
# check the down-sample and up-sample
down_sample_outputs = down_sample_layer(outputs)
up_sample_outputs = up_sample_layer(down_sample_outputs)
ensure_stacks_causality(self, up_sample_outputs, size, pos)
# ---- test zero input on different input layers ----
x_t = T.zeros(make_conv_shape([1], 1, size), dtype=T.float32)
outputs = input_layer(x_t)
assert_equal(
(np.abs(T.to_numpy(outputs[-1])) >= 1e-6).astype(np.int32),
x_t
)
outputs = input_layer2(x_t)
assert_equal(
(np.abs(T.to_numpy(outputs[-1])) >= 1e-6).astype(np.int32),
make_causal_mask(size, [0] * spatial_ndims).astype(np.int32)
)
def test_pixelcnn_network(self):
in_channels = 3
out_channels = 5
for size in [[15], [15, 13], [15, 13, 11]]:
spatial_ndims = len(size)
for kernel_size in [3, 5, [5, 3, 5][:spatial_ndims]]:
# ---- construct the layers ----
# the input layer
input_layer_cls = getattr(
tk.layers, f'PixelCNNInput{spatial_ndims}d')
input_layer = input_layer_cls(
in_channels, out_channels, kernel_size=kernel_size)
input_layer = tk.layers.jit_compile(input_layer)
# the pixelcnn layers
resblock_layer_cls = getattr(
tk.layers, f'PixelCNNResBlock{spatial_ndims}d')
conv_layer_cls = getattr(
tk.layers, f'PixelCNNConv{spatial_ndims}d')
deconv_layer_cls = getattr(
tk.layers, f'PixelCNNConvTranspose{spatial_ndims}d')
normalizer_cls = getattr(
tk.layers, f'BatchNorm{spatial_ndims}d')
dropout_cls = getattr(
tk.layers, f'Dropout{spatial_ndims}d')
pixelcnn_layers = [
resblock_layer_cls(
out_channels, out_channels, kernel_size=kernel_size,
activation=tk.layers.LeakyReLU, normalizer=normalizer_cls,
merge_context1=_MyAddContext,
data_init=tk.init.StdDataInit,
),
conv_layer_cls(
out_channels, out_channels, kernel_size=kernel_size,
stride=2, activation=tk.layers.Tanh, normalizer=normalizer_cls,
data_init=tk.init.StdDataInit,
),
deconv_layer_cls(
out_channels, out_channels, kernel_size=kernel_size,
stride=2, activation=tk.layers.Tanh, normalizer=normalizer_cls,
data_init=tk.init.StdDataInit,
),
resblock_layer_cls(
out_channels, out_channels, kernel_size=kernel_size,
activation=tk.layers.Sigmoid, normalizer=normalizer_cls,
dropout=0.5, merge_context1=_MyAddContext,
data_init=tk.init.StdDataInit,
),
]
pixelcnn_layers = [tk.layers.jit_compile(l) for l in pixelcnn_layers]
# the pixelcnn network
network_cls = getattr(tk.layers, f'PixelCNN{spatial_ndims}d')
with pytest.raises(TypeError,
match='`input_layer` must be an instance of'):
_ = network_cls(tk.layers.Linear(2, 3))
network1 = network_cls(input_layer)
network2 = network_cls(input_layer, pixelcnn_layers[0], pixelcnn_layers[1:])
# ---- test the network ----
x_t = T.random.randn(make_conv_shape([3], in_channels, size))
context = [T.random.randn(make_conv_shape([3], out_channels, size))]
_ = network2(T.random.randn(T.shape(x_t))) # run the initializers
tk.layers.set_train_mode(network1, False)
tk.layers.set_train_mode(network2, False)
# without context
expected_outputs2 = expected_outputs1 = input_layer(x_t)
expected_output1 = expected_outputs1[-1]
for l in pixelcnn_layers:
expected_outputs2 = l(expected_outputs2)
expected_output2 = expected_outputs2[-1]
assert_allclose(network1(x_t), expected_output1, atol=1e-6, rtol=1e-4)
assert_allclose(network2(x_t), expected_output2, atol=1e-6, rtol=1e-4)
# with context
expected_outputs2 = expected_outputs1 = input_layer(x_t)
expected_output1 = expected_outputs1[-1]
for l in pixelcnn_layers:
expected_outputs2 = l(expected_outputs2, context)
expected_output2 = expected_outputs2[-1]
assert_allclose(network1(x_t, context), expected_output1, atol=1e-6, rtol=1e-4)
assert_allclose(network2(x_t, context), expected_output2, atol=1e-6, rtol=1e-4)
| [
"[email protected]"
] | |
b0565a046adfc997bf8ea3559143f82649e12133 | 91da8a59561d6f2c7852c0548298434e0ede2ac7 | /Hash table/MaxNumberofK-SumPairs.py | 4a0bfbbcd318ab64c3250a8770c8878e0d3af028 | [] | no_license | prashant97sikarwar/leetcode | 6d3828772cc426ccf53dad07edb1efbc2f1e1ded | e76054e27a5d4493bd1bcef2ebdeb21d257afb63 | refs/heads/master | 2023-08-23T05:06:23.181869 | 2021-10-28T18:19:10 | 2021-10-28T18:19:10 | 286,057,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | #Problem Link:- https://leetcode.com/problems/max-number-of-k-sum-pairs/
"""You are given an integer array nums and an integer k. In one operation, you can pick two
numbers from the array whose sum equals k and remove them from the array.Return the maximum
number of operations you can perform on the array."""
class Solution(object):
def maxOperations(self, nums, k):
d = dict()
res = 0
for i in range(len(nums)):
if k-nums[i] not in d:
if nums[i] not in d:
d[nums[i]] = 1
else:
d[nums[i]] += 1
else:
res += 1
d[k-nums[i]] -= 1
if d[k-nums[i]] == 0:
del d[k-nums[i]]
return res | [
"[email protected]"
] | |
1e52ec2ede49f6add05f994482b8aeb958a08cfc | 49185bd5cf7e2f5190ce22b5189a09fe1ab6bb0f | /Proper/proper/examples/simple_prescription.py | cf7b181641c3e6576ad699572b6fa0f84fe0d83c | [
"MIT"
] | permissive | RupertDodkins/MEDIS | c3f55d8adb6a8c4120593ba6552c9dfe3784d4e2 | bdb1f00fb93506da2a1f251bc6780e70e97a16c5 | refs/heads/master | 2021-07-05T20:06:44.162517 | 2019-09-05T22:16:12 | 2019-09-05T22:16:12 | 160,850,558 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | # Copyright 2016, 2017 California Institute of Technology
# Users must agree to abide by the restrictions listed in the
# file "LegalStuff.txt" in the PROPER library directory.
#
# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology
# Original IDL version by John Krist
# Python translation by Navtej Saini, with Luis Marchen and Nikta Amiri
import proper
def simple_prescription(wavelength, gridsize):
# Define entrance aperture diameter and other quantities
diam = 1.0
focal_ratio = 15.0
focal_length = diam * focal_ratio
beam_ratio = 0.5
# Define the wavefront
wfo = proper.prop_begin(diam, wavelength, gridsize, beam_ratio)
# Define a circular aperture
proper.prop_circular_aperture(wfo, diam/2)
# Define entrance
proper.prop_define_entrance(wfo)
# Define a lens
proper.prop_lens(wfo, focal_length)
# Propagate the wavefront
proper.prop_propagate(wfo, focal_length)
# End
(wfo, sampling) = proper.prop_end(wfo)
return (wfo, sampling)
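# Hedged usage sketch: PROPER prescriptions like this one are normally executed through the library's
# prop_run driver, e.g. proper.prop_run('simple_prescription', 0.5, 512) for a 0.5 micron wavelength
# on a 512x512 grid; the wavelength and grid size shown here are illustrative values, not requirements.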
| [
"[email protected]"
] | |
4c68017ce4aae30013d89c3c9a04d30934043953 | cbc5e26bb47ae69e80a3649c90275becf25ce404 | /xlsxwriter/test/comparison/test_chart_errorbars10.py | bc261b91a0893ee6210674e79ef3870005290cca | [
"BSD-2-Clause-Views",
"BSD-3-Clause",
"MIT"
] | permissive | mst-solar-car/kicad-bom-generator | c3549409c3139f787ad28391372b5cb03791694a | 2aae905056d06f3d25343a8d784049c141d05640 | refs/heads/master | 2021-09-07T14:00:40.759486 | 2018-02-23T23:21:13 | 2018-02-23T23:21:13 | 107,868,801 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_errorbars10.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of an XlsxWriter file with error bars."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'line'})
chart.axis_ids = [69198976, 69200896]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5',
'y_error_bars': {
'type': 'custom',
'plus_values': '=Sheet1!$A$1',
'minus_values': '=Sheet1!$B$1:$B$3',
'plus_data': [1],
'minus_data': [2, 4, 6],
},
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5',
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| [
"[email protected]"
] | |
480048ab14fa77b38474b844721b38de1a29f589 | c287efc62bf76323f99f0e8e8460c67123bbe9c4 | /getdeal/apps/profiles/permissions.py | 9ce3f833d2f2df58d124cacdd717e675d404c122 | [] | no_license | PankeshGupta/getdeal | ff702e1ab629a06bc6d7ad012c55bc0b0e0c1415 | b0702a8f7f60de6db9de7f712108e68d66f07f61 | refs/heads/master | 2020-03-18T14:15:37.874570 | 2015-02-23T00:12:58 | 2015-02-23T00:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | # -*- coding: utf-8 -*-
"""
Created on Sep 22, 2013
"""
from rest_framework import permissions
class IsUserOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request
if request.method in permissions.SAFE_METHODS:
return True
# Write permissions are only allowed to the owner of the profile
return obj == request.user
class IsProfileOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request
if request.method in permissions.SAFE_METHODS:
return True
# Write permissions are only allowed to the owner of the profile
return obj.user == request.user
| [
"[email protected]"
] | |
e04952dde880fe3405122e68d43e9ebf08240a5a | 4be56098894a95da5964622fc4102b69e4530ab6 | /题库/1616.三次操作后最大值与最小值的最小差.py | 57cffe65dc06ba8eb374d865d2f9e0a8ad6774bf | [] | no_license | ACENDER/LeetCode | 7c7c7ecc8d0cc52215272f47ec34638637fae7ac | 3383b09ab1246651b1d7b56ab426a456f56a4ece | refs/heads/master | 2023-03-13T19:19:07.084141 | 2021-03-15T09:29:21 | 2021-03-15T09:29:21 | 299,332,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : 1616.三次操作后最大值与最小值的最小差.py
| [
"[email protected]"
] | |
16f40e8784b34bacfaa785103de9305a3c190416 | ec0e1779383bec96de803ba893de5096c563158f | /tensorflow/python/framework/ops.py | b4dc220fcec07f76bc99d005016ebdc00a0165f8 | [] | no_license | DengZhuangSouthRd/simple_tensorflow | 45d8fc7c2ef9da947f11f876aff7c1e169dc457c | 83d742219c4a04c61822935487626890bc735301 | refs/heads/master | 2021-01-18T19:05:36.414639 | 2017-04-01T15:06:16 | 2017-04-01T15:06:16 | 86,887,616 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 152,675 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import linecache
import re
import sys
import threading
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
def _override_helper(clazz_object, operator, func):
"""Overrides (string) operator on Tensors to call func.
Args:
clazz_object: the class to override for; either Tensor or SparseTensor.
operator: the string name of the operator to override.
func: the function that replaces the overridden operator.
Raises:
ValueError: If operator has already been overwritten,
or if operator is not allowed to be overwritten.
"""
existing = getattr(clazz_object, operator, None)
if existing is not None:
# Check to see if this is a default method-wrapper or slot wrapper which
# will be true for the comparison operators.
if not isinstance(existing, type(object.__lt__)):
raise ValueError("operator %s cannot be overwritten again on class %s." %
(operator, clazz_object))
if operator not in Tensor.OVERLOADABLE_OPERATORS:
raise ValueError("Overriding %s is disallowed" % operator)
setattr(clazz_object, operator, func)
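# Illustrative (hypothetical) use of the helper above -- the real registrations live in the op
# libraries, but the call shape is simply:
#   _override_helper(Tensor, "__add__", lambda x, y: some_add_op(x, y))
# after which `a + b` on Tensor objects dispatches to the supplied function.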
def _convert_stack(stack):
"""Converts a stack extracted using _extract_stack() to a traceback stack.
Args:
stack: A list of n 4-tuples, (filename, lineno, name, frame_globals).
Returns:
A list of n 4-tuples (filename, lineno, name, code), where the code tuple
element is calculated from the corresponding elements of the input tuple.
"""
ret = []
for filename, lineno, name, frame_globals in stack:
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, frame_globals)
if line:
line = line.strip()
else:
line = None
ret.append((filename, lineno, name, line))
return ret
# pylint: disable=line-too-long
def _extract_stack():
"""A lightweight re-implementation of traceback.extract_stack.
NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for
each stack frame using linecache, which results in an abundance of stat()
calls. This implementation does not retrieve the code, and any consumer
should apply _convert_stack to the result to obtain a traceback that can
be formatted etc. using traceback methods.
Returns:
A list of 4-tuples (filename, lineno, name, frame_globals) corresponding to
the call stack of the current thread.
"""
# pylint: enable=line-too-long
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
ret = []
while f is not None:
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
frame_globals = f.f_globals
ret.append((filename, lineno, name, frame_globals))
f = f.f_back
ret.reverse()
return ret
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
_TENSOR_LIKE_TYPES = tuple()
def is_dense_tensor_like(t):
"""EXPERIMENTAL: Returns true if `t` implements the tensor interface.
See `register_dense_tensor_like_type()` for the current definition of a
"tensor-like type".
Args:
t: An object.
Returns:
True iff `t` is an instance of one of the registered "tensor-like" types.
"""
return isinstance(t, _TENSOR_LIKE_TYPES)
def register_dense_tensor_like_type(tensor_type):
"""EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.
A "tensor-like type" can represent a single dense tensor, and implements
the `name` and `dtype` properties.
Args:
tensor_type: A type implementing the tensor interface.
Raises:
TypeError: If `tensor_type` does not implement the tensor interface.
"""
  try:
    if not isinstance(tensor_type.name, property):
      raise TypeError("Type %s does not define a `name` property" %
                      tensor_type.__name__)
  except AttributeError:
    raise TypeError("Type %s does not define a `name` property" %
                    tensor_type.__name__)
  try:
    if not isinstance(tensor_type.dtype, property):
      raise TypeError("Type %s does not define a `dtype` property" %
                      tensor_type.__name__)
  except AttributeError:
    raise TypeError("Type %s does not define a `dtype` property" %
                    tensor_type.__name__)
# We expect this list to be small, so choose quadratic complexity
# for registration, so that we have a tuple that can be used for
# more efficient `isinstance` checks later.
global _TENSOR_LIKE_TYPES
_TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])
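# Illustrative sketch (hypothetical type, comments only): any class exposing
# `name` and `dtype` as properties can be registered so that
# `is_dense_tensor_like()` recognizes it.
#
#   class MyDenseWrapper(object):
#     def __init__(self, tensor):
#       self._tensor = tensor
#
#     @property
#     def name(self):
#       return self._tensor.name
#
#     @property
#     def dtype(self):
#       return self._tensor.dtype
#
#   register_dense_tensor_like_type(MyDenseWrapper)
#   # is_dense_tensor_like(MyDenseWrapper(t)) is now True.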
# NOTE(ebrevdo): Do not subclass this. If you do, I will break you on purpose.
class _TensorLike(object):
"""Internal cls for grouping Tensor, SparseTensor, ..., for is_instance."""
pass
class Tensor(_TensorLike):
"""Represents one of the outputs of an `Operation`.
A `Tensor` is a symbolic handle to one of the outputs of an
`Operation`. It does not hold the values of that operation's output,
but instead provides a means of computing those values in a
TensorFlow @{tf.Session}.
This class has two primary purposes:
1. A `Tensor` can be passed as an input to another `Operation`.
This builds a dataflow connection between operations, which
enables TensorFlow to execute an entire `Graph` that represents a
large, multi-step computation.
2. After the graph has been launched in a session, the value of the
`Tensor` can be computed by passing it to
@{tf.Session.run}.
  `t.eval()` is a shortcut for calling
`tf.get_default_session().run(t)`.
In the following example, `c`, `d`, and `e` are symbolic `Tensor`
objects, whereas `result` is a numpy array that stores a concrete
value:
```python
# Build a dataflow graph.
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
e = tf.matmul(c, d)
# Construct a `Session` to execute the graph.
sess = tf.Session()
# Execute the graph and store the value that `e` represents in `result`.
result = sess.run(e)
```
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
"__getitem__",
"__pow__",
"__rpow__",
# Unary.
"__invert__",
"__neg__",
"__abs__",
"__matmul__",
"__rmatmul__"
}
def __init__(self, op, value_index, dtype):
"""Creates a new `Tensor`.
Args:
op: An `Operation`. `Operation` that computes this tensor.
value_index: An `int`. Index of the operation's endpoint that produces
this tensor.
dtype: A `DType`. Type of elements stored in this tensor.
Raises:
TypeError: If the op is not an `Operation`.
"""
if not isinstance(op, Operation):
raise TypeError("op needs to be an Operation: %s" % op)
self._op = op
self._value_index = value_index
self._dtype = dtypes.as_dtype(dtype)
self._shape = tensor_shape.unknown_shape()
# List of operations that use this Tensor as input. We maintain this list
# to easily navigate a computation graph.
self._consumers = []
# Attributes used for C++ shape inference. Not inspected, only forwarded.
self._handle_shape = tensor_shape_pb2.TensorShapeProto()
self._handle_dtype = types_pb2.DT_INVALID
@property
def op(self):
"""The `Operation` that produces this tensor as an output."""
return self._op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._dtype
@property
def graph(self):
"""The `Graph` that contains this tensor."""
return self._op.graph
@property
def name(self):
"""The string name of this tensor."""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
return "%s:%d" % (self._op.name, self._value_index)
@property
def device(self):
"""The name of the device on which this tensor will be produced, or None."""
return self._op.device
@property
def shape(self):
"""Returns the `TensorShape` that represents the shape of this tensor.
The shape is computed using shape inference functions that are
registered in the Op for each `Operation`. See
@{tf.TensorShape}
for more details of what a shape represents.
The inferred shape of a tensor is used to provide shape
information without having to launch the graph in a session. This
can be used for debugging, and providing early error messages. For
example:
```python
c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
print(c.shape)
==> TensorShape([Dimension(2), Dimension(3)])
d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
print(d.shape)
==> TensorShape([Dimension(4), Dimension(2)])
# Raises a ValueError, because `c` and `d` do not have compatible
# inner dimensions.
e = tf.matmul(c, d)
f = tf.matmul(c, d, transpose_a=True, transpose_b=True)
print(f.shape)
==> TensorShape([Dimension(3), Dimension(4)])
```
In some cases, the inferred shape may have unknown dimensions. If
the caller has additional information about the values of these
dimensions, `Tensor.set_shape()` can be used to augment the
inferred shape.
Returns:
A `TensorShape` representing the shape of this tensor.
"""
return self._shape
def _shape_as_list(self):
if self._shape.ndims is not None:
return [dim.value for dim in self._shape.dims]
else:
return None
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def set_shape(self, shape):
"""Updates the shape of this tensor.
This method can be called multiple times, and will merge the given
`shape` with the current shape of this tensor. It can be used to
provide additional information about the shape of this tensor that
cannot be inferred from the graph alone. For example, this can be used
to provide additional information about the shapes of images:
```python
_, image_data = tf.TFRecordReader(...).read(...)
image = tf.image.decode_png(image_data, channels=3)
# The height and width dimensions of `image` are data dependent, and
# cannot be computed without executing the op.
print(image.shape)
==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])
# We know that each image in this dataset is 28 x 28 pixels.
image.set_shape([28, 28, 3])
print(image.shape)
==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])
```
Args:
shape: A `TensorShape` representing the shape of this tensor.
Raises:
ValueError: If `shape` is not compatible with the current shape of
this tensor.
"""
self._shape = self._shape.merge_with(shape)
@property
def value_index(self):
"""The index of this tensor in the outputs of its `Operation`."""
return self._value_index
def consumers(self):
"""Returns a list of `Operation`s that consume this tensor.
Returns:
A list of `Operation`s.
"""
return self._consumers
def _add_consumer(self, consumer):
"""Add a consumer to this tensor.
Args:
consumer: an Operation.
Raises:
TypeError: if the consumer is not an Operation.
"""
if not isinstance(consumer, Operation):
raise TypeError("Consumer must be an Operation: %s" % consumer)
self._consumers.append(consumer)
def _as_node_def_input(self):
"""Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
"""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
if self._value_index == 0:
return self._op.name
else:
return "%s:%d" % (self._op.name, self._value_index)
def __str__(self):
return "Tensor(\"%s\"%s%s%s)" % (
self.name,
(", shape=%s" % self.get_shape())
if self.get_shape().ndims is not None else "",
(", dtype=%s" % self._dtype.name) if self._dtype else "",
(", device=%s" % self.device) if self.device else "")
def __repr__(self):
return "<tf.Tensor '%s' shape=%s dtype=%s>" % (
self.name, self.get_shape(), self._dtype.name)
def __hash__(self):
# Necessary to support Python's collection membership operators
return id(self)
def __eq__(self, other):
# Necessary to support Python's collection membership operators
return id(self) == id(other)
# NOTE(mrry): This enables the Tensor's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Tensor class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Tensors interact
# with ndarrays.
__array_priority__ = 100
@staticmethod
def _override_operator(operator, func):
_override_helper(Tensor, operator, func)
def __iter__(self):
"""Dummy method to prevent iteration. Do not call.
NOTE(mrry): If we register __getitem__ as an overloaded operator,
Python will valiantly attempt to iterate over the Tensor from 0 to
infinity. Declaring this method prevents this unintended
behavior.
Raises:
TypeError: when invoked.
"""
raise TypeError("'Tensor' object is not iterable.")
def __bool__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This overload raises a `TypeError` when the user inadvertently
treats a `Tensor` as a boolean (e.g. in an `if` statement). For
example:
```python
if tf.constant(True): # Will raise.
# ...
if tf.constant(5) < tf.constant(7): # Will raise.
# ...
```
This disallows ambiguities between testing the Python value vs testing the
dynamic condition of the `Tensor`.
Raises:
`TypeError`.
"""
raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
"Use `if t is not None:` instead of `if t:` to test if a "
"tensor is defined, and use TensorFlow ops such as "
"tf.cond to execute subgraphs conditioned on the value of "
"a tensor.")
def __nonzero__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This is the Python 2.x counterpart to `__bool__()` above.
Raises:
`TypeError`.
"""
raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
"Use `if t is not None:` instead of `if t:` to test if a "
"tensor is defined, and use TensorFlow ops such as "
"tf.cond to execute subgraphs conditioned on the value of "
"a tensor.")
def eval(self, feed_dict=None, session=None):
"""Evaluates this tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `Tensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See @{tf.Session.run} for a
description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this tensor. If
none, the default session will be used.
Returns:
A numpy array corresponding to the value of this tensor.
"""
return _eval_using_default_session(self, feed_dict, self.graph, session)
def _TensorTensorConversionFunction(t, dtype=None, name=None, as_ref=False):
_ = name, as_ref
if dtype and not dtype.is_compatible_with(t.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r"
% (dtype.name, t.dtype.name, str(t)))
return t
_tensor_conversion_func_registry = {
0: [(Tensor, _TensorTensorConversionFunction)]}
register_dense_tensor_like_type(Tensor)
def convert_to_tensor(value,
dtype=None,
name=None,
preferred_dtype=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
preferred_dtype: Optional element type for the returned tensor,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
Returns:
    A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value`.
RuntimeError: If a registered conversion function returns an invalid value.
"""
return internal_convert_to_tensor(
value=value,
dtype=dtype,
name=name,
preferred_dtype=preferred_dtype,
as_ref=False)
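# Illustrative sketch of the `preferred_dtype` soft preference described above
# (comments only; the exact dtypes chosen are an assumption about the default
# int/float conversion functions registered elsewhere):
#
#   t1 = convert_to_tensor(37, preferred_dtype=dtypes.float32)
#   # An int can be represented as float32, so t1.dtype is float32.
#
#   t2 = convert_to_tensor(37.5, preferred_dtype=dtypes.int32)
#   # 37.5 cannot be losslessly converted to int32; the preference is dropped
#   # and t2 keeps a floating-point dtype instead of raising.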
def internal_convert_to_tensor(value,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None):
"""Converts the given `value` to an `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
This function can be useful when composing a new operation in Python
All standard Python op constructors apply this function to each of their
Tensor-valued inputs, which allows those ops to accept numpy arrays, Python
lists, and scalars in addition to `Tensor` objects.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
as_ref: True if we want the mutable view of Variables, if applicable.
preferred_dtype: Optional element type for the returned tensor,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value`.
RuntimeError: If a registered conversion function returns an invalid value.
"""
error_prefix = "" if name is None else "%s: " % name
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
for _, funcs_at_priority in sorted(_tensor_conversion_func_registry.items()):
for base_type, conversion_func in funcs_at_priority:
if isinstance(value, base_type):
# If dtype is None but preferred_dtype is not None, we try to
# cast to preferred_dtype first.
ret = None
if dtype is None and preferred_dtype is not None:
try:
ret = conversion_func(
value, dtype=preferred_dtype, name=name, as_ref=as_ref)
except (TypeError, ValueError):
# Could not coerce the conversion to use the preferred dtype.
ret = None
if ret is not None and ret is not NotImplemented:
if (ret.dtype.base_dtype !=
dtypes.as_dtype(preferred_dtype).base_dtype):
raise TypeError("convert_to_tensor did not convert to "
"the preferred dtype: %s vs %s " %
(ret.dtype.base_dtype,
dtypes.as_dtype(preferred_dtype).base_dtype))
if ret is None:
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
if ret is NotImplemented:
continue
if not isinstance(ret, Tensor):
raise RuntimeError(
"%sConversion function %r for type %s returned non-Tensor: %r"
% (error_prefix, conversion_func, base_type, ret))
if dtype and not dtype.is_compatible_with(ret.dtype):
raise RuntimeError(
"%sConversion function %r for type %s returned incompatible "
"dtype: requested = %s, actual = %s"
% (error_prefix, conversion_func, base_type,
dtype.name, ret.dtype.name))
return ret
raise TypeError("%sCannot convert %r with type %s to Tensor: "
"no conversion function registered."
% (error_prefix, value, type(value)))
def internal_convert_n_to_tensor(values,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to use when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
preferred_dtype: Optional element type for the returned tensors,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections.Sequence):
raise TypeError("values must be a list.")
ret = []
for i, value in enumerate(values):
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor(
value,
dtype=dtype,
name=n,
as_ref=as_ref,
preferred_dtype=preferred_dtype))
return ret
def convert_n_to_tensor(values,
dtype=None,
name=None,
preferred_dtype=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to use when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
preferred_dtype: Optional element type for the returned tensors,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor(values=values,
dtype=dtype,
name=name,
preferred_dtype=preferred_dtype,
as_ref=False)
def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None):
"""Converts the given object to a `Tensor` or an `IndexedSlices`.
If `value` is an `IndexedSlices` or `SparseTensor` it is returned
unmodified. Otherwise, it is converted to a `Tensor` using
`convert_to_tensor()`.
Args:
value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name to use if a new `Tensor` is created.
Returns:
    A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
return internal_convert_to_tensor_or_indexed_slices(
value=value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_indexed_slices(value, dtype=None, name=None,
as_ref=False):
"""Converts the given object to an `Tensor` or an `IndexedSlices`.
If `value` is an `IndexedSlices` or `SparseTensor` it is returned
unmodified. Otherwise, it is converted to a `Tensor` using
`convert_to_tensor()`.
Args:
value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name to use if a new `Tensor` is created.
as_ref: True if the caller wants the results as ref tensors.
Returns:
    A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
if isinstance(value, _TensorLike):
if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r"
% (dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
return value
else:
return internal_convert_to_tensor(value,
dtype=dtype,
name=name,
as_ref=as_ref)
def internal_convert_n_to_tensor_or_indexed_slices(values, dtype=None,
name=None, as_ref=False):
"""Converts `values` to a list of `Tensor` or `IndexedSlices` objects.
Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
unmodified.
Args:
values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
can be consumed by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`
`IndexedSlices`.
    name: (Optional.) A name prefix to use when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections.Sequence):
raise TypeError("values must be a list.")
ret = []
for i, value in enumerate(values):
if value is None:
ret.append(value)
else:
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor_or_indexed_slices(
value, dtype=dtype, name=n, as_ref=as_ref))
return ret
def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None):
"""Converts `values` to a list of `Output` or `IndexedSlices` objects.
Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
unmodified.
Args:
values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
can be consumed by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`
`IndexedSlices`.
    name: (Optional.) A name prefix to use when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
Returns:
A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor_or_indexed_slices(
values=values, dtype=dtype, name=name, as_ref=False)
def register_tensor_conversion_function(base_type, conversion_func,
priority=100):
"""Registers a function for converting objects of `base_type` to `Tensor`.
The conversion function must have the following signature:
```python
def conversion_func(value, dtype=None, name=None, as_ref=False):
# ...
```
It must return a `Tensor` with the given `dtype` if specified. If the
conversion function creates a new `Tensor`, it should use the given
`name` if specified. All exceptions will be propagated to the caller.
The conversion function may return `NotImplemented` for some
inputs. In this case, the conversion process will continue to try
subsequent conversion functions.
If `as_ref` is true, the function must return a `Tensor` reference,
such as a `Variable`.
NOTE: The conversion functions will execute in order of priority,
followed by order of registration. To ensure that a conversion function
`F` runs before another conversion function `G`, ensure that `F` is
registered with a smaller priority than `G`.
Args:
base_type: The base type or tuple of base types for all objects that
`conversion_func` accepts.
conversion_func: A function that converts instances of `base_type` to
`Tensor`.
priority: Optional integer that indicates the priority for applying this
conversion function. Conversion functions with smaller priority values
run earlier than conversion functions with larger priority values.
Defaults to 100.
Raises:
TypeError: If the arguments do not have the appropriate type.
"""
if not (isinstance(base_type, type) or
(isinstance(base_type, tuple)
and all(isinstance(x, type) for x in base_type))):
raise TypeError("base_type must be a type or a tuple of types.")
if not callable(conversion_func):
raise TypeError("conversion_func must be callable.")
try:
funcs_at_priority = _tensor_conversion_func_registry[priority]
except KeyError:
funcs_at_priority = []
_tensor_conversion_func_registry[priority] = funcs_at_priority
funcs_at_priority.append((base_type, conversion_func))
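# Illustrative sketch (hypothetical `Rational` wrapper, comments only) of the
# registration pattern described in the docstring above:
#
#   class Rational(object):
#     def __init__(self, numerator, denominator):
#       self.numerator = numerator
#       self.denominator = denominator
#
#   def _rational_to_tensor(value, dtype=None, name=None, as_ref=False):
#     del as_ref  # Rationals have no mutable/ref view.
#     return convert_to_tensor(
#         float(value.numerator) / float(value.denominator),
#         dtype=dtype, name=name)
#
#   register_tensor_conversion_function(Rational, _rational_to_tensor,
#                                       priority=100)
#   # convert_to_tensor(Rational(1, 2)) now yields a scalar float tensor.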
class IndexedSlices(_TensorLike):
"""A sparse representation of a set of tensor slices at given indices.
This class is a simple wrapper for a pair of `Tensor` objects:
* `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.
* `indices`: A 1-D integer `Tensor` with shape `[D0]`.
An `IndexedSlices` is typically used to represent a subset of a larger
tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`.
The values in `indices` are the indices in the first dimension of
the slices that have been extracted from the larger tensor.
The dense tensor `dense` represented by an `IndexedSlices` `slices` has
```python
dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]
```
The `IndexedSlices` class is used principally in the definition of
gradients for operations that have sparse gradients
(e.g. @{tf.gather}).
Contrast this representation with
@{tf.SparseTensor},
which uses multi-dimensional indices and scalar values.
"""
def __init__(self, values, indices, dense_shape=None):
"""Creates an `IndexedSlices`."""
_get_graph_from_inputs([values, indices, dense_shape])
self._values = values
self._indices = indices
self._dense_shape = dense_shape
@property
def values(self):
"""A `Tensor` containing the values of the slices."""
return self._values
@property
def indices(self):
"""A 1-D `Tensor` containing the indices of the slices."""
return self._indices
@property
def dense_shape(self):
"""A 1-D `Tensor` containing the shape of the corresponding dense tensor."""
return self._dense_shape
@property
def name(self):
"""The name of this `IndexedSlices`."""
return self.values.name
@property
def device(self):
"""The name of the device on which `values` will be produced, or `None`."""
return self.values.device
@property
def op(self):
"""The `Operation` that produces `values` as an output."""
return self.values.op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self.values.dtype
@property
def graph(self):
"""The `Graph` that contains the values, indices, and shape tensors."""
return self._values.graph
def __str__(self):
return "IndexedSlices(indices=%s, values=%s%s)" % (
self._indices, self._values,
(", dense_shape=%s" % self._dense_shape)
if self._dense_shape is not None else "")
def __neg__(self):
return IndexedSlices(-self.values, self.indices, self.dense_shape)
IndexedSlicesValue = collections.namedtuple(
"IndexedSlicesValue", ["values", "indices", "dense_shape"])
def _device_string(dev_spec):
if isinstance(dev_spec, pydev.DeviceSpec):
return dev_spec.to_string()
else:
return dev_spec
def _NodeDef(op_type, name, device=None, attrs=None):
"""Create a NodeDef proto.
Args:
op_type: Value for the "op" attribute of the NodeDef proto.
name: Value for the "name" attribute of the NodeDef proto.
device: string, device, or function from NodeDef to string.
Value for the "device" attribute of the NodeDef proto.
attrs: Optional dictionary where the key is the attribute name (a string)
and the value is the respective "attr" attribute of the NodeDef proto (an
AttrValue).
Returns:
A node_def_pb2.NodeDef protocol buffer.
"""
node_def = node_def_pb2.NodeDef()
node_def.op = compat.as_bytes(op_type)
node_def.name = compat.as_bytes(name)
if attrs is not None:
for k, v in six.iteritems(attrs):
node_def.attr[k].CopyFrom(v)
if device is not None:
if callable(device):
node_def.device = device(node_def)
else:
node_def.device = _device_string(device)
return node_def
# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/]*$")
class Operation(object):
"""Represents a graph node that performs computation on tensors.
An `Operation` is a node in a TensorFlow `Graph` that takes zero or
more `Tensor` objects as input, and produces zero or more `Tensor`
objects as output. Objects of type `Operation` are created by
calling a Python op constructor (such as
@{tf.matmul})
or @{tf.Graph.create_op}.
For example `c = tf.matmul(a, b)` creates an `Operation` of type
"MatMul" that takes tensors `a` and `b` as input, and produces `c`
as output.
After the graph has been launched in a session, an `Operation` can
be executed by passing it to
@{tf.Session.run}.
`op.run()` is a shortcut for calling `tf.get_default_session().run(op)`.
"""
def __init__(self, node_def, g, inputs=None, output_types=None,
control_inputs=None, input_types=None, original_op=None,
op_def=None):
r"""Creates an `Operation`.
NOTE: This constructor validates the name of the `Operation` (passed
as `node_def.name`). Valid `Operation` names match the following
regular expression:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]*
Args:
node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`.
Used for attributes of `node_def_pb2.NodeDef`, typically `name`,
`op`, and `device`. The `input` attribute is irrelevant here
as it will be computed when generating the model.
g: `Graph`. The parent graph.
inputs: list of `Tensor` objects. The inputs to this `Operation`.
output_types: list of `DType` objects. List of the types of the
`Tensors` computed by this operation. The length of this list indicates
the number of output endpoints of the `Operation`.
control_inputs: list of operations or tensors from which to have a
control dependency.
input_types: List of `DType` objects representing the
types of the tensors accepted by the `Operation`. By default
uses `[x.dtype.base_dtype for x in inputs]`. Operations that expect
reference-typed inputs must specify these explicitly.
original_op: Optional. Used to associate the new `Operation` with an
existing `Operation` (for example, a replica with the op that was
replicated).
op_def: Optional. The `op_def_pb2.OpDef` proto that describes the
op type that this `Operation` represents.
Raises:
TypeError: if control inputs are not Operations or Tensors,
or if `node_def` is not a `NodeDef`,
or if `g` is not a `Graph`,
or if `inputs` are not tensors,
or if `inputs` and `input_types` are incompatible.
ValueError: if the `node_def` name is not valid.
"""
if not isinstance(node_def, node_def_pb2.NodeDef):
raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB.")
if not _VALID_OP_NAME_REGEX.match(node_def.name):
raise ValueError("'%s' is not a valid node name" % node_def.name)
if not isinstance(g, Graph):
raise TypeError("g needs to be a Graph: %s" % g)
self._node_def = copy.deepcopy(node_def)
self._graph = g
if inputs is None:
inputs = []
elif not isinstance(inputs, list):
raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
self._inputs = list(inputs) # Defensive copy.
for a in self._inputs:
if not isinstance(a, Tensor):
raise TypeError("input needs to be a Tensor: %s" % a)
# Mark that we consume the inputs.
a._add_consumer(self) # pylint: disable=protected-access
if output_types is None:
output_types = []
self._output_types = output_types
self._outputs = [Tensor(self, i, output_type)
for i, output_type in enumerate(output_types)]
if input_types is None:
input_types = [i.dtype.base_dtype for i in self._inputs]
else:
if not all(x.is_compatible_with(i.dtype)
for i, x in zip(self._inputs, input_types)):
raise TypeError("Inputs are not compatible with input types")
self._input_types = input_types
# Build the list of control inputs.
self._control_inputs = []
if control_inputs:
for c in control_inputs:
c_op = None
if isinstance(c, Operation):
c_op = c
elif isinstance(c, (Tensor, IndexedSlices)):
c_op = c.op
else:
raise TypeError("Control input must be an Operation, "
"a Tensor, or IndexedSlices: %s" % c)
self._control_inputs.append(c_op)
self._original_op = original_op
self._op_def = op_def
self._traceback = _extract_stack()
# Add this op to the current control flow context:
self._control_flow_context = g._get_control_flow_context()
if self._control_flow_context is not None:
self._control_flow_context.AddOp(self)
# NOTE(keveman): Control flow context's AddOp could be creating new ops and
# setting op.inputs[index] = new_op. Thus the new ops' id could be larger
# than this op's id even though this op depend on them. Therefore, delaying
# assigning id to this op until all ops this could be dependent on are
# created.
self._id_value = self._graph._next_id() # pylint: disable=protected-access
self._recompute_node_def()
def colocation_groups(self):
"""Returns the list of colocation groups of the op."""
default_colocation_group = [compat.as_bytes("loc:@%s" %
self._node_def.name)]
if "_class" not in self._node_def.attr:
# This op has no explicit colocation group, so it is itself its
# own root of a colocation group.
return default_colocation_group
attr_groups = [class_name
for class_name in self.get_attr("_class")
if class_name.startswith(b"loc:@")]
# If there are no colocation groups in the explicit _class field,
# return the default colocation group.
return attr_groups if attr_groups else default_colocation_group
def values(self):
"""DEPRECATED: Use outputs."""
return tuple(self.outputs)
def _get_control_flow_context(self):
"""Returns the control flow context of this op.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, context):
"""Sets the current control flow context of this op.
Args:
context: a context object.
"""
self._control_flow_context = context
@property
def name(self):
"""The full name of this operation."""
return self._node_def.name
@property
def _id(self):
"""The unique integer id of this operation."""
return self._id_value
@property
def device(self):
"""The name of the device to which this op has been assigned, if any.
Returns:
The string name of the device to which this op has been
assigned, or an empty string if it has not been assigned to a
device.
"""
return self._node_def.device
def _set_device(self, device):
"""Set the device of this operation.
Args:
      device: string or device. The device to set.
"""
self._node_def.device = _device_string(device)
def _add_input(self, tensor, dtype=None):
"""Add a new input to this operation.
Args:
tensor: the Tensor to add as an input.
dtype: tf.DType: type of the input; defaults to
the tensor's dtype.
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
if dtype is None:
dtype = tensor.dtype
else:
dtype = dtypes.as_dtype(dtype)
if not dtype.is_compatible_with(tensor.dtype):
raise TypeError(
"Cannot convert a tensor of type %s to an input of type %s"
% (tensor.dtype.name, dtype.name))
self._inputs.append(tensor)
self._input_types.append(dtype)
tensor._add_consumer(self) # pylint: disable=protected-access
self._recompute_node_def()
def _update_input(self, index, tensor, dtype=None):
"""Update the input to this operation at the given index.
NOTE: This is for TF internal use only. Please don't use it.
Args:
index: the index of the input to update.
tensor: the Tensor to be used as the input at the given index.
dtype: tf.DType: type of the input; defaults to
the tensor's dtype.
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
if dtype is None:
dtype = tensor.dtype
else:
dtype = dtypes.as_dtype(dtype)
if not dtype.is_compatible_with(tensor.dtype):
raise TypeError(
"Cannot convert a tensor of type %s to an input of type %s"
% (tensor.dtype.name, dtype.name))
self._inputs[index].consumers().remove(self)
self._inputs[index] = tensor
self._input_types[index] = dtype
tensor._add_consumer(self) # pylint: disable=protected-access
self._recompute_node_def()
def _add_control_inputs(self, ops):
"""Add a list of new control inputs to this operation.
Args:
ops: the list of Operations to add as control input.
Raises:
TypeError: if ops is not a list of Operations.
ValueError: if any op in ops is from a different graph.
"""
if ops:
for op in ops:
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
_assert_same_graph(self, op)
self._control_inputs.append(op)
self._recompute_node_def()
def _add_control_input(self, op):
"""Add a new control input to this operation.
Args:
op: the Operation to add as control input.
Raises:
TypeError: if op is not an Operation.
ValueError: if op is from a different graph.
"""
self._add_control_inputs([op])
# Methods below are used when building the NodeDef and Graph proto.
def _recompute_node_def(self):
del self._node_def.input[:]
self._node_def.input.extend([t._as_node_def_input() for t in self._inputs])
if self._control_inputs:
self._node_def.input.extend(["^%s" % op.name for op in
self._control_inputs])
def __str__(self):
return str(self._node_def)
def __repr__(self):
return "<tf.Operation '%s' type=%s>" % (self.name, self.type)
@property
def outputs(self):
"""The list of `Tensor` objects representing the outputs of this op."""
return self._outputs
# pylint: disable=protected-access
class _InputList(object):
"""Immutable input list wrapper."""
def __init__(self, op):
self._op = op
def __iter__(self):
return iter(self._op._inputs)
def __len__(self):
return len(self._op._inputs)
def __bool__(self):
return bool(self._op._inputs)
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def __getitem__(self, i):
return self._op._inputs[i]
# pylint: enable=protected-access
@property
def inputs(self):
"""The list of `Tensor` objects representing the data inputs of this op."""
return Operation._InputList(self)
@property
def _input_dtypes(self):
return self._input_types
@property
def control_inputs(self):
"""The `Operation` objects on which this op has a control dependency.
Before this op is executed, TensorFlow will ensure that the
operations in `self.control_inputs` have finished executing. This
mechanism can be used to run ops sequentially for performance
reasons, or to ensure that the side effects of an op are observed
in the correct order.
Returns:
A list of `Operation` objects.
"""
return self._control_inputs
@property
def type(self):
"""The type of the op (e.g. `"MatMul"`)."""
return self._node_def.op
@property
def graph(self):
"""The `Graph` that contains this operation."""
return self._graph
@property
def node_def(self):
"""Returns a serialized `NodeDef` representation of this operation.
Returns:
A
[`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
protocol buffer.
"""
return self._node_def
@property
def op_def(self):
"""Returns the `OpDef` proto that represents the type of this op.
Returns:
An
[`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
protocol buffer.
"""
return self._op_def
@property
def traceback(self):
"""Returns the call stack from when this operation was constructed."""
return _convert_stack(self._traceback)
def get_attr(self, name):
"""Returns the value of the attr of this op with the given `name`.
Args:
name: The name of the attr to fetch.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
fields = ["s", "i", "f", "b", "type", "shape", "tensor"]
if name not in self._node_def.attr:
raise ValueError("No attr named '" + name + "' in " +
str(self._node_def))
x = self._node_def.attr[name]
# Treat an empty oneof value as an empty list.
if not x.WhichOneof("value"):
return []
if x.HasField("list"):
for f in fields:
if getattr(x.list, f):
return list(getattr(x.list, f))
return []
else:
for f in fields:
if x.HasField(f):
return getattr(x, f)
assert False, "Unsupported field type in " + str(x)
def run(self, feed_dict=None, session=None):
"""Runs this operation in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for this operation.
*N.B.* Before invoking `Operation.run()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See @{tf.Session.run}
for a description of the valid feed values.
session: (Optional.) The `Session` to be used to run to this operation. If
none, the default session will be used.
"""
_run_using_default_session(self, feed_dict, self.graph, session)
_gradient_registry = registry.Registry("gradient")
class RegisterGradient(object):
"""A decorator for registering the gradient function for an op type.
This decorator is only used when defining a new op type. For an op
with `m` inputs and `n` outputs, the gradient function is a function
that takes the original `Operation` and `n` `Tensor` objects
(representing the gradients with respect to each output of the op),
and returns `m` `Tensor` objects (representing the partial gradients
with respect to each input of the op).
For example, assuming that operations of type `"Sub"` take two
inputs `x` and `y`, and return a single output `x - y`, the
following gradient function would be registered:
```python
@tf.RegisterGradient("Sub")
def _sub_grad(unused_op, grad):
return grad, tf.negative(grad)
```
The decorator argument `op_type` is the string type of an
operation. This corresponds to the `OpDef.name` field for the proto
that defines the operation.
"""
def __init__(self, op_type):
"""Creates a new decorator with `op_type` as the Operation type.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers the function `f` as gradient function for `op_type`."""
_gradient_registry.register(f, self._op_type)
return f
def NotDifferentiable(op_type):
"""Specifies that ops of type `op_type` is not differentiable.
This function should *not* be used for operations that have a
well-defined gradient that is not yet implemented.
This function is only used when defining a new op type. It may be
used for ops such as `tf.size()` that are not differentiable. For
example:
```python
tf.NotDifferentiable("Size")
```
The gradient computed for 'op_type' will then propagate zeros.
For ops that have a well-defined gradient but are not yet implemented,
no declaration should be made, and an error *must* be thrown if
an attempt to request its gradient is made.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not a string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
_gradient_registry.register(None, op_type)
# Alias for the old name, will be eventually removed.
NoGradient = NotDifferentiable
def get_gradient_function(op):
"""Returns the function that computes gradients for "op"."""
if not op.inputs: return None
try:
op_type = op.get_attr("_gradient_op_type")
except ValueError:
op_type = op.type
return _gradient_registry.lookup(op_type)
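# Illustrative sketch (comments only) of the lookup order implemented above:
# an op may carry a "_gradient_op_type" attr (set e.g. via
# `Graph.gradient_override_map`), in which case that name wins over the op's
# own type when consulting the gradient registry. The gradient name below is
# hypothetical:
#
#   @RegisterGradient("HypotheticalCustomSquareGrad")
#   def _custom_square_grad(op, grad):
#     x = op.inputs[0]
#     return 2.0 * x * grad  # relies on the overloaded Tensor arithmetic
#
#   # with g.gradient_override_map({"Square": "HypotheticalCustomSquareGrad"}):
#   #   y = tf.square(x)  # gradients for `y` now use _custom_square_grad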
_shape_registry = registry.Registry("shape functions")
_default_shape_function_registry = registry.Registry("default shape functions")
# These are set to common_shapes.call_cpp_shape_fn by op generated code
# (generated by python_op_gen.cc).
# It is set outside ops.py to avoid a circular dependency.
_call_cpp_shape_fn = None
_call_cpp_shape_fn_and_require_op = None
def _set_call_cpp_shape_fn(call_cpp_shape_fn):
"""Sets default shape fns from passed common_shapes.call_cpp_shape_fn."""
global _call_cpp_shape_fn, _call_cpp_shape_fn_and_require_op
if _call_cpp_shape_fn:
return # already registered
def call_without_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=False)
_call_cpp_shape_fn = call_without_requiring
def call_with_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=True)
_call_cpp_shape_fn_and_require_op = call_with_requiring
class RegisterShape(object):
"""No longer used. Was: A decorator for registering a shape function.
Shape functions must now be registered via the SetShapeFn on the
original Op specification in C++.
"""
def __init__(self, op_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers "f" as the shape function for "op_type"."""
if f is None:
assert _call_cpp_shape_fn
# None is a special "weak" value that provides a default shape function,
# and can be overridden by a non-None registration.
try:
_default_shape_function_registry.register(_call_cpp_shape_fn,
self._op_type)
except KeyError:
# Ignore duplicate registrations of the weak value. This can
# occur if the op library input to wrapper generation
# inadvertently links in one or more of the standard op
# libraries.
pass
else:
_shape_registry.register(f, self._op_type)
return f
def set_shapes_for_outputs(op):
"""Uses the registered shape functions to set the shapes for op's outputs."""
try:
shape_func = _shape_registry.lookup(op.type)
except LookupError:
try:
shape_func = _default_shape_function_registry.lookup(op.type)
except LookupError:
shape_func = _call_cpp_shape_fn_and_require_op
shapes = shape_func(op)
if shapes is None:
raise RuntimeError(
"Shape function for op %s did not return any shapes" % op)
elif isinstance(shapes, dict):
# Returned by call_cpp_shape_fn
shapes_dict = shapes
shapes = shapes_dict["shapes"]
handle_shapes = shapes_dict["handle_shapes"]
handle_dtypes = shapes_dict["handle_dtypes"]
for output, handle_shape, handle_dtype in zip(op.outputs, handle_shapes, handle_dtypes):
# pylint: disable=protected-access
output._handle_shape = handle_shape
output._handle_dtype = handle_dtype
# pylint: enable=protected-access
if len(op.outputs) != len(shapes):
raise RuntimeError(
"Shape function for op %s returned %d shapes but expected %d %s %s" %
(op, len(shapes), len(op.outputs), shape_func.__name__, str(shapes)))
for output, s in zip(op.outputs, shapes):
output.set_shape(s)
class OpStats(object):
"""A holder for statistics about an operator.
This class holds information about the resource requirements for an op,
including the size of its weight parameters on-disk and how many FLOPS it
requires to execute forward inference.
If you define a new operation, you can create a function that will return a
set of information about its usage of the CPU and disk space when serialized.
The function itself takes a Graph object that's been set up so you can call
methods like get_tensor_by_name to help calculate the results, and a NodeDef
argument.
"""
def __init__(self, statistic_type, value=None):
"""Sets up the initial placeholders for the statistics."""
self.statistic_type = statistic_type
self.value = value
@property
def statistic_type(self):
return self._statistic_type
@statistic_type.setter
def statistic_type(self, statistic_type):
self._statistic_type = statistic_type
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def __iadd__(self, other):
if other.statistic_type != self.statistic_type:
raise ValueError("Can't add an OpStat of type %s to one of %s.",
self.statistic_type, other.statistic_type)
if self.value is None:
self.value = other.value
elif other.value is not None:
self._value += other.value
return self
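# Illustrative sketch (comments only) of accumulating statistics with the
# in-place add defined above; `graph` and `graph_def` are assumed to be
# available in the caller's scope:
#
#   total = OpStats("flops", 0)
#   for node in graph_def.node:
#     total += get_stats_for_node_def(graph, node, "flops")
#   print(total.value)  # integer FLOP counts accumulate; None values are skipped
#
# Mixing statistic types (e.g. adding a "weight_parameters" OpStats to a
# "flops" one) raises ValueError, as enforced in `__iadd__`.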
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
"""A decorator for registering the statistics function for an op type.
This decorator can be defined for an op type so that it gives a
report on the resources used by an instance of an operator, in the
form of an OpStats object.
Well-known types of statistics include these so far:
- flops: When running a graph, the bulk of the computation happens doing
numerical calculations like matrix multiplications. This type allows a node
to return how many floating-point operations it takes to complete. The
total number of FLOPs for a graph is a good guide to its expected latency.
You can add your own statistics just by picking a new type string, registering
functions for the ops you care about, and then calling get_stats_for_node_def.
If a statistic for an op is registered multiple times, a KeyError will be
raised.
  Statistics are counted on a per-op basis, so they are not suitable for
  model parameters (capacity), which should be counted only once even when
  they are shared by multiple ops (e.g. in an RNN).
For example, you can define a new metric called doohickey for a Foo operation
by placing this in your code:
```python
@ops.RegisterStatistics("Foo", "doohickey")
def _calc_foo_bojangles(unused_graph, unused_node_def):
return ops.OpStats("doohickey", 20)
```
Then in client code you can retrieve the value by making this call:
```python
doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
```
If the NodeDef is for an op with a registered doohickey function, you'll get
back the calculated amount in doohickey.value, or None if it's not defined.
"""
def __init__(self, op_type, statistic_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string.")
if "," in op_type:
raise TypeError("op_type must not contain a comma.")
self._op_type = op_type
if not isinstance(statistic_type, six.string_types):
raise TypeError("statistic_type must be a string.")
if "," in statistic_type:
raise TypeError("statistic_type must not contain a comma.")
self._statistic_type = statistic_type
def __call__(self, f):
"""Registers "f" as the statistics function for "op_type"."""
_stats_registry.register(f, self._op_type + "," + self._statistic_type)
return f
def get_stats_for_node_def(graph, node, statistic_type):
"""Looks up the node's statistics function in the registry and calls it.
This function takes a Graph object and a NodeDef from a GraphDef, and if
there's an associated statistics method, calls it and returns a result. If no
function has been registered for the particular node type, it returns an empty
statistics object.
Args:
graph: A Graph object that's been set up with the node's graph.
node: A NodeDef describing the operator.
statistic_type: A string identifying the statistic we're interested in.
Returns:
An OpStats object containing information about resource usage.
"""
try:
stats_func = _stats_registry.lookup(node.op + "," + statistic_type)
result = stats_func(graph, node)
except LookupError:
result = OpStats(statistic_type)
return result
def _name_from_scope_name(name):
"""Returns the name of an op given the name of its scope.
Args:
name: the name of the scope.
Returns:
the name of the op (equal to scope name minus any trailing slash).
"""
return name[:-1] if name[-1] == "/" else name
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
A `Graph` contains a set of
@{tf.Operation} objects,
which represent units of computation; and
@{tf.Tensor} objects, which represent
the units of data that flow between operations.
A default `Graph` is always registered, and accessible by calling
@{tf.get_default_graph}.
To add an operation to the default graph, simply call one of the functions
that defines a new `Operation`:
```python
c = tf.constant(4.0)
assert c.graph is tf.get_default_graph()
```
Another typical usage involves the
@{tf.Graph.as_default}
context manager, which overrides the current default graph for the
lifetime of the context:
```python
g = tf.Graph()
with g.as_default():
# Define operations and tensors in `g`.
c = tf.constant(30.0)
assert c.graph is g
```
Important note: This class *is not* thread-safe for graph construction. All
operations should be created from a single thread, or external
synchronization must be provided. Unless otherwise specified, all methods
are not thread-safe.
A `Graph` instance supports an arbitrary number of "collections"
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` uses a collection (named
@{tf.GraphKeys.GLOBAL_VARIABLES}) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
"""
def __init__(self):
"""Creates a new, empty Graph."""
# Protects the core state that may be accessed by multiple readers.
# Only state that can be returned via public accessors (`as_graph_def()`,
# `get_operations()`, `as_graph_element()`, `get_collection()`, and
    # `get_collection_ref()`) is protected by the lock. Thread-safety is provided on a
# best-effort basis to support buggy programs, and is not guaranteed by the
# public `tf.Graph` API.
# NOTE(mrry): This does not protect the various stacks. A warning will
    # be reported if these are used from multiple threads.
self._lock = threading.Lock()
self._nodes_by_id = dict() # GUARDED_BY(self._lock)
self._next_id_counter = 0 # GUARDED_BY(self._lock)
self._nodes_by_name = dict() # GUARDED_BY(self._lock)
self._version = 0 # GUARDED_BY(self._lock)
# Current name stack: uniquified names
self._name_stack = ""
# Maps a name used in the graph to the next id to use for that name.
self._names_in_use = {}
# Functions that will be applied to choose a device if none is specified.
self._device_function_stack = []
# Default original_op applied to new ops.
self._default_original_op = None
# Current control flow context. It could be either CondContext or
# WhileContext defined in ops/control_flow_ops.py
self._control_flow_context = None
    # A new node will depend on the union of all of the nodes in the stack.
self._control_dependencies_stack = []
    # Arbitrary collections of objects.
self._collections = {}
# The graph-level random seed
self._seed = None
# A dictionary of attributes that should be applied to all ops.
self._attr_scope_map = {}
# A map from op type to the kernel label that should be used.
self._op_to_kernel_label_map = {}
# A map from op type to an alternative op type that should be used when
# computing gradients.
self._gradient_override_map = {}
# True if the graph is considered "finalized". In that case no
# new operations can be added.
self._finalized = False
# Functions defined in the graph
self._functions = collections.OrderedDict()
# Default GraphDef versions
self._graph_def_versions = versions_pb2.VersionDef(
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
self._building_function = False
# Stack of colocate_with ops
self._colocation_stack = []
# Set of tensors that are dangerous to feed!
self._unfeedable_tensors = set()
# Set of operations that are dangerous to fetch!
self._unfetchable_ops = set()
# A map of tensor handle placeholder to tensor dtype.
self._handle_feeders = {}
# A map from tensor handle to its read op.
self._handle_readers = {}
# A map from tensor handle to its move op.
self._handle_movers = {}
# A map from tensor handle to its delete op.
self._handle_deleters = {}
# Resource container.
self._container = ""
self._registered_ops = op_def_registry.get_registered_ops()
def _check_not_finalized(self):
"""Check if the graph is finalized.
Raises:
RuntimeError: If the graph finalized.
"""
if self._finalized:
raise RuntimeError("Graph is finalized and cannot be modified.")
def _add_op(self, op):
"""Adds 'op' to the graph.
Args:
      op: the Operation or Tensor to add.
Raises:
TypeError: if op is not an Operation or Tensor.
ValueError: if the op.name or op._id are already used.
"""
self._check_not_finalized()
if not isinstance(op, (Tensor, Operation)):
raise TypeError("op must be a Tensor or Operation: %s" % op)
with self._lock:
# pylint: disable=protected-access
if op._id in self._nodes_by_id:
raise ValueError("cannot add an op with id %d as it already "
"exists in the graph" % op._id)
if op.name in self._nodes_by_name:
raise ValueError("cannot add op with name %s as that name "
"is already used" % op.name)
self._nodes_by_id[op._id] = op
self._nodes_by_name[op.name] = op
self._version = max(self._version, op._id)
# pylint: enable=protected-access
@property
def version(self):
"""Returns a version number that increases as ops are added to the graph.
Note that this is unrelated to the
@{tf.Graph.graph_def_versions}.
"""
if self._finalized:
return self._version
with self._lock:
return self._version
@property
def graph_def_versions(self):
"""The GraphDef version information of this graph.
For details on the meaning of each version, see
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).
Returns:
A `VersionDef`.
"""
return self._graph_def_versions
@property
def seed(self):
"""The graph-level random seed of this graph."""
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
@property
def finalized(self):
"""True if this graph has been finalized."""
return self._finalized
def finalize(self):
"""Finalizes this graph, making it read-only.
After calling `g.finalize()`, no new operations can be added to
`g`. This method is used to ensure that no operations are added
to a graph when it is shared between multiple threads, for example
when using a @{tf.train.QueueRunner}.
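For example, a minimal sketch of the intended usage:
```python
g = tf.Graph()
with g.as_default():
  c = tf.constant(1.0)
g.finalize()
assert g.finalized
# Attempting to add another op to `g` now raises a RuntimeError.
```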
"""
self._finalized = True
def _unsafe_unfinalize(self):
"""Opposite of `finalize`. Internal interface.
NOTE: Unfinalizing a graph could have negative impact on performance,
especially in a multi-threaded environment. Unfinalizing a graph
when it is in use by a Session may lead to undefined behavior. Ensure
that all sessions using a graph are closed before calling this method.
"""
self._finalized = False
def _get_control_flow_context(self):
"""Returns the current control flow context.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, context):
"""Sets the current control flow context.
Args:
context: a context object.
"""
self._control_flow_context = context
def _as_graph_def(self, from_version=None, add_shapes=False):
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using @{tf.import_graph_def}) or used with the
[C++ Session API](../../../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef`
containing only the nodes that were added to this graph since
its `version` property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each
node with the inferred shapes of each of its outputs.
Returns:
A tuple containing a
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and the version of the graph to which that
`GraphDef` corresponds.
Raises:
ValueError: If the `graph_def` would be too large.
"""
with self._lock:
graph = graph_pb2.GraphDef()
graph.versions.CopyFrom(self._graph_def_versions)
bytesize = 0
for op_id in sorted(self._nodes_by_id):
op = self._nodes_by_id[op_id]
if from_version is None or op_id > from_version:
graph.node.extend([op.node_def])
if op.outputs and add_shapes:
assert "_output_shapes" not in graph.node[-1].attr
graph.node[-1].attr["_output_shapes"].list.shape.extend([
output.get_shape().as_proto() for output in op.outputs])
bytesize += op.node_def.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
if self._functions:
for f in self._functions.values():
bytesize += f.definition.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph.library.function.extend([f.definition])
if f.grad_func_name:
grad_def = function_pb2.GradientDef()
grad_def.function_name = f.name
grad_def.gradient_func = f.grad_func_name
graph.library.gradient.extend([grad_def])
return graph, self._version
def as_graph_def(self, from_version=None, add_shapes=False):
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using @{tf.import_graph_def}) or used with the
[C++ Session API](../../api_docs/cc/index.md).
This method is thread-safe.
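For example, a brief sketch (the op name "c" is illustrative only):
```python
with tf.Graph().as_default() as g:
  tf.constant(1.0, name="c")
  graph_def = g.as_graph_def()
  # `graph_def` is a `GraphDef` protocol buffer containing a node named "c".
  assert any(node.name == "c" for node in graph_def.node)
```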
Args:
from_version: Optional. If this is set, returns a `GraphDef`
containing only the nodes that were added to this graph since
its `version` property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each
node with the inferred shapes of each of its outputs.
Returns:
A [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer.
Raises:
ValueError: If the `graph_def` would be too large.
"""
result, _ = self._as_graph_def(from_version, add_shapes)
return result
def _is_function(self, name):
"""Tests whether 'name' is registered in this graph's function library.
Args:
name: string op name.
Returns:
bool indicating whether or not 'name' is registered in function library.
"""
return name in self._functions
def _get_function(self, name):
"""Returns the function definition for 'name'.
Args:
name: string function name.
Returns:
The function def proto.
"""
return self._functions.get(name, None)
def _add_function(self, function):
"""Adds a function to the graph.
After the function has been added, you can call the function by
passing the function name in place of an op name to
`Graph.create_op()`.
Args:
function: A `_DefinedFunction` object.
Raises:
ValueError: if another function is defined with the same name.
"""
name = function.name
previous = self._functions.get(name, None)
if previous:
raise ValueError("Another function is already defined with that name")
# Sanity checks on gradient definition.
if (function.grad_func_name is not None) and (
function.python_grad_func is not None):
raise ValueError("Gradient defined twice for function %s" % name)
# Need a new-enough consumer to support the functions we add to the graph.
if self._graph_def_versions.min_consumer < 12:
self._graph_def_versions.min_consumer = 12
self._functions[name] = function
@property
def building_function(self):
"""Returns True iff this graph represents a function."""
return self._building_function
# Helper functions to create operations.
def create_op(self, op_type, inputs, dtypes,
input_types=None, name=None, attrs=None, op_def=None,
compute_shapes=True, compute_device=True):
"""Creates an `Operation` in this graph.
This is a low-level interface for creating an `Operation`. Most
programs will not call this method directly, and instead use the
Python op constructors, such as `tf.constant()`, which add ops to
the default graph.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: A list of `DType` objects that will be the types of the tensors
that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of
the tensors that the operation consumes. By default, uses the base
`DType` of each input in `inputs`. Operations that expect
reference-typed inputs must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) If True, shape inference will be performed
to compute the shapes of the outputs.
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Raises:
TypeError: if any of the inputs is not a `Tensor`.
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
for idx, a in enumerate(inputs):
if not isinstance(a, Tensor):
raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
if name is None:
name = op_type
# If a name ends with a '/', it is a "name scope" and we use it as-is,
# after removing the trailing '/'.
if name and name[-1] == "/":
name = _name_from_scope_name(name)
else:
name = self.unique_name(name)
node_def = _NodeDef(op_type, name, device=None, attrs=attrs)
# Apply any additional attributes requested. Do not overwrite any existing
# attributes.
for key, value in self._attr_scope_map.items():
if key not in node_def.attr:
if callable(value):
value = value(node_def)
if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
raise TypeError(
"Callable for scope map key '%s' must return either None or "
"an AttrValue protocol buffer; but it returned: %s" %
(key, value))
node_def.attr[key].CopyFrom(value)
# Apply a kernel label if one has been specified for this op_type.
try:
kernel_label = self._op_to_kernel_label_map[op_type]
node_def.attr["_kernel"].CopyFrom(
attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
except KeyError:
pass
# Apply the overriding op_type for gradients if one has been
# specified for this op_type.
try:
mapped_op_type = self._gradient_override_map[op_type]
node_def.attr["_gradient_op_type"].CopyFrom(
attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
except KeyError:
pass
control_inputs = self._control_dependencies_for_inputs(inputs)
ret = Operation(node_def, self, inputs=inputs, output_types=dtypes,
control_inputs=control_inputs, input_types=input_types,
original_op=self._default_original_op, op_def=op_def)
if compute_shapes:
set_shapes_for_outputs(ret)
self._add_op(ret)
self._record_op_seen_by_control_dependencies(ret)
if compute_device:
self._apply_device_functions(ret)
if self._colocation_stack:
all_colocation_groups = []
for colocation_op in self._colocation_stack:
all_colocation_groups.extend(colocation_op.colocation_groups())
if colocation_op.device:
# Make this device match the device of the colocated op, to
# provide consistency between the device and the colocation
# property.
if ret.device and ret.device != colocation_op.device:
logging.warning("Tried to colocate %s with an op %s that had "
"a different device: %s vs %s. "
"Ignoring colocation property.",
name, colocation_op.name,
ret.device, colocation_op.device)
else:
ret._set_device(colocation_op.device)
all_colocation_groups = sorted(set(all_colocation_groups))
ret.node_def.attr["_class"].CopyFrom(attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
# Sets the "container" attribute if
# (1) self._container is not empty
# (2) "is_stateful" is set in OpDef
# (3) "container" attribute is in OpDef
# (4) "container" attribute is not already set
if (self._container and
op_type in self._registered_ops and
self._registered_ops[op_type].is_stateful and
"container" in ret.node_def.attr and
not ret.node_def.attr["container"].s):
ret.node_def.attr["container"].CopyFrom(
attr_value_pb2.AttrValue(s=compat.as_bytes(self._container)))
return ret
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
"""Returns the object referred to by `obj`, as an `Operation` or `Tensor`.
This function validates that `obj` represents an element of this
graph, and gives an informative error message if it is not.
This function is the canonical way to get/validate an object of
one of the allowed types from an external argument reference in the
Session API.
This method may be called concurrently from multiple threads.
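For example, a minimal sketch of the lookup rules described above:
```python
with tf.Graph().as_default() as g:
  c = tf.constant(5.0, name="c")
  assert g.as_graph_element("c") is c.op   # Operation, looked up by name.
  assert g.as_graph_element("c:0") is c    # Tensor, looked up by name.
  assert g.as_graph_element(c) is c        # An existing element passes through.
```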
Args:
obj: A `Tensor`, an `Operation`, or the name of a tensor or operation.
Can also be any object with an `_as_graph_element()` method that returns
a value of one of these types.
allow_tensor: If true, `obj` may refer to a `Tensor`.
allow_operation: If true, `obj` may refer to an `Operation`.
Returns:
The `Tensor` or `Operation` in the Graph corresponding to `obj`.
Raises:
TypeError: If `obj` is of a type that cannot be converted to one of
the allowed types.
ValueError: If `obj` is of an appropriate type but invalid. For
example, an invalid string.
KeyError: If `obj` is not an object in the graph.
"""
if self._finalized:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
with self._lock:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
"""See `Graph.as_graph_element()` for details."""
# The vast majority of this function is figuring
# out what an API user might be doing wrong, so
# that we can give helpful error messages.
#
# Ideally, it would be nice to split it up, but we
# need context to generate nice error messages.
if allow_tensor and allow_operation:
types_str = "Tensor or Operation"
elif allow_tensor:
types_str = "Tensor"
elif allow_operation:
types_str = "Operation"
else:
raise ValueError("allow_tensor and allow_operation can't both be False.")
temp_obj = _as_graph_element(obj)
if temp_obj is not None:
obj = temp_obj
# If obj appears to be a name...
if isinstance(obj, compat.bytes_or_text_types):
name = compat.as_str(obj)
if ":" in name and allow_tensor:
# Looks like a Tensor name and can be a Tensor.
try:
op_name, out_n = name.split(":")
out_n = int(out_n)
except:
raise ValueError("The name %s looks a like a Tensor name, but is "
"not a valid one. Tensor names must be of the "
"form \"<op_name>:<output_index>\"." % repr(name))
if op_name in self._nodes_by_name:
op = self._nodes_by_name[op_name]
else:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, does not exist in the "
"graph." % (repr(name), repr(op_name)))
try:
return op.outputs[out_n]
except:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, exists but only has "
"%s outputs."
% (repr(name), repr(op_name), len(op.outputs)))
elif ":" in name and not allow_tensor:
# Looks like a Tensor name but can't be a Tensor.
raise ValueError("Name %s appears to refer to a Tensor, not a %s."
% (repr(name), types_str))
elif ":" not in name and allow_operation:
# Looks like an Operation name and can be an Operation.
if name not in self._nodes_by_name:
raise KeyError("The name %s refers to an Operation not in the "
"graph." % repr(name))
return self._nodes_by_name[name]
elif ":" not in name and not allow_operation:
# Looks like an Operation name but can't be an Operation.
if name in self._nodes_by_name:
# Yep, it's an Operation name
err_msg = ("The name %s refers to an Operation, not a %s."
% (repr(name), types_str))
else:
err_msg = ("The name %s looks like an (invalid) Operation name, "
"not a %s." % (repr(name), types_str))
err_msg += (" Tensor names must be of the form "
"\"<op_name>:<output_index>\".")
raise ValueError(err_msg)
elif isinstance(obj, Tensor) and allow_tensor:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Tensor %s is not an element of this graph." % obj)
return obj
elif isinstance(obj, Operation) and allow_operation:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Operation %s is not an element of this graph." % obj)
return obj
else:
# We give up!
raise TypeError("Can not convert a %s into a %s."
% (type(obj).__name__, types_str))
def get_operations(self):
"""Return the list of operations in the graph.
You can modify the operations in place, but modifications
to the returned list (such as inserts or deletes) have no effect on the
list of operations known to the graph.
This method may be called concurrently from multiple threads.
Returns:
A list of Operations.
"""
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
def get_operation_by_name(self, name):
"""Returns the `Operation` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to an operation in this graph.
"""
if not isinstance(name, six.string_types):
raise TypeError("Operation names are strings (or similar), not %s."
% type(name).__name__)
return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def get_tensor_by_name(self, name):
"""Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
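For example, a minimal sketch (note the ":0" output-index suffix):
```python
with tf.Graph().as_default() as g:
  c = tf.constant(5.0, name="c")
  assert g.get_tensor_by_name("c:0") is c
```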
Args:
name: The name of the `Tensor` to return.
Returns:
The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to a tensor in this graph.
"""
# Names should be strings.
if not isinstance(name, six.string_types):
raise TypeError("Tensor names are strings (or similar), not %s."
% type(name).__name__)
return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
def _next_id(self):
"""Id for next Operation instance. Also increments the internal id."""
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
return self._next_id_counter
@property
def _last_id(self):
return self._next_id_counter
def as_default(self):
"""Returns a context manager that makes this `Graph` the default graph.
This method should be used if you want to create multiple graphs
in the same process. For convenience, a global default graph is
provided, and all ops will be added to this graph if you do not
create a new graph explicitly. Use this method with the `with` keyword
to specify that ops created within the scope of a block should be
added to this graph.
The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
The following code examples are equivalent:
```python
# 1. Using Graph.as_default():
g = tf.Graph()
with g.as_default():
c = tf.constant(5.0)
assert c.graph is g
# 2. Constructing and making default:
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
assert c.graph is g
```
Returns:
A context manager for using this graph as the default graph.
"""
return _default_graph_stack.get_controller(self)
def add_to_collection(self, name, value):
"""Stores `value` in the collection with the given `name`.
Note that collections are not sets, so it is possible to add a value to
a collection several times.
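For example, a minimal sketch (the collection name "my_collection" is
arbitrary):
```python
g = tf.Graph()
with g.as_default():
  c = tf.constant(1.0)
  g.add_to_collection("my_collection", c)
  g.add_to_collection("my_collection", c)  # Collections are not sets.
  assert g.get_collection("my_collection") == [c, c]
```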
Args:
name: The key for the collection. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection.
"""
self._check_not_finalized()
with self._lock:
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
"""Stores `value` in the collections given by `names`.
Note that collections are not sets, so it is possible to add a value to
a collection several times. This function makes sure that duplicates in
`names` are ignored, but it will not check for pre-existing membership of
`value` in any of the collections in `names`.
`names` can be any iterable, but if `names` is a string, it is treated as a
single collection name.
Args:
names: The keys for the collections to add to. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
# Make sure names are unique, but treat strings as a single collection name
names = (names,) if isinstance(names, six.string_types) else set(names)
for name in names:
self.add_to_collection(name, value)
def get_collection_ref(self, name):
"""Returns a list of values in the collection with the given `name`.
If the collection exists, this returns the list itself, which can
be modified in place to change the collection. If the collection does
not exist, it is created as an empty list and the list is returned.
This is different from `get_collection()` which always returns a copy of
the collection list if it exists and never creates an empty collection.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection.
"""
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
"""Returns a list of values in the collection with the given `name`.
This is different from `get_collection_ref()`, which always returns the
actual collection list if it exists: this method returns a new list each
time it is called.
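For example, a rough sketch of scope-based filtering (the scope and
collection names are illustrative only):
```python
with tf.Graph().as_default() as g:
  with g.name_scope("block1"):
    a = tf.constant(1.0, name="a")   # Tensor name: "block1/a:0"
  with g.name_scope("block2"):
    b = tf.constant(2.0, name="b")   # Tensor name: "block2/b:0"
  g.add_to_collection("consts", a)
  g.add_to_collection("consts", b)
  assert g.get_collection("consts") == [a, b]
  assert g.get_collection("consts", scope="block1") == [a]
```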
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) If supplied, the resulting list is filtered to include
only items whose `name` attribute matches using `re.match`. Items
without a `name` attribute are never returned if a scope is supplied and
the choice or `re.match` means that a `scope` without special tokens
filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
"""
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
return []
if scope is None:
return list(coll_list)
else:
c = []
regex = re.compile(scope)
for item in coll_list:
if hasattr(item, "name") and regex.match(item.name):
c.append(item)
return c
def get_all_collection_keys(self):
"""Returns a list of collections used in this graph."""
with self._lock:
return [x for x in self._collections if isinstance(x, six.string_types)]
def clear_collection(self, name):
"""Clears all values in a collection.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
"""
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
@contextlib.contextmanager
def _original_op(self, op):
"""Python 'with' handler to help annotate ops with their originator.
An op may have an 'original_op' property that indicates the op on which
it was based. For example a replica op is based on the op that was
replicated and a gradient op is based on the op that was differentiated.
All ops created in the scope of this 'with' handler will have
the given 'op' as their original op.
Args:
op: The Operation that all ops created in this scope will have as their
original op.
Yields:
Nothing.
"""
old_original_op = self._default_original_op
try:
self._default_original_op = op
yield
finally:
self._default_original_op = old_original_op
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def name_scope(self, name):
r"""Returns a context manager that creates hierarchical names for operations.
A graph maintains a stack of name scopes. A `with name_scope(...):`
statement pushes a new name onto the stack for the lifetime of the context.
The `name` argument will be interpreted as follows:
* A string (not ending with '/') will create a new name scope, in which
`name` is appended to the prefix of all operations created in the
context. If `name` has been used before, it will be made unique by
calling `self.unique_name(name)`.
* A scope previously captured from a `with g.name_scope(...) as
scope:` statement will be treated as an "absolute" name scope, which
makes it possible to re-enter existing scopes.
* A value of `None` or the empty string will reset the current name scope
to the top-level (empty) name scope.
For example:
```python
with tf.Graph().as_default() as g:
c = tf.constant(5.0, name="c")
assert c.op.name == "c"
c_1 = tf.constant(6.0, name="c")
assert c_1.op.name == "c_1"
# Creates a scope called "nested"
with g.name_scope("nested") as scope:
nested_c = tf.constant(10.0, name="c")
assert nested_c.op.name == "nested/c"
# Creates a nested scope called "inner".
with g.name_scope("inner"):
nested_inner_c = tf.constant(20.0, name="c")
assert nested_inner_c.op.name == "nested/inner/c"
# Create a nested scope called "inner_1".
with g.name_scope("inner"):
nested_inner_1_c = tf.constant(30.0, name="c")
assert nested_inner_1_c.op.name == "nested/inner_1/c"
# Treats `scope` as an absolute name scope, and
# switches to the "nested/" scope.
with g.name_scope(scope):
nested_d = tf.constant(40.0, name="d")
assert nested_d.op.name == "nested/d"
with g.name_scope(""):
e = tf.constant(50.0, name="e")
assert e.op.name == "e"
```
The name of the scope itself can be captured by `with
g.name_scope(...) as scope:`, which stores the name of the scope
in the variable `scope`. This value can be used to name an
operation that represents the overall result of executing the ops
in a scope. For example:
```python
inputs = tf.constant(...)
with g.name_scope('my_layer') as scope:
weights = tf.Variable(..., name="weights")
biases = tf.Variable(..., name="biases")
affine = tf.matmul(inputs, weights) + biases
output = tf.nn.relu(affine, name=scope)
```
NOTE: This context manager validates the given `name`. Valid scope
names match one of the following regular expressions:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
[A-Za-z0-9_.\\-/]* (for other scopes)
Args:
name: A name for the scope.
Returns:
A context manager that installs `name` as a new name scope.
Raises:
ValueError: If `name` is not a valid scope name, according to the rules
above.
"""
if name:
if self._name_stack:
# Scopes created in a nested scope may have initial characters
# that are illegal as the initial character of an op name
# (viz. '-', '\', '/', and '_').
if not _VALID_SCOPE_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
else:
# Scopes created in the root must match the more restrictive
# op name regex, which constrains the initial character.
if not _VALID_OP_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
try:
old_stack = self._name_stack
if not name: # Both for name=None and name="" we re-set to empty scope.
new_stack = None
elif name and name[-1] == "/":
new_stack = _name_from_scope_name(name)
else:
new_stack = self.unique_name(name)
self._name_stack = new_stack
yield "" if new_stack is None else new_stack + "/"
finally:
self._name_stack = old_stack
# pylint: enable=g-doc-return-or-yield
def unique_name(self, name, mark_as_used=True):
"""Return a unique operation name for `name`.
Note: You rarely need to call `unique_name()` directly. Most of
the time you just need to create `with g.name_scope()` blocks to
generate structured names.
`unique_name` is used to generate structured names, separated by
`"/"`, to help identify operations when debugging a graph.
Operation names are displayed in error messages reported by the
TensorFlow runtime, and in various visualization tools such as
TensorBoard.
If `mark_as_used` is set to `True`, which is the default, a new
unique name is created and marked as in use. If it's set to `False`,
the unique name is returned without actually being marked as used.
This is useful when the caller simply wants to know what the name
to be created will be.
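For example, a minimal sketch of the uniquification behaviour:
```python
g = tf.Graph()
assert g.unique_name("foo") == "foo"
assert g.unique_name("foo") == "foo_1"
# With `mark_as_used=False` the candidate name is not reserved:
assert g.unique_name("foo", mark_as_used=False) == "foo_2"
assert g.unique_name("foo", mark_as_used=False) == "foo_2"
```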
Args:
name: The name for an operation.
mark_as_used: Whether to mark this name as being used.
Returns:
A string to be passed to `create_op()` that will be used
to name the operation being created.
"""
if self._name_stack:
name = self._name_stack + "/" + name
i = self._names_in_use.get(name, 0)
# Increment the number for "name".
if mark_as_used:
self._names_in_use[name] = i + 1
if i > 0:
base_name = name
# Make sure the composed name is not already used.
while name in self._names_in_use:
name = "%s_%d" % (base_name, i)
i += 1
# Mark the composed name as used in case someone wants
# to call unique_name("name_1").
if mark_as_used:
self._names_in_use[name] = 1
return name
@contextlib.contextmanager
def colocate_with(self, op, ignore_existing=False):
"""Returns a context manager that specifies an op to colocate with.
Note: this function is not for public use, only for internal libraries.
For example:
```python
a = tf.Variable([1.0])
with g.colocate_with(a):
b = tf.constant(1.0)
c = tf.add(a, b)
```
`b` and `c` will always be colocated with `a`, no matter where `a`
is eventually placed.
**NOTE** Using a colocation scope resets any existing device constraints.
If `op` is `None` then `ignore_existing` must be `True` and the new
scope resets all colocation and device constraints.
Args:
op: The op to colocate all created ops with, or `None`.
ignore_existing: If true, only applies colocation of this op within
the context, rather than applying all colocation properties
on the stack. If `op` is `None`, this value must be `True`.
Raises:
ValueError: if op is None but ignore_existing is False.
Yields:
A context manager that specifies the op with which to colocate
newly created ops.
"""
if op is None and not ignore_existing:
raise ValueError(
"Trying to reset colocation (op is None) but "
"ignore_existing is not True")
if op is not None and not isinstance(op, Operation):
# We always want to colocate with the reference op.
op = internal_convert_to_tensor_or_indexed_slices(op, as_ref=True).op
# By default, colocate_with resets the device function stack,
# since colocate_with is typically used in specific internal
# library functions where colocation is intended to be "stronger"
# than device functions.
#
# In the future, a caller may specify that device_functions win
# over colocation, in which case we can add support.
device_fn_tmp = self._device_function_stack
self._device_function_stack = []
if ignore_existing:
current_stack = self._colocation_stack
self._colocation_stack = []
if op is not None:
self._colocation_stack.append(op)
try:
yield
finally:
# Restore device function stack
self._device_function_stack = device_fn_tmp
if op is not None:
self._colocation_stack.pop()
# Reset the colocation stack if requested.
if ignore_existing:
self._colocation_stack = current_stack
@contextlib.contextmanager
def device(self, device_name_or_function):
"""Returns a context manager that specifies the default device to use.
The `device_name_or_function` argument may either be a device name
string, a device function, or None:
* If it is a device name string, all operations constructed in
this context will be assigned to the device with that name, unless
overridden by a nested `device()` context.
* If it is a function, it will be treated as a function from
Operation objects to device name strings, and invoked each time
a new Operation is created. The Operation will be assigned to
the device with the returned name.
* If it is None, all `device()` invocations from the enclosing context
will be ignored.
For information about the valid syntax of device name strings, see
the documentation in
[`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).
For example:
```python
with g.device('/gpu:0'):
# All operations constructed in this context will be placed
# on GPU 0.
with g.device(None):
# All operations constructed in this context will have no
# assigned device.
# Defines a function from `Operation` to device string.
def matmul_on_gpu(n):
if n.type == "MatMul":
return "/gpu:0"
else:
return "/cpu:0"
with g.device(matmul_on_gpu):
# All operations of type "MatMul" constructed in this context
# will be placed on GPU 0; all other operations will be placed
# on CPU 0.
```
**N.B.** The device scope may be overridden by op wrappers or
other library code. For example, a variable assignment op
`v.assign()` must be colocated with the `tf.Variable` `v`, and
incompatible device scopes will be ignored.
Args:
device_name_or_function: The device name or function to use in
the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
"""
if (device_name_or_function is not None
and not callable(device_name_or_function)):
device_function = pydev.merge_device(device_name_or_function)
else:
device_function = device_name_or_function
try:
self._device_function_stack.append(device_function)
yield
finally:
self._device_function_stack.pop()
def _apply_device_functions(self, op):
"""Applies the current device function stack to the given operation."""
# Apply any device functions in reverse order, so that the most recently
# pushed function has the first chance to apply a device to the op.
# We apply here because the result can depend on the Operation's
# signature, which is computed in the Operation constructor.
for device_function in reversed(self._device_function_stack):
if device_function is None:
break
op._set_device(device_function(op))
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Stateful operations, such as variables and queues, can maintain their
states on devices so that they can be shared by multiple processes.
A resource container is a string name under which these stateful
operations are tracked. These resources can be released or cleared
with `tf.Session.reset()`.
For example:
```python
with g.container('experiment0'):
# All stateful Operations constructed in this context will be placed
# in resource container "experiment0".
v1 = tf.Variable([1.0])
v2 = tf.Variable([2.0])
with g.container("experiment1"):
# All stateful Operations constructed in this context will be
# placed in resource container "experiment1".
v3 = tf.Variable([3.0])
q1 = tf.FIFOQueue(10, tf.float32)
# All stateful Operations constructed in this context will be
# created in resource container "experiment0".
v4 = tf.Variable([4.0])
q2 = tf.FIFOQueue(20, tf.float32)
with g.container(""):
# All stateful Operations constructed in this context will be
# placed in the default resource container.
v5 = tf.Variable([5.0])
q3 = tf.FIFOQueue(30, tf.float32)
# Resets container "experiment0", after which the state of v1, v2, v4, q2
# will become undefined (such as uninitialized).
tf.Session.reset(target, ["experiment0"])
```
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
try:
self._container = container_name
yield self._container
finally:
self._container = original_container
# pylint: enable=g-doc-return-or-yield
class _ControlDependenciesController(object):
"""Context manager for `control_dependencies()`."""
def __init__(self, graph, control_inputs):
"""Create a new `_ControlDependenciesController`.
A `_ControlDependenciesController` is the context manager for
`with tf.control_dependencies()` blocks. These normally nest,
as described in the documentation for `control_dependencies()`.
The `control_inputs` argument lists control dependencies that must be
added to the current set of control dependencies. Because of
uniquification the set can be empty even if the caller passed a list of
ops. The special value `None` indicates that we want to start a new
empty set of control dependencies instead of extending the current set.
In that case we also clear the current control flow context, which is an
additional mechanism to add control dependencies.
Args:
graph: The graph that this controller is managing.
control_inputs: List of ops to use as control inputs in addition
to the current control dependencies. None to indicate that
the dependencies should be cleared.
"""
self._graph = graph
if control_inputs is None:
self._control_inputs = []
self._new_stack = True
else:
self._control_inputs = control_inputs
self._new_stack = False
self._seen_nodes = set()
self._old_stack = None
self._old_control_flow_context = None
# pylint: disable=protected-access
def __enter__(self):
if self._new_stack:
# Clear the control_dependencies graph.
self._old_stack = self._graph._control_dependencies_stack
self._graph._control_dependencies_stack = []
# Clear the control_flow_context too.
self._old_control_flow_context = self._graph._get_control_flow_context()
self._graph._set_control_flow_context(None)
self._graph._push_control_dependencies_controller(self)
def __exit__(self, unused_type, unused_value, unused_traceback):
self._graph._pop_control_dependencies_controller(self)
if self._new_stack:
self._graph._control_dependencies_stack = self._old_stack
self._graph._set_control_flow_context(self._old_control_flow_context)
# pylint: enable=protected-access
@property
def control_inputs(self):
return self._control_inputs
def add_op(self, op):
self._seen_nodes.add(op)
def op_in_group(self, op):
return op in self._seen_nodes
def _push_control_dependencies_controller(self, controller):
self._control_dependencies_stack.append(controller)
def _pop_control_dependencies_controller(self, controller):
assert self._control_dependencies_stack[-1] is controller
self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
ret = set()
for controller in self._control_dependencies_stack:
for op in controller.control_inputs:
ret.add(op)
return ret
def _control_dependencies_for_inputs(self, input_tensors):
"""For an op that takes `input_tensors` as inputs, compute control inputs.
The returned control dependencies should yield an execution that
is equivalent to adding all control inputs in
self._control_dependencies_stack to a newly created op. However,
this function attempts to prune the returned control dependencies
by observing that nodes created within the same `with
control_dependencies(...):` block may have data dependencies that make
the explicit approach redundant.
Args:
input_tensors: The direct data dependencies for an op to be created.
Returns:
A list of control inputs for the op to be created.
"""
ret = []
input_ops = set([t.op for t in input_tensors])
for controller in self._control_dependencies_stack:
# If any of the input_ops already depends on the inputs from controller,
# we say that the new op is dominated (by that input), and we therefore
# do not need to add control dependencies for this controller's inputs.
dominated = False
for op in input_ops:
if controller.op_in_group(op):
dominated = True
break
if not dominated:
# Don't add a control input if we already have a data dependency on it.
# NOTE(mrry): We do not currently track transitive data dependencies,
# so we may add redundant control inputs.
ret.extend([c for c in controller.control_inputs if c not in input_ops])
return ret
def _record_op_seen_by_control_dependencies(self, op):
"""Record that the given op depends on all registered control dependencies.
Args:
op: An Operation.
"""
for controller in self._control_dependencies_stack:
controller.add_op(op)
def control_dependencies(self, control_inputs):
"""Returns a context manager that specifies control dependencies.
Use with the `with` keyword to specify that all operations constructed
within the context should have control dependencies on
`control_inputs`. For example:
```python
with g.control_dependencies([a, b, c]):
# `d` and `e` will only run after `a`, `b`, and `c` have executed.
d = ...
e = ...
```
Multiple calls to `control_dependencies()` can be nested, and in
that case a new `Operation` will have control dependencies on the union
of `control_inputs` from all active contexts.
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `a`, `b`, `c`, and `d`.
```
You can pass None to clear the control dependencies:
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies(None):
# Ops constructed here run normally, not waiting for either `a` or `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `c` and `d`, also not waiting
# for either `a` or `b`.
```
*N.B.* The control dependencies context applies *only* to ops that
are constructed within the context. Merely using an op or tensor
in the context does not add a control dependency. The following
example illustrates this point:
```python
# WRONG
def my_func(pred, tensor):
t = tf.matmul(tensor, tensor)
with tf.control_dependencies([pred]):
# The matmul op is created outside the context, so no control
# dependency will be added.
return t
# RIGHT
def my_func(pred, tensor):
with tf.control_dependencies([pred]):
# The matmul op is created in the context, so a control dependency
# will be added.
return tf.matmul(tensor, tensor)
```
Args:
control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return self._ControlDependenciesController(self, None)
# First convert the inputs to ops, and deduplicate them.
# NOTE(mrry): Other than deduplication, we do not currently track direct
# or indirect dependencies between control_inputs, which may result in
# redundant control inputs.
control_ops = []
current = self._current_control_dependencies()
for c in control_inputs:
c = self.as_graph_element(c)
if isinstance(c, Tensor):
c = c.op
elif not isinstance(c, Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
if c not in current:
control_ops.append(c)
current.add(c)
return self._ControlDependenciesController(self, control_ops)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def _attr_scope(self, attr_map):
"""EXPERIMENTAL: A context manager for setting attributes on operators.
This context manager can be used to add additional
attributes to operators within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # No extra attributes
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
f_2 = Foo() # Additional attribute _a=False
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
f_3 = Foo() # Additional attribute _a=True
with g._attr_scope({"_a": None}):
f_4 = Foo() # No additional attributes.
Args:
attr_map: A dictionary mapping attr name strings to
AttrValue protocol buffers or None.
Returns:
A context manager that sets the given attributes for one or more
ops created in that context.
Raises:
TypeError: If attr_map is not a dictionary mapping
strings to AttrValue protobufs.
"""
if not isinstance(attr_map, dict):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers")
# The saved_attrs dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_attrs = {}
# Install the given attribute
for name, attr in attr_map.items():
if not (isinstance(name, six.string_types) and
(isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
callable(attr))):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers or "
"callables that emit AttrValue protocol buffers")
try:
saved_attrs[name] = self._attr_scope_map[name]
except KeyError:
pass
if attr is None:
del self._attr_scope_map[name]
else:
self._attr_scope_map[name] = attr
try:
yield # The code within the context runs here.
finally:
# Remove the attributes set for this context, and restore any saved
# attributes.
for name, attr in attr_map.items():
try:
self._attr_scope_map[name] = saved_attrs[name]
except KeyError:
del self._attr_scope_map[name]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def _kernel_label_map(self, op_to_kernel_label_map):
"""EXPERIMENTAL: A context manager for setting kernel labels.
This context manager can be used to select particular
implementations of kernels within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # Uses the default registered kernel for the Foo op.
with g.kernel_label_map({"Foo": "v_2"}):
f_2 = Foo() # Uses the registered kernel with label "v_2"
# for the Foo op.
with g.kernel_label_map({"Foo": "v_3"}):
f_3 = Foo() # Uses the registered kernel with label "v_3"
# for the Foo op.
with g.kernel_label_map({"Foo": ""}):
f_4 = Foo() # Uses the default registered kernel
# for the Foo op.
Args:
op_to_kernel_label_map: A dictionary mapping op type strings to
kernel label strings.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If op_to_kernel_label_map is not a dictionary mapping
strings to strings.
"""
if not isinstance(op_to_kernel_label_map, dict):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
# The saved_labels dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_labels = {}
# Install the given label
for op_type, label in op_to_kernel_label_map.items():
if not (isinstance(op_type, six.string_types)
and isinstance(label, six.string_types)):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
try:
saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
except KeyError:
pass
self._op_to_kernel_label_map[op_type] = label
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, label in op_to_kernel_label_map.items():
try:
self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
except KeyError:
del self._op_to_kernel_label_map[op_type]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def gradient_override_map(self, op_type_map):
"""EXPERIMENTAL: A context manager for overriding gradient functions.
This context manager can be used to override the gradient function
that will be used for ops within the scope of the context.
For example:
```python
@tf.RegisterGradient("CustomSquare")
def _custom_square_grad(op, grad):
# ...
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
s_1 = tf.square(c) # Uses the default gradient for tf.square.
with g.gradient_override_map({"Square": "CustomSquare"}):
s_2 = tf.square(c)  # Uses _custom_square_grad to compute the
# gradient of s_2.
```
Args:
op_type_map: A dictionary mapping op type strings to alternative op
type strings.
Returns:
A context manager that sets the alternative op type to be used for one
or more ops created in that context.
Raises:
TypeError: If `op_type_map` is not a dictionary mapping strings to
strings.
"""
if not isinstance(op_type_map, dict):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
# The saved_mappings dictionary stores any currently-set mappings that
# will be overridden by this context manager.
saved_mappings = {}
# Install the given gradient mapping.
for op_type, mapped_op_type in op_type_map.items():
if not (isinstance(op_type, six.string_types)
and isinstance(mapped_op_type, six.string_types)):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
try:
saved_mappings[op_type] = self._gradient_override_map[op_type]
except KeyError:
pass
self._gradient_override_map[op_type] = mapped_op_type
try:
yield # The code within the context runs here.
finally:
# Remove the mappings set for this context, and restore any saved mappings.
for op_type, mapped_op_type in op_type_map.items():
try:
self._gradient_override_map[op_type] = saved_mappings[op_type]
except KeyError:
del self._gradient_override_map[op_type]
# pylint: enable=g-doc-return-or-yield
def prevent_feeding(self, tensor):
"""Marks the given `tensor` as unfeedable in this graph."""
self._unfeedable_tensors.add(tensor)
def is_feedable(self, tensor):
"""Returns `True` if and only if `tensor` is feedable."""
return tensor not in self._unfeedable_tensors
def prevent_fetching(self, op):
"""Marks the given `op` as unfetchable in this graph."""
self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
"""Returns `True` if and only if `tensor_or_op` is fetchable."""
if isinstance(tensor_or_op, Tensor):
return tensor_or_op.op not in self._unfetchable_ops
else:
return tensor_or_op not in self._unfetchable_ops
def device(device_name_or_function):
"""Wrapper for `Graph.device()` using the default graph.
See
@{tf.Graph.device}
for more details.
Args:
device_name_or_function: The device name or function to use in
the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
"""
return get_default_graph().device(device_name_or_function)
def container(container_name):
"""Wrapper for `Graph.container()` using the default graph.
Args:
container_name: The container string to use in the context.
Returns:
A context manager that specifies the default container to use for newly
created stateful ops.
"""
return get_default_graph().container(container_name)
def colocate_with(op, ignore_existing=False):
return get_default_graph().colocate_with(op, ignore_existing)
def control_dependencies(control_inputs):
"""Wrapper for `Graph.control_dependencies()` using the default graph.
See @{tf.Graph.control_dependencies}
for more details.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
"""
return get_default_graph().control_dependencies(control_inputs)
class _DefaultStack(threading.local):
"""A thread-local stack of objects for providing implicit defaults."""
def __init__(self):
super(_DefaultStack, self).__init__()
self._enforce_nesting = True
self.stack = []
def get_default(self):
return self.stack[-1] if len(self.stack) >= 1 else None
def reset(self):
self.stack = []
@property
def enforce_nesting(self):
return self._enforce_nesting
@enforce_nesting.setter
def enforce_nesting(self, value):
self._enforce_nesting = value
@contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
try:
self.stack.append(default)
yield default
finally:
if self._enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects"
% type(default))
self.stack.pop()
else:
self.stack.remove(default)
_default_session_stack = _DefaultStack()
def default_session(session):
"""Python "with" handler for defining a default session.
This function provides a means of registering a session for handling
Tensor.eval() and Operation.run() calls. It is primarily intended for use
by session.Session, but can be used with any object that implements
the Session.run() interface.
Use with the "with" keyword to specify that Tensor.eval() and Operation.run()
invocations within the scope of a block should be executed by a particular
session.
The default session applies to the current thread only, so it is always
possible to inspect the call stack and determine the scope of a default
session. If you create a new thread, and wish to use the default session
in that thread, you must explicitly add a "with ops.default_session(sess):"
block in that thread's function.
Example:
The following code examples are equivalent:
# 1. Using the Session object directly:
sess = ...
c = tf.constant(5.0)
sess.run(c)
# 2. Using default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
result = c.eval()
# 3. Overriding default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
with ops.default_session(...):
c.eval(session=sess)
Args:
session: The session to be installed as the default session.
Returns:
A context manager for the default session.
"""
return _default_session_stack.get_controller(session)
def get_default_session():
"""Returns the default session for the current thread.
The returned `Session` will be the innermost session on which a
`Session` or `Session.as_default()` context has been entered.
NOTE: The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
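For example, a minimal sketch:
```python
c = tf.constant(5.0)
sess = tf.Session()
with sess.as_default():
  assert tf.get_default_session() is sess
  assert c.eval() == 5.0
```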
Returns:
The default `Session` being used in the current thread.
"""
return _default_session_stack.get_default()
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
"""Uses the default session to evaluate one or more tensors.
Args:
tensors: A single Tensor, or a list of Tensor objects.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which the tensors are defined.
session: (Optional) A different session to use to evaluate "tensors".
Returns:
Either a single numpy ndarray if "tensors" is a single tensor; or a list
of numpy ndarrays that each correspond to the respective element in
"tensors".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot evaluate tensor using `eval()`: No default "
"session is registered. Use `with "
"sess.as_default()` or pass an explicit session to "
"`eval(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph. Pass an explicit session to "
"`eval(session=sess)`.")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph.")
return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
"""Uses the default session to run "operation".
Args:
operation: The Operation to be run.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which "operation" is defined.
session: (Optional) A different session to use to run "operation".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot execute operation using `run()`: No default "
"session is registered. Use `with "
"sess.as_default():` or pass an explicit session to "
"`run(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to execute operation: "
"the operation's graph is different from the "
"session's graph. Pass an explicit session to "
"run(session=sess).")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to execute operation: "
"the operation's graph is different from the session's "
"graph.")
session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack):
"""A thread-local stack of objects for providing an implicit default graph."""
def __init__(self):
super(_DefaultGraphStack, self).__init__()
self._global_default_graph = None
def get_default(self):
"""Override that returns a global default if the stack is empty."""
ret = super(_DefaultGraphStack, self).get_default()
if ret is None:
ret = self._GetGlobalDefaultGraph()
return ret
def _GetGlobalDefaultGraph(self):
if self._global_default_graph is None:
# TODO(mrry): Perhaps log that the default graph is being used, or
# provide some other feedback to prevent confusion when a mixture of
# the global default graph and an explicit graph are combined in the
# same process.
self._global_default_graph = Graph()
return self._global_default_graph
def reset(self):
super(_DefaultGraphStack, self).reset()
self._global_default_graph = None
_default_graph_stack = _DefaultGraphStack()
def reset_default_graph():
"""Clears the default graph stack and resets the global default graph.
NOTE: The default graph is a property of the current thread. This
function applies only to the current thread. Calling this function while
a `tf.Session` or `tf.InteractiveSession` is active will result in undefined
behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects
after calling this function will result in undefined behavior.
"""
_default_graph_stack.reset()
def get_default_graph():
"""Returns the default graph for the current thread.
The returned graph will be the innermost graph on which a
`Graph.as_default()` context has been entered, or a global default
graph if none has been explicitly created.
NOTE: The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
Returns:
The default `Graph` being used in the current thread.
"""
return _default_graph_stack.get_default()
def _assert_same_graph(original_item, item):
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
if original_item.graph is not item.graph:
raise ValueError(
"%s must be from the same graph as %s." % (item, original_item))
def _get_graph_from_inputs(op_input_list, graph=None):
"""Returns the appropriate graph to use for the given inputs.
This library method provides a consistent algorithm for choosing the graph
in which an Operation should be constructed:
1. If the default graph is being used to construct a function, we
use the default graph.
2. If the "graph" is specified explicitly, we validate that all of the inputs
in "op_input_list" are compatible with that graph.
3. Otherwise, we attempt to select a graph from the first Operation-
or Tensor-valued input in "op_input_list", and validate that all other
such inputs are in the same graph.
4. If the graph was not specified and it could not be inferred from
"op_input_list", we attempt to use the default graph.
Args:
op_input_list: A list of inputs to an operation, which may include `Tensor`,
`Operation`, and other objects that may be converted to a graph element.
graph: (Optional) The explicit graph to use.
Raises:
TypeError: If op_input_list is not a list or tuple, or if graph is not a
Graph.
ValueError: If a graph is explicitly passed and not all inputs are from it,
or if the inputs are from multiple graphs, or we could not find a graph
and there was no default graph.
Returns:
The appropriate graph to use for the given inputs.
"""
if get_default_graph().building_function:
return get_default_graph()
op_input_list = tuple(op_input_list) # Handle generators correctly
if graph and not isinstance(graph, Graph):
raise TypeError("Input graph needs to be a Graph: %s" % graph)
# 1. We validate that all of the inputs are from the same graph. This is
# either the supplied graph parameter, or the first one selected from one
  #    of the graph-element-valued inputs. In the latter case, we hold onto
# that input in original_graph_element so we can provide a more
# informative error if a mismatch is found.
original_graph_element = None
for op_input in op_input_list:
# Determine if this is a valid graph_element.
graph_element = None
if isinstance(op_input, (Operation, _TensorLike)):
graph_element = op_input
else:
graph_element = _as_graph_element(op_input)
if graph_element is not None:
if not graph:
original_graph_element = graph_element
graph = graph_element.graph
elif original_graph_element is not None:
_assert_same_graph(original_graph_element, graph_element)
elif graph_element.graph is not graph:
raise ValueError(
"%s is not from the passed-in graph." % graph_element)
# 2. If all else fails, we use the default graph, which is always there.
return graph or get_default_graph()
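# A minimal illustrative sketch of the selection rules above (the helper name
# below is hypothetical and not used elsewhere): with no explicit graph and no
# graph-element inputs, rule 4 applies and the default graph is chosen.
def _example_get_graph_from_inputs():  # illustrative only
  return _get_graph_from_inputs([]) is get_default_graph()  # -> True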
class GraphKeys(object):
"""Standard names to use for graph collections.
The standard library uses various well-known names to collect and
retrieve values associated with a graph. For example, the
`tf.Optimizer` subclasses default to optimizing the variables
collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
specified, but it is also possible to pass an explicit list of
variables.
The following standard keys are defined:
* `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
across distributed environment (model variables are subset of these). See
@{tf.global_variables}
for more details.
Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
* `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
    machine. Usually used for temporary variables, like counters.
Note: use `tf.contrib.framework.local_variable` to add to this collection.
* `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
model for inference (feed forward). Note: use
`tf.contrib.framework.model_variable` to add to this collection.
* `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
be trained by an optimizer. See
@{tf.trainable_variables}
for more details.
* `SUMMARIES`: the summary `Tensor` objects that have been created in the
graph. See
@{tf.summary.merge_all}
for more details.
* `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
produce input for a computation. See
@{tf.train.start_queue_runners}
for more details.
* `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
keep moving averages. See
@{tf.moving_average_variables}
for more details.
* `REGULARIZATION_LOSSES`: regularization losses collected during graph
construction.
* `WEIGHTS`: weights inside neural network layers
* `BIASES`: biases inside neural network layers
* `ACTIVATIONS`: activations of neural network layers
"""
# Key to collect Variable objects that are global (shared across machines).
# Default collection for all variables, except local ones.
GLOBAL_VARIABLES = "variables"
# Key to collect local variables that are local to the machine and are not
# saved/restored.
LOCAL_VARIABLES = "local_variables"
# Key to collect model variables defined by layers.
MODEL_VARIABLES = "model_variables"
# Key to collect Variable objects that will be trained by the
# optimizers.
TRAINABLE_VARIABLES = "trainable_variables"
# Key to collect summaries.
SUMMARIES = "summaries"
# Key to collect QueueRunners.
QUEUE_RUNNERS = "queue_runners"
# Key to collect table initializers.
TABLE_INITIALIZERS = "table_initializer"
# Key to collect asset filepaths. An asset represents an external resource
# like a vocabulary file.
ASSET_FILEPATHS = "asset_filepaths"
# Key to collect Variable objects that keep moving averages.
MOVING_AVERAGE_VARIABLES = "moving_average_variables"
# Key to collect regularization losses at graph construction.
REGULARIZATION_LOSSES = "regularization_losses"
# Key to collect concatenated sharded variables.
CONCATENATED_VARIABLES = "concatenated_variables"
# Key to collect savers.
SAVERS = "savers"
# Key to collect weights
WEIGHTS = "weights"
# Key to collect biases
BIASES = "biases"
# Key to collect activations
ACTIVATIONS = "activations"
# Key to collect update_ops
UPDATE_OPS = "update_ops"
# Key to collect losses
LOSSES = "losses"
# Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
SAVEABLE_OBJECTS = "saveable_objects"
# Key to collect all shared resources used by the graph which need to be
# initialized once per cluster.
RESOURCES = "resources"
# Key to collect all shared resources used in this graph which need to be
# initialized once per session.
LOCAL_RESOURCES = "local_resources"
# Trainable resource-style variables.
TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"
# Key to indicate various ops.
INIT_OP = "init_op"
LOCAL_INIT_OP = "local_init_op"
READY_OP = "ready_op"
READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
SUMMARY_OP = "summary_op"
GLOBAL_STEP = "global_step"
# Used to count the number of evaluations performed during a single evaluation
# run.
EVAL_STEP = "eval_step"
TRAIN_OP = "train_op"
# Key for control flow context.
COND_CONTEXT = "cond_context"
WHILE_CONTEXT = "while_context"
# Key for streaming model ports.
# NOTE(yuanbyu): internal and experimental.
_STREAMING_MODEL_PORTS = "streaming_model_ports"
@decorator_utils.classproperty
def VARIABLES(cls): # pylint: disable=no-self-argument
logging.warning("VARIABLES collection name is deprecated, "
"please use GLOBAL_VARIABLES instead; "
"VARIABLES will be removed after 2017-03-02.")
return cls.GLOBAL_VARIABLES
def add_to_collection(name, value):
"""Wrapper for `Graph.add_to_collection()` using the default graph.
See @{tf.Graph.add_to_collection}
for more details.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection.
"""
get_default_graph().add_to_collection(name, value)
def add_to_collections(names, value):
"""Wrapper for `Graph.add_to_collections()` using the default graph.
See @{tf.Graph.add_to_collections}
for more details.
Args:
names: The key for the collections. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
get_default_graph().add_to_collections(names, value)
def get_collection_ref(key):
"""Wrapper for `Graph.get_collection_ref()` using the default graph.
See @{tf.Graph.get_collection_ref}
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection. Note that this returns
the collection list itself, which can be modified in place to change the
collection.
"""
return get_default_graph().get_collection_ref(key)
def get_collection(key, scope=None):
"""Wrapper for `Graph.get_collection()` using the default graph.
See @{tf.Graph.get_collection}
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) If supplied, the resulting list is filtered to include
only items whose `name` attribute matches using `re.match`. Items
without a `name` attribute are never returned if a scope is supplied and
      the choice of `re.match` means that a `scope` without special tokens
filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
"""
return get_default_graph().get_collection(key, scope)
def get_all_collection_keys():
"""Returns a list of collections used in the default graph."""
return get_default_graph().get_all_collection_keys()
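# A minimal usage sketch of the collection wrappers above (the graph and the
# stored value are hypothetical): any Python object can be stored under a
# `GraphKeys` name and read back in insertion order.
def _example_collections_usage():  # illustrative only
  g = Graph()
  with g.as_default():
    add_to_collection(GraphKeys.LOSSES, "dummy_loss")
    return get_collection(GraphKeys.LOSSES)  # -> ["dummy_loss"]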
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def name_scope(name, default_name=None, values=None):
"""Returns a context manager for use when defining a Python op.
This context manager validates that the given `values` are from the
same graph, makes that graph the default graph, and pushes a
name scope in that graph (see
@{tf.Graph.name_scope}
for more details on that).
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
Returns:
A context manager for use in defining Python ops. Yields the name scope.
Raises:
ValueError: if neither `name` nor `default_name` is provided
but `values` are.
"""
n = default_name if name is None else name
if n is None and values is not None:
# We only raise an error if values is not None (provided) because currently
# tf.name_scope(None) (values=None then) is sometimes used as an idiom
# to reset to top scope.
raise ValueError(
"At least one of name (%s) and default_name (%s) must be provided." % (
name, default_name))
if values is None:
values = []
g = _get_graph_from_inputs(values)
with g.as_default(), g.name_scope(n) as scope:
yield scope
# pylint: enable=g-doc-return-or-yield
def strip_name_scope(name, export_scope):
"""Removes name scope from a name.
Args:
name: A `string` name.
export_scope: Optional `string`. Name scope to remove.
Returns:
Name with name scope removed, or the original name if export_scope
is None.
"""
if export_scope:
# Strips export_scope/, export_scope///,
# ^export_scope/, loc:@export_scope/.
str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)"
return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1)
else:
return name
def prepend_name_scope(name, import_scope):
"""Prepends name scope to a name.
Args:
name: A `string` name.
import_scope: Optional `string`. Name scope to add.
Returns:
Name with name scope added, or the original name if import_scope
is None.
"""
if import_scope:
str_to_replace = r"([\^]|loc:@|^)(.*)"
return re.sub(str_to_replace, r"\1" + import_scope + r"/\2",
compat.as_str(name))
else:
return name
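# A small illustrative round trip for the two helpers above (the scope names
# "export" and "import_a" are hypothetical): stripping an export scope and
# prepending an import scope are inverse-style operations on plain names.
def _example_name_scope_round_trip():  # illustrative only
  stripped = strip_name_scope("export/weights:0", "export")  # -> "weights:0"
  return prepend_name_scope(stripped, "import_a")  # -> "import_a/weights:0"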
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def op_scope(values, name, default_name=None):
"""DEPRECATED. Same as name_scope above, just different argument order."""
logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
" use tf.name_scope(name, default_name, values)")
with name_scope(name, default_name=default_name, values=values) as scope:
yield scope
_proto_function_registry = registry.Registry("proto functions")
def register_proto_function(collection_name, proto_type=None, to_proto=None,
from_proto=None):
"""Registers `to_proto` and `from_proto` functions for collection_name.
`to_proto` function converts a Python object to the corresponding protocol
buffer, and returns the protocol buffer.
  `from_proto` function converts a protocol buffer into a Python object, and
  returns the object.
Args:
collection_name: Name of the collection.
proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
      `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.
to_proto: Function that implements Python object to protobuf conversion.
from_proto: Function that implements protobuf to Python object conversion.
"""
if to_proto and not callable(to_proto):
raise TypeError("to_proto must be callable.")
if from_proto and not callable(from_proto):
raise TypeError("from_proto must be callable.")
_proto_function_registry.register((proto_type, to_proto, from_proto),
collection_name)
def get_collection_proto_type(collection_name):
"""Returns the proto_type for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[0]
except LookupError:
return None
def get_to_proto_function(collection_name):
"""Returns the to_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[1]
except LookupError:
return None
def get_from_proto_function(collection_name):
"""Returns the from_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[2]
except LookupError:
return None
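# A minimal illustrative sketch of the registry above (the collection name and
# the identity callable are hypothetical): after registration, the lookup
# helpers return exactly what was registered.
def _example_register_proto_function():  # illustrative only
  def _identity(value, scope=None):
    return value
  register_proto_function("hypothetical_collection",
                          proto_type=None,
                          to_proto=_identity,
                          from_proto=_identity)
  return get_to_proto_function("hypothetical_collection") is _identity  # -> True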
def _operation_conversion_error(op, dtype=None, name=None, as_ref=False):
"""Produce a nice error if someone converts an Operation to a Tensor."""
raise TypeError(
("Can't convert Operation '%s' to Tensor "
"(target dtype=%r, name=%r, as_ref=%r)") %
(op.name, dtype, name, as_ref))
register_tensor_conversion_function(Operation, _operation_conversion_error)
| [
"[email protected]"
] | |
e4cd3f418288f140c96540175a456d68bc217d6b | 5a281cb78335e06c631181720546f6876005d4e5 | /karbor-1.3.0/karbor/services/protection/protectable_plugins/share.py | 60a4bc2f57d26d0747b3f8b2cc01936164dbd252 | [
"Apache-2.0"
] | permissive | scottwedge/OpenStack-Stein | d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8 | 7077d1f602031dace92916f14e36b124f474de15 | refs/heads/master | 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 | Apache-2.0 | 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null | UTF-8 | Python | false | false | 3,812 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from karbor.common import constants
from karbor import exception
from karbor import resource
from karbor.services.protection.client_factory import ClientFactory
from karbor.services.protection import protectable_plugin
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
INVALID_SHARE_STATUS = ['deleting', 'deleted', 'error', 'error_deleting',
'manage_error', 'unmanage_error', 'extending_error',
'shrinking_error', 'reverting_error']
class ShareProtectablePlugin(protectable_plugin.ProtectablePlugin):
"""Manila share protectable plugin"""
_SUPPORT_RESOURCE_TYPE = constants.SHARE_RESOURCE_TYPE
def _client(self, context):
self._client_instance = ClientFactory.create_client(
"manila",
context)
return self._client_instance
def get_resource_type(self):
return self._SUPPORT_RESOURCE_TYPE
def get_parent_resource_types(self):
return (constants.PROJECT_RESOURCE_TYPE, )
def list_resources(self, context, parameters=None):
try:
shares = self._client(context).shares.list(detailed=True)
except Exception as e:
LOG.exception("List all summary shares from manila failed.")
raise exception.ListProtectableResourceFailed(
type=self._SUPPORT_RESOURCE_TYPE,
reason=six.text_type(e))
else:
return [resource.Resource(type=self._SUPPORT_RESOURCE_TYPE,
id=share.id, name=share.name)
for share in shares
if share.status not in INVALID_SHARE_STATUS]
def show_resource(self, context, resource_id, parameters=None):
try:
share = self._client(context).shares.get(resource_id)
except Exception as e:
LOG.exception("Show a summary share from manila failed.")
raise exception.ProtectableResourceNotFound(
id=resource_id,
type=self._SUPPORT_RESOURCE_TYPE,
reason=six.text_type(e))
else:
if share.status in INVALID_SHARE_STATUS:
raise exception.ProtectableResourceInvalidStatus(
id=resource_id, type=self._SUPPORT_RESOURCE_TYPE,
status=share.status)
return resource.Resource(type=self._SUPPORT_RESOURCE_TYPE,
id=share.id, name=share.name)
def get_dependent_resources(self, context, parent_resource):
try:
shares = self._client(context).shares.list()
except Exception as e:
LOG.exception("List all shares from manila failed.")
raise exception.ListProtectableResourceFailed(
type=self._SUPPORT_RESOURCE_TYPE,
reason=six.text_type(e))
else:
return [resource.Resource(type=self._SUPPORT_RESOURCE_TYPE,
id=share.id,
name=share.name)
for share in shares
if share.project_id == parent_resource.id
and share.status not in INVALID_SHARE_STATUS]
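# A small illustrative sketch (the helper and its plain-dict input are
# hypothetical, not part of karbor): applies the same status filter used by
# the methods above to plain dictionaries instead of real manila share objects.
def _example_invalid_status_filter(share_dicts):
    """Return only the shares whose 'status' is considered valid."""
    return [share for share in share_dicts
            if share.get('status') not in INVALID_SHARE_STATUS]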
| [
"Wayne [email protected]"
] | Wayne [email protected] |
3c23614bc277fda06b4575acdf845d4b5ce8cfd1 | b1201c46096eed638571ec34c66cb86cc759662b | /tests/test_point_mesh_distance.py | d914dcb873290b6cb822f6eed7abef7ff9d35cb6 | [
"BSD-3-Clause"
] | permissive | eduardohenriquearnold/pytorch3d | 2ec19b72163634f1507b3a8c82e59c80654f6cbd | 7f1e63aed1252ba8145d4a66ce2272331d60cdae | refs/heads/master | 2022-11-10T10:57:57.287590 | 2020-06-10T21:11:10 | 2020-06-10T21:13:30 | 271,846,501 | 0 | 0 | NOASSERTION | 2020-06-12T16:45:45 | 2020-06-12T16:45:44 | null | UTF-8 | Python | false | false | 29,537 | py | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import numpy as np
import torch
from common_testing import TestCaseMixin, get_random_cuda_device
from pytorch3d import _C
from pytorch3d.loss import point_mesh_edge_distance, point_mesh_face_distance
from pytorch3d.structures import Meshes, Pointclouds, packed_to_list
class TestPointMeshDistance(TestCaseMixin, unittest.TestCase):
def setUp(self) -> None:
np.random.seed(42)
torch.manual_seed(42)
@staticmethod
def eps():
return 1e-8
@staticmethod
def init_meshes_clouds(
batch_size: int = 10,
num_verts: int = 1000,
num_faces: int = 3000,
num_points: int = 3000,
device: str = "cuda:0",
):
device = torch.device(device)
nump = torch.randint(low=1, high=num_points, size=(batch_size,))
numv = torch.randint(low=3, high=num_verts, size=(batch_size,))
numf = torch.randint(low=1, high=num_faces, size=(batch_size,))
verts_list = []
faces_list = []
points_list = []
for i in range(batch_size):
# Randomly choose vertices
verts = torch.rand((numv[i], 3), dtype=torch.float32, device=device)
verts.requires_grad_(True)
# Randomly choose faces. Our tests below compare argmin indices
            # over faces and edges. Argmin is sensitive even to small numerical
            # variations, thus we make sure that faces are valid,
            # i.e. a face f = (i0, i1, i2) s.t. i0 != i1 != i2,
            # otherwise argmin cannot be resolved due to numerical sensitivities
faces, allf = [], 0
validf = numv[i].item() - numv[i].item() % 3
while allf < numf[i]:
ff = torch.randperm(numv[i], device=device)[:validf].view(-1, 3)
faces.append(ff)
allf += ff.shape[0]
faces = torch.cat(faces, 0)
if faces.shape[0] > numf[i]:
faces = faces[: numf[i]]
verts_list.append(verts)
faces_list.append(faces)
# Randomly choose points
points = torch.rand((nump[i], 3), dtype=torch.float32, device=device)
points.requires_grad_(True)
points_list.append(points)
meshes = Meshes(verts_list, faces_list)
pcls = Pointclouds(points_list)
return meshes, pcls
@staticmethod
def _point_to_bary(point: torch.Tensor, tri: torch.Tensor) -> torch.Tensor:
"""
Computes the barycentric coordinates of point wrt triangle (tri)
Note that point needs to live in the space spanned by tri = (a, b, c),
        e.g. by taking the projection of an arbitrary point onto the space spanned by tri
Args:
point: FloatTensor of shape (3)
tri: FloatTensor of shape (3, 3)
Returns:
bary: FloatTensor of shape (3)
"""
assert point.dim() == 1 and point.shape[0] == 3
assert tri.dim() == 2 and tri.shape[0] == 3 and tri.shape[1] == 3
a, b, c = tri.unbind(0)
v0 = b - a
v1 = c - a
v2 = point - a
d00 = v0.dot(v0)
d01 = v0.dot(v1)
d11 = v1.dot(v1)
d20 = v2.dot(v0)
d21 = v2.dot(v1)
denom = d00 * d11 - d01 * d01
s2 = (d11 * d20 - d01 * d21) / denom
s3 = (d00 * d21 - d01 * d20) / denom
s1 = 1.0 - s2 - s3
bary = torch.tensor([s1, s2, s3])
return bary
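    # A small worked example for the helper above (the triangle is hypothetical
    # test data): the centroid of the unit right triangle has barycentric
    # coordinates (1/3, 1/3, 1/3).
    @staticmethod
    def _example_point_to_bary():
        tri = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
        point = torch.tensor([1.0 / 3.0, 1.0 / 3.0, 0.0])
        return TestPointMeshDistance._point_to_bary(point, tri)  # ~(1/3, 1/3, 1/3)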
@staticmethod
def _is_inside_triangle(point: torch.Tensor, tri: torch.Tensor) -> torch.Tensor:
"""
Computes whether point is inside triangle tri
Note that point needs to live in the space spanned by tri = (a, b, c)
        e.g. by taking the projection of an arbitrary point onto the space spanned by tri
Args:
point: FloatTensor of shape (3)
tri: FloatTensor of shape (3, 3)
Returns:
inside: BoolTensor of shape (1)
"""
bary = TestPointMeshDistance._point_to_bary(point, tri)
inside = ((bary >= 0.0) * (bary <= 1.0)).all()
return inside
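    # A quick illustrative check for the helper above (same hypothetical
    # triangle as the barycentric example earlier): the centroid lies inside
    # the triangle, so the returned mask is True.
    @staticmethod
    def _example_is_inside_triangle():
        tri = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
        point = torch.tensor([1.0 / 3.0, 1.0 / 3.0, 0.0])
        return TestPointMeshDistance._is_inside_triangle(point, tri)  # -> True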
@staticmethod
def _point_to_edge_distance(
point: torch.Tensor, edge: torch.Tensor
) -> torch.Tensor:
"""
Computes the squared euclidean distance of points to edges
Args:
point: FloatTensor of shape (3)
edge: FloatTensor of shape (2, 3)
Returns:
dist: FloatTensor of shape (1)
If a, b are the start and end points of the segments, we
parametrize a point p as
x(t) = a + t * (b - a)
To find t which describes p we minimize (x(t) - p) ^ 2
Note that p does not need to live in the space spanned by (a, b)
"""
s0, s1 = edge.unbind(0)
s01 = s1 - s0
norm_s01 = s01.dot(s01)
same_edge = norm_s01 < TestPointMeshDistance.eps()
if same_edge:
dist = 0.5 * (point - s0).dot(point - s0) + 0.5 * (point - s1).dot(
point - s1
)
return dist
t = s01.dot(point - s0) / norm_s01
t = torch.clamp(t, min=0.0, max=1.0)
x = s0 + t * s01
dist = (x - point).dot(x - point)
return dist
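    # A small worked example for the helper above (the segment is hypothetical
    # test data): the point (0, 1, 0) lies at squared distance 1.0 from the
    # segment joining (0, 0, 0) and (2, 0, 0); the clamped parameter t is 0, so
    # the closest point is the endpoint (0, 0, 0).
    @staticmethod
    def _example_point_to_edge_distance():
        point = torch.tensor([0.0, 1.0, 0.0])
        edge = torch.tensor([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]])
        return TestPointMeshDistance._point_to_edge_distance(point, edge)  # -> 1.0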
@staticmethod
def _point_to_tri_distance(point: torch.Tensor, tri: torch.Tensor) -> torch.Tensor:
"""
        Computes the squared euclidean distance of points to triangles
Args:
point: FloatTensor of shape (3)
tri: FloatTensor of shape (3, 3)
Returns:
dist: FloatTensor of shape (1)
"""
a, b, c = tri.unbind(0)
cross = torch.cross(b - a, c - a)
norm = cross.norm()
normal = torch.nn.functional.normalize(cross, dim=0)
# p0 is the projection of p onto the plane spanned by (a, b, c)
# p0 = p + tt * normal, s.t. (p0 - a) is orthogonal to normal
# => tt = dot(a - p, n)
tt = normal.dot(a) - normal.dot(point)
p0 = point + tt * normal
dist_p = tt * tt
# Compute the distance of p to all edge segments
e01_dist = TestPointMeshDistance._point_to_edge_distance(point, tri[[0, 1]])
e02_dist = TestPointMeshDistance._point_to_edge_distance(point, tri[[0, 2]])
e12_dist = TestPointMeshDistance._point_to_edge_distance(point, tri[[1, 2]])
with torch.no_grad():
inside_tri = TestPointMeshDistance._is_inside_triangle(p0, tri)
if inside_tri and (norm > TestPointMeshDistance.eps()):
return dist_p
else:
if e01_dist.le(e02_dist) and e01_dist.le(e12_dist):
return e01_dist
elif e02_dist.le(e01_dist) and e02_dist.le(e12_dist):
return e02_dist
else:
return e12_dist
def test_point_edge_array_distance(self):
"""
Test CUDA implementation for PointEdgeArrayDistanceForward
& PointEdgeArrayDistanceBackward
"""
P, E = 16, 32
device = get_random_cuda_device()
points = torch.rand((P, 3), dtype=torch.float32, device=device)
edges = torch.rand((E, 2, 3), dtype=torch.float32, device=device)
# randomly make some edge points equal
same = torch.rand((E,), dtype=torch.float32, device=device) > 0.5
edges[same, 1] = edges[same, 0].clone().detach()
points.requires_grad = True
edges.requires_grad = True
grad_dists = torch.rand((P, E), dtype=torch.float32, device=device)
# Naive python implementation
dists_naive = torch.zeros((P, E), dtype=torch.float32, device=device)
for p in range(P):
for e in range(E):
dist = self._point_to_edge_distance(points[p], edges[e])
dists_naive[p, e] = dist
# Cuda Forward Implementation
dists_cuda = _C.point_edge_array_dist_forward(points, edges)
# Compare
self.assertClose(dists_naive.cpu(), dists_cuda.cpu())
        # CUDA Backward Implementation
grad_points_cuda, grad_edges_cuda = _C.point_edge_array_dist_backward(
points, edges, grad_dists
)
dists_naive.backward(grad_dists)
grad_points_naive = points.grad
grad_edges_naive = edges.grad
# Compare
self.assertClose(grad_points_naive.cpu(), grad_points_cuda.cpu())
self.assertClose(grad_edges_naive.cpu(), grad_edges_cuda.cpu())
def test_point_edge_distance(self):
"""
Test CUDA implementation for PointEdgeDistanceForward
& PointEdgeDistanceBackward
"""
device = get_random_cuda_device()
N, V, F, P = 4, 32, 16, 24
meshes, pcls = self.init_meshes_clouds(N, V, F, P, device=device)
# make points packed a leaf node
points_packed = pcls.points_packed().detach().clone() # (P, 3)
points_first_idx = pcls.cloud_to_packed_first_idx()
max_p = pcls.num_points_per_cloud().max().item()
# make edges packed a leaf node
verts_packed = meshes.verts_packed()
edges_packed = verts_packed[meshes.edges_packed()] # (E, 2, 3)
edges_packed = edges_packed.clone().detach()
edges_first_idx = meshes.mesh_to_edges_packed_first_idx()
# leaf nodes
points_packed.requires_grad = True
edges_packed.requires_grad = True
grad_dists = torch.rand(
(points_packed.shape[0],), dtype=torch.float32, device=device
)
        # Cuda Implementation: forward
dists_cuda, idx_cuda = _C.point_edge_dist_forward(
points_packed, points_first_idx, edges_packed, edges_first_idx, max_p
)
# Cuda Implementation: backward
grad_points_cuda, grad_edges_cuda = _C.point_edge_dist_backward(
points_packed, edges_packed, idx_cuda, grad_dists
)
# Naive Implementation: forward
edges_list = packed_to_list(edges_packed, meshes.num_edges_per_mesh().tolist())
dists_naive = []
for i in range(N):
points = pcls.points_list()[i]
edges = edges_list[i]
dists_temp = torch.zeros(
(points.shape[0], edges.shape[0]), dtype=torch.float32, device=device
)
for p in range(points.shape[0]):
for e in range(edges.shape[0]):
dist = self._point_to_edge_distance(points[p], edges[e])
dists_temp[p, e] = dist
# torch.min() doesn't necessarily return the first index of the
            # smallest value, but our warp_reduce does. So it's not straightforward
# to directly compare indices, nor the gradients of grad_edges which
# also depend on the indices of the minimum value.
# To be able to compare, we will compare dists_temp.min(1) and
# then feed the cuda indices to the naive output
start = points_first_idx[i]
end = points_first_idx[i + 1] if i < N - 1 else points_packed.shape[0]
min_idx = idx_cuda[start:end] - edges_first_idx[i]
iidx = torch.arange(points.shape[0], device=device)
min_dist = dists_temp[iidx, min_idx]
dists_naive.append(min_dist)
dists_naive = torch.cat(dists_naive)
# Compare
self.assertClose(dists_naive.cpu(), dists_cuda.cpu())
# Naive Implementation: backward
dists_naive.backward(grad_dists)
grad_points_naive = torch.cat([cloud.grad for cloud in pcls.points_list()])
grad_edges_naive = edges_packed.grad
# Compare
self.assertClose(grad_points_naive.cpu(), grad_points_cuda.cpu(), atol=1e-7)
self.assertClose(grad_edges_naive.cpu(), grad_edges_cuda.cpu(), atol=5e-7)
def test_edge_point_distance(self):
"""
Test CUDA implementation for EdgePointDistanceForward
& EdgePointDistanceBackward
"""
device = get_random_cuda_device()
N, V, F, P = 4, 32, 16, 24
meshes, pcls = self.init_meshes_clouds(N, V, F, P, device=device)
# make points packed a leaf node
points_packed = pcls.points_packed().detach().clone() # (P, 3)
points_first_idx = pcls.cloud_to_packed_first_idx()
# make edges packed a leaf node
verts_packed = meshes.verts_packed()
edges_packed = verts_packed[meshes.edges_packed()] # (E, 2, 3)
edges_packed = edges_packed.clone().detach()
edges_first_idx = meshes.mesh_to_edges_packed_first_idx()
max_e = meshes.num_edges_per_mesh().max().item()
# leaf nodes
points_packed.requires_grad = True
edges_packed.requires_grad = True
grad_dists = torch.rand(
(edges_packed.shape[0],), dtype=torch.float32, device=device
)
# Cuda Implementation: forward
dists_cuda, idx_cuda = _C.edge_point_dist_forward(
points_packed, points_first_idx, edges_packed, edges_first_idx, max_e
)
# Cuda Implementation: backward
grad_points_cuda, grad_edges_cuda = _C.edge_point_dist_backward(
points_packed, edges_packed, idx_cuda, grad_dists
)
# Naive Implementation: forward
edges_list = packed_to_list(edges_packed, meshes.num_edges_per_mesh().tolist())
dists_naive = []
for i in range(N):
points = pcls.points_list()[i]
edges = edges_list[i]
dists_temp = torch.zeros(
(edges.shape[0], points.shape[0]), dtype=torch.float32, device=device
)
for e in range(edges.shape[0]):
for p in range(points.shape[0]):
dist = self._point_to_edge_distance(points[p], edges[e])
dists_temp[e, p] = dist
# torch.min() doesn't necessarily return the first index of the
            # smallest value, but our warp_reduce does. So it's not straightforward
# to directly compare indices, nor the gradients of grad_edges which
# also depend on the indices of the minimum value.
# To be able to compare, we will compare dists_temp.min(1) and
# then feed the cuda indices to the naive output
start = edges_first_idx[i]
end = edges_first_idx[i + 1] if i < N - 1 else edges_packed.shape[0]
min_idx = idx_cuda.cpu()[start:end] - points_first_idx[i].cpu()
iidx = torch.arange(edges.shape[0], device=device)
min_dist = dists_temp[iidx, min_idx]
dists_naive.append(min_dist)
dists_naive = torch.cat(dists_naive)
# Compare
self.assertClose(dists_naive.cpu(), dists_cuda.cpu())
# Naive Implementation: backward
dists_naive.backward(grad_dists)
grad_points_naive = torch.cat([cloud.grad for cloud in pcls.points_list()])
grad_edges_naive = edges_packed.grad
# Compare
self.assertClose(grad_points_naive.cpu(), grad_points_cuda.cpu(), atol=1e-7)
self.assertClose(grad_edges_naive.cpu(), grad_edges_cuda.cpu(), atol=5e-7)
def test_point_mesh_edge_distance(self):
"""
Test point_mesh_edge_distance from pytorch3d.loss
"""
device = get_random_cuda_device()
N, V, F, P = 4, 32, 16, 24
meshes, pcls = self.init_meshes_clouds(N, V, F, P, device=device)
# clone and detach for another backward pass through the op
verts_op = [verts.clone().detach() for verts in meshes.verts_list()]
for i in range(N):
verts_op[i].requires_grad = True
faces_op = [faces.clone().detach() for faces in meshes.faces_list()]
meshes_op = Meshes(verts=verts_op, faces=faces_op)
points_op = [points.clone().detach() for points in pcls.points_list()]
for i in range(N):
points_op[i].requires_grad = True
pcls_op = Pointclouds(points_op)
# Cuda implementation: forward & backward
loss_op = point_mesh_edge_distance(meshes_op, pcls_op)
# Naive implementation: forward & backward
edges_packed = meshes.edges_packed()
edges_list = packed_to_list(edges_packed, meshes.num_edges_per_mesh().tolist())
loss_naive = torch.zeros(N, dtype=torch.float32, device=device)
for i in range(N):
points = pcls.points_list()[i]
verts = meshes.verts_list()[i]
v_first_idx = meshes.mesh_to_verts_packed_first_idx()[i]
edges = verts[edges_list[i] - v_first_idx]
num_p = points.shape[0]
num_e = edges.shape[0]
dists = torch.zeros((num_p, num_e), dtype=torch.float32, device=device)
for p in range(num_p):
for e in range(num_e):
dist = self._point_to_edge_distance(points[p], edges[e])
dists[p, e] = dist
min_dist_p, min_idx_p = dists.min(1)
min_dist_e, min_idx_e = dists.min(0)
loss_naive[i] = min_dist_p.mean() + min_dist_e.mean()
loss_naive = loss_naive.mean()
        # NOTE that here the comparison holds despite the discrepancy
        # due to the argmin indices returned by min(). This is because
        # we will compare gradients on the verts and not on the
# edges or faces.
# Compare forward pass
self.assertClose(loss_op, loss_naive)
# Compare backward pass
rand_val = torch.rand(1).item()
grad_dist = torch.tensor(rand_val, dtype=torch.float32, device=device)
loss_naive.backward(grad_dist)
loss_op.backward(grad_dist)
# check verts grad
for i in range(N):
self.assertClose(
meshes.verts_list()[i].grad, meshes_op.verts_list()[i].grad
)
self.assertClose(pcls.points_list()[i].grad, pcls_op.points_list()[i].grad)
def test_point_face_array_distance(self):
"""
Test CUDA implementation for PointFaceArrayDistanceForward
& PointFaceArrayDistanceBackward
"""
P, T = 16, 32
device = get_random_cuda_device()
points = torch.rand((P, 3), dtype=torch.float32, device=device)
tris = torch.rand((T, 3, 3), dtype=torch.float32, device=device)
points.requires_grad = True
tris.requires_grad = True
grad_dists = torch.rand((P, T), dtype=torch.float32, device=device)
points_temp = points.clone().detach()
points_temp.requires_grad = True
tris_temp = tris.clone().detach()
tris_temp.requires_grad = True
# Naive python implementation
dists_naive = torch.zeros((P, T), dtype=torch.float32, device=device)
for p in range(P):
for t in range(T):
dist = self._point_to_tri_distance(points[p], tris[t])
dists_naive[p, t] = dist
# Naive Backward
dists_naive.backward(grad_dists)
grad_points_naive = points.grad
grad_tris_naive = tris.grad
# Cuda Forward Implementation
dists_cuda = _C.point_face_array_dist_forward(points, tris)
# Compare
self.assertClose(dists_naive.cpu(), dists_cuda.cpu())
# CUDA Backward Implementation
grad_points_cuda, grad_tris_cuda = _C.point_face_array_dist_backward(
points, tris, grad_dists
)
# Compare
self.assertClose(grad_points_naive.cpu(), grad_points_cuda.cpu())
self.assertClose(grad_tris_naive.cpu(), grad_tris_cuda.cpu(), atol=5e-6)
def test_point_face_distance(self):
"""
Test CUDA implementation for PointFaceDistanceForward
& PointFaceDistanceBackward
"""
device = get_random_cuda_device()
N, V, F, P = 4, 32, 16, 24
meshes, pcls = self.init_meshes_clouds(N, V, F, P, device=device)
# make points packed a leaf node
points_packed = pcls.points_packed().detach().clone() # (P, 3)
points_first_idx = pcls.cloud_to_packed_first_idx()
max_p = pcls.num_points_per_cloud().max().item()
# make edges packed a leaf node
verts_packed = meshes.verts_packed()
faces_packed = verts_packed[meshes.faces_packed()] # (T, 3, 3)
faces_packed = faces_packed.clone().detach()
faces_first_idx = meshes.mesh_to_faces_packed_first_idx()
# leaf nodes
points_packed.requires_grad = True
faces_packed.requires_grad = True
grad_dists = torch.rand(
(points_packed.shape[0],), dtype=torch.float32, device=device
)
# Cuda Implementation: forward
dists_cuda, idx_cuda = _C.point_face_dist_forward(
points_packed, points_first_idx, faces_packed, faces_first_idx, max_p
)
# Cuda Implementation: backward
grad_points_cuda, grad_faces_cuda = _C.point_face_dist_backward(
points_packed, faces_packed, idx_cuda, grad_dists
)
# Naive Implementation: forward
faces_list = packed_to_list(faces_packed, meshes.num_faces_per_mesh().tolist())
dists_naive = []
for i in range(N):
points = pcls.points_list()[i]
tris = faces_list[i]
dists_temp = torch.zeros(
(points.shape[0], tris.shape[0]), dtype=torch.float32, device=device
)
for p in range(points.shape[0]):
for t in range(tris.shape[0]):
dist = self._point_to_tri_distance(points[p], tris[t])
dists_temp[p, t] = dist
# torch.min() doesn't necessarily return the first index of the
            # smallest value, but our warp_reduce does. So it's not straightforward
# to directly compare indices, nor the gradients of grad_tris which
# also depend on the indices of the minimum value.
# To be able to compare, we will compare dists_temp.min(1) and
# then feed the cuda indices to the naive output
start = points_first_idx[i]
end = points_first_idx[i + 1] if i < N - 1 else points_packed.shape[0]
min_idx = idx_cuda.cpu()[start:end] - faces_first_idx[i].cpu()
iidx = torch.arange(points.shape[0], device=device)
min_dist = dists_temp[iidx, min_idx]
dists_naive.append(min_dist)
dists_naive = torch.cat(dists_naive)
# Compare
self.assertClose(dists_naive.cpu(), dists_cuda.cpu())
# Naive Implementation: backward
dists_naive.backward(grad_dists)
grad_points_naive = torch.cat([cloud.grad for cloud in pcls.points_list()])
grad_faces_naive = faces_packed.grad
# Compare
self.assertClose(grad_points_naive.cpu(), grad_points_cuda.cpu(), atol=1e-7)
self.assertClose(grad_faces_naive.cpu(), grad_faces_cuda.cpu(), atol=5e-7)
def test_face_point_distance(self):
"""
Test CUDA implementation for FacePointDistanceForward
& FacePointDistanceBackward
"""
device = get_random_cuda_device()
N, V, F, P = 4, 32, 16, 24
meshes, pcls = self.init_meshes_clouds(N, V, F, P, device=device)
# make points packed a leaf node
points_packed = pcls.points_packed().detach().clone() # (P, 3)
points_first_idx = pcls.cloud_to_packed_first_idx()
# make edges packed a leaf node
verts_packed = meshes.verts_packed()
faces_packed = verts_packed[meshes.faces_packed()] # (T, 3, 3)
faces_packed = faces_packed.clone().detach()
faces_first_idx = meshes.mesh_to_faces_packed_first_idx()
max_f = meshes.num_faces_per_mesh().max().item()
# leaf nodes
points_packed.requires_grad = True
faces_packed.requires_grad = True
grad_dists = torch.rand(
(faces_packed.shape[0],), dtype=torch.float32, device=device
)
# Cuda Implementation: forward
dists_cuda, idx_cuda = _C.face_point_dist_forward(
points_packed, points_first_idx, faces_packed, faces_first_idx, max_f
)
# Cuda Implementation: backward
grad_points_cuda, grad_faces_cuda = _C.face_point_dist_backward(
points_packed, faces_packed, idx_cuda, grad_dists
)
# Naive Implementation: forward
faces_list = packed_to_list(faces_packed, meshes.num_faces_per_mesh().tolist())
dists_naive = []
for i in range(N):
points = pcls.points_list()[i]
tris = faces_list[i]
dists_temp = torch.zeros(
(tris.shape[0], points.shape[0]), dtype=torch.float32, device=device
)
for t in range(tris.shape[0]):
for p in range(points.shape[0]):
dist = self._point_to_tri_distance(points[p], tris[t])
dists_temp[t, p] = dist
# torch.min() doesn't necessarily return the first index of the
            # smallest value, but our warp_reduce does. So it's not straightforward
# to directly compare indices, nor the gradients of grad_tris which
# also depend on the indices of the minimum value.
# To be able to compare, we will compare dists_temp.min(1) and
# then feed the cuda indices to the naive output
start = faces_first_idx[i]
end = faces_first_idx[i + 1] if i < N - 1 else faces_packed.shape[0]
min_idx = idx_cuda.cpu()[start:end] - points_first_idx[i].cpu()
iidx = torch.arange(tris.shape[0], device=device)
min_dist = dists_temp[iidx, min_idx]
dists_naive.append(min_dist)
dists_naive = torch.cat(dists_naive)
# Compare
self.assertClose(dists_naive.cpu(), dists_cuda.cpu())
# Naive Implementation: backward
dists_naive.backward(grad_dists)
grad_points_naive = torch.cat([cloud.grad for cloud in pcls.points_list()])
grad_faces_naive = faces_packed.grad
# Compare
self.assertClose(grad_points_naive.cpu(), grad_points_cuda.cpu(), atol=1e-7)
self.assertClose(grad_faces_naive.cpu(), grad_faces_cuda.cpu(), atol=5e-7)
def test_point_mesh_face_distance(self):
"""
Test point_mesh_face_distance from pytorch3d.loss
"""
device = get_random_cuda_device()
N, V, F, P = 4, 32, 16, 24
meshes, pcls = self.init_meshes_clouds(N, V, F, P, device=device)
# clone and detach for another backward pass through the op
verts_op = [verts.clone().detach() for verts in meshes.verts_list()]
for i in range(N):
verts_op[i].requires_grad = True
faces_op = [faces.clone().detach() for faces in meshes.faces_list()]
meshes_op = Meshes(verts=verts_op, faces=faces_op)
points_op = [points.clone().detach() for points in pcls.points_list()]
for i in range(N):
points_op[i].requires_grad = True
pcls_op = Pointclouds(points_op)
# naive implementation
loss_naive = torch.zeros(N, dtype=torch.float32, device=device)
for i in range(N):
points = pcls.points_list()[i]
verts = meshes.verts_list()[i]
faces = meshes.faces_list()[i]
tris = verts[faces]
num_p = points.shape[0]
num_t = tris.shape[0]
dists = torch.zeros((num_p, num_t), dtype=torch.float32, device=device)
for p in range(num_p):
for t in range(num_t):
dist = self._point_to_tri_distance(points[p], tris[t])
dists[p, t] = dist
min_dist_p, min_idx_p = dists.min(1)
min_dist_t, min_idx_t = dists.min(0)
loss_naive[i] = min_dist_p.mean() + min_dist_t.mean()
loss_naive = loss_naive.mean()
# Op
loss_op = point_mesh_face_distance(meshes_op, pcls_op)
# Compare forward pass
self.assertClose(loss_op, loss_naive)
# Compare backward pass
rand_val = torch.rand(1).item()
grad_dist = torch.tensor(rand_val, dtype=torch.float32, device=device)
loss_naive.backward(grad_dist)
loss_op.backward(grad_dist)
# check verts grad
for i in range(N):
self.assertClose(
meshes.verts_list()[i].grad, meshes_op.verts_list()[i].grad
)
self.assertClose(pcls.points_list()[i].grad, pcls_op.points_list()[i].grad)
@staticmethod
def point_mesh_edge(N: int, V: int, F: int, P: int, device: str):
device = torch.device(device)
meshes, pcls = TestPointMeshDistance.init_meshes_clouds(
N, V, F, P, device=device
)
torch.cuda.synchronize()
def loss():
point_mesh_edge_distance(meshes, pcls)
torch.cuda.synchronize()
return loss
@staticmethod
def point_mesh_face(N: int, V: int, F: int, P: int, device: str):
device = torch.device(device)
meshes, pcls = TestPointMeshDistance.init_meshes_clouds(
N, V, F, P, device=device
)
torch.cuda.synchronize()
def loss():
point_mesh_face_distance(meshes, pcls)
torch.cuda.synchronize()
return loss
| [
"[email protected]"
] | |
b25410fbc35275f299a22b2b9d4a9530a7d3c99f | 127e99fbdc4e04f90c0afc6f4d076cc3d7fdce06 | /2021_하반기 코테연습/leet_451.py | c8f3066418b4e33e2c805fefef2f434513deadaf | [] | no_license | holim0/Algo_Study | 54a6f10239368c6cf230b9f1273fe42caa97401c | ce734dcde091fa7f29b66dd3fb86d7a6109e8d9c | refs/heads/master | 2023-08-25T14:07:56.420288 | 2021-10-25T12:28:23 | 2021-10-25T12:28:23 | 276,076,057 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | from collections import defaultdict
class Solution:
def frequencySort(self, s: str) -> str:
fre_cnt = defaultdict(int)
for cur in s:
fre_cnt[cur]+=1
sorted_fre = sorted(fre_cnt.items(), key=lambda x: -x[1])
answer =""
for cur in sorted_fre:
letter, cnt = cur
for _ in range(cnt):
answer+=letter
return answer | [
"[email protected]"
] | |
3d1ee720084284ab0d66d5f7ee9426d38e28411f | af3ec207381de315f4cb6dddba727d16d42d6c57 | /dialogue-engine/src/programy/spelling/autocorrection.py | 7db00c74a2d09c9c7b6bb1c1ef281f74da74f0fb | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mcf-yuichi/cotoba-agent-oss | 02a5554fe81ce21517f33229101013b6487f5404 | ce60833915f484c4cbdc54b4b8222d64be4b6c0d | refs/heads/master | 2023-01-12T20:07:34.364188 | 2020-11-11T00:55:16 | 2020-11-11T00:55:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,562 | py | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from autocorrect import spell
from programy.spelling.base import SpellingChecker
class AutoCorrectSpellingChecker(SpellingChecker):
def __init__(self, spelling_config=None):
SpellingChecker.__init__(self, spelling_config)
def correct(self, phrase):
words = phrase.split()
correct = [spell(word).upper() for word in words]
return ' '.join(correct)
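# A minimal usage sketch (the phrase is hypothetical, the exact output depends
# on the dictionary shipped with the `autocorrect` package, and the base
# SpellingChecker is assumed to accept a None configuration): each token is
# spell-corrected and upper-cased before being rejoined.
def _example_autocorrect_usage():
    checker = AutoCorrectSpellingChecker()
    return checker.correct("speling mistake")  # e.g. "SPELLING MISTAKE"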
| [
"[email protected]"
] | |
f88a8f94c2dd7cf204eba1fa2b522da44f2431ef | dde00571d8e65208c0642f009cb1d4bc33460026 | /bigmler/retrain/dispatcher.py | 8cc577939895b4f3f5a583c63afcd8068c1d2c27 | [
"Apache-2.0"
] | permissive | javs0188/bigmler | 44e5505f4751ebdfece7da87e4d4592b0da7ff51 | e411bb292a3c8db4cac6754b2b744ffe27fdb47a | refs/heads/master | 2021-03-01T02:08:29.730986 | 2020-01-25T10:43:01 | 2020-01-25T10:43:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,659 | py | # -*- coding: utf-8 -*-
#
# Copyright 2017-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""BigMLer retrain main processing
Functions to retrain a modeling resource
"""
from __future__ import absolute_import
import sys
import os
import shutil
import bigmler.processing.args as a
import bigmler.utils as u
from bigmler.defaults import DEFAULTS_FILE
from bigmler.retrain.retrain import retrain_model
from bigmler.dispatcher import (SESSIONS_LOG,
clear_log_files)
from bigmler.command import get_context
COMMAND_LOG = u".bigmler_retrain"
DIRS_LOG = u".bigmler_retrain_dir_stack"
LOG_FILES = [COMMAND_LOG, DIRS_LOG, u.NEW_DIRS_LOG]
DEFAULT_OUTPUT = "retrain_script"
SETTINGS = {
"command_log": COMMAND_LOG,
"sessions_log": SESSIONS_LOG,
"dirs_log": DIRS_LOG,
"default_output": DEFAULT_OUTPUT,
"defaults_file": DEFAULTS_FILE}
def check_compulsory_options(flags, args):
"""Checks whether the id or a unique tag are provided
"""
return args.resource_id is not None or \
len([flag for flag in flags if flag.endswith("-tag")]) > 0
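# A minimal illustrative sketch of the check above (the stub args object and
# its resource id are hypothetical): supplying a resource id satisfies the
# compulsory options even when no "--*-tag" flags are present.
def _example_check_compulsory_options():
    class _StubArgs(object):
        resource_id = "model/000000000000000000000000"
    return check_compulsory_options([], _StubArgs())  # -> True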
def retrain_dispatcher(args=sys.argv[1:]):
"""Main processing of the parsed options for BigMLer retrain
"""
# If --clear-logs the log files are cleared
if "--clear-logs" in args:
clear_log_files(LOG_FILES)
# parses the command line to get the context args and the log files to use
command_args, command, api, session_file, resume = get_context(args,
SETTINGS)
# --id or --model-tag, --ensemble-tag, etc. is compulsory
if check_compulsory_options(command.flags, command_args):
retrain_model(command_args, api, command,
session_file=session_file)
u.log_message("_" * 80 + "\n", log_file=session_file)
else:
sys.exit("You must provide the ID of the resource to be"
" retrained in the --id option or a unique tag"
" to retrieve such ID."
" Type bigmler retrain --help\n"
" to see all the available options.")
| [
"[email protected]"
] | |
ff41520a1318a531ff4c623d7d94c949f05421e2 | d2b53b3568890dd805575035d09635c422c6bc4d | /python/ray/tests/aws/test_autoscaler_aws.py | b8ad6f31e04390f923b33d399f671f73469a7377 | [
"Apache-2.0",
"MIT"
] | permissive | mehrdadn/ray | 939deda7099eb30371cbb920a9725b314c58c0b5 | 3506910c5da257215d38d02f424acc4f419ddbaf | refs/heads/master | 2020-09-03T15:33:35.578248 | 2020-07-31T21:33:27 | 2020-07-31T21:33:27 | 219,498,150 | 2 | 1 | Apache-2.0 | 2019-11-04T12:37:23 | 2019-11-04T12:37:22 | null | UTF-8 | Python | false | false | 3,393 | py | import pytest
import ray.tests.aws.utils.stubs as stubs
import ray.tests.aws.utils.helpers as helpers
from ray.tests.aws.utils.constants import AUX_SUBNET, DEFAULT_SUBNET, \
DEFAULT_SG_AUX_SUBNET, DEFAULT_SG, DEFAULT_SG_DUAL_GROUP_RULES, \
DEFAULT_SG_WITH_RULES_AUX_SUBNET, DEFAULT_SG_WITH_RULES, AUX_SG
def test_create_sg_different_vpc_same_rules(iam_client_stub, ec2_client_stub):
# use default stubs to skip ahead to security group configuration
stubs.skip_to_configure_sg(ec2_client_stub, iam_client_stub)
# given head and worker nodes with custom subnets defined...
# expect to first describe the worker subnet ID
stubs.describe_subnets_echo(ec2_client_stub, AUX_SUBNET)
# expect to second describe the head subnet ID
stubs.describe_subnets_echo(ec2_client_stub, DEFAULT_SUBNET)
# given no existing security groups within the VPC...
stubs.describe_no_security_groups(ec2_client_stub)
# expect to first create a security group on the worker node VPC
stubs.create_sg_echo(ec2_client_stub, DEFAULT_SG_AUX_SUBNET)
# expect new worker security group details to be retrieved after creation
stubs.describe_sgs_on_vpc(
ec2_client_stub,
[AUX_SUBNET["VpcId"]],
[DEFAULT_SG_AUX_SUBNET],
)
# expect to second create a security group on the head node VPC
stubs.create_sg_echo(ec2_client_stub, DEFAULT_SG)
# expect new head security group details to be retrieved after creation
stubs.describe_sgs_on_vpc(
ec2_client_stub,
[DEFAULT_SUBNET["VpcId"]],
[DEFAULT_SG],
)
# given no existing default head security group inbound rules...
# expect to authorize all default head inbound rules
stubs.authorize_sg_ingress(
ec2_client_stub,
DEFAULT_SG_DUAL_GROUP_RULES,
)
# given no existing default worker security group inbound rules...
# expect to authorize all default worker inbound rules
stubs.authorize_sg_ingress(
ec2_client_stub,
DEFAULT_SG_WITH_RULES_AUX_SUBNET,
)
# given the prior modification to the head security group...
# expect the next read of a head security group property to reload it
stubs.describe_sg_echo(ec2_client_stub, DEFAULT_SG_WITH_RULES)
# given the prior modification to the worker security group...
# expect the next read of a worker security group property to reload it
stubs.describe_sg_echo(ec2_client_stub, DEFAULT_SG_WITH_RULES_AUX_SUBNET)
# given our mocks and an example config file as input...
# expect the config to be loaded, validated, and bootstrapped successfully
config = helpers.bootstrap_aws_example_config_file("example-subnets.yaml")
# expect the bootstrapped config to show different head and worker security
# groups residing on different subnets
assert config["head_node"]["SecurityGroupIds"] == [DEFAULT_SG["GroupId"]]
assert config["head_node"]["SubnetIds"] == [DEFAULT_SUBNET["SubnetId"]]
assert config["worker_nodes"]["SecurityGroupIds"] == [AUX_SG["GroupId"]]
assert config["worker_nodes"]["SubnetIds"] == [AUX_SUBNET["SubnetId"]]
# expect no pending responses left in IAM or EC2 client stub queues
iam_client_stub.assert_no_pending_responses()
ec2_client_stub.assert_no_pending_responses()
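# A small illustrative helper (hypothetical, not used by the test above):
# collects the security-group and subnet ids that the assertions at the end of
# the test compare, from any bootstrapped config with the same shape.
def _example_extract_sg_and_subnets(config):
    return {
        node: (config[node]["SecurityGroupIds"], config[node]["SubnetIds"])
        for node in ("head_node", "worker_nodes")
    }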
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| [
"[email protected]"
] | |
ed8bbd681e45b68b05353f1c094adb6d65c762bd | a604c63ee46dd6c5a3942ed84cada11e54c1eaff | /starslib/base.py | b021f62739f69e0657167662d5d589b9b8a1c482 | [
"MIT"
] | permissive | jbradberry/starslib | 9673636b76c99aa1ae94289ee470df9a382b289c | 48ad64fc55f1f5ebe854cf842b265924447a8a2c | refs/heads/master | 2022-06-17T19:22:24.616889 | 2020-03-22T00:20:50 | 2020-03-22T00:20:50 | 226,234,863 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,650 | py | from __future__ import absolute_import
from __future__ import division
from bisect import bisect
import struct
import six
from six.moves import map
from six.moves import range
from six.moves import zip
class StarsError(Exception):
pass
class ValidationError(StarsError):
pass
class ParseError(StarsError):
pass
class Value(object):
"""An accessor that is attached to a Struct class as a proxy for a Field.
This is a descriptor (getter/setter) that gets an instance
attached as a class attribute to a Struct subclass at
class-construction time, that has access to the Field that it
represents. It calls its Field's methods to do the cleaning,
validation, and updating of related fields.
"""
def __init__(self, field):
self.field = field
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError
# A field is dynamic if another field has a reference to it.
if self.field.dynamic:
field, t = self.field.dynamic
# Fields that are involved in a relationship provide a method
# beginning with 'value_' to report the current value of the
# desired attribute.
update = getattr(field, 'value_'+t)(obj)
self.field.set_value(obj, update)
# A field has references if some attribute of it, such as bitwidth,
# is stored in another field.
for ref, t in self.field.references:
update = getattr(self.field, 'value_'+t)(obj)
ref.set_value(obj, update, True)
value = obj.__dict__[self.field.name]
self.field.validate(obj, value)
return value
def __set__(self, obj, value):
value = self.field.clean(value)
obj.__dict__[self.field.name] = value
for ref, t in self.field.references:
ref.set_value(obj, getattr(self.field, 'value_'+t)(obj), True)
self.field.validate(obj, value)
def make_contrib(new_cls, func=None):
def contribute_to_class(self, cls, name):
if func:
func(self, cls, name)
else:
super(new_cls, self).contribute_to_class(cls, name)
setattr(cls, self.name, Value(self))
return contribute_to_class
class FieldBase(type):
def __new__(cls, names, bases, attrs):
new_cls = super(FieldBase, cls).__new__(cls, names, bases, attrs)
new_cls.contribute_to_class = make_contrib(
new_cls, attrs.get('contribute_to_class'))
return new_cls
class Field(six.with_metaclass(FieldBase, object)):
"""A data member on a Struct.
bitwidth: Specifies the number of bits to be consumed to populate
this field.
value: if non-None, the value that this field must always contain
max: the maximum integer value this field may contain
choices: specify the enumerated values this field may contain
option: this field is optional, and is only present when option
evaluates as True
"""
counter = 0
def __init__(self, bitwidth=16, value=None, max=None,
choices=None, option=None, **kwargs):
"""
"""
self._counter = Field.counter
Field.counter += 1
self.references = []
self.dynamic = None
self._bitwidth = bitwidth
if callable(bitwidth):
self.bitwidth = self._callable_bitwidth
elif isinstance(bitwidth, six.string_types):
self.references.append([bitwidth, 'bitwidth'])
self.bitwidth = self._ref_bitwidth
else:
self.bitwidth = self._const_bitwidth
self.value = value
self.max = max
self.choices = choices
self.option = option
def _callable_bitwidth(self, obj):
return self._bitwidth(obj)
def _ref_bitwidth(self, obj):
return self._bitwidth.get_value(obj)
def _const_bitwidth(self, obj):
return self._bitwidth
def __lt__(self, other):
return self._counter < other._counter
def __le__(self, other):
return self._counter <= other._counter
def __eq__(self, other):
return self._counter == other._counter
def __ne__(self, other):
return self._counter != other._counter
def __gt__(self, other):
return self._counter > other._counter
def __ge__(self, other):
return self._counter >= other._counter
def contribute_to_class(self, cls, name):
self.name = name
self.struct = cls
cls.fields.insert(bisect(cls.fields, self), self)
def _parse_vars(self, obj, seq, vars):
vars.bitwidth = self.bitwidth(obj)
def _pre_parse(self, obj, seq, vars):
if vars.bitwidth and obj.byte >= len(seq):
raise ParseError("%s.%s: %s" % (self.struct.__name__,
self.name, seq))
def _parse(self, obj, seq, vars):
pass
def _post_parse(self, obj, seq, vars):
return vars.result
def parse(self, obj, seq):
vars = obj._vars
self._parse_vars(obj, seq, vars)
self._pre_parse(obj, seq, vars)
self._parse(obj, seq, vars)
return self._post_parse(obj, seq, vars)
def _deparse_vars(self, obj, vars):
vars.bitwidth = self.bitwidth(obj)
vars.value = getattr(obj, self.name)
def _pre_deparse(self, obj, vars):
pass
def _deparse(self, obj, vars):
pass
def _post_deparse(self, obj, vars):
return vars.result
def deparse(self, obj):
vars = obj._vars
self._deparse_vars(obj, vars)
self._pre_deparse(obj, vars)
self._deparse(obj, vars)
return self._post_deparse(obj, vars)
def clean(self, value):
return value
def skip(self, obj):
if self.option and not self.option(obj):
return True
if self.bitwidth(obj) is None:
return True
return False
def validate(self, obj, value):
bitwidth = self.bitwidth(obj)
if value is None:
if self.option:
if self.option(obj):
raise ValidationError
return True
if bitwidth is not None and bitwidth > 0:
raise ValidationError("bitwidth: %s" % bitwidth)
return True
if bitwidth is None and value is not None:
raise ValidationError
if self.value is not None and value != self.value:
raise ValidationError("%s: %s != %s" % (self.name, value,
self.value))
def get_value(self, obj):
value = obj.__dict__[self.name]
if value is None:
return None
if self.choices is not None:
value = dict(self.choices)[value]
return value
def set_value(self, obj, value, side_effects=False):
if value is not None and self.choices is not None:
value = min((v, k) for k, v in self.choices if value <= v)[1]
if side_effects:
setattr(obj, self.name, value)
else:
obj.__dict__[self.name] = self.clean(value)
class Int(Field):
def _parse(self, obj, seq, vars):
vars.result = 0
try:
acc_bit = 0
while vars.bitwidth > 0:
if vars.bitwidth >= 8-obj.bit:
vars.result += (seq[obj.byte]>>obj.bit) << acc_bit
obj.byte += 1
acc_bit += 8-obj.bit
vars.bitwidth -= 8-obj.bit
obj.bit = 0
else:
vars.result += ((seq[obj.byte] >> obj.bit) &
(2**vars.bitwidth - 1)) << acc_bit
obj.bit += vars.bitwidth
vars.bitwidth = 0
except IndexError:
raise ParseError("%s %s: %s > %s" % (self.struct.__name__,
seq, obj.byte, len(seq)))
def _deparse(self, obj, vars):
result = []
value = vars.value << obj.bit | obj.prev
vars.bitwidth += obj.bit
while vars.bitwidth >= 8:
value, tmp = value >> 8, value & 0xff
vars.bitwidth -= 8
result.append(tmp)
obj.prev, obj.bit = value, vars.bitwidth
vars.result = result
def clean(self, value):
if value is None:
return None
return int(value)
def validate(self, obj, value):
if super(Int, self).validate(obj, value):
return True
if self.max is not None and value > self.max:
raise ValidationError("%s: %s > %s" % (self.name, value, self.max))
if not 0 <= value < 2**self.bitwidth(obj):
raise ValidationError
def value_bitwidth(self, obj):
value = obj.__dict__[self.name]
if value is None:
return None
return 0 if value == 0 else len(bin(value)) - 2
class Bool(Int):
def __init__(self, **kwargs):
kwargs.update(bitwidth=1, choices=None)
super(Bool, self).__init__(**kwargs)
def _post_parse(self, obj, seq, vars):
return bool(vars.result)
def clean(self, value):
if value is None:
return None
return bool(value)
class Sequence(Field):
"""A field that stores some dynamic sequence of values.
head: denotes some number of bits at the beginning of the
bitstream that stores the length information. Must be a multiple
of 8.
length: an externally specified number of items for the
sequence. May be a fixed number, a callable, or a string which
encodes a reference to another field which stores the length. Only
specified if head is None. If length is then also None, this
generally means to consume all remaining bytes in the sequence.
bitwidth: the number of bits each element of the sequence consumes
"""
def __init__(self, head=None, length=None, bitwidth=8, **kwargs):
kwargs.update(bitwidth=bitwidth)
super(Sequence, self).__init__(**kwargs)
self.head = head
self._length = length
if length is None:
if head is not None:
self.length = self._head_length
else:
self.length = self._remainder_length
elif callable(length):
self.length = self._callable_length
elif isinstance(length, six.string_types):
self.references.append([length, 'length'])
self.length = self._ref_length
else:
self.length = self._const_length
def _head_length(self, obj, seq=None):
if seq is None:
return None
return sum(x<<(8*n) for n, x in
enumerate(seq[obj.byte:obj.byte+self.head//8]))
def _remainder_length(self, obj, seq=None):
if seq is None:
return None
return (len(seq) - obj.byte) // (self.bitwidth(obj) // 8)
def _callable_length(self, obj, seq=None):
return self._length(obj)
def _ref_length(self, obj, seq=None):
return self._length.get_value(obj)
def _const_length(self, obj, seq=None):
return self._length
def _parse_vars(self, obj, seq, vars):
super(Sequence, self)._parse_vars(obj, seq, vars)
vars.length = self.length(obj, seq)
if self._length is None and self.head is not None:
obj.byte += self.head // 8
def _pre_parse(self, obj, seq, vars):
if vars.bitwidth % 8 != 0:
raise ParseError
if obj.bit != 0:
raise ParseError
if vars.length * vars.bitwidth//8 > len(seq) - obj.byte:
raise ParseError("byte: %s, seq: %s" % (obj.byte, seq))
def _parse(self, obj, seq, vars):
result = seq[obj.byte:obj.byte + vars.length * vars.bitwidth//8]
result = list(zip(*(iter(result),) * (vars.bitwidth//8)))
result = [sum(x<<(8*n) for n, x in enumerate(b)) for b in result]
obj.byte += vars.length * vars.bitwidth//8
vars.result = result
def _pre_deparse(self, obj, vars):
if obj.prev != 0 or obj.bit != 0:
raise ParseError
if vars.bitwidth % 8 != 0:
raise ParseError
def _deparse(self, obj, vars):
vars.result = [x>>(8*n) & 0xff for x in vars.value
for n in range(vars.bitwidth//8)]
def _post_deparse(self, obj, vars):
L = len(vars.value)
vars.L = L
if self._length is None and self.head is not None:
head = [L>>(8*n) & 0xff for n in range(self.head//8)]
vars.result = head + vars.result
return vars.result
def validate(self, obj, value):
if super(Sequence, self).validate(obj, value):
return True
length = self.length(obj)
if self._length is None:
if self.head is not None:
if not 0 <= len(value) < 2**self.head:
raise ValidationError
else:
if not 0 <= len(value) < 1024:
raise ValidationError
# don't worry about the basestring case; the chained setattr
# will get it.
elif not isinstance(self._length, six.string_types):
if len(value) != length:
raise ValidationError
def value_length(self, obj):
value = obj.__dict__[self.name]
return len(value) if value is not None else None
class Str(Sequence):
def __init__(self, length=None, head=None, **kwargs):
kwargs.update(bitwidth=8, length=length, head=head)
super(Str, self).__init__(**kwargs)
def _post_parse(self, obj, seq, vars):
return ''.join(map(chr, vars.result))
def _pre_deparse(self, obj, vars):
vars.value = list(map(ord, vars.value))
def validate(self, obj, value):
if super(Str, self).validate(obj, value):
return True
if not isinstance(value, six.string_types):
raise ValidationError
class CStr(Str):
top = " aehilnorstbcdfgjkmpquvwxyz+-,!.?:;'*%$"
def __init__(self, head=8, **kwargs):
kwargs.update(head=head)
super(CStr, self).__init__(**kwargs)
def _pre_parse(self, obj, seq, vars):
super(CStr, self)._pre_parse(obj, seq, vars)
if vars.length == 0:
obj.byte += 1
def _post_parse(self, obj, seq, vars):
return self.decompress(vars.result)
def _pre_deparse(self, obj, vars):
vars.value = self.compress(vars.value)
def _post_deparse(self, obj, vars):
vars.result = super(CStr, self)._post_deparse(obj, vars)
if self._length is None:
if vars.L == 0:
vars.result.append(0)
return vars.result
def value_length(self, obj):
value = self.compress(obj.__dict__[self.name])
return len(value) if value is not None else None
def decompress(self, lst):
# break lst up into a sequence of 4-bit nibbles
tmp = ((x>>i) & 0xf for x in lst for i in (4,0))
result = []
for x in tmp:
if 0x0 <= x <= 0xA:
C = self.top[x]
elif 0xB <= x <= 0xE:
x = ((x-0xB)<<4) + next(tmp)
if x < 0x1A:
C = chr(x + 0x41)
elif x < 0x24:
C = chr(x + 0x16)
else:
C = self.top[x - 0x19]
elif x == 0xF:
try:
C = chr(next(tmp) + (next(tmp)<<4))
except StopIteration:
break
result.append(C)
return ''.join(result)
def compress(self, S):
result = []
for c in S:
if c in self.top[:11]:
result.append(self.top.index(c))
elif c in self.top[11:]:
tmp = self.top.index(c) + 0x19
result.extend(((tmp>>4) + 0xB, tmp & 0xF))
else:
tmp = ord(c)
if 0x41 <= tmp < 0x5B:
tmp -= 0x41
result.extend(((tmp>>4) + 0xB, tmp & 0xF))
elif 0x30 <= tmp < 0x3A:
tmp -= 0x16
result.extend(((tmp>>4) + 0xB, tmp & 0xF))
else:
result.extend((0xF, tmp & 0xF, tmp>>4))
if len(result) % 2 != 0:
result.append(0xF)
return [(result[i]<<4)+result[i+1] for i in range(0, len(result), 2)]
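    # Round-trip sketch (assumed behaviour, following the tables above): common
    # lower-case letters and punctuation from `top` pack into a single 4-bit
    # nibble, upper-case letters and digits into two nibbles, and anything else
    # is escaped as 0xF plus the raw byte, e.g.
    #
    #   CStr().decompress(CStr().compress("Cherry"))  ->  "Cherry"
    #
    # so compress/decompress are inverses for ordinary ASCII names.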
class Array(Sequence):
def __init__(self, head=8, length=None, **kwargs):
kwargs.update(head=head, length=length)
super(Array, self).__init__(**kwargs)
def validate(self, obj, value):
if super(Array, self).validate(obj, value):
return True
bitwidth = self.bitwidth(obj)
if not all(0 <= x < 2**bitwidth for x in value):
raise ValidationError
def value_bitwidth(self, obj):
value = obj.__dict__[self.name]
if value is None:
return None
if not value:
return 0
return max(0 if x == 0 else len(bin(x)) - 2 for x in value)
class ObjArray(Array):
def __init__(self, **kwargs):
super(ObjArray, self).__init__(**kwargs)
self.bitwidths = self.bitwidth
self.bitwidth = self._inner_bitwidths
def _inner_bitwidths(self, obj):
return sum(bw for name, bw in self.bitwidths(obj))
def _parse_vars(self, obj, seq, vars):
super(ObjArray, self)._parse_vars(obj, seq, vars)
vars.bitwidths = self.bitwidths(obj)
def _post_parse(self, obj, seq, vars):
bitwidths = vars.bitwidths
bw = [(b[0], b[1], sum([0] + [v for _, v in bitwidths][:i]))
for i, b in enumerate(bitwidths)]
return [dict((k, (x>>o)&(2**b-1)) for k, b, o in bw)
for x in vars.result]
def _deparse_vars(self, obj, vars):
super(ObjArray, self)._deparse_vars(obj, vars)
vars.bitwidths = self.bitwidths(obj)
def _pre_deparse(self, obj, vars):
Sequence._pre_deparse(self, obj, vars)
bitwidths = vars.bitwidths
bw = [(b[0], b[1], sum([0] + [v for _, v in bitwidths][:i]))
for i, b in enumerate(bitwidths)]
vars.value = [sum(x[k]<<o for k, b, o in bw) for x in vars.value]
def validate(self, obj, value):
if Sequence.validate(self, obj, value):
return True
bitwidths = self.bitwidths(obj)
if not all(all(0 <= x[k] < 2**v for k, v in bitwidths) for x in value):
raise ValidationError
if any(set(six.iterkeys(x)) - set(b[0] for b in bitwidths)
for x in value):
raise ValidationError
class StructBase(type):
def __new__(cls, name, bases, attrs):
super_new = super(StructBase, cls).__new__
parents = [b for b in bases if isinstance(b, StructBase)]
if not parents:
return super_new(cls, name, bases, attrs)
new_attrs = {}
new_attrs['__module__'] = attrs.pop('__module__')
if '__classcell__' in attrs:
new_attrs['__classcell__'] = attrs['__classcell__']
new_class = super_new(cls, name, bases, new_attrs)
new_class.add_to_class('fields', [])
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
by_name = dict((field.name, field) for field in new_class.fields)
for field in new_class.fields:
for ref in field.references:
new_field = by_name[ref[0]]
new_field.dynamic = (field, ref[1])
ref[0] = new_field
setattr(field, '_'+ref[1], new_field)
if 'type' in attrs:
new_class._registry[attrs['type']] = new_class
return new_class
def add_to_class(cls, name, value):
if hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
class Vars(object):
pass
@six.python_2_unicode_compatible
class Struct(six.with_metaclass(StructBase, object)):
_registry = {}
encrypted = True
def __init__(self, sfile):
self.file = sfile
self._vars = Vars()
self._vars._seq = sfile.counts.get(self.type, 0)
def __str__(self):
return "{%s}" % (', '.join("%s: %r" % (f.name,
getattr(self, f.name, None))
for f in self.fields
if getattr(self, f.name) is not None),)
@property
def bytes(self):
seq, self.prev, self.bit = [], 0, 0
for field in self.fields:
if not field.skip(self):
seq.extend(field.deparse(self))
if self.bit != 0 or self.prev != 0:
raise ValidationError
return tuple(seq)
@bytes.setter
def bytes(self, seq):
self.byte, self.bit = 0, 0
for field in self.fields:
result = None if field.skip(self) else field.parse(self, seq)
setattr(self, field.name, result)
#print field.name, self.byte, getattr(self, field.name)
if self.byte != len(seq) or self.bit != 0:
raise ValidationError("%s %s (%s %s) %s" % (self.__class__.__name__,
len(seq), self.byte,
self.bit, seq))
def adjust(self):
return
class StarsFile(object):
# Primes, but not really. 279 should be 269.
prime = (3, 5, 7, 11, 13, 17, 19, 23,
29, 31, 37, 41, 43, 47, 53, 59,
61, 67, 71, 73, 79, 83, 89, 97,
101, 103, 107, 109, 113, 127, 131, 137,
139, 149, 151, 157, 163, 167, 173, 179,
181, 191, 193, 197, 199, 211, 223, 227,
229, 233, 239, 241, 251, 257, 263, 279,
271, 277, 281, 283, 293, 307, 311, 313,
317, 331, 337, 347, 349, 353, 359, 367,
373, 379, 383, 389, 397, 401, 409, 419,
421, 431, 433, 439, 443, 449, 457, 461,
463, 467, 479, 487, 491, 499, 503, 509,
521, 523, 541, 547, 557, 563, 569, 571,
577, 587, 593, 599, 601, 607, 613, 617,
619, 631, 641, 643, 647, 653, 659, 661,
673, 677, 683, 691, 701, 709, 719, 727)
def __init__(self):
self.hi, self.lo = 0, 0
self.structs = []
self.stars = 0
def prng_init(self, uid, turn, player, salt, flag):
i, j = (salt>>5) & 0x1f, salt & 0x1f
if salt < 0x400:
i += 0x20
else:
j += 0x20
self.hi, self.lo = self.prime[i], self.prime[j]
seed = ((player%4)+1) * ((uid%4)+1) * ((turn%4)+1) + flag
for i in range(seed):
self.prng()
def prng(self):
self.lo = (0x7fffffab * int(self.lo/-53668) + 40014 * self.lo) & 0xffffffff
if self.lo >= 0x80000000: self.lo -= 0x80000055
self.hi = (0x7fffff07 * int(self.hi/-52774) + 40692 * self.hi) & 0xffffffff
if self.hi >= 0x80000000: self.hi -= 0x800000f9
return (self.lo - self.hi) & 0xffffffff
def crypt(self, seq):
L = len(seq)
oL = L
if L % 4 != 0:
seq = list(seq) + [0] * (4 - L%4)
L = len(seq)
tmp = struct.pack("%dB" % L, *seq)
tmp = struct.unpack("%dI" % (L//4), tmp)
tmp = [x^self.prng() for x in tmp]
tmp = struct.pack("%dI" % (L//4), *tmp)
tmp = struct.unpack("%dB" % L, tmp)
return tmp[:oL]
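    # Sketch of the symmetry this relies on (uid/turn/player/salt/flag are
    # placeholders): crypt() just XORs with the PRNG key stream, so running it
    # twice from the same PRNG state restores the original bytes.
    #
    #   f = StarsFile()
    #   f.prng_init(uid, turn, player, salt, flag)
    #   enc = f.crypt(raw)
    #   f.prng_init(uid, turn, player, salt, flag)   # reset the key stream
    #   assert list(f.crypt(enc)) == list(raw)
    #
    # This is why the same routine serves for both encryption and decryption
    # in the `bytes` property below.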
@property
def bytes(self):
seq = []
for S in self.structs:
if S.type is not None:
# for non-star definition structs, the first 16 bits
# are 6 bits of type and 10 bits of length
L = len(S.bytes)
seq.extend((L & 0xff, S.type<<2 | L>>8))
seq.extend(self.crypt(S.bytes) if S.encrypted else S.bytes)
S.adjust()
return ''.join(map(chr, seq))
@bytes.setter
def bytes(self, data):
index = 0
self.structs = []
self.counts = {}
while index < len(data):
if self.stars > 0:
stype, size = None, 4
else:
# for non-star definition structs, the first 16 bits
# are 6 bits of type and 10 bits of length
hdr = struct.unpack("H", data[index:index+2])[0]
stype, size = (hdr & 0xfc00)>>10, hdr & 0x03ff
index += 2
S = self.dispatch(stype)
self.counts[stype] = self.counts.get(stype, 0) + 1
self.structs.append(S)
buf = struct.unpack("%dB" % size, data[index:index+size])
if S.encrypted:
buf = self.crypt(buf)
S.bytes = buf
S.adjust()
index += size
def dispatch(self, stype):
if stype in Struct._registry:
return Struct._registry[stype](self)
return FakeStruct(self, stype)
BITWIDTH_CHOICES = ((0, 0), (1, 8), (2, 16), (3, 32))
ftypes = ('xy', 'x', 'hst', 'm', 'h', 'r')
def filetypes(*args):
def ftype_check(s):
return s.file.type in args
return ftype_check
@six.python_2_unicode_compatible
class FakeStruct(Struct):
bytes = None
def __init__(self, sfile, stype):
self.type = stype # needed for deparsing FakeStructs
super(FakeStruct, self).__init__(sfile)
def __str__(self):
return six.text_type(self.bytes)
class Star(Struct):
type = None
encrypted = False
dx = Int(10)
y = Int(12)
name_id = Int(10)
def adjust(self):
self.file.stars -= 1
class Type0(Struct):
""" End of file """
type = 0
encrypted = False
info = Int(option=filetypes('hst', 'xy', 'm'))
class Type1(Struct):
""" Waypoint 0 Orders (1-Byte) """
type = 1
object_lhs = Int()
object_rhs = Int()
type_lhs = Int(4)
type_rhs = Int(4)
cargo_bits = Int(8) # ir, bo, ge, pop, fuel
# Note: the following are evaluated as signed ints
cargo = Array(head=None, bitwidth=8,
length=lambda s: bin(s.cargo_bits).count('1'))
class Type2(Struct):
""" Waypoint 0 Orders (2-Byte) """
type = 2
object_lhs = Int()
object_rhs = Int()
type_lhs = Int(4)
type_rhs = Int(4)
cargo_bits = Int(8) # ir, bo, ge, pop, fuel
# Note: the following are evaluated as signed ints
cargo = Array(head=None, bitwidth=16,
length=lambda s: bin(s.cargo_bits).count('1'))
class Type3(Struct):
""" Delete Waypoint """
type = 3
fleet_id = Int(11)
unknown1 = Int(5)
sequence = Int(8)
unknown2 = Int(8)
class Type4(Struct):
""" Add Waypoint """
type = 4
fleet_id = Int()
sequence = Int()
x = Int()
y = Int()
object_id = Int()
order = Int(4)
warp = Int(4)
intercept_type = Int(8)
transport = Array(bitwidth=8, head=None, length=None)
class Type5(Struct):
""" Modify Waypoint """
type = 5
fleet_id = Int()
sequence = Int()
x = Int()
y = Int()
object_id = Int()
order = Int(4)
warp = Int(4)
intercept_type = Int(8)
transport = Array(bitwidth=8, head=None, length=None)
def type6_trigger(S):
return S.optional_section
class Type6(Struct):
""" Race data """
type = 6
player = Int(8)
num_ship_designs = Int(8)
planets_known = Int()
visible_fleets = Int(12)
station_designs = Int(4)
unknown1 = Int(2, value=3)
optional_section = Bool()
race_icon = Int(5)
unknown2 = Int(8) # 227 is computer control
# optional section
unknown3 = Int(32, option=type6_trigger) # not const
password_hash = Int(32, option=type6_trigger)
mid_G = Int(8, option=type6_trigger)
mid_T = Int(8, option=type6_trigger)
mid_R = Int(8, option=type6_trigger)
min_G = Int(8, option=type6_trigger)
min_T = Int(8, option=type6_trigger)
min_R = Int(8, option=type6_trigger)
max_G = Int(8, option=type6_trigger)
max_T = Int(8, option=type6_trigger)
max_R = Int(8, option=type6_trigger)
growth = Int(8, option=type6_trigger)
cur_energy = Int(8, option=type6_trigger)
cur_weapons = Int(8, option=type6_trigger)
cur_propulsion = Int(8, option=type6_trigger)
cur_construction = Int(8, option=type6_trigger)
cur_electronics = Int(8, option=type6_trigger)
cur_biotech = Int(8, option=type6_trigger)
# no idea yet
whatever = Array(bitwidth=8, length=30, option=type6_trigger)
col_per_res = Int(8, option=type6_trigger)
res_per_10f = Int(8, option=type6_trigger)
f_build_res = Int(8, option=type6_trigger)
f_per_10kcol = Int(8, option=type6_trigger)
min_per_10m = Int(8, option=type6_trigger)
m_build_res = Int(8, option=type6_trigger)
m_per_10kcol = Int(8, option=type6_trigger)
leftover = Int(8, option=type6_trigger)
energy = Int(8, max=2, option=type6_trigger)
weapons = Int(8, max=2, option=type6_trigger)
propulsion = Int(8, max=2, option=type6_trigger)
construction = Int(8, max=2, option=type6_trigger)
electronics = Int(8, max=2, option=type6_trigger)
biotech = Int(8, max=2, option=type6_trigger)
prt = Int(option=type6_trigger)
imp_fuel_eff = Bool(option=type6_trigger)
tot_terraform = Bool(option=type6_trigger)
adv_remote_mine = Bool(option=type6_trigger)
imp_starbases = Bool(option=type6_trigger)
gen_research = Bool(option=type6_trigger)
ult_recycling = Bool(option=type6_trigger)
min_alchemy = Bool(option=type6_trigger)
no_ramscoops = Bool(option=type6_trigger)
cheap_engines = Bool(option=type6_trigger)
only_basic_mine = Bool(option=type6_trigger)
no_adv_scanners = Bool(option=type6_trigger)
low_start_pop = Bool(option=type6_trigger)
bleeding_edge = Bool(option=type6_trigger)
regen_shields = Bool(option=type6_trigger)
ignore = Int(2, value=0, option=type6_trigger)
unknown4 = Int(8, value=0, option=type6_trigger)
f1 = Bool(option=type6_trigger)
f2 = Bool(option=type6_trigger)
f3 = Bool(option=type6_trigger)
f4 = Bool(option=type6_trigger)
f5 = Bool(option=type6_trigger)
p75_higher_tech = Bool(option=type6_trigger)
f7 = Bool(option=type6_trigger)
f_1kTlessGe = Bool(option=type6_trigger)
# no idea yet
whatever2 = Array(bitwidth=8, length=30, option=type6_trigger)
unknown5 = Array(8, option=type6_trigger)
# end optional section
race_name = CStr(8)
plural_race_name = CStr(8)
class Type7(Struct):
""" Game definition """
type = 7
game_id = Int(32)
size = Int()
density = Int()
num_players = Int()
num_stars = Int()
start_distance = Int()
unknown1 = Int()
flags1 = Int(8)
unknown2 = Int(24)
req_pct_planets_owned = Int(8)
req_tech_level = Int(8)
req_tech_num_fields = Int(8)
req_exceeds_score = Int(8)
req_pct_exceeds_2nd = Int(8)
req_exceeds_prod = Int(8)
req_capships = Int(8)
req_highscore_year = Int(8)
req_num_criteria = Int(8)
year_declared = Int(8)
unknown3 = Int()
game_name = Str(length=32)
def adjust(self):
self.file.stars = self.num_stars
class Type8(Struct):
""" Beginning of file """
type = 8
encrypted = False
magic = Str(length=4, value="J3J3")
game_id = Int(32)
file_ver = Int()
turn = Int()
player = Int(5)
salt = Int(11)
filetype = Int(8)
submitted = Bool()
in_use = Bool()
multi_turn = Bool()
gameover = Bool()
shareware = Bool()
unused = Int(3)
def __init__(self, sfile):
super(Type8, self).__init__(sfile)
sfile.counts.clear()
def adjust(self):
self.file.prng_init(self.game_id, self.turn, self.player,
self.salt, self.shareware)
self.file.type = ftypes[self.filetype]
# class Type12(Struct):
# """ Internal Messages """
# type = 12
# messages = ObjArray(head=None, length=None, bitwidth=)
class Type13(Struct):
""" Authoritative Planet """
type = 13
planet_id = Int(11, max=998)
player = Int(5)
low_info = Bool(value=True) # add station design, if relevant
med_info = Bool(value=True) # add minerals & hab
full_info = Bool(value=True) # add real pop & structures
const = Int(4, value=0)
homeworld = Bool()
f0 = Bool(value=True)
station = Bool()
terraformed = Bool()
facilities = Bool() # turns on 8 bytes; rename
artifact = Bool()
surface_min = Bool()
routing = Bool() # turns on 2 bytes
f7 = Bool()
s1 = Int(2, max=1, choices=BITWIDTH_CHOICES)
s2 = Int(2, max=1, choices=BITWIDTH_CHOICES)
s3 = Int(4, max=1, choices=BITWIDTH_CHOICES)
frac_ir_conc = Int('s1')
frac_bo_conc = Int('s2')
frac_ge_conc = Int('s3')
ir_conc = Int(8)
bo_conc = Int(8)
ge_conc = Int(8)
grav = Int(8)
temp = Int(8)
rad = Int(8)
grav_orig = Int(8, option=lambda s: s.terraformed)
temp_orig = Int(8, option=lambda s: s.terraformed)
rad_orig = Int(8, option=lambda s: s.terraformed)
apparent_pop = Int(12, option=lambda s: s.player < 16) # times 400
apparent_defense = Int(4, option=lambda s: s.player < 16)
s4 = Int(2, choices=BITWIDTH_CHOICES, option=lambda s: s.surface_min)
s5 = Int(2, choices=BITWIDTH_CHOICES, option=lambda s: s.surface_min)
s6 = Int(2, choices=BITWIDTH_CHOICES, option=lambda s: s.surface_min)
s7 = Int(2, choices=BITWIDTH_CHOICES, option=lambda s: s.surface_min)
ir_surf = Int('s4')
bo_surf = Int('s5')
ge_surf = Int('s6')
population = Int('s7') # times 100
frac_population = Int(8, max=99, option=lambda s: s.facilities)
mines = Int(12, option=lambda s: s.facilities)
factories = Int(12, option=lambda s: s.facilities)
defenses = Int(8, option=lambda s: s.facilities)
unknown3 = Int(24, option=lambda s: s.facilities)
station_design = Int(4, max=9, option=lambda s: s.station)
station_flags = Int(28, option=lambda s: s.station)
routing_dest = Int(option=lambda s: s.routing and s.player < 16)
class Type14(Struct):
""" Scanned Planet """
type = 14
planet_id = Int(11, max=998)
player = Int(5)
# collapse these 4 fields into a single info_level field
low_info = Bool() # add station design, if relevant
med_info = Bool() # add minerals & hab
full_info = Bool() # add real pop & structures
const = Int(4, value=0)
# /collapse
homeworld = Bool()
f0 = Bool(value=True)
station = Bool()
terraformed = Bool()
facilities = Bool(value=False) # turns on 8 bytes; rename
artifact = Bool()
surface_min = Bool()
routing = Bool() # turns on 2 bytes
f7 = Bool()
s1 = Int(2, max=1, choices=BITWIDTH_CHOICES,
option=lambda s: s.med_info or s.full_info)
s2 = Int(2, max=1, choices=BITWIDTH_CHOICES,
option=lambda s: s.med_info or s.full_info)
s3 = Int(4, max=1, choices=BITWIDTH_CHOICES,
option=lambda s: s.med_info or s.full_info)
frac_ir_conc = Int('s1')
frac_bo_conc = Int('s2')
frac_ge_conc = Int('s3')
ir_conc = Int(8, option=lambda s: s.med_info or s.full_info)
bo_conc = Int(8, option=lambda s: s.med_info or s.full_info)
ge_conc = Int(8, option=lambda s: s.med_info or s.full_info)
grav = Int(8, option=lambda s: s.med_info or s.full_info)
temp = Int(8, option=lambda s: s.med_info or s.full_info)
rad = Int(8, option=lambda s: s.med_info or s.full_info)
grav_orig = Int(8, option=lambda s:
(s.med_info or s.full_info) and s.terraformed)
temp_orig = Int(8, option=lambda s:
(s.med_info or s.full_info) and s.terraformed)
rad_orig = Int(8, option=lambda s:
(s.med_info or s.full_info) and s.terraformed)
apparent_pop = Int(12, option=lambda s: # times 400
(s.med_info or s.full_info) and s.player < 16)
apparent_defense = Int(4, option=lambda s:
(s.med_info or s.full_info) and s.player < 16)
s4 = Int(2, choices=BITWIDTH_CHOICES,
option=lambda s: s.full_info and s.surface_min)
s5 = Int(2, choices=BITWIDTH_CHOICES,
option=lambda s: s.full_info and s.surface_min)
s6 = Int(2, choices=BITWIDTH_CHOICES,
option=lambda s: s.full_info and s.surface_min)
s7 = Int(2, choices=BITWIDTH_CHOICES,
option=lambda s: s.full_info and s.surface_min)
ir_surf = Int('s4')
bo_surf = Int('s5')
ge_surf = Int('s6')
station_design = Int(8, max=9, option=lambda s: s.station)
last_scanned = Int(option=filetypes('h'))
class Type16(Struct):
""" Authoritative Fleet """
type = 16
fleet_id = Int(9)
player = Int(7)
player2 = Int()
info_level = Int(8)
flags = Int(8)
planet_id = Int()
x = Int()
y = Int()
design_bits = Int()
count_array = Array(bitwidth=lambda s: 16 - (s.flags & 0x8),
length=lambda s: bin(s.design_bits).count('1'))
s1 = Int(2, choices=BITWIDTH_CHOICES, option=lambda s: s.info_level >= 4)
s2 = Int(2, choices=BITWIDTH_CHOICES, option=lambda s: s.info_level >= 4)
s3 = Int(2, choices=BITWIDTH_CHOICES, option=lambda s: s.info_level >= 4)
s4 = Int(2, choices=BITWIDTH_CHOICES, option=lambda s: s.info_level >= 4)
s5 = Int(8, choices=BITWIDTH_CHOICES, option=lambda s: s.info_level >= 4)
ironium = Int('s1', option=lambda s: s.info_level >= 4)
boranium = Int('s2', option=lambda s: s.info_level >= 4)
germanium = Int('s3', option=lambda s: s.info_level >= 4)
colonists = Int('s4', option=lambda s: s.info_level >= 7)
fuel = Int('s5', option=lambda s: s.info_level >= 7)
dmg_design_bits = Int()
damage_amts = ObjArray(bitwidth=(('pct_of_type_damaged', 7),
('damage', 9)),
length=lambda s: bin(s.dmg_design_bits).count('1'))
battle_plan = Int(8)
queue_len = Int(8)
class Type17(Struct):
""" Alien Fleet """
type = 17
fleet_id = Int(9)
player = Int(7)
player2 = Int()
info_level = Int(8)
flags = Int(8)
planet_id = Int()
x = Int()
y = Int()
design_bits = Int()
count_array = Array(bitwidth=lambda s: 16 - (s.flags & 0x8),
length=lambda s: bin(s.design_bits).count('1'))
s1 = Int(2, choices=BITWIDTH_CHOICES, option=lambda s: s.info_level >= 4)
s2 = Int(2, choices=BITWIDTH_CHOICES, option=lambda s: s.info_level >= 4)
s3 = Int(12, choices=BITWIDTH_CHOICES, option=lambda s: s.info_level >= 4)
ironium = Int('s1', option=lambda s: s.info_level >= 4)
boranium = Int('s2', option=lambda s: s.info_level >= 4)
germanium = Int('s3', option=lambda s: s.info_level >= 4)
dx = Int(8)
dy = Int(8)
warp = Int(4)
unknown2 = Int(12)
mass = Int(32)
class Type19(Struct):
""" Orders-at Waypoint """
type = 19
x = Int()
y = Int()
planet_id = Int()
order = Int(4)
warp = Int(4)
intercept_type = Int(8)
ir_quant = Int(12)
ir_order = Int(4)
bo_quant = Int(12)
bo_order = Int(4)
ge_quant = Int(12)
ge_order = Int(4)
col_quant = Int(12)
col_order = Int(4)
fuel_quant = Int(12)
fuel_order = Int(4)
class Type20(Struct):
""" Waypoint """
type = 20
x = Int()
y = Int()
planet_id = Int()
order = Int(4)
warp = Int(4)
intercept_type = Int(8)
class Type21(Struct):
""" Fleet Name """
type = 21
name = CStr()
class Type23(Struct):
""" Split Fleet """
type = 23
fleet_id = Int(11)
unknown = Int(5)
fleet2_id = Int(11)
unknown2 = Int(5)
thirtyfour = Int(8, value=34)
design_bits = Int()
# Note: the following are interpreted as negative numbers
adjustment = Array(head=None, bitwidth=16,
length=lambda s: bin(s.design_bits).count('1'))
class Type24(Struct):
""" Original Fleet on Split """
type = 24
fleet_id = Int(11)
unknown = Int(5)
class Type26(Struct):
""" Ship & Starbase Design """
type = 26
info_level = Int(8)
unknown = Array(bitwidth=8, length=5)
slots_length = Int(8, option=lambda s: s.info_level > 3)
initial_turn = Int(option=lambda s: s.info_level > 3)
total_constructed = Int(32, option=lambda s: s.info_level > 3)
current_quantity = Int(32, option=lambda s: s.info_level > 3)
slots = ObjArray(bitwidth=(('flags', 16),
('part_sub_id', 8),
('quantity', 8)),
length='slots_length',
option=lambda s: s.info_level > 3)
name = CStr()
class Type27(Struct):
""" New Ship & Starbase Design """
type = 27
info_level = Int(4)
player_id = Int(4)
index = Int(8)
unknown = Array(bitwidth=8, length=6, option=lambda s: s.info_level)
slots_length = Int(8, option=lambda s: s.info_level)
initial_turn = Int(option=lambda s: s.info_level)
total_constructed = Int(32, value=0, option=lambda s: s.info_level)
current_quantity = Int(32, value=0, option=lambda s: s.info_level)
slots = ObjArray(bitwidth=(('flags', 16),
('part_sub_id', 8),
('quantity', 8)),
length='slots_length',
option=lambda s: s.info_level)
name = CStr(option=lambda s: s.info_level)
class Type28(Struct):
""" New Turn Queue State """
type = 28
queue = ObjArray(length=None, head=None, bitwidth=(('quantity', 10),
('build_type', 6),
('unknown', 8),
('frac_complete', 8)))
class Type29(Struct):
""" Update Queue """
type = 29
planet_id = Int()
queue = ObjArray(length=None, head=None, bitwidth=(('quantity', 10),
('build_type', 6),
('unknown', 8),
('frac_complete', 8)))
class Type30(Struct):
""" Battle plans """
type = 30
id = Int(8)
flags = Int(8)
u1 = Int(4, option=lambda s: not (s.flags & 64))
u2 = Int(4, option=lambda s: not (s.flags & 64))
u3 = Int(4, option=lambda s: not (s.flags & 64))
u4 = Int(4, option=lambda s: not (s.flags & 64))
name = CStr(option=lambda s: not (s.flags & 64))
class Type37(Struct):
""" Merge Fleet """
type = 37
fleets = Array(bitwidth=16, head=None, length=None)
class Type40(Struct):
""" In-game messages """
type = 40
unknown1 = Int(32)
sender = Int()
receiver = Int()
unknown2 = Int()
text = CStr(16)
class Type43(Struct):
""" Minefields / Debris / Mass Packets / Wormholes / Mystery Trader """
type = 43
# optional sizes: 2, 4, and 18
quantity = Int(option=lambda s: s.file.type in ('hst', 'm') and
s._vars._seq == 0)
index = Int(9, option=lambda s: s.quantity is None)
owner = Int(4, option=lambda s: s.quantity is None)
misc_type = Int(3, max=3, option=lambda s: s.quantity is None)
detonate = Int(option=filetypes('x'))
x = Int(option=lambda s: s.quantity is None and s.detonate is None)
y = Int(option=lambda s: s.quantity is None and s.detonate is None)
## minefields
num_mines = Int(option=lambda s: s.detonate is None and s.misc_type == 0)
zero1 = Int(value=0, option=lambda s: s.detonate is None
and s.misc_type == 0)
flags_mf = Array(length=6, option=lambda s: s.detonate is None
and s.misc_type == 0)
## end minefields
## debris / mass packets
dest_planet_id = Int(10, option=lambda s: s.detonate is None
and s.misc_type == 1)
unknown_mp = Int(6, option=lambda s: s.detonate is None
and s.misc_type == 1)
mass_ir = Int(option=lambda s: s.detonate is None and s.misc_type == 1)
mass_bo = Int(option=lambda s: s.detonate is None and s.misc_type == 1)
mass_ge = Int(option=lambda s: s.detonate is None and s.misc_type == 1)
flags_mp = Int(option=lambda s: s.detonate is None and s.misc_type == 1)
## end debris / mass packets
## wormholes
flags_wh = Array(length=10, option=lambda s: s.detonate is None
and s.misc_type == 2)
## end wormholes
## mystery trader
x_end = Int(option=lambda s: s.detonate is None and s.misc_type == 3)
y_end = Int(option=lambda s: s.detonate is None and s.misc_type == 3)
warp = Int(4, option=lambda s: s.detonate is None and s.misc_type == 3)
unknown_mt1 = Int(12, value=1, option=lambda s: s.detonate is None
and s.misc_type == 3)
interceptions = Int(option=lambda s: s.detonate is None
and s.misc_type == 3)
unknown_mt2 = Int(option=lambda s: s.detonate is None and s.misc_type == 3)
## end mystery trader
previous_turn = Int(option=lambda s: s.quantity is None and
s.detonate is None)
class Type45(Struct):
""" Score data """
type = 45
player = Int(5)
unknown1 = Bool(value=True) # rare False?
f_owns_planets = Bool()
f_attains_tech = Bool()
f_exceeds_score = Bool()
f_exceeds_2nd = Bool()
f_production = Bool()
f_cap_ships = Bool()
f_high_score = Bool()
unknown2 = Bool(value=False) # rare True?
f_declared_winner = Bool()
unknown3 = Bool()
year = Int() # or rank in .m files
score = Int(32)
resources = Int(32)
planets = Int()
starbases = Int()
unarmed_ships = Int()
escort_ships = Int()
capital_ships = Int()
tech_levels = Int()
# class Type31(Struct):
# """ Battle Recording """
# type = 31
# battle_id = Int(8)
# unknown1 = Array(bitwidth=8, length=3)
# participant_bits = Int()
# total_len = Int()
# planet_id = Int()
# x = Int()
# y = Int()
# class Type38(Struct):
# """ Player Relations """
# type = 38
# relations = Array(length=lambda s: s.sfile.num_players)
| [
"[email protected]"
] | |
00006783d5e79988872b0772507bea6d8a61f0db | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/251/36331/submittedfiles/testes.py | 2d065c88525ae84222b04ffffd65216933bdb1e3 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | # -*- coding: utf-8 -*-
# START HERE BELOW
investimento = float(input('Digite o valor do investimento inicial: '))
taxa = float(input('Digite o valor da taxa anual (em decimais): '))
# print the compounded value of the investment for each of the next 10 years
for i in range(1, 10 + 1):
    renda = investimento + investimento * taxa
    print('%.2f' % renda)
    investimento = renda
| [
"[email protected]"
] | |
56bb280379109927a2e04eac8c3e377a33c399a9 | 3f1edc1a3b9f27c365b9c851d92abba7b1401c02 | /features_server/features_server.py | f780b5f6e54294a687e62896c08f1e7172801396 | [] | no_license | nakamoo/sumica | 278e137c8ac79f8d7d6743093b81e1466a89e26e | 302c72f283edc1f7953e224faf31d3f96bfe73e8 | refs/heads/master | 2021-03-22T02:15:42.215137 | 2018-02-13T13:22:55 | 2018-02-13T13:22:55 | 94,852,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,934 | py | import flask
import scipy.misc
import sys
import uuid
from flask import Flask, request
app = Flask(__name__)
import re
import os
import json
from skimage import io
import numpy as np
from io import BytesIO
import cv2
import PyOpenPose as OP
from concurrent.futures import ThreadPoolExecutor, wait
def init_pose():
op = OP.OpenPose((656, 368), (368, 368), (1280, 720), "COCO", "/home/sean/openpose/models/", 0, False,
OP.OpenPose.ScaleMode.ZeroToOne, True, True)
return op
# GPU conflict somehow goes away when using threads
pose_executor = ThreadPoolExecutor(1)
future = pose_executor.submit(init_pose)
wait([future])
op = future.result()
import detection_nn
from i3dnn import I3DNN
i3d = I3DNN("2")
datafiles_root = "../main_server/sumica/datafiles"
def format_image(image):
if len(image.shape) == 2: # grayscale -> 3 channels
image = np.expand_dims(image, 2)
image = np.repeat(image, 3, 2)
elif image.shape[2] > 3: # 4-channel -> 3-channels
image = image[:, :, :3]
elif image.shape[2] == 1: # single-channel -> 3-channelS
image = np.repeat(image, 3, 2)
return image
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
def preprocess(imgmat):
img_yuv = cv2.cvtColor(imgmat, cv2.COLOR_RGB2YUV)
img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2RGB)
return img_output
def nms(dets, iou_threshold=0.5):
    # process detections in descending confidence order so that the strongest
    # box of each label suppresses weaker overlapping ones
    sorted_list = sorted(dets, key=lambda k: k['confidence'], reverse=True)
    filtered_list = []
    for det in sorted_list:
        skip = False
        for b in filtered_list:
            if b["label"] == det["label"] and iou(b["box"], det["box"]) > iou_threshold:
                skip = True
                break
        if not skip:
            filtered_list.append(det)
    return filtered_list
def iou(boxA, boxB):
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# compute the area of intersection rectangle
interArea = (xB - xA + 1) * (yB - yA + 1)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
    # areas - the intersection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
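# Worked example (sketch): with boxes in (x1, y1, x2, y2) form,
#   iou([0, 0, 10, 10], [5, 0, 15, 10])
# gives an intersection of 6 * 11 = 66 (the "+ 1" above treats coordinates as
# inclusive pixels) and a union of 121 + 121 - 66 = 176, i.e. exactly 0.375.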
def pose_estimation(img):
op.detectPose(img)
op.detectFace(img)
op.detectHands(img)
body = op.getKeypoints(op.KeypointType.POSE)[0]
hand = op.getKeypoints(op.KeypointType.HAND)[0]
face = op.getKeypoints(op.KeypointType.FACE)[0]
new_data = {'body': [], 'hand': [], 'face': []}
if body is not None:
new_data['body'] = body.tolist()
if hand is not None:
new_data['hand'] = hand.tolist()
if face is not None:
new_data['face'] = face.tolist()
return new_data
def object_detection(imgmat, query):
# default settings
conf_thresh = 0.3
get_img_feats = False
get_obj_feats = False
get_obj_dets = True
if "detection_threshold" in query:
conf_thresh = float(query["detection_threshold"])
if "get_image_features" in query:
get_img_feats = query["get_image_features"] == "true"
if "get_object_detections" in query:
get_obj_dets = query["get_object_detections"] == "true"
if "get_object_features" in query:
get_obj_feats = query["get_object_features"] == "true"
only_img_feats = get_img_feats and not get_obj_feats and not get_obj_dets
# if only_img_feats, RCNN will only do region proposal step
out = detection_nn.detect(imgmat, conf_thresh, only_img_feats)
out_data = {}
img_feats, obj_dets, obj_feats = out
if get_img_feats:
fn = str(uuid.uuid4()) + ".npy"
feats = np.max(img_feats, axis=(0, 1)) # collapse feature maps into vector
np.save(datafiles_root + "/image_features/" + fn, feats)
out_data["image_features_filename"] = fn
if get_obj_feats:
fn = str(uuid.uuid4()) + ".npy"
obj_feats = np.array(obj_feats)
np.save(datafiles_root + "/object_features/" + fn, obj_feats)
out_data["object_features_filename"] = fn
if get_obj_dets:
if "nms_threshold" in query:
out_data["detections"] = nms(obj_dets, float(query["nms_threshold"]))
else:
out_data["detections"] = obj_dets
return out_data
def action_recognition(whole_img, data):
for i in range(len(data["detections"])):
if data["detections"][i]["label"] == "person":
box = data["detections"][i]["box"]
im_w, im_h = whole_img.shape[1], whole_img.shape[0]
box_w, box_h = (box[2] - box[0]), (box[3] - box[1])
# expand
cx, cy = (box[0] + box[2]) // 2, (box[1] + box[3]) // 2
longer_side = max(box_w, box_h) * 2.0
constrained_side = min(min(im_w, im_h), longer_side)
a = constrained_side / 2.0
x1, y1, x2, y2 = cx - a, cy - a, cx + a, cy + a
if x1 < 0:
x2 -= x1
x1 = 0
if y1 < 0:
y2 -= y1
y1 = 0
if x2 >= im_w:
x1 -= x2 - im_w
x2 = im_w
if y2 >= im_h:
y1 -= y2 - im_h
y2 = im_h
x1, y1, x2, y2 = map(int, [x1, y1, x2, y2])
crop = whole_img[y1:y2, x1:x2, :]
crop = cv2.resize(crop, (224, 224))
img = np.repeat(crop[None, None, :], 10, axis=1)
prob, logits, label, feats = i3d.process_image(img)
det = data["detections"][i]
updates = {}
updates["action_label"] = label
updates["action_confidence"] = float(prob)
updates["action_crop"] = [x1, y1, x2, y2]
updates["action_vector"] = feats
det.update(updates)
# a = persons.index(i)
# if pose_indices[a] is not None:
# updates["detections.{}.pose_body_index".format(i)] = pose_indices[a]
return data
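# Crop-expansion sketch (numbers assumed for illustration): for a person box
# (100, 200, 160, 360) in a 1280x720 frame, twice the longer side is 320, so
# the square window around the box centre (130, 280) starts at
# (-30, 120, 290, 440); the clamping above shifts it to (0, 120, 320, 440)
# before the 224x224 resize that feeds the I3D model.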
@app.route('/extract_features', methods=["POST"])
def extract_features():
query = request.form.to_dict()
imgmat = format_image(io.imread(query["path"]))
out_data = object_detection(imgmat, query)
future = pose_executor.submit(pose_estimation, (imgmat))
wait([future])
pose_data = future.result()
out_data["pose"] = pose_data
out_data = action_recognition(imgmat, out_data)
return json.dumps(out_data)
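# Example client call (sketch; uses the form fields consumed above, host and
# image path are assumptions):
#
#   import requests
#   requests.post("http://localhost:5002/extract_features",
#                 data={"path": "/tmp/frame.jpg",
#                       "detection_threshold": "0.4",
#                       "get_image_features": "true"})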
if __name__ == "__main__":
app.run(host='0.0.0.0', threaded=False, use_reloader=False, debug=False, port=5002)
| [
"[email protected]"
] | |
8b2e65993a3863ac9ac5c852480122cca60b8959 | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/train/python/af6282605186f294d7c47dcace86864f4f872c6egenDb.py | af6282605186f294d7c47dcace86864f4f872c6e | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 1,241 | py | from review.models import *
def run():
n1= node_user(fb_id="34",user_name="joe")
n1.save()
n2= node_user(fb_id="35",user_name="sam")
n2.save()
n3= node_user(fb_id="36",user_name="john")
n3.save()
n4= node_user(fb_id="37",user_name="jeff")
n4.save()
n5= node_user(fb_id="38",user_name="tom")
n5.save()
n6= node_user(fb_id="39",user_name="ravi")
n6.save()
n7= node_user(fb_id="40",user_name="lucky")
n7.save()
edge_friend(node_user_1=n1,node_user_2= n2).save()
edge_friend(node_user_1=n2,node_user_2= n1).save()
edge_friend(node_user_1=n1,node_user_2=n3).save()
edge_friend(node_user_1=n3,node_user_2=n1).save()
edge_friend(node_user_1=n4,node_user_2=n1).save()
edge_friend(node_user_1=n1,node_user_2=n4).save()
edge_friend(node_user_1=n2,node_user_2=n5).save()
edge_friend(node_user_1=n5,node_user_2=n2).save()
reviews(product_id=1234,user_id=n2,comment="ABC",rating=2).save()
reviews(product_id=1234,user_id=n3,comment="DEF",rating=3).save()
reviews(product_id=1234,user_id=n4,comment="GHI",rating=4).save()
reviews(product_id=1234,user_id=n5,comment="JKL",rating=8).save()
reviews(product_id=1234,user_id=n6,comment="MNO",rating=6).save()
reviews(product_id=1234,user_id=n7,comment="PQR",rating=9).save()
| [
"[email protected]"
] | |
b5391465dd3262d455b496d8d0456ca778bfd174 | 52ad58b5412f9124822283d168391e5e2b8fa150 | /Linux/Linux命令介绍04.py | 2a62916447e0b8dbefa96a704151ee2d5f9deddc | [] | no_license | JiangHuYiXiao/PythonStudy | 69ad9795faaf24a6166ab21cae564f6461e1363e | aeebce2cacbf3757d25c8c4d24d15639e0bb8e37 | refs/heads/master | 2021-08-17T16:54:43.477502 | 2021-08-11T01:06:11 | 2021-08-11T01:06:11 | 153,078,386 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,201 | py | # -*- coding:utf-8 -*-
# @Author : 江湖一笑
# @Time : 2020/9/11 8:40
# @Software : Python_study
# @Python_version : 3.7
'''
The command prompt
After logging in to the system, the first thing you see is:
[root@localhost ~]#
This is the Linux command prompt. So what does this prompt mean?
[]: the delimiters of the prompt; they have no special meaning.
root: the currently logged-in user; here we are logged in as root.
@: a separator with no special meaning.
localhost: the short hostname of the current system (the full hostname is localhost.localdomain).
~: the directory the user is currently in; in this example it is the home directory.
#: the prompt character. Linux uses it to mark the privilege level of the logged-in user: for the superuser the prompt is #, and for an ordinary user it is $.
What is the home directory (also called the main directory)? Linux is a pure character interface, so after logging in a user needs an initial location, and this initial location is called the user's home:
The superuser's home directory: /root.
An ordinary user's home directory: /home/username.
A user has full permissions inside their own home directory, so we also recommend doing the exercises there. Let's switch the current directory and see what happens.
[root@localhost ~]# cd /usr/local
[root@localhost local]#
Look closely: after changing directory, the prompt shows only the last component of the current directory (it does not show the full path /usr/local, only the last directory, local).
Basic command format
Next, let's look at the basic format of a Linux command:
[root@localhost ~]# command [options] [parameters]
The [] in the format marks optional parts, meaning some commands can run without options or parameters. We will use the most common Linux command, ls, to explain the format (the details of ls are covered in later chapters). By category, ls is a directory-operation command.
[root@localhost ~]# ls
anaconda-ks.cfg install.log install.log.syslog
1) What options do
ls runs even without options and parameters, but then it only performs its most basic function, i.e. listing the file names in the current directory. What happens if we add an option?
[root@localhost ~]# ls -l
total 44
-rw-------.1 root root 1207 Jan 14 18:18 anaconda-ks.cfg
-rw-r--r--.1 root root 24772 Jan 14 18:17 install.log
-rw-r--r--.1 root root 7690 Jan 14 18:17 install.log.syslog
With the "-l" option you can see that much more is displayed. "-l" means long list, i.e. show the files' detailed information. We will explain the exact meaning of "-l" later. As you can see, options adjust what a command does: without options a command only performs its most basic function, while with options it can display much richer data.
Linux options come in a short format (-l) and a long format (--all). Short options are abbreviations invoked with a single dash, for example:
[root@localhost ~]# ls -l
Long options are full English words, usually invoked with two dashes, for example:
[root@localhost ~]# ls --all
Usually a short option is the abbreviation of a long option, i.e. a short option has a corresponding long option. There are of course exceptions; for example, the short option -l of ls has no corresponding long option. The exact options of a command can therefore be looked up with the help commands we will study later.
2) What parameters do
Parameters are the objects a command operates on; files, directories, users, processes and so on can all be passed as parameters for a command to act on. For example:
[root@localhost ~]# ls -l anaconda-ks.cfg
-rw-------.1 root root 1207 Jan 14 18:18 anaconda-ks.cfg
But why could ls omit its parameter at the start? Because it has a default parameter. A command generally needs a parameter to specify what it operates on; when the parameter can be omitted, there is usually a default. For example:
[root@localhost ~]# ls
anaconda-ks.cfg install.log install.log.syslog
This ls specifies no parameter; the default parameter is the current location, so it lists the file names in the current directory.
''' | [
"[email protected]"
] | |
2012b74d2ce14fa5c56da7a2de113423caeae59d | 0aaf6ce59d305428611958a5bf6a5831407bca65 | /advisor_server/suggestion/early_stop_algorithm/early_stop_descending.py | 6f824fe160698a53bf830074eb354aa93ad923bd | [
"Apache-2.0"
] | permissive | mlaradji/advisor | d770043a5307af1037cad6be1c449d541acf87b0 | 8ec0f8b64809daa80a20d717b4e45ad9fbcadbb0 | refs/heads/master | 2023-05-26T05:59:50.169748 | 2018-10-18T10:34:42 | 2018-10-18T10:34:42 | 154,219,666 | 0 | 0 | Apache-2.0 | 2023-04-29T17:00:36 | 2018-10-22T21:27:59 | Jupyter Notebook | UTF-8 | Python | false | false | 989 | py | import json
from suggestion.models import Study
from suggestion.models import TrialMetric
from suggestion.early_stop_algorithm.abstract_early_stop import AbstractEarlyStopAlgorithm
class EarlyStopDescendingAlgorithm(AbstractEarlyStopAlgorithm):
def get_early_stop_trials(self, trials):
result = []
for trial in trials:
study = Study.objects.get(name=trial.study_name)
study_configuration_json = json.loads(study.study_configuration)
study_goal = study_configuration_json["goal"]
metrics = TrialMetric.objects.filter(
trial_id=trial.id).order_by("-training_step")
metrics = [metric for metric in metrics]
if len(metrics) >= 2:
if study_goal == "MAXIMIZE":
if metrics[0].objective_value < metrics[1].objective_value:
result.append(trial)
elif study_goal == "MINIMIZE":
if metrics[0].objective_value > metrics[1].objective_value:
result.append(trial)
return result
| [
"[email protected]"
] | |
d4d40b998c742dd9fa7449d997840d297270fb5c | 909b118604b756f46694c3caa02015b4e9a4affd | /oscar/apps/offer/benefits.py | baf6085a6a9ccf6a87fbd1976865a06b6444f5ff | [] | no_license | LivinSam/hairexpo | 808e1b6710f36eb6f9682421daffcaa737e6864e | 50c5ac05c768379546a159a43bf8843346183200 | refs/heads/master | 2021-04-09T13:03:24.504240 | 2018-03-20T09:56:18 | 2018-03-20T09:56:18 | 125,603,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,201 | py | from decimal import Decimal as D
from django.utils.translation import ugettext_lazy as _
from oscar.core.loading import get_class, get_classes, get_model
from oscar.templatetags.currency_filters import currency
Benefit = get_model('offer', 'Benefit')
BasketDiscount, SHIPPING_DISCOUNT, ZERO_DISCOUNT = get_classes('offer.results', [
'BasketDiscount', 'SHIPPING_DISCOUNT', 'ZERO_DISCOUNT'])
CoverageCondition, ValueCondition = get_classes('offer.conditions', ['CoverageCondition', 'ValueCondition'])
range_anchor = get_class('offer.utils', 'range_anchor')
__all__ = [
'PercentageDiscountBenefit', 'AbsoluteDiscountBenefit', 'FixedPriceBenefit',
'ShippingBenefit', 'MultibuyDiscountBenefit',
'ShippingAbsoluteDiscountBenefit', 'ShippingFixedPriceBenefit',
'ShippingPercentageDiscountBenefit',
]
def apply_discount(line, discount, quantity, offer=None):
"""
    Apply a given discount to the passed basket line
"""
line.discount(discount, quantity, incl_tax=False, offer=offer)
class PercentageDiscountBenefit(Benefit):
"""
An offer benefit that gives a percentage discount
"""
_description = _("%(value)s%% discount on %(range)s")
@property
def name(self):
return self._description % {
'value': self.value,
'range': self.range.name}
@property
def description(self):
return self._description % {
'value': self.value,
'range': range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Percentage discount benefit")
verbose_name_plural = _("Percentage discount benefits")
def apply(self, basket, condition, offer, discount_percent=None,
max_total_discount=None):
if discount_percent is None:
discount_percent = self.value
discount_amount_available = max_total_discount
line_tuples = self.get_applicable_lines(offer, basket)
discount_percent = min(discount_percent, D('100.0'))
discount = D('0.00')
affected_items = 0
max_affected_items = self._effective_max_affected_items()
affected_lines = []
for price, line in line_tuples:
if affected_items >= max_affected_items:
break
if discount_amount_available == 0:
break
quantity_affected = min(
line.quantity_without_offer_discount(offer),
max_affected_items - affected_items)
line_discount = self.round(discount_percent / D('100.0') * price
* int(quantity_affected))
if discount_amount_available is not None:
line_discount = min(line_discount, discount_amount_available)
discount_amount_available -= line_discount
apply_discount(line, line_discount, quantity_affected, offer)
affected_lines.append((line, line_discount, quantity_affected))
affected_items += quantity_affected
discount += line_discount
if discount > 0:
condition.consume_items(offer, basket, affected_lines)
return BasketDiscount(discount)
class AbsoluteDiscountBenefit(Benefit):
"""
An offer benefit that gives an absolute discount
"""
_description = _("%(value)s discount on %(range)s")
@property
def name(self):
return self._description % {
'value': currency(self.value),
'range': self.range.name.lower()}
@property
def description(self):
return self._description % {
'value': currency(self.value),
'range': range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Absolute discount benefit")
verbose_name_plural = _("Absolute discount benefits")
def apply(self, basket, condition, offer, discount_amount=None,
max_total_discount=None):
if discount_amount is None:
discount_amount = self.value
# Fetch basket lines that are in the range and available to be used in
# an offer.
line_tuples = self.get_applicable_lines(offer, basket)
# Determine which lines can have the discount applied to them
max_affected_items = self._effective_max_affected_items()
num_affected_items = 0
affected_items_total = D('0.00')
lines_to_discount = []
for price, line in line_tuples:
if num_affected_items >= max_affected_items:
break
qty = min(
line.quantity_without_offer_discount(offer),
max_affected_items - num_affected_items)
lines_to_discount.append((line, price, qty))
num_affected_items += qty
affected_items_total += qty * price
# Ensure we don't try to apply a discount larger than the total of the
# matching items.
discount = min(discount_amount, affected_items_total)
if max_total_discount is not None:
discount = min(discount, max_total_discount)
if discount == 0:
return ZERO_DISCOUNT
# Apply discount equally amongst them
affected_lines = []
applied_discount = D('0.00')
for i, (line, price, qty) in enumerate(lines_to_discount):
if i == len(lines_to_discount) - 1:
# If last line, then take the delta as the discount to ensure
# the total discount is correct and doesn't mismatch due to
# rounding.
line_discount = discount - applied_discount
else:
# Calculate a weighted discount for the line
line_discount = self.round(
((price * qty) / affected_items_total) * discount)
apply_discount(line, line_discount, qty, offer)
affected_lines.append((line, line_discount, qty))
applied_discount += line_discount
condition.consume_items(offer, basket, affected_lines)
return BasketDiscount(discount)
class FixedPriceBenefit(Benefit):
"""
An offer benefit that gives the items in the condition for a
fixed price. This is useful for "bundle" offers.
Note that we ignore the benefit range here and only give a fixed price
for the products in the condition range. The condition cannot be a value
condition.
We also ignore the max_affected_items setting.
"""
_description = _("The products that meet the condition are sold "
"for %(amount)s")
@property
def name(self):
return self._description % {
'amount': currency(self.value)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Fixed price benefit")
verbose_name_plural = _("Fixed price benefits")
def apply(self, basket, condition, offer): # noqa (too complex (10))
if isinstance(condition, ValueCondition):
return ZERO_DISCOUNT
# Fetch basket lines that are in the range and available to be used in
# an offer.
line_tuples = self.get_applicable_lines(offer, basket,
range=condition.range)
if not line_tuples:
return ZERO_DISCOUNT
# Determine the lines to consume
num_permitted = int(condition.value)
num_affected = 0
value_affected = D('0.00')
covered_lines = []
for price, line in line_tuples:
if isinstance(condition, CoverageCondition):
quantity_affected = 1
else:
quantity_affected = min(
line.quantity_without_offer_discount(offer),
num_permitted - num_affected)
num_affected += quantity_affected
value_affected += quantity_affected * price
covered_lines.append((price, line, quantity_affected))
if num_affected >= num_permitted:
break
discount = max(value_affected - self.value, D('0.00'))
if not discount:
return ZERO_DISCOUNT
# Apply discount to the affected lines
discount_applied = D('0.00')
last_line = covered_lines[-1][1]
for price, line, quantity in covered_lines:
if line == last_line:
# If last line, we just take the difference to ensure that
# rounding doesn't lead to an off-by-one error
line_discount = discount - discount_applied
else:
line_discount = self.round(
discount * (price * quantity) / value_affected)
apply_discount(line, line_discount, quantity, offer)
discount_applied += line_discount
return BasketDiscount(discount)
class MultibuyDiscountBenefit(Benefit):
_description = _("Cheapest product from %(range)s is free")
@property
def name(self):
return self._description % {
'range': self.range.name.lower()}
@property
def description(self):
return self._description % {
'range': range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Multibuy discount benefit")
verbose_name_plural = _("Multibuy discount benefits")
def apply(self, basket, condition, offer):
line_tuples = self.get_applicable_lines(offer, basket)
if not line_tuples:
return ZERO_DISCOUNT
# Cheapest line gives free product
discount, line = line_tuples[0]
apply_discount(line, discount, 1, offer)
affected_lines = [(line, discount, 1)]
condition.consume_items(offer, basket, affected_lines)
return BasketDiscount(discount)
# =================
# Shipping benefits
# =================
class ShippingBenefit(Benefit):
def apply(self, basket, condition, offer):
condition.consume_items(offer, basket, affected_lines=())
return SHIPPING_DISCOUNT
class Meta:
app_label = 'offer'
proxy = True
class ShippingAbsoluteDiscountBenefit(ShippingBenefit):
_description = _("%(amount)s off shipping cost")
@property
def name(self):
return self._description % {
'amount': currency(self.value)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Shipping absolute discount benefit")
verbose_name_plural = _("Shipping absolute discount benefits")
def shipping_discount(self, charge):
return min(charge, self.value)
class ShippingFixedPriceBenefit(ShippingBenefit):
_description = _("Get shipping for %(amount)s")
@property
def name(self):
return self._description % {
'amount': currency(self.value)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Fixed price shipping benefit")
verbose_name_plural = _("Fixed price shipping benefits")
def shipping_discount(self, charge):
if charge < self.value:
return D('0.00')
return charge - self.value
class ShippingPercentageDiscountBenefit(ShippingBenefit):
_description = _("%(value)s%% off of shipping cost")
@property
def name(self):
return self._description % {
'value': self.value}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Shipping percentage discount benefit")
verbose_name_plural = _("Shipping percentage discount benefits")
def shipping_discount(self, charge):
discount = charge * self.value / D('100.0')
return discount.quantize(D('0.01'))
| [
"[email protected]"
] | |
2ff11310059b2cc48f811548c17c1873d63feae0 | abad4b3101e46e0d8002f9b5796a3c32e958fd10 | /Demo/demo/settings.py | a84aed7b9b3bc62bb44c45d1c30c24332708895d | [] | no_license | Twishar/aiohttp | 9ffec2f7d1431943780ac56c46fa140a589961da | e9bc0a8447f9792767ae1b93e15db22875ed114d | refs/heads/master | 2020-03-22T03:35:47.398634 | 2019-03-19T15:33:17 | 2019-03-19T15:33:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py |
import yaml
from pathlib import Path
__all__ = ('load_config',)
def load_config(config_file=None):
default_file = Path(__file__).parent / 'config.yaml'
with open(default_file, 'r') as f:
config = yaml.safe_load(f)
cf_dict = {}
if config_file:
cf_dict = yaml.safe_load(config_file)
config.update(**cf_dict)
return config
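# Hedged usage sketch (added for illustration; the override file name is a
# hypothetical placeholder):
#
#     config = load_config()                      # bundled defaults from config.yaml
#     with open('override.yaml') as f:
#         config = load_config(config_file=f)     # defaults updated by the override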
| [
"[email protected]"
] | |
d7177e48fc24bdf79a2b8548d5f909d8e2eb3b7a | bc441bb06b8948288f110af63feda4e798f30225 | /collector_service_sdk/model/pipeline/build_pb2.py | 012f8fc42bd1099fd303451a934b735197599d5d | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 9,089 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: build.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from collector_service_sdk.model.pipeline import git_meta_pb2 as collector__service__sdk_dot_model_dot_pipeline_dot_git__meta__pb2
from collector_service_sdk.model.pipeline import build_status_pb2 as collector__service__sdk_dot_model_dot_pipeline_dot_build__status__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='build.proto',
package='pipeline',
syntax='proto3',
serialized_options=_b('ZBgo.easyops.local/contracts/protorepo-models/easyops/model/pipeline'),
serialized_pb=_b('\n\x0b\x62uild.proto\x12\x08pipeline\x1a\x33\x63ollector_service_sdk/model/pipeline/git_meta.proto\x1a\x37\x63ollector_service_sdk/model/pipeline/build_status.proto\"\xcc\x02\n\x05\x42uild\x12\n\n\x02id\x18\x01 \x01(\t\x12#\n\x08git_meta\x18\x02 \x01(\x0b\x32\x11.pipeline.GitMeta\x12\x0e\n\x06sender\x18\x03 \x01(\t\x12*\n\x08\x61rtifact\x18\x04 \x01(\x0b\x32\x18.pipeline.Build.Artifact\x12\x0f\n\x07\x63reated\x18\x05 \x01(\x05\x12\x13\n\x0byaml_string\x18\x06 \x01(\t\x12%\n\x06status\x18\x07 \x01(\x0b\x32\x15.pipeline.BuildStatus\x12\x0e\n\x06number\x18\x08 \x01(\t\x12\x0e\n\x06\x65vents\x18\t \x03(\t\x1ai\n\x08\x41rtifact\x12\x13\n\x0bpackageName\x18\x01 \x01(\t\x12\x13\n\x0bversionName\x18\x02 \x01(\t\x12\r\n\x05\x63time\x18\x03 \x01(\t\x12\x11\n\tpackageId\x18\x04 \x01(\t\x12\x11\n\tversionId\x18\x05 \x01(\tBDZBgo.easyops.local/contracts/protorepo-models/easyops/model/pipelineb\x06proto3')
,
dependencies=[collector__service__sdk_dot_model_dot_pipeline_dot_git__meta__pb2.DESCRIPTOR,collector__service__sdk_dot_model_dot_pipeline_dot_build__status__pb2.DESCRIPTOR,])
_BUILD_ARTIFACT = _descriptor.Descriptor(
name='Artifact',
full_name='pipeline.Build.Artifact',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='packageName', full_name='pipeline.Build.Artifact.packageName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionName', full_name='pipeline.Build.Artifact.versionName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ctime', full_name='pipeline.Build.Artifact.ctime', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageId', full_name='pipeline.Build.Artifact.packageId', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionId', full_name='pipeline.Build.Artifact.versionId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=363,
serialized_end=468,
)
_BUILD = _descriptor.Descriptor(
name='Build',
full_name='pipeline.Build',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pipeline.Build.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='git_meta', full_name='pipeline.Build.git_meta', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sender', full_name='pipeline.Build.sender', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='artifact', full_name='pipeline.Build.artifact', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='created', full_name='pipeline.Build.created', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='yaml_string', full_name='pipeline.Build.yaml_string', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='pipeline.Build.status', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='number', full_name='pipeline.Build.number', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='events', full_name='pipeline.Build.events', index=8,
number=9, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_BUILD_ARTIFACT, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=136,
serialized_end=468,
)
_BUILD_ARTIFACT.containing_type = _BUILD
_BUILD.fields_by_name['git_meta'].message_type = collector__service__sdk_dot_model_dot_pipeline_dot_git__meta__pb2._GITMETA
_BUILD.fields_by_name['artifact'].message_type = _BUILD_ARTIFACT
_BUILD.fields_by_name['status'].message_type = collector__service__sdk_dot_model_dot_pipeline_dot_build__status__pb2._BUILDSTATUS
DESCRIPTOR.message_types_by_name['Build'] = _BUILD
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Build = _reflection.GeneratedProtocolMessageType('Build', (_message.Message,), {
'Artifact' : _reflection.GeneratedProtocolMessageType('Artifact', (_message.Message,), {
'DESCRIPTOR' : _BUILD_ARTIFACT,
'__module__' : 'build_pb2'
# @@protoc_insertion_point(class_scope:pipeline.Build.Artifact)
})
,
'DESCRIPTOR' : _BUILD,
'__module__' : 'build_pb2'
# @@protoc_insertion_point(class_scope:pipeline.Build)
})
_sym_db.RegisterMessage(Build)
_sym_db.RegisterMessage(Build.Artifact)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
929356f001a36d06a80acbe8aba1e2c37beeae02 | fd62d8096dc95923341cfac29f0209bfbea887b4 | /models_evaluation/xgboost/grid_search/jobs_test/5.0_0.07_0.0_10.0_10.0.job.py | 6972e9e01ee719fd52a79035172c6ae1a7271bd2 | [] | no_license | Eulerianial/premise-selection-deepmath-style | 06c8f2f540bc7e3840c6db0a66c5b30b5f4257f9 | 8684a59b5d8beab1d02a3a7c568a16c790ea4b45 | refs/heads/master | 2021-07-17T17:04:13.472687 | 2017-10-25T13:54:44 | 2017-10-25T13:54:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | py | import xgboost as xgb
import argparse
import sys
import os
from saving_loading import *
#####################################
p = {
"max_depth":int(5.0),
"eta":0.07,
"gamma":0.0,
"num_boost_round":int(10.0),
"early_stopping_rounds":int(10.0)
}
#####################################
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run CV for xgboost with particular combination of parameters')
parser.add_argument("X",
help = "path to CSR matrix with features of pairs (theorem, premise)")
parser.add_argument("y",
help = "path to CSV file with labels reflecting relevances of pairs (theorem, premise)")
parser.add_argument("output_directory",
help = "path to directory where performance of tested model should be saved")
args = parser.parse_args()
y = read_csv(os.path.abspath(args.y), type_of_records = "int")
X = load_obj(os.path.abspath(args.X))
output_directory = os.path.abspath(args.output_directory)
dtrain = xgb.DMatrix(X, label = y)
params = {
"max_depth":p["max_depth"],
"eta":p["eta"],
"gamma":p["gamma"],
"objective":"binary:logistic"
}
x = xgb.cv(
params = params,
dtrain = dtrain,
num_boost_round = p["num_boost_round"],
early_stopping_rounds = p["early_stopping_rounds"],
nfold = 4,
metrics = {"error","auc","logloss"}
)
output_name = os.path.join(output_directory, "_".join(map(str, list(p.values())))+".pkl")
save_obj({"params":p, "stats":x}, output_name)
| [
"[email protected]"
] | |
b877783770fb5ebffef4b657a4b127ada97799b4 | 64e24096ab40259cea27d431dce0814bc58597e2 | /src/pymor/discretizers/fenics/cg.py | 0f0cdaa80a93a228707624d1669589420e33ced8 | [
"BSD-2-Clause"
] | permissive | lbalicki/pymor | ea657d25d141895a40345533460543b27b79c6f0 | e99f260097bd1db0eeb26102cdef8c672b3c9868 | refs/heads/main | 2023-03-18T18:35:38.993176 | 2022-08-26T06:30:33 | 2022-08-26T06:30:33 | 228,632,806 | 0 | 1 | NOASSERTION | 2019-12-17T14:20:50 | 2019-12-17T14:20:49 | null | UTF-8 | Python | false | false | 7,419 | py | # This file is part of the pyMOR project (https://www.pymor.org).
# Copyright pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
from pymor.core.config import config
config.require('FENICS')
import dolfin as df
from pymor.algorithms.preassemble import preassemble as preassemble_
from pymor.analyticalproblems.elliptic import StationaryProblem
from pymor.analyticalproblems.functions import LincombFunction
from pymor.bindings.fenics import FenicsVectorSpace, FenicsMatrixBasedOperator, FenicsVisualizer
from pymor.discretizers.fenics.domaindiscretizer import discretize_domain
from pymor.models.basic import StationaryModel
from pymor.operators.constructions import LincombOperator, NumpyConversionOperator
from pymor.operators.block import BlockColumnOperator
def discretize_stationary_cg(analytical_problem, diameter=None, degree=1, preassemble=True):
"""Discretizes a |StationaryProblem| with finite elements using FEniCS.
Parameters
----------
analytical_problem
The |StationaryProblem| to discretize.
diameter
If not `None`, `diameter` is passed as an argument to the `domain_discretizer`.
degree
polynomial degree of the finite element.
preassemble
If `True`, preassemble all operators in the resulting |Model|.
Returns
-------
m
The |Model| that has been generated.
data
Dictionary with the following entries:
:mesh: The generated dolfin mesh object.
:boundary_mask: Codim-1 `MeshFunctionSizet` indicating which boundary type a
bundary facet belongs to.
:boundary_ids: Dict mapping boundary types to ids used in `boundary_mask`.
:unassembled_m: In case `preassemble` is `True`, the generated |Model|
before preassembling operators.
"""
assert isinstance(analytical_problem, StationaryProblem)
p = analytical_problem
if p.diffusion is not None and not p.diffusion.shape_range == ():
raise NotImplementedError
if p.nonlinear_advection is not None:
raise NotImplementedError
if p.nonlinear_advection_derivative is not None:
raise NotImplementedError
if p.nonlinear_reaction is not None:
raise NotImplementedError
if p.nonlinear_reaction_derivative is not None:
raise NotImplementedError
if not p.domain.boundary_types <= {'dirichlet', 'neumann'}:
raise NotImplementedError
if p.dirichlet_data is not None and p.dirichlet_data.parametric:
raise NotImplementedError
mesh, (boundary_mask, boundary_ids) = discretize_domain(p.domain, diameter=diameter)
V = df.FunctionSpace(mesh, 'Lagrange', degree)
bc = df.DirichletBC(V, 0. if p.dirichlet_data is None else p.dirichlet_data.to_fenics(mesh)[0].item(),
boundary_mask, boundary_ids['dirichlet'])
u = df.TrialFunction(V)
v = df.TestFunction(V)
dx, ds = df.dx, df.ds
Li = [FenicsMatrixBasedOperator(df.Constant(0.)*u*v*dx, {}, bc, bc_zero=False, name='boundary_part')]
coefficients = [1.]
_assemble_operator(p.diffusion, lambda c: df.inner(c.item() * df.grad(u), df.grad(v)) * dx,
mesh, bc, True, 'diffusion',
Li, coefficients)
_assemble_operator(
p.advection, lambda c: u * sum(ci * gi for ci, gi in zip(c, df.grad(v))) * dx,
mesh, bc, True, 'advection',
Li, coefficients
)
_assemble_operator(
p.reaction, lambda c: c * u * v * dx,
mesh, bc, True, 'reaction',
Li, coefficients,
)
L = LincombOperator(operators=Li, coefficients=coefficients, name='ellipticOperator')
# right-hand side
Fi = []
coefficients_F = []
_assemble_operator(p.rhs, lambda c: c.item() * v * dx,
mesh, bc, False, 'rhs',
Fi, coefficients_F)
if p.neumann_data is not None and p.domain.has_neumann:
_assemble_operator(
p.neumann_data, lambda c: c.item() * v * ds,
mesh, bc, False, 'neumann',
Fi, coefficients_F, negative=True
)
F = LincombOperator(operators=Fi, coefficients=coefficients_F, name='rhsOperator')
h1_0_semi_product = FenicsMatrixBasedOperator(df.inner(df.grad(u), df.grad(v))*dx, {}, bc, bc_zero=False,
name='h1_0_semi')
l2_product = FenicsMatrixBasedOperator(u*v*dx, {}, name='l2')
h1_semi_product = FenicsMatrixBasedOperator(df.inner(df.grad(u), df.grad(v))*dx, {}, bc, bc_zero=False,
name='h1_0_semi')
h1_product = l2_product + h1_semi_product
products = {
'l2': l2_product,
'h1_semi': h1_0_semi_product,
'h1': h1_product,
'h1_0_semi': h1_0_semi_product,
}
if p.outputs:
if any(o[0] not in ('l2', 'l2_boundary') for o in p.outputs):
raise NotImplementedError
outputs = []
for o in p.outputs:
if o[0] == 'l2':
outputs.append(
_assemble_operator(o[1], lambda c: c * v * dx, mesh,
functional=True, name='l2_output')
)
else:
outputs.append(
_assemble_operator(o[1], lambda c: c * v * ds, mesh,
functional=True, name='l2_boundary_output')
)
if len(outputs) > 1:
output_functional = BlockColumnOperator(outputs)
output_functional = NumpyConversionOperator(output_functional.range) @ output_functional
else:
output_functional = outputs[0]
else:
output_functional = None
m = StationaryModel(L, F, output_functional=output_functional, products=products,
visualizer=FenicsVisualizer(FenicsVectorSpace(V)),
name=f'{p.name}_CG')
data = {
'mesh': mesh,
'boundary_mask': boundary_mask,
'boundary_ids': boundary_ids,
'bc': bc,
}
if preassemble:
data['unassembled_m'] = m
m = preassemble_(m)
return m, data
def _assemble_operator(function, factory,
mesh, bc=None, bc_zero=None, name=None,
ops=None, coeffs=None,
negative=False, functional=False):
def assemble_op(f, name):
coeff, params = f.to_fenics(mesh)
return FenicsMatrixBasedOperator(factory(coeff), params,
bc=bc, bc_zero=bc_zero, functional=functional, name=name)
if isinstance(function, LincombFunction):
operators = [assemble_op(f, f'{name}_{i}') for i, f in enumerate(function.functions)]
cfs = [-c if negative else c for c in function.coefficients]
if ops is not None:
ops.extend(operators)
coeffs.extend(cfs)
else:
return LincombOperator(operators, cfs, name=name)
elif function is not None:
operator = assemble_op(function, name)
if ops is not None:
ops.append(operator)
coeffs.append(-1 if negative else 1.)
else:
return -operator if negative else operator
| [
"[email protected]"
] | |
a25c812d2cceaf4bdb7d6e95c7aeeb05abfe9817 | 53865bdc3b5e5bb26ecd40d30b66ad71de1081cc | /src/bed_gff_manipulation/filter_bed_to_fasta.py | 33d2602ba502ca600e516a540f906c2a190a9c7e | [
"MIT"
] | permissive | olgatsiouri1996/biomisc | a5477279ab53a5307ce026868fa77639b45a44af | e3709f566c5c93aec884558f1f2b620a1cf9792d | refs/heads/main | 2023-08-19T08:59:32.899646 | 2023-08-16T13:10:05 | 2023-08-16T13:10:05 | 300,590,735 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,213 | py | # python3
import sys
import argparse
from pyfaidx import Fasta
import pandas as pd
import warnings
# input parameters
ap = argparse.ArgumentParser()
ap.add_argument("-bed", "--bed", required=True, help="input bed file(made with bedops, every feature in the \'.gff\' or \'.gff3\' file should have an \'ID\' tag in the \'attributes\' column)")
ap.add_argument("-in", "--input", required=True, help="input fasta file")
ap.add_argument("-out", "--output", required=True, help="output fasta file")
ap.add_argument("-fea", "--feature", required=False, default='gene', type=str, help="specify the pattern to select the lines that have it. Default is \'gene\'")
ap.add_argument("-pro", "--program", required=False, default=1, type=int, help="program to choose: 1) filter the bed file before retrieving sequences, 2) do not filter. Default is 1")
args = vars(ap.parse_args())
# main
# create function to split the input sequence based on a specific number of characters(60)
def split_every_60(s): return [str(s)[i:i+60] for i in range(0,len(str(s)),60)]
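# Illustrative example (added): split_every_60('A' * 130) returns three chunks
# of lengths 60, 60 and 10, which are later joined with newlines to produce
# FASTA lines of at most 60 characters.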
# ignore warnings
warnings.filterwarnings('ignore')
# import bed with no headers specified
df = pd.read_csv(args['bed'], sep= "\t", header=None)
# choose program
if args['program'] == 1:
# select the rows containing the feature
bool2 = df.iloc[:, 7].str.contains(args['feature'])
df = df[bool2]
else:
pass
# convert each column to list
chrom = df.iloc[:,0].values.tolist()
start = df.iloc[:,1].values.tolist()
end = df.iloc[:,2].values.tolist()
ids = df.iloc[:,3].values.tolist()
strand = df.iloc[:,5].values.tolist()
# import fasta file
features = Fasta(args['input'])
# iterate all below lists in pairs
sys.stdout = open(args['output'], 'a')
for (a, b, c, d, e) in zip(ids, chrom, start, end, strand):
if str(e) == "+":
print(''.join([">",str(a)," ",str(b),":",str(int(c) + 1),"-",str(d)]).replace('\r', ''))
print('\n'.join(split_every_60(features[str(b)][int(c):int(d)].seq)))
else:
print(''.join([">",str(a)," ",str(b),":",str(int(c) + 1),"-",str(d)," ","reverse complement"]).replace('\r', ''))
print('\n'.join(split_every_60(features[str(b)][int(c):int(d)].reverse.complement.seq)))
sys.stdout.close()
| [
"[email protected]"
] | |
f0cd7cf24537cb5be88b18a99b4c9f72c7b130e8 | 0a1f8957a798006deaa53d10d09f733fab1e6b05 | /bin/Python27/Lib/site-packages/sympy/core/core.py | 4c2bf72e9018dc683d8f301c69a313158ff99217 | [
"LicenseRef-scancode-other-permissive"
] | permissive | metamorph-inc/meta-core | a89504ccb1ed2f97cc6e792ba52e3a6df349efef | bc7a05e04c7901f477fe553c59e478a837116d92 | refs/heads/master | 2023-03-07T02:52:57.262506 | 2023-03-01T18:49:49 | 2023-03-01T18:49:49 | 40,361,476 | 25 | 15 | NOASSERTION | 2023-01-13T16:54:30 | 2015-08-07T13:21:24 | Python | UTF-8 | Python | false | false | 3,764 | py | """ The core's core. """
from sympy.core.compatibility import cmp
# used for canonical ordering of symbolic sequences
# via __cmp__ method:
# FIXME this is *so* irrelevant and outdated!
ordering_of_classes = [
# singleton numbers
'Zero', 'One','Half','Infinity','NaN','NegativeOne','NegativeInfinity',
# numbers
'Integer','Rational','Float',
# singleton symbols
'Exp1','Pi','ImaginaryUnit',
# symbols
'Symbol','Wild','Temporary',
# arithmetic operations
'Pow', 'Mul', 'Add',
# function values
'Derivative','Integral',
# defined singleton functions
'Abs','Sign','Sqrt',
'Floor', 'Ceiling',
'Re', 'Im', 'Arg',
'Conjugate',
'Exp','Log',
'Sin','Cos','Tan','Cot','ASin','ACos','ATan','ACot',
'Sinh','Cosh','Tanh','Coth','ASinh','ACosh','ATanh','ACoth',
'RisingFactorial','FallingFactorial',
'factorial','binomial',
'Gamma','LowerGamma','UpperGamma','PolyGamma',
'Erf',
# special polynomials
'Chebyshev','Chebyshev2',
# undefined functions
'Function','WildFunction',
# anonymous functions
'Lambda',
# Landau O symbol
'Order',
# relational operations
'Equality', 'Unequality', 'StrictGreaterThan', 'StrictLessThan',
'GreaterThan', 'LessThan',
]
class BasicType(type):
pass
class Registry(object):
"""
Base class for registry objects.
Registries map a name to an object using attribute notation. Registry
classes behave singletonically: all their instances share the same state,
which is stored in the class object.
All subclasses should set `__slots__ = []`.
"""
__slots__ = []
def __setattr__(self, name, obj):
setattr(self.__class__, name, obj)
def __delattr__(self, name):
delattr(self.__class__, name)
#A set containing all sympy class objects, kept in sync with C
all_classes = set()
class ClassRegistry(Registry):
"""
Namespace for SymPy classes
This is needed to avoid problems with cyclic imports.
To get a SymPy class, use `C.<class_name>` e.g. `C.Rational`, `C.Add`.
For performance reasons, this is coupled with a set `all_classes` holding
the classes, which should not be modified directly.
"""
__slots__ = []
def __setattr__(self, name, cls):
Registry.__setattr__(self, name, cls)
all_classes.add(cls)
def __delattr__(self, name):
cls = getattr(self, name)
Registry.__delattr__(self, name)
# The same class could have different names, so make sure
# it's really gone from C before removing it from all_classes.
if cls not in self.__class__.__dict__.itervalues():
all_classes.remove(cls)
C = ClassRegistry()
class BasicMeta(BasicType):
def __init__(cls, *args, **kws):
setattr(C, cls.__name__, cls)
def __cmp__(cls, other):
# If the other object is not a Basic subclass, then we are not equal to
# it.
if not isinstance(other, BasicType):
return -1
n1 = cls.__name__
n2 = other.__name__
c = cmp(n1, n2)
if not c:
return 0
UNKNOWN = len(ordering_of_classes) + 1
try:
i1 = ordering_of_classes.index(n1)
except ValueError:
i1 = UNKNOWN
try:
i2 = ordering_of_classes.index(n2)
except ValueError:
i2 = UNKNOWN
if i1 == UNKNOWN and i2 == UNKNOWN:
return c
return cmp(i1, i2)
def __lt__(cls, other):
if cls.__cmp__(other) == -1:
return True
return False
def __gt__(cls, other):
if cls.__cmp__(other) == 1:
return True
return False
C.BasicMeta = BasicMeta
| [
"[email protected]"
] | |
dc82b171cc6ab60e7af9a0c3dfb107309555c95e | 8114909d3ed6ee1e6d1fbe14a37723015ab53af6 | /source_test.py | 91ee4a6072716b0382d94851d413d9bb445b4364 | [
"LicenseRef-scancode-public-domain"
] | permissive | notenoughneon/activitystreams-unofficial | b0c66d48eb3b43d68b76df069ba237dce9d77489 | 1f45bde45d3d18ef39d69ebd698e248233b94ce9 | refs/heads/master | 2021-01-18T03:01:03.101619 | 2014-08-15T15:00:00 | 2014-08-15T23:48:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,093 | py | # coding=utf-8
"""Unit tests for source.py.
"""
__author__ = ['Ryan Barrett <[email protected]>']
import copy
import json
import mox
from source import Source
from oauth_dropins.webutil import testutil
from oauth_dropins.webutil import util
LIKES = [{
'verb': 'like',
'author': {'id': 'tag:fake.com:person', 'numeric_id': '5'},
'object': {'url': 'http://foo/like/5'},
}, {
'verb': 'like',
'author': {'id': 'tag:fake.com:6'},
'object': {'url': 'http://bar/like/6'},
},
]
ACTIVITY = {
'id': '1',
'object': {
'id': '1',
'tags': LIKES,
}
}
RSVPS = [{
'id': 'tag:fake.com:246_rsvp_11500',
'objectType': 'activity',
'verb': 'rsvp-yes',
'actor': {'displayName': 'Aaron P', 'id': 'tag:fake.com,2013:11500'},
}, {
'objectType': 'activity',
'verb': 'rsvp-no',
'actor': {'displayName': 'Ryan B'},
}, {
'id': 'tag:fake.com:246_rsvp_987',
'objectType': 'activity',
'verb': 'rsvp-maybe',
'actor': {'displayName': 'Foo', 'id': 'tag:fake.com,2013:987'},
}]
EVENT = {
'id': 'tag:fake.com:246',
'objectType': 'event',
'displayName': 'Homebrew Website Club',
}
EVENT_WITH_RSVPS = copy.deepcopy(EVENT)
EVENT_WITH_RSVPS.update({
'attending': [RSVPS[0]['actor']],
'notAttending': [RSVPS[1]['actor']],
'maybeAttending': [RSVPS[2]['actor']],
})
class FakeSource(Source):
DOMAIN = 'fake.com'
def __init__(self, **kwargs):
pass
class SourceTest(testutil.HandlerTest):
def setUp(self):
super(SourceTest, self).setUp()
self.source = FakeSource()
self.mox.StubOutWithMock(self.source, 'get_activities')
def test_original_post_discovery(self):
activity = {'object': {
'objectType': 'article',
'displayName': 'article abc',
'url': 'http://example.com/article-abc',
'tags': [],
}}
self.assert_equals(activity, Source.original_post_discovery(
copy.deepcopy(activity)))
# missing objectType
activity['object']['attachments'] = [{'url': 'http://x.com/y'}]
Source.original_post_discovery(activity)
self.assert_equals([], activity['object']['tags'])
activity['object']['content'] = 'x (not.at end) y (at.the end)'
Source.original_post_discovery(activity)
self.assert_equals(['http://at.the/end'],
activity['object']['upstreamDuplicates'])
self.assert_equals([], activity['object']['tags'])
activity['object'].update({
'content': 'x http://baz/3 y',
'attachments': [{'objectType': 'article', 'url': 'http://foo/1'}],
'tags': [{'objectType': 'article', 'url': 'http://bar/2'}],
})
Source.original_post_discovery(activity)
self.assert_equals([
{'objectType': 'article', 'url': 'http://foo/1'},
{'objectType': 'article', 'url': 'http://bar/2'},
{'objectType': 'article', 'url': 'http://baz/3'},
], activity['object']['tags'])
# leading parens used to cause us trouble
activity = {'object': {'content' : 'Foo (http://snarfed.org/xyz)'}}
Source.original_post_discovery(activity)
self.assert_equals(
[{'objectType': 'article', 'url': 'http://snarfed.org/xyz'}],
activity['object']['tags'])
# don't duplicate PSCs and PSLs with http and https
for field in 'tags', 'attachments':
for scheme in 'http', 'https':
url = scheme + '://foo.com/1'
activity = {'object': {
'content': 'x (foo.com/1)',
field: [{'objectType': 'article', 'url': url}],
}}
Source.original_post_discovery(activity)
self.assert_equals([{'objectType': 'article', 'url': url}],
activity['object']['tags'])
# exclude ellipsized URLs
for ellipsis in '...', u'…':
url = 'foo.com/1' + ellipsis
activity = {'object': {
'content': 'x (%s)' % url,
'attachments': [{'objectType': 'article', 'url': 'http://' + url}],
}}
Source.original_post_discovery(activity)
self.assert_equals([], activity['object']['tags'])
def test_get_like(self):
self.source.get_activities(user_id='author', activity_id='activity',
fetch_likes=True).AndReturn([ACTIVITY])
self.mox.ReplayAll()
self.assert_equals(LIKES[1], self.source.get_like('author', 'activity', '6'))
def test_get_like_numeric_id(self):
self.source.get_activities(user_id='author', activity_id='activity',
fetch_likes=True).AndReturn([ACTIVITY])
self.mox.ReplayAll()
self.assert_equals(LIKES[0], self.source.get_like('author', 'activity', '5'))
def test_get_like_not_found(self):
activity = copy.deepcopy(ACTIVITY)
del activity['object']['tags']
self.source.get_activities(user_id='author', activity_id='activity',
fetch_likes=True).AndReturn([activity])
self.mox.ReplayAll()
self.assert_equals(None, self.source.get_like('author', 'activity', '6'))
def test_get_like_no_activity(self):
self.source.get_activities(user_id='author', activity_id='activity',
fetch_likes=True).AndReturn([])
self.mox.ReplayAll()
self.assert_equals(None, self.source.get_like('author', 'activity', '6'))
def test_get_share(self):
activity = copy.deepcopy(ACTIVITY)
share = activity['object']['tags'][1]
share['verb'] = 'share'
self.source.get_activities(user_id='author', activity_id='activity',
fetch_shares=True).AndReturn([activity])
self.mox.ReplayAll()
self.assert_equals(share, self.source.get_share('author', 'activity', '6'))
def test_get_share_not_found(self):
self.source.get_activities(user_id='author', activity_id='activity',
fetch_shares=True).AndReturn([ACTIVITY])
self.mox.ReplayAll()
self.assert_equals(None, self.source.get_share('author', 'activity', '6'))
def test_add_rsvps_to_event(self):
event = copy.deepcopy(EVENT)
Source.add_rsvps_to_event(event, [])
self.assert_equals(EVENT, event)
Source.add_rsvps_to_event(event, RSVPS)
self.assert_equals(EVENT_WITH_RSVPS, event)
def test_get_rsvps_from_event(self):
self.assert_equals([], Source.get_rsvps_from_event(EVENT))
self.assert_equals(RSVPS, Source.get_rsvps_from_event(EVENT_WITH_RSVPS))
def test_get_rsvps_from_event_bad_id(self):
event = copy.deepcopy(EVENT)
for id in None, 'not_a_tag_uri':
event['id'] = id
self.assert_equals([], Source.get_rsvps_from_event(event))
def test_base_object_multiple_objects(self):
like = copy.deepcopy(LIKES[0])
like['object'] = [like['object'], {'url': 'http://fake.com/second'}]
self.assert_equals(('second', 'http://fake.com/second'),
self.source.base_object(like))
def test_content_for_create(self):
def cfc(base, extra):
obj = base.copy()
obj.update(extra)
return self.source._content_for_create(obj)
self.assertEqual(None, cfc({}, {}))
for base in ({'objectType': 'article'},
{'inReplyTo': {'url': 'http://not/fake'}},
{'objectType': 'comment', 'object': {'url': 'http://not/fake'}}):
self.assertEqual(None, cfc(base, {}))
self.assertEqual('c', cfc(base, {'content': ' c '}))
self.assertEqual('n', cfc(base, {'content': 'c', 'displayName': 'n'}))
self.assertEqual('s', cfc(base, {'content': 'c', 'displayName': 'n',
'summary': 's'}))
for base in ({'objectType': 'note'},
{'inReplyTo': {'url': 'http://fake.com/post'}},
{'objectType': 'comment',
'object': {'url': 'http://fake.com/post'}}):
self.assertEqual(None, cfc(base, {}))
self.assertEqual('n', cfc(base, {'displayName': 'n'}))
self.assertEqual('c', cfc(base, {'displayName': 'n', 'content': 'c'}))
self.assertEqual('s', cfc(base, {'displayName': 'n', 'content': 'c',
'summary': ' s '}))
| [
"[email protected]"
] | |
b27eade25115e891e7aff1fada285bf11bcc7f81 | dd9e19abfff532e7f4dea5f5b57ac6a4da9f1e6f | /fabric/thread_handling.py | 25aa3a2326b3f02aad1cc42e0c3341329190fd37 | [
"BSD-2-Clause"
] | permissive | jonatkinson/fabric | 27c6146243a2c846162e0a6e14f282b900cb2734 | 64eb6c56e1aa4c0b654bb8d17f0a09386616342b | refs/heads/master | 2020-12-24T20:01:02.759635 | 2011-03-04T02:26:31 | 2011-03-04T02:26:31 | 1,441,428 | 0 | 0 | BSD-2-Clause | 2020-06-01T13:22:18 | 2011-03-04T23:00:45 | Python | UTF-8 | Python | false | false | 587 | py | import threading
import sys
class ThreadHandler(object):
def __init__(self, name, callable, *args, **kwargs):
# Set up exception handling
self.exception = None
def wrapper(*args, **kwargs):
try:
callable(*args, **kwargs)
except BaseException:
self.exception = sys.exc_info()
# Kick off thread
thread = threading.Thread(None, wrapper, name, args, kwargs)
thread.setDaemon(True)
thread.start()
# Make thread available to instantiator
self.thread = thread
| [
"[email protected]"
] | |
b22c5bba251df2059e2e293d1f03d796f0be5fc0 | 95b0b12c8e3b9982aff752b4f5e69e7812e56728 | /12-Spider/09_UA.py | b53bce5333ba1cfe540fc9814bdfdbbfd79ab36e | [] | no_license | PeterTao666/learnpython2 | fb6792de7d28d306eaeda9098914fa5bb2151592 | 56a506590bf625c5c1ab23a530f30b23b89c8864 | refs/heads/master | 2020-04-17T11:41:12.587398 | 2019-02-12T14:06:38 | 2019-02-12T14:06:38 | 166,550,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 995 | py | # 访问一个网址
# 更改自己的UserAgent进行伪装
from urllib import request, error
if __name__ == '__main__':
url = 'http://www.baidu.com'
try:
# 使用head方法伪装UA
# 方法一:
#headers = {}
#headers['User-Agent'] = 'Mozilla/5.0 (ipad;CPU OS 5_0 like Mac OS X) AppleWibKit/534.46(KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3'
#req = request.Request(url, headers=headers)
# 方法二:使用add_header方法
req = request.Request(url)
req.add_header("User-Agent", "Mozilla/5.0 (ipad;CPU OS 5_0 like Mac OS X) AppleWibKit/534.46(KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3'")
# 正常访问
rsp = request.urlopen(req)
html = rsp.read().decode()
print(html)
except error.HTTPError as e:
print(e)
except error.URLError as e:
print(e)
except Exception as e:
print(e)
print("Done...") | [
"[email protected]"
] | |
e05696c1beeb3ed1f8442fe11b4519696639551d | 634fb5fe10e8f944da44ab31896acc8471ec5f18 | /hq_env/lib/python2.7/site-packages/openpyxl/tests/test_write.py | 097e233c689ef564cb1d2d59ffbcc48be6eaf0f3 | [] | no_license | dimagi/commcarehq-venv | 277d0b6fada24f2edd54f74850267201153412a7 | 2c52e3fb0f974cae5c5feaea1d5de851fe530c80 | refs/heads/master | 2021-01-18T14:05:47.931306 | 2015-07-20T10:10:41 | 2015-07-20T10:10:41 | 11,513,855 | 1 | 1 | null | 2015-07-20T10:10:41 | 2013-07-18T21:09:22 | Python | UTF-8 | Python | false | false | 7,980 | py | # file openpyxl/tests/test_write.py
# Copyright (c) 2010-2011 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
# Python stdlib imports
from StringIO import StringIO
import os.path
# 3rd party imports
from nose.tools import eq_, with_setup, raises
# package imports
from openpyxl.tests.helper import TMPDIR, DATADIR, \
assert_equals_file_content, clean_tmpdir, make_tmpdir
from openpyxl.workbook import Workbook
from openpyxl.reader.excel import load_workbook
from openpyxl.writer.excel import save_workbook, save_virtual_workbook, \
ExcelWriter
from openpyxl.writer.workbook import write_workbook, write_workbook_rels
from openpyxl.writer.worksheet import write_worksheet, write_worksheet_rels
from openpyxl.writer.strings import write_string_table
from openpyxl.writer.styles import StyleWriter
@with_setup(setup = make_tmpdir, teardown = clean_tmpdir)
def test_write_empty_workbook():
wb = Workbook()
dest_filename = os.path.join(TMPDIR, 'empty_book.xlsx')
save_workbook(wb, dest_filename)
assert os.path.isfile(dest_filename)
def test_write_virtual_workbook():
old_wb = Workbook()
saved_wb = save_virtual_workbook(old_wb)
new_wb = load_workbook(StringIO(saved_wb))
assert new_wb
def test_write_workbook_rels():
wb = Workbook()
content = write_workbook_rels(wb)
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'workbook.xml.rels'), content)
def test_write_workbook():
wb = Workbook()
content = write_workbook(wb)
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'workbook.xml'), content)
def test_write_string_table():
table = {'hello': 1, 'world': 2, 'nice': 3}
content = write_string_table(table)
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'sharedStrings.xml'), content)
def test_write_worksheet():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('F42').value = 'hello'
content = write_worksheet(ws, {'hello': 0}, {})
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'sheet1.xml'), content)
def test_write_hidden_worksheet():
wb = Workbook()
ws = wb.create_sheet()
ws.sheet_state = ws.SHEETSTATE_HIDDEN
ws.cell('F42').value = 'hello'
content = write_worksheet(ws, {'hello': 0}, {})
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'sheet1.xml'), content)
def test_write_bool():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('F42').value = False
ws.cell('F43').value = True
content = write_worksheet(ws, {}, {})
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'sheet1_bool.xml'), content)
def test_write_formula():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('F1').value = 10
ws.cell('F2').value = 32
ws.cell('F3').value = '=F1+F2'
content = write_worksheet(ws, {}, {})
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'sheet1_formula.xml'), content)
def test_write_style():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('F1').value = '13%'
style_id_by_hash = StyleWriter(wb).get_style_by_hash()
content = write_worksheet(ws, {}, style_id_by_hash)
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'sheet1_style.xml'), content)
def test_write_height():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('F1').value = 10
ws.row_dimensions[ws.cell('F1').row].height = 30
content = write_worksheet(ws, {}, {})
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'sheet1_height.xml'), content)
def test_write_hyperlink():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('A1').value = "test"
ws.cell('A1').hyperlink = "http://test.com"
content = write_worksheet(ws, {'test': 0}, {})
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'sheet1_hyperlink.xml'), content)
def test_write_hyperlink_rels():
wb = Workbook()
ws = wb.create_sheet()
eq_(0, len(ws.relationships))
ws.cell('A1').value = "test"
ws.cell('A1').hyperlink = "http://test.com/"
eq_(1, len(ws.relationships))
ws.cell('A2').value = "test"
ws.cell('A2').hyperlink = "http://test2.com/"
eq_(2, len(ws.relationships))
content = write_worksheet_rels(ws, 1)
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'sheet1_hyperlink.xml.rels'), content)
def test_hyperlink_value():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('A1').hyperlink = "http://test.com"
eq_("http://test.com", ws.cell('A1').value)
ws.cell('A1').value = "test"
eq_("test", ws.cell('A1').value)
def test_write_auto_filter():
wb = Workbook()
ws = wb.worksheets[0]
ws.cell('F42').value = 'hello'
ws.auto_filter = 'A1:F1'
content = write_worksheet(ws, {'hello': 0}, {})
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'sheet1_auto_filter.xml'), content)
content = write_workbook(wb)
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'workbook_auto_filter.xml'), content)
def test_freeze_panes_horiz():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('F42').value = 'hello'
ws.freeze_panes = 'A4'
content = write_worksheet(ws, {'hello': 0}, {})
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'sheet1_freeze_panes_horiz.xml'), content)
def test_freeze_panes_vert():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('F42').value = 'hello'
ws.freeze_panes = 'D1'
content = write_worksheet(ws, {'hello': 0}, {})
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'sheet1_freeze_panes_vert.xml'), content)
pass
def test_freeze_panes_both():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('F42').value = 'hello'
ws.freeze_panes = 'D4'
content = write_worksheet(ws, {'hello': 0}, {})
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'sheet1_freeze_panes_both.xml'), content)
def test_long_number():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('A1').value = 9781231231230
content = write_worksheet(ws, {}, {})
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'long_number.xml'), content)
def test_short_number():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('A1').value = 1234567890
content = write_worksheet(ws, {}, {})
assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
'short_number.xml'), content)
| [
"[email protected]"
] | |
4e0c8635049fc400d8256cfd2f5f3190bb8a40f3 | 814f8b85dd6435b3bb3fdebf2f193912aa145a62 | /image_segmentation/slim_fcn/utils.py | d84502d6bbb3c3d6baedec6adf6a1cc5d5015d94 | [
"Apache-2.0"
] | permissive | jacke121/pycharm | 480df86258ee918de25b76a4156e9e6b9d355df7 | b9b2963cf0c5028f622f41413f52f1b5cbde28a1 | refs/heads/master | 2020-03-18T16:35:25.579992 | 2018-01-01T02:30:58 | 2018-01-01T02:30:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by weihang huang on 17-12-23
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
import tensorflow as tf
import numpy as np
def colormap(n):
cmap = np.zeros([n, 3]).astype(np.uint8)
for i in np.arange(n):
r, g, b = np.zeros(3)
for j in np.arange(8):
r = r + (1 << (7 - j)) * ((i & (1 << (3 * j))) >> (3 * j))
g = g + (1 << (7 - j)) * ((i & (1 << (3 * j + 1))) >> (3 * j + 1))
b = b + (1 << (7 - j)) * ((i & (1 << (3 * j + 2))) >> (3 * j + 2))
cmap[i, :] = np.array([r, g, b])
return cmap
class Colorize(object):
def __init__(self):
self.cmap = colormap(256)
def __call__(self, gray_image):
size = gray_image.shape
color_image = np.zeros((size[0], size[1], 3))
for i in range(color_image.shape[0]):
for j in range(color_image.shape[1]):
color_image[i, j, :] = self.cmap[gray_image[i, j]]
return color_image
def colorize(ori_img):
color_fcn = Colorize()
img = color_fcn(ori_img.astype(np.uint8))
return img
| [
"[email protected]"
] | |
be6d8f5c44955195923dcdcee4b4c0b69fd07ab1 | edf06a2bbe5f2ac332e7c93a91b391b548d2caa3 | /caso/config.py | 0f06b5ea25793404583023ee7b11c239b5b91f4f | [
"Apache-2.0"
] | permissive | enolfc/caso | b3fcb8490491f94b73ff23f516426f7bf257b20f | 22711ca71de4dcd99c231d3220005e0faee9b80d | refs/heads/master | 2023-08-05T09:57:55.633603 | 2022-03-21T11:28:09 | 2022-03-24T15:46:48 | 27,259,205 | 0 | 0 | Apache-2.0 | 2022-03-17T13:13:20 | 2014-11-28T09:00:13 | Python | UTF-8 | Python | false | false | 1,043 | py | # -*- coding: utf-8 -*-
# Copyright 2014 Spanish National Research Council (CSIC)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import warnings
from oslo_config import cfg
from oslo_log import log
import caso
logging.captureWarnings(True)
warnings.simplefilter("default", DeprecationWarning)
log.register_options(cfg.CONF)
def parse_args(argv, default_config_files=None):
cfg.CONF(argv[1:],
project='caso',
version=caso.__version__,
default_config_files=default_config_files)
| [
"[email protected]"
] | |
827d277eb2c6b6355e2ed92d4b2f89b51345f449 | d31d744f62c09cb298022f42bcaf9de03ad9791c | /model-analysis/tensorflow_model_analysis/extractors/__init__.py | a06399b7fca2700ab4b4b86ce7286df7ba755c01 | [] | no_license | yuhuofei/TensorFlow-1 | b2085cb5c061aefe97e2e8f324b01d7d8e3f04a0 | 36eb6994d36674604973a06159e73187087f51c6 | refs/heads/master | 2023-02-22T13:57:28.886086 | 2021-01-26T14:18:18 | 2021-01-26T14:18:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,446 | py | # Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init module for TensorFlow Model Analysis extractors."""
from tensorflow_model_analysis.extractors import legacy_meta_feature_extractor as meta_feature_extractor
from tensorflow_model_analysis.extractors.batched_input_extractor import BatchedInputExtractor
from tensorflow_model_analysis.extractors.extractor import Extractor
from tensorflow_model_analysis.extractors.extractor import Filter
from tensorflow_model_analysis.extractors.legacy_feature_extractor import FeatureExtractor
from tensorflow_model_analysis.extractors.legacy_input_extractor import InputExtractor
from tensorflow_model_analysis.extractors.legacy_predict_extractor import PredictExtractor
from tensorflow_model_analysis.extractors.slice_key_extractor import SLICE_KEY_EXTRACTOR_STAGE_NAME
from tensorflow_model_analysis.extractors.slice_key_extractor import SliceKeyExtractor
| [
"[email protected]"
] | |
bd1d6657e2a5c8b6d49190039ae96a131706ac70 | 99b062cb9f5f3ff10c9f1fa00e43f6e8151a43a6 | /algorithm/AD/장기.py | 4e1e8a0d1bdb348bfe634238d4aec62f57fbc256 | [] | no_license | HSx3/TIL | 92acc90758015c2e31660617bd927f7f100f5f64 | 981c9aaaf09c930d980205f68a28f2fc8006efcb | refs/heads/master | 2020-04-11T21:13:36.239246 | 2019-05-08T08:18:03 | 2019-05-08T08:18:03 | 162,099,042 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | import sys
sys.stdin = open("장기_input.txt")
def bfs():
que = []
que.append((R, C, 0))
data[R][C] = 1
dr = [-2, -2, -1, 1, 2, 2, 1, -1]
dc = [-1, 1, 2, 2, 1, -1, -2, -2]
while que:
r, c, turn = que.pop(0)
if r == S and c == K:
return turn
for i in range(8):
nr = r + dr[i]
nc = c + dc[i]
if nr < 0 or nr >= N or nc < 0 or nc >= M:
continue
if data[nr][nc] == 1:
continue
data[nr][nc] = 1
que.append((nr, nc, turn+1))
N, M = map(int, input().split())
R, C, S, K = map(int, input().split())
R -= 1
C -= 1
S -= 1
K -= 1
data = [[0 for _ in range(M)] for _ in range(N)]
print(bfs()) | [
"[email protected]"
] | |
164b9f7d0ee11a4e314b06179056de3565e0c3d7 | b5bde703700ccf5b575f2382d357c0d2f5bd306c | /code/.metadata/.plugins/org.eclipse.core.resources/.history/7e/90dd27476cf30014128189d707139bfe | 9f9953b39fdd1318ccfa7bad7921046b4f189881 | [] | no_license | lordwarlock/IRFinalProj | 6a4a4a8d880f27fcd38054125c5e695c179863d6 | cc2b3003fb41a63cb85c84bbdf13c20d8a7e1cba | refs/heads/master | 2016-08-07T08:52:46.564262 | 2015-05-06T21:48:42 | 2015-05-06T21:48:42 | 33,019,191 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,989 | #!/usr/bin/python
'''
Defines the general behavior that can be used by all cgi search modules.
'''
import unicodedata
import cgi
import cgitb
cgitb.enable()
def receive_data():
'''
Receive and return the cgi data from web-page.
@return: the cgi form data from the corresponding web-page
'''
print 'Content-Type: text/html\n\n'
data = cgi.FieldStorage()
return data
def find_search_result(rst_list, process_each_search_result):
'''
According to the search results list, and search result processing method,
return a search results html string.
@param rst_list: the search results list
@param process_each_search_result: the method of processing each search result
@return: the search result html string
'''
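# Hedged sketch of a `process_each_search_result` callback (illustrative only;
# the real callbacks are supplied by the individual cgi search modules):
#
#     def process_each_search_result(hit, rank):
#         return '<p>%d. %s</p>\n' % (rank, hit)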
# If search miss
if len(rst_list) == 0:
return 'Search miss!<br>\n'
rst_string = 'Total search hits number: ' + str(len(rst_list)) + '<br><br>\n'
# Print out top 10 search hits
for i in range(0, 10):
if i >= len(rst_list):
break
if i % 2 == 0:
rst_string += '<div class="blog-top">\n'
rst_string += process_each_search_result(rst_list[i], i + 1)
if i % 2 == 1:
rst_string += '''
<div class="clear"></div>
</div>
'''
return rst_string
def html_file_top():
'''
Return a html string that corresponds to the search result page's header.
@return: a html string that corresponds to the search result page's header
'''
return '''
<html>
<head>
<title>Search Result</title>
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<link href="css/style.css" rel="stylesheet" type="text/css" media="all" />
<link href='http://fonts.googleapis.com/css?family=Raleway' rel='stylesheet' type='text/css'>
<script src="js/jquery.min.js"></script>
</head>
<body>
<div class="index-banner1">
<div class="header-top">
<div class="wrap">
<div class="logo">
<a href="home.html"><img src="images/another_search.png" alt=""/></a>
</div>
<div class="clear"></div>
</div>
</div>
</div>
<div class="main">
<div class="wrap">
<div class="abstract">
'''
def html_file_bottom():
'''
Return a html string that corresponds to the search result page's footer.
@return: a html string that corresponds to the search result page's footer
'''
return ''' </body>
</html>
'''
def write_and_jump(rst_html_str):
'''
Write the search result html string into ./soccer_search/result.html file,
then jump current web-page into the result page (http://localhost:8000/soccer_search/result.html)
@param rst_html_str: the search result html string
'''
# Write the processed search result html string into ./soccer_search/result.html file
with open('./soccer_search/result.html', 'w') as html_file:
html_file.write(html_file_top())
html_file.write(rst_html_str)
html_file.write(html_file_bottom())
# Jump current web-page into the result page
print '''
<html>
<meta http-equiv="refresh" content="0.1;url=http://localhost:8000/soccer_search/result.html">
</html>
''' | [
"[email protected]"
] | ||
54d43d884097f994f64480f38c5b51fee9295850 | a6476a929a361a9fcd0f1c23635d24554032000f | /horizon/horizon/dashboards/nova/images_and_snapshots/snapshots/forms.py | 1e91fb12ce2f14d5044b9194607ef23c81bed843 | [
"Apache-2.0"
] | permissive | ehazlett/horizon | 976eba79aed5390b98926389c8df29bbbc8d657b | 993cc3bcd8e47a823733af5756fcb0f42cc4c703 | refs/heads/master | 2020-12-25T12:47:48.879504 | 2012-01-06T20:56:27 | 2012-01-06T20:56:27 | 3,123,162 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,234 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django import shortcuts
from django.contrib import messages
from django.utils.translation import ugettext as _
from openstackx.api import exceptions as api_exceptions
from horizon import api
from horizon import forms
LOG = logging.getLogger(__name__)
class CreateSnapshot(forms.SelfHandlingForm):
tenant_id = forms.CharField(widget=forms.HiddenInput())
instance_id = forms.CharField(widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
name = forms.CharField(max_length="20", label=_("Snapshot Name"))
def handle(self, request, data):
try:
LOG.info('Creating snapshot "%s"' % data['name'])
snapshot = api.snapshot_create(request,
data['instance_id'],
data['name'])
instance = api.server_get(request, data['instance_id'])
messages.info(request,
_('Snapshot "%(name)s" created for instance "%(inst)s"') %
{"name": data['name'], "inst": instance.name})
return shortcuts.redirect('horizon:nova:images_and_snapshots'
':snapshots:index')
except api_exceptions.ApiException, e:
msg = _('Error Creating Snapshot: %s') % e.message
LOG.exception(msg)
messages.error(request, msg)
return shortcuts.redirect(request.build_absolute_uri())
| [
"[email protected]"
] | |
339b4951be14fbb332d93723eb4ca888cccd60f9 | c0075f31ff48142a05d92f11840229beee09f697 | /tests/plugins/test_googledrive.py | 340eb364e9161bfe2aa709b277873eebea29254f | [
"BSD-Source-Code",
"BSD-2-Clause"
] | permissive | beardypig/streamlink | c1d44605ced0c924257b4813649acb406b035cb8 | 54c25e49a45a5545d2d9a545320cd2034ea41cd3 | refs/heads/master | 2021-12-12T04:31:02.955852 | 2020-11-10T06:18:33 | 2020-11-10T06:39:46 | 70,149,227 | 5 | 2 | BSD-2-Clause | 2019-08-08T11:56:37 | 2016-10-06T11:52:09 | Python | UTF-8 | Python | false | false | 581 | py | import unittest
from streamlink.plugins.googledrive import GoogleDocs
class TestPluginGoogleDocs(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
'https://drive.google.com/file/d/123123/preview?start=1',
]
for url in should_match:
self.assertTrue(GoogleDocs.can_handle_url(url))
def test_can_handle_url_negative(self):
should_not_match = [
'https://example.com/index.html',
]
for url in should_not_match:
self.assertFalse(GoogleDocs.can_handle_url(url))
| [
"[email protected]"
] | |
82b9b94e271939d0b03a46e1e986b5ff1619f243 | ecd2c20608e1f4a1646c87767762bd72db618d65 | /photo_blog/wsgi.py | cd6d1e1c954242c33644de5cafb80b91abfeb197 | [] | no_license | RianGirard/photo_blog | 129858ee32cbc2ff0521c8219b72b9d83c015726 | e461fa62abe027965b7143cce544d25634d5bf9c | refs/heads/master | 2023-06-20T14:36:38.040663 | 2021-07-21T01:02:13 | 2021-07-21T01:02:13 | 383,640,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for photo_blog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'photo_blog.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
3435c887badc00f8d36cb10f18293efc83d2474a | 2e6f37e664d2cc85d0c704f20de05b2cae86771d | /dataloader/image_list.py | 64e7b0334e72a01f88ae9d4278fd9d20d06d2bef | [
"MIT"
] | permissive | LEOGML/cv_template | 5bee5e43efb490649f63a7c4e1b77e62a3e1d948 | c1a87465f0aeb79dab63b0cae88861a6282c045c | refs/heads/master | 2023-01-30T21:32:38.240103 | 2020-12-15T09:39:14 | 2020-12-15T09:39:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,122 | py | # encoding=utf-8
import pdb
import os
import torch.utils.data.dataset as dataset
import misc_utils as utils
import random
import numpy as np
import cv2
from dataloader.transforms.custom_transform import read_image
class ListTrainValDataset(dataset.Dataset):
"""ImageDataset for training.
Args:
        file_list(str): path to a list file; each line holds an input image path and a label path separated by whitespace
        transforms: transform pipeline applied jointly to the image and its ground truth
        max_size(int, optional): if set, caps the number of samples used
    Example:
        train_dataset = ListTrainValDataset('train.txt', transforms)
        for i, data in enumerate(train_dataset):
            input, label = data['input'], data['label']
"""
def __init__(self, file_list, transforms, max_size=None):
self.im_names = []
self.labels = []
with open(file_list, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.rstrip('\n')
img, label = line.split()
img, label = img.strip(), label.strip()
self.im_names.append(img)
self.labels.append(label)
self.transforms = transforms
self.max_size = max_size
def __getitem__(self, index):
"""Get indexs by index
Args:
index(int): index
Returns:
{
'input': input,
'label': label,
'path': path,
}
"""
input = read_image(self.im_names[index])
gt = read_image(self.labels[index])
sample = self.transforms(**{
'image': input,
'gt': gt,
})
sample = {
'input': sample['image'],
'label': sample['gt'],
'path': self.im_names[index],
}
return sample
def __len__(self):
if self.max_size is not None:
return min(self.max_size, len(self.im_names))
return len(self.im_names)
class ListTestDataset(dataset.Dataset):
"""ImageDataset for test.
Args:
        file_list(str): path to a list file with one image path per line
        transforms: transform pipeline applied to the image
        max_size(int, optional): if set, caps the number of samples used
    Example:
        test_dataset = ListTestDataset('test.txt', transforms)
        for i, data in enumerate(test_dataset):
            input, path = data['input'], data['path']
"""
def __init__(self, file_list, transforms, max_size=None):
self.im_names = []
with open(file_list, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.rstrip('\n')
img = line
self.im_names.append(img)
self.transforms = transforms
self.max_size = max_size
def __getitem__(self, index):
        input = read_image(self.im_names[index])  # PIL's Image is not imported here; reuse the same loader as the training dataset
sample = self.transforms(**{
'image': input,
'gt': input,
})
sample = {
'input': sample['image'],
'path': self.im_names[index],
}
return sample
def __len__(self):
if self.max_size is not None:
return min(self.max_size, len(self.im_names))
return len(self.im_names)
| [
"[email protected]"
] | |
238579738f8fcf45ef60386b64825011e48f21d4 | 2e29ed138ab0fdb7e0a6e87b7c52c097b350fecf | /Misc/boltingProcedures.py | dde8b56f2cb80bc3a9118c4e34c00114bd1b4754 | [] | no_license | ronniegeiger/Abaqus-Scripts | 1e9c66664bd7dc7e5264bf763f15936eadcff529 | c071bbfe0e6c54148dfd4a23f786f017dfef4ae4 | refs/heads/master | 2023-03-18T06:33:13.690549 | 2018-08-14T11:37:07 | 2018-08-14T11:37:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,406 | py | from abaqusConstants import *
from abaqus import *
from textRepr import prettyPrint as pp
from VectorModule import Vector as Vector
import copy
import numpy as np
import re
import regionToolset
from textwrap import fill
scriptOwner = 'FEAS-'
def centreOfCircularArc(edge):
"""function to return the coordinates of the centrepoint of a circular arc"""
curvature_1 = edge.getCurvature(parameter = 0.0)
curvature_2 = edge.getCurvature(parameter = 0.25)
tangent_1 = Vector(curvature_1['tangent'])
tangent_2 = Vector(curvature_2['tangent'])
evaluationPoint_1 = Vector(curvature_1['evaluationPoint'])
evaluationPoint_2 = Vector(curvature_2['evaluationPoint'])
temp = tangent_1.cross(tangent_2)
Axial = temp/temp.length()
Radial_1 = tangent_1.cross(Axial)
Radial_2 = tangent_2.cross(Axial)
radius = curvature_1['radius']
check_1 = evaluationPoint_1 + radius*Radial_1
check_2 = evaluationPoint_2 + radius*Radial_2
tol = 0.01 * radius
if (check_1 - check_2).length() < tol:
centre = check_1
#print 'Found centre using positive radius'
else:
check_1 = evaluationPoint_1 - radius*Radial_1
check_2 = evaluationPoint_2 - radius*Radial_2
if (check_1 - check_2).length() < tol:
centre = check_1
#print 'Found centre using negative radius'
else:
print 'Unable to find curve centre'
return centre
def isFaceConvex(face, edge):
"""function to determine whether the edge defines a convex arc within the face"""
verySmall = 1E-2
evaluationPoint = Vector(edge.getCurvature(parameter = 0.5)['evaluationPoint'])
centre = centreOfCircularArc(edge)
testPoint = verySmall*centre +(1.0-verySmall)*evaluationPoint
try:
foundFace = ra.instances[face.instanceName].faces.findAt(coordinates=(testPoint.x(),
testPoint.y(),
testPoint.z()))
except:
foundFace= None
return foundFace==face
def findFaceAndCentroid(instance, elSet, diameter):
"""Returns face index and centre of circular arc"""
Radius = diameter/2.
    tol = 0.06 # 6% variation allowed between specified washer OD and face OD
faceAndCentreList = []
for face in elSet.faces:
# if len(face.getCells()) == 0: # Ensures that cell faces are not included
for edgeID in face.getEdges():
edge = instance.edges[edgeID]
try:
edgeRadius = edge.getRadius()
except:
edgeRadius = None
if type(edgeRadius) == float:
# print 'Found', edgeRadius
if abs(edgeRadius-Radius)/Radius < tol:
if isFaceConvex(face, edge):
centre = centreOfCircularArc(edge)
try:
middleFace = instance.faces.findAt(coordinates=tuple(centre.array))
except:
middleFace = None
if middleFace == None:
faceAndCentreList.append((face.index, centre))
return faceAndCentreList
def readPartSections(instance):
"""Returns a dictionary with section assignments of an instance, grouped according to
part level sets
"""
part = model.parts[instance.partName]
sections = {}
for section in part.sectionAssignments:
name = section.region[0]
sections[name] = {}
sections[name]['name'] = section.sectionName
if 'Shell' in str(type(model.sections[section.sectionName])):
if section.offsetType == MIDDLE_SURFACE:
sections[name]['offset'] = 0.
elif section.offsetType == TOP_SURFACE:
sections[name]['offset'] = 0.5
elif section.offsetType == BOTTOM_SURFACE:
sections[name]['offset'] = -0.5
elif section.offsetType == SINGLE_VALUE:
sections[name]['offset'] = section.offset
else: # Could be expanded to measure offset
sections[name]['offset'] = 0.
sections[name]['thickness'] = model.sections[section.sectionName].thickness
else:
sections[name]['offset'] = 0.
sections[name]['thickness'] = 0.
return sections
def findWasherFaceClusters(instance, elsetName, diameter,count):
""" Returns dict with: list of face IDs, bolt hole centre, plate thickness, shell offset
grouped per bolt hole
"""
tol = 1./Units
tempFaceList = copy.deepcopy(findFaceAndCentroid(instance, instance.sets[elsetName],diameter))
sections = readPartSections(instance)
washerFaces = {}
for i, (faceId, centre) in enumerate(tempFaceList):
count += 1
faceGroup = {}
washerFaces[count]=faceGroup
faceIdGroup = [faceId]
face = instance.faces[faceId]
edges = [instance.edges[ID] for ID in face.getEdges()]
radii = []
for edge in edges:
try:
radii += [edge.getRadius()]
except:
pass
radius = min(radii)
faceGroup['IDs'] = faceIdGroup
faceGroup['instanceName'] = instance.name
faceGroup['type'] = '3D' if len(face.getCells()) > 0 else '2D'
faceGroup['centre'] = centre
faceGroup['diameter'] = radius*2
for setName in sections.keys():
if face in instance.sets[setName].faces:
faceGroup['thickness'] = sections[setName]['thickness']
faceGroup['offset'] = sections[setName]['offset']
if faceGroup['type'] == '3D':
faceGroup['thickness'] = 0.
faceGroup['offset'] = 0.
for nextFaceId, nextCentre in tempFaceList[i+1:]:
offset = (nextCentre - centre).length()
if offset < tol:
face = instance.faces[nextFaceId]
edges = [instance.edges[ID] for ID in face.getEdges()]
radii = []
for edge in edges:
try:
radii += [edge.getRadius()]
except:
pass
radius = min(radii)
if faceGroup['diameter']/2 > radius:
faceGroup['diameter']=radius*2
faceIdGroup += [nextFaceId]
tempFaceList.remove((nextFaceId, nextCentre))
return washerFaces,count
def findBoltDiam(washerFaceDiam,boltDiamList):
""" Returns the nearest sized bolt smaller than the washer face ID
"""
matchingDiams = [washerFaceDiam-i/Units for i in boltDiamList]
sizeDiff = np.array([i if i >0 else 100. for i in matchingDiams ])
return boltDiamList[np.argmin(sizeDiff)]
def matchHole(holeNum, myWasherFaces, allBoltHoles, matchedBoltHoles, boltDiamList):
""" Finds the best match for provided hole, considering distance apart
and distance being perpendicular to washer face
"""
tol_allign = 0.001 # tolerance used when matching washerface normal to target washer face centre
tol_thick = 0.005 # tolerance used for plate thickness.
tol_size = 0.05 # > abs(diameter1-diameter2)/smallDiameter. Based on Whaser OD
instanceName = allBoltHoles[holeNum][1]
matched = {}
mWF = myWasherFaces
washerNum = allBoltHoles[holeNum][2]
centre = mWF[instanceName][washerNum]['centre']
otherCentres=np.array([mWF[iN][wN]['centre'] for n,iN, wN in allBoltHoles] )
centreDistances = [[n, centreDistV.length()] for n,centreDistV in enumerate(otherCentres-centre)
if
n not in matchedBoltHoles and
n != holeNum]
centreDistances = np.array(centreDistances)
washerFaceNum = mWF[instanceName][washerNum]['IDs'][0]
washerFaceNorm = Vector(ra.instances[instanceName].faces[washerFaceNum].getNormal())
washerFaceThick = mWF[instanceName][washerNum]['thickness']
washerFaceDiam = mWF[instanceName][washerNum]['diameter']
tol_allign = washerFaceDiam/4.
potentialMates = []
for n, dist in centreDistances:
n = int(n)
targetInstanceName = allBoltHoles[n][1]
targetWasherCentre = mWF[targetInstanceName][allBoltHoles[n][2]]['centre']
targetWasherFaceThick = mWF[targetInstanceName][allBoltHoles[n][2]]['thickness']
targetWasherFaceDiam = mWF[targetInstanceName][allBoltHoles[n][2]]['diameter']
centreDiff = min([(centre-targetWasherCentre+washerFaceNorm*dist).length(),
(centre-targetWasherCentre-washerFaceNorm*dist).length()])
correctThickness = 1
        correctAllignment = centreDiff < tol_allign  # checks alignment along the washer-face normal
correctSizing = abs(washerFaceDiam-targetWasherFaceDiam)/ \
min([washerFaceDiam,targetWasherFaceDiam]) < tol_size
# print washerFaceDiam, targetWasherFaceDiam
# print correctThickness, correctAllignment , correctSizing
if correctThickness and correctAllignment and correctSizing:
potentialMates += [n]
cDistances = []
for item in potentialMates:
indexNo = np.nonzero(centreDistances[:,0]==item)[0][0]
cDistances += [centreDistances[indexNo]]
cDistances = np.array(cDistances) if len(potentialMates) > 0 else []
if len(potentialMates) > 1: # Should more than one bolthole match the criteria, use the closest one
selected = np.argmin(cDistances[:,1])
mate = int(cDistances[selected,0])
matched['holes'] = [holeNum, mate]
washer1 = mWF[allBoltHoles[holeNum][1]][allBoltHoles[holeNum][2]]
washer2 = mWF[allBoltHoles[mate][1]][allBoltHoles[mate][2]]
matched['offset'] = offsetDirection(washer1, washer2)
matched['length'] = sum(matched['offset']) + \
                             cDistances[selected][1]
matched['diameter'] = findBoltDiam(washerFaceDiam,boltDiamList)
elif len(potentialMates) > 0:
mate = potentialMates[0]
selected = cDistances[0,0]
# print mate, selected
matched['holes'] = [holeNum, mate]
washer1 = mWF[allBoltHoles[holeNum][1]][allBoltHoles[holeNum][2]]
washer2 = mWF[allBoltHoles[mate][1]][allBoltHoles[mate][2]]
matched['offset'] = offsetDirection(washer1, washer2)
matched['length'] = sum(matched['offset']) + \
cDistances[0][1]
matched['diameter'] = findBoltDiam(washerFaceDiam,boltDiamList)
else:
matched['holes'] = [holeNum]
matchedBoltHoles += [matched]
return matchedBoltHoles
def offsetDirection(washer1, washer2):
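    # Returns, for each side of the joint, the through-thickness distance from the
    # modelled (shell) washer face to the physical outer surface along the bolt
    # axis, using the shell section offset and the orientation of the face normal
    # relative to the centre-to-centre vector; solid faces contribute 0 because
    # their thickness is stored as 0.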
washerFaceNum1 = washer1['IDs'][0]
washerFaceNum2 = washer2['IDs'][0]
washerNorm1 = Vector(ra.instances[washer1['instanceName']].faces[washerFaceNum1].getNormal())
washerNorm2 = Vector(ra.instances[washer2['instanceName']].faces[washerFaceNum2].getNormal())
f1tof2 = washer2['centre']-washer1['centre']
th1 = washer1['thickness']
if np.dot(washerNorm1, f1tof2) < 0: #if W1 norm is alligned with c2-c1
offset1 = th1 - (0.5+washer1['offset'])*th1
else:
offset1 = (0.5+washer1['offset'])*th1
th2 = washer2['thickness']
if np.dot(washerNorm2, f1tof2) > 0: #if W2 norm opposes c2-c1
offset2 = th2 - (0.5+washer2['offset'])*th2
else:
offset2 = (0.5+washer2['offset'])*th2
return [offset1, offset2]
def createWasherSurfaces(faceList1, faceList2, namePrefix):
"""function to create washerface surfaces with the correct orientation"""
centroidList1 = [face[1] for face in faceList1]
sum = Vector(0.,0.,0.)
for i in range(len(centroidList1)):
sum = sum + centroidList1[i]
centroid1 = sum/len(centroidList1)
centroidList2 = [face[1] for face in faceList2]
sum = Vector(0.,0.,0.)
for i in range(len(centroidList2)):
sum = sum + centroidList2[i]
centroid2 = sum/len(centroidList2)
VectorFace1ToFace2 = centroid2 - centroid1
#
side1Faces = []
side2Faces = []
for face, centroid in faceList1:
if np.dot(Vector(face.getNormal()), VectorFace1ToFace2) > 0:
side2Faces.append(ra.instances[face.instanceName].faces[face.index:face.index+1])
else:
side1Faces.append(ra.instances[face.instanceName].faces[face.index:face.index+1])
name1 = '{}Face1'.format(namePrefix)
ra.Surface(name = name1, side1Faces=side1Faces, side2Faces=side2Faces)
#
side1Faces = []
side2Faces = []
for face, centroid in faceList2:
if np.dot(Vector(face.getNormal()), VectorFace1ToFace2) < 0:
side2Faces.append(ra.instances[face.instanceName].faces[face.index:face.index+1])
else:
side1Faces.append(ra.instances[face.instanceName].faces[face.index:face.index+1])
name2 = '{}Face2'.format(namePrefix)
ra.Surface(name = name2, side1Faces=side1Faces, side2Faces=side2Faces)
return
def placeBolt(instanceName, p1,p2, offset =0.):
""" Places instance in assembly, alligning vector between nodes if instance sets
'end1' and 'end2' with vector p1 and p2. Places 'end1' on p1 and shifts by washer thickness
"""
vZ = Vector([0.,0.,1.])
instance = ra.instances[instanceName]
end1 = instance.sets['End1'].vertices[0].pointOn[0]
end2 = instance.sets['End2'].vertices[0].pointOn[0]
#Translate instance such that 'end1' lies on glogabl (0,0,0)
if end1 != (0.0, 0.0, 0.0):
translate = tuple([-i for i in end1])
ra.translate(instanceList=(instanceName, ), vector=translate)
end1 = instance.sets['End1'].vertices[0].pointOn[0]
end2 = instance.sets['End2'].vertices[0].pointOn[0]
#Rotate instance to lie on global Z axis
vB = Vector(end2)-Vector(end1) #Bolt vector
vBxy = Vector([vB[0],vB[1], 0.]) #Bolt vector on XY plane
vBxy_perp = Vector(-vBxy[1], vBxy[0], 0.) #Vector perpendiculst to vBxy
vBxy_rotate = -vB.angle(vZ)*180/pi #Angle to rotate from vBxy around vBxy_perp
ra.rotate(instanceList=(instanceName, ), axisPoint=(0.0, 0.0, 0.0),
axisDirection=tuple(vBxy_perp.array), angle=vBxy_rotate)
#Rotate to allign with p2-p1
vB = p2-p1 #Target bolt vector
vBxy = Vector([vB[0],vB[1], 0.]) #Target bolt vector on XY plane
vBxy_perp = (-vBxy[1], vBxy[0], 0.,) if vBxy.length() > 1E-14 else (1,0,0,) #Vector perpendiculst to vBxy
vBxy_rotate = vB.angle(vZ)*180/pi #Angle to rotate from vBxy around vBxy_perp
ra.rotate(instanceList=(instanceName, ), axisPoint=(0.0, 0.0, 0.0),
axisDirection=vBxy_perp, angle=vBxy_rotate)
#Translate instance to p1, offset by washer face
vBtrans = p1-vB.normal()*offset
ra.translate(instanceList=(instanceName, ), vector=tuple(vBtrans.array))
def createBoltSection(diameter, boltMaterialName):
sizeName = 'M{}'.format(int(diameter*Units))
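    # The expression below appears to be an empirical power-law fit to the ISO
    # metric thread tensile stress area (roughly 58 mm^2 for M10), converted to
    # the radius of an equivalent solid circular beam section in model units.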
stressRadius = ((diameter*Units)**2.061724*0.505537/1E6/pi)**0.5*(1000./Units)
model.CircularProfile(name=scriptOwner+'BoltProfile-'+sizeName, r=stressRadius)
model.BeamSection(name=scriptOwner+'BoltSection-'+sizeName,
integration=DURING_ANALYSIS, poissonRatio=0.0, profile=scriptOwner+'BoltProfile-'+sizeName,
material=boltMaterialName, temperatureVar=LINEAR, consistentMassMatrix=False)
def createBolt(length,diameter):
""" Creates bolt part using provided diameter and length
"""
boltName = scriptOwner+'Bolt-M{}-{:0.1f}mm'.format(int(diameter*Units), length*Units)
boltName = ''.join([b if b !='.' else 'p' for b in boltName])
boltEnd1SetName = 'End1'
boltEnd2SetName = 'End2'
# Create the part
sketch = model.ConstrainedSketch(name='__profile__', sheetSize=0.5)
sketch.Line(point1=(0.0, 0.0), point2=(length, 0.0))
part = model.Part(name=boltName, dimensionality=THREE_D,type=DEFORMABLE_BODY)
part = model.parts[boltName]
part.BaseWire(sketch=sketch)
del model.sketches['__profile__']
#Create sets
edges = part.edges
vertices = part.vertices
part.Set(edges=edges[:], name='All')
v1 = vertices.findAt(coordinates=((0,0,0),))
v2 = vertices.findAt(coordinates=((length,0,0),))
part.Set(vertices=v1, name=boltEnd1SetName)
part.Set(vertices=v2, name=boltEnd2SetName)
### Section part
part.PartitionEdgeByPoint(edge=edges[0], point=part.InterestingPoint(edge=edges[0],
rule=MIDDLE))
v3 = vertices.findAt(coordinates=((length/2.,0,0),))
part.Set(vertices=v3, name='BoltTension')
### Assign Section
region = part.sets['All']
sectionName = scriptOwner+'BoltSection-M{}'.format(int(diameter*Units))
part.SectionAssignment(region=region, sectionName=sectionName, offset=0.0,
offsetType=MIDDLE_SURFACE, offsetField='', thicknessAssignment=FROM_SECTION)
part.assignBeamSectionOrientation(region=region, method=N1_COSINES, n1=(0., 0., 1.))
# Create bolt tension surface
edges = part.edges
end2Edges = edges[0:1]
part.Surface(end2Edges=end2Edges, name='BoltTension')
#Mesh bolt
e = part.edges
pickedEdges = e.getSequenceFromMask(mask=e.getMask(), )
part.seedEdgeByNumber(edges=pickedEdges, number=1, constraint=FINER)
part.generateMesh()
def boltLength(lengthIn,boltLengthIncrement):
return ceil(lengthIn/boltLengthIncrement)*boltLengthIncrement
def affixBolt(fastener,allBoltHoles, myWasherFaces, boltMaterialStress, pretensionStepName):
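    # Steps: build oriented washer-face surfaces, instance the pre-meshed bolt part
    # and place it between the two hole centres, kinematically couple each bolt end
    # to its washer surface, then apply the pretension as a bolt load (force in the
    # pretension step, fixed length in the following step).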
#create faces
stepsNames = model.steps.keys()
temp1 = allBoltHoles[fastener['holes'][0]]
temp2 = allBoltHoles[fastener['holes'][1]]
hole1 = myWasherFaces[temp1[1]][temp1[2]]
hole2 = myWasherFaces[temp2[1]][temp2[2]]
facesList1 = [tuple([ra.instances[temp1[1]].faces[faceNum],hole1['centre']]) for faceNum in hole1['IDs']]
facesList2 = [tuple([ra.instances[temp2[1]].faces[faceNum],hole2['centre']]) for faceNum in hole2['IDs']]
namePrefix = scriptOwner+'Bolt-{}-M{}_'.format(fastener['boltNum'],fastener['diameter'] ) # Mod this naming if other fasteners are to be used
createWasherSurfaces(facesList1, facesList2, namePrefix)
#Insert instance and place in correct position
boltName = scriptOwner+'Bolt-M{}-{:0.1f}mm'.format(int(fastener['diameter']), fastener['length']*Units)
boltName = ''.join([b if b !='.' else 'p' for b in boltName])
instanceName = scriptOwner+'Bolt-{}-M{}-{:0.1f}mm'.format(fastener['boltNum'],int(fastener['diameter']), fastener['length']*Units)
instanceName = ''.join([b if b !='.' else 'p' for b in instanceName])
part = model.parts[boltName]
ra.Instance(name=instanceName, part=part, dependent=ON)
# offset = (fastener['length']-sum(fastener['offset'])-(hole1['centre']-hole2['centre']).length())/2+fastener['offset'][0]
offset = fastener['washerOffset']+fastener['offset'][0]
placeBolt(instanceName, hole1['centre'],hole2['centre'], offset = offset)
#Create Coupling A on end 1
couplingName= 'z-'+scriptOwner+'Bolt-{}-A'.format(fastener['boltNum'], temp1[1],temp2[1])
region1=ra.surfaces['{}Face1'.format(namePrefix)]
region2=ra.surfaces['{}Face2'.format(namePrefix)]
boltend = ra.instances[instanceName].sets['End1'].vertices
regionEnd = regionToolset.Region(vertices=boltend)
model.Coupling(name=couplingName, controlPoint=regionEnd, surface=region1,
influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC, localCsys=None,
u1=ON, u2=ON, u3=ON, ur1=ON, ur2=ON, ur3=ON)
#Create Coupling B on end 2
couplingName= 'z-'+scriptOwner+'Bolt-{}-B'.format(fastener['boltNum'], temp1[1],temp2[1])
boltend = ra.instances[instanceName].sets['End2'].vertices
regionEnd = regionToolset.Region(vertices=boltend)
model.Coupling(name=couplingName, controlPoint=regionEnd, surface=region2,
influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC, localCsys=None,
u1=ON, u2=ON, u3=ON, ur1=ON, ur2=ON, ur3=ON)
stressArea = (fastener['diameter']**2.061724*0.505537)/Units**2
pretension = (boltMaterialStress)*stressArea
loadName = 'z-'+instanceName
region = ra.instances[instanceName].surfaces['BoltTension']
model.BoltLoad(name=loadName, createStepName=pretensionStepName,
region=region, magnitude=pretension, boltMethod=APPLY_FORCE)
model.loads[loadName].setValuesInStep(stepName=stepsNames[2], boltMethod=FIX_LENGTH)
#============================= MAIN ======================================
def Main(modelName='', washerElsetName='', pretensionStepName='', boltMaterialProof='',
boltMaterialPretension='', boltInput='', boltLengthIncrement='', boltMaterialName='', UnitsSel='m'):
global model, ra, Units, scriptOwner
if UnitsSel == 'm':
Units = 1000.
else:
Units = 1.
model = mdb.models[modelName]
ra = model.rootAssembly
boltLengthIncrement = boltLengthIncrement/Units
### Basic constants
tol = 0.001
### Deletes all bolt related components from the model
for constraintKey in [key for key in model.constraints.keys() if 'z-'+scriptOwner+'Bolt-' in key]:
del model.constraints[constraintKey]
for surfaceKey in [key for key in ra.surfaces.keys() if scriptOwner+'Bolt-' in key]:
del ra.surfaces[surfaceKey]
for instanceKey in [key for key in ra.instances.keys() if scriptOwner+'Bolt-' in key]:
del ra.instances[instanceKey]
rePart = re.compile(scriptOwner+'Bolt-M(.*)-(.*)mm')
for partName in [string for string in model.parts.keys() if re.match(rePart, string)]:
del model.parts[partName]
for sectionKey in [key for key in model.sections.keys() if scriptOwner+'BoltSection-M' in key]:
del model.sections[sectionKey]
for profileKey in [key for key in model.profiles.keys() if scriptOwner+'BoltProfile-M' in key]:
del model.profiles[profileKey]
for loadKey in [key for key in model.loads.keys() if 'z-{}Bolt-'.format(scriptOwner) in key]:
del model.loads[loadKey]
### Ordered procedure
boltMaterialStress = boltMaterialProof*boltMaterialPretension
boltDiamList = [b[0] for b in boltInput]
washerODList = [b[1] for b in boltInput]
washerThickList = [b[2] for b in boltInput]
washers = {}
for b, w in zip(boltDiamList, washerThickList):
washers['M{}'.format(int(b))] = w/Units
##Probe for all possible bolt holes using potential washer faces
myWasherFaces = {}
for instanceName, instance in ra.instances.items():
try:
tempSet = instance.sets[washerElsetName]
except:
tempSet = []
if 'Set' in str(type(tempSet)):
count = 0
myWasherFaces[instanceName] = {}
for washerDiameter, boltDiameter in zip(washerODList, boltDiamList):
# print 'M{}, washer diam:{}'.format(str(boltDiameter),str(washerDiameter))
temp,count = findWasherFaceClusters(instance,washerElsetName,washerDiameter/Units,count)
for num, holeDict in temp.iteritems():
myWasherFaces[instanceName][num] = holeDict
allBoltHoles = []
n = 0
for instanceName, boltholes in myWasherFaces.iteritems():
for bolthole in boltholes.keys():
allBoltHoles += [[n, instanceName, bolthole]]
n += 1
## Match bolt holes
matchedBoltHoles = []
unmatchedHoles = []
notMidplaneFaces = []
n = 1
for holeNum, instanceName, boltholeKey in allBoltHoles:
if holeNum not in [item for sublist in [i['holes'] for i in matchedBoltHoles] for item in sublist]:
matchedBoltHoles = matchHole(holeNum, myWasherFaces, allBoltHoles, matchedBoltHoles,boltDiamList)
if len(matchedBoltHoles[-1]['holes']) == 2:
# print '-'*5
matchedBoltHoles[-1]['boltNum'] = n
washerThick = washerThickList[boltDiamList.index(matchedBoltHoles[-1]['diameter'])]/Units #$#
matchedBoltHoles[-1]['washerOffset'] = washerThick
matchedBoltHoles[-1]['length'] = boltLength(matchedBoltHoles[-1]['length']+2.*washerThick,boltLengthIncrement)
for holeNum in matchedBoltHoles[-1]['holes']:
if myWasherFaces[allBoltHoles[holeNum][1]][allBoltHoles[holeNum][2]]['offset'] != 0.0:
notMidplaneFaces += [matchedBoltHoles[-1]['boltNum']]
n += 1
else:
unmatchedHoles += matchedBoltHoles[-1]['holes']
del matchedBoltHoles[-1]
    # Creates a set with all unpaired potential washer faces
faces = []
for holeNum in unmatchedHoles:
instanceName = allBoltHoles[holeNum][1]
washerFaceNum = allBoltHoles[holeNum][2]
faceIDs = myWasherFaces[instanceName][washerFaceNum]['IDs']
faces += [ra.instances[instanceName].faces[n] for n in faceIDs]
faceSetList = []
for face in faces:
faceSetList.append(ra.instances[face.instanceName].faces[face.index:face.index+1])
if len(faceSetList)>0:
ra.Set(name='A - Unpaired Faces', faces=faceSetList)
#Create list of unique bolts in model
boltPartsInModel= []
for bolt in matchedBoltHoles:
# print bolt['diameter'], bolt['length']
newBolt = [bolt['diameter'], bolt['length']]
if newBolt not in boltPartsInModel:
boltPartsInModel += [newBolt]
## Creates the required bolt sections and bolt part
for diameter in set([b[0] for b in boltPartsInModel]):
createBoltSection(diameter/Units,boltMaterialName)
for diameter, length in boltPartsInModel:
createBolt(length,diameter/Units)
#Ensure preload and load steps are present
stepsNames = model.steps.keys()
if len(stepsNames) == 1:
model.StaticStep(name=pretensionStepName, previous='Initial', initialInc=0.1)
model.StaticStep(name='Step-1', previous=pretensionStepName, initialInc=0.1)
elif len(stepsNames) == 2 and stepsNames[1] == pretensionStepName:
model.StaticStep(name='Step-1', previous=pretensionStepName, initialInc=0.1)
elif len(stepsNames) > 1 and stepsNames[1] != pretensionStepName:
model.StaticStep(name=pretensionStepName, previous='Initial', initialInc=0.1)
stepsNames = model.steps.keys()
# Create coupling beteeen bolt-ends and washer faces
for fastener in matchedBoltHoles:
affixBolt(fastener,allBoltHoles, myWasherFaces,boltMaterialStress,pretensionStepName)
# notMidPlaneInstances = []
# for boltNum in notMidplaneFaces:
# notMidPlaneInstances += [key for key in ra.instances.keys() if 'Fastener-{}'.format(boltNum) in key]
# Feedback
boltInstanceNames = [key for key in ra.instances.keys() if scriptOwner+'Bolt' in key]
report = []
reportWidth = 45
spacer_1 = '='*reportWidth
spacer_2 = '-'*reportWidth
if len(boltInstanceNames)>0:
boltPartNames = [ra.instances[key].partName[len(scriptOwner)+5:] for key in boltInstanceNames]
tempSort = np.array([x.split('-')+[boltPartNames.count(x)] for x in set(boltPartNames)] )
tempSort = tempSort[tempSort[:,1].argsort()]
boltTable = tempSort[tempSort[:,0].argsort()]
report += ['\n',spacer_1, 'Bolting Assist Feedback',spacer_1]
if len(unmatchedHoles)>0:
            report += [fill('An assembly level set named "A - Unpaired Faces" has been created with all unpaired potential washer faces.\n',reportWidth),'']
report += [spacer_2]
# if len(notMidPlaneInstances)>0:
# report += [fill('The following instances were created in region where midplane surfaces were not used.\nThe axial positioning of the bolt instances must be checked.\n\n',reportWidth),'']
# report += notMidPlaneInstances
report += ['',spacer_1]
report += ['Bolts added to the assembly:\n']
boltTableHeading = ['Size', 'Length', 'Qty']
report += ['{:7s}{:>7s}{:>5s}'.format(*boltTableHeading)]
nBolts = 0
for b in boltTable:
name = b[0]
length = ''.join([c if c !='p' else '.' for c in b[1]][:-2])
number = int(b[2])
nBolts += number
report += ['{:7s}{:>7s}{:5}'.format(name, length, number)]
report += [' '*14+'-'*5]
report += ['{:7s}{:7s}{:5}'.format('TOTAL','',nBolts)]
report += [spacer_1]
else:
        report += [fill('An assembly level set named "A - Unpaired Faces" has been created with all unpaired potential washer faces.\n',reportWidth),'']
report += [spacer_2]
report += ['',spacer_1]
report += [fill('No bolts were added to the model. Possible causes for this include:\n',reportWidth), \
'- Incorrect units specified' ,
'- Incorrect washer face partitioning',
'- Incorrect washer face dimensions in the table',
'- Absence of matched bolt holes\n']
report += [spacer_1]
report = '\n'.join(report)
print report
| [
"[email protected]"
] | |
30343ba62bfc5feb037558b1c669973db6ef853a | d159ea11e4080536adf40125aaa37b7010a1bb7d | /pa2_s14_c10_v4.py | d8eafeb78a152d433810deca11d01ed1b5e00062 | [] | no_license | guanyilun/act-cutflow | 0ddd69b61c2102280e1f5ab4f4e2cd3619f654b9 | 3481cafc30b980c1649a66acc2fe4f708f8bcc44 | refs/heads/master | 2020-04-11T01:44:04.223441 | 2020-01-24T13:36:48 | 2020-01-24T13:36:48 | 161,424,708 | 0 | 0 | null | 2019-03-09T15:32:26 | 2018-12-12T02:58:30 | Python | UTF-8 | Python | false | false | 10,822 | py | from todloop import TODLoop
from todloop.tod import TODLoader
from routines.cuts import CutSources, CutPlanets, CutPartial, FindJumps, RemoveSyncPickup
from routines.tod import TransformTOD, FouriorTransform, GetDetectors, CalibrateTOD
from routines.analysis import AnalyzeScan, AnalyzeDarkLF, AnalyzeLiveLF, GetDriftErrors, \
AnalyzeLiveMF, AnalyzeHF
from routines.features import JesseFeatures
from routines.report import Summarize, PrepareDataLabelNew
##############
# parameters #
##############
DEPOT = "/mnt/act3/users/yilun/depot"
actpol_shared = "/mnt/act3/users/yilun/work/actpol_data_shared"
tag = "pa2_s14_c10_v4"
pickle_file = "/mnt/act3/users/yilun/share/pa2/%s_results.pickle" % tag
output_file = "outputs/%s.h5" % tag
n_train = 80
n_validate = 20
#############
# pipeline #
#############
# initialize the pipelines
train_loop = TODLoop()
validate_loop = TODLoop()
# test_loop = TODLoop()
# specify the list of tods to go through
train_loop.add_tod_list("inputs/%s_train.txt" % tag)
validate_loop.add_tod_list("inputs/%s_validate.txt" % tag)
# test_loop.add_tod_list("inputs/%s_test.txt" % tag)
################################
# add routines to the pipeline #
################################
def add_cut_routines(loop):
"""This function registers a series of common routines for cut
analysis. This is so that we don't have to keep repeating
ourselves to register these routines for each data set (train,
validate, test).
"""
# add a routine to load tod
loader_params = {
'output_key': 'tod',
'load_opts': {
'fix_sign': True,
'repair_pointing': True
}
}
loop.add_routine(TODLoader(**loader_params))
# add a routine to cut the sources
source_params = {
'inputs': {
'tod': 'tod'
},
'outputs': {
'tod': 'tod'
},
'tag_source': '%s_source' % tag,
'no_noise': True,
'depot': DEPOT,
'write_depot': True,
}
loop.add_routine(CutSources(**source_params))
# add a routine to cut the planets
planets_params = {
'inputs': {
'tod': 'tod'
},
'outputs': {
'tod': 'tod',
},
'tag_planet': '%s_planet' % tag,
'depot': DEPOT,
'pointing_par': {'source': 'fp_file', \
'filename': actpol_shared + "/RelativeOffsets/template_ar2_150201us.txt"
},
'mask_params': {
'radius': (8./60) #degrees
},
'mask_shift_generator': {
'source':'file',\
'filename':actpol_shared + '/TODOffsets/tod_offsets_2014_141104_v3.txt',
'columns': [0,3,4],
'rescale_degrees': 1./60
},
'write_depot': True,
}
loop.add_routine(CutPlanets(**planets_params))
# add a routine to remove the sync pick up
sync_params = {
'inputs': {
'tod': 'tod'
},
'outputs': {
'tod': 'tod',
},
'tag_sync': '%s' % tag,
'remove_sync': False,
'force_sync': False,
'depot': DEPOT,
'write_depot': True,
}
loop.add_routine(RemoveSyncPickup(**sync_params))
# add a routine to cut the glitches
partial_params = {
'inputs': {
'tod': 'tod'
},
'outputs': {
'tod': 'tod',
},
'tag_partial': '%s_partial' % tag,
'include_mce': True,
'force_partial': False,
'glitchp': { 'nSig': 10., 'tGlitch' : 0.007, 'minSeparation': 30, \
'maxGlitch': 50000, 'highPassFc': 6.0, 'buffer': 200 },
'depot': DEPOT,
'write_depot': True,
}
loop.add_routine(CutPartial(**partial_params))
# add a routine to transform the TODs
transform_params = {
'inputs': {
'tod': 'tod'
},
'outputs': {
'tod': 'tod',
},
'remove_mean': False,
'remove_median': True,
'detrend': False,
'remove_filter_gain': False,
'n_downsample': 1, # reduction with 2^n factor
}
loop.add_routine(TransformTOD(**transform_params))
# add a routine to analyze the scan
scan_params = {
'inputs': {
'tod': 'tod'
},
'outputs': {
'scan': 'scan_params',
}
}
loop.add_routine(AnalyzeScan(**scan_params))
# add a routine to get the relevant detectors to look at
BASE_DIR = '/mnt/act3/users/yilun/work/actpol_data_shared/ArrayData/2015/ar2/'
gd_params = {
'inputs': {
'tod': 'tod'
},
'outputs': {
'dets': 'dets',
},
'source': 'individual',
'live': BASE_DIR + 'live_ext2.dict',
'dark': BASE_DIR + 'dark_ext.dict',
'exclude': BASE_DIR + 'exclude.dict'
}
loop.add_routine(GetDetectors(**gd_params))
# add a routine to calibrate DAQ units to pW using flatfield and
# responsivity
cal_params = {
'inputs': {
'tod': 'tod',
'dets': 'dets',
},
'outputs': {
'tod': 'tod',
'cal': 'calData'
},
'flatfield': "/mnt/act3/users/mhasse/shared/actpol_shared_depot/FlatFields/2015/" + \
"ff_mixed_ar2_v0_actpol2_2015_c9_v0_photon_it0.dict",
'config': [{
"type": "depot_cal",
"depot": DEPOT,
"tag": "actpol2_2014_biasstep",
"name": "biasstep"
}, {
"type": "constant",
"value": 0.821018,
"name": "DC bias factor"
}],
'forceNoResp': True,
'calibrateTOD': True,
}
loop.add_routine(CalibrateTOD(**cal_params))
# add a routine to find jumps in TOD
jump_params = {
'inputs': {
'tod': 'tod'
},
'outputs':{
'jumps': 'jumps'
},
'dsStep': 4,
'window': 1,
}
loop.add_routine(FindJumps(**jump_params))
# add a routine to perform the fourior transform
fft_params = {
'inputs': {
'tod': 'tod'
},
'outputs': {
'tod': 'tod',
'fft': 'fft_data'
},
}
loop.add_routine(FouriorTransform(**fft_params))
# study the dark detectors using LF data
lf_dark_params = {
'inputs': {
'tod': 'tod',
'fft': 'fft_data',
'dets': 'dets',
'scan': 'scan_params',
},
'outputs': {
'lf_dark': 'lf_dark',
},
'cancelSync': False,
'doubleMode': False,
'freqRange': {
'fmin': 0.017,
'fshift': 0.009,
'band': 0.071,
'Nwin': 1,
},
}
loop.add_routine(AnalyzeDarkLF(**lf_dark_params))
# study the live detectors using LF data
lf_live_params = {
'inputs': {
'tod': 'tod',
'fft': 'fft_data',
'dets': 'dets',
'scan': 'scan_params',
'dark': 'lf_dark',
'cal': 'calData'
},
'outputs': {
'lf_live': 'lf_live',
},
'cancelSync': True,
'doubleMode': False,
'removeDark': True,
'freqRange': {
'fmin': 0.017,
'fshift': 0.009,
'band': 0.071,
'Nwin': 10,
},
'separateFreqs': False,
'darkModesParams' : {
'useDarks': True,
'useSVD': True,
'Nmodes': 1,
'useTherm': False
},
}
loop.add_routine(AnalyzeLiveLF(**lf_live_params))
# get the drift errors
de_params = {
'inputs': {
'tod': 'tod',
'fft': 'fft_data',
'dets': 'dets',
'scan': 'scan_params',
},
'outputs': {
'drift': 'drift',
},
'driftFilter': 0.036,
'nmodes': 3,
}
loop.add_routine(GetDriftErrors(**de_params))
# study the live detectors in mid-freq
mf_params = {
'inputs': {
'tod': 'tod',
'fft': 'fft_data',
'dets': 'dets',
'scan': 'scan_params',
},
'outputs': {
'mf_live': 'mf_live'
},
'midFreqFilter': [0.3, 1.0],
'nmodes': 8,
}
loop.add_routine(AnalyzeLiveMF(**mf_params))
# study the live and dark detectors in HF
hf_params = {
'inputs': {
'tod': 'tod',
'fft': 'fft_data',
'dets': 'dets',
'scan': 'scan_params',
},
'outputs': {
'hf': 'hf'
},
'getPartial': False,
'highFreqFilter': [9.0, 19.0],
'nLiveModes': 10,
'nDarkModes': 3,
'highOrder': True,
}
loop.add_routine(AnalyzeHF(**hf_params))
# add the routine to compute jesse's features
params = {
'inputs': {
'tod': 'tod',
},
'outputs': {
'results': 'jesse_features',
}
}
loop.add_routine(JesseFeatures(**params))
# summarize the pickle parameters
summary_params = {
'inputs': {
# calculated features to include in the report
'features': ['lf_live', 'drift', 'mf_live', 'hf', 'jumps',
'jesse_features'],
},
'outputs': {
'report': 'report',
}
}
loop.add_routine(Summarize(**summary_params))
return loop
#########
# train #
#########
# work on training data
train_loop = add_cut_routines(train_loop)
# save report and TOD data into an h5 file for
# future machine learning pipeline
prepare_params = {
'inputs': {
'tod': 'tod',
'report': 'report',
'dets': 'dets',
'fft': 'fft_data',
},
'pickle_file': pickle_file,
'output_file': output_file,
'group': 'train',
}
train_loop.add_routine(PrepareDataLabelNew(**prepare_params))
# run pipeline for training data
train_loop.run(0, n_train)
############
# validate #
############
# work on validation data
validate_loop = add_cut_routines(validate_loop)
# save report and TOD data into an h5 file for
# future machine learning pipeline
prepare_params.update({
'group': 'validate'
})
validate_loop.add_routine(PrepareDataLabelNew(**prepare_params))
# run pipeline for validation data
validate_loop.run(0, n_validate)
########
# test #
########
# # work on test data
# test_loop = add_cut_routines(test_loop)
# prepare_params.update({
# 'group': 'test'
# })
# test_loop.add_routine(PrepareDataLabelNew(**prepare_params))
# # run the pipeline for testdata
# test_loop.run(0, n_test)
########
# done #
########
| [
"[email protected]"
] | |
3f1c49d72fa189356632d260c761d1405c531bba | 53309442fbf23b02b9f8181a58e5e988f7c6e8f2 | /dlk/python/dlk/scripts/pylib/nnlib.py | 3481d35e730d0618d078acc297ad6c1427d29d78 | [
"Apache-2.0"
] | permissive | serihiro/blueoil | a12baa1224d2a7056de14e74bceebe7f80e30de8 | e538a08cb149c6f630263905819cc8c53a0a6081 | refs/heads/master | 2020-07-25T11:53:48.940466 | 2019-09-12T06:38:12 | 2019-09-12T06:38:12 | 208,280,175 | 0 | 0 | Apache-2.0 | 2019-09-13T14:22:40 | 2019-09-13T14:22:36 | null | UTF-8 | Python | false | false | 3,312 | py | # -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import ctypes as ct
import numpy as np
from numpy.ctypeslib import ndpointer
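# Minimal illustrative usage (the shared-library path below is hypothetical):
#   nn = NNLib()
#   nn.load("./lib_network.so")
#   nn.init()
#   output = nn.run(np.zeros(nn.get_input_shape(), dtype=np.float32))
#   nn.delete()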
class NNLib(object):
def __init__(self):
self.lib = None
self.nnlib = None
def load(self, libpath):
self.lib = ct.cdll.LoadLibrary(libpath)
self.lib.network_create.argtypes = []
self.lib.network_create.restype = ct.c_void_p
self.lib.network_init.argtypes = [ct.c_void_p]
self.lib.network_init.restype = ct.c_bool
self.lib.network_delete.argtypes = [ct.c_void_p]
self.lib.network_delete.restype = None
self.lib.network_get_input_rank.argtypes = [ct.c_void_p]
self.lib.network_get_input_rank.restype = ct.c_int
self.lib.network_get_output_rank.argtypes = [ct.c_void_p]
self.lib.network_get_output_rank.restype = ct.c_int
self.lib.network_get_input_shape.argtypes = [ct.c_void_p, ndpointer(ct.c_int32, flags="C_CONTIGUOUS")]
self.lib.network_get_input_shape.restype = None
self.lib.network_get_output_shape.argtypes = [ct.c_void_p, ndpointer(ct.c_int32, flags="C_CONTIGUOUS")]
self.lib.network_get_output_shape.restype = None
self.lib.network_run.argtypes = [
ct.c_void_p,
ndpointer(
ct.c_float,
flags="C_CONTIGUOUS"),
ndpointer(
ct.c_float,
flags="C_CONTIGUOUS"),
]
self.lib.network_run.restype = None
self.nnlib = self.lib.network_create()
return True
def init(self):
return self.lib.network_init(self.nnlib)
def delete(self):
if self.nnlib:
self.lib.network_delete(self.nnlib)
self.nnlib = None
self.lib = None
def __del__(self):
self.delete()
def get_input_rank(self):
return self.lib.network_get_input_rank(self.nnlib)
def get_output_rank(self):
return self.lib.network_get_output_rank(self.nnlib)
def get_input_shape(self):
r = self.get_input_rank()
s = np.zeros(r, np.int32)
self.lib.network_get_input_shape(self.nnlib, s)
return tuple(s)
def get_output_shape(self):
r = self.get_output_rank()
s = np.zeros(r, np.int32)
self.lib.network_get_output_shape(self.nnlib, s)
return tuple(s)
def run(self, tensor):
input = tensor.flatten().astype(np.float32)
output = np.zeros((self.get_output_shape()), np.float32)
self.lib.network_run(
self.nnlib,
input,
output)
return output
| [
"[email protected]"
] | |
44b803a1e237f3e47252977e2bb7d9fe4553a3ca | d39af45744a6220d30895126f2fc531e4d9ef2af | /tests/plugin2.py | 5cb8fbb6f23b82d34c8a17108cb5644f4fb3479c | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | hugovk/coveragepy | dce0e11e3244cd9f79d0c5a432dac7a7cce74b69 | 2e09055737aaa7a4c3d61bd1cb700ef528827036 | refs/heads/master | 2023-06-27T08:39:02.120975 | 2023-06-07T10:28:18 | 2023-06-07T10:28:18 | 19,588,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,293 | py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""A file tracer plugin for test_plugins.py to import."""
from __future__ import annotations
import os.path
from types import FrameType
from typing import Any, Optional, Set, Tuple
from coverage import CoveragePlugin, FileReporter, FileTracer
from coverage.plugin_support import Plugins
from coverage.types import TLineNo
try:
import third.render # pylint: disable=unused-import
except ImportError:
# This plugin is used in a few tests. One of them has the third.render
# module, but most don't. We need to import it but not use it, so just
# try importing it and it's OK if the module doesn't exist.
pass
class Plugin(CoveragePlugin):
"""A file tracer plugin for testing."""
def file_tracer(self, filename: str) -> Optional[FileTracer]:
if "render.py" in filename:
return RenderFileTracer()
return None
def file_reporter(self, filename: str) -> FileReporter:
return MyFileReporter(filename)
class RenderFileTracer(FileTracer):
"""A FileTracer using information from the caller."""
def has_dynamic_source_filename(self) -> bool:
return True
def dynamic_source_filename(
self,
filename: str,
frame: FrameType,
) -> Optional[str]:
if frame.f_code.co_name != "render":
return None
source_filename: str = os.path.abspath(frame.f_locals['filename'])
return source_filename
def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]:
lineno = frame.f_locals['linenum']
return lineno, lineno+1
class MyFileReporter(FileReporter):
"""A goofy file reporter."""
def lines(self) -> Set[TLineNo]:
# Goofy test arrangement: claim that the file has as many lines as the
# number in its name.
num = os.path.basename(self.filename).split(".")[0].split("_")[1]
return set(range(1, int(num)+1))
def coverage_init(
reg: Plugins,
options: Any, # pylint: disable=unused-argument
) -> None:
"""Called by coverage to initialize the plugins here."""
reg.add_file_tracer(Plugin())
| [
"[email protected]"
] | |
931e0759257b0d996fd365675e052b85bb3061bd | 97af8fc69f99073f000ef8da0256c8dcc4b62c5c | /graph/migrations/0001_initial.py | 4b39b43a2ba9aca245c6501aa1888e335177d984 | [] | no_license | belal-bh/django-mptt-example | 6af4525de881780688e26b7017013e8b8640860e | e341b65af32fbda2fc7f8f04192ca32f5d30d819 | refs/heads/main | 2023-03-15T21:44:52.678226 | 2021-03-02T13:40:38 | 2021-03-02T13:40:38 | 320,348,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,534 | py | # Generated by Django 3.1.4 on 2020-12-04 18:52
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Node',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=30, unique=True)),
('name', models.CharField(max_length=255)),
('is_verified', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=False)),
('updated', models.DateTimeField(auto_now=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('lft', models.PositiveIntegerField(editable=False)),
('rght', models.PositiveIntegerField(editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(editable=False)),
('connection', models.ManyToManyField(related_name='_node_connection_+', to='graph.Node')),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='graph.node')),
],
options={
'abstract': False,
},
),
]
| [
"[email protected]"
] | |
84f033145a45d5d825a7c732f5c3c30954b010cc | 576cc83449e10fd3f98281970c46016ea7a5aea2 | /OpenCV预处理/局部自适应阈值处理inv.py | 154af1ae5f36d50bf61283ada12b9c43f3c9eb18 | [] | no_license | HotView/PycharmProjects | 215ab9edd341e3293daebcf86d97537f8cd28d75 | 61393fe5ba781a8c1216a5cbe7e0d06149a10190 | refs/heads/master | 2020-06-02T07:41:53.608742 | 2019-11-13T08:31:57 | 2019-11-13T08:31:57 | 191,085,178 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | # 目标区域为偏暗色,即灰度值最小的地方为感兴趣区
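# (English: the target region is dark, i.e. the pixels with the lowest gray values form the region of interest)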
import cv2
import numpy as np
def Nothing(val):
    size = max(cv2.getTrackbarPos("size", "gray"), 1)  # blockSize = 2*size+1 must be odd and >= 3
    param = cv2.getTrackbarPos("param", "gray")
    thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 2*size + 1, param)
cv2.imshow("thresh", thresh)
img = cv2.imread("image/hand01.jpg")
img = cv2.GaussianBlur(img,(5,5),1)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#cv2.namedWindow("thresh")
cv2.namedWindow("gray")
cv2.createTrackbar("size","gray",0,300,Nothing)
cv2.createTrackbar("param","gray",0,100,Nothing)
cv2.imshow("gray",gray)
cv2.waitKey(0) | [
"[email protected]"
] | |
3bb894282823a496a43313fedc66a4b3f100aa8b | 49d416e5c94540ba19ce1218dd253158b9f1c37c | /src/allennlp_plugins/__init__.py | 091c329d91186498e95efe351648d91fe8919931 | [
"Apache-2.0"
] | permissive | j6mes/eacl2021-debias-finetuning | b05ba45508cef45e96cdf78d2182fe0a6edb541c | f3773c4a608c042c132bfe54e7cb63b142291c93 | refs/heads/main | 2023-03-26T01:39:11.674216 | 2021-03-27T21:35:52 | 2021-03-27T21:35:52 | 351,411,446 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | from debias_finetuning.commands.finetune_l2 import *
from debias_finetuning.commands.finetune_ewc import *
from debias_finetuning.commands.evaluate_with_overwrite import * | [
"[email protected]"
] | |
13b72aef7c8e90f7fff0839b4af94da5347f0931 | c0cbcf1d1b42495fdb70ad62d3bb954be2b0c322 | /learn/FileIO.py | d480471d13f9cbe0d21948a8307bd74da1ffd13e | [] | no_license | pranjalparmar/Learn-Python | 98ec11e9cab3d29d5e1e176e4b9ec3f4e232e78e | c377f8c0eca791b43ae55fae797e4f3fb6a3bcfc | refs/heads/main | 2023-02-28T17:42:53.271860 | 2021-02-06T11:18:45 | 2021-02-06T11:18:45 | 336,462,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | f = open("pranjal.txt","rt")
print(f.readlines())
# print(f.readline())
# print(f.readline())
# content = (f.readline())
# content = (f.readline())
# print("1",content)
# for line in f:
# print(line,end="")
f.close() | [
"[email protected]"
] | |
f1407cc95fbc90c393aa118b32271d74fc4e8720 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2690/60678/248819.py | 09b1630a9d7bb564180ece9ce7e5ce73c668ba77 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | times = int(input())
for loopTimes in range(0, times):
input()
stringList = input().split()
stringM = stringList[0]
stringS = stringList[1]
stringTest = stringM + ' ' + stringS
listM = list(stringM)
listS = list(stringS)
for i in range(0, len(stringS)):
while stringM.find(stringS[i]) != -1:
listM[stringM.find(stringS[i])] = '1'
stringM = ''.join(listM)
for i in range(0, len(listM)):
if listM[i] != '1':
listM[i] = '0'
stringM = ''.join(listM)
stringM = stringM.split('0')
index = 0
while index < len(stringM):
if stringM[index] == '':
del stringM[index]
index -= 1
index += 1
# print(len(stringM))
if len(stringM) == 4:
print(len(stringM))
else:
print(stringTest) | [
"[email protected]"
] | |
2304ae329181fdc87d288da984a9d02c5739dcb5 | ace7e98719c756cff4e4baf7c92e546cbc0b92ca | /LintCode/6-Linked List/2.covertSortedListToBalancedBST.py | a098c7d34acc77b54dd52275e5165369ed6a2091 | [] | no_license | armsky/OnlineJudge | f4159326c92a794695cca8a162280fef32f95a2a | c658b78c920aa94c25b3d932cd7e46c0df82b19a | refs/heads/master | 2020-04-15T01:21:18.158217 | 2015-12-11T03:05:28 | 2015-12-11T03:05:28 | 21,989,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,874 | py | """
Given a singly linked list where elements are sorted in ascending order, convert it to a height balanced BST.
Example
              2
1->2->3  =>  / \
            1   3
"""
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
class Solution:
"""
@param head: The first node of linked list.
@return: a tree node
"""
cur = None
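    # Bottom-up build: `cur` advances through the list exactly once while the
    # recursion assembles left subtree, root, right subtree, giving O(n) time.
    # The alternative sortedListToBST2 below re-walks the list to locate each
    # root and is O(n log n).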
def sortedListToBST(self, head):
global cur
if not head:
return None
size = self.getLen(head)
cur = head
return self.build(size)
def getLen(self, a):
n = 0
while a:
a = a.next
n += 1
return n
def build(self, size):
global cur
if size <= 0:
return None
left = self.build(size/2)
root = TreeNode(cur.val)
cur = cur.next
right = self.build(size -1 -size/2)
root.left = left
root.right = right
return root
# O(n log n) time
# No need to keep a global variable current_node
def sortedListToBST2(self, head):
if not head:
return head
size = self.getLen(head)
return self.construct(head, size)
def construct(self, head, size):
if not head or size==0:
return None
root = self.getNode(size/2, head)
root.left = self.construct(head, size/2)
root.right = self.construct(self.getNode(size/2 + 1, head), size - size/2 -1)
return root
def getNode(self, n, head):
for i in range(n):
head = head.next
return head
so = Solution()
a = ListNode(1)
a.next = ListNode(2)
a.next.next = ListNode(3)
print so.sortedListToBST(a).val
| [
"[email protected]"
] | |
3e472ab7973b0dfe437944cf0b307c2745160fd3 | f81c629865f0493500eaa2ab4e3337fd6603cf0c | /loqusdb/commands/cli.py | 6bc3d75fe6e36e25ab05477e35b792c64c45b9f6 | [] | no_license | robinandeer/loqusdb | fd8a49e325ae36169e16025793156e0a978c6716 | 15ae953589bbe51b24e549c03986bf2ea0ef6b0e | refs/heads/master | 2021-01-18T10:47:44.870986 | 2016-03-02T13:10:34 | 2016-03-02T13:10:34 | 52,794,952 | 0 | 0 | null | 2016-02-29T13:50:46 | 2016-02-29T13:50:46 | null | UTF-8 | Python | false | false | 2,473 | py | import click
from loqusdb.log import LEVELS, init_log
from loqusdb import logger, __version__
from loqusdb.plugins import MongoAdapter
@click.group()
@click.option('-db', '--database',
default='loqusdb',
show_default=True,
)
@click.option('-u', '--username',
type=str
)
@click.option('-p', '--password',
type=str
)
@click.option('-port', '--port',
default=27017,
show_default=True,
help='Specify the port where to look for the mongo database.'
)
@click.option('-h', '--host',
default='localhost',
show_default=True,
help='Specify the host where to look for the mongo database.'
)
@click.option('-b', '--backend',
default='mongo',
show_default=True,
type=click.Choice(['mongo',]),
help='Specify what backend to use.'
)
@click.option('-c', '--conn_host',
default='mongodb://',
show_default=True,
help='Used for testing.'
)
@click.option('-l', '--logfile',
type=click.Path(exists=False),
help=u"Path to log file. If none logging is "\
"printed to stderr."
)
@click.option('-v', '--verbose', count=True, default=1)
@click.version_option(__version__)
@click.pass_context
def cli(ctx, conn_host, database, username, password, port, host, verbose,
logfile, backend):
"""loqusdb: manage a local variant count database."""
# configure root logger to print to STDERR
loglevel = LEVELS.get(min(verbose,1), "INFO")
init_log(
logger = logger,
filename = logfile,
loglevel = loglevel
)
# mongo uri looks like:
#mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database][?options]]
uri = None
if username and password:
uri = "{0}{1}:{2}@{3}:{4}/{5}".format(
conn_host, username, password, host, port, database
)
logger.info('uri={0}'.format(uri))
adapter = MongoAdapter()
adapter.connect(
host=host,
port=port,
database=database,
uri=uri
)
ctx.obj = {}
ctx.obj['db'] = database
ctx.obj['user'] = username
ctx.obj['password'] = password
ctx.obj['port'] = port
ctx.obj['host'] = host
ctx.obj['adapter'] = adapter
| [
"[email protected]"
] | |
0f70d2f0a0efc9c22f33694d5afcb0cfafa6536a | 3c92c3f633b613a62fb67476fd617e1140133880 | /leetcode/1541. Minimum Insertions to Balance a Parentheses String.py | 096c25dacba75e51f24aca72ee3872d79ab37936 | [] | no_license | cuiy0006/Algorithms | 2787f36f8164ded5252a006f723b570c9091bee9 | 00fd1397b65c68a303fcf963db3e28cd35c1c003 | refs/heads/master | 2023-03-31T13:55:59.191857 | 2023-03-31T03:39:42 | 2023-03-31T03:39:42 | 75,001,651 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | class Solution:
def minInsertions(self, s: str) -> int:
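        # Greedy scan: every '(' must be closed by '))'. `left` counts unmatched
        # '('. A lone ')' costs one inserted ')'; any ')' block seen with no open
        # '(' costs one inserted '('. Each leftover '(' at the end costs two ')'.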
left = 0
cnt = 0
i = 0
while i < len(s):
if s[i] == '(':
left += 1
i += 1
else:
if i == len(s) - 1 or s[i+1] != ')':
cnt += 1
i += 1
else:
i += 2
if left == 0:
cnt += 1
else:
left -= 1
return cnt + left * 2
| [
"[email protected]"
] | |
fa4e63999c54c809d85a6e317a549ba970c56c7f | 7134e45563b2045837296cb5c4f1974a025e4f2b | /.history/SANDBOX_20201224193642.py | 2148b401bfd2f9db071178899facae6656fa4df3 | [] | no_license | Nordenbox/Nordenbox_Python_Fundmental | dca175c471ac2c64453cc4bcf291dd0773be4add | 9c79fd5d0dada580072b523d5aa1d72f996e3a22 | refs/heads/master | 2022-01-21T06:37:15.084437 | 2022-01-06T13:55:30 | 2022-01-06T13:55:30 | 240,154,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | a=((2,3,4),(2,3,4))
| [
"[email protected]"
] | |
0001c01bc8a101706f658bcd83d4b303d1d9be1c | 5aa80aab7a75d76b0aa838bf8f74a276a12c876e | /src/config/device-manager/device_manager/ansible_base.py | a0ed6248cc8be3bd6bb180ff2f16a4e6610a3fbe | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | tungstenfabric/tf-controller | 83b6d58afadb5697b540b5345711a5b2af90d201 | f825fde287f4eb2089aba2225ca73eeab3888040 | refs/heads/master | 2023-08-28T02:56:27.329584 | 2023-08-20T12:15:38 | 2023-08-20T12:31:34 | 231,070,970 | 55 | 29 | Apache-2.0 | 2023-07-23T01:38:17 | 2019-12-31T10:24:38 | C++ | UTF-8 | Python | false | false | 6,410 | py | #
# Copyright (c) 2017 Juniper Networks, Inc. All rights reserved.
#
"""
This file contains implementation plugin base class for device config module.
The specific plugins should inherit from this class.
"""
import abc
from builtins import object
from builtins import str
from .imports import import_ansible_plugins
#
# Base Class for all plugins. pluigns must implement all abstract methods
#
class AnsibleBase(object):
_plugins = {}
class PluginError(Exception):
"""Exception class to indicate plugin error."""
def __init__(self, plugin_info):
"""Initialize the exception with plugin info."""
self.plugin_info = plugin_info
# end __init__
def __str__(self):
"""Provide plugin info in exception details."""
return "Ansible Plugin Error, Configuration = %s" % \
str(self.plugin_info)
# end __str__
# end PluginError
class PluginsRegistrationFailed(Exception):
"""Exception class to indicate plugin registration error."""
def __init__(self, exceptions):
"""Initialize the exception with nested exceptions."""
self.exceptions = exceptions
# end __init__
def __str__(self):
"""Provide details of nested exception in exception message."""
ex_mesg = "Plugin Registrations Failed:\n"
for ex in self.exceptions or []:
ex_mesg += ex + "\n"
return ex_mesg
# end __str__
# end PluginsRegistrationFailed
def __init__(self, logger):
"""Initialize the plugin."""
self._logger = logger
self.commit_stats = {
'last_commit_time': '',
'last_commit_duration': '',
'commit_status_message': '',
'total_commits_sent_since_up': 0,
}
self.initialize()
self.device_connect()
# end __init__
# instantiate a plugin dynamically
@classmethod
def plugin(cls, vendor, product, params, logger):
pr = params.get("physical_router")
name = str(pr.physical_router_role) + ":" + \
str(vendor) + ":" + str(product)
if pr.physical_router_role and vendor and product:
pconf = AnsibleBase._plugins.get(pr.physical_router_role)
if pconf:
logger.info(
"Found ansible plugin pr=%s, role/vendor/product=%s" %
(pr.uuid, name))
pconf = pconf[0] # for now one only
inst_cls = pconf.get('class')
return inst_cls(logger, params)
logger.warning(
"No ansible plugin pr=%s, role/vendor/product=%s" %
(pr.uuid, name))
return None
# end plugin
# validate plugin name
def verify_plugin(self, vendor, product, role):
return self.is_role_supported(role)
# end verify_plugin
# register all plugins with device manager
@classmethod
def register_plugins(cls):
# make sure modules are loaded
import_ansible_plugins()
# register plugins, find all leaf implementation classes derived from
# this class
subclasses = set()
work = [cls]
while work:
parent = work.pop()
if not parent.__subclasses__():
subclasses.add(parent)
continue
for child in parent.__subclasses__():
if child not in subclasses:
work.append(child)
# register all plugins,
# if there is any exception, continue to register all other plugins,
# finally throw one single exception to the caller
exceptions = []
for scls in subclasses or []:
try:
scls.register()
except AnsibleBase.PluginError as e:
exceptions.append(str(e))
if exceptions:
raise cls.PluginsRegistrationFailed(exceptions)
# end register_plugins
@classmethod
def register(cls, plugin_info):
if not plugin_info or not plugin_info.get("roles"):
raise AnsibleBase.PluginError(plugin_info)
for role in plugin_info.get("roles"):
AnsibleBase._plugins.setdefault(
role.lower(), []).append(plugin_info)
# end register
@classmethod
def is_role_supported(cls, role):
"""Check if plugin is capable of supporting role."""
return False
# end is_role_supported
@abc.abstractmethod
def plugin_init(self, is_delete=False):
"""Initialize plugin."""
# end plugin_init
@abc.abstractmethod
def initialize(self):
"""Initialize local data structures."""
# end initialize
def validate_device(self):
return True
# def validate_device
@abc.abstractmethod
def update(self, params):
"""Update plugin intialization params."""
# end update
def clear(self):
"""Clear connections and data structures."""
self.initialize()
self.device_disconnect()
# end clear
@abc.abstractmethod
def device_connect(self):
"""Initialize the device connection and get the handle."""
pass
# end device_connect
@abc.abstractmethod
def device_disconnect(self):
"""Delete the device connection and and reset the handle."""
pass
# end device_disconnect
@abc.abstractmethod
def retry(self):
"""Retry send conf or not."""
return False
# end retry
@abc.abstractmethod
def device_get(self, filters={}):
"""Retrieve configuration from device for given filter parameters."""
return {}
# end device_get
def device_get_config(self, filters={}):
"""Retrieve entire device current configuration."""
return {}
# end device_get_config
@abc.abstractmethod
def get_commit_stats(self):
"""Return Commit Statistics if any."""
return self.commit_stats
# end device_get
@abc.abstractmethod
def push_conf(self, feature_configs=None, is_delete=False):
"""Push config to device."""
return 0
# end push_conf
@abc.abstractmethod
def get_service_status(self, service_params={}):
"""Get service status for a given service."""
return {}
# end get_service_status
# end AnsibleBase
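# Illustrative sketch (hypothetical plugin, kept as comments): a concrete subclass registers
# the role-to-class mapping that plugin() later looks up, and reports the roles it supports.
# class ExampleLeafPlugin(AnsibleBase):
#     _roles = ["leaf"]
#     @classmethod
#     def register(cls):
#         super(ExampleLeafPlugin, cls).register({"roles": cls._roles, "class": cls})
#     @classmethod
#     def is_role_supported(cls, role):
#         return role in cls._roles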
| [
"[email protected]"
] | |
958e29edf8b1e663aabc6944aef2aae04ecbf3d5 | f995860ad78fc266d04b03c3478c74e989d8b568 | /PE/pe0493.py | 5ea1aa17c923d1333d653be374eade36e150d859 | [] | no_license | 196884/Python | edd0234fd72a40d7a0b3310776edcaa8bda74478 | 8dc2e7a32dd350227cde748600e713dc3eea3f4a | refs/heads/master | 2016-09-06T19:26:19.860746 | 2015-11-09T00:09:23 | 2015-11-09T00:09:23 | 28,167,634 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py | from mpmath import *
mp.dps = 30
def sortFn((a, b)):
return a
def evolvePos(c, i):
# c is the configuration
# i is the index chosen
r = list(c)
(ni, ki) = c[i]
f = ni * ki
if ki > 1:
r[i] = (ni, ki-1)
else:
r.pop(i)
if ni > 1:
n = len(r)
found = False
for i in range(0, n):
if r[i][0] == ni - 1:
r[i] = (r[i][0], r[i][1]+1)
found = True
if not found:
r.append((ni-1, 1))
r.sort(key = sortFn)
return (f, tuple(r))
def handlePick(d, total):
r = dict()
for c, proba in d.iteritems():
nc = len(c)
for i in range(0, nc):
(f, cb) = evolvePos(c, i)
thisProba = proba * mpf(f) / mpf(total)
prevProba = r.get(cb, mpf(0))
r[cb] = prevProba + thisProba
return r
def nbColors(c):
l = list(c)
(n, k) = l[-1]
if n == 10:
return 7 - k
else:
return 7
def solve():
# Bruteforcing it...
d = dict()
d[((9,1),(10,6))] = mpf(1)
total = 69
for k in range(0, 19):
d = handlePick(d, total)
total -= 1
r = mpf(0)
for c, p in d.iteritems():
n = nbColors(c)
r = r + mpf(n) * p
return r
if __name__ == "__main__":
result = solve()
print "Result: %s" % result
| [
"[email protected]"
] | |
9ad0fc7bf19e3d9004fa97afe0d0cfd173119ba0 | ce4f7f8e9336b8bbf9cbfe147d922e37034ab6c3 | /old/ABC152C.py | 26f273585904a48a570ae025e08fea57f3a7bcb9 | [] | no_license | kussy-tessy/atcoder | 5604919747242ee9740b9131bb6e168e96af0151 | ee917fa5a5218d4a9e72f710d0d844e7c203f13b | refs/heads/master | 2023-07-21T09:25:15.464881 | 2021-09-04T14:06:02 | 2021-09-04T14:06:02 | 311,221,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | # print('input >>')
N = int(input())
P = list(map(int,(input().split())))
ans = 0
now = P[0]
for p in P:
if now >= p:
ans += 1
now = p
# print('-----output-----')
print(ans) | [
"[email protected]"
] | |
d53121b2ae4fd928addc43c3fa35c1600044f7fe | e61e664d95af3b93150cda5b92695be6551d2a7c | /vega/common/dag.py | d553317dfc081b51702747ecdfc7fda8fb0ea527 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | huawei-noah/vega | 44aaf8bb28b45f707ed6cd4e871ba70fc0c04846 | 12e37a1991eb6771a2999fe0a46ddda920c47948 | refs/heads/master | 2023-09-01T20:16:28.746745 | 2023-02-15T09:36:59 | 2023-02-15T09:36:59 | 273,667,533 | 850 | 184 | NOASSERTION | 2023-02-15T09:37:01 | 2020-06-20T08:20:06 | Python | UTF-8 | Python | false | false | 2,995 | py | """DAG class."""
from collections import deque
from collections import OrderedDict
class DAG:
"""DAG."""
def __init__(self):
"""Init DAG."""
self.nodes = OrderedDict()
def add_node(self, node):
"""Add node."""
if node not in self.nodes:
self.nodes[node] = set()
def remove_node(self, node):
"""Remove node."""
if node in self.nodes:
self.nodes.pop(node)
for pre_node, nodes in iter(self.nodes.items()):
if node in nodes:
nodes.remove(node)
def add_edge(self, pre_node, node):
"""Add edge."""
if pre_node not in self.nodes or node not in self.nodes:
return
self.nodes[pre_node].add(node)
def remove_edge(self, pre_node, node):
"""Remove edge."""
if pre_node in self.nodes and node in self.nodes[pre_node]:
self.nodes[pre_node].remove(node)
def from_dict(self, dict_value):
"""Construct DAG from dict."""
self.nodes = OrderedDict()
for node in iter(dict_value.keys()):
self.add_node(node)
for pre_node, nodes in iter(dict_value.items()):
if not isinstance(nodes, list):
raise TypeError('dict values must be lists')
for node in nodes:
self.add_edge(pre_node, node)
def next_nodes(self, node):
"""Get all successor of the node."""
return list(self.nodes[node])
def pre_nodes(self, node):
"""Get all predecessor of the node."""
return [item for item in self.nodes if node in self.nodes[item]]
def topological_sort(self):
"""Topological sort."""
in_degree = {node: 0 for node in self.nodes}
out_degree = {node: 0 for node in self.nodes}
for node in self.nodes:
            out_degree[node] = len(self.nodes[node])  # number of successors of this node
for next_node in self.nodes[node]:
in_degree[next_node] += 1
ret = []
stack = deque()
for node in in_degree:
if in_degree[node] == 0:
stack.append(node)
while len(stack) > 0:
node = stack.pop()
for item in self.nodes[node]:
in_degree[item] -= 1
if in_degree[item] == 0:
stack.append(item)
ret.append(node)
if len(ret) != len(self.nodes):
raise ValueError("Not a directed acyclic graph")
return ret
def ind_nodes(self):
"""Independent nodes."""
in_degree = {node: 0 for node in self.nodes}
for node in self.nodes:
for next_node in self.nodes[node]:
in_degree[next_node] += 1
ret = set(node for node in self.nodes if in_degree[node] == 0)
return ret
def size(self):
"""Return the size of graph."""
return len(self.nodes)
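if __name__ == "__main__":
    # Minimal usage sketch of the DAG class defined above.
    dag = DAG()
    dag.from_dict({"a": ["b", "c"], "b": ["d"], "c": ["d"], "d": []})
    print(dag.ind_nodes())         # {'a'}
    print(dag.topological_sort())  # e.g. ['a', 'c', 'b', 'd']; order of independent branches may vary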
| [
"[email protected]"
] | |
3ac427369f889bc6c292870e64829699d06f8edb | 7dfe9854a1d5e4a7b37c8d8ecbd99469f984cfe1 | /altimeter/core/graph/field/dict_field.py | 3f5bff2a4960a761f6fdeb644758a2817f9526a3 | [
"MIT",
"Python-2.0"
] | permissive | jparten/altimeter | 92c9d21dcaec144f29f22030b6722a2baf0678ed | 956cf7f7c2fe443751b8da393a764f8a7bb82348 | refs/heads/master | 2020-11-26T19:01:55.641606 | 2019-12-20T16:46:32 | 2019-12-20T16:46:32 | 229,180,153 | 0 | 0 | MIT | 2019-12-20T03:06:35 | 2019-12-20T03:06:34 | null | UTF-8 | Python | false | false | 10,850 | py | """Dict Fields represent fields which consist of dict-like data."""
from copy import deepcopy
from typing import Dict, Any, List
from altimeter.core.graph.field.exceptions import (
DictFieldValueNotADictException,
DictFieldSourceKeyNotFoundException,
DictFieldValueIsNullException,
)
from altimeter.core.graph.field.base import Field, SubField
from altimeter.core.graph.field.util import camel_case_to_snake_case
from altimeter.core.graph.link.links import MultiLink
from altimeter.core.graph.link.base import Link
class DictField(Field):
"""A DictField is a field where the input is a JSON object containing a key (source_key)
where the corresponding value is a dictionary.
Examples:
A dictionary containing two ScalarFields:
>>> from altimeter.core.graph.field.scalar_field import ScalarField
>>> input = {"Person": {"FirstName": "Bob", "LastName": "Smith"}}
>>> field = DictField("Person", ScalarField("FirstName"), ScalarField("LastName"))
>>> links = field.parse(data=input, context={})
>>> print([link.to_dict() for link in links])
[{'pred': 'person', 'obj': [{'pred': 'first_name', 'obj': 'Bob', 'type': 'simple'}, {'pred': 'last_name', 'obj': 'Smith', 'type': 'simple'}], 'type': 'multi'}]
Args:
source_key: Name of the key in the input JSON
fields: fields inside this DictField
alti_key: Optional key name to be used in the graph. By default
this is set to the source key converted to snake case.
optional: Whether this key is optional. Defaults to False.
"""
def __init__(
self, source_key: str, *fields: Field, alti_key: str = None, optional: bool = False
) -> None:
self.source_key = source_key
self.alti_key = alti_key if alti_key else camel_case_to_snake_case(self.source_key)
self.optional = optional
self.fields = fields
def parse(self, data: Dict[str, Any], context: Dict[str, Any]) -> List[Link]:
"""Parse this field and return a list of Links.
Args:
data: dictionary of data to parse
context: context dict containing data from higher level parsing code.
Returns:
List of MultiLink objects.
Raises:
DictFieldSourceKeyNotFoundException if self.source_key is not in data.
DictFieldValueNotADictException if the data does not appear to represent a dict.
"""
if self.source_key not in data:
if self.optional:
return []
raise DictFieldSourceKeyNotFoundException(
f"Expected key '{self.source_key}' in data, present keys: {', '.join(data.keys())}"
)
field_data = data.get(self.source_key, {})
if not isinstance(field_data, dict):
raise DictFieldValueNotADictException(
(
f"Key '{self.source_key}' value was expected to "
f"contain a dict, actual: {field_data} "
f"({type(field_data)})"
)
)
links: List[Link] = []
updated_context = deepcopy(context)
updated_context.update({"parent_alti_key": self.alti_key})
for field in self.fields:
sub_links = field.parse(field_data, updated_context)
links += sub_links
return [MultiLink(pred=self.alti_key, obj=links)]
class AnonymousDictField(Field):
"""An AnonymousDictField is a DictField where the source_key of the field is discarded
and not used as a name in the resulting graph. See Examples below for more clarity.
Args:
source_key: Name of the key in the input JSON
fields: fields inside this DictField
optional: Whether this key is optional. Defaults to False.
nullable: Whether this field's value can be null.
Examples:
A dict containing 3 ScalarFields
>>> from altimeter.core.graph.field.scalar_field import ScalarField
>>> input = {"Person": {"FirstName": "Bob", "LastName": "Smith"}}
>>> field = AnonymousDictField("Person", ScalarField("FirstName"), ScalarField("LastName"))
>>> links = field.parse(data=input, context={})
>>> for link in links: print(link.to_dict())
{'pred': 'first_name', 'obj': 'Bob', 'type': 'simple'}
{'pred': 'last_name', 'obj': 'Smith', 'type': 'simple'}
"""
def __init__(
self, source_key: str, *fields: Field, optional: bool = False, nullable: bool = False
):
self.source_key = source_key
self.fields = fields
self.optional = optional
self.nullable = nullable
def parse(self, data: Dict[str, Any], context: Dict[str, Any]) -> List[Link]:
"""Parse this field and return a list of Links.
Args:
data: dictionary of data to parse
context: context dict containing data from higher level parsing code.
Returns:
List of Link objects.
Raises:
DictFieldSourceKeyNotFoundException if self.source_key is not in data.
DictFieldValueNotADictException if the data does not appear to represent a dict.
"""
if self.source_key in data:
field_data = data.get(self.source_key, None)
if field_data is None:
if self.nullable:
return []
raise DictFieldValueIsNullException(
f"Key '{self.source_key} was expected to contain a dict, was null."
)
if not isinstance(field_data, dict):
raise DictFieldValueNotADictException(
(
f"Key '{self.source_key}' value expected to "
f"contain a dict, actual: {field_data} "
f"({type(field_data)})"
)
)
links: List[Link] = []
for field in self.fields:
sub_links = field.parse(field_data, context)
links += sub_links
return links
if self.optional:
return []
raise DictFieldSourceKeyNotFoundException(
f"Expected key '{self.source_key}' in data, present keys: {', '.join(data.keys())}"
)
class EmbeddedDictField(SubField):
"""An EmbeddedDictField is a field where the input is a JSON object. Generally this field
is used inside a ListField.
Args:
fields: fields inside this DictField
Examples:
A ListField containing an EmbeddedDictField with two ScalarFields:
>>> from altimeter.core.graph.field.list_field import ListField
>>> from altimeter.core.graph.field.scalar_field import ScalarField
>>> input = {"People": [{"FirstName": "Bob", "LastName": "Smith"},\
{"FirstName": "Alice", "LastName": "Smith"}]}
>>> field = ListField("People", EmbeddedDictField(ScalarField("FirstName"),\
ScalarField("LastName")))
>>> links = field.parse(data=input, context={})
>>> for link in links: print(link.to_dict())
{'pred': 'people', 'obj': [{'pred': 'first_name', 'obj': 'Bob', 'type': 'simple'}, {'pred': 'last_name', 'obj': 'Smith', 'type': 'simple'}], 'type': 'multi'}
{'pred': 'people', 'obj': [{'pred': 'first_name', 'obj': 'Alice', 'type': 'simple'}, {'pred': 'last_name', 'obj': 'Smith', 'type': 'simple'}], 'type': 'multi'}
"""
def __init__(self, *fields: Field) -> None:
self.fields = fields
def parse(self, data: Dict[str, Any], context: Dict[str, Any]) -> List[Link]:
"""Parse this field and return a list of Links.
Args:
data: dictionary of data to parse
context: context dict containing data from higher level parsing code.
Returns:
List of MultiLink objects.
Raises:
DictFieldSourceKeyNotFoundException if self.source_key is not in data.
DictFieldValueNotADictException if the data does not appear to represent a dict.
"""
parent_alti_key = self.get_parent_alti_key(data, context)
if not isinstance(data, dict):
raise Exception(f"{type(data)} {data} was expected to be a dict.")
links: List[Link] = []
updated_context = deepcopy(context)
updated_context.update({"parent_alti_key": parent_alti_key})
for field in self.fields:
sub_links = field.parse(data, updated_context)
links += sub_links
return [MultiLink(pred=parent_alti_key, obj=links)]
class AnonymousEmbeddedDictField(Field):
"""An AnonymousEmbeddedDictField is a EmbeddedDictField where the source_key of the parent
field is discarded and not used as a name in the resulting graph. See Examples below for more
clarity.
Args:
fields: fields inside this DictField
Examples:
A ListField containing an AnonymousEmbeddedDictField with two ScalarFields:
>>> from altimeter.core.graph.field.list_field import ListField
>>> from altimeter.core.graph.field.scalar_field import ScalarField
>>> input = {"People": [{"FirstName": "Bob", "LastName": "Smith"},\
{"FirstName": "Alice", "LastName": "Smith"}]}
>>> field = ListField("People", AnonymousEmbeddedDictField(ScalarField("FirstName"),\
ScalarField("LastName")))
>>> links = field.parse(data=input, context={})
>>> for link in links: print(link.to_dict())
{'pred': 'first_name', 'obj': 'Bob', 'type': 'simple'}
{'pred': 'last_name', 'obj': 'Smith', 'type': 'simple'}
{'pred': 'first_name', 'obj': 'Alice', 'type': 'simple'}
{'pred': 'last_name', 'obj': 'Smith', 'type': 'simple'}
"""
def __init__(self, *fields: Field) -> None:
self.fields = fields
def parse(self, data: Dict[str, Any], context: Dict[str, Any]) -> List[Link]:
"""Parse this field and return a list of Links.
Args:
data: dictionary of data to parse
context: context dict containing data from higher level parsing code.
Returns:
List of Link objects.
Raises:
DictFieldValueNotADictException if the data does not appear to represent a dict.
"""
if not isinstance(data, dict):
raise DictFieldValueNotADictException(f"{type(data)} {data} was expected to be a dict.")
links: List[Link] = []
for field in self.fields:
sub_links = field.parse(data, context)
links += sub_links
return links
| [
"[email protected]"
] | |
349d0a7b86159e2b854df8311790ec362c606538 | a561673adf29beb7939052b898dad5bf9167cefc | /sdk/python/lib/test/langhost/resource_thens/test_resource_thens.py | a970b60ef82197cc50194e0d28317a312639a605 | [
"Apache-2.0"
] | permissive | orionstudt/pulumi | 50fd75d4ec7bb48646cd3c83198afcf4a556a5fa | 7ef0b83c0cc7c4f9093e2a8fc0303e875d35c15c | refs/heads/master | 2023-08-12T13:57:32.605402 | 2021-10-18T12:24:46 | 2021-10-18T12:24:46 | 312,097,288 | 0 | 1 | Apache-2.0 | 2021-01-11T17:12:44 | 2020-11-11T21:43:03 | null | UTF-8 | Python | false | false | 2,961 | py | # Copyright 2016-2021, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import path
from ..util import LanghostTest
class ResourceThensTest(LanghostTest):
"""
Test that tests Pulumi's ability to track dependencies between resources.
ResourceA has an (unknown during preview) output property that ResourceB
depends on. In all cases, the SDK must inform the engine that ResourceB
depends on ResourceA. When not doing previews, ResourceB has a partial view
of ResourceA's properties.
"""
def test_resource_thens(self):
self.run_test(
program=path.join(self.base_path(), "resource_thens"),
expected_resource_count=2)
def register_resource(self, _ctx, _dry_run, ty, name, _resource, _dependencies, _parent, _custom, protect,
_provider, _property_deps, _delete_before_replace, _ignore_changes, _version, _import,
_replace_on_changes):
if ty == "test:index:ResourceA":
self.assertEqual(name, "resourceA")
self.assertDictEqual(_resource, {"inprop": 777, "inprop_2": 42})
urn = self.make_urn(ty, name)
res_id = ""
props = {}
if not _dry_run:
res_id = name
props["outprop"] = "output yeah"
return {
"urn": urn,
"id": res_id,
"object": props
}
if ty == "test:index:ResourceB":
self.assertEqual(name, "resourceB")
self.assertListEqual(_dependencies, ["test:index:ResourceA::resourceA"])
if _dry_run:
self.assertDictEqual(_resource, {
# other_in is unknown, so it is not in the dictionary.
# other_out is unknown, so it is not in the dictionary.
# other_id is also unknown so it is not in the dictionary
})
else:
self.assertDictEqual(_resource, {
"other_in": 777,
"other_out": "output yeah",
"other_id": "resourceA",
})
res_id = ""
if not _dry_run:
res_id = name
return {
"urn": self.make_urn(ty, name),
"id": res_id,
"object": {}
}
self.fail(f"unknown resource type: {ty}")
| [
"[email protected]"
] | |
d4d1407c5e94cdaedf63ccc88e1092cafd364240 | d77e61d5a9eb08736d5c3621896a66ab970ccea6 | /python/problems/array/remove_duplicates_in_place_sorted_array.py | 1e9ee54b027ac37bddc5ee0063c85f820184fad4 | [] | no_license | santhosh-kumar/AlgorithmsAndDataStructures | edc1a296746e2d2b0e1d4c748d050fe12af7b65f | 11f4d25cb211740514c119a60962d075a0817abd | refs/heads/master | 2022-11-15T00:22:53.930170 | 2020-07-10T03:31:30 | 2020-07-10T03:31:30 | 269,263,401 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,888 | py | """
Remove Duplicates In-Place For a Sorted Array
Given a sorted array, the task is to remove the duplicate elements from the array.
Examples:
Input : arr[] = {2, 2, 2, 2, 2}
Output : arr[] = {2}
new size = 1
Input : arr[] = {1, 2, 2, 3, 4, 4, 4, 5, 5}
Output : arr[] = {1, 2, 3, 4, 5}
new size = 5
"""
from common.problem import Problem
class RemoveDuplicatesInPlaceSortedArray(Problem):
"""
RemoveDuplicatesInPlaceSortedArray
"""
PROBLEM_NAME = "RemoveDuplicatesInPlaceSortedArray"
def __init__(self, input_list):
"""RemoveDuplicatesInPlaceSortedArray
Args:
input_list: Contains a list of integers
Returns:
None
Raises:
None
"""
assert (len(input_list) > 0)
super().__init__(self.PROBLEM_NAME)
self.input_list = input_list
def solve(self):
"""Solve the problem
        Note: operates in place, using O(1) extra space.
Args:
Returns:
integer
Raises:
None
"""
print("Solving {} problem ...".format(self.PROBLEM_NAME))
i = 0
while i < len(self.input_list):
j = i + 1
# iterate till we find the next non-duplicate and an increasing value
while j < len(self.input_list) and (
self.input_list[i] == self.input_list[j] or self.input_list[i] > self.input_list[j]):
j = j + 1
# swap with the next position if within the allowed size
if (i + 1) < len(self.input_list) and j < len(self.input_list):
self.input_list[i + 1], self.input_list[j] = self.input_list[j], self.input_list[i + 1]
else:
                # j has run past the end: indices 0..i hold the deduplicated values
                return i + 1
            i = i + 1
        return i + 1
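if __name__ == "__main__":
    # Minimal usage sketch (assumes the common.problem package is importable):
    # the second docstring example should report a new size of 5.
    print(RemoveDuplicatesInPlaceSortedArray([1, 2, 2, 3, 4, 4, 4, 5, 5]).solve())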
| [
"[email protected]"
] | |
f736e2ab60d9421320cc2b75114b50859b38d7ad | 58e43a375b01413c978a05b49c935240d1656f1c | /venv/bin/pip3.7 | 524ca44fd2d6364317caac63a1ab0447f64a1d4c | [] | no_license | themockingjester/Athena | de876822ce13a2a29d3437fd22b80a785e7977e7 | 86716c29e0807930f4d360ffe548ca92c54e2758 | refs/heads/master | 2022-11-29T21:36:59.615655 | 2020-07-29T06:55:54 | 2020-07-29T06:55:54 | 279,047,059 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 399 | 7 | #!/root/PycharmProjects/Athena/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| [
"[email protected]"
] | |
13e4c32ff331ce50e66711f5334464b084c2f06b | 9a3430749300a93b34b20e37505c8b1c0f7a79cf | /fixrequests.py | bf485d6785d7ccdc18e2e9a35e473522715d4c5c | [] | no_license | qyguo/makegridpacks | c127bcb83ea24bc1a6f06d7d5dce2eb4d5735367 | cd4beb1e92dbec3f074305d07c15e2f10c7ae67c | refs/heads/master | 2020-03-31T22:08:16.978758 | 2018-10-10T18:34:35 | 2018-10-10T18:34:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | #!/usr/bin/env python
import argparse
from makegridpacks import *
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dry-run", "-n", action="store_true", help="don't send anything to McM")
parser.add_argument("--times", "-t", action="store_true", help="get the times from McM for all requests")
args = parser.parse_args()
with RequestQueue() as queue:
for productionmode in "ggH", "VBF", "WplusH", "WminusH", "ZH", "ttH":
for decaymode in "4l", "2l2nu", "2l2q":
for mass in getmasses(productionmode, decaymode):
sample = POWHEGJHUGenMassScanMCSample(productionmode, decaymode, mass)
if (sample.needsupdate or args.times) and sample.prepid and os.path.exists(sample.cvmfstarball):
sample.gettimepereventfromMcM()
print sample
if sample.needsupdate and not args.dry_run:
queue.addrequest(sample, useprepid=True)
| [
"[email protected]"
] | |
e6440664549037faeeda37d40990091d4fdf3dbc | 775f887ab0933c8bb9263febceb702974966bb48 | /packages/pyright-internal/src/tests/samples/expressions8.py | 76f1a96de2a800e729e324c34729664d0230e80d | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | isabella232/pyright | 160a4d9ce366cb61946949f9d5aebe7457539c67 | a192486099503353413e02078c41d0d82bd696e8 | refs/heads/master | 2023-03-13T05:04:51.852745 | 2021-03-03T07:51:18 | 2021-03-03T07:51:18 | 344,101,663 | 0 | 0 | NOASSERTION | 2021-03-03T11:24:10 | 2021-03-03T11:21:38 | null | UTF-8 | Python | false | false | 499 | py | # This sample verifies that binary expressions like "less than"
# work if the operands are constrained TypeVars.
from abc import abstractmethod
from typing import Protocol, TypeVar
_T = TypeVar("_T")
class ComparableTo(Protocol[_T]):
@abstractmethod
def __lt__(self, x: _T) -> bool:
pass
def custom_compare(a: ComparableTo[_T], b: _T) -> bool:
return a < b
custom_compare("first", "second")
custom_compare(3, 2)
# This should generate an error.
custom_compare(3, "hi")
| [
"[email protected]"
] | |
36d9e40222f66557c42c81c7b1deadefa3382594 | 87130a19d9fa51d9b500d73ea9717ba16465f0f6 | /backend/api/errors.py | d87230fd995b428781bf85436ec04c0686447947 | [] | no_license | toyscript/toyscript | f4f236a8d1941565e6e5ed86bbb6417db73e5e2f | ac31a8ccf0f77226d7def3c6cb2744c521a89ff9 | refs/heads/main | 2023-06-06T19:32:45.642996 | 2021-07-07T04:50:42 | 2021-07-07T04:50:42 | 360,021,820 | 1 | 6 | null | 2021-06-19T08:40:39 | 2021-04-21T03:32:40 | JavaScript | UTF-8 | Python | false | false | 185 | py | class MovieDoesNotExist(Exception):
def __init__(self, message="해당 영화를 찾을 수 없습니다.", status=404):
self.message = message
self.status = status
| [
"[email protected]"
] | |
5312710fddbf6c8c78ed25f3ba26ec034c290fe6 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/program/database/data/CategoryDBAdapter.pyi | 67dbfd06673607d5cb56cc5c8b3474cf3ffe7bce | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 508 | pyi | import java.lang
from typing import overload
class CategoryDBAdapter(object):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| [
"[email protected]"
] | |
258e40712331ad317f9ddc190c8e084e68f8b142 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4137/codes/1575_1334.py | 4a02531f8dd05fe3a3c2859b82ad6825f61ec54d | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | print("Adicao: ", 5 + 3)
print("Subtracao: ", 10 - 2)
print("Multiplicacao: ", 2*4)
print("Divisao: ", 16/2)
print("Resto: ", 16//2 )
print("Potenciacao: ", 2 ** 3) | [
"[email protected]"
] | |
82c6cd39e11ca9c71e9a7af08bfa8d5283cb0013 | 4cacf8188446170e0b4a14b05021bbd595c4db53 | /pyrolite/mineral/transform.py | 051b6d30d068c1f55e1c32d90c780845f6c5592b | [
"BSD-3-Clause",
"MIT"
] | permissive | JustinGOSSES/pyrolite | 2d145583344f79e8f935ed19fa00037d42969664 | 21eb5b28d9295625241b73b820fc8892b00fc6b0 | refs/heads/master | 2020-12-23T11:26:55.078871 | 2020-01-10T09:03:22 | 2020-01-10T09:03:22 | 237,136,389 | 1 | 0 | NOASSERTION | 2020-01-30T04:08:52 | 2020-01-30T04:08:51 | null | UTF-8 | Python | false | false | 5,271 | py | import pandas as pd
import numpy as np
import periodictable as pt
from ..util.pd import to_frame
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
logger = logging.getLogger(__name__)
def formula_to_elemental(formula, weight=True):
"""Convert a periodictable.formulas.Formula to elemental composition."""
formula = pt.formula(formula)
fmass = formula.mass
composition = {}
if weight:
for a, c in formula.atoms.items():
composition[str(a)] = (c * a.mass) / fmass
else:
atoms = sum([c for a, c in formula.atoms.items()])
for a, c in formula.atoms.items():
composition[str(a)] = c / atoms
return composition
def merge_formulae(formulas):
"""
Combine multiple formulae into one. Particularly useful for defining oxide mineral
formulae.
Parameters
-----------
formulas: iterable
Iterable of multiple formulae to merge into a single larger molecular formulae.
"""
molecule = pt.formula("")
for f in formulas:
molecule += pt.formula(f)
return molecule
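# Illustrative use of the two helpers above (exact numbers depend on periodictable's atomic masses):
#   fo = merge_formulae(["MgO", "MgO", "SiO2"])   # elementally equivalent to Mg2SiO4
#   formula_to_elemental(fo)                      # weight fractions of Mg, Si and O
#   formula_to_elemental(fo, weight=False)        # atom fractions instead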
def parse_composition(composition):
"""
Parse a composition to provide an ionic elemental version in the form of a
pandas.Series. Currently accepts pandas.Series, periodictable.formulas.Formula
and structures which will directly convert to pandas.Series (list of tuples, dict).
Parameters
-----------
composition : :class:`pandas.Series` | :class:`periodictable.formulas.Formula`
Formulation of composition to parse.
"""
if composition is not None:
if isinstance(composition, pd.Series):
# convert to molecular oxides, then to formula, then to wt% elemental
components = [pt.formula(c) for c in composition.index]
values = composition.values
formula = merge_formulae(
[v / c.mass * c for v, c in zip(values, components)]
)
return pd.Series(formula_to_elemental(formula))
elif isinstance(composition, pt.formulas.Formula):
return pd.Series(formula_to_elemental(composition))
else:
return parse_composition(pd.Series(composition))
def recalc_cations(
df,
ideal_cations=4,
ideal_oxygens=6,
Fe_species=["FeO", "Fe", "Fe2O3"],
oxygen_constrained=False,
):
"""
Recalculate a composition to a.p.f.u.
"""
assert ideal_cations is not None or ideal_oxygens is not None
# if Fe2O3 and FeO are specified, calculate based on oxygen
moles = to_frame(df)
moles = moles.div([pt.formula(c).mass for c in moles.columns])
moles = moles.where(~np.isclose(moles, 0.0), np.nan)
# determine whether oxygen is an open or closed system
count_iron_species = np.array([i in moles.columns for i in Fe_species]).sum()
oxygen_constrained = oxygen_constrained
if not oxygen_constrained:
if count_iron_species > 1: # check that only one is defined
oxygen_constrained = (
count_iron_species
- pd.isnull(moles.loc[:, Fe_species]).all(axis=1).sum()
) > 1
if oxygen_constrained:
logger.info("Multiple iron species defined. Calculating using oxygen.")
else:
logger.info("Single iron species defined. Calculating using cations.")
components = moles.columns
as_oxides = len(list(pt.formula(components[0]).atoms)) > 1
schema = []
# if oxygen_constrained: # need to specifically separate Fe2 and Fe3
if as_oxides:
parts = [pt.formula(c).atoms for c in components]
for p in parts:
oxygens = p[pt.O]
other_components = [i for i in list(p) if not i == pt.O]
assert len(other_components) == 1 # need to be simple oxides
other = other_components[0]
charge = oxygens * 2 / p[other]
ion = other.ion[charge]
schema.append({str(ion): p[other], "O": oxygens})
else:
# elemental composition
parts = components
for part in parts:
p = list(pt.formula(part).atoms)[0]
if p.charge != 0:
charge = p.charge
else:
charge = p.default_charge
schema.append({p.ion[charge]: 1})
ref = pd.DataFrame(data=schema)
ref.columns = ref.columns.map(str)
ref.index = components
cation_masses = {c: pt.formula(c).mass for c in ref.columns}
oxygen_index = [i for i in ref.columns if "O" in i][0]
ref = ref.loc[:, [i for i in ref.columns if not i == oxygen_index] + [oxygen_index]]
moles_ref = ref.copy(deep=True)
moles_ref.loc[:, :] = (
ref.values * moles.T.values
) # this works for series, not for frame
moles_O = moles_ref[oxygen_index].sum()
moles_cations = (
moles_ref.loc[:, [i for i in moles_ref.columns if not i == oxygen_index]]
.sum()
.sum()
)
if not oxygen_constrained: # oxygen unquantified, try to calculate using cations
scale = ideal_cations / moles_cations
else: # oxygen quantified, try to calculate using oxygen
scale = ideal_oxygens / moles_O
moles_ref *= scale
return moles_ref.sum(axis=0)
| [
"[email protected]"
] | |
4d1151f44cd7a8e3a0921c051d754940e55df38b | 45f9abc3c43e021413181e9971d549ba38b030a6 | /term-1/AIND-Recognizer/asl_utils.py | 3dc52e9e1183953fd79205891978115077132b4c | [] | no_license | SteadBytes/ai-nanodegree | 01d7c707456585fdf39a83f07ac4def90264324d | ba260106dacaaba675a41558e96b2a0998685482 | refs/heads/master | 2021-09-11T00:02:11.555421 | 2018-04-04T15:39:57 | 2018-04-04T15:39:57 | 112,731,081 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,122 | py | from asl_data import SinglesData, WordsData
import numpy as np
from IPython.core.display import display, HTML
RAW_FEATURES = ['left-x', 'left-y', 'right-x', 'right-y']
GROUND_FEATURES = ['grnd-rx', 'grnd-ry', 'grnd-lx', 'grnd-ly']
def show_errors(guesses: list, test_set: SinglesData):
""" Print WER and sentence differences in tabular form
:param guesses: list of test item answers, ordered
:param test_set: SinglesData object
:return:
nothing returned, prints error report
WER = (S+I+D)/N but we have no insertions or deletions for isolated words so WER = S/N
"""
S = 0
N = len(test_set.wordlist)
num_test_words = len(test_set.wordlist)
if len(guesses) != num_test_words:
print("Size of guesses must equal number of test words ({})!".format(
num_test_words))
for word_id in range(num_test_words):
if guesses[word_id] != test_set.wordlist[word_id]:
S += 1
print("\n**** WER = {}".format(float(S) / float(N)))
print("Total correct: {} out of {}".format(N - S, N))
print('Video Recognized Correct')
print('=====================================================================================================')
for video_num in test_set.sentences_index:
correct_sentence = [test_set.wordlist[i]
for i in test_set.sentences_index[video_num]]
recognized_sentence = [guesses[i]
for i in test_set.sentences_index[video_num]]
for i in range(len(recognized_sentence)):
if recognized_sentence[i] != correct_sentence[i]:
recognized_sentence[i] = '*' + recognized_sentence[i]
print('{:5}: {:60} {}'.format(video_num, ' '.join(
recognized_sentence), ' '.join(correct_sentence)))
def getKey(item):
return item[1]
def train_all_words(training: WordsData, model_selector):
""" train all words given a training set and selector
:param training: WordsData object (training set)
:param model_selector: class (subclassed from ModelSelector)
:return: dict of models keyed by word
"""
sequences = training.get_all_sequences()
Xlengths = training.get_all_Xlengths()
model_dict = {}
for word in training.words:
model = model_selector(sequences, Xlengths, word,
n_constant=3).select()
model_dict[word] = model
return model_dict
def combine_sequences(split_index_list, sequences):
'''
concatenate sequences referenced in an index list and returns tuple of the new X,lengths
useful when recombining sequences split using KFold for hmmlearn
:param split_index_list: a list of indices as created by KFold splitting
:param sequences: list of feature sequences
:return: tuple of list, list in format of X,lengths use in hmmlearn
'''
sequences_fold = [sequences[idx] for idx in split_index_list]
X = [item for sublist in sequences_fold for item in sublist]
lengths = [len(sublist) for sublist in sequences_fold]
return X, lengths
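# For example, with sequences = [s0, s1, s2] and split_index_list = [0, 2] from a KFold split,
# X is the frames of s0 followed by those of s2 and lengths == [len(s0), len(s2)].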
def putHTML(color, msg):
source = """<font color={}>{}</font><br/>""".format(color, msg)
return HTML(source)
def feedback(passed, failmsg='', passmsg='Correct!'):
if passed:
return putHTML('green', passmsg)
else:
return putHTML('red', failmsg)
def test_features_tryit(asl):
print('asl.df sample')
display(asl.df.head())
sample = asl.df.ix[98, 1][GROUND_FEATURES].tolist()
correct = [9, 113, -12, 119]
failmsg = 'The values returned were not correct. Expected: {} Found: {}'.format(
correct, sample)
return feedback(sample == correct, failmsg)
def test_std_tryit(df_std):
print('df_std')
display(df_std)
sample = df_std.ix['man-1'][RAW_FEATURES]
correct = [15.154425, 36.328485, 18.901917, 54.902340]
failmsg = 'The raw man-1 values returned were not correct.\nExpected: {} for {}'.format(
correct, RAW_FEATURES)
return feedback(np.allclose(sample, correct, .001), failmsg)
| [
"="
] | = |
1664e7185a09522f272a97c6c6e2f696cb4d1958 | 34087e6a9bb41d9240de4c1bf91cb14a044126bc | /scripts/bandplot | 3e0f44ec4f5991fe99775500897ff806f0be1c70 | [] | no_license | materialsvirtuallab/phonopy | 62117e757f98447de2b247e4b6aa186b0b141aab | 97888bac864f8d8e5eee799b2eeef232e627f018 | refs/heads/master | 2020-12-01T03:09:31.707376 | 2014-09-08T15:42:54 | 2014-09-08T15:42:54 | 21,427,440 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,845 | #!/usr/bin/env python
# Copyright (C) 2011 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import numpy as np
try:
import yaml
except ImportError:
print "You need to install python-yaml."
exit(1)
try:
from yaml import CLoader as Loader
from yaml import CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from phonopy.units import VaspToTHz
def get_plot_data(data):
segment_positions = []
frequencies = []
distances = []
npoints = data['nqpoint'] / data['npath']
for j, v in enumerate(data['phonon']):
frequencies.append([f['frequency'] for f in v['band']])
distances.append(v['distance'])
if j % npoints == 0:
segment_positions.append(v['distance'])
return distances, frequencies, segment_positions
# Parse options
from optparse import OptionParser
parser = OptionParser()
parser.set_defaults(factor=1.0,
f_max=None,
f_min=None,
is_gnuplot=False,
is_points=False,
is_vertial_line=False,
output_filename=None,
labels=None,
show_legend=False,
title=None)
parser.add_option("--factor", dest="factor", type="float",
help="Conversion factor to favorite frequency unit")
parser.add_option("--fmax", dest="f_max", type="float",
help="Maximum frequency plotted")
parser.add_option("--fmin", dest="f_min", type="float",
help="Minimum frequency plotted")
parser.add_option("--gnuplot", dest="is_gnuplot", action="store_true",
help="Output in gnuplot data style")
parser.add_option("--legend", dest="show_legend",
action="store_true",
help="Show legend")
parser.add_option("--line", "-l", dest="is_vertial_line",
action="store_true",
                  help="Vertical lines are drawn between band path segments")
parser.add_option("-o", "--output", dest="output_filename",
action="store", type="string",
help="Output filename of PDF plot")
parser.add_option("--labels", dest="labels", action="store", type="string",
help="Show labels at band segments")
parser.add_option("--points", dest="is_points",
action="store_true",
help="Draw points")
parser.add_option("-t", "--title", dest="title", action="store",
type="string", help="Title of plot")
(options, args) = parser.parse_args()
if options.output_filename:
import matplotlib
matplotlib.use('Agg')
if not options.is_gnuplot:
import matplotlib.pyplot as plt
if options.labels:
from matplotlib import rc
rc('text', usetex=True)
colors = ['b-', 'g-', 'r-', 'c-', 'm-', 'y-', 'k-', 'b--', 'g--', 'r--', 'c--', 'm--', 'y--', 'k--']
if options.is_points:
colors = [x + 'o' for x in colors]
count = 0
if len(args) == 0:
filenames = ['band.yaml']
else:
filenames = args
if options.is_gnuplot:
print "# distance frequency (bands are separated by blank lines)"
for i, filename in enumerate(filenames):
string = open(filename).read()
data = yaml.load(string, Loader=Loader)
distances, frequencies, segment_positions = get_plot_data(data)
if options.is_gnuplot:
print "# segments:",
for v in segment_positions:
print "%10.8f" % v,
print "%10.8f" % distances[-1]
elif options.is_vertial_line and len(filenames) == 1:
for v in segment_positions[1:]:
plt.axvline(x=v, linewidth=0.5, color='b')
for j, freqs in enumerate(np.array(frequencies).T):
if options.is_gnuplot:
for d, f in zip(distances, freqs * options.factor):
print d,f
print
else:
if j==0:
plt.plot(distances, freqs * options.factor, colors[i],
label=filename)
else:
plt.plot(distances, freqs * options.factor, colors[i])
if options.is_gnuplot:
print
if not options.is_gnuplot:
plt.ylabel('Frequency')
plt.xlabel('Wave vector')
plt.xlim(distances[0], distances[-1])
if not options.f_max == None:
plt.ylim(ymax = options.f_max)
if not options.f_min == None:
plt.ylim(ymin = options.f_min)
plt.axhline(y=0, linestyle=':', linewidth=0.5, color='b')
if len(filenames) == 1:
xticks = segment_positions + [distances[-1]]
if options.labels:
labels = [x for x in options.labels.split()]
if len(labels)==len(xticks):
plt.xticks(xticks, labels)
else:
print "Numbers of labels and band segments don't match."
sys.exit(1)
else:
plt.xticks(xticks, [''] * len(xticks))
else:
plt.xticks([])
if not options.title == None:
plt.title(options.title)
if options.show_legend:
plt.legend()
if not options.output_filename == None:
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['font.family'] = 'serif'
plt.savefig(options.output_filename)
else:
plt.show()
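# Typical invocations (all flags are the ones defined by the option parser above):
#   bandplot                                  # reads band.yaml from the current directory
#   bandplot band1.yaml band2.yaml --legend --fmax 10 -o compare.pdf
#   bandplot --gnuplot > band.dat             # dump distance/frequency columns for gnuplot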
| [
"[email protected]"
] | ||
08c4ff249af2ede845061c68aa550a412a32f068 | b3586235dc1e1acbd49fab996f581269a808480b | /sistema/producao/migrations/0090_auto_20200419_1946.py | 05ec1f4442faf22924833cefc45f46cace20c101 | [] | no_license | gonfersilva/Sistema | 37ad1cd03dfbb7889fa0b0367c6ebd9044712ae3 | 4c6d9ade22040972efbe892eae0130939d7b5c46 | refs/heads/master | 2021-10-23T23:21:51.262723 | 2021-10-13T19:45:49 | 2021-10-13T19:45:49 | 155,545,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | # Generated by Django 2.2.7 on 2020-04-19 18:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('producao', '0089_reciclado_timestamp_edit'),
]
operations = [
migrations.AlterField(
model_name='reciclado',
name='timestamp_edit',
field=models.DateTimeField(),
),
]
| [
"[email protected]"
] | |
3eec27e2dd17dd5a63596c4c056f129c7fd1b671 | 6e4f493329341661d869d9c5a8dd21c1baa6a621 | /science/Optics/mirrorformula_cnc/mirrorformula_cnc.py | 7b65c4ba9fd8bfe303a01feb13b81077b5ec018f | [] | no_license | yujinee/scimat2 | 8dd03e1ba003715dd910d7e6a086b6f596a7f23b | 98712c061b9ce5337b3da5b421926de4aaefbe67 | refs/heads/main | 2023-08-28T01:57:53.482632 | 2021-10-22T17:30:10 | 2021-10-22T17:30:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,637 | py | import random
# An object is placed at u cm in front of a concave mirror of focal length f cm. Find at what distance image is formed and its nature.
# An image is formed by a concave mirror of focal length f cm at a distance of v cm in front of it. Find at what distance object is placed in front of mirror.
# An image is formed by a concave mirror of focal length f cm at a distance of v cm behind it. Find at what distance object is placed in front of mirror.
# An object is placed at u cm in front of a concave mirror. Image is formed at a distance of v cm in front of it. Find the focal length of the mirror.
# An object is placed at u cm in front of a concave mirror. Image is formed at a distance of v cm behind it. Find the focal length of the mirror.
# All variants of 1/f = 1/v + 1/u for concave mirror
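# Rearranging 1/f = 1/v + 1/u (all quantities below are magnitudes; "virtual" means the image
# forms behind the mirror, which flips the sign of the image term):
#   real image distance          v = u*f / (u - f)   -> calculation_1
#   virtual image distance       v = u*f / (f - u)   -> calculation_2
#   object from real image       u = v*f / (v - f)   -> calculation_3
#   object from virtual image    u = v*f / (v + f)   -> calculation_4
#   focal length (real image)    f = u*v / (u + v)   -> calculation_5
#   focal length (virtual image) f = u*v / (v - u)   -> calculation_6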
qns = open('./questions.txt', 'w')
ans = open('./answers.txt','w')
no_of_samples = 3000000
def calculation_1(u, f) :
return round((u*f)/(u-f),1)
def calculation_2(u, f) :
return round((u*f)/(f-u),1)
def calculation_3(v, f) :
return round((v*f)/(v-f),1)
def calculation_4(v, f) :
return round((v*f)/(v+f),1)
def calculation_5(u, v) :
return round((u*v)/(u+v),1)
def calculation_6(u, v) :
return round((u*v)/(v-u),1)
def type1() :
f = random.randint(1,800)
u = random.randint(f+1,f+1200)
q = "An object is placed at " + str(u) + " cm in front of a concave mirror of focal length " + str(f) + " cm. Find at what distance image is formed and its nature.\n"
v = str(calculation_1(u,f)) + "cm and real\n"
return q,v
def type2() :
u = random.randint(1,1000)
f = random.randint(u+1,u+1000)
q = "An object is placed at " + str(u) + " cm in front of a concave mirror of focal length " + str(f) + " cm. Find at what distance image is formed and its nature.\n"
v = str(calculation_2(u,f)) + "cm and virtual\n"
return q,v
def type3() :
f = random.randint(1,800)
v = random.randint(f+1,f+1200)
q = "An image is formed by a concave mirror of focal length " + str(f) + " cm at a distance of " + str(v) + " cm in front of it. Find at what distance object is placed in front of mirror.\n"
u = str(calculation_3(v,f)) + "cm\n"
return q,u
def type4() :
f = random.randint(1,1000)
v = random.randint(1,1000)
q = "An image is formed by a concave mirror of focal length " + str(f) + " cm at a distance of " + str(v) + " cm behind it. Find at what distance object is placed in front of mirror.\n"
u = str(calculation_4(v,f)) + "cm\n"
return q,u
def type5() :
u = random.randint(1,1000)
v = random.randint(1,1000)
q = "An object is placed at " + str(u) + " cm in front of a concave mirror. Image is formed at a distance of " + str(v) + " cm in front of it. Find the focal length of the mirror.\n"
f = str(calculation_5(u,v)) + "cm\n"
return q,f
def type6() :
u = random.randint(1,1000)
v = random.randint(u+1,u+1000)
q = "An object is placed at " + str(u) + " cm in front of a concave mirror. Image is formed at a distance of " + str(v) + " cm behind it. Find the focal length of the mirror.\n"
f = str(calculation_6(u,v)) + "cm\n"
return q,f
for i in range(no_of_samples):
types = random.randint(1,6)
if types == 1:
ques,answer = type1()
elif types == 2:
ques,answer = type2()
elif types == 3:
ques,answer = type3()
elif types == 4:
ques,answer = type4()
elif types == 5:
ques,answer = type5()
elif types == 6:
ques,answer = type6()
qns.write(ques)
ans.write(answer)
qns.close()
ans.close()
| [
"[email protected]"
] | |
2c904b54db472eb9fcd58830d3373a19f91eec34 | 252d023b55575f3d25fb9ab8faa92084479244b3 | /indexpy/http/middleware.py | 4d296400f4c12251c08bdfc25b053ada44efdb7b | [
"Apache-2.0"
] | permissive | sangensong/index.py | fef31a222b34961b5869a5d2a5832040029be778 | 4b4cfd0aeef67986f484e3f5f06544b8a2cb7699 | refs/heads/master | 2023-03-03T12:24:00.468335 | 2021-02-13T14:46:33 | 2021-02-13T14:46:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,854 | py | from __future__ import annotations
import typing
from indexpy.concurrency import keepasync
if typing.TYPE_CHECKING:
from .request import Request
from .responses import Response, convert_response
MiddlewareMeta = keepasync("process_request", "process_response", "process_exception")
class MiddlewareMixin(metaclass=MiddlewareMeta): # type: ignore
mounts: typing.Sequence[typing.Callable] = ()
def __init__(self, get_response: typing.Callable) -> None:
self.get_response = self.mount_middleware(get_response)
def mount_middleware(self, get_response: typing.Callable) -> typing.Callable:
for middleware in reversed(self.mounts):
get_response = middleware(get_response)
return get_response
async def __call__(self, request: Request) -> Response:
response = await self.process_request(request)
if response is None:
try:
response = await self.get_response(request)
except Exception as exc:
response = await self.process_exception(request, exc)
if response is None:
raise exc
response = convert_response(response)
response = await self.process_response(request, response)
return response
async def process_request(self, request: Request) -> typing.Optional[typing.Any]:
"""
        Return None to continue processing; any other return value is used as the response of this request.
"""
async def process_response(self, request: Request, response: Response) -> Response:
return response
async def process_exception(
self, request: Request, exception: Exception
) -> typing.Optional[typing.Any]:
"""
        If this returns None, the original exception is re-raised.
        Otherwise the returned value is used as the response of this request.
"""
| [
"[email protected]"
] | |
8a5b454d7307b0ef888c3ccc7dbcc2a78b49ce39 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_136/ch25_2020_10_04_03_54_44_850472.py | 7687d7e86cce8171d9932c23f11bbf1f7f0be68e | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | import math
g= 9.8
def distancia(v, o):
k= math.sin
h= (180/math.pi)*o
d= ((v**2)*k(2*h))/g
if d>=99 and d<=101:
print ('Acertou!')
elif d>101:
print ('Muito longe')
elif d<99:
print ('Muito perto') | [
"[email protected]"
] |