blob_id stringlengths 40-40 | directory_id stringlengths 40-40 | path stringlengths 3-616 | content_id stringlengths 40-40 | detected_licenses listlengths 0-112 | license_type stringclasses 2 values | repo_name stringlengths 5-115 | snapshot_id stringlengths 40-40 | revision_id stringlengths 40-40 | branch_name stringclasses 777 values | visit_date timestamp[us] 2015-08-06 10:31:46 to 2023-09-06 10:44:38 | revision_date timestamp[us] 1970-01-01 02:38:32 to 2037-05-03 13:00:00 | committer_date timestamp[us] 1970-01-01 02:38:32 to 2023-09-06 01:08:06 | github_id int64 4.92k to 681M (contains nulls) | star_events_count int64 0 to 209k | fork_events_count int64 0 to 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us] 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (contains nulls) | gha_created_at timestamp[us] 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (contains nulls) | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 to 10.2M | extension stringclasses 188 values | content stringlengths 3 to 10.2M | authors listlengths 1-1 | author_id stringlengths 1-132 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a357f9ead60456ab5a7e9d939b363b35d728a205 | 1dec4f8f58202b3ebabcb12816811d22a8b7da3c | /test/scanner.py | d300ce1d50a3569256666a359fd39d53558fd36a | [
"MIT"
]
| permissive | lsst-sqre/rubin-jupyter-hub | 51df5d3590635009479732247c3c19333ea39ff5 | 162abe9422332aa60ef089af2862b7a0b0417fa1 | refs/heads/master | 2023-04-17T09:16:10.893045 | 2021-05-05T17:41:45 | 2021-05-05T17:41:45 | 287,143,775 | 0 | 0 | NOASSERTION | 2021-05-05T17:41:45 | 2020-08-13T00:33:31 | Python | UTF-8 | Python | false | false | 280 | py | #!/usr/bin/env python3
import rubin_jupyter_utils.hub as rh
q = rh.SingletonScanner(
name="sciplat-lab",
owner="lsstsqre",
debug=True,
experimentals=2,
dailies=3,
weeklies=4,
releases=3,
cachefile="/tmp/reposcan.json",
)
q.scan()
q.get_all_tags()
| [
"[email protected]"
]
| |
e2810a18610a785f47a2d469dc933a538a438a11 | b4b0e2979d3342449bd7847e619896a7f3878f2f | /day02/bathroom.py | 396d3e9c53dcdcf71a2d0337c68f939dc29ad9eb | [
"MIT"
]
| permissive | ecly/adventofcode2016 | ebbfa39e0932488d930e286321d73e0cb59d24b5 | d08411a887cb2bf6b53785b9b6c193a69f78ba87 | refs/heads/master | 2018-10-20T23:23:07.113189 | 2018-07-20T17:37:53 | 2018-07-20T17:37:53 | 114,026,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | def first(instructions):
code = ""
x, y = 1, 1
keypad = ((1,2,3),(4,5,6),(7,8,9))
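# keypad[y][x]: y is the row and x is the column, so the starting (x, y) = (1, 1) is the middle key 5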
for instruction in instructions:
for c in instruction:
if c == 'U'and y > 0: y-=1
if c == 'D'and y < 2: y+=1
if c == 'L'and x > 0: x-=1
if c == 'R'and x < 2: x+=1
code += str(keypad[y][x])
return code
def second(instructions):
code = ""
x, y = 0, 2
keypad = ((None,None,1 ,None,None),
(None,2 ,3 ,4 ,None),
(5 ,6 ,7 ,8 ,9 ),
(None,'A' ,'B' ,'C' ,None),
(None,None,'D' ,None,None))
for instruction in instructions:
for c in instruction:
if c == 'U'and y > 0:
if(keypad[y-1][x] != None): y-=1
if c == 'D'and y < 4:
if(keypad[y+1][x] != None): y+=1
if c == 'L'and x > 0:
if(keypad[y][x-1] != None): x-=1
if c == 'R'and x < 4:
if(keypad[y][x+1] != None): x+=1
code += str(keypad[y][x])
return code
with open('input.in', 'r') as file:
lines = file.readlines()
print(first(lines))
print(second(lines))
| [
"[email protected]"
]
| |
c2a01c5648c2e7f6f5fbaef3915cdf3dae6ded4b | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2183/60895/250649.py | 2ac41841d2853fb7b23c742a376c03a26de7de4b | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | t=int(input())
while t>0:
n=int(input())
temp=n
start=0
sum=0
if n==1:
sum=3
else:
temp=temp-1
while temp>0:
start=start+2*temp
temp=temp-1
for i in range(0,2*n):
sum=sum+start+i+1
print(sum) | [
"[email protected]"
]
| |
15bbb5b5d25a476591ac3f22a66ddeba6418df73 | e71b6d14fbdbc57c7234ca45a47329d7d02fc6f7 | /flask_api/venv/lib/python3.7/site-packages/vsts/task_agent/v4_1/models/endpoint_url.py | 6ac3ef6470b0974e3a1bbc74eb73896987dc48b9 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | u-blavins/secret_sasquatch_society | c36993c738ab29a6a4879bfbeb78a5803f4f2a57 | 0214eadcdfa9b40254e331a6617c50b422212f4c | refs/heads/master | 2020-08-14T00:39:52.948272 | 2020-01-22T13:54:58 | 2020-01-22T13:54:58 | 215,058,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class EndpointUrl(Model):
"""EndpointUrl.
:param depends_on: Gets or sets the dependency bindings.
:type depends_on: :class:`DependsOn <task-agent.v4_1.models.DependsOn>`
:param display_name: Gets or sets the display name of service endpoint url.
:type display_name: str
:param help_text: Gets or sets the help text of service endpoint url.
:type help_text: str
:param is_visible: Gets or sets the visibility of service endpoint url.
:type is_visible: str
:param value: Gets or sets the value of service endpoint url.
:type value: str
"""
_attribute_map = {
'depends_on': {'key': 'dependsOn', 'type': 'DependsOn'},
'display_name': {'key': 'displayName', 'type': 'str'},
'help_text': {'key': 'helpText', 'type': 'str'},
'is_visible': {'key': 'isVisible', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, depends_on=None, display_name=None, help_text=None, is_visible=None, value=None):
super(EndpointUrl, self).__init__()
self.depends_on = depends_on
self.display_name = display_name
self.help_text = help_text
self.is_visible = is_visible
self.value = value
| [
"[email protected]"
]
| |
9bb9fa1995a4ad19d223a45f59b6a2586b015ae6 | 00db9cdb3cf82306c0de9de1aac0f7514192f1d7 | /dataset/SUN.py | 61af030add5b5c574858bbecf4fffa3c461152a7 | []
| no_license | lijiunderstand/BiSeNet-CCP | 039b4aad6a20ed848141e09a265f50e204d0a28c | 61023e13fed18fc58737194f1de8639b82382605 | refs/heads/master | 2020-04-24T22:19:47.595380 | 2019-01-22T01:30:40 | 2019-01-22T01:30:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,450 | py | import torch
import glob
import os
from torchvision import transforms
from torchvision.transforms import functional as F
import cv2
from PIL import Image
import pandas as pd
import numpy as np
from imgaug import augmenters as iaa
import imgaug as ia
from utils import get_label_info, one_hot_it
import random
def augmentation():
# augment images with spatial transformation: Flip, Affine, Rotation, etc...
# see https://github.com/aleju/imgaug for more details
pass
def augmentation_pixel():
# augment images with pixel intensity transformation: GaussianBlur, Multiply, etc...
pass
class SUN(torch.utils.data.Dataset):
def __init__(self, image_path, depth_path, label_path, csv_path, scale, mode='train'):
super().__init__()
self.mode = mode
self.image_list = glob.glob(os.path.join(image_path, '*.jpg'))
self.image_name = [x.split('/')[-1].split('.')[0] for x in self.image_list]
self.depth_list = [os.path.join(depth_path, x + '.png') for x in self.image_name]
self.label_list = [os.path.join(label_path, x + '.png') for x in self.image_name]
self.fliplr = iaa.Fliplr(0.5)
self.label_info = get_label_info(csv_path)
# resize
self.resize_img = transforms.Resize(scale, Image.BILINEAR)
self.resize_depth = transforms.Resize(scale, Image.NEAREST)
self.resize_label = transforms.Resize(scale, Image.NEAREST)
# normalization
self.to_tensor = transforms.ToTensor()
def __getitem__(self, index):
# load image and resize
img = Image.open(self.image_list[index])
img = self.resize_img(img)
img = np.array(img)
# load depth and resize
depth = Image.open(self.depth_list[index])
depth = self.resize_depth(depth)
depth = np.array(depth)
depth = depth[:, :, np.newaxis] # add axis (480,640,1)
# load label and resize
label = Image.open(self.label_list[index])
label = self.resize_label(label)
label = np.array(label)
# convert label to one-hot graph
label = one_hot_it(label, self.label_info).astype(np.uint8)
# augment image and label
if self.mode == 'train':
seq_det = self.fliplr.to_deterministic()
img = seq_det.augment_image(img)
depth = seq_det.augment_image(depth)
label = seq_det.augment_image(label)
# image -> to_tensor [3, H, W]
img = Image.fromarray(img).convert('RGB')
img = self.to_tensor(img).float()
# depth -> to_tensor [1, H, W]
depth = depth / 65535
depth = self.to_tensor(depth).float()
# image + depth = RGBD
rgbd = torch.cat((img, depth), 0)
# label -> [num_classes, H, W]
label = np.transpose(label, [2, 0, 1]).astype(np.float32)
label = torch.from_numpy(label)
return rgbd, label
def __len__(self):
return len(self.image_list)
if __name__ == '__main__':
data = SUN('/temp_disk/xs/sun/train/image', '/temp_disk/xs/sun/train/label_img', '/temp_disk/xs/sun/seg37_class_dict.csv', (480, 640))
from utils import reverse_one_hot, get_label_info, colour_code_segmentation, compute_global_accuracy
label_info = get_label_info('/temp_disk/xs/sun/seg37_class_dict.csv')
for i, (img, label) in enumerate(data):
print(img.shape)
print(label.shape)
print()
| [
"[email protected]"
]
| |
efb453efca3b2188dcbc78500bd62e2a8122ab1d | 1ed536ef1527e6655217e731f622d643ece49c2b | /CGATPipelines/pipeline_docs/pipeline_chipseq/trackers/Intervals.py | 600487d473929b2f4042e2b2f8076266a7629344 | []
| no_license | siping/cgat | de0f7af124eb38c72d7dece78fff83ff92ddbf96 | aa4cc85ffdc53998ea1a5ac5516df2d16c254d2e | refs/heads/master | 2021-01-22T13:03:18.060139 | 2013-10-07T15:53:55 | 2013-10-07T15:53:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,737 | py | from ChipseqReport import *
import IOTools
class FoldChangeTracker( TrackerSQL ):
'''the fold change tracker ignores unstimulated tracks.'''
def __init__(self, *args, **kwargs ):
TrackerSQL.__init__(self, *args, backend = DATABASE, **kwargs )
def getTracks( self, subset = None ):
tracks = TrackerSQL.getTracks( self, subset = subset )
return [ x for x in tracks if TAG_UNSTIM not in x ]
##################################################################################
##################################################################################
##################################################################################
## Annotation of bases with SNPs
##################################################################################
class IntervalsSummary( DefaultTracker ):
"""Summary stats of intervals called by the peak finder.
"""
mPattern = "_intervals$"
def __call__(self, track, slice = None):
data = self.getFirstRow( "SELECT COUNT(*), AVG(length), SUM(nprobes) FROM %(track)s_intervals" % locals() )
return odict( zip( ("nintervals", "<length>", "nprobes" ), data) )
##################################################################################
##################################################################################
##################################################################################
## Distribution of interval lengths
##################################################################################
class IntervalLengths( DefaultTracker ):
"""Distribution of interval sizes.
"""
mPattern = "_intervals$"
def __call__(self, track, slice = None):
data = self.getValues( "SELECT length FROM %(track)s_intervals" % locals() )
return { "length" : data }
##################################################################################
##################################################################################
##################################################################################
## Distribution of peak values
##################################################################################
class IntervalPeakValues( DefaultTracker ):
"""Distribution of peak values (the number of reads at peak).
"""
mPattern = "_intervals$"
def __call__(self, track, slice = None):
data = self.getValues( "SELECT peakval FROM %(track)s_intervals" % locals() )
return { "peakval" : data }
##################################################################################
##################################################################################
##################################################################################
## Distribution of peak values
##################################################################################
class EnrichmentOverUnstim( DefaultTracker ):
"""For each peakval, present the fold enrichment of a track
compared to the Unstim set.
Useful for diagnosing cutoffs.
"""
mPattern = "_intervals$"
def __call__(self, track, slice = None):
cellline, condition, replicate = splitTrack( track )
if condition == "Unstim": return []
if replicate == None: replicate = ""
other_track = "run" + cellline + "Unstim" + replicate
data_fg = self.getValues( "SELECT peakval FROM %(track)s_intervals ORDER BY peakval DESC" % locals() )
data_bg = self.getValues( "SELECT peakval FROM %(other_track)s_intervals ORDER BY peakval DESC" % locals() )
mi = min( data_bg + data_fg )
ma = max( data_bg + data_fg )
n_fg = len(data_fg)
n_bg = len(data_bg)
hist_fg, bin_edges = numpy.histogram( data_fg, bins = numpy.arange( mi, ma + 1.0, 1.0 ) )
hist_bg, bin_edges = numpy.histogram( data_bg, bins = numpy.arange( mi, ma + 1.0, 1.0 ) )
hist_fg = hist_fg[::-1].cumsum()[::-1]
hist_bg = hist_bg[::-1].cumsum()[::-1]
fold = []
for fg, bg in zip( hist_fg, hist_bg ):
fold.append( float(bg) / fg )
return odict( ( ( "peakval" , bin_edges[:-1]),
( "fold" , fold ) ) )
##################################################################################
##################################################################################
##################################################################################
## Distribution of average values
##################################################################################
class IntervalAverageValues( DefaultTracker ):
"""Distribution of average values (the average number of reads within the interval)
"""
mPattern = "_intervals$"
def __call__(self, track, slice = None):
data = self.getValues( "SELECT avgval FROM %(track)s_intervals" % locals() )
return { "avgval" : data }
##################################################################################
##################################################################################
##################################################################################
## Distribution of average values
##################################################################################
class IntervalLengthVsAverageValue( DefaultTracker ):
"""Length vs average value.
"""
mPattern = "_intervals$"
def __call__(self, track, slice = None):
data = self.get( "SELECT length, avgval FROM %(track)s_intervals" % locals() )
return odict( zip( ("length", "avgval"), zip(*data) ) )
##################################################################################
##################################################################################
##################################################################################
## Distribution of average values
##################################################################################
class IntervalLengthVsPeakValue( DefaultTracker ):
"""Length vs peak value
"""
mPattern = "_intervals$"
def __call__(self, track, slice = None):
data = self.get( "SELECT length, peakval FROM %(track)s_intervals" % locals() )
return odict( zip( ("length", "peakval"), zip(*data) ) )
##################################################################################
##################################################################################
##################################################################################
## peak location within interval
##################################################################################
class PeakLocation( DefaultTracker ):
mPattern = "_intervals$"
def __call__(self, track, slice = None):
data1 = self.getValues( "SELECT (PeakCenter - start) / CAST( Length as FLOAT) - 0.5 FROM %(track)s_intervals" % locals() )
data2 = self.getValues( "SELECT (end - PeakCenter) / CAST( Length as FLOAT) - 0.5 FROM %(track)s_intervals" % locals() )
return { "distance" : data1 + data2 }
##################################################################################
##################################################################################
##################################################################################
## distance of peak to end of interval
##################################################################################
class PeakDistance( DefaultTracker ):
mPattern = "_intervals$"
def __call__(self, track, slice = None):
data1 = self.getValues( "SELECT PeakCenter - start FROM %(track)s_intervals" % locals() )
data2 = self.getValues( "SELECT end - PeakCenter FROM %(track)s_intervals" % locals() )
return { "distance" : data1 + data2 }
##################################################################################
##################################################################################
##################################################################################
##
##################################################################################
class IntervalList( DefaultTracker ):
'''list of intervals.'''
nresults = 10
mColumnsFixed = ("pos", "length" )
mColumnsVariable= ( "peakval", "avgval" )
mPattern = "_intervals$"
def getSQLStatement( self, track, slice = None ):
statement = '''
SELECT
i.interval_id, i.contig, i.start, i.end, i.peakval, i.avgval
FROM
%(track)s_intervals AS i
ORDER BY i.peakval DESC''' % locals()
if self.nresults:
statement += " LIMIT %i" % self.nresults
return statement
def __call__(self, track, slice = None ):
statement = self.getSQLStatement( track, slice )
data = self.get( statement )
ucsc_genome = UCSC_GENOME
n = odict()
for d in data:
id, contig, start, end = d[:4]
pos = "`%(contig)s:%(start)i..%(end)i <http://genome.ucsc.edu/cgi-bin/hgTracks?db=%(ucsc_genome)s&position=%(contig)s:%(start)i..%(end)i>`_" \
% locals()
n[str(id)] = odict( zip(self.mColumnsFixed + self.mColumnsVariable, (pos, end-start,) + d[4:]))
return n
##################################################################################
##################################################################################
##################################################################################
##
##################################################################################
class IntervalListFull( DefaultTracker ):
'''list of all intervals.
Table for export.
'''
nresults = None
mPattern = "_intervals$"
def __call__(self, track, slice = None ):
statement = '''
SELECT
i.contig, i.start, i.end, i.peakval, i.avgval
FROM
%(track)s_intervals AS i
ORDER BY i.peakval DESC''' % locals()
data = self.get( statement )
return odict( zip(
("contig", "start", "end", "peakval", "avgval"),
zip(*data ) ))
##################################################################################
##################################################################################
##################################################################################
##
##################################################################################
class IntervalListPeakval( IntervalList ):
'''list of intervals.'''
def getSQLStatement( self, track, slice = None ):
nresults = self.nresults
statement = '''
SELECT
i.interval_id, i.contig, i.start, i.end, i.peakval, i.avgval, i.length
FROM
%(track)s_intervals AS i
ORDER BY i.peakval DESC
LIMIT %(nresults)s''' % locals()
return statement
##################################################################################
##################################################################################
##################################################################################
##
##################################################################################
class IntervalListFoldChange( FoldChangeTracker, IntervalList ):
'''list of intervals.'''
mColumnsVariable= ( "fg_anysense_mean", "bg_anysense_mean", "fold_mean",
"fg_anysense_max", "bg_anysense_max", "fold_max" )
mMinFold = 1.5
mPattern = "_readcounts$"
def getSQLStatement( self, track, slice = None ):
nresults = self.nresults
minfold = self.mMinFold
statement = '''
SELECT
i.interval_id, i.contig, i.start, i.end,
fg_anysense_mean, bg_anysense_mean,
fg_anysense_mean / bg_anysense_mean AS fold_mean,
fg_anysense_max, bg_anysense_max,
cast(fg_anysense_max as float)/ bg_anysense_max AS fold_max
FROM
%(track)s_intervals AS i,
%(track)s_readcounts AS c
WHERE
i.interval_id = c.gene_id AND
cast(fg_anysense_max as float)/ bg_anysense_max > %(minfold)f
ORDER BY fold_max DESC
LIMIT %(nresults)s''' % locals()
return statement
##################################################################################
##################################################################################
##################################################################################
## correlations
##################################################################################
class Correlations( ChipseqTracker ):
"""Correlation between all sets.
"""
pattern = "(.*)_annotations$"
def __call__(self, track, slice = None ):
table = "%s_correlation" % self.field
return self.getValues( "SELECT %s AS %s FROM %s ORDER BY id" % (track,
self.field,
table))
class CorrelationsPeakval( Correlations ):
field = "peakval"
class CorrelationsAvgval( Correlations ):
field = "avgval"
class CorrelationsLength( Correlations ):
field = "length"
##################################################################################
##################################################################################
##################################################################################
## fold change
##################################################################################
class FoldChange( FoldChangeTracker ):
"""return fold changes for all intervals.
"""
pattern = "(.*)_readcounts$"
def __call__(self, track ):
return self.getValues( "SELECT CAST(fg_anysense_max AS float)/ bg_anysense_max AS fold FROM %(track)s_readcounts" % locals() )
##################################################################################
##################################################################################
##################################################################################
## fold change
##################################################################################
class FoldChangeCounts( FoldChangeTracker ):
"""Count intervals above and below a minimum fold change.
"""
pattern = "(.*)_readcounts$"
mMinFoldChange = 2.0
def __call__(self, track ):
data = []
upfold = self.mMinFoldChange
downfold = 1.0 / upfold
data.append( ("> %5.2f fold" % upfold, self.getValue( "SELECT COUNT(*) FROM %(track)s_readcounts WHERE CAST(fg_anysense_max AS float)/ bg_anysense_max > %(upfold)f " % locals() )) )
data.append( ("unchanged", self.getValue( "SELECT COUNT(*) FROM %(track)s_readcounts WHERE CAST(fg_anysense_max AS float)/ bg_anysense_max between %(downfold)f and %(upfold)f" % locals() )) )
data.append( ("< %5.2f fold" % downfold, self.getValue( "SELECT COUNT(*) FROM %(track)s_readcounts WHERE CAST(fg_anysense_max AS float)/ bg_anysense_max < %(downfold)f " % locals() )) )
return odict(data)
##################################################################################
##################################################################################
##################################################################################
## peak shape
##################################################################################
class PeakShapeTracker( Tracker ):
'''return peakshape data.
Only 1000 rows are returned.
'''
tracks = [ os.path.basename( x )[:-len(".peakshape.tsv.gz")] for x in glob.glob( os.path.join( DATADIR , "*.peakshape.tsv.gz" )) ]
slices = ["peak_height", "peak_width" ]
def __call__(self, track, slice = None):
fn = os.path.join( DATADIR, "%(track)s.peakshape.tsv.gz.matrix_%(slice)s.gz" % locals() )
if not os.path.exists( fn ):
return
matrix, rownames, colnames = IOTools.readMatrix( IOTools.openFile( fn ))
nrows = len(rownames)
if nrows == 0: return
if nrows > 1000:
take = numpy.array( numpy.floor( numpy.arange( 0, nrows, nrows / 1000 ) ), dtype = int )
rownames = [ rownames[x] for x in take ]
matrix = matrix[ take ]
return odict( (('matrix', matrix),
('rows', rownames),
('columns', colnames)) )
| [
"[email protected]"
]
| |
85d1641bef7fd5909e21ca349f509fdd3421cc41 | 0987f31e64bcacb41ba3a1e20054d7b8ac0d7346 | /contests/ABC062/C_correct.py | c036e7a61ea6e88afb42487b1449da2c480884d5 | []
| no_license | masakiaota/kyoupuro | 81ae52ab3014fb2b1e10472994afa4caa9ea463b | 74915a40ac157f89fe400e3f98e9bf3c10012cd7 | refs/heads/master | 2021-06-27T04:13:52.152582 | 2020-09-20T03:21:17 | 2020-09-20T03:21:17 | 147,049,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | # Main idea
# Compare the T-shaped split with the fully parallel (||) split and keep whichever gives the smaller difference
# Also consider the case rotated by 90 degrees
# Adopt whichever is smaller overall
# A diagrammatic explanation has been uploaded as a separate PDF
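# (In try_divi below, "pattern 1" is the fully parallel split, whose best max-min area
# difference is simply W once H*W is not a multiple of 3; "pattern 2" is the T-shaped split:
# one full-width strip of height h1 plus two near-equal vertical pieces covering the rest.)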
H, W = list(map(int, input().split()))
# If H*W is a multiple of 3, the answer is immediately 0
if (H * W) % 3 == 0:
print(0)
exit()
def try_divi(W, H):
# Pattern 1
err = W
# Pattern 2
w2, w3 = W // 2, W // 2
if W % 2:
w2 += 1
for h1 in range(1, H // 2 + 1):
h2 = H-h1
S1, S2, S3 = h1*W, w2*h2, w3*h2
new_err = max(S1, S2, S3) - min(S1, S2, S3)
if new_err < err:
err = new_err
return err
print(min(try_divi(W, H), try_divi(H, W)))
| [
"[email protected]"
]
| |
399d8f1ccae0a90aaadd43bbb8361fc501069da4 | 34652a47355a8dbe9200db229a1bbc62619de364 | /Maths/Euler Sum_for.py | d2db391244ef070127e556456b9ec176533aed0a | []
| no_license | btrif/Python_dev_repo | df34ab7066eab662a5c11467d390e067ab5bf0f8 | b4c81010a1476721cabc2621b17d92fead9314b4 | refs/heads/master | 2020-04-02T13:34:11.655162 | 2019-11-10T11:08:23 | 2019-11-10T11:08:23 | 154,487,015 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | import math
N=int(input("Number of terms you want to sum up : \n"))
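# The loop below accumulates the partial sum 1/1**2 + 1/2**2 + ... + 1/N**2,
# which converges to pi**2/6 (the Basel problem) as N grows.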
sum = 0
for x in range(1, N+1, 1):
sum = sum + 1/x**2
print("The result is : ", sum, "\n")
print("The EXACT result is : ", math.pi**2/6)
| [
"[email protected]"
]
| |
ec58eda1e6caea7793bb434bbf060c242815ae28 | 3cbf4a9d14cd487520f2bd05db10542705a37baf | /h2o-py/tests/testdir_algos/deeplearning/pyunit_weights_and_biases_deeplearning.py | 76f2f8903d02c8119cc64a31daddf0f61973f02b | [
"Apache-2.0"
]
| permissive | KendraFabric/h2o-3 | 733ff021553ff2c2d8f0c3336450d886d029cf15 | c75bc5d2dc644cc8c09df755185a4cc6e34e0d1a | refs/heads/master | 2023-03-15T12:32:02.852026 | 2016-08-26T14:01:07 | 2016-08-26T14:27:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,413 | py | import sys, os
sys.path.insert(1, os.path.join("..",".."))
import h2o
from tests import pyunit_utils
def weights_and_biases():
print "Test checks if Deep Learning weights and biases are accessible from Python"
covtype = h2o.upload_file(pyunit_utils.locate("smalldata/covtype/covtype.20k.data"))
covtype[54] = covtype[54].asfactor()
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
dlmodel = H2ODeepLearningEstimator(hidden=[17,191],
epochs=1,
balance_classes=False,
reproducible=True,
seed=1234,
export_weights_and_biases=True)
dlmodel.train(x=range(54),y=54,training_frame=covtype)
print dlmodel
weights1 = dlmodel.weights(0)
weights2 = dlmodel.weights(1)
weights3 = dlmodel.weights(2)
biases1 = dlmodel.biases(0)
biases2 = dlmodel.biases(1)
biases3 = dlmodel.biases(2)
w1c = weights1.ncol
w1r = weights1.nrow
assert w1c == 52, "wrong dimensionality! expected {0}, but got {1}.".format(52, w1c)
assert w1r == 17, "wrong dimensionality! expected {0}, but got {1}.".format(17, w1r)
w2c = weights2.ncol
w2r = weights2.nrow
assert w2c == 17, "wrong dimensionality! expected {0}, but got {1}.".format(17, w2c)
assert w2r == 191, "wrong dimensionality! expected {0}, but got {1}.".format(191, w2r)
w3c = weights3.ncol
w3r = weights3.nrow
assert w3c == 191, "wrong dimensionality! expected {0}, but got {1}.".format(191, w3c)
assert w3r == 7, "wrong dimensionality! expected {0}, but got {1}.".format(7, w3r)
b1c = biases1.ncol
b1r = biases1.nrow
assert b1c == 1, "wrong dimensionality! expected {0}, but got {1}.".format(1, b1c)
assert b1r == 17, "wrong dimensionality! expected {0}, but got {1}.".format(17, b1r)
b2c = biases2.ncol
b2r = biases2.nrow
assert b2c == 1, "wrong dimensionality! expected {0}, but got {1}.".format(1, b2c)
assert b2r == 191, "wrong dimensionality! expected {0}, but got {1}.".format(191, b2r)
b3c = biases3.ncol
b3r = biases3.nrow
assert b3c == 1, "wrong dimensionality! expected {0}, but got {1}.".format(1, b3c)
assert b3r == 7, "wrong dimensionality! expected {0}, but got {1}.".format(7, b3r)
if __name__ == "__main__":
pyunit_utils.standalone_test(weights_and_biases)
else:
weights_and_biases()
| [
"[email protected]"
]
| |
e8858b1d17db014e60ff494d1957a7e6b40abea3 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /group_and_right_life/have_group/point/long_point_or_next_place.py | 353072ad6581aa82d34c921d2190fd9605b2db54 | []
| no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py |
#! /usr/bin/env python
def person_or_next_work(str_arg):
problem(str_arg)
print('fact')
def problem(str_arg):
print(str_arg)
if __name__ == '__main__':
person_or_next_work('last_place')
| [
"[email protected]"
]
| |
9997b4b6559fd1d70162f2a3a4866a702f635c0f | 55591cff4b4dbfec91ac8e498652c2ad60bfdffe | /datahub/dataset/core/views.py | b8c2f58a8861f7ede2eddf27716026aace8c11eb | [
"MIT"
]
| permissive | alixedi/data-hub-api-cd-poc | 2caf6575cced33f0b2b22582d8bdcd60e99ba7d8 | a5e5ea45bb496c0d2a06635864514af0c7d4291a | refs/heads/develop | 2020-12-26T05:52:39.575158 | 2020-01-30T15:27:27 | 2020-01-30T15:27:27 | 237,407,350 | 0 | 0 | MIT | 2020-02-07T12:38:47 | 2020-01-31T10:25:34 | null | UTF-8 | Python | false | false | 1,150 | py | from rest_framework.views import APIView
from config.settings.types import HawkScope
from datahub.core.auth import PaaSIPAuthentication
from datahub.core.hawk_receiver import (
HawkAuthentication,
HawkResponseSigningMixin,
HawkScopePermission,
)
from datahub.dataset.core.pagination import DatasetCursorPagination
class BaseDatasetView(HawkResponseSigningMixin, APIView):
"""
Base API view to be used for creating endpoints for consumption
by Data Flow and insertion into Data Workspace.
"""
authentication_classes = (PaaSIPAuthentication, HawkAuthentication)
permission_classes = (HawkScopePermission, )
required_hawk_scope = HawkScope.data_flow_api
pagination_class = DatasetCursorPagination
def get(self, request):
"""Endpoint which serves all records for a specific Dataset"""
dataset = self.get_dataset()
paginator = self.pagination_class()
page = paginator.paginate_queryset(dataset, request, view=self)
return paginator.get_paginated_response(page)
def get_dataset(self):
"""Return a list of records"""
raise NotImplementedError
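# A minimal subclass sketch (the model and field names here are hypothetical, not part of this module):
#
#     class CompanyDatasetView(BaseDatasetView):
#         def get_dataset(self):
#             return Company.objects.values('id', 'name', 'created_on')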
| [
"[email protected]"
]
| |
54be352c9be4e5ee113aa84590c71e535dc20a78 | 21a7969bea3e4fb4b4734443aa1702000c26336b | /009-Guessing-Game-One.py | 57b5a45057dda0866b8a9730d4d8a0efa3f39ef9 | []
| no_license | jaapdejong/python-challenges | 635823e7c67c6b5d769c22298d385cfa40cc9f9d | 04fbb009d6c017bdb17a2a6cc45c543a13494fe0 | refs/heads/master | 2021-01-15T22:47:23.882155 | 2017-08-10T11:18:08 | 2017-08-10T11:18:08 | 99,915,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | #!/usr/bin/python3
#
# Exercise 9
# Generate a random number between 1 and 9 (including 1 and 9).
# Ask the user to guess the number, then tell them whether they guessed too low, too high, or exactly right.
# (Hint: remember to use the user input lessons from the very first exercise)
#
from random import randint
computer = randint(1, 9)
while True:
player = int(input("Please enter a number between 1 and 9: "))
if player == computer:
print("Found!!")
break
elif player < computer:
print("Too low")
else:
print("Too high") | [
"[email protected]"
]
| |
ab5b9ad96148fb17c93b3a77ac81f34e18a0861f | 612325535126eaddebc230d8c27af095c8e5cc2f | /depot_tools/ENV/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer/htmlserializer.py | 033ec69f85594ccdd459e1609798e7af65464cb6 | [
"BSD-3-Clause"
]
| permissive | TrellixVulnTeam/proto-quic_1V94 | 1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673 | feee14d96ee95313f236e0f0e3ff7719246c84f7 | refs/heads/master | 2023-04-01T14:36:53.888576 | 2019-10-17T02:23:04 | 2019-10-17T02:23:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,229 | py | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
import gettext
_ = gettext.gettext
try:
from functools import reduce
except ImportError:
pass
from ..constants import voidElements, booleanAttributes, spaceCharacters
from ..constants import rcdataElements, entities, xmlEntities
from .. import utils
from xml.sax.saxutils import escape
spaceCharacters = "".join(spaceCharacters)
try:
from codecs import register_error, xmlcharrefreplace_errors
except ImportError:
unicode_encode_errors = "strict"
else:
unicode_encode_errors = "htmlentityreplace"
encode_entity_map = {}
is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
# skip multi-character entities
if ((is_ucs4 and len(v) > 1) or
(not is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = utils.surrogatePairToCodepoint(v)
else:
v = ord(v)
if not v in encode_entity_map or k.islower():
# prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;" % (hex(cp)[2:]))
return ("".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error(unicode_encode_errors, htmlentityreplace_errors)
del register_error
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = False
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer.
Keyword options (default given first unless specified) include:
inject_meta_charset=True|False
Whether it insert a meta element to define the character set of the
document.
quote_attr_values=True|False
Whether to quote attribute values that don't require quoting
per HTML5 parsing rules.
quote_char=u'"'|u"'"
Use given quote character for attribute quoting. Default is to
use double quote unless attribute value contains a double quote,
in which case single quotes are used instead.
escape_lt_in_attrs=False|True
Whether to escape < in attribute values.
escape_rcdata=False|True
Whether to escape characters that need to be escaped within normal
elements within rcdata elements such as style.
resolve_entities=True|False
Whether to resolve named character entities that appear in the
source tree. The XML predefined entities < > & " '
are unaffected by this setting.
strip_whitespace=False|True
Whether to remove semantically meaningless whitespace. (This
compresses all whitespace to a single space except within pre.)
minimize_boolean_attributes=True|False
Shortens boolean attributes to give just the attribute value,
for example <input disabled="disabled"> becomes <input disabled>.
use_trailing_solidus=False|True
Includes a close-tag slash at the end of the start tag of void
elements (empty elements whose end tag is forbidden). E.g. <hr/>.
space_before_trailing_solidus=True|False
Places a space immediately before the closing slash in a tag
using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
sanitize=False|True
Strip all unsafe or unknown constructs from output.
See `html5lib user documentation`_
omit_optional_tags=True|False
Omit start/end tags that are optional.
alphabetical_attributes=False|True
Reorder attributes to be in alphabetical order.
.. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
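For instance (a sketch of intended usage, not an excerpt from this module):
HTMLSerializer(omit_optional_tags=False, quote_attr_values=True).render(treewalker)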
"""
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, unicode_encode_errors)
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from ..filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# WhitespaceFilter should be used before OptionalTagFilter
# for maximum efficiently of this latter filter
if self.strip_whitespace:
from ..filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from ..filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from ..filters.optionaltags import Filter
treewalker = Filter(treewalker)
# Alphabetical attributes must be last, as other filters
# could add attributes and alter the order
if self.alphabetical_attributes:
from ..filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError(_("System identifer contains both single and double quote characters"))
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError(_("Unexpected </ in CDATA"))
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
for (attr_namespace, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple())
and k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values or not v:
quote_attr = True
else:
quote_attr = reduce(lambda x, y: x or (y in v),
spaceCharacters + ">\"'=", False)
v = v.replace("&", "&amp;")
if self.escape_lt_in_attrs:
v = v.replace("<", "&lt;")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "&#39;")
else:
v = v.replace('"', "&quot;")
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError(_("Comment contains --"))
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if not key in entities:
self.serializeError(_("Entity %s not recognized" % name))
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
def SerializeError(Exception):
"""Error in serialized tree"""
pass
| [
"[email protected]"
]
| |
736d50cd559b4a440abd688caf3c6c50d476faaa | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/delete.py | 3e012305c57cb4acd3c379e49fb0e9f5eebf2636 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
]
| permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,683 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-machinelearningservices
# USAGE
python delete.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = MachineLearningServicesMgmtClient(
credential=DefaultAzureCredential(),
subscription_id="00000000-1111-2222-3333-444444444444",
)
client.registry_code_versions.begin_delete(
resource_group_name="test-rg",
registry_name="my-aml-registry",
code_name="string",
version="string",
).result()
# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeVersion/delete.json
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
6dd679509464d24a600eedfe687833818a76add8 | 54aeeae3c4258e2312028ecd2a38242dd3f4f34d | /examples/BLE_Alerts_Secure_2/main.py | 3bea866358ed00335b014cf3690356f44e0e374c | []
| no_license | zerynth/lib-espressif-esp32ble | 09f4f9ae28056d49d4062c19c0a330bff61396f3 | 17ab2a1fbd80e99420cd988c8fc148b8f366e379 | refs/heads/master | 2021-06-08T04:12:22.808499 | 2020-09-24T15:31:45 | 2020-09-24T15:31:45 | 167,332,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,832 | py | ################################################################################
# BLE Alerts with Security 2
#
# Created by Zerynth Team 2019 CC
# Author: G. Baldi
###############################################################################
import streams
#import the ESP32 BLE driver: a BLE capable VM is also needed!
from espressif.esp32ble import esp32ble as bledrv
# then import the BLE modue
from wireless import ble
streams.serial()
notifications_enabled = True
connected = False
# Let's define some callbacks
def value_cb(status,val):
# check incoming commands and enable/disable notifications
global notifications_enabled
print("Value changed to",val[0],val[1])
if val[0]==0:
print("Notifications enabled")
notifications_enabled = True
elif val[0]==2:
notifications_enabled = False
print("Notifications disabled")
else:
print("Notifications unchanged")
def connection_cb(address):
global connected
print("Connected to",ble.btos(address))
connected = True
def disconnection_cb(address):
global connected
print("Disconnected from",ble.btos(address))
# let's start advertising again
ble.start_advertising()
connected = False
# Let's define some security callbacks
def match_key_cb(passkey):
print("MASTER KEY IS:",passkey,"CAN WE PROCEED? PRESS BUTTON FOR YES")
pinMode(BTN0,INPUT)
for i in range(5):
if digitalRead(BTN0)!=0:
ble.confirm_passkey(1)
print("Confirmed!")
return
sleep(1000)
ble.confirm_passkey(0)
print("Not confirmed!")
try:
# initialize BLE driver
bledrv.init()
# Set GAP name and LEVEL 2 security
# !!! If security is not set, no secure connection will be possible
ble.gap("ZNotifier",security=(ble.SECURITY_MODE_1,ble.SECURITY_LEVEL_2))
# add some GAP callbacks
ble.add_callback(ble.EVT_CONNECTED,connection_cb)
ble.add_callback(ble.EVT_DISCONNECTED,disconnection_cb)
# Create a GATT Service: let's try an Alert Notification Service
# (here are the specs: https://www.bluetooth.com/specifications/gatt/viewer?attributeXmlFile=org.bluetooth.service.alert_notification.xml)
s = ble.Service(0x1811)
# The Alert Notification service has multiple characteristics. Let's add them one by one
# Create a GATT Characteristic for counting new alerts.
# specs: https://www.bluetooth.com/specifications/gatt/viewer?attributeXmlFile=org.bluetooth.characteristic.supported_new_alert_category.xml
cn = ble.Characteristic(0x2A47, ble.NOTIFY | ble.READ,16,"New Alerts",ble.BYTES)
# Add the GATT Characteristic to the Service
s.add_characteristic(cn)
# Create another GATT Characteristic for enabling/disabling alerts
# specs: https://www.bluetooth.com/specifications/gatt/viewer?attributeXmlFile=org.bluetooth.characteristic.alert_notification_control_point.xml
cc = ble.Characteristic(0x2A44, ble.WRITE ,2,"Alerts control",ble.BYTES)
# Add the GATT Characteristic to the Service
s.add_characteristic(cc)
# Add a callback to be notified of changes
cc.set_callback(value_cb)
# Add the Service. You can create additional services and add them one by one
ble.add_service(s)
# Configure security. BLE security is very flexible.
# In this case we declare that the device has only an output capability with yes or no input (CAP_DISPLAY_YES_NO),
# that we require bonding (storage of the keys after pairing)
# and that we want both secure connection and man-in-the-middle protection.
ble.security(
capabilities=ble.CAP_DISPLAY_YES_NO,
bonding=ble.AUTH_BOND,
scheme=ble.AUTH_SC|ble.AUTH_MITM,
key_size=16)
# To do so, we need a callback to accept the passkey when needed
ble.add_callback(ble.EVT_MATCH_PASSKEY,match_key_cb)
# Setup advertising to 50ms
ble.advertising(50)
# Start the BLE stack
ble.start()
# Now start advertising
ble.start_advertising()
except Exception as e:
print(e)
# The following lines delete bonded devices (comment them out if you want to keep existing bonds)!
for bond in ble.bonded():
print("Removing bonded:",ble.btos(bond))
ble.remove_bonded(bond)
# loop forever
while True:
print(".")
if random(0,100)<50 and notifications_enabled and connected:
value = bytearray(cn.get_value())
value[0]=0 # simple alert type
if value[1]<255:
value[1]=value[1]+1 # add a notification
print("Adding a new notification, total of",value[1])
# the remaining 14 bytes can be some text
value[2:10] = "Zerynth!"
# set the new value. If ble notifications are enabled, the connected device will receive the change
cn.set_value(value)
sleep(5000)
| [
"[email protected]"
]
| |
cc525d1d2d9552b17bd4b2818466de9dc2db4e33 | f359c953ef823cc44f7d87a3736c3e4fb1817c0b | /EDBRCommon/python/simulation/RunIIDR74X/RSGravToZZToLLQQ_M-4500.py | f8823971e11ccf67430c5a39efff4e49bce8f793 | []
| no_license | jruizvar/ExoDiBosonResonancesRun2 | aa613200725cf6cd825d7bcbde60d2e39ba84e39 | b407ab36504d0e04e6bddba4e57856f9f8c0ec66 | refs/heads/Analysis76X | 2021-01-18T20:00:57.358494 | 2016-05-30T21:30:19 | 2016-05-30T21:30:19 | 23,619,682 | 1 | 1 | null | 2016-04-22T18:38:45 | 2014-09-03T12:41:07 | Python | UTF-8 | Python | false | false | 879 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/RunIISpring15DR74/RSGravToZZToLLQQ_kMpl01_M-4500_TuneCUETP8M1_13TeV-pythia8/MINIAODSIM/Asympt25ns_MCRUN2_74_V9-v1/50000/0E6BC100-2608-E511-81FE-B083FED7685B.root',
'/store/mc/RunIISpring15DR74/RSGravToZZToLLQQ_kMpl01_M-4500_TuneCUETP8M1_13TeV-pythia8/MINIAODSIM/Asympt25ns_MCRUN2_74_V9-v1/50000/2C69D809-2608-E511-9F6F-AC853DA06A1A.root',
'/store/mc/RunIISpring15DR74/RSGravToZZToLLQQ_kMpl01_M-4500_TuneCUETP8M1_13TeV-pythia8/MINIAODSIM/Asympt25ns_MCRUN2_74_V9-v1/50000/96B4A5FF-2508-E511-8C7A-AC853D9DACD7.root' ] );
secFiles.extend( [
] )
| [
"[email protected]"
]
| |
6bddda677ffc28c03d53706a15295b771ca75757 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02594/s262687394.py | fe06f875311bd6773825bd1fa129ec75658d119b | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | x = int(input().rstrip())
if x >= 30:
print("Yes")
else:
print("No") | [
"[email protected]"
]
| |
7f755f14fed2076d30afd43d52d84419ec2c5011 | 5af5fa981a0a8598b2b031aaf10c6ba6d2f5c28c | /manage.py | b9a5ea2ec851beef60a088a13b233c88c2ace45c | []
| no_license | FMularski/image-house | d15b2fe7d0379cd237c5aef3336a0ad2ee5a136c | 968e7c6a566090d0cf25246e506820dd955b34c0 | refs/heads/main | 2023-07-31T18:49:25.705577 | 2021-09-13T18:56:10 | 2021-09-13T18:56:10 | 404,382,258 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'image_house.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
a9fc27c1b63235d582097a3ea144208589f0880d | b3b066a566618f49ae83c81e963543a9b956a00a | /Supervised Learning with scikit-learn/01_Classification/05_k-Nearest Neighbors Predict.py | 96614384e3f708dc1e9787ed95b2a117a9ba84a8 | []
| no_license | ahmed-gharib89/DataCamp_Data_Scientist_with_Python_2020 | 666c4129c3f0b5d759b511529a365dfd36c12f1a | f3d20b788c8ef766e7c86c817e6c2ef7b69520b8 | refs/heads/master | 2022-12-22T21:09:13.955273 | 2020-09-30T01:16:05 | 2020-09-30T01:16:05 | 289,991,534 | 2 | 0 | null | 2020-08-24T17:15:43 | 2020-08-24T17:15:42 | null | UTF-8 | Python | false | false | 2,183 | py | '''
k-Nearest Neighbors: Predict
Having fit a k-NN classifier, you can now use it to predict the label of a new data point. However, there is no unlabeled data available since all of it was used to fit the model! You can still use the .predict() method on the X that was used to fit the model, but it is not a good indicator of the model's ability to generalize to new, unseen data.
In the next video, Hugo will discuss a solution to this problem. For now, a random unlabeled data point has been generated and is available to you as X_new. You will use your classifier to predict the label for this new data point, as well as on the training data X that the model has already seen. Using .predict() on X_new will generate 1 prediction, while using it on X will generate 435 predictions: 1 for each sample.
The DataFrame has been pre-loaded as df. This time, you will create the feature array X and target variable array y yourself.
INSTRUCTIONS
100XP
Create arrays for the features and the target variable from df. As a reminder, the target variable is 'party'.
Instantiate a KNeighborsClassifier with 6 neighbors.
Fit the classifier to the data.
Predict the labels of the training data, X.
Predict the label of the new data point X_new.
'''
# Import KNeighborsClassifier from sklearn.neighbors
from sklearn.neighbors import KNeighborsClassifier
# Create arrays for the features and the response variable
y = df['party'].values
X = df.drop('party', axis=1).values
# Create a k-NN classifier with 6 neighbors: knn
knn = KNeighborsClassifier(n_neighbors=6)
# Fit the classifier to the data
knn.fit(X,y)
# Predict the labels for the training data X
y_pred = knn.predict(X)
# Predict and print the label for the new data point X_new
new_prediction = knn.predict(X_new)
print("Prediction: {}".format(new_prediction))
#========================================================#
# DEVELOPER #
# BasitAminBhatti #
# Github #
# https://github.com/basitaminbhatti #
#========================================================# | [
"Your-Email"
]
| Your-Email |
2ff172e2e6211730ddf823e829aa16698fff35b2 | 2f2667682bb78578445b9e3aac7cc62cfba83d5a | /googlenet/nets/cnn_model/CNN_MODEL.py | cb42ca784f10d89d88a49599ffdab9680d78e45d | []
| no_license | Yorwxue/trt_experence | 9c770c2a1cb7c48c9d7f21c46be0107de91f1c41 | 778a6cef019dd8afdae6b608b3cbacb56480c7b1 | refs/heads/master | 2022-12-21T12:38:13.108402 | 2019-08-01T08:11:10 | 2019-08-01T08:11:10 | 195,760,238 | 0 | 0 | null | 2022-12-08T05:57:26 | 2019-07-08T07:36:12 | Python | UTF-8 | Python | false | false | 4,407 | py | import tensorflow as tf
class cnn_model(object):
def __init__(self, input_placeholder, num_classes):
self.inputs = input_placeholder
self.num_classes = num_classes
self._build_model()
self.labels = None
def build_graph(self, label_place_holder):
self.labels = label_place_holder
self._build_train_op()
return
def _build_model(self):
with tf.variable_scope('cnn'):
with tf.variable_scope('unit-1'):
x = self._conv2d(self.inputs, name='cnn-1', filter_size=3, in_channels=3, out_channels=64, strides=1)
# x = self._leaky_relu(x, 0.01)
x = self._max_pool(x, 2, strides=2)
with tf.variable_scope('unit-2'):
x = self._conv2d(x, name='cnn-2', filter_size=3, in_channels=64, out_channels=128, strides=1)
# x = self._leaky_relu(x, 0.01)
x = self._max_pool(x, 2, strides=2)
with tf.variable_scope('unit-3'):
x = self._conv2d(x, name='cnn-3', filter_size=3, in_channels=128, out_channels=128, strides=1)
# x = self._leaky_relu(x, 0.01)
x = self._max_pool(x, 2, strides=2)
with tf.variable_scope('unit-4'):
x = self._conv2d(x, name='cnn-4', filter_size=3, in_channels=128, out_channels=256, strides=1)
# x = self._leaky_relu(x, 0.01)
x = self._max_pool(x, 2, strides=2)
with tf.variable_scope('fc'):
# [batch_size, max_stepsize, num_features]
batch_size, height, width, channels = x.get_shape().as_list()
x = tf.reshape(x, [-1, height * width *channels])
outputs = self._fc(x, input_shape=height * width *channels, output_shape=64, name="fc-1")
outputs = self._fc(outputs, input_shape=64, output_shape=self.num_classes, name="fc-2")
self.logits = tf.identity(outputs, name="logits")
def _build_train_op(self):
self.global_step = tf.Variable(0, trainable=False)
self.loss = tf.nn.softmax_cross_entropy_with_logits(
labels=self.labels,
logits=self.logits,
)
self.optimizer = tf.train.AdamOptimizer().minimize(
self.loss,
global_step=self.global_step
)
train_ops = [self.optimizer]
self.train_op = tf.group(*train_ops)
self.cost = tf.reduce_mean(self.loss)
self.acc, self.acc_op = tf.metrics.accuracy(
tf.argmax(self.labels, 1),
tf.argmax(self.logits, 1),
name="metrics"
)
def _conv2d(self, x, name, filter_size, in_channels, out_channels, strides):
with tf.variable_scope(name):
kernel = tf.get_variable(name='conv',
shape=[filter_size, filter_size, in_channels, out_channels],
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable(name='bais',
shape=[out_channels],
dtype=tf.float32,
initializer=tf.constant_initializer())
con2d_op = tf.nn.conv2d(x, kernel, [1, strides, strides, 1], padding='SAME')
return tf.nn.bias_add(con2d_op, b)
def _leaky_relu(self, x, leakiness=0.0):
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _max_pool(self, x, ksize, strides):
return tf.nn.max_pool(x,
ksize=[1, ksize, ksize, 1],
strides=[1, strides, strides, 1],
padding='SAME',
name='max_pool')
def _fc(self, x, input_shape, output_shape, name):
with tf.variable_scope(name):
W = tf.get_variable(name='w',
shape=[input_shape, output_shape],
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable(name='b',
shape=[output_shape],
dtype=tf.float32,
initializer=tf.constant_initializer())
return tf.matmul(x, W) + b
| [
"[email protected]"
]
| |
2a129138b050a28bf745e50795dcbd843554f7d3 | d7f45fac46598da9825a404d7511df7474237e4a | /ex.071.py | ae90bc20c6a1a345fd4295374d5d35af552c7539 | []
| no_license | MarceloBCS/Exercicios_Curso_em_video | b4a8cbc8573e1303065c0cf1baad25c47d5a2fd8 | a90fd67d83cf154f3554f962815fb791d3508d0c | refs/heads/master | 2022-12-29T19:15:50.007022 | 2020-10-13T05:09:28 | 2020-10-13T05:09:28 | 303,592,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | cor = {'amarelo':'\033[1;33m', 'red':'\033[1;31m', 'limp':'\033[m'}
print(cor['red'], '='*22)
print(f' Bhering Bank')
print('='*24, cor['limp'])
n = int(input('Valor do Saque: R$ '))
nota50 = n // 50
resto50 = n % 50
nota20 = (resto50) // 20
resto20 = resto50 % 20
nota10 = (resto20) // 10
resto10 = resto20 % 10
nota1 = resto10
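# Worked example (added for clarity; the prompts above are Portuguese, e.g. "Valor do
# Saque" = "withdrawal amount"): for n = 186 the greedy breakdown is 186 // 50 = 3 notes
# of R$50 (remainder 36), 36 // 20 = 1 note of R$20 (remainder 16), 16 // 10 = 1 note of
# R$10 (remainder 6), and finally 6 notes of R$1.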
print('{1}{0:>3} nota(s){2} R$50'.format(nota50, cor['amarelo'], cor['limp']))
print('{1}{0:>3} nota(s){2} R$20'.format(nota20, cor['amarelo'], cor['limp']))
print('{1}{0:>3} nota(s){2} R$10'.format(nota10, cor['amarelo'], cor['limp']))
print('{1}{0:>3} nota(s){2} R$ 1'.format(nota1, cor['amarelo'], cor['limp']))
from datetime import date
print(f'\nProcessado em {date.today()}')
| [
"[email protected]"
]
| |
1517091c99791653d0cab3474d9bab53eab31f3b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_neglect.py | a50f6e279fa4b54dc4dbd0054f3c11b55146c2f7 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py |
#class header
class _NEGLECT():
def __init__(self,):
self.name = "NEGLECT"
self.definitions = [u'to not give enough care or attention to people or things that are your responsibility: ', u'to not do something, often because you forget: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
]
| |
73139227eba251932205e3dbb90771cec1951790 | 92993cff825da80a8ff601572a0c52b0b7d3cbde | /algorithms/Svm/APG/L2/APG_L2_m22.py | a2277a8abdf5a9f18704027fccf9cf7bcdc2e1f7 | []
| no_license | yingzhuoy/MRs-of-linear-models | 06e8b1f84b08c6aa77553813824cf35c1806c5a7 | c3df8299e039a12613f2022b370b8c3e9c2dd822 | refs/heads/master | 2023-04-07T23:09:37.736952 | 2021-04-04T05:33:37 | 2021-04-04T05:33:37 | 265,124,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,845 | py | import numpy as np
from numpy import linalg
import cvxopt
from cvxopt import matrix,solvers
from algorithms.clf import Clf
def backtracking(l0, x0, p, q, low, up):
# update x
beta = 0.5
l = l0
L0 = 0.5*x0.T*(p*x0) + q.T*x0
g0 = p*x0 + q
for k in range(128):
#xp = x0 - l * g0
#----bug----
xp = x0 - l *g0+2.4710787568352073
xp[xp < low] = low
xp[xp > up] = up
Lx = 0.5*xp.T*(p*xp) + q.T*xp
gt = (x0-xp) / l
if Lx > L0 - l *(g0.T*gt) + 0.5*l*gt.T*(gt):
l = beta * l
else:
break
return xp, l
def projected_apg(p, q, bounds, step_size=0.1, max_iter=1000):
m = p.shape[0]
low, up = bounds
x = np.ones((m, 1), np.float64)
y = x
v, w = np.linalg.eigh(p)
# v[v<=0] = 1e-10
# p = w*np.diag(v)*w.T
l = 1/v[-1] - 1e-10
for k in range(max_iter): # heavy on matrix operations
# saving previous x
y = x
# compute loss and its gradient
# gradient = p*x + q
# proximal mapping
# x = x - l * gradient
# x[x < low] = low
# x[x > up] = up
x, l = backtracking(l, y, p, q, low, up)
# if(np.linalg.norm(x1-x)):
# print('error', np.linalg.norm(x1-x))
# t1 = (1+np.sqrt(1+4*np.square(t0)))/2
# y = x + (t0-1)/t1* (x - y)
# t0 = t1
# stop criteria
rnormw = np.linalg.norm(y-x)
if k > 1 and rnormw < 1e-6:
#print('convergence!')
break
#print(rnormw)
return y
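# Note (added for clarity): projected_apg minimises the box-constrained quadratic
#     0.5 * x.T * p * x + q.T * x   subject to   low <= x <= up
# using projected gradient steps with a backtracking line search (the Nesterov
# acceleration update is commented out above, so no momentum is applied). The line
# marked "#----bug----" in backtracking() appears to be a deliberately seeded fault
# for this mutant file (m22), so it is left untouched here.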
#L2-svm
class APG_L2_m22():
def fit(self, X, y):
m, n = X.shape
X = np.column_stack((X, np.ones((m, 1))))
y = y.astype(np.float64)
data_num = len(y)
C = 1.0
kernel = np.dot(X, np.transpose(X))
p = np.matrix(np.multiply(kernel,np.outer(y, y))) + np.diag(np.ones(data_num, np.float64)) * .5/C
q = np.matrix(-np.ones([data_num, 1], np.float64))
bounds = (0, np.inf)
alpha_svs = projected_apg(p, q, bounds)
# p = matrix(p)
# q = matrix(q)
# g = matrix(-np.eye(data_num))
# h = matrix(np.zeros([data_num, 1], np.float64))
# solvers.options['show_progress'] = False
# sol = solvers.qp(p, q, g, h)
# alpha_svs1 = np.array(sol['x'])
# print(np.linalg.norm(alpha_svs1 - alpha_svs))
# # alpha_svs = alpha_svs1
y1 = np.reshape(y, (-1, 1))
alpha1 = alpha_svs
lambda1 = np.multiply(y1,alpha1)
w = np.dot(X.T, lambda1)
w = np.array(w).reshape(-1)
# b = np.mean(y1-np.reshape(np.dot(w, np.transpose(X)), [-1, 1]))
b = w[n]
w = w[0:n]
clf = Clf(w, b)
return clf | [
"[email protected]"
]
| |
2bd4c287bd784c2d446c13521de35f4dc46bdb33 | e6c65e2e354336a4bea5b6a4ccbccd3682915fe2 | /out-bin/py/google/fhir/models/model_test.runfiles/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/ops/embedding_ops.py | e640b65c6359932536475c67f0a4333dc2a73b54 | [
"Apache-2.0"
]
| permissive | rasalt/fhir-datalab | c30ab773d84983dd04a37e9d0ddec8bf2824b8a4 | 3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de | refs/heads/master | 2021-10-09T05:51:04.593416 | 2018-12-21T18:11:03 | 2018-12-22T05:38:32 | 162,744,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/ops/embedding_ops.py | [
"[email protected]"
]
| |
1b4e72269db5cacbd411ce69d585d1981dee4c84 | 3449140c29d292c910527cc7163689d215d2f22d | /delivery/urls.py | edddc5b0d4461c75abdbf2a5259fa5461fd3ed6c | []
| no_license | gayatrishivani/ichoose-ead-project-sem6 | 89afeda33261bb18b3848e9171865409d0cf6b44 | ae0f59769950656683bc72d1e76cec2bcfa7fd07 | refs/heads/master | 2023-03-08T18:43:02.970287 | 2021-02-25T19:06:25 | 2021-02-25T19:06:25 | 342,350,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | from django.contrib.auth import views
from django.urls import path
from .views import *
urlpatterns = [
path('order/', OrderAPIView.as_view(), name="order"),
path('deliver/', deliver_verify,name ="deliver"),
] | [
"[email protected]"
]
| |
971536fe0a24d6e56e72428cf6577eb03a601f75 | 76133934b1dd287273a9bfa0c801d10d08a21b21 | /test/functional/walletbackup.py | 3734f2cfc1a6af61d4e6020705c5022fe3698f71 | [
"MIT"
]
| permissive | kenfmcoin/kenfmcoin | d8783b34fcb3ae01067e8d1b33e3a73e3b82b1f9 | 1fa48487593233f2066757dc54f48b2349e2d9db | refs/heads/master | 2020-03-10T17:53:31.569229 | 2018-04-14T12:28:55 | 2018-04-14T12:28:55 | 129,511,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,275 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The KenFMcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet backup features.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from random import randint
import shutil
from test_framework.test_framework import KenFMcoinTestFramework
from test_framework.util import *
class WalletBackupTest(KenFMcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
# nodes 1, 2,3 are spenders, let's give them a keypool=100
self.extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
self.sync_all()
def one_send(self, from_node, to_address):
if (randint(1,2) == 1):
amount = Decimal(randint(1,10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
sync_mempools(self.nodes)
self.nodes[3].generate(1)
sync_blocks(self.nodes)
# As above, this mirrors the original bash test.
def start_three(self):
self.start_node(0)
self.start_node(1)
self.start_node(2)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
def stop_three(self):
self.stop_node(0)
self.stop_node(1)
self.stop_node(2)
def erase_three(self):
os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")
def run_test(self):
self.log.info("Generating initial blockchain")
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
self.nodes[2].generate(1)
sync_blocks(self.nodes)
self.nodes[3].generate(100)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
self.log.info("Creating transactions")
# Five rounds of sending each other transactions.
for i in range(5):
self.do_one_round()
self.log.info("Backing up")
tmpdir = self.options.tmpdir
self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")
self.log.info("More transactions")
for i in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].generate(101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
# 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
assert_equal(total, 5700)
##
# Test restoring spender wallets from backups
##
self.log.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
# Restore wallets from backup
shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")
self.log.info("Re-starting nodes")
self.start_three()
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
self.log.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
#start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
self.start_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
# Backup to source wallet file must fail
sourcePaths = [
tmpdir + "/node0/regtest/wallet.dat",
tmpdir + "/node0/./regtest/wallet.dat",
tmpdir + "/node0/regtest/",
tmpdir + "/node0/regtest"]
for sourcePath in sourcePaths:
assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath)
if __name__ == '__main__':
WalletBackupTest().main()
| [
"[email protected]"
]
| |
48ca686bf130382adbb3b325a6410896e709b516 | b68aa1559ffc1228ef11b9dfe49434d618bb6d90 | /src/config.py | e06f9131ea1fcb5f4e1e3749378ea0b5a5df3dc2 | []
| no_license | rady1337/TelegramStatisticBot | c72c67c88aa98ce03d1f6402b5f13bc8a3a34cab | b62c042498185cb2ef5e6f40e13bbb9d4545446d | refs/heads/master | 2022-11-21T06:27:36.421048 | 2020-07-18T11:05:27 | 2020-07-18T11:05:27 | 269,916,495 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | # -*- coding: utf-8 -*-
BOT_TOKEN = ''
ADMIN_ID = ''
| [
"[email protected]"
]
| |
6c41e3ac04ff6a47be5e08a8078f8d709e321df1 | c8b39acfd4a857dc15ed3375e0d93e75fa3f1f64 | /Engine/Extras/Maya_AnimationRiggingTools/ArtToolsOSX/MayaTools/General/Scripts/Modules/ART_Core.py | 876e0d7da21db78bba104005d49ddef2a26a3200 | [
"MIT",
"LicenseRef-scancode-proprietary-license"
]
| permissive | windystrife/UnrealEngine_NVIDIAGameWorks | c3c7863083653caf1bc67d3ef104fb4b9f302e2a | b50e6338a7c5b26374d66306ebc7807541ff815e | refs/heads/4.18-GameWorks | 2023-03-11T02:50:08.471040 | 2022-01-13T20:50:29 | 2022-01-13T20:50:29 | 124,100,479 | 262 | 179 | MIT | 2022-12-16T05:36:38 | 2018-03-06T15:44:09 | C++ | UTF-8 | Python | false | false | 6,469 | py | import maya.cmds as cmds
import maya.mel as mel
import os
import ART_rigUtils as utils
reload(utils)
class RigCore():
#RigCore builds up our core components needed to start the rig build. This includes setting up the driver skeleton and building things like the rig settings, and master rig grps
#These are components that will be needed for every rig
def __init__(self):
#create the rig settings node
self.rigSettings = cmds.group(empty = True, name = "Rig_Settings")
cmds.setAttr(self.rigSettings + ".tx", lock = True, keyable = False)
cmds.setAttr(self.rigSettings + ".ty", lock = True, keyable = False)
cmds.setAttr(self.rigSettings + ".tz", lock = True, keyable = False)
cmds.setAttr(self.rigSettings + ".rx", lock = True, keyable = False)
cmds.setAttr(self.rigSettings + ".ry", lock = True, keyable = False)
cmds.setAttr(self.rigSettings + ".rz", lock = True, keyable = False)
cmds.setAttr(self.rigSettings + ".sx", lock = True, keyable = False)
cmds.setAttr(self.rigSettings + ".sy", lock = True, keyable = False)
cmds.setAttr(self.rigSettings + ".sz", lock = True, keyable = False)
cmds.setAttr(self.rigSettings + ".v", lock = True, keyable = False)
#Setup the driver skeleton
self.createDriverSkeleton()
#build the core rig components
self.buildCoreComponents()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def createDriverSkeleton(self):
#there will always be a root bone, so let's duplicate that
dupe = cmds.duplicate("root", rc = True)[0]
cmds.select("root", hi = True)
joints = cmds.ls(sl = True)
cmds.select(dupe, hi = True)
dupeJoints = cmds.ls(sl = True)
driverJoints = []
for i in range(int(len(dupeJoints))):
if cmds.objExists(dupeJoints[i]):
driverJoint = cmds.rename(dupeJoints[i], "driver_" + joints[i])
driverJoints.append(driverJoint)
#create a direct connection between the driver and the export joints
for joint in driverJoints:
exportJoint = joint.partition("_")[2]
cmds.connectAttr(joint + ".translate", exportJoint + ".translate")
cmds.orientConstraint(joint, exportJoint)
cmds.connectAttr(joint + ".scale", exportJoint + ".scale")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def buildCoreComponents(self):
#builds the master, the root, and the core rig groups
#MASTER CONTROL
masterControl = utils.createControl("circle", 150, "master_anim")
constraint = cmds.pointConstraint("root", masterControl)[0]
cmds.delete(constraint)
cmds.makeIdentity(masterControl, apply = True)
cmds.setAttr(masterControl + ".overrideEnabled", 1)
cmds.setAttr(masterControl + ".overrideColor", 18)
spaceSwitchFollow = cmds.group(empty = True, name = masterControl + "_space_switcher_follow")
constraint = cmds.parentConstraint("root", spaceSwitchFollow)[0]
cmds.delete(constraint)
spaceSwitcher = cmds.group(empty = True, name = masterControl + "_space_switcher")
constraint = cmds.parentConstraint("root", spaceSwitcher)[0]
cmds.delete(constraint)
cmds.parent(spaceSwitcher, spaceSwitchFollow)
cmds.parent(masterControl, spaceSwitcher)
cmds.makeIdentity(masterControl, apply = True)
#OFFSET CONTROL
offsetControl = utils.createControl("square", 140, "offset_anim")
constraint = cmds.pointConstraint("root", offsetControl)[0]
cmds.delete(constraint)
cmds.parent(offsetControl, masterControl)
cmds.makeIdentity(offsetControl, apply = True)
cmds.setAttr(offsetControl + ".overrideEnabled", 1)
cmds.setAttr(offsetControl + ".overrideColor", 17)
#ROOT ANIM
rootControl = utils.createControl("sphere", 10, "root_anim")
constraint = cmds.parentConstraint("driver_root", rootControl)[0]
cmds.delete(constraint)
cmds.parent(rootControl, masterControl)
cmds.makeIdentity(rootControl, apply = True)
cmds.parentConstraint(rootControl, "driver_root")
cmds.setAttr(rootControl + ".overrideEnabled", 1)
cmds.setAttr(rootControl + ".overrideColor", 30)
for attr in [".sx", ".sy", ".sz", ".v"]:
cmds.setAttr(masterControl + attr, lock = True, keyable = False)
cmds.setAttr(offsetControl + attr, lock = True, keyable = False)
cmds.setAttr(rootControl + attr, lock = True, keyable = False)
#Create the group that will hold all of the control rig components
rigGrp = cmds.group(empty = True, name = "ctrl_rig")
cmds.parent(rigGrp, "offset_anim")
#finish grouping everything under 1 character grp
controlRigGrp = cmds.group(empty = True, name = "rig_grp")
cmds.parent(["driver_root", "master_anim_space_switcher_follow"], controlRigGrp)
cmds.parent("Rig_Settings", controlRigGrp)
if cmds.objExists("Proxy_Geo_Skin_Grp"):
cmds.parent("Proxy_Geo_Skin_Grp", controlRigGrp)
returnNodes = [rigGrp, offsetControl]
return returnNodes
| [
"[email protected]"
]
| |
eb307b0b3d4c0305cf1ec3ffde93f182d60448a4 | 66b1f3c3e57f53e1404d6e17c4acc850173a531d | /Python/Fundamentals/Objects and Classes/04. Exercises.py | c63352f1eab3c72ca9d0b9b6bde644bfae6dd9ba | []
| no_license | bMedarski/SoftUni | ca4d6891b3bbe7b03aad5960d2f4af5479fd8bbd | 62cd9cb84b0826e3381c991882a4cdc27d94f8ab | refs/heads/master | 2021-06-08T17:32:39.282975 | 2020-02-04T11:57:08 | 2020-02-04T11:57:08 | 67,947,148 | 6 | 3 | null | 2021-05-06T20:35:42 | 2016-09-11T18:31:02 | Python | UTF-8 | Python | false | false | 1,149 | py | class Exercise:
def __init__(self,topic,course_name,judge_contest_link,):
self.topic = topic
self.course_name = course_name
self.judge_contest_link = judge_contest_link
self.problems = []
def add_problems(self,list_of_problems):
for problem in list_of_problems:
self.problems.append(problem)
def print_problem(self):
print(f"Exercises: {self.topic}")
print(f"Problems for exercises and homework for the \"{self.course_name}\" course @ SoftUni.")
print(f"Check your solutions here: {self.judge_contest_link}")
problem_count = len(self.problems)
for i in range(0,problem_count):
print(f"{i+1}. {self.problems[i]}")
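# Expected input format (inferred from the parsing below): one exercise per line as
# "<topic> -> <course name> -> <judge contest link> -> <problem 1>, <problem 2>, ...",
# with the loop terminated by a line reading "go go go".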
list_of_exercises = []
while True:
user_input = input()
if user_input == "go go go":
break
exercise_args = user_input.split(" -> ")
problems = exercise_args[3].split(", ")
exercise = Exercise(exercise_args[0],exercise_args[1],exercise_args[2])
exercise.add_problems(problems)
list_of_exercises.append(exercise)
for exercise in list_of_exercises:
exercise.print_problem()
| [
"[email protected]"
]
| |
77d363126291b572790991bb8207eda8fe0559c8 | a149044762566830b4356343c062ee84c8056471 | /healthid/apps/orders/migrations/0019_auto_20191118_1908.py | cb35272d4db1312cee84af4026cfee38c1046a7f | []
| no_license | malep2007/healtid-web-api | 23fbe8b34a3c5256cbb60cf15205f65cc035df13 | d9e88b4d8fe1dbc297b61bb007d4182928e515d4 | refs/heads/dev | 2022-12-12T03:24:37.537569 | 2019-12-17T16:26:59 | 2019-12-17T16:26:59 | 228,655,076 | 0 | 1 | null | 2022-12-08T05:25:00 | 2019-12-17T16:17:27 | Python | UTF-8 | Python | false | false | 521 | py | # Generated by Django 2.2 on 2019-11-18 18:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0018_supplierrating'),
]
operations = [
migrations.AlterField(
model_name='supplierorderdetails',
name='status',
field=models.CharField(choices=[('pending', 'Pending Approval'), ('open', 'Open'), ('closed', 'Closed'), ('approved', 'Approved')], default='pending', max_length=10),
),
]
| [
"[email protected]"
]
| |
6e1c053097e229c6fd3c3bb3d6e367ae5d464e29 | c231bdc8d01465c9dda429be2d6535f8e8ea5ece | /Complete Python Masterclass/Source_Codes/lesson_192_pigen.py | 7cd19f181a3a2e3f01176230fb3bdbda2765cf2a | [
"MIT"
]
| permissive | jessequinn/Udemy | 1634049711eb888f59b697d883555767e38598a3 | e3d652c8144660ae155149d897b6364416106c7c | refs/heads/master | 2021-08-18T09:58:57.274897 | 2018-10-26T22:50:26 | 2018-10-26T22:50:26 | 142,214,616 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | def oddnumbers():
n = 1
while True:
yield n
n += 2
def pi_series():
odds = oddnumbers()
approximation = 0
while True:
approximation += (4 / next(odds))
yield approximation
approximation -= (4 / next(odds))
yield approximation
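# Note (added for clarity): pi_series() emits partial sums of the Leibniz series
# pi = 4/1 - 4/3 + 4/5 - 4/7 + ...; each pass yields both the over- and under-estimate
# (4.0, 2.666..., 3.466..., 2.895..., ...), so consecutive values bracket pi and the
# loop below prints 10000 successively tighter approximations.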
approx_pi = pi_series()
for x in range(10000):
print(next(approx_pi))
| [
"[email protected]"
]
| |
4a8fa681a8b0aae3c3b191b3e2b4ec7019a401fc | f40e5c91a18fc5c7e0b4d96fe964a493f924e958 | /supervised_learning/0x0F-word_embeddings/4-fasttext.py | 949c893776a76d2dde5174ec730529b71a0fd9e2 | []
| no_license | jgadelugo/holbertonschool-machine_learning | ab46f71477998371ca5e3623455d61fe334ab221 | e20b284d5f1841952104d7d9a0274cff80eb304d | refs/heads/master | 2023-02-01T03:52:43.723569 | 2020-12-10T19:28:57 | 2020-12-10T19:28:57 | 256,043,170 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,216 | py | #!/usr/bin/env python3
"""creates and trains a genism fastText model"""
from gensim.models import FastText
def fasttext_model(sentences, size=100, min_count=5, negative=5,
window=5, cbow=True, iterations=5, seed=0, workers=1):
"""creates and trains a genism fastText model
@sentences: list of sentences to be trained on
@size: dimensionality of the embedding layer
@min_count: minimum number of occurrences of a word for use in training
@window: maximum distance between the current and predicted word within
a sentence
@negative: size of negative sampling
@cbow: boolean to determine the training type
True is for CBOW
False is for Skip-gram
@iterations: number of iterations to train over
@seed: seed for the random number generator
@workers: number of worker threads to train the model
Return: trained model
"""
model = FastText(sentences=sentences, size=size, window=window,
                     min_count=min_count, workers=workers,
                     sg=0 if cbow else 1,  # gensim: sg=0 is CBOW, sg=1 is skip-gram
negative=negative, seed=seed)
model.train(sentences=sentences, total_examples=model.corpus_count,
epochs=iterations)
return model
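# Illustrative usage (added; assumes a tiny tokenized corpus, names are arbitrary):
#
#     sentences = [["hello", "world"], ["machine", "learning", "is", "fun"]]
#     model = fasttext_model(sentences, size=50, min_count=1, iterations=5)
#     print(model.wv["hello"].shape)   # -> (50,)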
| [
"[email protected]"
]
| |
a7041cf991e52b763f3cd3bc1245544d989aa779 | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2014_11_06_composite_test/microstructure_function.py | 40648669fbe68bcc57b24375b06e393199591580 | []
| no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 23 14:25:50 2014
This script reads a set of microstructures designated by the set-ID and saves
the microstructure function in real and frequency space.
@author: nhpnp3
"""
import time
import numpy as np
import functions_composite as rr
import scipy.io as sio
def msf(el,ns,H,set_id,wrt_file):
start = time.time()
## import microstructures
tmp = np.zeros([H,ns,el**3])
microstructure = sio.loadmat('M_%s%s.mat' %(ns,set_id))['M']
microstructure = microstructure.swapaxes(0,1)
for h in xrange(H):
tmp[h,...] = (microstructure == h).astype(int)
del microstructure
tmp = tmp.swapaxes(0,1)
micr = tmp.reshape([ns,H,el,el,el])
del tmp
np.save('msf_%s%s' %(ns,set_id),micr)
end = time.time()
timeE = np.round((end - start),3)
msg = "generate real-space microstructure function from GSH-coefficients: %s seconds" %timeE
rr.WP(msg,wrt_file)
## Microstructure functions in frequency space
start = time.time()
M = np.fft.fftn(micr, axes = [2,3,4])
del micr
size = M.nbytes
np.save('M_%s%s' %(ns,set_id),M)
end = time.time()
timeE = np.round((end - start),3)
msg = "FFT3 conversion of micr to M_%s%s: %s seconds" %(ns,set_id,timeE)
rr.WP(msg,wrt_file)
msg = 'Size of M_%s%s: %s bytes' %(ns,set_id,size)
rr.WP(msg,wrt_file) | [
"[email protected]"
]
| |
3c15ad74bdae4f5394409a54f52da75f6dfa9934 | 6fff0893ef43f1018d65f2e8e1bf27d9f8accf5b | /pw_stm32cube_build/py/setup.py | 4a4f3b3318d0244b9e89d087778c77c813a96b56 | [
"Apache-2.0"
]
| permissive | isabella232/pigweed | eeb68a4eda6f0a9b5ef0b8145d0204bc9f85bfdc | 53c2f3e2569d7e582d3dd3056ceb9b2c3b8197b2 | refs/heads/main | 2023-06-03T10:32:29.498066 | 2021-06-17T06:38:15 | 2021-06-17T20:44:55 | 378,165,913 | 0 | 0 | Apache-2.0 | 2021-06-18T13:54:37 | 2021-06-18T13:53:40 | null | UTF-8 | Python | false | false | 1,131 | py | # Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""pw_stm32cube_build"""
import setuptools # type: ignore
setuptools.setup(
name='pw_stm32cube_build',
version='0.0.1',
author='Pigweed Authors',
author_email='[email protected]',
description='Python scripts for stm32cube targets',
packages=setuptools.find_packages(),
package_data={'pw_stm32cube_build': ['py.typed']},
zip_safe=False,
entry_points={
'console_scripts': [
'stm32cube_builder = pw_stm32cube_build.__main__:main',
]
},
install_requires=[])
| [
"[email protected]"
]
| |
c2eea179b5c79d97d0acbbad1ce516c307302d5c | 1819b161df921a0a7c4da89244e1cd4f4da18be4 | /WhatsApp_FarmEasy/env/lib/python3.6/site-packages/cytoolz/_version.py | 700c05c3519cb3546209ef31207f8d98793ce9e5 | [
"MIT"
]
| permissive | sanchaymittal/FarmEasy | 889b290d376d940d9b3ae2fa0620a573b0fd62a0 | 5b931a4287d56d8ac73c170a6349bdaae71bf439 | refs/heads/master | 2023-01-07T21:45:15.532142 | 2020-07-18T14:15:08 | 2020-07-18T14:15:08 | 216,203,351 | 3 | 2 | MIT | 2023-01-04T12:35:40 | 2019-10-19T12:32:15 | JavaScript | UTF-8 | Python | false | false | 52 | py | __version__ = '0.10.0'
__toolz_version__ = '0.10.0'
| [
"[email protected]"
]
| |
de18343ee3eb88b08ceb551823642e31ba91135a | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_118/1777.py | 0029905c5b8fadd6cd1b84c5cdcbc74c89c48176 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 974 | py | # /usr/bin/python
import sys
import math
def dbg(s): sys.stderr.write(str(s) +"\n")
def reads(t): return map(t, input().split(" "))
def read(t) : return t(input())
palss = {
0: [],
1: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
2: [11, 22, 33, 44, 55, 66, 77, 88, 99],
}
pals = palss[1] + palss[2]
fsq = []
def gen_pals(n):
ps = palss.get(n)
if ps != None:
return ps
p_list = gen_pals(n-2)
ps = []
for i in range(1, 10):
for p in p_list:
np = int(str(i) + str(p) + str(i))
ps.append(np)
pals.append(np)
palss[n] = ps
return ps
def gen_fsq(max_fsq):
for p in pals:
maybe_fsq = p**2
if maybe_fsq > max_fsq:
return
if maybe_fsq in pals_set:
fsq.append(maybe_fsq)
G = 3
for i in range(1, G+1):
gen_pals(i)
pals.remove(0)
max_pal = pals[-1]
pals_set = set(pals)
gen_fsq(max_pal)
T = read(int)
for t in range(1, T+1):
[A, B] = reads(int)
cnt = 0
for f in fsq:
if f >= A and f <= B:
cnt += 1
print("Case #%d: %d" % (t, cnt))
| [
"[email protected]"
]
| |
39c0ddf7ea3e1e79086c341e5bb886df70d3c00f | 990f6df58da264e8c35d5889a63c16ed9a9ba4c2 | /kvirt/common/__init__.py | 32510dae8b407d6f00883c04ce0d3b404622abad | [
"Apache-2.0"
]
| permissive | aelejota/kcli | 0079662591e781345aed2096f9cedc896bb5462f | 0c1e1756b4af6b7dccd2f759382adf79f4a7050f | refs/heads/master | 2021-01-20T09:16:03.835141 | 2017-05-03T15:28:58 | 2017-05-03T15:28:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,355 | py | #!/usr/bin/env python
from distutils.spawn import find_executable
import socket
import urllib2
import json
import os
def symlinks(user, repo):
mappings = []
url1 = 'https://api.github.com/repos/%s/%s/git/refs/heads/master' % (user, repo)
r = urllib2.urlopen(url1)
base = json.load(r)
sha = base['object']['sha']
url2 = 'https://api.github.com/repos/%s/%s/git/trees/%s?recursive=1' % (user, repo, sha)
r = urllib2.urlopen(url2)
try:
base = json.load(r)
except:
return []
for e in base['tree']:
if e['mode'] == '120000':
mappings.append(e['path'])
return mappings
def download(url, path):
filename = os.path.basename(url)
print("Fetching %s" % filename)
url = urllib2.urlopen(url)
with open("%s/%s" % (path, filename), 'wb') as output:
output.write(url.read())
def makelink(url, path):
filename = os.path.basename(url)
url = urllib2.urlopen(url)
target = url.read()
print("Creating symlink for %s pointing to %s" % (filename, target))
os.symlink(target, "%s/%s" % (path, filename))
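# Note (added for clarity): fetch() below mirrors a remote file or directory into
# `path`. Plain URLs are downloaded directly; github.com URLs are rewritten to the
# GitHub contents API so that regular files are downloaded, symlinks recorded in the
# repo tree (see symlinks()) are recreated locally, and sub-directories are fetched
# recursively.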
def fetch(url, path, syms=None):
if not url.startswith('http'):
url = "https://%s" % url
if 'github.com' not in url or 'raw.githubusercontent.com' in url:
download(url, path)
return
elif 'api.github.com' not in url:
# url = url.replace('github.com/', 'api.github.com/repos/').replace('tree/master', 'contents')
url = url.replace('github.com/', 'api.github.com/repos/').replace('tree/master', '')
url = url.replace('blob/master', '')
if 'contents' not in url:
tempurl = url.replace('https://api.github.com/repos/', '')
user = tempurl.split('/')[0]
repo = tempurl.split('/')[1]
syms = symlinks(user, repo)
url = url.replace("%s/%s" % (user, repo), "%s/%s/contents" % (user, repo))
if not os.path.exists(path):
os.mkdir(path)
r = urllib2.urlopen(url)
try:
base = json.load(r)
except:
print("Invalid url.Leaving...")
os._exit(1)
for b in base:
if 'name' not in b or 'type' not in b or 'download_url' not in b:
print("Invalid url.Leaving...")
os._exit(1)
filename = b['name']
filetype = b['type']
filepath = b['path']
download_url = b['download_url']
if filepath in syms:
makelink(download_url, path)
elif filetype == 'file':
download(download_url, path)
elif filetype == 'dir':
fetch("%s/%s" % (url, filename), "%s/%s" % (path, filename), syms=syms)
def cloudinit(name, keys=[], cmds=[], nets=[], gateway=None, dns=None, domain=None, reserveip=False, files=[]):
default_gateway = gateway
with open('/tmp/meta-data', 'w') as metadatafile:
if domain is not None:
localhostname = "%s.%s" % (name, domain)
else:
localhostname = name
metadatafile.write('instance-id: XXX\nlocal-hostname: %s\n' % localhostname)
metadata = ''
if nets:
for index, net in enumerate(nets):
if isinstance(net, str):
if index == 0:
continue
nicname = "eth%d" % index
ip = None
netmask = None
elif isinstance(net, dict):
nicname = net.get('nic', "eth%d" % index)
ip = net.get('ip')
netmask = net.get('mask')
metadata += " auto %s\n" % nicname
if ip is not None and netmask is not None and not reserveip:
metadata += " iface %s inet static\n" % nicname
metadata += " address %s\n" % ip
metadata += " netmask %s\n" % netmask
gateway = net.get('gateway')
if index == 0 and default_gateway is not None:
metadata += " gateway %s\n" % default_gateway
elif gateway is not None:
metadata += " gateway %s\n" % gateway
dns = net.get('dns')
if dns is not None:
metadata += " dns-nameservers %s\n" % dns
domain = net.get('domain')
if domain is not None:
metadatafile.write(" dns-search %s\n" % domain)
else:
metadata += " iface %s inet dhcp\n" % nicname
if metadata:
metadatafile.write("network-interfaces: |\n")
metadatafile.write(metadata)
# if dns is not None:
# metadatafile.write(" dns-nameservers %s\n" % dns)
# if domain is not None:
# metadatafile.write(" dns-search %s\n" % domain)
with open('/tmp/user-data', 'w') as userdata:
userdata.write('#cloud-config\nhostname: %s\n' % name)
userdata.write("ssh_pwauth: True\ndisable_root: false\n")
if domain is not None:
userdata.write("fqdn: %s.%s\n" % (name, domain))
if keys or os.path.exists("%s/.ssh/id_rsa.pub" % os.environ['HOME']) or os.path.exists("%s/.ssh/id_dsa.pub" % os.environ['HOME']):
userdata.write("ssh_authorized_keys:\n")
else:
            print("neither id_rsa.pub nor id_dsa.pub public keys found in your .ssh directory, you might have trouble accessing the vm")
if keys:
for key in keys:
userdata.write("- %s\n" % key)
if os.path.exists("%s/.ssh/id_rsa.pub" % os.environ['HOME']):
publickeyfile = "%s/.ssh/id_rsa.pub" % os.environ['HOME']
with open(publickeyfile, 'r') as ssh:
key = ssh.read().rstrip()
userdata.write("- %s\n" % key)
if os.path.exists("%s/.ssh/id_dsa.pub" % os.environ['HOME']):
publickeyfile = "%s/.ssh/id_dsa.pub" % os.environ['HOME']
with open(publickeyfile, 'r') as ssh:
key = ssh.read().rstrip()
userdata.write("- %s\n" % key)
if cmds:
userdata.write("runcmd:\n")
for cmd in cmds:
if cmd.startswith('#'):
continue
else:
userdata.write("- %s\n" % cmd)
if files:
userdata.write('ssh_pwauth: True\n')
userdata.write('disable_root: false\n')
userdata.write("write_files:\n")
for fil in files:
if not isinstance(fil, dict):
continue
origin = fil.get('origin')
content = fil.get('content')
if origin is not None:
origin = os.path.expanduser(origin)
if not os.path.exists(origin):
print("Skipping file %s as not found" % origin)
continue
# if origin.endswith('j2'):
# origin = open(origin, 'r').read()
# content = Environment().from_string(origin).render(name=name, gateway=gateway, dns=dns, domain=domain)
# else:
# content = open(origin, 'r').readlines()
content = open(origin, 'r').readlines()
elif content is None:
continue
path = fil.get('path')
owner = fil.get('owner', 'root')
permissions = fil.get('permissions', '0600')
userdata.write("- owner: %s:%s\n" % (owner, owner))
userdata.write(" path: %s\n" % path)
userdata.write(" permissions: '%s'\n" % (permissions))
userdata.write(" content: | \n")
if isinstance(content, str):
content = content.split('\n')
for line in content:
userdata.write(" %s\n" % line.strip())
isocmd = 'mkisofs'
if find_executable('genisoimage') is not None:
isocmd = 'genisoimage'
os.system("%s --quiet -o /tmp/%s.iso --volid cidata --joliet --rock /tmp/user-data /tmp/meta-data" % (isocmd, name))
def get_free_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 0))
addr, port = s.getsockname()
s.close()
return port
def pprint(text, color=None):
colors = {'blue': '34', 'red': '31', 'green': '32', 'yellow': '33', 'white': '37'}
if color is not None and color in colors:
color = colors[color]
print('\033[1;%sm%s\033[0;0m' % (color, text))
else:
print(text)
def handle_response(result, name, quiet=False, element='', action='deployed'):
if result['result'] == 'success':
if not quiet:
pprint("%s%s %s!" % (element, name, action), color='green')
return 0
else:
if not quiet:
reason = result['reason']
pprint("%s%s not %s because %s" % (element, name, action, reason), color='red')
return 1
def confirm(message):
message = "%s [y/N]: " % message
input = raw_input(message)
if input.lower() != 'y':
pprint("Leaving...", color='red')
os._exit(1)
return
| [
"[email protected]"
]
| |
db41ab0764713f49b58abe113aa3b99dfbace9db | a3139097d228a89255422fbfc5ee9b10992aeca1 | /day_02/transcripts/trans_sets.py | 2dc2e76eabc38d3dec809e54772d4dd68d597b90 | []
| no_license | Code360In/07122020PYLVC | 4d8e88163a00c56004d3920d5802a6c945e4af89 | a0e4dd708c973ff333930191a77f289091969a3d | refs/heads/main | 2023-01-29T09:03:41.375978 | 2020-12-11T14:00:09 | 2020-12-11T14:00:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | Python 3.7.3 (v3.7.3:ef4ec6ed12, Mar 25 2019, 22:22:05) [MSC v.1916 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> s1 = {'a', 'a', 'b', 'b', 'c' ,'d', 'e', 'f', 'g' }
>>> type(s1)
<class 'set'>
>>> s1
{'f', 'c', 'a', 'g', 'b', 'd', 'e'}
>>> s1[2]
Traceback (most recent call last):
File "<pyshell#3>", line 1, in <module>
s1[2]
TypeError: 'set' object is not subscriptable
>>> 'a' in s1
True
>>> s2 = set("defghijk")
>>> s2
{'f', 'g', 'h', 'i', 'j', 'k', 'd', 'e'}
>>> # ----------------------------- operators
>>>
>>> s1 ^ s2
{'c', 'b', 'a', 'h', 'i', 'j', 'k'}
>>> s1 & s2
{'e', 'd', 'g', 'f'}
>>> s1 | s2
{'i', 'f', 'c', 'g', 'a', 'h', 'b', 'j', 'k', 'd', 'e'}
>>>
>>> # ---------------------------- adding and removing
>>>
>>> s1.add("x")
>>> s1
{'f', 'c', 'a', 'g', 'x', 'b', 'd', 'e'}
>>> s3 = {'y', 'z'}
>>> s1.update(s3)
>>> s1
{'f', 'c', 'y', 'a', 'g', 'x', 'z', 'b', 'd', 'e'}
>>>
>>> s1.remove('g')
>>> s1
{'f', 'c', 'y', 'a', 'x', 'z', 'b', 'd', 'e'}
>>>
>>> # ---------------------------- functions
>>>
>>> s1.union(s2)
{'i', 'f', 'y', 'e', 'a', 'x', 'g', 'z', 'b', 'h', 'k', 'j', 'd', 'c'}
>>> s1.intersection(s2)
{'d', 'e', 'f'}
>>>
>>>
>>> # -------------------------------- interconversion
>>>
>>> s1
{'f', 'c', 'y', 'a', 'x', 'z', 'b', 'd', 'e'}
>>> list(s1)
['f', 'c', 'y', 'a', 'x', 'z', 'b', 'd', 'e']
>>> tuple(s1)
('f', 'c', 'y', 'a', 'x', 'z', 'b', 'd', 'e')
>>>
| [
"[email protected]"
]
| |
82e64f85567565fee068b5888b54eba2a79ab18e | f569978afb27e72bf6a88438aa622b8c50cbc61b | /douyin_open/EnterpriseGrouponGrouponCodeCodeDeposit/models/inline_response200.py | 12e39d0314ab86197d71e68b691b7582b16b95da | []
| no_license | strangebank/swagger-petstore-perl | 4834409d6225b8a09b8195128d74a9b10ef1484a | 49dfc229e2e897cdb15cbf969121713162154f28 | refs/heads/master | 2023-01-05T10:21:33.518937 | 2020-11-05T04:33:16 | 2020-11-05T04:33:16 | 310,189,316 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,756 | py | # coding: utf-8
"""
自定义卷码预存
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InlineResponse200(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'extra': 'ExtraBody',
'data': 'InlineResponse200Data'
}
attribute_map = {
'extra': 'extra',
'data': 'data'
}
def __init__(self, extra=None, data=None): # noqa: E501
"""InlineResponse200 - a model defined in Swagger""" # noqa: E501
self._extra = None
self._data = None
self.discriminator = None
if extra is not None:
self.extra = extra
if data is not None:
self.data = data
@property
def extra(self):
"""Gets the extra of this InlineResponse200. # noqa: E501
:return: The extra of this InlineResponse200. # noqa: E501
:rtype: ExtraBody
"""
return self._extra
@extra.setter
def extra(self, extra):
"""Sets the extra of this InlineResponse200.
:param extra: The extra of this InlineResponse200. # noqa: E501
:type: ExtraBody
"""
self._extra = extra
@property
def data(self):
"""Gets the data of this InlineResponse200. # noqa: E501
:return: The data of this InlineResponse200. # noqa: E501
:rtype: InlineResponse200Data
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this InlineResponse200.
:param data: The data of this InlineResponse200. # noqa: E501
:type: InlineResponse200Data
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InlineResponse200, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse200):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
eb99088385dfeba2cc4eaf7cfc1cec214ff6f8a1 | db4add5587fbcb24bd6147a5f2cc161743553429 | /venv/lib/python3.6/site-packages/sqlalchemy/dialects/mssql/pymssql.py | cfcfb4afc306ea8222d264b2f34b031b8c470614 | [
"MIT"
]
| permissive | DiptoChakrabarty/FlaskKube-api | 38dedb5695f00f1fa0ee58af1f4b595c37f3ba0f | 50bf4c226ce2ed0d544cb2eb16f5279e0fe25ca1 | refs/heads/master | 2022-12-25T04:08:52.669305 | 2020-10-01T08:42:39 | 2020-10-01T08:42:39 | 259,813,586 | 4 | 5 | MIT | 2020-10-01T08:42:41 | 2020-04-29T03:20:59 | Python | UTF-8 | Python | false | false | 4,677 | py | # mssql/pymssql.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+pymssql
:name: pymssql
:dbapi: pymssql
:connectstring: mssql+pymssql://<username>:<password>@<freetds_name>/?charset=utf8
pymssql is a Python module that provides a Python DBAPI interface around
`FreeTDS <http://www.freetds.org/>`_.
Modern versions of this driver worked very well with SQL Server and FreeTDS
from Linux and were highly recommended. However, pymssql is currently
unmaintained and has fallen behind the progress of the Microsoft ODBC driver in
its support for newer features of SQL Server. The latest official release of
pymssql at the time of this document is version 2.1.4 (August, 2018) and it
lacks support for:
1. table-valued parameters (TVPs),
2. ``datetimeoffset`` columns using timezone-aware ``datetime`` objects
(values are sent and retrieved as strings), and
3. encrypted connections (e.g., to Azure SQL), when pymssql is installed from
the pre-built wheels. Support for encrypted connections requires building
pymssql from source, which can be a nuisance, especially under Windows.
The above features are all supported by mssql+pyodbc when using Microsoft's
ODBC Driver for SQL Server (msodbcsql), which is now available for Windows,
(several flavors of) Linux, and macOS.
""" # noqa
import re
from .base import MSDialect
from .base import MSIdentifierPreparer
from ... import processors
from ... import types as sqltypes
from ... import util
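# Illustrative connection example (added; not part of the upstream module -- the user,
# password, host and database names below are placeholders):
#
#     from sqlalchemy import create_engine
#     engine = create_engine("mssql+pymssql://user:password@freetds_name/dbname?charset=utf8")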
class _MSNumeric_pymssql(sqltypes.Numeric):
def result_processor(self, dialect, type_):
if not self.asdecimal:
return processors.to_float
else:
return sqltypes.Numeric.result_processor(self, dialect, type_)
class MSIdentifierPreparer_pymssql(MSIdentifierPreparer):
def __init__(self, dialect):
super(MSIdentifierPreparer_pymssql, self).__init__(dialect)
# pymssql has the very unusual behavior that it uses pyformat
# yet does not require that percent signs be doubled
self._double_percents = False
class MSDialect_pymssql(MSDialect):
supports_native_decimal = True
driver = "pymssql"
preparer = MSIdentifierPreparer_pymssql
colspecs = util.update_copy(
MSDialect.colspecs,
{sqltypes.Numeric: _MSNumeric_pymssql, sqltypes.Float: sqltypes.Float},
)
@classmethod
def dbapi(cls):
module = __import__("pymssql")
# pymmsql < 2.1.1 doesn't have a Binary method. we use string
client_ver = tuple(int(x) for x in module.__version__.split("."))
if client_ver < (2, 1, 1):
# TODO: monkeypatching here is less than ideal
module.Binary = lambda x: x if hasattr(x, "decode") else str(x)
if client_ver < (1,):
util.warn(
"The pymssql dialect expects at least "
"the 1.0 series of the pymssql DBAPI."
)
return module
def _get_server_version_info(self, connection):
vers = connection.scalar("select @@version")
m = re.match(r"Microsoft .*? - (\d+).(\d+).(\d+).(\d+)", vers)
if m:
return tuple(int(x) for x in m.group(1, 2, 3, 4))
else:
return None
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
opts.update(url.query)
port = opts.pop("port", None)
if port and "host" in opts:
opts["host"] = "%s:%s" % (opts["host"], port)
return [[], opts]
def is_disconnect(self, e, connection, cursor):
for msg in (
"Adaptive Server connection timed out",
"Net-Lib error during Connection reset by peer",
"message 20003", # connection timeout
"Error 10054",
"Not connected to any MS SQL server",
"Connection is closed",
"message 20006", # Write to the server failed
"message 20017", # Unexpected EOF from the server
"message 20047", # DBPROCESS is dead or not enabled
):
if msg in str(e):
return True
else:
return False
def set_isolation_level(self, connection, level):
if level == "AUTOCOMMIT":
connection.autocommit(True)
else:
connection.autocommit(False)
super(MSDialect_pymssql, self).set_isolation_level(
connection, level
)
dialect = MSDialect_pymssql
| [
"[email protected]"
]
| |
976b1f612310075478e65e12b9776472bcf16ee0 | a6c48d5d697cd0ba48bc7c6bd64e871326ce6458 | /server/libs/pyticas_noaa/test/stations.py | 14a4d003b74a916682190216840fbd9e27984dac | []
| no_license | lakshmanamettu/tetres | 873a878cf06b313ee26537504e63f5efdecdc98f | 1acf985f378106953cbff34fb99147cac5104328 | refs/heads/master | 2020-06-30T19:33:44.044090 | 2019-08-06T16:21:18 | 2019-08-06T16:21:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py | # -*- coding: utf-8 -*-
__author__ = 'Chongmyung Park ([email protected])'
import csv
import datetime
class ISDStation(object):
def __init__(self, row):
"""
:type row: list[str]
"""
self.usaf = None
self.wban = None
self.station_name = None
self.city = None
self.state = None
self.icao = None
self.lat = None
self.lon = None
self.elev = None
self.begin = None
self.end = None
attrs = ['usaf', 'wban', 'station_name', 'city', 'state', 'icao', 'lat', 'lon', 'elev', 'begin', 'end']
for idx, aname in enumerate(attrs):
setattr(self, aname, row[idx])
def get_date_range(self):
"""
:rtype: (datetime.date, datetime.date)
"""
return (datetime.datetime.strptime(self.begin, "%Y%m%d").date(),
datetime.datetime.strptime(self.end, "%Y%m%d").date())
def is_valid(self, dt):
"""
:type dt: datetime.date
:rtype: bool
"""
begin, end = self.get_date_range()
return (begin <= dt <= end)
def __str__(self):
return '<ISDStation usaf="%s" wban="%s" name="%s" begin="%s" end="%s">' % (
self.usaf, self.wban, self.station_name, self.begin, self.end
)
def load_isd_stations(filepath, state='MN', station_filter=None):
"""
:type filepath: str
:type state: str
:type station_filter: function
:rtype: list[ISDStation]
"""
stations = []
with open(filepath, 'r') as f:
cr = csv.reader(f)
for idx, row in enumerate(cr):
if not idx:
continue
if row[4] != state:
continue
st = ISDStation(row)
if not station_filter or station_filter(st):
stations.append(st)
return stations
DATA_FILE = 'isd-history.csv'
stations = load_isd_stations(DATA_FILE, 'MN', lambda st: st.is_valid(datetime.date(2017, 1, 31)))
for idx, st in enumerate(stations):
print(idx, ':', st) | [
"[email protected]"
]
| |
49582b8c6a9f659561e39a6385394b3b6f3d86f8 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /333_nuke/__exercises/Nuke_Scripting_for_Pipeline_TD/003_Dopolnitelnue_mareialu/module_nukescripts.py | 2f0806e2df8e5772e47a542b87ed98158954dad2 | []
| no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 624 | py | import nuke
import os
import nukescripts
nukescripts.autoBackdrop()
def clearNodeSelection():
for n in nuke.selectedNodes():
# n['selected'].setValue(False)
n.setSelected(False)
nukescripts.clear_selection_recursive()
nukescripts.declone(node)
nukescripts.declone(nuke.selectedNode())
nukescripts.swapAB(nuke.selectedNode())
nukescripts.color_nodes()
nukescripts.search_replace()
nukescripts.getNukeUserFolder()
# nukescripts.findNextNodeName('Read')
# nuke.nodes.Read()
# nuke.nodes.Read(name=nukescripts.findNextNodeName('Read'))
# nuke.createNode('Read')
# t = nuke.toNode('Read3')
# t.setSelected(1)
| [
"[email protected]"
]
| |
33d7882c955e9858ee8e5d4ad0d2b2178ee13693 | 4c432a555bc3932880b201e0686d75bc2abd80b9 | /lib/core_preprocessor.py | 56b4d092df896b8852f2055515ceb0d566155b86 | [
"BSD-3-Clause"
]
| permissive | lmdalessio/aTRAM | 3a14f08994d3ce7de692df3b90d5d7f99e8af9a8 | 807f43d83887dd80dea158b9a22ecb2a4e1aa220 | refs/heads/master | 2021-03-05T11:17:02.324817 | 2020-03-06T16:58:56 | 2020-03-06T16:58:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,987 | py | """
Format the data so that atram can use it later in atram itself.
It takes sequence read archive (SRA) files and converts them into coordinated
blast and sqlite3 databases.
"""
from os.path import join, basename, splitext
import sys
import multiprocessing
import numpy as np
from Bio.SeqIO.QualityIO import FastqGeneralIterator
from Bio.SeqIO.FastaIO import SimpleFastaParser
from . import db
from . import db_preprocessor
from . import log
from . import util
from . import blast
def preprocess(args):
"""Build the databases required by atram."""
log.setup(args['log_file'], args['blast_db'])
with util.make_temp_dir(
where=args['temp_dir'],
prefix='atram_preprocessor_',
keep=args['keep_temp_dir']) as temp_dir:
util.update_temp_dir(temp_dir, args)
with db.connect(args['blast_db'], clean=True) as cxn:
db_preprocessor.create_metadata_table(cxn, args)
db_preprocessor.create_sequences_table(cxn)
load_seqs(args, cxn)
log.info('Creating an index for the sequence table')
db_preprocessor.create_sequences_index(cxn)
shard_list = assign_seqs_to_shards(cxn, args['shard_count'])
create_all_blast_shards(args, shard_list)
def load_seqs(args, cxn):
"""Load sequences from a fasta/fastq files into the atram database."""
# We have to clamp the end suffix depending on the file type.
for (ends, clamp) in [('mixed_ends', ''), ('end_1', '1'),
('end_2', '2'), ('single_ends', '')]:
if args.get(ends):
for file_name in args[ends]:
load_one_file(args, cxn, file_name, ends, clamp)
def load_one_file(args, cxn, file_name, ends, seq_end_clamp=''):
"""Load sequences from a fasta/fastq file into the atram database."""
log.info('Loading "{}" into sqlite database'.format(file_name))
parser = get_parser(args, file_name)
with util.open_file(args, file_name) as sra_file:
batch = []
for rec in parser(sra_file):
title = rec[0].strip()
seq = rec[1]
seq_name, seq_end = blast.parse_fasta_title(
title, ends, seq_end_clamp)
batch.append((seq_name, seq_end, seq))
if len(batch) >= db.BATCH_SIZE:
db_preprocessor.insert_sequences_batch(cxn, batch)
batch = []
db_preprocessor.insert_sequences_batch(cxn, batch)
def get_parser(args, file_name):
"""Get either a fasta or fastq file parser."""
is_fastq = util.is_fastq_file(args, file_name)
return FastqGeneralIterator if is_fastq else SimpleFastaParser
def assign_seqs_to_shards(cxn, shard_count):
"""Assign sequences to blast DB shards."""
log.info('Assigning sequences to shards')
total = db_preprocessor.get_sequence_count(cxn)
offsets = np.linspace(0, total - 1, dtype=int, num=shard_count + 1)
cuts = [db_preprocessor.get_shard_cut(cxn, offset) for offset in offsets]
# Make sure the last sequence gets included
cuts[-1] = cuts[-1] + 'z'
# Now organize the list into pairs of sequence names
pairs = [(cuts[i - 1], cuts[i]) for i in range(1, len(cuts))]
return pairs
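# Editor's note: worked example (assumed values) of the pairing above. With total=100 and
# shard_count=2, offsets = [0, 49, 99]; if the sequence names at those offsets are 'seq001',
# 'seq500' and 'seq999', then cuts = ['seq001', 'seq500', 'seq999z'] and the function returns
# [('seq001', 'seq500'), ('seq500', 'seq999z')] -- each pair bounds the names placed in one shard.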
def create_all_blast_shards(args, shard_list):
"""
Assign processes to make the blast DBs.
One process for each blast DB shard.
"""
log.info('Making blast DBs')
with multiprocessing.Pool(processes=args['cpus']) as pool:
results = []
for idx, shard_params in enumerate(shard_list, 1):
results.append(pool.apply_async(
create_one_blast_shard,
(args, shard_params, idx)))
all_results = [result.get() for result in results]
log.info('Finished making blast all {} DBs'.format(len(all_results)))
def create_one_blast_shard(args, shard_params, shard_index):
"""
Create a blast DB from the shard.
We fill a fasta file with the appropriate sequences and hand things off
to the makeblastdb program.
"""
shard = '{}.{:03d}.blast'.format(args['blast_db'], shard_index)
exe_name, _ = splitext(basename(sys.argv[0]))
fasta_name = '{}_{:03d}.fasta'.format(exe_name, shard_index)
fasta_path = join(args['temp_dir'], fasta_name)
fill_blast_fasta(args['blast_db'], fasta_path, shard_params)
blast.create_db(args['temp_dir'], fasta_path, shard)
def fill_blast_fasta(blast_db, fasta_path, shard_params):
"""
Fill the fasta file used as input into blast.
Use sequences from the sqlite3 DB. We use the shard partitions passed in to
determine which sequences to get for this shard.
"""
with db.connect(blast_db) as cxn:
limit, offset = shard_params
with open(fasta_path, 'w') as fasta_file:
for row in db_preprocessor.get_sequences_in_shard(
cxn, limit, offset):
util.write_fasta_record(fasta_file, row[0], row[2], row[1])
| [
"[email protected]"
]
| |
3092a3b0b103669277fb6168d38c7712550116c8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03611/s849629257.py | c22112651ffbef820aae30f404b80b8e6c08dc60 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | n = int(input())
a = list(map(int, input().split()))
count = [0]*(max(a)+1)
for i in a: count[i]+=1
if len(count) <= 2:
print(sum(count))
exit()
ans = 0
for i in range(2,max(a)):
if count[i-1]+count[i]+count[i+1] > ans:
ans = count[i-1]+count[i]+count[i+1]
print(ans) | [
"[email protected]"
]
| |
dafbe5e66dfcb61cad1999bdba519ec439021ce9 | 8d2c37a19076690dbe0f1b6c64467eeb7475b4ae | /kikoha/apps/voting/models.py | d958372da246c1e1c1c78cc1adb74b981448be92 | []
| no_license | siolag161/kikoha | 39fbdbc6f27337b8b84661165448d0e4a23a79d4 | e8af8c7b0019faa6cc3a6821e2c04f6d8142fd08 | refs/heads/master | 2016-09-03T06:44:57.088649 | 2014-12-25T03:10:07 | 2014-12-25T03:10:07 | 25,884,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,179 | py | # coding: utf-8
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.dispatch import receiver
from django.db import models
from core.models import OwnedModel, TimeStampedModel
from .managers import VoteManager
POINTS = (
(+1, '+1'),
(-1, '-1'),
)
## todo: define maybe a contenttype base class
@python_2_unicode_compatible
class Vote(OwnedModel, TimeStampedModel, models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
object = generic.GenericForeignKey('content_type', 'object_id')
vote = models.SmallIntegerField(choices = POINTS )
objects = VoteManager()
class Meta:
db_table = 'votes'
unique_together = (('author', 'content_type', 'object_id'),)
def __str__(self):
return '%s: %s on %s' % (self.author, self.vote, self.object)
def is_upvote(self):
return self.vote == 1
def is_downvote(self):
return self.vote == -1
TOTAL_CHANGES = {
'CLEARED': -1,
'UPGRADED': 0,
'DOWNGRADED': 0,
'CREATED': +1
}
UPVOTES_CHANGES = {
'CLEARED': 0,
'UPGRADED': +1,
'DOWNGRADED': -1,
'CREATED': +1
}
import logging
logger = logging.getLogger("werkzeug")
class VotedModel(models.Model):
upvotes = models.PositiveIntegerField(default=1)
totalvotes = models.IntegerField(default=1)
@property
def downvotes(self):
        return self.totalvotes - self.upvotes
@property
def point(self):
return 2*self.upvotes - self.totalvotes # upvotes - downvotes
class Meta:
abstract = True
def get_point(self):
return { 'point': self.point, 'num_votes': self.totalvotes }
def update_vote(self, update_type, vote):
if update_type and vote:
self.totalvotes += TOTAL_CHANGES[update_type]
self.upvotes += UPVOTES_CHANGES[update_type]
if update_type == 'CLEARED' and vote.vote > 0:
self.upvotes -= vote.vote
if update_type == 'CREATED' and vote.vote < 0:
self.upvotes += vote.vote
self.save()
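    # Editor's note: an illustrative walk-through (hypothetical values) of the update rules above.
    # From upvotes=1, totalvotes=1, a 'CREATED' downvote (vote.vote == -1) gives totalvotes=2 and
    # upvotes=1 (+1 from UPVOTES_CHANGES, -1 from the vote itself); clearing an upvote
    # ('CLEARED', vote.vote == +1) from upvotes=2, totalvotes=2 gives totalvotes=1, upvotes=1.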
| [
"[email protected]"
]
| |
a5c7e9f7952089395270e27d81634bb006a7830d | 0133d8d56ee611a0c65ef80693ae263692557b96 | /spira/yevon/filters/layer_filter.py | 4ff08772238a31e9cf4202378a362503d9cfb6d0 | [
"MIT"
]
| permissive | JCoetzee123/spira | e77380df2e79333b0c48953faae2d3dae50a8d27 | dae08feba1578ecc8745b45109f4fb7bef374546 | refs/heads/master | 2021-06-25T23:32:52.289382 | 2019-07-17T13:25:50 | 2019-07-17T13:25:50 | 198,605,222 | 1 | 0 | MIT | 2019-07-24T09:42:07 | 2019-07-24T09:42:06 | null | UTF-8 | Python | false | false | 1,112 | py | from spira.log import SPIRA_LOG as LOG
from spira.yevon.filters.filter import Filter
# from spira.yevon.process.layer_list import LayerList, LayerListParameter
from spira.yevon.process.gdsii_layer import LayerList, LayerListParameter
__all__ = ['LayerFilterAllow', 'LayerFilterDelete']
class __LayerFilter__(Filter):
layers = LayerListParameter()
class LayerFilterAllow(__LayerFilter__):
def __filter___LayerElement____(self, item):
if item.layer in self.layers:
return [item]
else:
LOG.debug("LayerFilterAllow is filtering out item %s" %item)
return []
def __repr__(self):
return "[SPiRA: LayerFilterDelete] (layer count {})".format(len(self.layers))
class LayerFilterDelete(__LayerFilter__):
def __filter___LayerElement____(self, item):
if item.layer in self.layers:
LOG.debug("LayerFilterDelete is filtering out item %s" %item)
return []
else:
return [item]
def __repr__(self):
return "[SPiRA: LayerFilterDelete] (layer count {})".format(len(self.layers))
| [
"[email protected]"
]
| |
2b4c1b5b88e67aa6787b4704564067ebd2403069 | 938a496fe78d5538af94017c78a11615a8498682 | /SwordRefersToOffer/11.NumberOf1.py | 55ed4a03ad52b60ceb6e7472bafa3fd959ffa8fe | []
| no_license | huilizhou/Leetcode-pyhton | 261280044d15d0baeb227248ade675177efdb297 | 6ae85bf79c5a21735e3c245c0c256f29c1c60926 | refs/heads/master | 2020-03-28T15:57:52.762162 | 2019-11-26T06:14:13 | 2019-11-26T06:14:13 | 148,644,059 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | # -*- coding:utf-8 -*-
# Count the number of 1 bits in the binary representation of an integer
class Solution:
def NumberOf1(self, n):
# write code here
count = 0
if n < 0:
n = n & 0xffffffff
while n:
count += 1
n = n & (n - 1)
return count
print(Solution().NumberOf1(3))
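# Editor's note: n & (n - 1) clears the lowest set bit, so the loop runs once per 1-bit.
# For example 12 = 0b1100 -> 0b1000 -> 0b0000 gives 2, and the 0xffffffff mask first maps a
# negative input onto its 32-bit two's-complement form, so NumberOf1(-1) returns 32.
# print(Solution().NumberOf1(12))   # expected: 2
# print(Solution().NumberOf1(-1))   # expected: 32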
| [
"[email protected]"
]
| |
34a7acd539aa930a26936a914f4e663c21da9000 | 73978c38ef18dd87b75d6af706de58f5633ee157 | /trunk/zkomm_multiserver/src/modelo_lista_nodos.py | 44435e9e08934dc9b4060a6c58be21ffdfa36e65 | []
| no_license | BGCX261/zkomm-multiserv-svn-to-git | 1702f2bc9ef8764dd9a35c301e9dbb362b4e2d12 | 830ccd1ec3a617f30450ad6d038107bf9aafa6a2 | refs/heads/master | 2021-01-19T08:53:11.428916 | 2015-08-25T15:43:46 | 2015-08-25T15:43:46 | 41,488,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | #ZKOMM Multiserver
# View models
from PyQt4 import *
class modelo_lista_nodos:
def __init__(self, padre, lista = []):
self.m = QtGui.QStandardItemModel(0, 5, padre)
encabezados = QtCore.QStringList()
encabezados << "Nodo (ID)" << "Nombre" << "IP" << "Puerto" << "Capacidad" << "Carga" << "Sobrecarga" << "Penalizacion"
self.m.setHorizontalHeaderLabels(encabezados)
self.lista_nodos = lista
self.actualizar_lista(self.lista_nodos)
def actualizar_lista(self,lista):
self.lista_nodos=lista
row = 0
self.m.setRowCount(len(self.lista_nodos))
for i in self.lista_nodos:
self.m.setItem(row, 0, QtGui.QStandardItem(str(i.ident)))
self.m.setItem(row, 1, QtGui.QStandardItem(str(i.nombre)))
self.m.setItem(row, 2, QtGui.QStandardItem(str(i.ip)))
self.m.setItem(row, 3, QtGui.QStandardItem(str(i.puerto)))
self.m.setItem(row, 4, QtGui.QStandardItem(str(i.capacidad)))
self.m.setItem(row, 5, QtGui.QStandardItem(str(i.carga)))
self.m.setItem(row, 6, QtGui.QStandardItem(str(i.sobrecarga)))
self.m.setItem(row, 7, QtGui.QStandardItem(str(i.penalizacion)))
row = row + 1
| [
"[email protected]"
]
| |
9ba8b32ba254af1cf723aab25c9a85110c4f54b6 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/438/usersdata/348/94998/submittedfiles/pico.py | 8c6e09f062dd15ec153e26731839b488ba972b38 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | # -*- coding: utf-8 -*-
# functions
def crescente(lista):
    # checks whether the list is in increasing order: returns 'S' (yes) or 'N' (no)
    for i in range(len(lista) - 1):
        if lista[i] >= lista[i + 1]:
            return 'N'
    return 'S'
# main program
n = int(input('Digite a quantidade de elementos da lista: '))
l1 = []
for i in range (0,n,1):
l1.append(int(input( 'informe o %d° elemento da lista: ' %(i+1))))
print(crescente(l1))
| [
"[email protected]"
]
| |
ea1f782a1427aa290425642443e58bfbbed6a50a | b144c5142226de4e6254e0044a1ca0fcd4c8bbc6 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/quicktest/l2tpratecpf_783f5a0ed938906c9d7aa8783627a802.py | 2e67204c153bbd1a243fc6bbfb26728068fbcdcc | [
"MIT"
]
| permissive | iwanb/ixnetwork_restpy | fa8b885ea7a4179048ef2636c37ef7d3f6692e31 | c2cb68fee9f2cc2f86660760e9e07bd06c0013c2 | refs/heads/master | 2021-01-02T17:27:37.096268 | 2020-02-11T09:28:15 | 2020-02-11T09:28:15 | 239,721,780 | 0 | 0 | NOASSERTION | 2020-02-11T09:20:22 | 2020-02-11T09:20:21 | null | UTF-8 | Python | false | false | 12,520 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class L2tpRateCpf(Base):
"""
    The L2tpRateCpf class encapsulates a list of l2tpRateCpf resources that are managed by the user.
A list of resources can be retrieved from the server using the L2tpRateCpf.find() method.
The list can be managed by the user by using the L2tpRateCpf.add() and L2tpRateCpf.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'l2tpRateCpf'
def __init__(self, parent):
super(L2tpRateCpf, self).__init__(parent)
@property
def Results(self):
"""An instance of the Results class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.results_09c332531ff8a5316dba4ebcc7b6109f.Results)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.results_09c332531ff8a5316dba4ebcc7b6109f import Results
return Results(self)._select()
@property
def TestConfig(self):
"""An instance of the TestConfig class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_210b52cb283940b311e5a57340ec017f.TestConfig)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_210b52cb283940b311e5a57340ec017f import TestConfig
return TestConfig(self)._select()
@property
def ForceApplyQTConfig(self):
"""Apply QT config
Returns:
bool
"""
return self._get_attribute('forceApplyQTConfig')
@ForceApplyQTConfig.setter
def ForceApplyQTConfig(self, value):
self._set_attribute('forceApplyQTConfig', value)
@property
def InputParameters(self):
"""Input Parameters
Returns:
str
"""
return self._get_attribute('inputParameters')
@InputParameters.setter
def InputParameters(self, value):
self._set_attribute('inputParameters', value)
@property
def Mode(self):
"""Test mode
Returns:
str(existingMode|newMode)
"""
return self._get_attribute('mode')
@Mode.setter
def Mode(self, value):
self._set_attribute('mode', value)
@property
def Name(self):
"""Test name
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
def update(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
"""Updates a child instance of l2tpRateCpf on the server.
Args:
ForceApplyQTConfig (bool): Apply QT config
InputParameters (str): Input Parameters
Mode (str(existingMode|newMode)): Test mode
Name (str): Test name
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
def add(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
"""Adds a new l2tpRateCpf node on the server and retrieves it in this instance.
Args:
ForceApplyQTConfig (bool): Apply QT config
InputParameters (str): Input Parameters
Mode (str(existingMode|newMode)): Test mode
Name (str): Test name
Returns:
self: This instance with all currently retrieved l2tpRateCpf data using find and the newly added l2tpRateCpf data available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._create(locals())
def remove(self):
"""Deletes all the l2tpRateCpf data in this instance from server.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
"""Finds and retrieves l2tpRateCpf data from the server.
All named parameters support regex and can be used to selectively retrieve l2tpRateCpf data from the server.
By default the find method takes no parameters and will retrieve all l2tpRateCpf data from the server.
Args:
ForceApplyQTConfig (bool): Apply QT config
InputParameters (str): Input Parameters
Mode (str(existingMode|newMode)): Test mode
Name (str): Test name
Returns:
self: This instance with matching l2tpRateCpf data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of l2tpRateCpf data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the l2tpRateCpf data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def Apply(self):
"""Executes the apply operation on the server.
Applies the specified Quick Test.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('apply', payload=payload, response_object=None)
def ApplyAsync(self):
"""Executes the applyAsync operation on the server.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('applyAsync', payload=payload, response_object=None)
def ApplyAsyncResult(self):
"""Executes the applyAsyncResult operation on the server.
Returns:
bool:
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('applyAsyncResult', payload=payload, response_object=None)
def ApplyITWizardConfiguration(self):
"""Executes the applyITWizardConfiguration operation on the server.
Applies the specified Quick Test.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('applyITWizardConfiguration', payload=payload, response_object=None)
def GenerateReport(self):
"""Executes the generateReport operation on the server.
        Generate a PDF report for the last successful test run.
Returns:
str: This method is asynchronous and has no return value.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('generateReport', payload=payload, response_object=None)
def Run(self, *args, **kwargs):
"""Executes the run operation on the server.
Starts the specified Quick Test and waits for its execution to finish.
The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
The following correlates the modeling Signatures to the python *args variable length list:
run()list
Returns:
list(str): This method is synchronous and returns the result of the test.
run(InputParameters:string)list
Args:
args[0] is InputParameters (str): The input arguments of the test.
Returns:
list(str): This method is synchronous and returns the result of the test.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('run', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
"""Executes the start operation on the server.
Starts the specified Quick Test.
The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
The following correlates the modeling Signatures to the python *args variable length list:
start()
start(InputParameters:string)
Args:
args[0] is InputParameters (str): The input arguments of the test.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self):
"""Executes the stop operation on the server.
Stops the currently running Quick Test.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('stop', payload=payload, response_object=None)
def WaitForTest(self):
"""Executes the waitForTest operation on the server.
Waits for the execution of the specified Quick Test to be completed.
Returns:
list(str): This method is synchronous and returns the result of the test.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('waitForTest', payload=payload, response_object=None)
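# Editor's note: a hedged usage sketch of this auto-generated class, following the find/add/remove
# pattern described in the class docstring. Obtaining `ixnetwork` from a TestPlatform session is
# assumed and not shown; the attribute path and argument values below are illustrative only.
#
#     quick_test = ixnetwork.QuickTest.L2tpRateCpf.add(Name='l2tp-rate-1', Mode='newMode')
#     quick_test.Start()
#     results = quick_test.WaitForTest()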
| [
"[email protected]"
]
| |
0355e904e902fa6f33f696424157267e37861148 | 491235d50ab27bb871d58a5dfff74d6a4aa9bbe6 | /pong-client/gui/__init__.py | 4ac6e8ce43257509a2ebdc0c6c8e5d0bacc29e8f | []
| no_license | elgrandt/Pong-Network | 768bb861757d1fb98be3b761a66ad14e632f7932 | 204e1c5d9fbd53eece906d56df394602bdc269b6 | refs/heads/master | 2022-12-06T16:12:01.506699 | 2020-08-18T03:27:47 | 2020-08-18T03:27:47 | 288,315,589 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | from add_border import *
from input_text import *
from moving_bar import *
from area_movil import *
from server_list import *
from server_connect_menu import *
from surface_gf import *
import add_border
import input_text
import moving_bar
import area_movil
from status_cur import * | [
"[email protected]"
]
| |
17d29257f02a4cbd86e726dd708d33d09015e44d | e1e5ffef1eeadd886651c7eaa814f7da1d2ade0a | /Systest/tests/ip_slowpath.py | 6d6a6823d1c190bbc1ff7525420e8459c4038e33 | []
| no_license | muttu2244/MyPython | 1ddf1958e5a3514f9605d1f83c0930b24b856391 | 984ca763feae49a44c271342dbc15fde935174cf | refs/heads/master | 2021-06-09T02:21:09.801103 | 2017-10-10T07:30:04 | 2017-10-10T07:30:04 | 13,803,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,689 | py | #!/usr/bin/env python2.5
#######################################################################
#
# Copyright (c) Stoke, Inc.
# All Rights Reserved.
#
# This code is confidential and proprietary to Stoke, Inc. and may only
# be used under a license from Stoke.
#
#######################################################################
"""
DESCRIPTION : This script contains the ip_slowpath APIs that are used in the
              slowpath test cases
TEST PLAN : IP_SLOWPATH Test plan V0.3
AUTHOR : Ganapathi; email : [email protected]
REVIEWER :
DEPENDENCIES : Linux.py,device.py
"""
import sys, os
mydir = os.path.dirname(__file__)
qa_lib_dir = mydir
if qa_lib_dir not in sys.path:
sys.path.insert(1,qa_lib_dir)
import pexpect
import time
import string
import sys
import re
from logging import getLogger
log = getLogger()
from StokeTest import test_case
#from ip_slowpath_config import *
def ip_verify_ip_counters_icmp(self,total_tx='none',echo_request='none',total='none',echo_reply='none',
unreachable='none',mask_request='none',mask_reply='none',
source_quench='none',param_problem='none',timestamp='none',
redirects='none',info_reply='none',ttl_expired='none',
other='none',format='none',rate_limit='none',out_str='none'):
"""
Description: - This API verify the "show ip conters icmp" with passing differnt combinations of
transmitted ICMP packet statastics such as echo request etc with output when the
command run.
CLI Used- CLI.s that are used for the API <show ip counters icmp>.
Input: - List of differnt ICMP packet statastics Inputs such as no of Echo Request packets
transmitted etc.
Output: - Returns to the calling function ,i.e Pass or Fail .
Author: - Ganapathi, [email protected] .
Reviewer: - """
expected={'icmp_echo_request':echo_request,'icmp_echo_reply':echo_reply,
'icmp_total':total_tx,'icmp_unreachable':unreachable,
'icmp_mask_request':mask_request,'icmp_mask_reply':mask_reply,
'icmp_source_quench':source_quench,'icmp_param_problem':param_problem,
'icmp_timestamp':timestamp,'icmp_redirects':redirects,
'icmp_info_reply':info_reply,'icmp_ttl_expired':ttl_expired,
'icmp_other':other,'icmp_format':format,'icmp_rate_limit':rate_limit}
cli="show ip counters icmp"
ret_str=self.cmd(cli)
regex_list=['\s*Rx:\s+Total\s+(?P<rx_total>\d+)\s+Tx:\s+Total\s+(?P<icmp_total>\d+)',
'\s+Echo\s+Request\s+(?P<rx_echo_request>\d+)\s+Echo\s+Request\s+(?P<icmp_echo_request>\d+)',
'\s+Echo\s+Reply\s+(?P<rx_echo_reply>\d+)\s+Echo\s+Reply\s+(?P<icmp_echo_reply>\d+)',
'\s+Unreachable\s+(?P<rx_unreachable>\d+)\s+Unreachable\s+(?P<icmp_unreachable>\d+)',
'\s+Param\s+Problem\s+(?P<rx_param_problem>\d+)\s+Param\s+Problem\s+(?P<icmp_param_problem>\d+)',
'\s+Redirects\s+(?P<rx_redirects>\d+)\s+redirects\s+(?P<icmp_redirects>\d+)',
'\s+TTL\s+Expired\s+(?P<rx_ttl_expired>\d+)\s+TTL\s+Expired\s+(?P<icmp_ttl_expired>\d+)',
'\s+Mask\s+Request\s+(?P<rx_mask_request>\d+)\s+Mask\s+Request\s+(?P<icmp_mask_request>\d+)',
'\s+Mask\s+Reply\s+(?P<rx_mask_reply>\d+)\s+Mask\s+Reply\s+(?P<icmp_mask_reply>\d+)',
'\s+Source\s+Quench\s+(?P<rx_source_quench>\d+)\s+Source\s+Quench\s+(?P<icmp_source_quench>\d+)',
'\s+Timestamp\s+(?P<rx_timestamp>\d+)\s+Timestamp\s+(?P<icmp_timestamp>\d+)',
'\s+Info\s+Request\s+(?P<rx_info_request>\d+)\s+Info\s+Reply\s+(?P<icmp_info_reply>\d+)',
'\s+Other\s+(?P<rx_other>\d+)\s+Other\s+(?P<icmp_other>\d+)',
'\s+Error:\s+Checksum\s+(?P<chksum_error>\d+)\s+Format\s+(?P<format_error>\d+)',
'\s+Length\s+(?P<length_error>\d+)\s+Rate\s+Limit\s+(?P<rate_limit_error>\d+)']
actual={}
for regex in regex_list:
obj=re.compile(regex,re.I)
m=obj.search(ret_str)
if m:
dict=m.groupdict()
for key in dict.keys():
actual[key]=dict[key]
for keys in expected.keys():
if expected[keys] != 'none':
if expected[keys] != actual[keys]:
return 0
return 1
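# Editor's note: hedged usage sketch. These verify_* helpers take `self` explicitly, so they are
# called with a device handle that provides .cmd(), e.g. from a test case (device name hypothetical):
#
#     assert ip_verify_ip_counters_icmp(ssx, echo_request='5', echo_reply='5') == 1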
def telnet_glc(self,telnet_to_glc='none'):
"""
Description: - This API will establish a telnet session with GLC from an IMC
CLI Used- No CLI used
Input: - Slot no to which we want to telnet
Output: -
Author: - Ganapathi, [email protected] .
Reviewer: - """
cmd_str="SOCK=/nv telnet 192.168.200.20"+telnet_to_glc
self.ses.sendline(cmd_str)
self.ses.expect("login:")
self.ses.sendline("root")
return
def ip_verify_ip_counters_general(self,checksum='none',no_resource='none',
source='none',ttl_expired='none',
bad_version='none',option='none',
frag_dropped='none',frag_malformed='none',
acl_in_dropped='none',unreachable='none',
length='none',runt='none',
destination='none',
arp_unresolved='none',other='none',
frag_timeout='none',could_not_frag='none',
acl_out_dropped='none',local_delivery='none',local_out='none',
fragments='none',express='none',reassembled='none',exception='none',
options_present='none',fragmented='none',ike_packets='none' ):
"""
    Description: - This API verifies whether packets are received on the line card by checking
                   the output statistics of the command "show ip counters general".
    CLI Used-      CLIs that are used for the API <show ip counters general>.
    Input: -       Expected counter values passed as parameters.
    Output: -      Returns pass or fail to the calling function.
Author: - Ganapathi, [email protected] .
Reviewer: - """
expected={'checksum':checksum,'no_resource':no_resource,
'source':source,'ttl_expired':ttl_expired,
'bad_version': bad_version,'option':option,
'frag_dropped':frag_dropped,'frag_malformed':frag_malformed,
'acl_in_dropped': acl_in_dropped,'unreachable':unreachable,
'length':length,'runt':runt,'destination':destination,
'arp_unresolved':arp_unresolved,'other': other ,
'frag_timeout':frag_timeout,
'could_not_frag':could_not_frag,
'acl_out_dropped':acl_out_dropped,
'local_delivery':local_delivery,
'local_out':local_out,
'fragments':fragments,
'express' :express,
'reassembled':reassembled,
'exception':exception,
'options_present':options_present,
'fragmented':fragmented,
'ike_packets':ike_packets}
regex_list=['\s+Rx:\s+Local\s+Delivery\s+(?P<local_delivery>\d+)\s+Tx:\s+Local\s+Out\s+(?P<local_out>\d+)',
'\s+Fragments\s(?P<fragments>\d+)\s+Express\s+(?P<express>\d+)',
'\s+Reassembled\s+(?P<reassembled>\d+)\s+Exception\s+(?P<exception>\d+)',
'\s+Options\s+Present\s+(?P<options_present>\d+)\s+Fragmented\s+(?P<fragmented>\d+)',
'\s+IKE\s+Packets\s+(?P<ike_packets>\d+)',
'\s+Error:\s+Checksum\s+(?P<checksum>\d+)\s+Unreachable\s+(?P<unreachable>\d+)',
'\s+No\s+Resource\s+(?P<no_resource>\d+)\s+length\s+(?P<length>\d+)',
'\s+Source\s+(?P<source>\d+)\s+Runt\s+(?P<runt>\d+)',
'\s+TTL\s+Expired\s+(?P<ttl_expired>\d+)\s+Destination\s+(?P<destination>\d+)',
'\s+Bad\s+Version\s+(?P<bad_version>\d+)\s+ARP\s+Unresolved\s+(?P<arp_unresolved>\d+)',
'\s+Option\s+(?P<option>\d+)\s+Other\s+(?P<other>\d+)',
'\s+Frag\s+Dropped\s+(?P<frag_dropped>\d+)\s+Frag\s+Timeout\s+(?P<frag_timeout>\d+)',
'\s+Frag\s+Malformed\s+(?P<frag_malformed>\d+)\s+Couldn\'t\s+Frag\s+(?P<could_not_frag>\d+)',
'\s+ACL-In\s+Dropped\s+(?P<acl_in_dropped>\d+)\s+ACL-Out\s+Dropped\s+(?P<acl_out_dropped>\d+)']
actual={}
cli="show ip counters general"
ret_str=self.cmd(cli)
for regex in regex_list:
obj=re.compile(regex,re.I)
m=obj.search(ret_str)
if m:
dict=m.groupdict()
for key in dict.keys():
actual[key]=dict[key]
for keys in expected.keys():
if expected[keys] != 'none':
if expected[keys] != actual[keys]:
return 0
return 1
def ip_verify_ipv6_counters_general(self,too_many_ext_hdrs='none',no_resource='none',
bad_address_scope="none",hoplimit_expired='none',
bad_version='none',option='none',
frag_dropped='none',frag_malformed='none',
reassembled='none',unreachable='none',could_not_forward="none",
acl_in_dropped="none", frag_overflow='none',esp_ah="none",
other='none',error_ike_pkt="none",
frag_timeout='none',could_not_frag='none',reassembly="none",
acl_out_dropped='none',local_delivery='none',local_out='none',
fragments='none',express='none',slowpath="none",router_header_out="none",
runt='none',neighbour_unresolved="none",length="none",
fragmented='none',ike_packets='none',express_forward='none' ):
"""
    Description: - This API verifies whether packets are received on the line card by checking
                   the output statistics of the command "show ipv6 counters general".
    CLI Used-      CLIs that are used for the API <show ipv6 counters general>.
    Input: -       Expected counter values passed as parameters.
    Output: -      Returns pass or fail to the calling function.
Author: - Ganapathi, [email protected] .
Reviewer: - """
expected={'too_many_ext_hdrs':too_many_ext_hdrs,'no_resource':no_resource,
'hoplimit_expired':hoplimit_expired,'bad_address_scope':bad_address_scope,
'bad_version': bad_version,'option':option,
'frag_dropped':frag_dropped,'frag_malformed':frag_malformed,
'acl_in_dropped': acl_in_dropped,'unreachable':unreachable,
'could_not_forward':could_not_forward,
'length':length,'other': other ,
'frag_timeout':frag_timeout,
'could_not_frag':could_not_frag,'slowpath':slowpath,'router_header_out':router_header_out,
'acl_out_dropped':acl_out_dropped,
'local_delivery':local_delivery,
'local_out':local_out,
'fragments':fragments,'esp_ah':esp_ah,'frag_overflow':frag_overflow,
'express' :express,'runt':runt,
'reassembled':reassembled,'reassembly':reassembly,
'fragmented':fragmented,'error_ike_pkt':error_ike_pkt,
'ike_packets':ike_packets,'express_forward':express_forward}
regex_list=['\s*Rx:\s+Total\s+(?P<rx_total>\d+)\s+Tx:\s+Total\s+(?P<tx_total>\d+)',
'\s+Local\s+Delivery\s+(?P<local_delivery>\d+)\s+Local\s+Out\s+(?P<local_out>\d+)',
'\s+Fragments\s(?P<fragments>\d+)\s+Express\s+Forward\s+(?P<express_forward>\d+)',
'\s+Reassembled\s+(?P<reassembled>\d+)\s+Slowpath\s+Forward\s+(?P<slowpath_forward>\d+)',
'\s+IKE\s+Packets\s+(?P<rx_ike_packets>\d+)\s+Fragmented\s+(?P<fragmented>\d+)',
'\s+Route\s+Header\s+Out\s+(?P<route_hdr_out>\d+)',
'\s+Error:\s+Too\s+many\s+Ext\s+Hdrs\s+(?P<too_many_ext_hdrs>\d+)\s+Unreachable\s+(?P<unreachable>\d+)',
'\s+No\s+Resource\s+(?P<no_resource>\d+)\s+length\s+(?P<length>\d+)',
'\s+Bad\s+Address\s+Scope\s+(?P<bad_address_scope>\d+)\s+Runt\s+(?P<runt>\d+)',
'\s+Hoplimit\s+Expired\s+(?P<hoplimit_expired>\d+)\s+Can\'t\s+Forward\s+(?P<could_not_forward>\d+)',
'\s+Bad\s+Version\s+(?P<bad_version>\d+)\s+Neighbor\s+Unresolved\s+(?P<neighbor_unresolved>\d+)',
'\s+Option\s+(?P<option>\d+)\s+Other\s+(?P<other>\d+)',
'\s+Frag\s+Dropped\s+(?P<frag_dropped>\d+)\s+Frag\s+Timeout\s+(?P<frag_timeout>\d+)',
'\s+Reassembly\s+(?P<reassembly>\d+)\s+Fragment\s+Overflow\s+(?P<frag_overflow>\d+)',
'\s+Frag\s+Malformed\s+(?P<frag_malformed>\d+)\s+Couldn\'t\s+Frag\s+(?P<could_not_frag>\d+)',
'\s+ACL-In\s+Dropped\s+(?P<acl_in_dropped>\d+)\s+ACL-Out\s+Dropped\s+(?P<acl_out_dropped>\d+)',
'\s+IKE\s+packet\s+(?P<error_ike_packet>\d+)\s+ESP\/AH\s+(?P<esp_ah>\d+)']
actual={}
flag=0
cli="show ipv6 counters general"
ret_str=self.cmd(cli)
for regex in regex_list:
obj=re.compile(regex,re.I)
m=obj.search(ret_str)
if m:
dict=m.groupdict()
for key in dict.keys():
actual[key]=dict[key]
for keys in expected.keys():
if expected[keys] != 'none':
if expected[keys] != actual[keys]:
return 0
return 1
def ip_verify_ip_counters_udp(self,checksum='none',no_port='none',
rx_total='none',tx_total='none',
short_pkt='none',short_hdr='none',
full='none',no_port_bcast='none' ):
"""
    Description: - This API verifies whether UDP packets are received on the line card by checking
                   the output statistics of the command "show ip counters udp".
    CLI Used-      CLIs that are used for the API <show ip counters udp>.
    Input: -       Expected output statistics of the command.
    Output: -      Returns pass or fail to the calling function.
Author: - Ganapathi, [email protected] .
Reviewer: - """
expected={'checksum':checksum,'no_port':no_port,
'rx_total':rx_total,'tx_total':tx_total,
'short_pkt':short_pkt,'short_hdr':short_hdr,
'full':full,'no_port_bcast':no_port_bcast}
regex_list=['\s+Rx:\s+Total\s+(?P<rx_total>\d+)',
'\s+Tx:\s+Total\s+(?P<tx_total>\d+)',
'\s+Error:\s+Checksum\s+(?P<checksum>\d+)',
'\s+No\s+Port\s+(?P<no_port>\d+)',
'\s+Short\s+packet\s+(?P<short_pkt>\d+)',
'\s+Short\s+header\s+(?P<short_hdr>\d+)',
'\s+Full\s+(?P<full>\d+)',
'\s+No\s+port\s+bcast\s+(?P<no_port_bcast>\d+)']
actual={}
cli="show ip counters udp"
ret_str=self.cmd(cli)
for regex in regex_list:
obj=re.compile(regex,re.I)
m=obj.search(ret_str)
if m:
dict=m.groupdict()
for key in dict.keys():
actual[key]=dict[key]
for keys in expected.keys():
if expected[keys] != 'none':
if expected[keys] != actual[keys]:
return 0
return 1
def get_mac_address(self,port='none'):
"""
    Description: - This API will return the MAC address of the port that is directly connected
                   to the Linux machine
    CLI Used-      "show port"
    Input: -       slot and port as a parameter, format slot/port
    Output: -      Returns the MAC address of the required port
Author: - Ganapathi, [email protected] .
Reviewer: - """
out_str=self.cmd("show port")
split_list=out_str.split("\n")
for line in split_list:
if (re.compile(port)).search(line):
ret_list=line.split()
return ret_list[(len(ret_list))-1].strip("\n")
def get_cli_passwd(self,day='none',level='none'):
"""
    Description: - This API will return the password to enable the required hidden level on the SSX box
                   for a particular day: today, tomorrow or yesterday
CLI Used- runs the cli-pwd command on linux m/c
Input: - day,level number
Output: - Returns the password
Author: - Ganapathi, [email protected] .
Reviewer: - """
flag=0
count=7
os.system("cli-pwd >> pwd.txt")
fileptr=file("pwd.txt","r")
lvl_str= "level %s"%(level)
for line in fileptr:
if (re.compile(day,re.I)).search(line) or flag==1:
if (re.compile(lvl_str,re.I)).search(line):
var=line.partition(((re.compile("enable",re.I)).search(line)).group())
list1=var[2].split(',')
fileptr.close()
os.system("rm pwd.txt")
return list1[0].strip()
flag=1
count = count - 1
if count == 0:
flag=0
return "not found"
def generic_get_slot_and_port(self,slot_port='none'):
"""
    Description: - This API splits the input parameter string into slot and port
                   and places these values into a dictionary
CLI Used- no CLI used
Input: - string as parameter
Output: - Returns the dictionary with slot and parameter
Author: - Ganapathi, [email protected] .
Reviewer: - """
dict={}
regex=(re.compile(r'(\d+)/(\d+)').search(slot_port))
dict['slot']=regex.group(1)
dict['port']=regex.group(2)
return dict
def get_hex_ip_addr(self,dot_ip='none'):
"""
    Description: - This API returns the hex form of an IP address, taking the dotted format of the IP as its input parameter.
CLI Used- no CLI used
Input: - Dotted format of IP address
Output: - Hex format of IP address
Author: - Ganapathi, [email protected] .
Reviewer: - """
regex=(re.compile(r'(\d+).(\d+).(\d+).(\d+)').search(dot_ip))
if int(regex.group(1))>15:
first_octet=(str(hex(int(regex.group(1))))).replace("0x","")
else:
first_octet=(str(hex(int(regex.group(1))))).replace("x","")
if int(regex.group(2))>15:
second_octet=(str(hex(int(regex.group(2))))).replace("0x","")
else:
second_octet=(str(hex(int(regex.group(2))))).replace("x","")
if int(regex.group(3))>15:
third_octet=(str(hex(int(regex.group(3))))).replace("0x","")
else:
third_octet=(str(hex(int(regex.group(3))))).replace("x","")
if int(regex.group(4))>15:
fourth_octet=(str(hex(int(regex.group(4))))).replace("0x","")
else:
fourth_octet=(str(hex(int(regex.group(4))))).replace("x","")
return first_octet+second_octet+third_octet+fourth_octet
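# Editor's note: worked example of the conversion above -- every octet is rendered as two hex
# digits and concatenated, so for dot_ip="10.1.20.255" the function returns "0a0114ff".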
#LINUX .py
def invoke_scapy(self,proto='none',sudo_passwd='none',dst_mac_adr='ff:ff:ff:ff:ff:ff',dst_ip_adr='127.0.0.1',src_ip_adr='127.0.0.1',ip_checksum='none',ip_hdr_len='none',ip_pkt_len='none',ip_ttl='none',ip_flags='none',ip_options='none',from_interface='eth1',large_pkt=0,udp='none',udp_checksum='none',ip_tos='none',ip6_plen=0,log="none",count = 5):
"""
       Description: This API invokes the scapy tool, which runs in the Python
                    interpreter, and sends crafted IP packets on the required
                    interface to the required destination with the required
                    fields populated
CLI used :sudo python
Input :The fields which we require to fill the IP packets
Output :Sends 4 IP packets to SSX
Author :Ganapathi,[email protected] """
dict={'chksum':ip_checksum,'ihl':ip_hdr_len,'len':ip_pkt_len,
'ttl':ip_ttl,'flags':ip_flags,'tos':ip_tos}
str1=dst_mac_adr.strip()
payload = 'X'
if large_pkt==1:
for i in range(1200):
payload+='X'
if ip_options == 'none':
for key in dict.keys():
if dict[key] != 'none':
value=int(dict[key])
scapy_cmd='sendp(Ether(dst="%s")/IP(src="%s",dst="%s",%s=%d)/"%s",iface="%s")' %(str1,src_ip_adr,dst_ip_adr,key,value,payload,from_interface)
break
else:
scapy_cmd='sendp(Ether(dst="%s")/IP(src="%s",dst="%s")/"%s",iface="%s")' %(str1,src_ip_adr,dst_ip_adr,payload,from_interface)
else:
if ip_ttl == 'none' and ip_flags == 'none':
scapy_cmd='sendp(Ether(dst="%s")/IP(src="%s",dst="%s",options="%s")/"%s",iface="%s")' %(str1,src_ip_adr,dst_ip_adr,ip_options,payload,from_interface)
elif ip_flags != 'none':
scapy_cmd='sendp(Ether(dst="%s")/IP(src="%s",dst="%s",flags=%d,options="%s")/"%s",iface="%s")' %(str1,src_ip_adr,dst_ip_adr,ip_flags,ip_options,payload,from_interface)
else:
value=int(ip_ttl)
scapy_cmd='sendp(Ether(dst="%s")/IP(src="%s",dst="%s",options="%s",ttl=%d)/"%s",iface="%s")' %(str1,src_ip_adr,dst_ip_adr,ip_options,value,payload,from_interface)
if udp != 'none':
if udp_checksum != 'none':
value2=int(udp_checksum)
scapy_cmd='sendp(Ether(dst="%s")/IP(src="%s",dst="%s")/UDP(dport=500,chksum=%d)/"%s",iface="%s")' %(str1,src_ip_adr,dst_ip_adr,value2,payload,from_interface)
if ip_checksum != 'none':
value1=int(ip_checksum)
scapy_cmd='sendp(Ether(dst="%s")/IP(src="%s",dst="%s",chksum=%d)/UDP(dport=500,chksum=%d)/"%s",iface="%s")' %(str1,src_ip_adr,dst_ip_adr,value1,value2,payload,from_interface)
if ip_flags != 'none':
value1=int(ip_flags)
scapy_cmd='sendp(Ether(dst="%s")/IP(src="%s",dst="%s",flags=%d)/UDP(dport=500,chksum=%d)/"%s",iface="%s")' %(str1,src_ip_adr,dst_ip_adr,value1,value2,payload,from_interface)
if ip_options != 'none':
scapy_cmd='sendp(Ether(dst="%s")/IP(src="%s",dst="%s",flags=%d,options="%s")/UDP(dport=500,chksum=%d)/"%s",iface="%s")' %(str1,src_ip_adr,dst_ip_adr,value1,ip_options,value2,payload,from_interface)
if proto=="ipv6":
if ip6_plen != 0:
chksum=int(udp_checksum)
scapy_cmd='sendp(Ether(dst="%s")/IPv6(src="%s",dst="%s",plen=%d)/UDP(dport=500,chksum=%d)/"%s",iface="%s")' %(str1,src_ip_adr,dst_ip_adr,ip6_plen,chksum,payload,from_interface)
else:
scapy_cmd='sendp(Ether(dst="%s")/IPv6(src="%s",dst="%s",hlim=%d)/"%s",iface="%s")' %(str1,src_ip_adr,dst_ip_adr,ip_ttl,payload,from_interface)
self.ses.sendline("sudo python2.5")
retstr=self.ses.before
which=self.ses.expect(["Password:",">>>"])
if which != 0:
self.ses.sendline("from scapy import *")
if proto == "ipv6":
self.ses.sendline("from scapy6 import *")
retstr=self.ses.before
self.ses.expect(">>>")
retstr=self.ses.before
while count > 0:
self.ses.sendline(scapy_cmd)
retstr =self.ses.before
log.output("scapy_cmd::%s" %(retstr))
self.ses.expect(">>>")
retstr =self.ses.before
count = count - 1
self.ses.sendline("%c" %(0x04))
return
else:
self.ses.sendline(sudo_passwd)
self.ses.expect(">>>",20)
retstr += self.ses.before
self.ses.sendline("from scapy import *")
if proto == "ipv6":
self.ses.sendline("from scapy6 import *")
self.ses.expect(">>>",20)
while count > 0:
self.ses.sendline(scapy_cmd)
retstr = self.ses.before
log.output("scapy_cmd::%s" %(retstr))
self.ses.expect(">>>",5)
count = count - 1
self.ses.sendline("%c" %(0x04))
return
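# Editor's note: hedged usage sketch of invoke_scapy(), assuming a Linux host object whose
# pexpect session is available as self.ses (as the code above expects); the addresses below are
# illustrative only.
#
#     invoke_scapy(linux_host, sudo_passwd='secret', dst_mac_adr='00:11:22:33:44:55',
#                  src_ip_adr='10.0.0.2', dst_ip_adr='10.0.0.1', ip_ttl='1',
#                  from_interface='eth1', count=3)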
def ip_ip4_pktgen(self,card='none',slot='none',port='none',src_addr='01010101',options='0',bad_option='0',
dst_addr='none',pkt_len="500",bad_len='0',
bad_checksum='0',ttl='32',df='0',
no_of_pkts=0,cli_pwd="none",from_glc='1'):
"""Description:This API invo es the tool ip4_pktgen tool and
generates packets within the SSX box
CLI :CLI used is <ip4_pktgen>
Input :Input parameters to the API are different fields
in the IP header.argument(range)
slot(0-4),port(0-3),src_addr(hex),dst_addr(hex)
pktlen(28-2000),ttl(0-255),df(0 or 1),bad_len(0 or 1)
bad_checksum(0 or 1)
Output :Generates required no of IP packets with in the SSX
box
Author :Ganapathi,[email protected]
Reviewer : """
passwd_prompt=re.compile("password:",re.I)
enable_prompt_regex = "[\r\n]*\S+\[\S+\]#"
#enabling level 2 to start shell
self.ses.sendline("hidden enable 2")
self.ses.expect(passwd_prompt)
self.ses.sendline(cli_pwd+'\n')
if no_of_pkts > 0:
#running the command start shell
self.ses.sendline("start shell")
self.ses.expect('#')
if card=="GLC":
telnet_glc(self,telnet_to_glc=from_glc)
#invoking the tool ip4_pktgen in
self.ses.sendline("ip4_pktgen")
self.ses.expect("\r\n(.*)\r\nDo you want to generate packet with optional header fields:\r\n(.*): ")
self.ses.sendline(options)
if options == '1':
self.ses.expect("\r\nGenerate with bad option:\r\n(.*): ")
self.ses.sendline(bad_option)
self.ses.expect("Enter the slot number of LC \(0\-4\): ")
self.ses.sendline(slot)
self.ses.expect("Enter the port number \(0\-3\): ")
self.ses.sendline(port)
self.ses.expect("Enter IPv4 source address in hex:")
self.ses.sendline(src_addr)
self.ses.expect("Enter IPv4 destination address in hex:")
self.ses.sendline(dst_addr)
self.ses.expect("Enter total packet length \(32\-1792\):")
self.ses.sendline(pkt_len)
self.ses.expect("\r\nDo you want to generate packet with bad total length:\r\n(.*): ")
self.ses.sendline(bad_len)
self.ses.expect("Enter TTL \(0\-255\): ")
self.ses.sendline(ttl)
self.ses.expect("DF bit \(0\-1\): ")
self.ses.sendline(df)
self.ses.expect("\r\nDo you want to generate packet with bad checksum:\r\n(.*): ")
self.ses.sendline(bad_checksum)
while no_of_pkts > 1:
self.ses.expect("\r\n(.*)\r\n(.*)\r\nDo you want to send the same packet again:\r\n(.*): ")
self.ses.sendline("1")
no_of_pkts = no_of_pkts - 1
self.ses.sendline("%c" % 0x3)
self.ses.expect("#")
self.ses.sendline("exit")
if card == "GLC":
self.ses.expect("Connection closed by foreign host.")
self.ses.expect("#")
self.ses.sendline("exit")
self.ses.expect(enable_prompt_regex)
return 1
else:
self.ses.sendline("%c" % 0x3)
return 0
def get_mgmt_addr(ssx_name="none"):
"""
This API will return management port address of a given SSX .
CLI: "nslookup "ssx_name"
input: ssx-name as a parameter
output:returns the ip address of mgmt port
Author:[email protected]
Reviewer: """
cmd="nslookup %s >> mgmt.txt" %(ssx_name)
os.system(cmd)
fileptr=file("mgmt.txt","r")
outputstr=fileptr.read()
regex=re.compile('\nAddress:(\s+)(\d+).(\d+).(\d+).(\d+)\n')
regex1=re.compile('(\d+)..(\d+).(\d+).(\d+)')
found=regex.search(outputstr)
found1=regex1.search(found.group())
return found1.group()
def ip_verify_ipv6_counters_icmp(self,total_tx='none',echo_request='none',echo_reply='none',
unreachable='none',param_problem='none', redirects='none',
ttl_expired='none',too_big='none',other='none',format='none',
rate_limit='none',router_solicit='none',
router_advertise='none',neighbor_solicit='none',neighbor_advertise="none"):
"""
Description: - This API verify the "show ip conters icmp" with passing differnt combinations of
transmitted ICMP packet statastics such as echo request etc with output when the
command run.
CLI Used- CLI.s that are used for the API <show ip counters icmp>.
Input: - List of differnt ICMP packet statastics Inputs such as no of Echo Request packets
transmitted etc.
Output: - Returns to the calling function ,i.e Pass or Fail .
Author: - Ganapathi, [email protected] .
Reviewer: - """
expected={'icmp_echo_request':echo_request,'icmp_echo_reply':echo_reply,
'icmp_total':total_tx,'icmp_unreachable':unreachable,
'icmp_too_big':too_big,'icmp_param_problem':param_problem,
'icmp_router_solicit':router_solicit,'icmp_redirects':redirects,
'icmp_router_advertise':router_advertise,'icmp_ttl_expired':ttl_expired,
'icmp_other':other,'icmp_format':format,'icmp_rate_limit':rate_limit,
'icmp_neighbor_solicit':neighbor_solicit,'icmp_neighbor_advertise':neighbor_advertise}
cli="show ipv6 counters icmp"
ret_str=self.cmd(cli)
#splitted_icmp_list=icmplist.split('\n')
actual={}
regex_list=['\s*Rx:\s+Total\s+(?P<rx_total>\d+)\s+Tx:\s+Total\s+(?P<icmp_total>\d+)',
'\s+Echo\s+Request\s+(?P<rx_echo_request>\d+)\s+Echo\s+Request\s+(?P<icmp_echo_request>\d+)',
'\s+Echo\s+Reply\s+(?P<rx_echo_reply>\d+)\s+Echo\s+Reply\s+(?P<icmp_echo_reply>\d+)',
'\s+Unreachable\s+(?P<rx_unreachable>\d+)\s+Unreachable\s+(?P<icmp_unreachable>\d+)',
'\s+Param\s+Problem\s+(?P<rx_param_problem>\d+)\s+Param\s+Problem\s+(?P<icmp_param_problem>\d+)',
'\s+Redirects\s+(?P<rx_redirects>\d+)\s+redirects\s+(?P<icmp_redirects>\d+)',
'\s+TTL\s+Expired\s+(?P<rx_ttl_expired>\d+)\s+TTL\s+Expired\s+(?P<icmp_ttl_expired>\d+)',
'\s+Too\s+Big\s+(?P<rx_too_big>\d+)\s+Too\s+Big\s+(?P<icmp_too_big>\d+)',
'\s+Router\s+Solicit\s+(?P<rx_router_solicit>\d+)\s+Router\s+Solicit\s+(?P<icmp_router_solicit>\d+)',
'\s+Router\s+Advertise\s+(?P<rx_router_advertise>\d+)\s+Router\s+Advertise\s+(?P<icmp_router_advertise>\d+)',
'\s+Neighbor\s+Solicit\s+(?P<rx_neighbor_solicit>\d+)\s+Neighbor\s+Solicit\s+(?P<icmp_neighbor_solicit>\d+)',
'\s+Neighbor\s+Advertise\s+(?P<rx_neighbor_advertise>\d+)\s+Neighbor\s+Advertise\s+(?P<icmp_neighbor_advertise>\d+)',
'\s+Other\s+(?P<rx_other>\d+)\s+Other\s+(?P<icmp_other>\d+)',
'\s+Error:\s+Checksum\s+(?P<chksum_error>\d+)\s+Format\s+(?P<format_error>\d+)',
'\s+Length\s+(?P<length_error>\d+)\s+Rate\s+Limit\s+(?P<rate_limit_error>\d+)',
'\s+Tx\s+Redirect\s+(?P<redirect_error>\d+)']
for regex in regex_list:
obj=re.compile(regex,re.I)
m=obj.search(ret_str)
if m:
dict=m.groupdict()
for key in dict.keys():
actual[key]=dict[key]
for keys in expected.keys():
if expected[keys] != 'none':
if expected[keys] != actual[keys]:
return 0
return 1
def verify_port_drop_counters(self,port='none',disabled_port='none',
lookup_failed='none',nitrox_drop='none',
known_acl='none',drop_adjacency='none',
invalid_fib='none',invalid_channel='none',
buff_invalid='none',xscale_ring_full='none',
invalid_circuit='none',arp_drop='none',
ip6_port_drop='none',invalid_service='none',
v4_chksum_drop='none',v4_hdr_drop='none',v4_scr_drop='none'):
"""
Description: - This API will verify the o/p stats of packet drops at a particular port
CLI Used- "show port slot/port counters drop" ---->to catch the name of the particular interface
Input: - slot and port as a parameter format slot/port
Output: - verifies the output stats of the command
Author: - Ganapathi, [email protected] .
Reviewer: - """
expected={'disabled_port':disabled_port,
'lookup_failed':lookup_failed,
'nitrox_drop' :nitrox_drop,
'known_acl' :known_acl,
'drop_adjacency':drop_adjacency,
'invalid_fib':invalid_fib,
'invalid_channel':invalid_channel,
'buff_invalid':buff_invalid,
'xscale_ring_full':xscale_ring_full,
'invalid_circuit':invalid_circuit,
'arp_drop':arp_drop,
'ip6_port_drop':ip6_port_drop,
'invalid_service':invalid_service,
'v4_chksum_drop':v4_chksum_drop,
'v4_hdr_drop':v4_hdr_drop,
'v4_scr_drop': v4_scr_drop }
actual={}
ret_str=self.cmd("show port %s counters drop" %(port))
regex_list=['\d+/\d+\s+Disabled\s+Port:\s+(?P<disabled_port>\d+)',
'\s+Lookup\s+Failed:\s+(?P<lookup_failed>\d+)',
'\s+Nitrox\s+Drop:\s+(?P<nitrox_drop>\d+)',
'\s+Known\s+Acl:\s+(?P<known_acl>\d+)',
'\s+Drop\s+Adjacency:\s+(?P<drop_adjacency>\d+)',
'\s+Invalid\s+Fib:\s+(?P<invalid_fib>\d+)',
                '\s+Invalid\s+Channel:\s+(?P<invalid_channel>\d+)',
                '\s+Buff\s+Invalid:\s+(?P<buff_invalid>\d+)',
'\s+Xscale\s+Ring\s+Full:\s+(?P<xscale_ring_full>\d+)',
'\s+Invalid\s+Circuit:\s+(?P<invalid_circuit>\d+)',
'\s+Arp\s+Drop:\s+(?P<arp_drop>\d+)',
'\s+Ip6\s+Port\s+Drop:\s+(?P<ip6_port_drop>\d+)',
'\s+Invalid\s+Service:\s+(?P<invalid_service>\d+)',
'\s+V4\s+Chksum\s+Drop:\s+(?P<v4_chksum_drop>\d+)',
'\s+V4\s+Hdr\s+Invalid:\s+(?P<v4_hdr_drop>\d+)',
'\s+V4\s+Scr\s+Invalid:\s+(?P<v4_scr_drop>\d+)']
for regex in regex_list:
obj=re.compile(regex,re.I)
m=obj.search(ret_str)
if m:
dict=m.groupdict()
for key in dict.keys():
actual[key]=dict[key]
for keys in expected.keys():
if expected[keys] != 'none':
if expected[keys] != actual[keys]:
return 0
return 1
def verify_tunnel_counters(self,tunnel_name="none",in_pkts='none',out_pkts='none'):
ret_str=self.cmd("show tunnel counters " )
split_list=ret_str.split('\n')
ret_str=self.cmd("show tunnel counters | grep %s " %(tunnel_name))
str2 ='\s*\w+\s+\w+\s+(\d+)\s+(\d)+\s+\d+\s+\d+'
obj=re.compile(str2)
found=obj.search(ret_str)
actual_in_pkts=found.group(1)
actual_out_pkts=found.group(2)
if actual_in_pkts == in_pkts and actual_out_pkts == out_pkts:
return 1
else:
return 0
def get_circuit_handle(self,tunnel="none"):
ret_str=self.cmd("show tunnel name %s" %(tunnel))
obj=re.compile('\s*Tunnel\s+Circuit\s+Handle\s+(\w+)')
m=obj.search(ret_str)
    return m.group(1)
| [
"[email protected]"
]
| |
0d608ca99a5df062161a727ba15e6c5d0d860cfa | d55f3f715c00bcbd60badb3a31696a1a629600e2 | /students/maks/9/site2/page/migrations/0002_category.py | ee732793b384633ecbbe6c4b58bbd67a5b4d787d | []
| no_license | zdimon/wezom-python-course | ea0adaa54444f6deaca81ce54ee8334297f2cd1a | 5b87892102e4eb77a4c12924d2d71716b9cce721 | refs/heads/master | 2023-01-29T02:22:54.220880 | 2020-12-05T11:27:48 | 2020-12-05T11:27:48 | 302,864,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | # Generated by Django 3.1.2 on 2020-11-14 10:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('page', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
],
),
]
| [
"[email protected]"
]
| |
205794e0f345bd8c88dca18a2b82e9f5364d5430 | 168556624401cd884fe0bfdac5a312adf51286a1 | /CS1430/homework4_sceneclassification-Enmin/code/trial.py | ce43e58f440332e775d1492ab4a36598380a3a3c | []
| no_license | Enmin/Coursework | f39dc7b54a07b901491fbd809187fd54e96fa5a4 | a17d216c37e70a8073602389924af10019cfe7de | refs/heads/master | 2021-12-28T02:53:15.949547 | 2021-12-21T22:45:19 | 2021-12-21T22:45:19 | 179,909,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | import numpy as np
import matplotlib
import time
from matplotlib import pyplot as plt
from helpers import progressbar
from skimage import io
from skimage.io import imread
from skimage.color import rgb2grey
from skimage.feature import hog
from skimage.transform import resize
from scipy.spatial.distance import cdist
def get_tiny_images(image_paths):
#TODO: Implement this function!
output = []
for image_path in image_paths:
image = imread(image_path)
if len(image.shape) > 2:
image = rgb2grey(image)
image = resize(image, output_shape=(64, 64), anti_aliasing=True)
output.append(image)
return np.array(output)
# res = get_tiny_images([r'..\data\train\Bedroom\image_0002.jpg'])
# io.imshow(res[0])
# plt.show()
def build_vocab():
num_imgs = 1000
for i in progressbar(range(num_imgs), "Loading ...", num_imgs):
pass
build_vocab() | [
"[email protected]"
]
| |
8526bf590577cee22e9f84d173c847d4b9407e14 | 824f19d20cdfa26c607db1ff3cdc91f69509e590 | /Array-101/5-Merge-Sorted-Array.py | e028a6ec47e16b2bddb04b2c0dac066cac1c70c2 | []
| no_license | almamuncsit/LeetCode | 01d7e32300eebf92ab54c983de6e183242b3c985 | 17aa340649574c37067ec170ceea8d9326be2d6a | refs/heads/master | 2021-07-07T09:48:18.069020 | 2021-03-28T11:26:47 | 2021-03-28T11:26:47 | 230,956,634 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | from typing import List
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
i, j, k = m - 1, n - 1, m + n - 1
while j >= 0:
            if i >= 0 and nums1[i] >= nums2[j]:
nums1[k] = nums1[i]
i -= 1
else:
nums1[k] = nums2[j]
j -= 1
k -= 1
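# Editor's note: quick trace of the backwards merge above.
# nums1 = [1, 2, 3, 0, 0, 0], m = 3, nums2 = [2, 5, 6], n = 3
# writes positions 5..1 as 6, 5, 3, 2, 2 and leaves nums1[0] = 1, so nums1 == [1, 2, 2, 3, 5, 6].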
| [
"[email protected]"
]
| |
9ab60f6818d611b38e5a018aab0b282e5e417ec6 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_bobbling.py | 5754a833bc3c73ef4d8503d781013e85cd1c3248 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
from xai.brain.wordbase.nouns._bobble import _BOBBLE
#class header
class _BOBBLING(_BOBBLE, ):
def __init__(self,):
_BOBBLE.__init__(self)
self.name = "BOBBLING"
self.specie = 'nouns'
self.basic = "bobble"
self.jsondata = {}
| [
"[email protected]"
]
| |
541ff8209cf419cbd7c780213cf1f0c62b91a4d7 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_313/ch75_2020_04_07_18_04_40_887750.py | 39e6ed3c06555b5fc1e686140dc5d782dadd9b6c | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | def verifica_primos(l1):
dicionario = {}
for i in l1:
if i == 0 or i == 1 or i == -1:
dicionario[i] = True
elif i == 2:
dicionario[i] = True
elif i % 2 == 0:
dicionario[i] = False
        else:
            # odd candidate: assume prime, then test odd divisors up to sqrt(|i|)
            dicionario[i] = True
            for a in range(3, int(abs(i) ** 0.5) + 1, 2):
                if i % a == 0:
                    dicionario[i] = False
                    break
return dicionario | [
"[email protected]"
]
| |
04b60bf68bf3570d0bc85f8a5ffe962f4f658aa1 | 2635c2e2c31a7badb8b188306c3cdfc61dc1ecc8 | /ihm_add_fields_productos/models/add_fields_product.py | 71ba7678d0edaea18b210f7f577c718ee38a5872 | []
| no_license | rosalesdc/ihm_testing | ec4ebf26c3c7602267a04fd183de4064f9d16bc1 | d91ebeac5504c9f29a21b2b0f05bc16ed240ff48 | refs/heads/master | 2020-04-17T01:21:03.214595 | 2019-10-29T23:05:52 | 2019-10-29T23:05:52 | 166,088,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,791 | py | # -*- coding: utf-8 -*-
from odoo import api
from odoo import fields
from odoo import models
from odoo.exceptions import UserError
from odoo.exceptions import ValidationError
import smtplib
class AddProductFields(models.Model):
_inherit = 'product.template'
name = fields.Char('Name', index=True, required=True, translate=False)
    # Before creating a product, check that the current user is not restricted
@api.model
def create(self, vals):
producto_creado=super(AddProductFields, self).create(vals)
if producto_creado.is_group_restr == True:
raise ValidationError('Usuario actual no puede crear inmuebles')
else: return producto_creado
    # Before updating the product, check whether the user is an administrator
# @api.multi
# def write(self, vals):
# print(vals)
# if self.es_inmueble == True and self.is_group_admin == False:
# keys=vals.keys()
# if ('invoice_policy' in keys) or ('service_type' in keys) or ('estatus' in keys) or ('sale_order' in keys):
# return super(AddProductFields, self).write(vals)
# else:
# raise ValidationError('Usuario actual no puede actualizar inmuebles')
# else:
# return super(AddProductFields, self).write(vals)
    # Before deleting the product, check whether the user is an administrator
@api.multi
def unlink(self):
if self.es_inmueble == True and self.is_group_admin == False:
raise ValidationError('Usuario actual no puede eliminar inmuebles')
else:
return super(AddProductFields, self).unlink()
@api.one
@api.depends('x_asignacion_ids.importe')
def _compute_total_elementos(self):
self.importe_total_elementos = sum(line.importe for line in self.x_asignacion_ids)
@api.one
def asignar_precio_inmueble(self):
precio_calculado = float(self.importe_total_elementos)
self.write({'list_price': precio_calculado})
@api.model
def get_default_estatus(self):
default_estatus = 'Disponible'
return default_estatus
@api.one
@api.depends('estatus')
def _compute_copy_estatus(self):
if self.estatus != False:
if self.estatus == "Disponible":
self.estatus_ordenado = "01-Disponible"
elif self.estatus == "Apartado":
self.estatus_ordenado = "02-Apartado"
elif self.estatus == "Vendido":
self.estatus_ordenado = "03-Vendido"
elif self.estatus == "Escriturado":
self.estatus_ordenado = "04-Escriturado"
elif self.estatus == "Preparacion":
self.estatus_ordenado = "05-Liberado"
elif self.estatus == "Entregado":
self.estatus_ordenado = "06-Entregado"
@api.one
@api.model
@api.depends('sale_order')
def _obtener_referencia(self):
orden = self.env['sale.order'].search([('id', '=', self.sale_order.id)])
#self.xreferencia=orden.name
self.xreferencia = orden.id_numero_referencia.name
    # Function that checks whether the user belongs to the administrators group
@api.one
def _compute_is_group_admin(self):
self.is_group_admin = self.env['res.users'].has_group('ihm_ocultar_validar.group_director')
    # Function that checks that the user does not belong to the group allowed to edit/create products
@api.one
def _compute_is_group_restringido(self):
self.is_group_restr = self.env['res.users'].has_group('ihm_ocultar_validar.group_no_editarcrear')
    # Field used to check whether the current user is an administrator
is_group_admin = fields.Boolean(
string='Is Admin',
compute="_compute_is_group_admin",
)
is_group_restr = fields.Boolean(
string='Is Restringido',
compute="_compute_is_group_restringido",
)
    # Characteristics for products that are real-estate units (inmuebles) and their related project
es_inmueble = fields.Boolean(string="Es un inmueble")
caracteristicas = fields.Text(string="Características")
numero = fields.Char(string="Número")
estatus = fields.Selection(
selection=[
('Disponible', '01 Disponible'),
('Apartado', '02 Apartado'),
('Vendido', '03 Vendido'),
('Escriturado', '04 Escriturado'),
('Preparacion', '05 Liberado'),
('Entregado', '06 Entregado'),
],
string="Estatus",
copy=False,
readonly=True,
default=get_default_estatus,
)
estatus_ordenado = fields.Char(string="Estatus ordenado",
readonly=True,
store=True,
compute='_compute_copy_estatus', )
x_proyecto_id = fields.Many2one('project.project', string='Proyecto')
x_asignacion_ids = fields.One2many(
        'asignacion.elementos', # referenced model
        'inmueble_id', # inverse field pointing back to this record
string="Asignacion elementos"
)
    # FIELD FOR COMPUTING THE TOTAL OF THE ELEMENTS
importe_total_elementos = fields.Float(string='Importe total elementos',
#store=True,
readonly=True,
compute='_compute_total_elementos',
)
oportunidades_ids = fields.One2many(
        'crm.lead', # referenced model
        'id_producto_inmueble', # inverse field pointing back to this record
string="Oportunidad"
)#
cantidad_enganche = fields.Float(
string="Cantidad Contrato"
)
garantia_id = fields.Many2one(
'tipo.garantia',
string="Tipo de garantia"
)
sale_order = fields.Many2one(
'sale.order',
copy=False,
string="Orden de venta del"
)
xreferencia = fields.Char(
string='Referencia',
#store=True,
compute='_obtener_referencia',
)
    # Field that shows the reference in the list of properties
xreferencia_texto = fields.Char(
string='Referencia',
#store=True,
)
#https://fundamentos-de-desarrollo-en-odoo.readthedocs.io/es/latest/capitulos/modelos-estructura-datos-aplicacion.html
| [
"[email protected]"
]
| |
9df29fc50a42863f005cbf20a869b2a23268e334 | fe93e14d6508ac45679d2ef8a5c7876d77edbc05 | /src/python/commands/MCMCImpl.py | 281103c3ea66f5999e8eb918344c14fdf52cf576 | [
"MIT"
]
| permissive | Linhua-Sun/phycas | d216bedb08ef2fb0f05db2e5691b3d410cd7313a | 8fc283abd4b6add492908f7f54abceff62fc0036 | refs/heads/master | 2020-12-26T18:54:33.461954 | 2015-09-12T18:23:40 | 2015-09-12T18:23:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90,992 | py | import os,sys,math,random
from phycas import *
from phycas.utilities.PhycasCommand import *
from phycas.utilities.CommonFunctions import CommonFunctions
from MCMCManager import MCMCManager
from phycas.probdist import StopWatch
from phycas.readnexus import NexusReader
class MCMCImpl(CommonFunctions):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Needs to be written.
"""
def __init__(self, opts):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Initializes MCMCImpl object by assigning supplied phycas object
to a data member variable.
"""
CommonFunctions.__init__(self, opts)
self.models = None # This variable is set in setup
# These copied over from Phycas.py - many are not used and should be weeded out
self.data_matrix = None
self.file_name_trees_stored = None
self.do_marginal_like = False
self.mcmc_manager = MCMCManager(self)
self.heat_vector = None # Leave set to None unless you are implementing some ad hoc heating scheme. This vector ordinarily computed using nchains and self.heating_lambda
self.stopwatch = StopWatch()
self.sim_model_tree = None # Will hold the model tree used by simulateDNA
self.starting_tree = [] # starting_tree[i] will contain reference to phylogeny.Tree object for chain i
self.warn_tip_numbers = False # True only if tip numbers were not able to be created using the tip names in the tree description (always False if starting_tree_source == 'random' because BuildTreeFromString is not called in this case)
self.ntax = 0 # Will hold the actual number of taxa after data file read
self.nchar = 0 # Will hold the actual number of characters after data file has been read
self.npatterns = [] # Will hold the actual number of patterns for each subset after data file has been read
self.taxon_labels = [] # Will hold taxon labels from data file or default names if self.data_source equals None
#self.sssf = None
self.paramf = None
self.treef = None
self.sitelikef = None
#self.tree_file_name = '' # Will hold tree file name (see openParameterAndTreeFiles)
#self.param_file_name = '' # Will hold parameter file name (see openParameterAndTreeFiles)
#self.tmp_simdata = SimData()
self.gg_Pm = 0.0 # Penalty component (same for all k)
self.gg_Gm = [] # Vector of goodness-of-fit components (one for each k in gg_kvect)
self.gg_Dm = [] # Vector of overall measures (one for each k in gg_kvect)
self.reader = NexusReader()
#self._logFileName = None
self.addition_sequence = [] # List of taxon numbers for addition sequence
self.ref_tree = None # a reference tree against which the current MCMC tree can be compared (user specifies with, e.g., "mcmc.reference_tree_source = TreeCollection(file='hkyml.tre')")
self.stored_tree_defs = None
self.psf = None
self.pdf_splits_to_plot = None
#self.param_file_name = None
#self.tree_file_name = None
self.burnin = 0 # same as self.opts.burnin except for path sampling, when it drops to 0 after first beta value
self.ncycles = 0
self.cycle_start = None # used in path sampling to avoid starting over the cycle count for each beta value
self.cycle_stop = None # total number of cycles (used for computing time remaining)
self.last_adaptation = 0
#POLTMP self.next_adaptation = 0
self.ss_beta = 1.0
self.ss_beta_index = 0
self.ss_sampled_betas = None
self.ss_sampled_likes = None
self.siteIndicesForPatternIndex = None
def setSiteLikeFile(self, sitelikef):
if sitelikef is not None:
self.sitelikef = sitelikef
def siteLikeFileSetup(self, coldchain):
if self.sitelikef is not None:
# Set up the siteIndicesForPatternIndex, which holds a list of sites for each pattern index
# This allows us to spit out site likelihoods for each site, even though site likelihoods
# are stored for patterns, many of which represent numerous sites
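            # For example, if sites 0, 4 and 7 all exhibit pattern 2, then
            # siteIndicesForPatternIndex[2] ends up holding [0, 4, 7].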
v = coldchain.likelihood.getCharIndexToPatternIndex()
nexcluded = 0
self.phycassert(len(v) == self.nchar,'Number of sites returned by coldchain.likelihood.getCharIndexToPatternIndex (%d) differs from MCMCImpl.nchar (%d) in MCMCImpl.siteLikeFileSetup()' % (len(v), self.nchar))
npatterns = coldchain.likelihood.getNPatterns()
self.siteIndicesForPatternIndex = []
for i in range(npatterns):
self.siteIndicesForPatternIndex.append([])
for i,p in enumerate(v):
if p < npatterns:
self.siteIndicesForPatternIndex[p].append(i)
else:
nexcluded += 1;
def unsetSiteLikeFile(self):
self.sitelikef = None
self.siteIndicesForPatternIndex = None
def adaptSliceSamplers(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Cycle through all slice samplers and adapt each one. Adaptation
adjusts the slice unit width of each slice sampler in an attempt to
bring it closer to the optimum width using experience from past
sampling attempts. Non-slice-sampling updaters are also tuned so that
eventually they hover around the target acceptance rate.
"""
cold_chain_manager = self.mcmc_manager.getColdChainManager()
for p in cold_chain_manager.getAllUpdaters():
if p.hasSliceSampler():
s = p.getSliceSampler()
if s.getNumSamples() > 0:
s.adaptSimple(self.opts.adapt_simple_param)
def resetUpdaterDiagnostics(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Calls resetDiagnostics() for all slice-samplers and Metropolis-Hastings
updaters.
"""
cold_chain_manager = self.mcmc_manager.getColdChainManager()
for p in cold_chain_manager.getAllUpdaters():
if p.hasSliceSampler():
s = p.getSliceSampler()
s.resetDiagnostics()
else:
p.resetDiagnostics()
def debugShowTuningParameters(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Shows value of tuning parameter for each Metropolis-Hastings updater
that can be tuned.
"""
s = '\n\nTuning parameter values for all tunable Metropolis-Hastings updaters:'
for k in range(self.opts.nchains):
c = self.mcmc_manager.chains[k]
chain_manager = c.chain_manager
s += '\n Chain %d (power = %.3f):' % (k+1, c.heating_power)
for p in chain_manager.getAllUpdaters():
if not p.hasSliceSampler():
nm = p.getName()
tp = p.getTuningParameter()
s += '\n %12.5f <-- %s' % (tp, nm)
print s
def reportUpdaterEfficiency(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
        Reports the sampling efficiency of each updater in the cold chain:
        for slice samplers, the percentage of function evaluations that
        yielded a sample (along with the current mode); for Metropolis-
        Hastings updaters, the percentage of proposal attempts that were
        accepted. When more than one chain is running, the table of
        attempted and accepted chain swaps is also reported.
"""
summary = ''
# need to adapt all chains, not just the cold one!
cold_chain_manager = self.mcmc_manager.getColdChainManager()
for p in cold_chain_manager.getAllUpdaters():
p_summary = ''
nm = p.getName()
#print '~POL~ nm =',nm
if p.hasSliceSampler():
s = p.getSliceSampler()
if s.getNumSamples() > 0:
# s.adaptSimple(self.opts.adapt_simple_param)
mode = s.getMode()
accept_pct = 100.0*float(s.getNumSamples())/float(s.getNumFuncEvals())
p_summary += ' * efficiency = %.1f%%, mode=%.5f (%s)\n' % (accept_pct, mode, nm)
else:
naccepts = p.getNumAccepts()
nattempts = p.getNumAttempts()
if nattempts > 0:
accept_pct = 100.0*float(naccepts)/float(nattempts)
if nattempts == 1:
p_summary += ' accepted %.1f%% of 1 attempt (%s)\n' % (accept_pct, nm)
else:
p_summary += ' accepted %.1f%% of %d attempts (%s)\n' % (accept_pct, nattempts, nm)
else:
accept_pct = 0.0
summary += p_summary
if self.opts.verbose and summary != '':
self.output('\nUpdater diagnostics (* = slice sampler):')
self.output(summary)
if self.opts.verbose and self.opts.nchains > 1:
self.output('Chain swaps (upper: attempted swaps, lower: accepted swaps):')
row = '%20s' % 'heating power -->'
row += ' '.join(['%12.2f' % self.mcmc_manager.chains[k].heating_power for k in range(self.opts.nchains)])
self.output(row)
for ii in range(self.opts.nchains):
row = '%20.2f' % (self.mcmc_manager.chains[ii].heating_power,)
row += ' '.join(['%12d' % self.mcmc_manager.swap_table[ii][jj] for jj in range(self.opts.nchains)])
self.output(row)
self.output()
def obsoleteUpdateAllUpdaters(self, chain, chain_index, cycle):
# This function abandoned; functionality moved to C++ side
# for speed reasons: see MCMCChainManager::updateAllUpdaters
#import gc
#gc.enable()
assert False, 'NOT SUPPOSED TO BE HERE'
if self.opts.debugging:
tmpf = file('debug_info.txt', 'a')
tmpf.write('************** cycle=%d, chain=%d\n' % (cycle,chain_index))
if chain.all_updaters_list is None:
chain.all_updaters_list = [(m, m.getWeight()) for m in chain.chain_manager.getAllUpdaters()]
chain.all_updaters_list.extend(chain.python_only_moves)
for t in chain.all_updaters_list:
p, w = t
#print "param = %s (weight = %f), chain = %d" % (p.getName(), w, chain_index)
for x in range(w):
if self.opts.debugging:
p.setSaveDebugInfo(True)
p.update()
#if p.getName() == 'Bush move':
# print ' counts after bush move =',gc.get_count()
# print ' thresholds =',gc.get_threshold()
if self.opts.debugging:
tmpf.write('%s | %s\n' % (p.getName(), p.getDebugInfo()))
if self.opts.debugging:
tmpf.close()
def showTopoPriorInfo(self):
m = self.mcmc_manager.getColdChain()
self.output('\nTopology prior:')
if not self.opts.allow_polytomies:
self.output(' flat across all fully-resolved tree topologies (polytomies not allowed)')
else:
if self.opts.polytomy_prior:
self.output(' Prior type: polytomy prior')
else:
self.output(' Prior type: resolution class prior')
self.output(' Prior strength (C): %s' % self.opts.topo_prior_C)
self.output(' Prior probability for each resolution class:')
self.output(' Note: 0.00000000 does *not* mean that the prior is zero! It simply')
self.output(' indicates that the prior is less than 0.000000005\n')
self.output('%20s %20s' % ('internal nodes', 'prior probability'))
self.output('%20s %20s' % ('--------------', '-----------------'))
jpm = self.mcmc_manager.getColdChainManager().getJointPriorManager()
topo_prior_calculator = jpm.getTopoProbCalculator()
topo_priors = topo_prior_calculator.getRealizedResClassPriorsVect()
for i,v in enumerate(topo_priors):
if i == 0:
denom = v # first element of vector is log of normalization constant (sum of all other elements)
else:
topo_prior = math.exp(v - denom)
self.output('%20d %20.8f' % (i,topo_prior))
self.output()
def showParamInfo(self, p):
if p.computesUnivariatePrior() or p.computesMultivariatePrior() or p.computesTreeLengthPrior():
self.output(' Parameter name: %s' % p.getName())
self.output(' Prior distribution: %s' % p.getPriorDescr())
if p.isMasterParameter():
self.output(' Master parameter (no current value)')
else:
if p.computesUnivariatePrior():
v = p.getCurrValueFromModel()
self.output(' Current value: %s' % v)
else:
v = p.listCurrValuesFromModel()
self.output(' Current value: %s' % ','.join(['%.5f' % x for x in v]))
self.output(' Prior log-density: %s' % p.getLnPrior())
self.output()
def siteLikeFileOpen(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Opens the site log-likelihood file.
"""
self.phycassert(self.sitelikef is None, 'Attempt made to open MCMCImpl.sitelikef, but it is already open!')
sitelnl_file_spec = self.opts.out.sitelikes
try:
self.sitelikef = sitelnl_file_spec.open(self.stdout)
except:
            print '*** Attempt to open site log-likelihood file (%s) failed.' % self.opts.out.sitelikes.filename
if self.sitelikef:
print 'Site log-likelihood file was opened successfully'
#mcmc.sitelikef = self.sitelikef
def siteLikeFileClose(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Closes the site log-likelihood file.
"""
self.phycassert(self.sitelikef is not None, 'Attempt made to close MCMCImpl.sitelikef, but it is not open!')
self.sitelikef.close()
self.sitelikef = None
def treeFileOpen(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Opens the tree file and writes a translate table.
"""
tree_file_spec = self.opts.out.trees
self.treef = None
try:
self.treef = tree_file_spec.open(self.stdout)
except:
print '*** Attempt to open tree file (%s) failed.' % self.opts.out.trees.filename
if self.treef:
self.mcmc_manager.treeFileHeader(self.treef)
def treeFileClose(self):
self.treef.write('end;\n')
self.treef.close()
def paramFileOpen(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Opens the parameter file and writes a header line.
"""
param_file_spec = self.opts.out.params
self.paramf = None
try:
self.paramf = param_file_spec.open(self.stdout)
except:
print '*** Attempt to open parameter file (%s) failed.' % self.opts.out.params.filename
if self.paramf:
self.mcmc_manager.paramFileHeader(self.paramf)
self.paramf.write('\n')
def paramFileClose(self):
if self.paramf is not None:
self.paramf.close()
def openParameterAndTreeFiles(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
        Opens the parameter sample file and the tree sample file used to
        record MCMC output.
"""
#prefix = self.getPrefix()
#self.param_file_name = prefix + '.p'
#self.tree_file_name = prefix + '.t'
self.paramFileOpen()
self.treeFileOpen()
def _loadData(self, matrix):
self.phycassert(matrix is not None, 'Tried to load data from a non-existant matrix')
self.data_matrix = matrix
self.taxon_labels = matrix.getTaxLabels()
self.ntax = self.data_matrix.getNTax()
self.nchar = self.data_matrix.getNChar() # used for Gelfand-Ghosh simulations only
self.phycassert(len(self.taxon_labels) == self.ntax, "Number of taxon labels does not match number of taxa.")
def cumLagrange(self, which, x, y):
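        # Appears to approximate the cumulative integral of y with respect to x from x[0]
        # up to x[which] using a quadratic (three-point Lagrange) interpolant through
        # (x[0],y[0]), (x[1],y[1]) and (x[2],y[2]): term1 is the linear (trapezoid-like)
        # contribution and term2 is the second-order correction.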
xx = x[which]
term1 = (xx - x[0])*(y[0] + (xx - x[0])*(y[1] - y[0])/(2.0*(x[1] - x[0])))
term2a = 2.0*(xx**2.0) - xx*x[0] - (x[0]**2.0) + 2.0*x[0]*x[1] - 3.0*xx*x[1]
term2b = (y[2] - y[1])/(x[2] - x[1]) - (y[1] - y[0])/(x[1] - x[0])
term2c = (xx - x[0])/(x[2] - x[0])
term2 = term2a*term2b*term2c/6.0
cum = term1 + term2
return cum
def getStartingTree(self):
# if self.starting_tree is None:
# if False:
# if self.opts.starting_tree_source == 'file':
# self.phycassert(self.data_source, "Specified starting_tree_source to be 'file' when data_source was None (file was not read)")
# tree_defs = self.reader.getTrees()
# self.phycassert(len(tree_defs) > 0, 'a trees block defining at least one tree must be stored in the nexus data file')
# # Grab first tree description in the data file
# # TODO allow some other tree than the first
# self.starting_tree = tree_defs[0]
# elif self.opts.starting_tree_source == 'usertree':
# self.starting_tree = Newick(self.opts.tree_topology)
# elif self.opts.starting_tree_source == 'random':
# self.phycassert(self.ntax > 0, 'expecting ntax to be greater than 0')
# self.starting_tree = None
# else:
# self.phycassert(False, "starting_tree_source should equal 'random', 'file', or 'usertree', but instead it was this: %s" % self.starting_tree_source)
# else:
# If user failed to specify starting_tree_source, get starting tree from randomtree object
# as it is currently configured
tr_source = self.opts.starting_tree_source
if tr_source is None:
tr_source = randomtree()
try:
tr_source.setActiveTaxonLabels(self.taxon_labels)
i = iter(tr_source)
# self.starting_tree = i.next()
self.starting_tree.append(i.next())
except:
self.stdout.error("A starting tree could not be obtained from the starting_tree_source")
raise
t = self.starting_tree[-1]
num_degree_two_nodes = t.deroot()
if num_degree_two_nodes > 0:
self.stdout.warning("A total of %d degree-2 nodes were removed from tree defined in starting_tree_source" % num_degree_two_nodes)
return t
def storeRefTreeIfSupplied(self):
cold_chain = self.mcmc_manager.getColdChain()
# If a reference tree was specified, create that tree now
tr_source = self.opts.reference_tree_source
if tr_source is not None:
try:
tr_source.setActiveTaxonLabels(self.taxon_labels)
i = iter(tr_source)
self.ref_tree = i.next()
except:
self.stdout.error("A reference tree could not be obtained from the specified reference_tree_source")
if self.ref_tree is not None:
cold_chain.chain_manager.setRefTree(self.ref_tree)
def setup(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
This function is for parts of the setup that should occur right before
run() is called. Setup is deferred until this point to give the
user a chance to change the default settings before the call to
run(). This function does these things:
1) reads the data and ensures that the taxon_labels list is filled
with the correct number of taxon labels;
2) creates a starting tree description;
3) creates an appropriate heat_vector
4) calls MCMCManager's createChains function to handle setup for each
individual chain;
5) opens the parameter and tree files; and
6) establishes an output log file name if requested
"""
ds = self.opts.data_source
if ds is None:
# User apparently wants to run without data
self.data_matrix = None
self.ntax = self.opts.ntax
self.nchar = 0 # used for Gelfand-Ghosh simulations only
self.phycassert(self.ntax > 0, 'Number of taxa (mcmc.ntax) should be > 0 if mcmc.data_source is None')
self.taxon_labels = ['taxon%d' % (i+1,) for i in range(self.ntax)]
else:
mat = ds.getMatrix()
self.phycassert(mat is not None, 'Data matrix could not be input')
self._loadData(mat)
self.phycassert(self.ntax > 0, 'Number of taxa in data matrix was 0')
#print 'In MCMCImpl.py, function setup():'
#print ' mat =', mat
#print ' self.nchar = %d' % self.nchar
# Next line creates a default partition if a partition was not defined by the user
self.opts.partition.validate(self.nchar)
# Ask for a partition report, passing self as the reporter (object that has an output function)
#self.opts.partition.partitionReport(self) # delayed now until after all_missing has been determined
self.models = [m for (n,s,m) in self.opts.partition.subset]
self.model_names = [n for (n,s,m) in self.opts.partition.subset]
# Perform sanity checks on models
for m,n in zip(self.models, self.model_names):
#print '==> checking model %s' % n
bad_priors = m.checkPriorSupport()
self.phycassert(len(bad_priors) == 0, 'In model %s, prior support is incorrect for these parameters:\n%s' % (n,' \n'.join([p for p in bad_priors])))
if m.edgelen_prior is not None:
# set both internal and external edge length priors to edgelen_prior
m.internal_edgelen_prior = m.edgelen_prior
m.external_edgelen_prior = m.edgelen_prior
else:
# Ensure that user has specified both internal and external edge length priors, or has specified a tree length prior
if m.tree_length_prior is None:
self.phycassert(m.internal_edgelen_prior is not None, 'In model %s, internal_edgelen_prior cannot be None if edgelen_prior and tree_length_prior are both None' % n)
self.phycassert(m.external_edgelen_prior is not None, 'In model %s, external_edgelen_prior cannot be None if edgelen_prior and tree_length_prior are both None' % n)
if m.edgelen_hyperprior is not None:
# Ensure that both internal and external edgelen priors are Exponential
if m.internal_edgelen_prior.getDistName() != 'Exponential':
m.internal_edgelen_prior = Exponential(1.0)
self.warning('In model %s, internal_edgelen_prior reset to Exponential because edgelen_hyperprior was specified' % n)
if m.external_edgelen_prior.getDistName() != 'Exponential':
m.external_edgelen_prior = Exponential(1.0)
self.warning('In model %s, external_edgelen_prior reset to Exponential because edgelen_hyperprior was specified' % n)
# Determine heating levels if multiple chains
if self.opts.heat_vector == None:
if self.opts.nchains == 1:
self.heat_vector = [1.0]
else:
# Determine vector of powers for each chain
self.heat_vector = []
for i in range(self.opts.nchains):
# For n=5 chains (1 cold, 4 heated), min_heat_power = 0.5, we have:
# lambda = (1 - min_heat_power)/(min_heat_power*(n-1))
# = (1 - 0.5)/(0.5*4)
# = 0.25
# 0 1.000 = 1/1.0 cold chain explores posterior
# 1 0.800 = 1/1.25
# 2 0.667 = 1/1.50
# 3 0.571 = 1/1.75
# 4 0.500 = 1/2.00
z = self.opts.min_heat_power
n = self.opts.nchains
lamda = (1.0 - z)/(z*(n - 1.0))
temp = 1.0/(1.0 + float(i)*lamda)
self.heat_vector.append(temp)
else:
# User supplied his/her own heat_vector; perform sanity checks
self.heat_vector = self.opts.heat_vector
if not self.opts.debug_allow_arbitrary_heating_powers:
self.phycassert(1.0 in self.heat_vector, 'user-supplied heat_vector does not allow for a cold chain (one power must be 1.0)')
h = list(self.heat_vector)
h.sort(reverse=True)
if h != self.heat_vector:
self.phycassert(False, 'chain heating powers must be in decreasing order')
self.phycassert(h[-1] > 0.0, 'all chain heating powers must be positive')
self.mcmc_manager.createChains()
self.storeRefTreeIfSupplied()
self.openParameterAndTreeFiles()
cold_chain = self.mcmc_manager.getColdChain()
all_missing = cold_chain.likelihood.getListOfAllMissingSites()
partition.handleAllMissingSites(all_missing)
# Ask for a partition report, passing self as the reporter (object that has an output function)
self.opts.partition.partitionReport(self)
# If all-missing sites found, need to revise the subset proportions supplied to the
# cold chain's subset_relrates_move when the cold chain was created
# BETTER IDEA is to give subset_relrates_move a shared pointer to the partition model object
# so we don't have multiple copies of the subset proportions in the first place
#if partition.getNumSubsets() > 1:
# subset_proportions = partition.getSubsetProportions()
# cold_chain.subset_relrates_move.setSubsetProportions(subset_proportions)
if self.opts.doing_steppingstone_sampling:
# start with posterior (ss_beta = 1) and work toward the prior (ss_beta = 0)
self.ss_beta = 1.0
cc = self.mcmc_manager.getColdChain()
cc.setPower(self.ss_beta)
self.siteLikeFileSetup(self.mcmc_manager.getColdChain())
def beyondBurnin(self, cycle):
c = cycle + 1
#POLTMP return (c > self.burnin)
return (c > 0)
def doThisCycle(self, cycle, burnin, mod):
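        # Illustrative arithmetic (values assumed): doThisCycle(199, 1000, 100) computes
        # c = 200 and returns True; doThisCycle(-801, 200, 100) computes c = -801 - 200 + 1
        # = -1000 and also returns True (negative cycles correspond to the burn-in phase).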
if cycle < 0:
c = cycle - burnin + 1
else:
c = cycle + 1
return ((c % mod) == 0)
def getModelIndex(self, name):
"""
If an updater has name like 'k_rAC', this function will return the model index k-1.
If an updater has name like 'rAC_k', this function will return the model index k-1. <-- deprecated
If an updater has name 'rAC', returns model index 0. <-- deprecated
"""
import re
mo = re.search('^([0-9]+)_',name)
if mo is not None:
return int(mo.group(1)) - 1
else:
mo = re.search('_([0-9]+)$',name)
if mo is not None:
return int(mo.group(1)) - 1
else:
return 0
############################ exploreWorkingPrior ############################
def exploreWorkingPrior(self, cycle): # GENERALIZED-STEPPING-STONE
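        # Overview: when sampling from the working prior (generalized stepping-stone),
        # each free parameter is drawn directly from its reference ("working prior")
        # distribution rather than being updated by MCMC proposals; the model is then
        # refreshed and the likelihood recomputed for the newly drawn parameter values.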
chain_index = 0
cold_chain = self.mcmc_manager.getColdChain()
tm = phylogeny.TreeManip(cold_chain.tree)
jpm = cold_chain.chain_manager.getJointPriorManager()
nmodels = cold_chain.partition_model.getNumSubsets()
unpartitioned = (nmodels == 1)
tree_length_prior_specified = (cold_chain.likelihood.getTreeLengthPrior() is not None)
#new_edge_lens = []
#new_internal_edge_lens = None
#new_external_edge_lens = None
all_updaters = cold_chain.chain_manager.getAllUpdaters()
edge_lens_need_updating = True
for u in all_updaters: # good candidate for moving into C++
if u.isFixed() or not u.isPriorSteward():
continue
name = u.getName()
if name.find('edgelen_hyper') > -1:
# draw an edge length hyperparameter value that applies to all edges
edgelen_hyperparam = u.sampleWorkingPrior()
m = cold_chain.partition_model.getModel(0)
cold_chain.chain_manager.setEdgeLenHyperparam(0, edgelen_hyperparam)
m.getInternalEdgeLenPrior().setMeanAndVariance(1.0/edgelen_hyperparam, 0.0) # 2nd arg. (variance) ignored for exponential distributions
m.getExternalEdgeLenPrior().setMeanAndVariance(1.0/edgelen_hyperparam, 0.0) # 2nd arg. (variance) ignored for exponential distributions
jpm.edgeLenHyperparamModified(name, cold_chain.tree, edgelen_hyperparam)
elif name.find('external_hyper') > -1:
# draw an edge length hyperparameter value for external edges
edgelen_hyperparam = u.sampleWorkingPrior()
cold_chain.chain_manager.setEdgeLenHyperparam(0, edgelen_hyperparam)
m = cold_chain.partition_model.getModel(0)
self.phycassert(m.isSeparateInternalExternalEdgeLenPriors(), "found updater named 'external_hyper' but isSeparateInternalExternalEdgeLenPriors returns False")
m.getExternalEdgeLenPrior().setMeanAndVariance(1.0/edgelen_hyperparam, 0.0) # 2nd arg. (variance) ignored for exponential distributions
jpm.edgeLenHyperparamModified(name, cold_chain.tree, edgelen_hyperparam)
elif name.find('internal_hyper') > -1:
# draw an edge length hyperparameter value for external edges
edgelen_hyperparam = u.sampleWorkingPrior()
cold_chain.chain_manager.setEdgeLenHyperparam(1, edgelen_hyperparam)
m = cold_chain.partition_model.getModel(0)
self.phycassert(m.isSeparateInternalExternalEdgeLenPriors(), "found updater named 'internal_hyper' but isSeparateInternalExternalEdgeLenPriors returns False")
m.getInternalEdgeLenPrior().setMeanAndVariance(1.0/edgelen_hyperparam, 0.0) # 2nd arg. (variance) ignored for exponential distributions
jpm.edgeLenHyperparamModified(name, cold_chain.tree, edgelen_hyperparam)
elif name.find('subset_relrates') > -1: # C++ class SubsetRelRatesMove
rates_vector = u.sampleMultivariateWorkingPrior()
cold_chain.partition_model.setSubsetRelRatesVect(rates_vector)
jpm.multivariateModified(name, rates_vector)
elif name.find('relrates') > -1: # C++ class RelRatesMove
i = unpartitioned and 0 or self.getModelIndex(name)
m = cold_chain.partition_model.getModel(i)
rate_vector = u.sampleMultivariateWorkingPrior()
m.setRelRates(rate_vector)
jpm.multivariateModified(name, rate_vector)
elif name.find('state_freqs') > -1: # C++ class StateFreqMove
i = unpartitioned and 0 or self.getModelIndex(name)
m = cold_chain.partition_model.getModel(i)
freq_vector = u.sampleMultivariateWorkingPrior()
m.setStateFreqsUnnorm(freq_vector)
jpm.multivariateModified(name, freq_vector)
elif name.find('gamma_shape') > -1: # C++ class DiscreteGammaShapeParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = cold_chain.partition_model.getModel(i)
new_gamma_shape = u.sampleWorkingPrior()
m.setShape(new_gamma_shape)
jpm.univariateModified(name, new_gamma_shape)
elif name.find('pinvar') > -1: # C++ class PinvarParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = cold_chain.partition_model.getModel(i)
new_pinvar = u.sampleWorkingPrior()
m.setPinvar(new_pinvar)
jpm.univariateModified(name, new_pinvar)
elif name.find('kappa') > -1: # C++ class KappaParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = cold_chain.partition_model.getModel(i)
new_kappa = u.sampleWorkingPrior()
m.setKappa(new_kappa)
jpm.univariateModified(name, new_kappa)
elif name.find('rAC') > -1: # C++ class StateFreqParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = cold_chain.partition_model.getModel(i)
rr = list(m.getRelRates())
new_rAC = u.sampleWorkingPrior()
rr[0] = new_rAC
m.setRelRates(rr)
jpm.univariateModified(name, new_rAC)
elif name.find('rAG') > -1: # C++ class StateFreqParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = cold_chain.partition_model.getModel(i)
rr = list(m.getRelRates())
new_rAG = u.sampleWorkingPrior()
rr[1] = new_rAG
m.setRelRates(rr)
jpm.univariateModified(name, new_rAG)
elif name.find('rAT') > -1: # C++ class StateFreqParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = cold_chain.partition_model.getModel(i)
rr = list(m.getRelRates())
new_rAT = u.sampleWorkingPrior()
rr[2] = new_rAT
m.setRelRates(rr)
jpm.univariateModified(name, new_rAT)
elif name.find('rCG') > -1: # C++ class StateFreqParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = cold_chain.partition_model.getModel(i)
rr = list(m.getRelRates())
new_rCG = u.sampleWorkingPrior()
rr[3] = new_rCG
m.setRelRates(rr)
jpm.univariateModified(name, new_rCG)
elif name.find('rCT') > -1: # C++ class StateFreqParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = cold_chain.partition_model.getModel(i)
rr = list(m.getRelRates())
new_rCT = u.sampleWorkingPrior()
rr[4] = new_rCT
m.setRelRates(rr)
jpm.univariateModified(name, new_rCT)
elif name.find('rGT') > -1: # C++ class StateFreqParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = cold_chain.partition_model.getModel(i)
rr = list(m.getRelRates())
new_rGT = u.sampleWorkingPrior()
rr[5] = new_rGT
m.setRelRates(rr)
jpm.univariateModified(name, new_rGT)
elif name.find('freqA') > -1: # C++ class StateFreqParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = cold_chain.partition_model.getModel(i)
new_freqA = u.sampleWorkingPrior()
m.setStateFreqUnnorm(0, new_freqA)
jpm.univariateModified(name, new_freqA)
elif name.find('freqC') > -1: # C++ class StateFreqParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = cold_chain.partition_model.getModel(i)
new_freqC = u.sampleWorkingPrior()
m.setStateFreqUnnorm(1, new_freqC)
jpm.univariateModified(name, new_freqC)
elif name.find('freqG') > -1: # C++ class StateFreqParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = cold_chain.partition_model.getModel(i)
new_freqG = u.sampleWorkingPrior()
m.setStateFreqUnnorm(2, new_freqG)
jpm.univariateModified(name, new_freqG)
elif name.find('freqT') > -1: # C++ class StateFreqParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = cold_chain.partition_model.getModel(i)
new_freqT = u.sampleWorkingPrior()
m.setStateFreqUnnorm(3, new_freqT)
jpm.univariateModified(name, new_freqT)
elif (name.find('extedge') > -1) or (name.find('intedge') > -1): # C++ class EdgeLenParam
self.phycassert(self.opts.fix_topology, 'There is an EdgeLenParam updater but mcmc.fix_topology is False. This is a bug: please report it to [email protected]')
edgelen = u.sampleWorkingPrior()
u.sendCurrValueToModel(edgelen)
jpm.univariateModified(name, edgelen)
edge_lens_need_updating = False
#elif name.find('master_edgelen') > -1: # C++ class EdgeLenMasterParam
# num_edge_lens = cold_chain.tree.getNNodes() - 1
# new_edge_lens = [u.sampleWorkingPrior() for j in range(num_edge_lens)]
#elif name.find('external_edgelen') > -1: # C++ class EdgeLenMasterParam
# num_edge_lens = cold_chain.tree.getNTips() - 1
# new_external_edge_lens = [u.sampleWorkingPrior() for j in range(num_edge_lens)]
#elif name.find('internal_edgelen') > -1: # C++ class EdgeLenMasterParam
# num_edge_lens = cold_chain.tree.getNInternals()
# new_internal_edge_lens = [u.sampleWorkingPrior() for j in range(num_edge_lens)]
elif name == 'larget_simon_local':
# Sample a new tree and edge lengths from the topology reference distribution
# Note: this really has nothing to do with the Larget-Simon move, but if there
# is a larget_simon_local updater present, it means that the topology is being
# updated and is not fixed
self.phycassert(not self.opts.fix_topology, 'There is a larget_simon_local move updater but mcmc.fix_topology is True. This is a bug: please report it to [email protected]')
u.sampleWorkingPrior()
edge_lens_need_updating = False
jpm.allEdgeLensModified(cold_chain.tree)
jpm.topologyModified("tree_topology", cold_chain.tree)
else:
self.phycassert(0, 'model uses an updater (%s) that has not yet been added to MCMCImpl.exploreWorkingPrior (workaround: specify mcmc.draw_directly_from_prior = False)' % name)
if edge_lens_need_updating and tree_length_prior_specified and cold_chain.likelihood.getTreeLengthRefDist():
# all edge lengths must be drawn from the tree length working prior
tm.setRandomEdgeLensFromTreeLengthDist(cold_chain.likelihood.getTreeLengthRefDist())
jpm.treeLengthModified('tree_length', cold_chain.tree)
#if new_internal_edge_lens is not None:
# # Case of a separate master edge length parameter for internals and tips
# self.phycassert(new_external_edge_lens is not None, 'not expecting new_external_edge_lens to be None in MCMCImpl.exploreWorkingPrior')
# i = 0 # indexes internals
# j = 0 # indexes tips
# for nd in cold_chain.tree:
# if nd.isRoot():
# continue
# elif nd.isInternal():
# nd.setEdgeLen(new_internal_edge_lens[i])
# i += 1
# elif nd.isTip():
# nd.setEdgeLen(new_external_edge_lens[j])
# j += 1
# else:
# self.phycassert(0, 'nd is neither a tip nor an internal node in MCMCImpl.exploreWorkingPrior')
#if cold_chain.likelihood.getTreeLengthRefDist():
# # all edge lengths must be drawn from the tree length working prior
# tm.setRandomEdgeLensFromTreeLengthDist(cold_chain.likelihood.getTreeLengthRefDist())
# jpm.treeLengthModified('tree_length', cold_chain.tree)
#
# #tl_ref_dist = cold_chain.likelihood.getTreeLengthRefDist()
# #num_taxa = cold_chain.tree.getNTips()
# #new_edge_lens = tl_ref_dist.sample(num_taxa)
# #new_external_edge_lens = new_edge_lens[:num_taxa]
# #new_internal_edge_lens = new_edge_lens[num_taxa:]
# #iInternal = 0
# #iExternal = 0
# #for nd in cold_chain.tree:
# # if nd.isRoot():
# # continue
# # if nd.isTip() or nd.getParent().isTip():
# # nd.setEdgeLen(new_external_edge_lens[iExternal])
# # iExternal += 1
# # else:
# # nd.setEdgeLen(new_internal_edge_lens[iInternal])
# # iInternal += 1
#elif edge_lens_need_updating:
# # fixed topology (and hence each edge has its own length parameter)
# self.phycassert(len(new_edge_lens) == cold_chain.tree.getNNodes() - 1, 'new_edge_lens has %d elements but expecting %d in MCMCImpl.exploreWorkingPrior' % (len(new_edge_lens), cold_chain.tree.getNNodes() - 1))
# i = 0
# for nd in cold_chain.tree:
# if nd.isRoot():
# continue
# nd.setEdgeLen(new_edge_lens[i])
# i += 1
# replace the model
cold_chain.prepareForLikelihood()
cold_chain.likelihood.replaceModel(cold_chain.partition_model)
# recalculate the likelihood
cold_chain_manager = self.mcmc_manager.getColdChainManager()
cold_chain_manager.refreshLastLnLike()
#TODO what about polytomies?
############################ end exploreWorkingPrior ############################
def explorePrior(self, cycle):
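        # Overview: when exploring the prior (e.g. running without data, or with the
        # likelihood term switched off), parameter values are drawn directly from their
        # prior distributions and a new tree with edge lengths is generated from the tree
        # and edge length priors, rather than relying on MCMC proposals to mix over the prior.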
chain_index = 0
chain = self.mcmc_manager.getColdChain()
tm = phylogeny.TreeManip(chain.tree)
if self.opts.debugging:
tmpf = file('debug_info.txt', 'a')
tmpf.write('************** cycle=%d, chain=%d\n' % (cycle,chain_index))
edgelens_generated = False
nmodels = chain.partition_model.getNumSubsets()
unpartitioned = (nmodels == 1)
tree_length_prior_specified = (chain.likelihood.getTreeLengthPrior() is not None)
jpm = chain.chain_manager.getJointPriorManager()
for p in chain.chain_manager.getAllUpdaters():
if p.isFixed():
continue
w = p.getWeight()
name = p.getName()
if (name.find('edgelen_hyper') > -1) or (name.find('external_hyper') > -1) or (name.find('internal_hyper') > -1): # C++ class HyperPriorParam
self.phycassert(not tree_length_prior_specified, 'Cannot specify edge length hyperpriors and tree length prior simultaneously')
if not edgelens_generated:
# Choose hyperparam, then use it to choose new edge lengths for a newly-created tree
m = chain.partition_model.getModel(0)
if m.isSeparateInternalExternalEdgeLenPriors():
# draw an edge length hyperparameter value for external edges
edgelen_hyperparam = m.getEdgeLenHyperPrior().sample()
chain.chain_manager.setEdgeLenHyperparam(0, edgelen_hyperparam)
m.getExternalEdgeLenPrior().setMeanAndVariance(1.0/edgelen_hyperparam, 0.0) # 2nd arg. (variance) ignored for exponential distributions
jpm.edgeLenHyperparamModified('external_hyper', chain.tree, edgelen_hyperparam)
# draw an edge length hyperparameter value for internal edges
edgelen_hyperparam = m.getEdgeLenHyperPrior().sample()
chain.chain_manager.setEdgeLenHyperparam(1, edgelen_hyperparam)
m.getInternalEdgeLenPrior().setMeanAndVariance(1.0/edgelen_hyperparam, 0.0) # 2nd arg. (variance) ignored for exponential distributions
jpm.edgeLenHyperparamModified('internal_hyper', chain.tree, edgelen_hyperparam)
else:
# draw an edge length hyperparameter value that applies to all edges
edgelen_hyperparam = m.getEdgeLenHyperPrior().sample()
chain.chain_manager.setEdgeLenHyperparam(0, edgelen_hyperparam)
m.getInternalEdgeLenPrior().setMeanAndVariance(1.0/edgelen_hyperparam, 0.0) # 2nd arg. (variance) ignored for exponential distributions
m.getExternalEdgeLenPrior().setMeanAndVariance(1.0/edgelen_hyperparam, 0.0) # 2nd arg. (variance) ignored for exponential distributions
jpm.edgeLenHyperparamModified('edgelen_hyper', chain.tree, edgelen_hyperparam)
if self.opts.fix_topology:
tm.setRandomInternalExternalEdgeLengths(m.getInternalEdgeLenPrior(), m.getExternalEdgeLenPrior())
else:
tm.equiprobTree(chain.tree.getNTips(), chain.r, m.getInternalEdgeLenPrior(), m.getExternalEdgeLenPrior())
edgelens_generated = True
elif name.find('master_edgelen') > -1:
pass
elif name.find('external_edgelen') > -1:
pass
elif name.find('internal_edgelen') > -1:
pass
elif name.find('intedge_') > -1:
pass
elif name.find('extedge_') > -1:
pass
elif name.find('subset_relrates') > -1:
new_subset_relrate_vector = chain.partition_model.getSubsetRelRatePrior().sample()
chain.partition_model.setSubsetRelRatesVect(new_subset_relrate_vector)
jpm.multivariateModified(name, new_subset_relrate_vector)
elif name.find('kappa') > -1: # C++ class KappaParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = chain.partition_model.getModel(i)
new_kappa = m.getKappaPrior().sample()
m.setKappa(new_kappa)
jpm.univariateModified(name, new_kappa)
elif name.find('omega') > -1: # C++ class OmegaParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = chain.partition_model.getModel(i)
new_omega = m.getOmegaPrior().sample()
m.setOmega(new_omega)
jpm.univariateModified(name, new_omega)
elif name.find('rAC') > -1: # C++ class GTRRateParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = chain.partition_model.getModel(i)
new_rAC = m.getRelRateParamPrior().sample()
m.setRelRateUnnorm(0, new_rAC)
jpm.univariateModified(name, new_rAC)
elif name.find('rAG') > -1: # C++ class GTRRateParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = chain.partition_model.getModel(i)
new_rAG = m.getRelRateParamPrior().sample()
m.setRelRateUnnorm(1, new_rAG)
jpm.univariateModified(name, new_rAG)
elif name.find('rAT') > -1: # C++ class GTRRateParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = chain.partition_model.getModel(i)
new_rAT = m.getRelRateParamPrior().sample()
m.setRelRateUnnorm(2, new_rAT)
jpm.univariateModified(name, new_rAT)
elif name.find('rCG') > -1: # C++ class GTRRateParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = chain.partition_model.getModel(i)
new_rCG = m.getRelRateParamPrior().sample()
m.setRelRateUnnorm(3, new_rCG)
jpm.univariateModified(name, new_rCG)
elif name.find('rCT') > -1: # C++ class GTRRateParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = chain.partition_model.getModel(i)
new_rCT = m.getRelRateParamPrior().sample()
m.setRelRateUnnorm(4, new_rCT)
jpm.univariateModified(name, new_rCT)
elif name.find('rGT') > -1: # C++ class GTRRateParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = chain.partition_model.getModel(i)
new_rGT = m.getRelRateParamPrior().sample()
m.setRelRateUnnorm(5, new_rGT)
jpm.univariateModified(name, new_rGT)
            elif name.find('freqA') > -1: # C++ class StateFreqParam
                i = unpartitioned and 0 or self.getModelIndex(name)
                m = chain.partition_model.getModel(i)
                new_freq_param_A = m.getStateFreqParamPrior().sample()
                m.setStateFreqParam(0, new_freq_param_A)
                jpm.univariateModified(name, new_freq_param_A)
elif name.find('freqC') > -1: # C++ class StateFreqParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = chain.partition_model.getModel(i)
new_freq_param_C = m.getStateFreqParamPrior().sample()
m.setStateFreqParam(1, new_freq_param_C)
jpm.univariateModified(name, new_freq_param_C)
elif name.find('freqG') > -1: # C++ class StateFreqParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = chain.partition_model.getModel(i)
new_freq_param_G = m.getStateFreqParamPrior().sample()
m.setStateFreqParam(2, new_freq_param_G)
jpm.univariateModified(name, new_freq_param_G)
elif name.find('freqT') > -1: # C++ class StateFreqParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = chain.partition_model.getModel(i)
new_freq_param_T = m.getStateFreqParamPrior().sample()
m.setStateFreqParam(3, new_freq_param_T)
jpm.univariateModified(name, new_freq_param_T)
elif name.find('gamma_shape') > -1: # C++ class DiscreteGammaShapeParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = chain.partition_model.getModel(i)
new_gamma_shape = m.getDiscreteGammaShapePrior().sample()
m.setShape(new_gamma_shape)
jpm.univariateModified(name, new_gamma_shape)
elif name.find('pinvar') > -1: # C++ class PinvarParam
i = unpartitioned and 0 or self.getModelIndex(name)
m = chain.partition_model.getModel(i)
new_pinvar = m.getPinvarPrior().sample()
m.setPinvar(new_pinvar)
jpm.univariateModified(name, new_pinvar)
elif name.find('relrates') > -1: # C++ class RelRatesMove
i = unpartitioned and 0 or self.getModelIndex(name)
m = chain.partition_model.getModel(i)
rate_vector = m.getRelRatePrior().sample()
                # Drawing values from a Dirichlet prior, but relative rates should have mean 1, not sum 1,
                # so multiply each draw by 6 (the number of GTR exchangeability rates) to correct this.
                m.setRelRates([6.0*x for x in rate_vector])
jpm.multivariateModified(name, rate_vector)
elif name.find('state_freqs') > -1: # C++ class StateFreqMove
i = unpartitioned and 0 or self.getModelIndex(name)
m = chain.partition_model.getModel(i)
freq_vector = m.getStateFreqPrior().sample()
m.setStateFreqsUnnorm(freq_vector)
jpm.multivariateModified(name, freq_vector)
elif name.find('edge_move') > -1: # C++ class EdgeMove
pass
elif name.find('larget_simon_local') > -1: # C++ class LargetSimonMove
pass
elif name.find('tree_scaler') > -1: # C++ class TreeScalerMove
pass
elif name.find('bush_move') > -1: # C++ class BushMove
pass # polytomies handled further down (by randomly pruning fully-resolved equiprobable tree)
else:
self.phycassert(0, 'model uses an updater (%s) that has not yet been added to MCMCImpl.explorePrior (workaround: specify mcmc.draw_directly_from_prior = False)' % name)
# If no edge length hyperprior was specified, then build the tree with edge lengths now
if tree_length_prior_specified:
if not edgelens_generated:
m = chain.partition_model.getModel(0)
if self.opts.fix_topology:
tm.setRandomEdgeLensFromTreeLengthDist(chain.likelihood.getTreeLengthPrior())
else:
tm.buildEquiprobTree(chain.tree.getNTips(), chain.r)
if self.opts.allow_polytomies and not self.opts.fix_topology:
# Choose number of internal nodes
jpm = self.mcmc_manager.getColdChainManager().getJointPriorManager()
topo_prior_calculator = jpm.getTopoProbCalculator()
num_internal_nodes = topo_prior_calculator.sample(chain.r)
# Delete edges at random from tree to achieve chosen number of internal nodes
orig_num_internal_nodes = chain.tree.getNInternals()
num_internals_to_delete = orig_num_internal_nodes - num_internal_nodes
for i in range(num_internals_to_delete):
tm.deleteRandomInternalEdge(chain.r)
if not self.opts.fix_topology:
tm.setRandomEdgeLensFromTreeLengthDist(chain.likelihood.getTreeLengthPrior())
jpm.treeLengthModified('tree_length', chain.tree)
else:
if not edgelens_generated:
m = chain.partition_model.getModel(0)
if self.opts.fix_topology:
tm.setRandomInternalExternalEdgeLengths(m.getInternalEdgeLenPrior(), m.getExternalEdgeLenPrior())
else:
tm.equiprobTree(chain.tree.getNTips(), chain.r, m.getInternalEdgeLenPrior(), m.getExternalEdgeLenPrior())
if self.opts.allow_polytomies:
# Choose number of internal nodes
jpm = self.mcmc_manager.getColdChainManager().getJointPriorManager()
topo_prior_calculator = jpm.getTopoProbCalculator()
num_internal_nodes = topo_prior_calculator.sample(chain.r)
# Delete nodes at random from tree to achieve chosen number of internal nodes
orig_num_internal_nodes = chain.tree.getNInternals()
num_internals_to_delete = orig_num_internal_nodes - num_internal_nodes
for i in range(num_internals_to_delete):
tm.deleteRandomInternalEdge(chain.r)
jpm.allEdgeLensModified(chain.tree)
#jpm.externalEdgeLensModified('external_edgelen', chain.tree)
#jpm.internalEdgeLensModified('internal_edgelen', chain.tree)
if not self.opts.fix_topology:
jpm.topologyModified('tree_topology', chain.tree)
chain.prepareForLikelihood()
chain.likelihood.replaceModel(chain.partition_model)
if False:
# debugging code
chain.likelihood.storeSiteLikelihoods(True)
from phycas.Utilities.kappa2tratio import convert
f = chain.model.getStateFreqs()
k = convert(chain.model.getKappa(), f[0], f[1], f[2], f[3])
print 'cycle = %d, model = %s' % (cycle + 1, chain.model.getModelName())
print ' lset tratio=%.5f basefreq=(%.5f %.5f %.5f) rates=gamma ncat=4 shape=%.5f;' % (k, f[0], f[1], f[2], chain.model.getShape())
print 'taxon names:', self.opts.data_source.taxon_labels
chain.tree.rectifyNames(self.opts.data_source.taxon_labels)
print ' utree one = %s;' % chain.tree.makeNewick()
print ' sum of edge lengths = %.5f' % chain.tree.edgeLenSum()
# recalculate the likelihood
cold_chain_manager = self.mcmc_manager.getColdChainManager()
cold_chain_manager.refreshLastLnLike()
if False:
# debugging code
counts = chain.likelihood.getPatternCounts()
sitelikes = chain.likelihood.getSiteLikelihoods()
print ' lnL = %.6f' % cold_chain_manager.getLastLnLike()
sumlikes = 0.0
for sitelike,count in zip(sitelikes, counts):
if count > 100:
print '%6.0f -> %12.5f' % (count, sitelike)
sumlikes += count*sitelike
if self.opts.debugging:
tmpf.close()
#POLTMP def computeTimeRemaining(self, secs, ndone, ntotal):
def computeTimeRemaining(self, secs, cycle_start, cycle_stop, cycle):
if self.opts.doing_steppingstone_sampling:
num_betas_completed = self.ss_sampled_betas.index(self.ss_beta)
num_betas_total = len(self.ss_sampled_betas)
else:
num_betas_completed = 0
num_betas_total = 1
ndone = (cycle_stop - cycle_start)*num_betas_completed + (cycle - cycle_start)
ntotal = (cycle_stop - cycle_start)*num_betas_total
if ndone < 1:
return ''
days_remaining = 0
hours_remaining = 0
secs_remaining = float(secs)*(float(ntotal)/float(ndone) - 1.0)
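        # Illustrative example (values assumed): if 3600 seconds have elapsed and
        # ndone/ntotal = 0.25, then secs_remaining = 3600*(4.0 - 1.0) = 10800 seconds,
        # which is reported below as '3 hours remaining'.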
#print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
#print '~~~ secs =',secs
#print '~~~ cycle_start =',cycle_start
#print '~~~ cycle_stop =',cycle_stop
#print '~~~ cycle =',cycle
#print '~~~ num_betas_completed =',num_betas_completed
#print '~~~ num_betas_total =',num_betas_total
#print '~~~ ndone =',ndone
#print '~~~ ntotal =',ntotal
#print '~~~ ntotal/ndone =',(float(ntotal)/float(ndone))
#print '~~~ secs_remaining =',secs_remaining
#print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
time_left = []
if secs_remaining > 86400.0:
days_remaining = math.floor(secs_remaining/86400.0)
secs_remaining -= 86400.0*days_remaining
if days_remaining > 0:
if days_remaining == 1:
time_left.append('1 day')
else:
time_left.append('%d days' % days_remaining)
if secs_remaining > 3600.0:
hours_remaining = math.floor(secs_remaining/3600.0)
secs_remaining -= 3600.0*hours_remaining
if hours_remaining > 0:
if hours_remaining == 1:
time_left.append('1 hour')
else:
time_left.append('%d hours' % hours_remaining)
if secs_remaining > 60.0:
minutes_remaining = math.floor(secs_remaining/60.0)
secs_remaining -= 60.0*minutes_remaining
if minutes_remaining > 0:
if minutes_remaining == 1 and (days_remaining + hours_remaining) == 0:
time_left.append('less than 2 minutes')
else:
time_left.append('%d minutes' % minutes_remaining)
if len(time_left) > 0:
return ', '.join(time_left) + ' remaining'
elif math.floor(secs_remaining) == 1:
return '1 second remaining'
else:
return '%d seconds remaining' % math.floor(secs_remaining)
def mainMCMCLoop(self, explore_prior = False):
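        # Cycle numbering convention: cycles before 0 constitute the burn-in phase (slice
        # samplers and move tuning parameters adapt while cycle < 0); sampling begins at
        # cycle 0 and the loop terminates when cycle reaches self.cycle_stop.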
levels_file_created = False #temp!
nchains = len(self.mcmc_manager.chains)
# print '******** nchains =',nchains
self.last_adaptation = 0
#POLTMP self.next_adaptation = self.opts.adapt_first
CPP_UPDATER = True # if False, uses python obsoleteUpdateAllUpdaters
# self.debugShowTuningParameters()
#POLTMP for cycle in xrange(self.burnin + self.ncycles):
cycle = self.cycle_start
done = False
burning_in = True
while not done:
if cycle == 0:
# switching from burnin to sampling, tell chains to stop adapting themselves
burning_in = False
for c in self.mcmc_manager.chains:
c.chain_manager.setAdapting(False) #POLTMP
assert cycle < 0 or burning_in == False, 'should not be adapting chains during sampling phases'
# Update all updaters
if explore_prior and self.opts.draw_directly_from_prior:
if self.opts.ss_heating_likelihood or not self.opts.doing_steppingstone_sampling:
# MCMC without data, TI, or specific SS
self.explorePrior(cycle)
else:
# generalized SS
self.exploreWorkingPrior(cycle)
else:
for i,c in enumerate(self.mcmc_manager.chains):
if CPP_UPDATER:
c.chain_manager.updateAllUpdaters()
else:
self.obsoleteUpdateAllUpdaters(c, i, cycle)
# Attempt to swap two random chains
if nchains > 1:
self.mcmc_manager.attemptChainSwap(cycle)
# Provide progress report to user if it is time
if self.opts.verbose and self.doThisCycle(cycle, self.burnin, self.opts.report_every):
# Refresh log-likelihood of cold chain if necessary
if self.ss_beta == 0.0:
self.mcmc_manager.getColdChainManager().refreshLastLnLike()
self.stopwatch.normalize()
secs = self.stopwatch.elapsedSeconds()
#POLTMP time_remaining = self.computeTimeRemaining(secs, self.cycle_start + cycle + 1, self.cycle_stop)
time_remaining = self.computeTimeRemaining(secs, self.cycle_start, self.cycle_stop, cycle)
if time_remaining != '':
time_remaining = '(' + time_remaining + ')'
if self.opts.doing_steppingstone_sampling:
cold_chain_manager = self.mcmc_manager.getColdChainManager()
msg = 'beta = %.5f, cycle = %d, lnL = %.5f %s' % (self.ss_beta, cycle + 1, cold_chain_manager.getLastLnLike(), time_remaining)
else:
if nchains == 1:
cold_chain_manager = self.mcmc_manager.getColdChainManager()
msg = 'cycle = %d, lnL = %.5f %s' % (cycle + 1, cold_chain_manager.getLastLnLike(), time_remaining)
else:
msg = 'cycle = %d, ' % (cycle + 1)
for k in range(nchains):
c = self.mcmc_manager.chains[k]
msg += 'lnL(%.3f) = %.5f, ' % (c.heating_power, c.chain_manager.getLastLnLike())
msg += '%s' % time_remaining
self.output(msg)
# Sample chain if it is time
#POLTMP if self.beyondBurnin(cycle) and self.doThisCycle(cycle - self.burnin, self.opts.sample_every):
if self.beyondBurnin(cycle) and self.doThisCycle(cycle, self.burnin, self.opts.sample_every):
# Refresh log-likelihood(s) if necessary
if self.ss_beta == 0.0:
for i,c in enumerate(self.mcmc_manager.chains):
# is this necessary?
c.chain_manager.refreshLastLnLike()
#POLTMP self.mcmc_manager.recordSample(self.cycle_start + cycle)
self.mcmc_manager.recordSample(cycle)
cold_chain_manager = self.mcmc_manager.getColdChainManager()
sampled_lnL = cold_chain_manager.getLastLnLike()
self.ss_sampled_likes[self.ss_beta_index].append(sampled_lnL)
self.stopwatch.normalize()
# Adapt slice samplers if it is time
#POLTMP if self.doThisCycle(cycle, self.burnin, self.next_adaptation):
#POLTMP self.adaptSliceSamplers()
#POLTMP self.next_adaptation += 2*(self.next_adaptation - self.last_adaptation)
#POLTMP self.last_adaptation = cycle + 1
if cycle < 0:
self.adaptSliceSamplers()
if self.doThisCycle(cycle, self.burnin, self.opts.report_efficiency_every):
self.reportUpdaterEfficiency()
# self.debugShowTuningParameters()
# Recalculate joint prior to avoid creeping round-off error
jpm = self.mcmc_manager.getColdChainManager().getJointPriorManager()
jpm.recalcLogJointPrior()
cycle += 1 #POLTMP
if cycle == self.cycle_stop:
done = True
#POLTMP self.cycle_start += self.burnin + self.ncycles
#POLTMP self.cycle_start = self.cycle_stop
#POLTMP self.cycle_stop = self.cycle_start + self.ncycles
#def _isTreeLengthPriorBeingUsed(self):
# #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
# """
# Returns True if tree length prior is defined, False otherwise.
# """
# cold_chain = self.mcmc_manager.getColdChain()
# m0 = cold_chain.partition_model.getModel(0)
# return bool(m0.getTreeLengthPrior())
def _createRefDistMap(self, fn):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Takes a file name and from the contents of the
file creates a focal tree and an associative array that matches
reference distributions (values) with parameter names (keys). The
first line is a tree definition with embedded clade posteriors. Lines
following the first are reference distribution definitions, one per
line. The file fn looks like this:
(1:1.00000,3:1.00000,((2:1.00000,(8:1.00000,5:1.00000):1.00000):1.00000,((9:1.00000,6:1.00000):1.00000,(4:1.00000,(10:1.00000,7:1.00000):1.00000):1.00000):1.00000):1.00000)
split_-----*--*- = Gamma(10.6102,0.00119215)
split_-********* = Gamma(154.718,0.000845338)
split_-----*---- = Gamma(112.402,0.000775574)
split_------*--- = Gamma(74.0985,0.000784768)
split_-------*-- = Gamma(138.733,0.000884312)
split_------*--* = Gamma(29.9702,0.00115743)
split_--*------- = Gamma(87.8519,0.000947885)
split_--------*- = Gamma(101.101,0.000771077)
split_-*--*--*-- = Gamma(2.27518,0.00139715)
split_---------* = Gamma(87.2539,0.00083153)
split_---*------ = Gamma(125.99,0.000793145)
split_----*----- = Gamma(76.5084,0.000925474)
split_-*-******* = Gamma(14.437,0.00150977)
split_---*-**-** = Gamma(24.95,0.00127126)
split_---*--*--* = Gamma(0.977012,0.00117438)
split_----*--*-- = Gamma(32.8486,0.00113388)
split_-*-------- = Gamma(106.928,0.000846824)
split_NA = Gamma(2.37343,0.0256837)
tree_length = TreeLengthDist(1226.6,1183.64,12.6972,0.227628)
1_state_freqs = Dirichlet((559.71489, 531.95324, 507.49159, 685.90371))
1_kappa = Gamma(249.30762, 0.01510)
"""
cold_chain = self.mcmc_manager.getColdChain()
m0 = cold_chain.partition_model.getModel(0)
is_tree_length_prior = bool(m0.getTreeLengthPrior())
ref_dist_map = {}
lines = open(fn, 'r').readlines()
for line in lines[1:]:
stripped = line.strip()
if len(stripped) == 0 or stripped[0] == '#':
continue # skip blank lines and comment lines
k,v = line.split('=')
#print '_createRefDistMap: k = |%s|' % k.strip()
if k.strip() == 'tree_length' and not is_tree_length_prior:
# Don't create a reference distribution for tree length if not using the tree length prior
pass
else:
# Create reference distribution object and add to map
name = k.strip()
dist = eval(v.strip())
ref_dist_map[name] = dist
ref_dist_tree_newick = lines[0] # this is just the tree definition string, not a tree object
return (ref_dist_tree_newick,ref_dist_map)
def _addRefDistsToJointPriorManager(self, ref_dist_map):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Adds all reference distributions stored in supplied ref_dist_map to
the joint prior manager.
"""
cold_chain = self.mcmc_manager.getColdChain()
jpm = cold_chain.chain_manager.getJointPriorManager()
for k in ref_dist_map.keys():
key_name = k + '_ref'
distr = ref_dist_map[k]
distr_name = distr.__class__.__name__
if distr_name in ['Normal', 'InverseGamma', 'Gamma', 'Beta', 'BetaPrime']:
jpm.addUnivariateDistribution(key_name, distr, distr.sample())
elif distr_name in ['Dirichlet', 'RelativeRateDistribution']:
jpm.addMultivariateDistribution(key_name, distr, distr.sample())
elif distr_name == 'TreeLengthDist':
jpm.addTreeLengthDistribution(key_name, distr, cold_chain.tree)
else:
self.phycassert(False, 'MCMCImpl._addRefDistsToJointPriorManager: Need to add "%s" case' % distr_name)
def _loadReferenceDistribution(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
User has specified a file containing the reference distribution
definitions. Read this file and call _createRefDistMap to instantiate
and store the reference distributions. The reference tree newick
string is parsed and a reference tree created. Edge lengths in the
newick tree description are assumed to be posterior clade
probabilities.
"""
ref_dist_tree,ref_dist_map = self._createRefDistMap(self.opts.ssobj.refdistfile)
#self._addRefDistsToJointPriorManager(ref_dist_map)
# Build the reference tree from the description in ref_dist_tree
focal_tree = TreeCollection(newick=ref_dist_tree).trees[0]
ntips = focal_tree.getNObservables()
focal_tree.recalcAllSplits(ntips)
nd = focal_tree.getFirstPreorder()
assert nd.isRoot(), 'the first preorder node should be the root'
focal_root_number = nd.getNodeNumber()
self.phycassert(focal_tree.hasEdgeLens(), 'focal tree from reference distribution must have edge lengths (which will be interpreted as split posteriors)')
nd = nd.getNextPreorder()
while nd:
# Determine whether this split represents an internal or tip node
if not (nd.isTip() or nd.getParent().isRoot()):
split_prob = nd.getEdgeLen()
if split_prob == 0.0:
split_prob = 0.001
nd.setEdgeLen(split_prob)
if split_prob == 1.0:
split_prob = 0.999
nd.setEdgeLen(split_prob)
self.phycassert(split_prob > 0.0 and split_prob < 1.0, 'Split probabilities must be in the range (0, 1)')
s = nd.getSplit()
if s.isBitSet(0):
s.invertSplit()
nd = nd.getNextPreorder()
# Be sure trees for all chains are rooted with the same taxon as the reference tree
# but only if fix_topology is False (if fix_topology is True, the reference tree is
# irrelevant and rerooting the tree will cause problems because edge-specific updaters
# have already been created based on the current rooting)
if not self.opts.fix_topology:
for i,chain in enumerate(self.mcmc_manager.chains):
t = chain.getTree()
t.rerootAtTip(focal_root_number)
# Instantiate tree topology probability calculator, which is used to generate trees
# from the topology reference distribution
topo_ref_dist_calculator = likelihood.FocalTreeTopoProbCalculatorBase(focal_tree)
# Inform tree topology probability calculator of edge length reference distributions
prefix = 'split_'
default_edge_len = None
for k, v in ref_dist_map.iteritems():
if k.lower().startswith(prefix):
split_rep = k[len(prefix):] # get back end of name (part after prefix)
v.setLot(self._getLot())
if split_rep.lower() == 'na':
assert(default_edge_len is None)
topo_ref_dist_calculator.setDefaultEdgeLenDist(v)
else:
s = phylogeny.SplitBase()
s.createFromPattern(split_rep)
#print split_rep, v, s.createPatternRepresentation()
topo_ref_dist_calculator.setEdgeLenDist(s, v)
return topo_ref_dist_calculator, ref_dist_map
def _provideRefDistToUpdaters(self, cold_chain, topo_ref_dist_calculator, ref_dist_map):
#print '#@#@#@#@#@#@# ref_dist_map.keys() -->',ref_dist_map.keys()
if self.opts.fix_topology and 'tree_length' in ref_dist_map.keys():
cold_chain.likelihood.setTreeLengthRefDist(ref_dist_map['tree_length'].cloneAndSetLot(self._getLot()))
all_updaters = cold_chain.chain_manager.getAllUpdaters()
for u in all_updaters:
if u.isFixed():
continue
else:
u.setUseWorkingPrior(True)
updater_name = u.getName()
#print '~o~o~o~o~o~o~o~o~ updater_name =',updater_name
if updater_name in ['master_edgelen','external_edgelen','internal_edgelen'] and not self.opts.fix_topology:
# larget_simon_local takes over this role for variable topology steppingstone analyses
u.setUseWorkingPrior(False)
u.setPrior(None);
u.setMultivarPrior(None);
if u.useWorkingPrior():
if u.computesUnivariatePrior():
try:
d = ref_dist_map[updater_name].cloneAndSetLot(self._getLot())
except KeyError:
# Probably an edge length updater with name like 'intedge_1001' (whereas keys in ref_dist_map have names like 'split_-**-')
self.phycassert(u.__class__.__name__ == "EdgeLenParam", 'Expecting updater to be an EdgeLenParam. This is a bug: please report this to Paul Lewis ([email protected])')
split_key = 'split_%s' % u.getSplitReprAsString()
d = ref_dist_map[split_key].cloneAndSetLot(self._getLot())
u.setWorkingPrior(d)
#self.output(' %s = %s' % (updater_name, u.getWorkingPriorDescr()))
elif u.computesMultivariatePrior():
u.setMultivariateWorkingPrior(ref_dist_map[updater_name].cloneAndSetLot(self._getLot()))
#self.output(' %s = %s' % (updater_name, u.getWorkingPriorDescr()))
elif u.computesTopologyPrior():
u.setReferenceDistribution(topo_ref_dist_calculator)
def steppingstoneMCMC(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Performs a Steppingstone MCMC analysis.
"""
nchains = len(self.mcmc_manager.chains)
self.phycassert(self.data_matrix is not None, 'steppingstone sampling requires data')
self.phycassert(nchains == 1, 'steppingstone sampling currently requires nchains = 1')
chain = self.mcmc_manager.getColdChain()
if not self.opts.ssobj.refdist_is_prior:
topo_ref_dist_calculator, ref_dist_map = self._loadReferenceDistribution()
self._provideRefDistToUpdaters(chain, topo_ref_dist_calculator, ref_dist_map)
# Compute the current log-likelihood and log-prior in case first updater
# is a move and will thus depend on these quantities being accurate
for c in self.mcmc_manager.chains:
c.chain_manager.setTargetAcceptanceRate(self.opts.target_accept_rate) #POLTMP
c.chain_manager.setAdapting(True) #POLTMP
c.chain_manager.refreshLastLnLike()
if c.heating_power == 1.0:
self.output('Starting log-likelihood = %s' % c.chain_manager.getLastLnLike())
self.output('Starting log-prior = %s' % c.chain_manager.getJointPriorManager().getLogJointPrior())
self.mcmc_manager.recordSample()
if self.opts.ssobj.nstones > 1:
# Set up the list ss_sampled_betas
# Beta distribution will be divided into nstones intervals, each of which has an equal area
#
# Example: nstones = 5, Beta(a,1), a = 1/4
#
# quantile ------- beta -------
# 0/5 (0/5)^(1/a) = 0.0 <-- 0
# 1/5 (1/5)^(1/a) = 0.0016 <-- 1
# 2/5 (2/5)^(1/a) = 0.0256 <-- 2
# 3/5 (3/5)^(1/a) = 0.1296 <-- 3
# 4/5 (4/5)^(1/a) = 0.4096 <-- 4
# 5/5 (5/5)^(1/a) = 1.0 <-- not used
#
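            # For Beta(a,1) the CDF is F(x) = x**a, so the quantile in the table
            # above is F^{-1}(q) = q**(1/a); e.g. with a = 1/4, (3/5)**4 = 0.1296.
            # In the general case (shape2 != 1) each boundary comes from betadist.getQuantile().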
segment_area = 1.0/float(self.opts.ssobj.nstones)
cum_area = 0.0
lower_boundary = 0.0
self.ss_sampled_betas = [0.0]
betadist = probdist.Beta(self.opts.ssobj.shape1, self.opts.ssobj.shape2)
for i in range(self.opts.ssobj.nstones - 1):
cum_area += segment_area
upper_boundary = betadist.getQuantile(cum_area)
self.ss_sampled_betas.append(upper_boundary)
lower_boundary = upper_boundary
# Reverse ss_sampled_betas so that sampled beta values start high and decrease toward 0.0
self.ss_sampled_betas.reverse()
# Output the beta values that will be used
self.output('%d %s chosen from a discrete\nBeta(%.5f, %.5f) distribution:' % (self.opts.ssobj.nstones, (self.opts.ssobj.nstones == 1 and 'value was' or 'values were'), self.opts.ssobj.shape1, self.opts.ssobj.shape2))
for i,x in enumerate(self.ss_sampled_betas):
self.output('%6d %12.5f' % (i+1,x))
self.output('MCMC will be used to sample from each of the')
self.output('power posteriors defined by these values.')
self.output()
else:
self.ss_sampled_betas = [0.0]
# Run the main MCMC loop for each beta value in ss_sampled_betas
self.ss_sampled_likes = []
ref_dist_calculated = False
for self.ss_beta_index, self.ss_beta in enumerate(self.ss_sampled_betas):
self.ss_sampled_likes.append([])
chain.setPower(self.ss_beta)
#POLTMP2 boldness = 100.0*(1.0 - self.ss_beta)
#POLTMP2 chain.setBoldness(boldness)
#POLTMP2 self.output('Setting chain boldness to %g based on beta = %g' % (boldness,self.ss_beta))
#POLTMP self.cycle_stop = self.opts.burnin + len(self.ss_sampled_betas)*self.opts.ssobj.ncycles
self.ncycles = self.opts.ssobj.ncycles
self.burnin = self.opts.ssobj.burnin
chain.chain_manager.setTargetAcceptanceRate(self.opts.target_accept_rate) #POLTMP
chain.chain_manager.setAdapting(True) #POLTMP
#POLTMP if self.ss_beta_index > 0:
#POLTMP self.burnin = 0
#POLTMP else:
#POLTMP self.burnin = self.opts.burnin
#POLTMP self.cycle_start = 0
#self.burnin = self.opts.burnin
self.cycle_start = -self.burnin
#POLTMP self.cycle_stop = len(self.ss_sampled_betas)*self.opts.ssobj.ncycles
self.cycle_stop = self.opts.ssobj.ncycles
if self.ss_beta == 0.0:
self.mainMCMCLoop(explore_prior=True)
else:
self.mainMCMCLoop()
def standardMCMC(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Performs a standard MCMC analysis.
"""
# Compute the current log-likelihood and log-prior in case first updater
# is a move and will thus depend on these quantities being accurate
for c in self.mcmc_manager.chains:
c.chain_manager.setTargetAcceptanceRate(self.opts.target_accept_rate) #POLTMP
c.chain_manager.setAdapting(True) #POLTMP
c.chain_manager.refreshLastLnLike()
if c.heating_power == 1.0:
self.output('Starting log-likelihood = %s' % c.chain_manager.getLastLnLike())
self.output('Starting log-prior = %s' % c.chain_manager.getJointPriorManager().getLogJointPrior())
self.mcmc_manager.recordSample()
self.ss_sampled_likes = []
self.ss_sampled_likes.append([])
self.ss_beta_index = 0
#POLTMP self.cycle_start = 0
#POLTMP self.cycle_stop = self.opts.burnin + self.opts.ncycles
self.cycle_start = -self.opts.burnin
self.cycle_stop = self.opts.ncycles
self.burnin = self.opts.burnin
self.ncycles = self.opts.ncycles
if self.data_matrix is None:
self.mainMCMCLoop(explore_prior=True)
else:
self.mainMCMCLoop()
def run(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Performs the MCMC analysis.
"""
self.setup()
# If user has set quiet to True, then phycas.output calls will have no effect
self.quiet = self.opts.quiet
nchains = len(self.mcmc_manager.chains)
cold_chain = self.mcmc_manager.getColdChain()
if self.opts.verbose:
if self.data_matrix == None:
self.output('Data source: None (running MCMC with no data to explore prior)')
else:
self.output('Data source: %s' % str_value_for_user(self.opts.data_source))
self.output(' No. taxa: %d' % self.ntax)
self.output(' No. included characters: %d' % cold_chain.likelihood.sumPatternCounts())
all_missing = cold_chain.likelihood.getListOfAllMissingSites()
num_excluded = len(all_missing)
if num_excluded > 0:
self.output(' *** Note: the following %d sites were automatically excluded because' % num_excluded)
self.output(' *** they exhibited completely missing data for all taxa:')
while len(all_missing) > 0:
tmp = all_missing[:10]
all_missing = all_missing[10:]
self.output(' *** '+','.join([str(i+1) for i in tmp]))
if nchains > 1:
for c in range(nchains):
self.output('Starting tree for chain %d: %s' % (c, self.starting_tree[c]))
else:
self.output('Starting tree: %s' % str(self.starting_tree[0]))
if self.opts.fix_topology:
self.output('\nTree topology fixed.\n')
else:
self.output('\nTree topology free to vary.\n')
if self.opts.doing_steppingstone_sampling:
nsamples_per_step = self.opts.ssobj.ncycles//self.opts.sample_every
if self.opts.ss_heating_likelihood:
self.output('\nPerforming steppingstone sampling and thermodynamic integration to estimate marginal likelihood.')
else:
self.output('\nPerforming generalized steppingstone sampling to estimate marginal likelihood.')
self.output('Likelihood will be raised to the power beta, and beta will be')
self.output('decremented from 1.0 to 0.0 in a series of steps.')
self.output(' No. stones: %s' % self.opts.ssobj.nstones)
self.output(' No. cycles per step: %s' % self.opts.ssobj.ncycles)
self.output(' Sample every: %s' % self.opts.sample_every)
self.output(' No. samples per step: %s' % nsamples_per_step)
self.output('\n')
else:
nsamples_per_step = self.opts.ncycles//self.opts.sample_every
self.output('No. cycles: %s' % self.opts.ncycles)
self.output('Sample every: %s' % self.opts.sample_every)
self.output('No. samples: %s' % nsamples_per_step)
self.output('Sampled trees will be saved in %s' % str_value_for_user(self.opts.out.trees))
self.output('Sampled parameters will be saved in %s' % str_value_for_user(self.opts.out.params))
self.output('Using standard MCMC (i.e. no uniformized mapping)')
if not self.warn_tip_numbers:
self.output('Tip node numbers were set using the names in the tree description')
else:
self.output('Warning: tip node numbers were NOT set using the names in the tree description')
if nchains == 1:
self.output('Creating one chain (i.e. not using heated chains to improve mixing)')
else:
self.output('Creating %d chains with these temperatures:' % (nchains))
for t in self.heat_vector:
self.output(' %.5f %s' % (t, t == 1.0 and '(cold chain)' or ''))
# Show starting parameter info
self.output('\nParameter starting values and prior densities:')
cold_chain_manager = self.mcmc_manager.getColdChainManager()
for p in cold_chain_manager.getAllUpdaters():
self.showParamInfo(p)
# Show updater names
self.output('\nHere is a list of all updaters that will be used for this analysis:')
for p in cold_chain_manager.getAllUpdaters():
if p.getWeight() > 0:
p.setUseWorkingPrior(False)
if p.isMove():
self.output(' %s (Metropolis-Hastings)' % p.getName())
elif p.hasSliceSampler():
self.output(' %s (slice sampler)' % p.getName())
else:
self.output(' %s (computes prior but does not update)' % p.getName())
# Debugging: show data patterns
if self.opts.debugging and not cold_chain.likelihood.getNoData():
#cold_chain = self.mcmc_manager.getColdChain()
s = cold_chain.likelihood.listPatterns()
print '\nDebug Info: List of data patterns and their frequencies (see TreeLikelihood::listPatterns):'
print s
cold_chain.likelihood.debugUncompressedDataInfo("all-site-patterns.txt");
# Show information about topology prior to be used
self.showTopoPriorInfo()
self.stopwatch.start()
self.mcmc_manager.resetNumLikelihoodEvals()
if self.opts.doing_steppingstone_sampling:
self.output('\nSampling (%d cycles for each of the %d values of beta)...' % (self.opts.ssobj.ncycles, self.opts.ssobj.nstones))
else:
self.output('\nSampling (%d cycles)...' % self.opts.ncycles)
if self.opts.verbose:
print
# Lay down first line in params file (recorded as cycle 0) containing starting values of parameters
if self.opts.doing_steppingstone_sampling:
self.steppingstoneMCMC()
else:
self.standardMCMC()
#POLTMP self.adaptSliceSamplers()
self.resetUpdaterDiagnostics()
total_evals = self.mcmc_manager.getTotalEvals() #self.likelihood.getNumLikelihoodEvals()
total_secs = self.stopwatch.elapsedSeconds()
self.output('%d likelihood evaluations in %.5f seconds' % (total_evals, total_secs))
if (total_secs > 0.0):
self.output(' = %.5f likelihood evaluations/sec' % (total_evals/total_secs))
if self.treef:
self.treeFileClose()
if self.paramf:
self.paramFileClose()
| [
"[email protected]"
]
| |
3690de3d521be3542a0c4bafc7ca2f7cdc71a149 | 36bdbbf1be53ba5f09b9a2b1dd15e91f8f6b0da1 | /house/migrations/0004_auto_20180920_2003.py | 2c799c963093e1889c416fe8249beab35804299e | []
| no_license | phufoxy/fotourNew | 801ab2518424118020dc6e5f31a7ba90a654e56a | 6048c24f5256c8c5a0d18dc7b38c106a7c92a29c | refs/heads/master | 2023-04-13T01:34:22.510717 | 2018-12-26T03:46:09 | 2018-12-26T03:46:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | # Generated by Django 2.1 on 2018-09-20 13:03
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('house', '0003_auto_20180920_2002'),
]
operations = [
migrations.RenameField(
model_name='house',
old_name='city',
new_name='location',
),
migrations.AlterField(
model_name='comment_house',
name='date',
field=models.DateTimeField(default=datetime.datetime(2018, 9, 20, 20, 3, 15, 282441)),
),
migrations.AlterField(
model_name='house_tour',
name='date',
field=models.DateTimeField(default=datetime.datetime(2018, 9, 20, 20, 3, 15, 283439)),
),
]
| [
"[email protected]"
]
| |
2e2066e619ccc7d33ad5a3115a5f2d7b25804acc | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_206/1619.py | 627355a3f0c5448f6788990546a6563e13a4e9b3 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | for Pr in xrange(1, input()+1):
D, N=[int(x) for x in raw_input().split()]
d, v=[], []
for i in xrange(N):
c=[int(x) for x in raw_input().split()]
d.append(c[0])
v.append(c[1])
d_sort=[k for k in d]
d_sort.sort()
v_sort=[]
for k in d_sort:
v_sort.append(v[d.index(k)])
d,v=d_sort, v_sort
curv=v[0]
d_ok, v_ok=[d[0]], [v[0]]
for i in xrange(1, N):
if v[i]<curv:
d_ok.append(d[i])
v_ok.append(v[i])
curv=v[i]
d, v=d_ok, v_ok
N=len(d)
d1, v1=d[0], v[0]
T=0
for i in xrange(1, N):
t=float(d1-d[i])/(v[i]-v1)
dd=d1+v1*t
if D<=dd:
break
else:
d1=dd
v1=v[i]
T+= t
T+= (D-d1)/float(v1)
solve=D/T
print 'Case #%d: %f'%(Pr, solve)
| [
"[email protected]"
]
| |
36890bb9f1effdf8395b81675dd2e0393e4a1f10 | a3b0e7acb6e0d7e73f5e369a17f367ac7caf83fb | /python/Udemy/Learn_Python_Programming_StepByStep/16_Database_SQLite/pulling data.py | a908c75deb59e48146df2835e10bc32a6420b7ed | []
| no_license | jadedocelot/Bin2rong | f9d35731ca7df50cfba36141d249db2858121826 | 314b509f7b3b3a6a5d6ce589dbc57a2c6212b3d7 | refs/heads/master | 2023-03-24T14:03:32.374633 | 2021-03-19T00:05:24 | 2021-03-19T00:05:24 | 285,387,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | import sqlite3
conct = sqlite3.connect("database/mydatabase.db")
try:
cur = conct.cursor()
for records in cur.execute("SELECT * FROM student"):
print(records)
# cur.execute("SELECT name FROM student")
# print(cur.fetchmany(3))
# print(cur.fetchall())
# print(cur.fetchone())
except Exception as err:
print(err)
finally:
conct.close()
# See CHAPTER README for notes | [
"[email protected]"
]
| |
b4cc31ff92eb9ed40614c2fd26c025ec9077120f | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/387/usersdata/273/107669/submittedfiles/ep2.py | 33033fb09115fd60503b0008b74f07c92d2e70db | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,536 | py | # -*- coding: utf-8 -*-
'''
/**********************************************************/
/* Equipe: Fulano de Tal:Leandro Pedro de Freitas e Gustavo henrique */
/* N ́umero de matriculas:381420 e */
/* Exercicio-Programa 2 -- TEMA *Razao aurea
/* ECI0007 ou EM0006 (EC/EM) -- 2017 -- Professor: Rafael perazzo */
/* Interpretador: Python vers~ao 3 */
/**********************************************************
'''
#COMECE SEU CODIGO NA LINHA ABAIXO.
def calcula_valor_absoluto (x):
x=((x**2)**0.5)
def calcula_pi(m):
for i in range(1,m,1):
pi0=0
a=2
b=3
c=4
if ((i%2)==0):
pi=pi0-(4/(a*b*c))
else:
pi=pi0+(4/(a*b*c))
a=a+2
b=b+2
c=c+2
valordopi=pi+3
return(valordopi)
def calcula_co_seno(z):
for z in range(1,z,1):
a=2
deno=2
cosseno=0
if (z%2)==0:
while (deno>0):
produto=((calcula_valor_absoluto)/(deno))
deno=deno-1
cosseno=cosseno-produto
else:
while (deno>0):
produto=((calcula_valor_absoluto)/(deno))
deno=deno-1
cosseno=cosseno+produto
deno=deno+2
a=a+2
cosseno_real=cosseno+1
return(cosseno_real)
| [
"[email protected]"
]
| |
fbf3c62c164a93fa9c4828bcb8325e94ed813e75 | 9e65e409106aad6339424a1afac3e75e15b4f488 | /0x11-python-network_1/6-post_email.py | 69b2654e635444013e91f5cd6642ffd28af4f413 | []
| no_license | jfbm74/holbertonschool-higher_level_programming | 63fe8e35c94454896324791585fe7b8e01e1c67d | 609f18ef7a479fb5dcf1825333df542fc99cec7b | refs/heads/master | 2023-03-05T20:44:05.700354 | 2021-02-10T00:21:54 | 2021-02-10T00:21:54 | 259,452,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | #!/usr/bin/python3
"""
Python script that takes in a URL and an email address,
sends a POST request to the passed URL with the email as
a parameter
"""
import requests
from sys import argv
if __name__ == '__main__':
url = argv[1]
email = {'email': argv[2]}
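    # the second positional argument of requests.post() is `data`, so the dict
    # below is sent form-encoded; pass json=email instead for a JSON body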
req = requests.post(url, email)
print(req.text)
| [
"[email protected]"
]
| |
9151b1b46f2c5b5e9581951a8af9df5a3f4f1f95 | b844c72c394b13d9ed4f73222a934f962d6ff187 | /src/matching_counts.py | 118790fdedcd4c34ee60aad44942be3563c04bce | []
| no_license | curtisbright/sagesat | b9b4c9180c75ce8574217058ffa4e121163ccf36 | 8fe52609ab6479d9b98a1e6cf2199a4f12c27777 | refs/heads/master | 2021-01-01T17:52:01.288449 | 2015-08-19T18:14:26 | 2015-08-19T18:14:26 | 41,425,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | '''
Created on Sep 20, 2014
@author: ezulkosk
'''
from common.common import Options
from main import run
if __name__ == '__main__':
TEST_DIR = "../test/matching_counts/"
options = Options()
options.SHARPSAT=True
print("Obtaining counts for hypercube matchings.")
for d in range(2,6):
d_dir = TEST_DIR + "d" + str(d) + "/"
print("Dimension "+ str(d))
spec = d_dir + "matchings"
print("\tmatchings: " + str(run(spec, options)))
spec = d_dir + "forbidden_matchings"
print("\tforbidden matchings: " + str(run(spec, options)))
spec = d_dir + "maximal_forbidden_matchings"
print("\tmaximal forbidden matchings: " + str(run(spec, options)))
| [
"[email protected]"
]
| |
e598bbc2a1a04dda520a9ae42d5eb12de22c2eea | 78980891d3137810bf3a3c1bb229966b7f49f0dd | /interestings/asyncio_demos/coro10.py | 6c8cc07ed8e8518539a71a7fd1e9aa82e39641db | []
| no_license | miniyk2012/leetcode | 204927d3aefc9746070c1bf13abde517c6c16dc0 | 91ca9cd0df3c88fc7ef3c829dacd4d13f6b71ab1 | refs/heads/master | 2021-06-17T21:50:31.001111 | 2021-03-10T11:36:23 | 2021-03-10T11:36:23 | 185,042,818 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | import asyncio
import time
from contextlib import contextmanager
async def a():
await asyncio.sleep(3)
return 'A'
async def b():
await asyncio.sleep(1)
return 'B'
async def s1():
return await asyncio.gather(a(), b())
@contextmanager
def timed(func):
start = time.perf_counter()
yield asyncio.run(func())
print(f'Cost: {time.perf_counter() - start}')
if __name__ == '__main__':
with timed(s1) as rv:
print(f'Result: {rv}') | [
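# Note: asyncio.gather() runs a() and b() concurrently, so the block above takes
# roughly max(3, 1) = 3 seconds rather than 4, and rv keeps argument order: ['A', 'B'].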
"[email protected]"
]
| |
14cfc235ea57ad8c87158c3a105277b32dd89e0b | f3b233e5053e28fa95c549017bd75a30456eb50c | /p38a_input/L3FN/3FN-2I_wat_20Abox/set_1.py | a66fca767f59717be304bfaf4b0ba3facedc7df3 | []
| no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import os
dir = '/mnt/scratch/songlin3/run/p38a/L3FN/wat_20Abox/ti_one-step/3FN_2I/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_1.in'
temp_pbs = filesdir + 'temp_1.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_1.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_1.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
]
| |
b964a72f0dbb3381a2b723bbe70654e1d5ab14b1 | 8ca19f1a31070738b376c0370c4bebf6b7efcb43 | /office365/directory/protection/riskyusers/activity.py | f84b5aacfd83351b65aacf60389d622975b7a493 | [
"MIT"
]
| permissive | vgrem/Office365-REST-Python-Client | 2ef153d737c6ed5445ba1e446aeaec39c4ef4ed3 | cbd245d1af8d69e013c469cfc2a9851f51c91417 | refs/heads/master | 2023-09-02T14:20:40.109462 | 2023-08-31T19:14:05 | 2023-08-31T19:14:05 | 51,305,798 | 1,006 | 326 | MIT | 2023-08-28T05:38:02 | 2016-02-08T15:24:51 | Python | UTF-8 | Python | false | false | 201 | py | from office365.runtime.client_value import ClientValue
class RiskUserActivity(ClientValue):
"""Represents the risk activites of an Azure AD user as determined by Azure AD Identity Protection."""
| [
"[email protected]"
]
| |
af6acf69a5f749171c212ccadb2286cebe501086 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /Examples/bokeh/plotting/file/bollinger.py | 95cb41a56fde8c6e8db94b100473937ab032ef56 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
]
| permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 591 | py | import numpy as np
from bokeh.plotting import figure, show, output_file
# Define Bollinger Bands.
upperband = np.random.random_integers(100, 150, size=100)
lowerband = upperband - 100
x_data = np.arange(1, 101)
# Bollinger shading glyph:
band_x = np.append(x_data, x_data[::-1])
band_y = np.append(lowerband, upperband[::-1])
output_file('bollinger.html', title='Bollinger bands (file)')
p = figure(x_axis_type='datetime')
p.patch(band_x, band_y, color='#7570B3', fill_alpha=0.2)
p.title = 'Bollinger Bands'
p.plot_height = 600
p.plot_width = 800
p.grid.grid_line_alpha = 0.4
show(p)
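
# A minimal sketch (not part of the original bokeh example) of how real Bollinger
# bands would be derived from a price series instead of random integers: a rolling
# mean plus/minus k rolling standard deviations over a fixed window.
def bollinger_bands(prices, window=20, k=2):
    prices = np.asarray(prices, dtype=float)
    means = np.array([prices[i - window:i].mean() for i in range(window, len(prices) + 1)])
    stds = np.array([prices[i - window:i].std() for i in range(window, len(prices) + 1)])
    return means - k * stds, means + k * stds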
| [
"[email protected]"
]
| |
7b2b78d7ba55a651c167d3dc9d0da45dd5fb4a9f | 12fa783b25175407440657173447b774b6151430 | /synthesizer/preprocess.py | 83a777c4dc3ec3378a5e5484e37387869c52d1e4 | [
"MIT"
]
| permissive | Ramstein/voice-mimicking | 8e91241c414a1b2e4ae77bc0cf3f70b3295d8a37 | 75c4e75fb34d111071e33cea34c282a0e4b01b4d | refs/heads/master | 2023-07-21T22:59:37.956103 | 2021-09-07T18:06:15 | 2021-09-07T18:06:15 | 404,076,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,403 | py | from multiprocessing.pool import Pool
from synthesizer import audio
from functools import partial
from itertools import chain
from encoder import inference as encoder
from pathlib import Path
from utils import logmmse
from tqdm import tqdm
import numpy as np
import librosa
def preprocess_librispeech(datasets_root: Path, out_dir: Path, n_processes: int,
skip_existing: bool, hparams):
# Gather the input directories
dataset_root = datasets_root.joinpath("LibriSpeech")
input_dirs = [dataset_root.joinpath("train-clean-100"),
dataset_root.joinpath("train-clean-360")]
print("\n ".join(map(str, ["Using data from:"] + input_dirs)))
assert all(input_dir.exists() for input_dir in input_dirs)
# Create the output directories for each output file type
out_dir.joinpath("mels").mkdir(exist_ok=True)
out_dir.joinpath("audio").mkdir(exist_ok=True)
# Create a metadata file
metadata_fpath = out_dir.joinpath("train.txt")
metadata_file = metadata_fpath.open("a" if skip_existing else "w", encoding="utf-8")
# Preprocess the dataset
speaker_dirs = list(chain.from_iterable(input_dir.glob("*") for input_dir in input_dirs))
func = partial(preprocess_speaker, out_dir=out_dir, skip_existing=skip_existing,
hparams=hparams)
job = Pool(n_processes).imap(func, speaker_dirs)
for speaker_metadata in tqdm(job, "LibriSpeech", len(speaker_dirs), unit="speakers"):
for metadatum in speaker_metadata:
metadata_file.write("|".join(str(x) for x in metadatum) + "\n")
metadata_file.close()
# Verify the contents of the metadata file
with metadata_fpath.open("r", encoding="utf-8") as metadata_file:
metadata = [line.split("|") for line in metadata_file]
mel_frames = sum([int(m[4]) for m in metadata])
timesteps = sum([int(m[3]) for m in metadata])
sample_rate = hparams.sample_rate
hours = (timesteps / sample_rate) / 3600
print("The dataset consists of %d utterances, %d mel frames, %d audio timesteps (%.2f hours)." %
(len(metadata), mel_frames, timesteps, hours))
print("Max input length (text chars): %d" % max(len(m[5]) for m in metadata))
print("Max mel frames length: %d" % max(int(m[4]) for m in metadata))
print("Max audio timesteps length: %d" % max(int(m[3]) for m in metadata))
def preprocess_speaker(speaker_dir, out_dir: Path, skip_existing: bool, hparams):
metadata = []
for book_dir in speaker_dir.glob("*"):
# Gather the utterance audios and texts
try:
alignments_fpath = next(book_dir.glob("*.alignment.txt"))
with alignments_fpath.open("r") as alignments_file:
alignments = [line.rstrip().split(" ") for line in alignments_file]
except StopIteration:
# A few alignment files will be missing
continue
# Iterate over each entry in the alignments file
for wav_fname, words, end_times in alignments:
wav_fpath = book_dir.joinpath(wav_fname + ".flac")
assert wav_fpath.exists()
words = words.replace("\"", "").split(",")
end_times = list(map(float, end_times.replace("\"", "").split(",")))
# Process each sub-utterance
wavs, texts = split_on_silences(wav_fpath, words, end_times, hparams)
for i, (wav, text) in enumerate(zip(wavs, texts)):
sub_basename = "%s_%02d" % (wav_fname, i)
metadata.append(process_utterance(wav, text, out_dir, sub_basename,
skip_existing, hparams))
return [m for m in metadata if m is not None]
def split_on_silences(wav_fpath, words, end_times, hparams):
# Load the audio waveform
wav, _ = librosa.load(wav_fpath, hparams.sample_rate)
if hparams.rescale:
wav = wav / np.abs(wav).max() * hparams.rescaling_max
words = np.array(words)
start_times = np.array([0.0] + end_times[:-1])
end_times = np.array(end_times)
assert len(words) == len(end_times) == len(start_times)
assert words[0] == "" and words[-1] == ""
# Find pauses that are too long
mask = (words == "") & (end_times - start_times >= hparams.silence_min_duration_split)
mask[0] = mask[-1] = True
breaks = np.where(mask)[0]
# Profile the noise from the silences and perform noise reduction on the waveform
silence_times = [[start_times[i], end_times[i]] for i in breaks]
silence_times = (np.array(silence_times) * hparams.sample_rate).astype(np.int)
noisy_wav = np.concatenate([wav[stime[0]:stime[1]] for stime in silence_times])
if len(noisy_wav) > hparams.sample_rate * 0.02:
profile = logmmse.profile_noise(noisy_wav, hparams.sample_rate)
wav = logmmse.denoise(wav, profile, eta=0)
# Re-attach segments that are too short
segments = list(zip(breaks[:-1], breaks[1:]))
segment_durations = [start_times[end] - end_times[start] for start, end in segments]
i = 0
while i < len(segments) and len(segments) > 1:
if segment_durations[i] < hparams.utterance_min_duration:
# See if the segment can be re-attached with the right or the left segment
left_duration = float("inf") if i == 0 else segment_durations[i - 1]
right_duration = float("inf") if i == len(segments) - 1 else segment_durations[i + 1]
joined_duration = segment_durations[i] + min(left_duration, right_duration)
# Do not re-attach if it causes the joined utterance to be too long
if joined_duration > hparams.hop_size * hparams.max_mel_frames / hparams.sample_rate:
i += 1
continue
# Re-attach the segment with the neighbour of shortest duration
j = i - 1 if left_duration <= right_duration else i
segments[j] = (segments[j][0], segments[j + 1][1])
segment_durations[j] = joined_duration
del segments[j + 1], segment_durations[j + 1]
else:
i += 1
# Split the utterance
segment_times = [[end_times[start], start_times[end]] for start, end in segments]
segment_times = (np.array(segment_times) * hparams.sample_rate).astype(np.int)
wavs = [wav[segment_time[0]:segment_time[1]] for segment_time in segment_times]
texts = [" ".join(words[start + 1:end]).replace(" ", " ") for start, end in segments]
# # DEBUG: play the audio segments (run with -n=1)
# import sounddevice as sd
# if len(wavs) > 1:
# print("This sentence was split in %d segments:" % len(wavs))
# else:
# print("There are no silences long enough for this sentence to be split:")
# for wav, text in zip(wavs, texts):
# # Pad the waveform with 1 second of silence because sounddevice tends to cut them early
# # when playing them. You shouldn't need to do that in your parsers.
# wav = np.concatenate((wav, [0] * 16000))
# print("\t%s" % text)
# sd.play(wav, 16000, blocking=True)
# print("")
return wavs, texts
def process_utterance(wav: np.ndarray, text: str, out_dir: Path, basename: str,
skip_existing: bool, hparams):
## FOR REFERENCE:
# For you not to lose your head if you ever wish to change things here or implement your own
# synthesizer.
# - Both the audios and the mel spectrograms are saved as numpy arrays
# - There is no processing done to the audios that will be saved to disk beyond volume
# normalization (in split_on_silences)
# - However, pre-emphasis is applied to the audios before computing the mel spectrogram. This
# is why we re-apply it on the audio on the side of the vocoder.
# - Librosa pads the waveform before computing the mel spectrogram. Here, the waveform is saved
# without extra padding. This means that you won't have an exact relation between the length
# of the wav and of the mel spectrogram. See the vocoder data loader.
# Skip existing utterances if needed
mel_fpath = out_dir.joinpath("mels", "mel-%s.npy" % basename)
wav_fpath = out_dir.joinpath("audio", "audio-%s.npy" % basename)
if skip_existing and mel_fpath.exists() and wav_fpath.exists():
return None
# Skip utterances that are too short
if len(wav) < hparams.utterance_min_duration * hparams.sample_rate:
return None
# Compute the mel spectrogram
mel_spectrogram = audio.melspectrogram(wav, hparams).astype(np.float32)
mel_frames = mel_spectrogram.shape[1]
# Skip utterances that are too long
if mel_frames > hparams.max_mel_frames and hparams.clip_mels_length:
return None
# Write the spectrogram, embed and audio to disk
np.save(mel_fpath, mel_spectrogram.T, allow_pickle=False)
np.save(wav_fpath, wav, allow_pickle=False)
# Return a tuple describing this training example
return wav_fpath.name, mel_fpath.name, "embed-%s.npy" % basename, len(wav), mel_frames, text
def embed_utterance(fpaths, encoder_model_fpath):
if not encoder.is_loaded():
encoder.load_model(encoder_model_fpath)
# Compute the speaker embedding of the utterance
wav_fpath, embed_fpath = fpaths
wav = np.load(wav_fpath)
wav = encoder.preprocess_wav(wav)
embed = encoder.embed_utterance(wav)
np.save(embed_fpath, embed, allow_pickle=False)
def create_embeddings(synthesizer_root: Path, encoder_model_fpath: Path, n_processes: int):
wav_dir = synthesizer_root.joinpath("audio")
metadata_fpath = synthesizer_root.joinpath("train.txt")
assert wav_dir.exists() and metadata_fpath.exists()
embed_dir = synthesizer_root.joinpath("embeds")
embed_dir.mkdir(exist_ok=True)
# Gather the input wave filepath and the target output embed filepath
with metadata_fpath.open("r") as metadata_file:
metadata = [line.split("|") for line in metadata_file]
fpaths = [(wav_dir.joinpath(m[0]), embed_dir.joinpath(m[2])) for m in metadata]
# TODO: improve on the multiprocessing, it's terrible. Disk I/O is the bottleneck here.
# Embed the utterances in separate threads
func = partial(embed_utterance, encoder_model_fpath=encoder_model_fpath)
job = Pool(n_processes).imap(func, fpaths)
list(tqdm(job, "Embedding", len(fpaths), unit="utterances"))
| [
"[email protected]"
]
| |
3dc9c0e03163ba648da6dd6492a03da3e5d66cb0 | 824ccee9f5c27f5b3fec0dff710e9c4a181a9747 | /atlas_coords/all_sky_phot.py | 71562becbd41c1ca68d871cc16caa871a6a2114f | []
| no_license | yoachim/ScratchStuff | 1450a2a785c28c209121600072e7ccb2ba47af08 | 10c490dd1f3c574738ff4ecb71e135180df4724e | refs/heads/master | 2020-04-04T07:31:17.017542 | 2019-07-02T16:51:12 | 2019-07-02T16:51:12 | 35,652,984 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,223 | py | import numpy as np
import astropy
from astropy.io import fits
import matplotlib.pylab as plt
from astropy.stats import sigma_clipped_stats
from photutils import DAOStarFinder, aperture_photometry, CircularAperture, CircularAnnulus
# Need to pick up photutils
# conda install -c astropy photutils
# Following example from http://photutils.readthedocs.io/en/stable/photutils/aperture.html
# Let's do an example of generating a catalog of stars from an all-sky image
# Collect relevant kwargs
# FWHM of stars
fwhm = 1.5
# how many sigma above background
threshold = 5.
# Radius for stellar aperture
r_star = 4.
# For sky anulus
r_in = 6.
r_out = 8.
# Load up an image and a few header values
hdulist = fits.open('02k57699o0526w.fits.fz')
mjd = hdulist[1].header['MJD-OBS'] + 0
exptime = hdulist[1].header['EXPTIME'] + 0
image = hdulist[1].data + 0.
try:
hdulist.close()
except:
hdulist.close()
# crop down image for easy example
image = image[500:1000, 1000:1500]
# Simple stats of the image
maskval = image[0, 0]
good = np.where(image != maskval)
mean, median, std = sigma_clipped_stats(image[good], sigma=3.0, iters=5)
# include a mask
mask = np.zeros_like(image, dtype=bool)
mask[np.where(image == maskval)] = True
# fwhm in pixels, find sources
daofind = DAOStarFinder(fwhm=fwhm, threshold=threshold*std)
sources = daofind(image - median)
# Do aperature phot
positions = (sources['xcentroid'], sources['ycentroid'])
# aperture for stars
apertures = CircularAperture(positions, r=r_star)
# sky background anulus
annulus_apertures = CircularAnnulus(positions, r_in=r_in, r_out=r_out)
apers = [apertures, annulus_apertures]
phot_table = aperture_photometry(image, apers, mask=mask)
bkg_mean = phot_table['aperture_sum_1'] / annulus_apertures.area()
bkg_sum = bkg_mean * apertures.area()
final_sum = phot_table['aperture_sum_0'] - bkg_sum
phot_table['residual_aperture_sum'] = final_sum
from astropy.visualization import SqrtStretch
from astropy.visualization.mpl_normalize import ImageNormalize
norm = ImageNormalize(stretch=SqrtStretch(), vmin=0, vmax=100)
plt.imshow(image, cmap='Greys', origin='lower', norm=norm)
apertures.plot(color='blue', lw=1.5, alpha=0.5)
plt.savefig('phot_example.png')
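
# A possible follow-up step (not in the original script): convert the
# background-subtracted counts to instrumental magnitudes, normalizing by the
# exposure time read from the header earlier. Sources with non-positive flux are
# left as NaN since log10 is undefined there.
def instrumental_mags(fluxes, exposure_time):
    fluxes = np.asarray(fluxes, dtype=float)
    good_flux = fluxes > 0
    mags = np.full(fluxes.shape, np.nan)
    mags[good_flux] = -2.5 * np.log10(fluxes[good_flux] / exposure_time)
    return mags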
| [
"[email protected]"
]
| |
ce0c15a0ad0f852b7ae51ff9ad8119b7ad6c0b84 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2737/8160/259430.py | 84b3baed41127949fbe85e854b03ddcffde98b5d | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | import collections
def majorityElement(nums):
return [key for key, val in collections.Counter(nums).items() if val > len(nums) // 3]
nums = list(map(int, input().split()))  # assumes one line of space-separated integers
print(majorityElement(nums)) | [
"[email protected]"
]
| |
f895c9359552253b5b8147d97aa07c1c39c028c4 | fb28175748fdb497f23e71e9a11c89fdf41b8a11 | /level.py | 99b3afb2cfe14c3e237dd56e741ad2fd48a74604 | []
| no_license | daeken/ygritte | 40f571505a7a77769a98c18c45f8793cf87faa4d | 0f8bf75f761b40a6c863d589dfe44f189d22ea7e | refs/heads/master | 2020-05-20T02:08:31.671031 | 2011-04-10T21:45:42 | 2011-04-10T21:45:42 | 1,563,991 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | from asset import Asset
grid = Asset.load('layer0')
stars = Asset.load('layer1')
class Level(object):
levels = {}
def __init__(self, game):
self.game = game
self.bg = Asset.load('layer2_outline', mask='layer2_mask')# % self.levelNumber)
stars.maskOff(self.bg)
self.i = 0
@staticmethod
def spawn(game, number, *args, **kwargs):
return Level.levels[number](game, *args, **kwargs)
@staticmethod
def register(level):
Level.levels[level.levelNumber] = level
def draw(self, surface):
grid.draw(surface, (0, 0), center=False)
stars.draw(surface, self.game.worldOff, center=True)
self.bg.draw(surface, self.game.worldOff, center=True)
self.i += 1
@Level.register
class LevelZero(Level):
levelNumber = 0
def __init__(self, game):
Level.__init__(self, game)
| [
"[email protected]"
]
| |
7cd0a6131a02f20edef1352a07bec29cbd8a381e | 0c98ca0ca250d0dc815813287d732c48c70aa12a | /EXP/run_bar_framed_rectangle_from_scratch.py | becb6e7809018ef3e95c4275f7f7fd715685c547 | []
| no_license | AswinVasudevan21/perception | 8f00479870ee767963767fc621325c1a96249196 | 7a6522fdfbb26ec56a682a4cdf41102cbfd6731a | refs/heads/master | 2020-07-14T13:54:10.923781 | 2019-07-23T11:21:23 | 2019-07-23T11:21:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,937 | py | from keras import models
from keras import layers
from keras import optimizers
import keras.applications
import keras.callbacks
from keras import backend as K
from keras.utils.np_utils import to_categorical
import sklearn.metrics
import cPickle as pickle
import numpy as np
import os
import sys
import time
import ClevelandMcGill as C
EXPERIMENT = sys.argv[1] # f.e. C.Figure12.data_to_framed_rectangles
CLASSIFIER = sys.argv[2] # f.e. 'VGG19' or 'XCEPTION' (the only feature generators handled below)
NOISE = sys.argv[3] # True
JOB_INDEX = int(sys.argv[4])
#
#
#
print 'Running', EXPERIMENT, 'with', CLASSIFIER, 'Noise:', NOISE, 'Job Index', JOB_INDEX
#
#
# PROCESS SOME FLAGS
#
#
SUFFIX = '.'
if NOISE == 'True':
NOISE = True
SUFFIX = '_noise.'
else:
NOISE = False
DATATYPE = eval(EXPERIMENT)
if os.path.abspath('~').startswith('/n/'):
# we are on the cluster
PREFIX = '/n/regal/pfister_lab/PERCEPTION/'
else:
PREFIX = '/home/d/PERCEPTION/'
RESULTS_DIR = PREFIX + 'RESULTS_FROM_SCRATCH/'
OUTPUT_DIR = RESULTS_DIR + EXPERIMENT + '/' + CLASSIFIER + '/'
if not os.path.exists(OUTPUT_DIR):
# here can be a race condition
try:
os.makedirs(OUTPUT_DIR)
except:
print 'Race condition!', os.path.exists(OUTPUT_DIR)
STATSFILE = OUTPUT_DIR + str(JOB_INDEX).zfill(2) + SUFFIX + 'p'
MODELFILE = OUTPUT_DIR + str(JOB_INDEX).zfill(2) + SUFFIX + 'h5'
print 'Working in', OUTPUT_DIR
print 'Storing', STATSFILE
print 'Storing', MODELFILE
if os.path.exists(STATSFILE) and os.path.exists(MODELFILE):
print 'WAIT A MINUTE!! WE HAVE DONE THIS ONE BEFORE!'
sys.exit(0)
#
#
# DATA GENERATION
#
#
train_counter = 0
val_counter = 0
test_counter = 0
train_target = 60000
val_target = 20000
test_target = 20000
train_labels = []
val_labels = []
test_labels = []
X_train = np.zeros((train_target, 100, 100), dtype=np.float32)
y_train = np.zeros((train_target, 2), dtype=np.float32)
X_val = np.zeros((val_target, 100, 100), dtype=np.float32)
y_val = np.zeros((val_target, 2), dtype=np.float32)
X_test = np.zeros((test_target, 100, 100), dtype=np.float32)
y_test = np.zeros((test_target, 2), dtype=np.float32)
t0 = time.time()
all_counter = 0
while train_counter < train_target or val_counter < val_target or test_counter < test_target:
all_counter += 1
data, label, parameters = C.Figure12.generate_datapoint()
pot = np.random.choice(3)
# sometimes we know which pot is right
if label in train_labels:
pot = 0
if label in val_labels:
pot = 1
if label in test_labels:
pot = 2
if pot == 0 and train_counter < train_target:
if label not in train_labels:
train_labels.append(label)
#
image = DATATYPE(data)
image = image.astype(np.float32)
# add noise?
if NOISE:
image += np.random.uniform(0, 0.05,(100,100))
# safe to add to training
X_train[train_counter] = image
y_train[train_counter] = label
train_counter += 1
elif pot == 1 and val_counter < val_target:
if label not in val_labels:
val_labels.append(label)
image = DATATYPE(data)
image = image.astype(np.float32)
# add noise?
if NOISE:
image += np.random.uniform(0, 0.05,(100,100))
# safe to add to training
X_val[val_counter] = image
y_val[val_counter] = label
val_counter += 1
elif pot == 2 and test_counter < test_target:
if label not in test_labels:
test_labels.append(label)
image = DATATYPE(data)
image = image.astype(np.float32)
# add noise?
if NOISE:
image += np.random.uniform(0, 0.05,(100,100))
# safe to add to training
X_test[test_counter] = image
y_test[test_counter] = label
test_counter += 1
print 'Done', time.time()-t0, 'seconds (', all_counter, 'iterations)'
#
#
#
#
#
# NORMALIZE DATA IN-PLACE (BUT SEPERATELY)
#
#
X_min = X_train.min()
X_max = X_train.max()
y_min = y_train.min()
y_max = y_train.max()
# scale in place
X_train -= X_min
X_train /= (X_max - X_min)
y_train -= y_min
y_train /= (y_max - y_min)
X_val -= X_min
X_val /= (X_max - X_min)
y_val -= y_min
y_val /= (y_max - y_min)
X_test -= X_min
X_test /= (X_max - X_min)
y_test -= y_min
y_test /= (y_max - y_min)
# normalize to -.5 .. .5
X_train -= .5
X_val -= .5
X_test -= .5
print 'memory usage', (X_train.nbytes + X_val.nbytes + X_test.nbytes + y_train.nbytes + y_val.nbytes + y_test.nbytes) / 1000000., 'MB'
#
#
#
#
#
# FEATURE GENERATION
#
#
feature_time = 0
if CLASSIFIER == 'VGG19' or CLASSIFIER == 'XCEPTION':
X_train_3D = np.stack((X_train,)*3, -1)
X_val_3D = np.stack((X_val,)*3, -1)
X_test_3D = np.stack((X_test,)*3, -1)
print 'memory usage', (X_train_3D.nbytes + X_val_3D.nbytes + X_test_3D.nbytes) / 1000000., 'MB'
if CLASSIFIER == 'VGG19':
feature_generator = keras.applications.VGG19(include_top=False, input_shape=(100,100,3))
elif CLASSIFIER == 'XCEPTION':
feature_generator = keras.applications.Xception(include_top=False, input_shape=(100,100,3))
elif CLASSIFIER == 'RESNET50':
print 'Not yet - we need some padding and so on!!!'
sys.exit(1)
t0 = time.time()
#
# THE MLP
#
#
MLP = models.Sequential()
MLP.add(layers.Flatten(input_shape=feature_generator.output_shape[1:]))
MLP.add(layers.Dense(256, activation='relu', input_dim=(100,100,3)))
MLP.add(layers.Dropout(0.5))
MLP.add(layers.Dense(2, activation='linear')) # REGRESSION
model = keras.Model(inputs=feature_generator.input, outputs=MLP(feature_generator.output))
sgd = optimizers.SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['mse', 'mae']) # MSE for regression
#
#
# TRAINING
#
#
t0 = time.time()
callbacks = [keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0, mode='auto'), \
keras.callbacks.ModelCheckpoint(MODELFILE, monitor='val_loss', verbose=1, save_best_only=True, mode='min')]
history = model.fit(X_train_3D, \
y_train, \
epochs=1000, \
batch_size=32, \
validation_data=(X_val_3D, y_val),
callbacks=callbacks,
verbose=True)
fit_time = time.time()-t0
print 'Fitting done', time.time()-t0
#
#
# PREDICTION
#
#
y_pred = model.predict(X_test_3D)
#
#
# CLEVELAND MCGILL ERROR
# MEANS OF LOG ABSOLUTE ERRORS (MLAEs)
#
MLAE = np.log2(sklearn.metrics.mean_absolute_error(y_pred*100, y_test*100)+.125)
#
#
# STORE
# (THE NETWORK IS ALREADY STORED BASED ON THE CALLBACK FROM ABOVE!)
#
stats = dict(history.history)
# 1. the training history
# 2. the y_pred and y_test values
# 3. the MLAE
stats['time'] = feature_time + fit_time
stats['y_test'] = y_test
stats['y_pred'] = y_pred
stats['MLAE'] = MLAE
with open(STATSFILE, 'w') as f:
pickle.dump(stats, f)
print 'MLAE', MLAE
print 'Written', STATSFILE
print 'Written', MODELFILE
print 'Sayonara! All done here.'
| [
"[email protected]"
]
| |
06d60305b023671e68cfce3af716ffa147a882e4 | c934e7c27f0e72385218a14b4e2a7e94a747a360 | /google-cloud-sdk/lib/googlecloudsdk/third_party/apis/kubernetesedge/v1alpha1/kubernetesedge_v1alpha1_client.py | 23a8fa7854e9c71a4bec490a3f84ac77e3adee16 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | PrateekKhatri/gcloud_cli | 5f74b97494df4f61816026af9460b9c4d8e89431 | 849d09dd7863efecbdf4072a504e1554e119f6ae | refs/heads/master | 2023-03-27T05:53:53.796695 | 2021-03-10T04:08:14 | 2021-03-10T04:08:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,509 | py | """Generated client library for kubernetesedge version v1alpha1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.kubernetesedge.v1alpha1 import kubernetesedge_v1alpha1_messages as messages
class KubernetesedgeV1alpha1(base_api.BaseApiClient):
"""Generated client library for service kubernetesedge version v1alpha1."""
MESSAGES_MODULE = messages
BASE_URL = 'https://kubernetesedge.googleapis.com/'
MTLS_BASE_URL = 'https://kubernetesedge.mtls.googleapis.com/'
_PACKAGE = 'kubernetesedge'
_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
_VERSION = 'v1alpha1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'google-cloud-sdk'
_CLIENT_CLASS_NAME = 'KubernetesedgeV1alpha1'
_URL_VERSION = 'v1alpha1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new kubernetesedge handle."""
url = url or self.BASE_URL
super(KubernetesedgeV1alpha1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_locations_clusters_nodePools_nodes = self.ProjectsLocationsClustersNodePoolsNodesService(self)
self.projects_locations_clusters_nodePools = self.ProjectsLocationsClustersNodePoolsService(self)
self.projects_locations_clusters = self.ProjectsLocationsClustersService(self)
self.projects_locations_machines = self.ProjectsLocationsMachinesService(self)
self.projects_locations_nodePools = self.ProjectsLocationsNodePoolsService(self)
self.projects_locations_operations = self.ProjectsLocationsOperationsService(self)
self.projects_locations_sites_machines = self.ProjectsLocationsSitesMachinesService(self)
self.projects_locations_sites = self.ProjectsLocationsSitesService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects = self.ProjectsService(self)
class ProjectsLocationsClustersNodePoolsNodesService(base_api.BaseApiService):
"""Service class for the projects_locations_clusters_nodePools_nodes resource."""
_NAME = 'projects_locations_clusters_nodePools_nodes'
def __init__(self, client):
super(KubernetesedgeV1alpha1.ProjectsLocationsClustersNodePoolsNodesService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets details of a single Node.
Args:
request: (KubernetesedgeProjectsLocationsClustersNodePoolsNodesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Node) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}/nodes/{nodesId}',
http_method='GET',
method_id='kubernetesedge.projects.locations.clusters.nodePools.nodes.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsClustersNodePoolsNodesGetRequest',
response_type_name='Node',
supports_download=False,
)
class ProjectsLocationsClustersNodePoolsService(base_api.BaseApiService):
"""Service class for the projects_locations_clusters_nodePools resource."""
_NAME = 'projects_locations_clusters_nodePools'
def __init__(self, client):
super(KubernetesedgeV1alpha1.ProjectsLocationsClustersNodePoolsService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a new NodePool in a given project and location.
Args:
request: (KubernetesedgeProjectsLocationsClustersNodePoolsCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools',
http_method='POST',
method_id='kubernetesedge.projects.locations.clusters.nodePools.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=['nodePoolId', 'requestId'],
relative_path='v1alpha1/{+parent}/nodePools',
request_field='nodePool',
request_type_name='KubernetesedgeProjectsLocationsClustersNodePoolsCreateRequest',
response_type_name='Operation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a single NodePool.
Args:
request: (KubernetesedgeProjectsLocationsClustersNodePoolsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}',
http_method='DELETE',
method_id='kubernetesedge.projects.locations.clusters.nodePools.delete',
ordered_params=['name'],
path_params=['name'],
query_params=['requestId'],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsClustersNodePoolsDeleteRequest',
response_type_name='Operation',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets details of a single NodePool.
Args:
request: (KubernetesedgeProjectsLocationsClustersNodePoolsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(NodePool) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}',
http_method='GET',
method_id='kubernetesedge.projects.locations.clusters.nodePools.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsClustersNodePoolsGetRequest',
response_type_name='NodePool',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists NodePools in a given project and location.
Args:
request: (KubernetesedgeProjectsLocationsClustersNodePoolsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListNodePoolsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools',
http_method='GET',
method_id='kubernetesedge.projects.locations.clusters.nodePools.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['filter', 'orderBy', 'pageSize', 'pageToken'],
relative_path='v1alpha1/{+parent}/nodePools',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsClustersNodePoolsListRequest',
response_type_name='ListNodePoolsResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates the parameters of a single NodePool.
Args:
request: (KubernetesedgeProjectsLocationsClustersNodePoolsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}',
http_method='PATCH',
method_id='kubernetesedge.projects.locations.clusters.nodePools.patch',
ordered_params=['name'],
path_params=['name'],
query_params=['requestId', 'updateMask'],
relative_path='v1alpha1/{+name}',
request_field='nodePool',
request_type_name='KubernetesedgeProjectsLocationsClustersNodePoolsPatchRequest',
response_type_name='Operation',
supports_download=False,
)
class ProjectsLocationsClustersService(base_api.BaseApiService):
"""Service class for the projects_locations_clusters resource."""
_NAME = 'projects_locations_clusters'
def __init__(self, client):
super(KubernetesedgeV1alpha1.ProjectsLocationsClustersService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a new Cluster in a given project and location.
Args:
request: (KubernetesedgeProjectsLocationsClustersCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters',
http_method='POST',
method_id='kubernetesedge.projects.locations.clusters.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=['clusterId', 'requestId'],
relative_path='v1alpha1/{+parent}/clusters',
request_field='cluster',
request_type_name='KubernetesedgeProjectsLocationsClustersCreateRequest',
response_type_name='Operation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a single Cluster.
Args:
request: (KubernetesedgeProjectsLocationsClustersDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}',
http_method='DELETE',
method_id='kubernetesedge.projects.locations.clusters.delete',
ordered_params=['name'],
path_params=['name'],
query_params=['requestId'],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsClustersDeleteRequest',
response_type_name='Operation',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets details of a single Cluster.
Args:
request: (KubernetesedgeProjectsLocationsClustersGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Cluster) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}',
http_method='GET',
method_id='kubernetesedge.projects.locations.clusters.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsClustersGetRequest',
response_type_name='Cluster',
supports_download=False,
)
def GetIamPolicy(self, request, global_params=None):
r"""Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
Args:
request: (KubernetesedgeProjectsLocationsClustersGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:getIamPolicy',
http_method='GET',
method_id='kubernetesedge.projects.locations.clusters.getIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=['options_requestedPolicyVersion'],
relative_path='v1alpha1/{+resource}:getIamPolicy',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsClustersGetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists Clusters in a given project and location.
Args:
request: (KubernetesedgeProjectsLocationsClustersListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListClustersResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters',
http_method='GET',
method_id='kubernetesedge.projects.locations.clusters.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['filter', 'orderBy', 'pageSize', 'pageToken'],
relative_path='v1alpha1/{+parent}/clusters',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsClustersListRequest',
response_type_name='ListClustersResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates the parameters of a single Cluster.
Args:
request: (KubernetesedgeProjectsLocationsClustersPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}',
http_method='PATCH',
method_id='kubernetesedge.projects.locations.clusters.patch',
ordered_params=['name'],
path_params=['name'],
query_params=['requestId', 'updateMask'],
relative_path='v1alpha1/{+name}',
request_field='cluster',
request_type_name='KubernetesedgeProjectsLocationsClustersPatchRequest',
response_type_name='Operation',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
r"""Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
Args:
request: (KubernetesedgeProjectsLocationsClustersSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setIamPolicy',
http_method='POST',
method_id='kubernetesedge.projects.locations.clusters.setIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1alpha1/{+resource}:setIamPolicy',
request_field='setIamPolicyRequest',
request_type_name='KubernetesedgeProjectsLocationsClustersSetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
r"""Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
Args:
request: (KubernetesedgeProjectsLocationsClustersTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:testIamPermissions',
http_method='POST',
method_id='kubernetesedge.projects.locations.clusters.testIamPermissions',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1alpha1/{+resource}:testIamPermissions',
request_field='testIamPermissionsRequest',
request_type_name='KubernetesedgeProjectsLocationsClustersTestIamPermissionsRequest',
response_type_name='TestIamPermissionsResponse',
supports_download=False,
)
class ProjectsLocationsMachinesService(base_api.BaseApiService):
"""Service class for the projects_locations_machines resource."""
_NAME = 'projects_locations_machines'
def __init__(self, client):
super(KubernetesedgeV1alpha1.ProjectsLocationsMachinesService, self).__init__(client)
self._upload_configs = {
}
def GetIamPolicy(self, request, global_params=None):
r"""Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
Args:
request: (KubernetesedgeProjectsLocationsMachinesGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/machines/{machinesId}:getIamPolicy',
http_method='GET',
method_id='kubernetesedge.projects.locations.machines.getIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=['options_requestedPolicyVersion'],
relative_path='v1alpha1/{+resource}:getIamPolicy',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsMachinesGetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
r"""Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
Args:
request: (KubernetesedgeProjectsLocationsMachinesSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/machines/{machinesId}:setIamPolicy',
http_method='POST',
method_id='kubernetesedge.projects.locations.machines.setIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1alpha1/{+resource}:setIamPolicy',
request_field='setIamPolicyRequest',
request_type_name='KubernetesedgeProjectsLocationsMachinesSetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
r"""Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
Args:
request: (KubernetesedgeProjectsLocationsMachinesTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/machines/{machinesId}:testIamPermissions',
http_method='POST',
method_id='kubernetesedge.projects.locations.machines.testIamPermissions',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1alpha1/{+resource}:testIamPermissions',
request_field='testIamPermissionsRequest',
request_type_name='KubernetesedgeProjectsLocationsMachinesTestIamPermissionsRequest',
response_type_name='TestIamPermissionsResponse',
supports_download=False,
)
class ProjectsLocationsNodePoolsService(base_api.BaseApiService):
"""Service class for the projects_locations_nodePools resource."""
_NAME = 'projects_locations_nodePools'
def __init__(self, client):
super(KubernetesedgeV1alpha1.ProjectsLocationsNodePoolsService, self).__init__(client)
self._upload_configs = {
}
def GetIamPolicy(self, request, global_params=None):
r"""Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
Args:
request: (KubernetesedgeProjectsLocationsNodePoolsGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/nodePools/{nodePoolsId}:getIamPolicy',
http_method='GET',
method_id='kubernetesedge.projects.locations.nodePools.getIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=['options_requestedPolicyVersion'],
relative_path='v1alpha1/{+resource}:getIamPolicy',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsNodePoolsGetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
r"""Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
Args:
request: (KubernetesedgeProjectsLocationsNodePoolsSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/nodePools/{nodePoolsId}:setIamPolicy',
http_method='POST',
method_id='kubernetesedge.projects.locations.nodePools.setIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1alpha1/{+resource}:setIamPolicy',
request_field='setIamPolicyRequest',
request_type_name='KubernetesedgeProjectsLocationsNodePoolsSetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
r"""Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
Args:
request: (KubernetesedgeProjectsLocationsNodePoolsTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/nodePools/{nodePoolsId}:testIamPermissions',
http_method='POST',
method_id='kubernetesedge.projects.locations.nodePools.testIamPermissions',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1alpha1/{+resource}:testIamPermissions',
request_field='testIamPermissionsRequest',
request_type_name='KubernetesedgeProjectsLocationsNodePoolsTestIamPermissionsRequest',
response_type_name='TestIamPermissionsResponse',
supports_download=False,
)
class ProjectsLocationsOperationsService(base_api.BaseApiService):
"""Service class for the projects_locations_operations resource."""
_NAME = 'projects_locations_operations'
def __init__(self, client):
super(KubernetesedgeV1alpha1.ProjectsLocationsOperationsService, self).__init__(client)
self._upload_configs = {
}
def Cancel(self, request, global_params=None):
r"""Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
Args:
request: (KubernetesedgeProjectsLocationsOperationsCancelRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Cancel')
return self._RunMethod(
config, request, global_params=global_params)
Cancel.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel',
http_method='POST',
method_id='kubernetesedge.projects.locations.operations.cancel',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}:cancel',
request_field='cancelOperationRequest',
request_type_name='KubernetesedgeProjectsLocationsOperationsCancelRequest',
response_type_name='Empty',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
Args:
request: (KubernetesedgeProjectsLocationsOperationsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
http_method='DELETE',
method_id='kubernetesedge.projects.locations.operations.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsOperationsDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
Args:
request: (KubernetesedgeProjectsLocationsOperationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
http_method='GET',
method_id='kubernetesedge.projects.locations.operations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsOperationsGetRequest',
response_type_name='Operation',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `"/v1/{name=users/*}/operations"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.
Args:
request: (KubernetesedgeProjectsLocationsOperationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListOperationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/operations',
http_method='GET',
method_id='kubernetesedge.projects.locations.operations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1alpha1/{+name}/operations',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsOperationsListRequest',
response_type_name='ListOperationsResponse',
supports_download=False,
)
class ProjectsLocationsSitesMachinesService(base_api.BaseApiService):
"""Service class for the projects_locations_sites_machines resource."""
_NAME = 'projects_locations_sites_machines'
def __init__(self, client):
super(KubernetesedgeV1alpha1.ProjectsLocationsSitesMachinesService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets details of a single Machine.
Args:
request: (KubernetesedgeProjectsLocationsSitesMachinesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Machine) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/sites/{sitesId}/machines/{machinesId}',
http_method='GET',
method_id='kubernetesedge.projects.locations.sites.machines.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsSitesMachinesGetRequest',
response_type_name='Machine',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists Machines in a given project and location.
Args:
request: (KubernetesedgeProjectsLocationsSitesMachinesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListMachinesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/sites/{sitesId}/machines',
http_method='GET',
method_id='kubernetesedge.projects.locations.sites.machines.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['filter', 'orderBy', 'pageSize', 'pageToken'],
relative_path='v1alpha1/{+parent}/machines',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsSitesMachinesListRequest',
response_type_name='ListMachinesResponse',
supports_download=False,
)
class ProjectsLocationsSitesService(base_api.BaseApiService):
"""Service class for the projects_locations_sites resource."""
_NAME = 'projects_locations_sites'
def __init__(self, client):
super(KubernetesedgeV1alpha1.ProjectsLocationsSitesService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets details of a single Site.
Args:
request: (KubernetesedgeProjectsLocationsSitesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Site) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/sites/{sitesId}',
http_method='GET',
method_id='kubernetesedge.projects.locations.sites.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsSitesGetRequest',
response_type_name='Site',
supports_download=False,
)
def GetIamPolicy(self, request, global_params=None):
r"""Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
Args:
request: (KubernetesedgeProjectsLocationsSitesGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/sites/{sitesId}:getIamPolicy',
http_method='GET',
method_id='kubernetesedge.projects.locations.sites.getIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=['options_requestedPolicyVersion'],
relative_path='v1alpha1/{+resource}:getIamPolicy',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsSitesGetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists Sites in a given project and location.
Args:
request: (KubernetesedgeProjectsLocationsSitesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListSitesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/sites',
http_method='GET',
method_id='kubernetesedge.projects.locations.sites.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['filter', 'orderBy', 'pageSize', 'pageToken'],
relative_path='v1alpha1/{+parent}/sites',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsSitesListRequest',
response_type_name='ListSitesResponse',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
r"""Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
Args:
request: (KubernetesedgeProjectsLocationsSitesSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/sites/{sitesId}:setIamPolicy',
http_method='POST',
method_id='kubernetesedge.projects.locations.sites.setIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1alpha1/{+resource}:setIamPolicy',
request_field='setIamPolicyRequest',
request_type_name='KubernetesedgeProjectsLocationsSitesSetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
r"""Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
Args:
request: (KubernetesedgeProjectsLocationsSitesTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/sites/{sitesId}:testIamPermissions',
http_method='POST',
method_id='kubernetesedge.projects.locations.sites.testIamPermissions',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1alpha1/{+resource}:testIamPermissions',
request_field='testIamPermissionsRequest',
request_type_name='KubernetesedgeProjectsLocationsSitesTestIamPermissionsRequest',
response_type_name='TestIamPermissionsResponse',
supports_download=False,
)
class ProjectsLocationsService(base_api.BaseApiService):
"""Service class for the projects_locations resource."""
_NAME = 'projects_locations'
def __init__(self, client):
super(KubernetesedgeV1alpha1.ProjectsLocationsService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets information about a location.
Args:
request: (KubernetesedgeProjectsLocationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Location) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}',
http_method='GET',
method_id='kubernetesedge.projects.locations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsGetRequest',
response_type_name='Location',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists information about the supported locations for this service.
Args:
request: (KubernetesedgeProjectsLocationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListLocationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations',
http_method='GET',
method_id='kubernetesedge.projects.locations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1alpha1/{+name}/locations',
request_field='',
request_type_name='KubernetesedgeProjectsLocationsListRequest',
response_type_name='ListLocationsResponse',
supports_download=False,
)
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = 'projects'
def __init__(self, client):
super(KubernetesedgeV1alpha1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
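# Hedged usage sketch, not part of the generated client: the service attribute,
# credentials object and `messages` module below are assumptions based on the
# usual apitools conventions, while the request type name comes from the method
# configs above.
#
#   client = KubernetesedgeV1alpha1(credentials=creds)
#   req = messages.KubernetesedgeProjectsLocationsClustersListRequest(
#       parent='projects/my-project/locations/us-central1')
#   resp = client.projects_locations_clusters.List(req)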
| [
"[email protected]"
]
| |
ad8d2f89fbfbfa9db5342a0c79f661cc9644b2d1 | bbcaed21db23d08ebf11fabe0c6b8d0ee551a749 | /q2_dummy_types/__init__.py | ccc172e8439e27f14d38fa77465ecbed5345f16f | []
| no_license | ebolyen/q2-dummy-types | c34707e2a3e7ef1ad6abf76b1b9fabbf9c7d7222 | 3ac8f643e5f88a6de2413bebf8fb16c1b3168e4b | refs/heads/master | 2021-01-10T22:47:04.455265 | 2016-10-08T19:20:40 | 2016-10-08T19:20:40 | 70,352,411 | 0 | 0 | null | 2016-10-08T19:17:24 | 2016-10-08T19:17:23 | null | UTF-8 | Python | false | false | 667 | py | # ----------------------------------------------------------------------------
# Copyright (c) 2016--, QIIME development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
__version__ = "0.0.1" # noqa
# Make the types defined in this plugin importable from the top-level package
# so they can be easily imported by other plugins relying on these types.
from ._int_sequence import IntSequence1, IntSequence2
from ._mapping import Mapping
__all__ = ['IntSequence1', 'IntSequence2', 'Mapping']
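# Illustrative usage (not in the original file): downstream plugins can then
# import the types straight from the package root, e.g.
#   from q2_dummy_types import IntSequence1, Mapping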
| [
"[email protected]"
]
| |
55275eba43f18555f947ab6986f6a7ba8440046f | ae376963049fdff111d0f844b25d7a907acb27f5 | /Python/Fundamentals/0.8-Text-Processing/Lab/reverse_strings.py | 2a98b6cd6ae2c93c0179292403280e14bf908480 | []
| no_license | Pittor052/SoftUni-Courses | bdc5968244de497f1aaaff7c6c6879c624791ad8 | 43216ee69e7eec59c9f14377f57ae1ef8d9cce8d | refs/heads/main | 2023-08-14T05:12:11.966927 | 2021-09-28T13:33:30 | 2021-09-28T13:33:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | word = input()
while word != "end":
print(f"{word} = {word[::-1]}")
word = input()
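# Example session (illustrative): entering "hello" and then "end" prints
#   hello = olleh
# and the loop stops on "end".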
| [
"[email protected]"
]
| |
f797180c6e34873f308b5114f2ce5068da8416ee | 6f0f40a1d60fecb9c1516e8119e9aade3587659d | /tensorflow/python/distribute/input_lib_test.py | 2ca11024e431262d534fa92f5516615fa2d4ee31 | [
"Apache-2.0"
]
| permissive | gdwangh/tensorflow | 65d3019ec8e385db7640b313a48f218ec14839e0 | 71eb168f453bd035df4d8ba27383ef53527e3aaa | refs/heads/master | 2020-05-09T17:30:11.243619 | 2019-04-14T09:02:24 | 2019-04-14T09:08:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,496 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the input_lib library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python import tf2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import errors
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.util import nest
class DistributedIteratorTestBase(test.TestCase):
def _wrap_iterator(self, input_type, dataset_fn, input_workers, devices,
split_batch_by, enable_get_next_as_optional):
if input_type == "input_fn":
input_contexts = []
for i in range(input_workers.num_workers):
input_contexts.append(
distribute_lib.InputContext(
num_input_pipelines=input_workers.num_workers,
input_pipeline_id=i,
num_replicas_in_sync=len(devices)))
iterator = input_lib.InputFunctionIterator(
dataset_fn, input_workers, input_contexts,
_enable_get_next_as_optional=enable_get_next_as_optional)
else:
iterator = input_lib.DatasetIterator(
dataset_fn(distribute_lib.InputContext()), input_workers,
split_batch_by,
_enable_get_next_as_optional=enable_get_next_as_optional)
return iterator
def _wrap_dataset(self, input_type, dataset, input_workers,
split_batch_by, enable_get_next_as_optional):
if isinstance(dataset, dataset_ops.Dataset):
return input_lib.DistributedDatasetV1(
dataset, input_workers,
split_batch_by,
_enable_get_next_as_optional=enable_get_next_as_optional)
else:
return input_lib.DistributedDataset(
dataset, input_workers,
split_batch_by,
_enable_get_next_as_optional=enable_get_next_as_optional)
def _test_input_iteration(self,
input_type,
api_type,
iteration_type,
dataset_fn,
worker_device_pairs,
expected_values,
sess=None,
split_batch_by=None,
enable_get_next_as_optional=False):
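    # Shared helper: wraps `dataset_fn` into either a distributed iterator
    # ("wrap_into_iterator") or a distributed dataset ("wrap_into_dataset")
    # over `worker_device_pairs`, then checks that get_next()/for-loop
    # iteration yields `expected_values` per replica, including iterating
    # again after re-initialization.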
if iteration_type == "for_loop" and not context.executing_eagerly():
self.skipTest("unsupported test combination.")
if api_type == "wrap_into_iterator" and iteration_type == "for_loop":
self.skipTest("unsupported test combination.")
if api_type == "wrap_into_dataset" and input_type == "input_fn":
self.skipTest("unsupported test combination.")
devices = nest.flatten([ds for _, ds in worker_device_pairs])
device_map = values.ReplicaDeviceMap(devices)
input_workers = input_lib.InputWorkers(device_map, worker_device_pairs)
if api_type == "wrap_into_iterator":
iterator = self._wrap_iterator(
input_type, dataset_fn, input_workers, devices, split_batch_by,
enable_get_next_as_optional)
else:
# wrapping into a dataset:
given_dataset = dataset_fn(distribute_lib.InputContext())
dataset = self._wrap_dataset(input_type, given_dataset, input_workers,
split_batch_by, enable_get_next_as_optional)
if context.executing_eagerly():
iterator = iter(dataset)
else:
# In graph mode currently we only have support for creating iterators
# for datasetV1 instances.
if not isinstance(dataset, dataset_ops.DatasetV1):
self.skipTest("unsupported test combination")
iterator = dataset.make_one_shot_iterator()
if iteration_type == "get_next":
evaluate = lambda x: sess.run(x) if sess else self.evaluate(x)
if isinstance(iterator, input_lib.DistributedIteratorV1):
evaluate(control_flow_ops.group(iterator.initialize()))
else:
evaluate(control_flow_ops.group(iterator._initializer))
for expected_value in expected_values:
next_element = iterator.get_next()
computed_value = evaluate(
[values.select_replica(r,
next_element) for r in range(len(devices))])
self.assertEqual(len(expected_value), len(computed_value))
for i in range(len(expected_value)):
self.assertAllEqual(expected_value[i], computed_value[i])
with self.assertRaises(errors.OutOfRangeError):
next_element = iterator.get_next()
evaluate(
[values.select_replica(r,
next_element) for r in range(len(devices))])
# After re-initializing the iterator, should be able to iterate again.
if isinstance(iterator, input_lib.DistributedIteratorV1):
evaluate(control_flow_ops.group(iterator.initialize()))
else:
evaluate(control_flow_ops.group(iterator._initializer))
for expected_value in expected_values:
next_element = iterator.get_next()
computed_value = evaluate(
[values.select_replica(r,
next_element) for r in range(len(devices))])
self.assertEqual(len(expected_value), len(computed_value))
for i in range(len(expected_value)):
self.assertAllEqual(expected_value[i], computed_value[i])
if iteration_type == "for_loop" and context.executing_eagerly():
actual_values = []
for x in dataset:
computed_value = self.evaluate(
[values.select_replica(r, x) for r in range(len(devices))])
actual_values.append(computed_value)
for i, expected_value in enumerate(expected_values):
self.assertEqual(len(expected_value), len(actual_values[i]))
for j in range(len(expected_value)):
self.assertAllEqual(expected_value[j], actual_values[i][j])
class DistributedIteratorSingleWorkerTest(DistributedIteratorTestBase,
parameterized.TestCase):
def testGraphModeError(self):
with context.graph_mode():
worker_device_pairs = [("", ["/device:CPU:0"])]
devices = nest.flatten([ds for _, ds in worker_device_pairs])
device_map = values.ReplicaDeviceMap(devices)
input_workers = input_lib.InputWorkers(device_map, worker_device_pairs)
dataset = dataset_ops.Dataset.range(10).batch(2)
with self.assertRaisesRegexp(RuntimeError,
"__iter__ is only "
"supported when eager execution is "
"enabled."):
dist_dataset = input_lib.DistributedDatasetV1(dataset, input_workers)
iter(dist_dataset)
@combinations.generate(combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"]))
def testOneDeviceCPU(self, input_type, api_type, iteration_type):
worker_device_pairs = [("", ["/device:CPU:0"])]
if tf2.enabled():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(10)
else:
dataset_fn = lambda _: dataset_ops.Dataset.range(10)
expected_values = [[i] for i in range(10)]
self._test_input_iteration(input_type, api_type, iteration_type, dataset_fn,
worker_device_pairs, expected_values)
@combinations.generate(combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
required_gpus=1))
def testTwoDevicesOneGPUOneCPU(self, input_type, api_type, iteration_type):
worker_device_pairs = [("", ["/device:GPU:0", "/device:CPU:0"])]
if tf2.enabled():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(10)
else:
dataset_fn = lambda _: dataset_ops.Dataset.range(10)
expected_values = [[i, i+1] for i in range(0, 10, 2)]
self._test_input_iteration(input_type, api_type, iteration_type, dataset_fn,
worker_device_pairs, expected_values)
@combinations.generate(combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
required_gpus=1))
def testTupleDataset(self, input_type, api_type, iteration_type):
worker_device_pairs = [("", ["/device:GPU:0", "/device:CPU:0"])]
def dataset_fn(ctx):
del ctx
if tf2.enabled():
dataset1 = dataset_ops.Dataset.range(10)
dataset2 = dataset_ops.Dataset.range(10).map(lambda x: x**2)
return dataset_ops.Dataset.zip((dataset1, dataset2))
else:
dataset1 = dataset_ops.DatasetV2.range(10)
dataset2 = dataset_ops.DatasetV2.range(10).map(lambda x: x**2)
return dataset_ops.DatasetV2.zip((dataset1, dataset2))
expected_values = [[(i, i**2), (i+1, (i+1)**2)] for i in range(0, 10, 2)]
self._test_input_iteration(input_type, api_type, iteration_type, dataset_fn,
worker_device_pairs, expected_values)
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
required_gpus=1))
def testUnevenDatasetBatches(self, input_type, api_type, iteration_type):
worker_device_pairs = [("", ["/device:GPU:0", "/device:CPU:0"])]
if tf2.enabled():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(9).batch(2)
else:
dataset_fn = lambda _: dataset_ops.Dataset.range(9).batch(2)
# The last global batch only contains data for one replica.
expected_values = [[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8], []]]
self._test_input_iteration(input_type, api_type, iteration_type, dataset_fn,
worker_device_pairs, expected_values,
enable_get_next_as_optional=True)
@combinations.generate(combinations.combine(
mode=["graph", "eager"],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
split_batch_by=[None, 2],
required_gpus=1))
def testBatchSplitting(self, input_type, api_type, iteration_type,
split_batch_by):
worker_device_pairs = [("", ["/device:GPU:0", "/device:CPU:0"])]
batch_size = 10
if tf2.enabled():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(100).batch(batch_size)
else:
dataset_fn = lambda _: dataset_ops.Dataset.range(100).batch(batch_size)
updated_batch_size = (
batch_size // split_batch_by if split_batch_by else batch_size)
expected_values = [[range(i, i+updated_batch_size),
range(i+updated_batch_size, i+2*updated_batch_size)]
for i in range(0, 100, updated_batch_size*2)]
self._test_input_iteration(input_type, api_type, iteration_type, dataset_fn,
worker_device_pairs, expected_values, sess=None,
split_batch_by=split_batch_by)
class DistributedIteratorMultiWorkerTest(
multi_worker_test_base.MultiWorkerTestBase, DistributedIteratorTestBase,
parameterized.TestCase):
def _cpu_devices(self):
return [
("/job:worker/replica:0/task:0",
["/job:worker/replica:0/task:0/device:CPU:0"]),
("/job:worker/replica:0/task:1",
["/job:worker/replica:0/task:1/device:CPU:0"])]
def _cpu_and_one_gpu_devices(self):
return [
("/job:worker/replica:0/task:0", [
"/job:worker/replica:0/task:0/device:GPU:0",
"/job:worker/replica:0/task:0/device:CPU:0"
]),
("/job:worker/replica:0/task:1", [
"/job:worker/replica:0/task:1/device:GPU:0",
"/job:worker/replica:0/task:1/device:CPU:0"
])
]
@combinations.generate(combinations.combine(
mode=["graph"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"]))
def testOneDevicePerWorker(self, input_type, api_type, iteration_type):
worker_devices = self._cpu_devices()
with context.graph_mode(), self.cached_session() as sess:
if tf2.enabled():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(4)
else:
dataset_fn = lambda _: dataset_ops.Dataset.range(4)
if input_type == "dataset":
# Autosharded
expected_values = [[0, 1], [2, 3]]
else:
expected_values = [[0, 0], [1, 1], [2, 2], [3, 3]]
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_fn, worker_devices,
expected_values, sess)
@combinations.generate(combinations.combine(
mode=["graph"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
required_gpus=1))
def testTwoDevicesPerWorker(self, input_type, api_type, iteration_type):
worker_devices = self._cpu_and_one_gpu_devices()
with context.graph_mode(), self.cached_session() as sess:
if tf2.enabled():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(4)
else:
dataset_fn = lambda _: dataset_ops.Dataset.range(4)
if input_type == "dataset":
# Autosharded
expected_values = [[0, 2, 1, 3]]
else:
expected_values = [[0, 1, 0, 1], [2, 3, 2, 3]]
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_fn, worker_devices,
expected_values, sess)
@combinations.generate(combinations.combine(
mode=["graph"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"]))
def testTupleDataset(self, input_type, api_type, iteration_type):
worker_devices = self._cpu_devices()
with context.graph_mode(), self.cached_session() as sess:
def dataset_fn(ctx):
del ctx
if tf2.enabled():
dataset1 = dataset_ops.DatasetV2.range(4)
dataset2 = dataset_ops.DatasetV2.range(4).map(lambda x: x**2)
return dataset_ops.DatasetV2.zip((dataset1, dataset2))
else:
dataset1 = dataset_ops.Dataset.range(4)
dataset2 = dataset_ops.Dataset.range(4).map(lambda x: x**2)
return dataset_ops.Dataset.zip((dataset1, dataset2))
if input_type == "dataset":
# Autosharded
expected_values = [[(0, 0), (1, 1)], [(2, 4), (3, 9)]]
else:
expected_values = [[(i, i**2), (i, i**2)] for i in range(0, 4)]
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_fn, worker_devices, expected_values,
sess)
@combinations.generate(
combinations.combine(
mode=["graph"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
required_gpus=1))
def testUnevenDatasetBatches(self, input_type, api_type, iteration_type):
worker_devices = self._cpu_and_one_gpu_devices()
with context.graph_mode(), self.cached_session() as sess:
if tf2.enabled():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(9).batch(2)
else:
dataset_fn = lambda _: dataset_ops.Dataset.range(9).batch(2)
if input_type == "dataset":
# Autosharded
expected_values = [[[0, 1], [4, 5], [2, 3], [6, 7]], [[8], [], [], []]]
else:
expected_values = [[[0, 1], [2, 3], [0, 1], [2, 3]],
[[4, 5], [6, 7], [4, 5], [6, 7]], [[8], [], [8], []]]
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_fn, worker_devices, expected_values,
sess, enable_get_next_as_optional=True)
@combinations.generate(
combinations.combine(
mode=["graph"], input_type=["input_fn"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
required_gpus=1))
def testDifferentDatasets(self, input_type, api_type, iteration_type):
worker_devices = self._cpu_and_one_gpu_devices()
with context.graph_mode(), self.cached_session() as sess:
def dataset_fn(ctx):
if ctx.input_pipeline_id == 0:
return dataset_ops.Dataset.range(8).batch(2)
else:
return dataset_ops.Dataset.range(9).batch(2)
expected_values = [[[0, 1], [2, 3], [0, 1], [2, 3]],
[[4, 5], [6, 7], [4, 5], [6, 7]], [[], [], [8], []]]
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_fn, worker_devices, expected_values,
sess, enable_get_next_as_optional=True)
if __name__ == "__main__":
test.main()
| [
"[email protected]"
]
| |
d39b69be8e6d36394558c1b41133aa034318b78c | 9d735842aa4ae0d65b33cd71994142ea80792af5 | /scrape_ebi_iedb_input.py | 54acc58345dc44c5eb8ac2e69a7d2fc6e40df78b | []
| no_license | saketkc/scrape_ebi | 9b5179c9cc45f6e4519732feec9bfaa34126538c | ffe451453bedd9b1aced1bd515bfe3993283809a | refs/heads/master | 2021-01-15T12:16:09.599217 | 2012-05-25T09:13:03 | 2012-05-25T09:13:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,562 | py | from mechanize import Browser, _http
from BeautifulSoup import BeautifulSoup
import sys
import os
def get_data_from_ebi(*arg):
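    """Submit a protein structure to the IEDB ElliPro tool hosted at EBI.
    The first value of ``arg[0]`` is a local PDB file name, or a PDB id when no
    such file exists (the part before the first '.' is used); the remaining
    values are the chain identifiers to analyse. Prints the predicted linear
    and discontinuous epitopes. Written for Python 2 (mechanize, BeautifulSoup 3).
    """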
br = Browser()
args = arg[0]
filename = args[0]
br.set_handle_robots(False)
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
br.open('http://iedb.ebi.ac.uk/tools/ElliPro/iedb_input')
br.select_form(name='predictionForm')
br.form['protein_type'] = ['structure',]
if os.path.exists(filename):
br.form.add_file(open(filename), 'text/plain', filename)
else:
br.form['pdbId'] = filename.split('.')[0]
submit_response = br.submit(name='Submit', label='Submit')
html = submit_response.read()
soup = BeautifulSoup(html)
all_tables = soup.findAll("table",cellspacing=1)
if len(all_tables) == 1:
table = soup.find("table",cellspacing=1)
all_protein_chains = {}
for row in table.findAll('tr')[1:-1]:
columns = row.findAll('td')
number = columns[1]
chain = columns[2]
number_of_residues = columns[3]
all_protein_chains[number.string] = chain.string
br.select_form(name='selectChainForm')
br.form['chainIndex'] = [None] * (len(args)-1)
for index,seqchoice in enumerate(args[1:]):
for k,v in all_protein_chains.iteritems():
if str(v) == str(seqchoice):
choice = k
br.form['chainIndex'][index] = (str(int(choice)-1))
submit_response = br.submit().read()
soup = BeautifulSoup(submit_response)
for index,tables in enumerate(soup.findAll("table",cellspacing=1)[1:3]):
if index == 0:
print "Predicted Linear Epitope(s): "
for row in tables.findAll('tr'):
columns = row.findAll('td')
output = ""
for column in columns[:-1]:
output += column.string + " "
print output
if index == 1:
print "Predicted Discontinous Epitope(s): "
for row in tables.findAll('tr')[1:]:
columns = row.findAll('td')
output = ""
for column in columns[:-1]:
if column.string == None:
column = column.find('div')
output += column.string + " "
print output
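# Illustrative invocation (assumed, not part of the original script):
#   python scrape_ebi_iedb_input.py my_structure.pdb A B
# The first argument is a local PDB file (or a PDB id written as "1ABC.pdb",
# from which "1ABC" is taken when the file does not exist); the remaining
# arguments are the chain identifiers to analyse.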
if __name__ == "__main__":
get_data_from_ebi(sys.argv[1:]) | [
"[email protected]"
]
| |
832bfaeda0f2312dfbb3cf401c494edf8c4f3f34 | 8c11084d9d55e7ee602695629beba646a7c9fbc6 | /torch/ao/quantization/pt2e/utils.py | e9a2ae783292a8180694581aa8418ebcb164c55a | [
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
]
| permissive | SamuelMarks/pytorch | 201995d5d0ed72813a72c4c51ddbc6125c4fc5e7 | 71632d4d24616ddad6685814aae4ae54c981c0d2 | refs/heads/main | 2023-08-31T14:30:33.523459 | 2023-08-19T16:14:37 | 2023-08-20T08:56:21 | 323,614,969 | 0 | 0 | NOASSERTION | 2020-12-22T12:07:42 | 2020-12-22T12:07:41 | null | UTF-8 | Python | false | false | 16,437 | py | import torch
from torch.fx import (
Graph,
GraphModule,
Node,
)
from torch.fx.subgraph_rewriter import replace_pattern_with_filters
import torch.nn.functional as F
from torch.nn.utils.fusion import fuse_conv_bn_weights
import copy
import operator
from typing import Any, Callable, Dict, Optional, Tuple, List, Union
from torch.utils._pytree import LeafSpec
__all__ = [
"fold_bn_weights_into_conv_node",
"get_aten_graph_module",
"remove_tensor_overload_for_qdq_ops",
]
def _get_tensor_constant_from_node(node, m):
if node is None:
return None
assert node.op == "get_attr"
return getattr(m, node.target)
def _get_all_arguments(orig_args, orig_kwargs, args_schema):
all_args = []
for i, schema in enumerate(args_schema):
if schema.name in orig_kwargs:
all_args.append(orig_kwargs[schema.name])
elif not schema.kwarg_only and i < len(orig_args):
all_args.append(orig_args[i])
else:
all_args.append(schema.default_value)
return all_args
def fold_bn_weights_into_conv_node(
conv_node: Node,
conv_weight_node: Node,
conv_bias_node: Optional[Node],
bn_node: Node,
m: GraphModule
) -> None:
# conv args: input, weight, bias, stride, padding, dilation, transposed, ...
conv_w = _get_tensor_constant_from_node(conv_weight_node, m)
conv_b = _get_tensor_constant_from_node(conv_bias_node, m)
transpose = conv_node.args[6]
# eval bn args: input, weight, bias, running mean, running var, momentum, eps
# train bn args: input, weight, bias, running mean, running var, training, momentum, eps
bn_args_schema = bn_node.target._schema.arguments # type: ignore[union-attr]
bn_args = _get_all_arguments(bn_node.args, bn_node.kwargs, bn_args_schema)
bn_w = _get_tensor_constant_from_node(bn_args[1], m)
bn_b = _get_tensor_constant_from_node(bn_args[2], m)
bn_rm = _get_tensor_constant_from_node(bn_args[3], m)
bn_rv = _get_tensor_constant_from_node(bn_args[4], m)
if bn_node.target == torch.ops.aten._native_batch_norm_legit_no_training.default:
eps_arg_index = 6
elif bn_node.target == torch.ops.aten._native_batch_norm_legit.default:
eps_arg_index = 7
else:
raise ValueError("BN node target is unexpected ", bn_node.target)
bn_eps = bn_args[eps_arg_index]
fused_weight, fused_bias = fuse_conv_bn_weights(conv_w, conv_b, bn_rm, bn_rv, bn_eps, bn_w, bn_b, transpose=transpose)
# update the weight and bias for conv
conv_args = list(conv_node.args)
# calling data since the fused_weight and fused_bias are nn.Parameter
weight_attr_name = conv_weight_node.target
assert isinstance(weight_attr_name, str)
setattr(m, weight_attr_name, fused_weight)
if conv_bias_node is not None:
bias_attr_name = conv_bias_node.target
else:
bias_attr_name = weight_attr_name + "_bias"
with m.graph.inserting_before(conv_node):
get_bias_node = m.graph.get_attr(bias_attr_name)
# NOTE: here we assume the bias of conv is not quantized!
conv_args[2] = get_bias_node
setattr(m, bias_attr_name, fused_bias) # type: ignore[arg-type]
conv_node.args = tuple(conv_args)
# native_batch_norm has 3 outputs, we expect getitem calls on the output
# and we want to replace the uses of getitem 0 with the output of conv
#
# Before:
# conv -> bn - (first output) -> users1
# \ - (second output) -> users2
# \ - (third output) -> users3
# After:
# conv -> (first output) -> users1
# bn -
# \ - (second output) -> users2
# \ - (third output) -> users3
# if users2 and users3 are empty then bn will be removed through dead code elimination
for user in bn_node.users:
if user.op != "call_function" or user.target != operator.getitem or user.args[1] != 0:
continue
user.replace_all_uses_with(conv_node)
# fuse conv bn weights, inplace modification of the graph_module and graph
def _fuse_conv_bn_(m: GraphModule) -> None:
for n in m.graph.nodes:
if n.op != "call_function" or n.target != torch.ops.aten._native_batch_norm_legit_no_training.default:
continue
bn_node = n
n = bn_node.args[0]
if n.op != "call_function" or n.target != torch.ops.aten.convolution.default:
continue
conv_node = n
conv_weight_node = conv_node.args[1]
conv_bias_node = conv_node.args[2]
fold_bn_weights_into_conv_node(conv_node, conv_weight_node, conv_bias_node, bn_node, m)
m.graph.eliminate_dead_code()
m.recompile()
def _get_node_name_to_scope(model: GraphModule) -> Dict[str, Tuple[str, type]]:
# TODO: move this information to fx node itself
node_name_to_scope: Dict[str, Tuple[str, type]] = {}
for n in model.graph.nodes:
nn_module_stack = n.meta.get("nn_module_stack", None)
current_scope = ("", type(None))
if nn_module_stack:
bt = list(nn_module_stack.values())[-1]
current_scope = (bt[0].split(".")[-1], bt[1])
node_name_to_scope[n.name] = current_scope
return node_name_to_scope
def get_aten_graph_module(
pattern: Callable,
example_inputs: Tuple[Any, ...],
**kwargs,
) -> GraphModule:
"""
Convert the pattern to an FX graph with decomposed aten ops.
"""
# Avoid circular imports
import torch._dynamo
aten_pattern, _ = torch._dynamo.export(
pattern,
aten_graph=True,
tracing_mode="real",
)(
*copy.deepcopy(example_inputs),
**kwargs,
)
aten_pattern.graph.eliminate_dead_code()
aten_pattern.recompile()
return aten_pattern
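# Illustrative usage sketch (not part of this module's API; the pattern callable and
# example inputs below are made-up placeholders):
#
#   def add_relu_pattern(x, y):
#       return torch.nn.functional.relu(x + y)
#
#   pattern_gm = get_aten_graph_module(add_relu_pattern, (torch.randn(2), torch.randn(2)))
#   # pattern_gm now holds the pattern as a graph of decomposed aten ops, ready for use
#   # with the subgraph-rewriting helpers below.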
def remove_tensor_overload_for_qdq_ops(match_pattern: GraphModule) -> None:
""" Remove .tensor overload for quantize/dequantize ops so that we can
use the match_pattern that we get from torchdynamo export to match the output of convert_pt2e
"""
_MAP = {
torch.ops.quantized_decomposed.quantize_per_tensor.default: torch.ops.quantized_decomposed.quantize_per_tensor,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: torch.ops.quantized_decomposed.dequantize_per_tensor,
torch.ops.quantized_decomposed.quantize_per_tensor.tensor: torch.ops.quantized_decomposed.quantize_per_tensor,
torch.ops.quantized_decomposed.dequantize_per_tensor.tensor: torch.ops.quantized_decomposed.dequantize_per_tensor,
torch.ops.quantized_decomposed.quantize_per_tensor.tensor2: torch.ops.quantized_decomposed.quantize_per_tensor,
torch.ops.quantized_decomposed.dequantize_per_tensor.tensor2: torch.ops.quantized_decomposed.dequantize_per_tensor,
torch.ops.quantized_decomposed.quantize_per_channel.default: torch.ops.quantized_decomposed.quantize_per_channel,
torch.ops.quantized_decomposed.dequantize_per_channel.default: torch.ops.quantized_decomposed.dequantize_per_channel,
torch.ops.aten.clamp.Tensor: torch.ops.aten.clamp,
}
for n in match_pattern.graph.nodes:
if n.op != "call_function":
continue
if n.target in _MAP:
n.target = _MAP[n.target]
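# For example, remove_tensor_overload_for_qdq_ops rewrites a node whose target is
# torch.ops.quantized_decomposed.quantize_per_tensor.default so that it targets the
# overload packet torch.ops.quantized_decomposed.quantize_per_tensor, matching the
# q/dq calls that appear in the torchdynamo-exported match pattern.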
def _is_dropout_filter(
match: "InternalMatch", # type: ignore[name-defined]
original_graph: Graph,
pattern_graph: Graph,
) -> bool:
"""
Match filter for the subgraph rewriter that returns True if the matched
graph includes all the ops used in the aten dropout pattern.
"""
ops_to_match = {
torch.ops.aten.empty_like.default,
torch.ops.aten.bernoulli_.float,
torch.ops.aten.div_.Scalar,
torch.ops.aten.mul.Tensor,
}
for n in match.nodes_map.values():
if n.target in ops_to_match:
ops_to_match.remove(n.target)
return len(ops_to_match) == 0
def _replace_dropout_for_eval(m: GraphModule):
"""
Replace the aten training dropout pattern with a noop, intended for eval.
For models with dropout torch ops (nn.Dropout, F.dropout), calling model.eval()
effectively turns these dropout ops into noops. For exported models, however,
this is not done automatically, since the aten dropout patterns previously generated
for training remain in the graph. Here we rewrite these dropout patterns with noops
to avoid incorrectly applying further dropout during eval.
See https://github.com/pytorch/pytorch/issues/103681.
"""
def dropout_train(x):
return F.dropout(x, p=0.5, training=True)
def dropout_eval(x):
return F.dropout(x, p=0.5, training=False)
example_inputs = (torch.randn(1),)
match_pattern = get_aten_graph_module(dropout_train, example_inputs)
replacement_pattern = get_aten_graph_module(dropout_eval, example_inputs)
# Note: The match pattern looks like:
#
# empty_like_default = torch.ops.aten.empty_like.default(x)
# bernoulli__float = torch.ops.aten.bernoulli_.float(empty_like_default)
# div__scalar = torch.ops.aten.div_.Scalar(bernoulli__float, 0.5)
# mul_tensor = torch.ops.aten.mul.Tensor(x, div__scalar)
#
# We need to use `ignore_literals=True` here to handle arbitrary dropout
# probability (not just 0.5). However, without a match filter, this would
# also match any mul op, since `div__scalar` is also a literal, e.g.:
#
# mul_tensor = torch.ops.aten.mul.Tensor(x, 0.8)
#
# Therefore, we need both `ignore_literals=True` and `_is_dropout_filter`
# to make sure we are in fact replacing the dropout pattern.
replace_pattern_with_filters(
m,
match_pattern,
replacement_pattern,
match_filters=[_is_dropout_filter],
ignore_literals=True,
)
m.recompile()
def _is_literal(arg):
if isinstance(arg, (int, float)):
return True
if isinstance(arg, (tuple, list)):
return all(map(_is_literal, arg))
return False
def _replace_literals_with_new_placeholders(
gm: torch.fx.GraphModule,
merge_dup: bool = False,
exclude_literals: Optional[List[Any]] = None
):
"""Replace the literals in the graph with placeholder nodes that's created on the fly while we
traverse the graph, so that the literal arguments in the graph can be matched and replaced
To use this, the pattern and replacement graph should have the exact same number of literal args
and they should be used in the exact same order in the pattern and replacement graph.
If the literal arguments are not used in the same order in pattern and replacement graph, please
use `_replace_literals_with_existing_placeholders` instead
Args:
`gm`: input GraphModule that we'll transform
`merge_dup`: boolean flag to indicate that if the same literal appears multiple times in
the graph, whether they should correspond to the same placeholder or not
`exclude_literals`: a list of literals that will not be replaced with placeholders
Example:
# 1. Original Graph
def pattern(self, x):
return x + 3
def replacement(self, x):
return x - 3
example_inputs = (torch.randn(1, 3, 3, 3),)
pattern_gm = get_aten_graph_module(pattern, example_inputs)
    replacement_gm = get_aten_graph_module(replacement, example_inputs)
# 2. Before calling replace literals we'll see the following graph:
def pattern(self, x):
return x + 3
def replacement(self, x):
return x - 3
pattern_gm = _replace_literals_with_new_placeholders(pattern_gm)
replacement_gm = _replace_literals_with_new_placeholders(replacement_gm)
# 3. After replacing literals with new placeholder nodes
def pattern(self, x, new_ph):
return x + new_ph
    def replacement(self, x, new_ph):
return x - new_ph
"""
last_ph = None
cnt = 0
literal_to_ph: Dict[Union[float, bool, int, torch.dtype], Node] = {}
if exclude_literals is None:
exclude_literals = []
for node in gm.graph.nodes:
if node.op == "placeholder":
last_ph = node
cnt += 1
continue
with gm.graph.inserting_after(last_ph):
new_args = []
for arg in node.args:
if _is_literal(arg) and arg not in exclude_literals:
if merge_dup and arg in literal_to_ph:
new_args.append(literal_to_ph[arg])
else:
ph_node = gm.graph.placeholder("arg" + str(cnt))
new_args.append(ph_node)
gm._in_spec.children_specs[0].children_specs.append(LeafSpec())
cnt += 1
if merge_dup:
literal_to_ph[arg] = ph_node
else:
new_args.append(arg)
new_args = tuple(new_args)
node.args = new_args
return gm
def _replace_literals_with_existing_placeholders(
gm: torch.fx.GraphModule,
exclude_literals: Optional[List[Any]] = None,
literal_to_ph_idx: Optional[Dict[Union[float, int, bool, torch.dtype], int]] = None
):
"""Replace the literals in the graph with **existing** placeholder nodes, so that the literal arguments
in the graph can be matched and replaced
To use this, all literal args in the graph should be unique and each of them should correspond
to exactly one placeholder node
# 1. Original Graph
def pattern(self, x_i8, scale, zero_point, quant_min, quant_max):
return torch.dequantize_per_tensor(x_i8, scale, zero_point, quant_min, quant_max)
def replacement(x_i8, scale, zero_point, quant_min, quant_max):
x_i8 = torch.clamp(x_i8, quant_min, quant_max)
return ((x_i8.to(torch.float32) - zero_point) * scale).to(dtype=torch.float32)
example_inputs = (
torch.randn(1, 3, 3, 3),
1.0,
0,
-128,
127,
)
pattern_gm = get_aten_graph_module(pattern, example_inputs)
    replacement_gm = get_aten_graph_module(replacement, example_inputs)
# 2. Before calling replace literals we'll see the following graph:
def pattern(self, x_i8, scale, zero_point, quant_min, quant_max):
# scale/zero_point/quant_min/quant_max are burnt in since they are scalar values
return torch.dequantize_per_tensor(x_i8, 1.0, 0, -128, 127)
def replacement(x_i8, scale, zero_point, quant_min, quant_max):
# scale/zero_point/quant_min/quant_max are burnt in since they are scalar values
x_i8 = torch.clamp(x_i8, -128, 127)
return ((x_i8.to(torch.float32) - 0) * 1.0).to(dtype=torch.float32)
# Note that literal args appear in different order in pattern and replacement graph, so
# we can't use _replace_literals_with_new_placeholders
literal_to_ph_idx = {1.0: 1, 0: 2, -128: 3, 127: 4}
    pattern_gm = _replace_literals_with_existing_placeholders(pattern_gm, literal_to_ph_idx=literal_to_ph_idx)
    replacement_gm = _replace_literals_with_existing_placeholders(replacement_gm, literal_to_ph_idx=literal_to_ph_idx)
# 3. After replacing literals with existing placeholder nodes
def pattern(self, x_i8, scale, zero_point, quant_min, quant_max):
# scale/zero_point/quant_min/quant_max are burnt in since they are scalar values
return torch.dequantize_per_tensor(x_i8, scale, zero_point, quant_min, quant_max)
def replacement(x_i8, scale, zero_point, quant_min, quant_max):
# scale/zero_point/quant_min/quant_max are burnt in since they are scalar values
x_i8 = torch.clamp(x_i8, quant_min, quant_max)
return ((x_i8.to(torch.float32) - zero_point) * scale).to(dtype=torch.float32)
"""
if exclude_literals is None:
exclude_literals = []
if literal_to_ph_idx is None:
literal_to_ph_idx = {}
phs = [node for node in gm.graph.nodes if node.op == "placeholder"]
for node in gm.graph.nodes:
if node.op != "call_function":
continue
new_args = []
for arg in node.args:
if _is_literal(arg) and arg not in exclude_literals and arg in literal_to_ph_idx:
ph_idx = literal_to_ph_idx[arg]
ph_node = phs[ph_idx]
new_args.append(ph_node)
else:
new_args.append(arg)
new_args = tuple(new_args)
node.args = new_args
return gm
| [
"[email protected]"
]
| |
eca34ed79166d90c4f7930589dd15091b5634f63 | 863de8490c758435388e7328880b28fdb432a8e5 | /01-MASTER/main.py | db781ed3979dcade024c94b9b7991676497f642d | []
| no_license | bopopescu/2017-2018_EMG_senior_project | 46aa57c28119db34bd86ecc54e36dca9546232b0 | 77544e2a0afa8a75c979eb09f70d64c25e753d52 | refs/heads/master | 2022-11-20T01:45:05.476457 | 2018-05-01T19:49:56 | 2018-05-01T19:49:56 | 281,037,132 | 0 | 0 | null | 2020-07-20T06:48:33 | 2020-07-20T06:48:32 | null | UTF-8 | Python | false | false | 27,742 | py |
'''
AUTHOR: Aditya Patel and Jim Ramsay
DATE CREATED: 2018-03-01
LAST MODIFIED: 2018-04-12
PLATFORM: Raspberry Pi 3B, Raspbian Stretch Released 2017-11-29
PROJECT: EMG Human Machine Interface
ORGANIZATION: Bradley University, School of Electrical and Computer Engineering
FILENAME: main.py
DESCRIPTION:
Main script that:
- initializes/executes bluetooth protocol (not written by Aditya/Jim -- see note below)
- starts reading emg data.
- detects gestures
- commands two slave raspberry pi's to rotate servo motors
- switches between displaying video feed from each of the slaves
Gestures (right hand only, have not tested on left hand):
rest -- do nothing, arm relaxed
fist -- tight fist
piwo -- palm in, wrist out (wave outward)
piwi -- palm in, wrist in (wave inward)
Master:
emgPi_3 -- [email protected] password is "ee00"
Slaves:
ssh commands recognize the defined names for the slaves using ssh_keys. Using the defined
names and saved keys bypasses password requirements.
emgPi_1 -- [email protected] password is "ee00"
emgPi_2 -- [email protected] password is "ee00"
NOTE:
Original by dzhu
https://github.com/dzhu/myo-raw
Edited by Fernando Cosentino
http://www.fernandocosentino.net/pyoconnect
Edited further by Aditya Patel and Jim Ramsay
There are a lot of global variables used to function like constants. This is likely not good practice
but had to be done to meet deadlines.
'''
from __future__ import print_function
import enum
import re
import struct
import sys
import threading
import time
import string
import serial
from serial.tools.list_ports import comports
from common import *
''' Additional Imports '''
import os
import numpy as np
import csv
import datetime
from ringBuffer import ringBuffer
import displayControl as display
from calibrate import Calibrate
from guppy import hpy
'''
GLOBAL VARIABLES
note: a lot of these are meant to function like a "DEFINE" in C. They are never written to.
'''
''' ARRAYS '''
global emg_data
emg_data = []
global duty
duty = [50, 50] # initial duty cycle for each motor
''' INTEGERS '''
global GETTINGCALDATA; global CALIBRATING; global SLEEP; global WAITING; global DISPLAYCONTROL; global MOTORCONTROL
GETTINGCALDATA = 0
CALIBRATING = 1
SLEEP = 2
WAITING = 3
DISPLAYCONTROL = 4
MOTORCONTROL = 5
global REST; global FIST; global PIWI; global PIWO
REST = 0
FIST = 1
PIWI = 2
PIWO = 3
global calMode
calMode = REST
global EMGPI_1; global EMGPI_2
EMGPI_1 = 0
EMGPI_2 = 1
global fistCalData; global piwiCalData; global piwoCalData;
fistCalData = []
piwiCalData = []
piwoCalData = []
global curPi
curPi = 0
t0 = time.time()
global t_endWaiting
gestureString = ["fist", "piwi", "piwo", ""]
modeString = ["", "", "SLEEP", "WAITING","DISPLAY CONTROL","MOTOR CONTROL"]
def multichr(ords):
if sys.version_info[0] >= 3:
return bytes(ords)
else:
return ''.join(map(chr, ords))
def multiord(b):
if sys.version_info[0] >= 3:
return list(b)
else:
return map(ord, b)
class Arm(enum.Enum):
UNKNOWN = 0
RIGHT = 1
LEFT = 2
class XDirection(enum.Enum):
UNKNOWN = 0
X_TOWARD_WRIST = 1
X_TOWARD_ELBOW = 2
class Pose(enum.Enum):
RESTT = 0
FIST = 1
WAVE_IN = 2
WAVE_OUT = 3
FINGERS_SPREAD = 4
THUMB_TO_PINKY = 5
UNKNOWN = 255
class Packet(object):
def __init__(self, ords):
self.typ = ords[0]
self.cls = ords[2]
self.cmd = ords[3]
self.payload = multichr(ords[4:])
def __repr__(self):
return 'Packet(%02X, %02X, %02X, [%s])' % \
(self.typ, self.cls, self.cmd,
' '.join('%02X' % b for b in multiord(self.payload)))
class BT(object):
'''Implements the non-Myo-specific details of the Bluetooth protocol.'''
def __init__(self, tty):
self.ser = serial.Serial(port=tty, baudrate=9600, dsrdtr=1)
self.buf = []
self.lock = threading.Lock()
self.handlers = []
## internal data-handling methods
def recv_packet(self, timeout=None):
t0 = time.time()
self.ser.timeout = None
while timeout is None or time.time() < t0 + timeout:
if timeout is not None: self.ser.timeout = t0 + timeout - time.time()
c = self.ser.read()
if not c: return None
ret = self.proc_byte(ord(c))
if ret:
if ret.typ == 0x80:
self.handle_event(ret)
return ret
def recv_packets(self, timeout=.5):
res = []
t0 = time.time()
while time.time() < t0 + timeout:
p = self.recv_packet(t0 + timeout - time.time())
if not p: return res
res.append(p)
return res
def proc_byte(self, c):
if not self.buf:
if c in [0x00, 0x80, 0x08, 0x88]:
self.buf.append(c)
return None
elif len(self.buf) == 1:
self.buf.append(c)
self.packet_len = 4 + (self.buf[0] & 0x07) + self.buf[1]
return None
else:
self.buf.append(c)
if self.packet_len and len(self.buf) == self.packet_len:
p = Packet(self.buf)
self.buf = []
return p
return None
def handle_event(self, p):
for h in self.handlers:
h(p)
def add_handler(self, h):
self.handlers.append(h)
def remove_handler(self, h):
try: self.handlers.remove(h)
except ValueError: pass
def wait_event(self, cls, cmd):
res = [None]
def h(p):
if p.cls == cls and p.cmd == cmd:
res[0] = p
self.add_handler(h)
while res[0] is None:
self.recv_packet()
self.remove_handler(h)
return res[0]
## specific BLE commands
def connect(self, addr):
return self.send_command(6, 3, pack('6sBHHHH', multichr(addr), 0, 6, 6, 64, 0))
def get_connections(self):
return self.send_command(0, 6)
def discover(self):
return self.send_command(6, 2, b'\x01')
def end_scan(self):
return self.send_command(6, 4)
def disconnect(self, h):
return self.send_command(3, 0, pack('B', h))
def read_attr(self, con, attr):
self.send_command(4, 4, pack('BH', con, attr))
return self.wait_event(4, 5)
def write_attr(self, con, attr, val):
self.send_command(4, 5, pack('BHB', con, attr, len(val)) + val)
return self.wait_event(4, 1)
def send_command(self, cls, cmd, payload=b'', wait_resp=True):
s = pack('4B', 0, len(payload), cls, cmd) + payload
self.ser.write(s)
while True:
p = self.recv_packet()
## no timeout, so p won't be None
if p.typ == 0: return p
## not a response: must be an event
self.handle_event(p)
class MyoRaw(object):
'''Implements the Myo-specific communication protocol.'''
def __init__(self, tty=None):
if tty is None:
tty = self.detect_tty()
if tty is None:
raise ValueError('Myo dongle not found!')
self.bt = BT(tty)
self.conn = None
self.emg_handlers = []
self.imu_handlers = []
self.arm_handlers = []
self.pose_handlers = []
def detect_tty(self):
for p in comports():
if re.search(r'PID=2458:0*1', p[2]):
print('using device:', p[0])
return p[0]
return None
def run(self, timeout=None):
self.bt.recv_packet(timeout)
def connect(self):
## stop everything from before
self.bt.end_scan()
self.bt.disconnect(0)
self.bt.disconnect(1)
self.bt.disconnect(2)
## start scanning
print('scanning for bluetooth devices...')
self.bt.discover()
while True:
p = self.bt.recv_packet()
print('scan response:', p)
if p.payload.endswith(b'\x06\x42\x48\x12\x4A\x7F\x2C\x48\x47\xB9\xDE\x04\xA9\x01\x00\x06\xD5'):
addr = list(multiord(p.payload[2:8]))
break
self.bt.end_scan()
## connect and wait for status event
conn_pkt = self.bt.connect(addr)
self.conn = multiord(conn_pkt.payload)[-1]
self.bt.wait_event(3, 0)
## get firmware version
fw = self.read_attr(0x17)
_, _, _, _, v0, v1, v2, v3 = unpack('BHBBHHHH', fw.payload)
print('firmware version: %d.%d.%d.%d' % (v0, v1, v2, v3))
self.old = (v0 == 0)
if self.old: # if the firmware is 0.x.xxxx.x
## don't know what these do; Myo Connect sends them, though we get data
## fine without them
self.write_attr(0x19, b'\x01\x02\x00\x00')
self.write_attr(0x2f, b'\x01\x00')
self.write_attr(0x2c, b'\x01\x00')
self.write_attr(0x32, b'\x01\x00')
self.write_attr(0x35, b'\x01\x00')
## enable EMG data
self.write_attr(0x28, b'\x01\x00')
## enable IMU data
self.write_attr(0x1d, b'\x01\x00')
## Sampling rate of the underlying EMG sensor, capped to 1000. If it's
## less than 1000, emg_hz is correct. If it is greater, the actual
## framerate starts dropping inversely. Also, if this is much less than
## 1000, EMG data becomes slower to respond to changes. In conclusion,
## 1000 is probably a good value.
C = 1000
emg_hz = 50
## strength of low-pass filtering of EMG data
emg_smooth = 100
imu_hz = 50
## send sensor parameters, or we don't get any data
self.write_attr(0x19, pack('BBBBHBBBBB', 2, 9, 2, 1, C, emg_smooth, C // emg_hz, imu_hz, 0, 0))
else: #normal operation
name = self.read_attr(0x03)
print('device name: %s' % name.payload)
## enable IMU data
self.write_attr(0x1d, b'\x01\x00')
## enable vibrations
self.write_attr(0x24, b'\x02\x00')
# Failed attempt to disable vibrations:
# self.write_attr(0x24, b'\x00\x00')
# self.write_attr(0x19, b'\x01\x03\x00\x01\x01')
self.start_raw()
## add data handlers
def handle_data(p):
if (p.cls, p.cmd) != (4, 5): return
c, attr, typ = unpack('BHB', p.payload[:4]) # unpack unsigned char, unsigned short, unsigned char
pay = p.payload[5:]
if attr == 0x27:
vals = unpack('8HB', pay) # unpack 8 unsigned shorts, and one unsigned char https://docs.python.org/2/library/struct.html
## not entirely sure what the last byte is, but it's a bitmask that
## seems to indicate which sensors think they're being moved around or
## something
emg = vals[:8]
moving = vals[8]
self.on_emg(emg, moving)
elif attr == 0x1c:
vals = unpack('10h', pay)
quat = vals[:4]
acc = vals[4:7]
gyro = vals[7:10]
self.on_imu(quat, acc, gyro)
elif attr == 0x23:
typ, val, xdir, _,_,_ = unpack('6B', pay)
if typ == 1: # on arm
self.on_arm(Arm(val), XDirection(xdir))
print("on arm")
elif typ == 2: # removed from arm
self.on_arm(Arm.UNKNOWN, XDirection.UNKNOWN)
print("NOT on arm")
elif typ == 3: # pose
self.on_pose(Pose(val))
else:
print('data with unknown attr: %02X %s' % (attr, p))
self.bt.add_handler(handle_data)
def write_attr(self, attr, val):
if self.conn is not None:
self.bt.write_attr(self.conn, attr, val)
def read_attr(self, attr):
if self.conn is not None:
return self.bt.read_attr(self.conn, attr)
return None
def disconnect(self):
if self.conn is not None:
self.bt.disconnect(self.conn)
def start_raw(self):
'''Sending this sequence for v1.0 firmware seems to enable both raw data and
pose notifications.
'''
self.write_attr(0x28, b'\x01\x00')
#self.write_attr(0x19, b'\x01\x03\x01\x01\x00')
self.write_attr(0x19, b'\x01\x03\x01\x01\x01')
def mc_start_collection(self):
'''Myo Connect sends this sequence (or a reordering) when starting data
collection for v1.0 firmware; this enables raw data but disables arm and
pose notifications.
'''
self.write_attr(0x28, b'\x01\x00')
self.write_attr(0x1d, b'\x01\x00')
self.write_attr(0x24, b'\x02\x00')
self.write_attr(0x19, b'\x01\x03\x01\x01\x01')
self.write_attr(0x28, b'\x01\x00')
self.write_attr(0x1d, b'\x01\x00')
self.write_attr(0x19, b'\x09\x01\x01\x00\x00')
self.write_attr(0x1d, b'\x01\x00')
self.write_attr(0x19, b'\x01\x03\x00\x01\x00')
self.write_attr(0x28, b'\x01\x00')
self.write_attr(0x1d, b'\x01\x00')
self.write_attr(0x19, b'\x01\x03\x01\x01\x00')
def mc_end_collection(self):
'''Myo Connect sends this sequence (or a reordering) when ending data collection
for v1.0 firmware; this reenables arm and pose notifications, but
doesn't disable raw data.
'''
self.write_attr(0x28, b'\x01\x00')
self.write_attr(0x1d, b'\x01\x00')
self.write_attr(0x24, b'\x02\x00')
self.write_attr(0x19, b'\x01\x03\x01\x01\x01')
self.write_attr(0x19, b'\x09\x01\x00\x00\x00')
self.write_attr(0x1d, b'\x01\x00')
self.write_attr(0x24, b'\x02\x00')
self.write_attr(0x19, b'\x01\x03\x00\x01\x01')
self.write_attr(0x28, b'\x01\x00')
self.write_attr(0x1d, b'\x01\x00')
self.write_attr(0x24, b'\x02\x00')
self.write_attr(0x19, b'\x01\x03\x01\x01\x01')
def vibrate(self, length):
if length in xrange(1, 4):
## first byte tells it to vibrate; purpose of second byte is unknown
self.write_attr(0x19, pack('3B', 3, 1, length))
def add_emg_handler(self, h):
self.emg_handlers.append(h)
def add_imu_handler(self, h):
self.imu_handlers.append(h)
def add_pose_handler(self, h):
self.pose_handlers.append(h)
def add_arm_handler(self, h):
self.arm_handlers.append(h)
def on_emg(self, emg, moving):
for h in self.emg_handlers:
h(emg, moving)
def on_imu(self, quat, acc, gyro):
for h in self.imu_handlers:
h(quat, acc, gyro)
def on_pose(self, p):
for h in self.pose_handlers:
h(p)
def on_arm(self, arm, xdir):
for h in self.arm_handlers:
h(arm, xdir)
def controlLogic(mode, gesture, confidence):
global SLEEP; global WAITING; global DISPLAYCONTROL; global MOTORCONTROL;
global REST; global FIST; global PIWI; global PIWO
global duty; global curPi; global t_endWaiting; global t_30_SLEEP
if ( mode == SLEEP ):
if ( gesture == FIST ):
mode = WAITING
t_endWaiting = time.time() + 1 # Reset the sleep timer once you leave SLEEP
print("SWITCHING MODE: WAITING\t\t\t\tConfidence Level: ", confidence)
t_30_SLEEP = time.time() + 30
if ( mode == WAITING ):
if ( time.time() >= t_30_SLEEP ):
mode = SLEEP
print("SWITCHING MODE: SLEEP")
else:
# print("MODE = WAITING")
if ( time.time() > t_endWaiting ):
if ( gesture == FIST ):
mode = SLEEP
print("SWITCHING MODE: SLEEP\t\t\t\tConfidence Level: ",confidence)
elif ( gesture == PIWI ):
mode = DISPLAYCONTROL
print("SWITCHING MODE: DISPLAYCONTROL\t\t\t\tConfidence Level: ",confidence)
t_endWaiting = time.time() + 1
t_30_SLEEP = time.time() + 30
elif ( gesture == PIWO ):
mode = MOTORCONTROL
print("SWITCHING MODE: MOTORCONTROL\t\t\tConfidence Level: ",confidence)
t_endWaiting = time.time() + 1
t_30_SLEEP = time.time() + 30
if ( mode == DISPLAYCONTROL ):
if ( time.time() >= t_30_SLEEP ):
mode = SLEEP
print("SWITCHING MODE: SLEEP")
else:
if ( time.time() > t_endWaiting ):
if ( gesture == FIST ):
mode = WAITING
print("SWITCHING MODE: WAITING\t\t\t\tConfidence Level: ",confidence)
t_endWaiting = time.time() + 1
t_30_SLEEP = time.time() + 30
elif ( ( curPi == 0 ) and ( gesture == PIWI ) ):
curPi = display.switchDisplay()
print("Switching to Camera 2")
t_endWaiting = time.time() + 1
t_30_SLEEP = time.time() + 30
elif ( ( curPi == 1 ) and ( gesture == PIWO ) ):
curPi = display.switchDisplay()
print("Switching to Camera 1")
t_endWaiting = time.time() + 1
t_30_SLEEP = time.time() + 30
if ( mode == MOTORCONTROL ):
if ( time.time() >= t_30_SLEEP ):
mode = SLEEP
print("SWITCHING MODE: SLEEP")
else:
if ( time.time() > t_endWaiting ):
''' Select which slave to control '''
if ( curPi == 0 ):
curPi_name = "emgPi_1"
currentMotor = 0
elif ( curPi == 1 ):
curPi_name = "emgPi_2"
currentMotor = 1
''' Check Gesture '''
if ( gesture == PIWI ): # Pan Clockwise
if (duty[curPi] <= 70):
duty[curPi] += 10
ssh_string = "ssh " + curPi_name + " 'python /home/pi/scripts/moveMotor.py " + str(duty[curPi]) + " 0 0' &"
os.system(ssh_string)
elif ( ( duty[curPi] > 70 ) and ( duty[curPi] < 80 ) ):
duty[curPi] = 80
ssh_string = "ssh " + curPi_name + " 'python /home/pi/scripts/moveMotor.py " + str(duty[curPi]) + " 0 0' &"
os.system(ssh_string)
print("Motor is at limit.")
t_endWaiting = time.time() + 1
t_30_SLEEP = time.time() + 30
elif ( gesture == PIWO ): # Pan Counter Clockwise
if ( duty[curPi] >= 30 ):
duty[curPi] -= 10
ssh_string = "ssh " + curPi_name + " 'python /home/pi/scripts/moveMotor.py " + str(duty[curPi]) + " 1 0' &"
os.system(ssh_string)
elif ( ( duty[curPi] < 30 ) and ( duty[curPi] > 20 ) ):
duty[curPi] = 20
ssh_string = "ssh " + curPi_name + " 'python /home/pi/scripts/moveMotor.py " + str(duty[curPi]) + " 1 0' &"
os.system(ssh_string)
print("Motor is at limit.")
else:
print("Motor is out of range. Cannot rotate CCW")
t_endWaiting = time.time() + 1
t_30_SLEEP = time.time() + 30
elif ( gesture == FIST ):
mode = WAITING
print("SWITCHING MODE: WAITING\t\t\t\tConfidence Level: ", confidence)
t_endWaiting = time.time() + 1
return mode
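# Mode-transition summary of controlLogic() above (as implemented): SLEEP --fist--> WAITING;
# WAITING --fist--> SLEEP, --piwi--> DISPLAYCONTROL, --piwo--> MOTORCONTROL;
# DISPLAYCONTROL and MOTORCONTROL --fist--> WAITING (piwi/piwo switch the displayed camera
# or pan the selected motor); WAITING/DISPLAYCONTROL/MOTORCONTROL fall back to SLEEP after
# roughly 30 s without a recognized gesture.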
def getConfidence(realTimeData, calData):
matchCounter = 0
    '''
    Weighted score of how closely the real-time top-3 sensor ranking matches the top-3
    ranking stored during calibration (digits below are sensor indices, most active first):
        calibrated: 823
        actual: 832
        result: 10 + 2 + 3 = 15
        calibrated: 781
        actual: 832
        result: 7
        calibrated: 231
        actual: 832
        result: 6 + 2 = 8
    '''
if (realTimeData[0] == calData[0]):
matchCounter += 10
if (realTimeData[0] == calData[1]):
matchCounter += 7
if (realTimeData[0] == calData[2]):
matchCounter += 3
if (realTimeData[1] == calData[0]):
matchCounter += 4
if (realTimeData[1] == calData[1]):
matchCounter += 6
if (realTimeData[1] == calData[2]):
matchCounter += 2
if (realTimeData[2] == calData[0]):
matchCounter += 2
if (realTimeData[2] == calData[1]):
matchCounter += 3
if (realTimeData[2] == calData[2]):
matchCounter += 4
return matchCounter
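# Example call (hypothetical readings, consistent with the worked example above):
# getConfidence([8, 3, 2], [8, 2, 3]) returns 10 + 2 + 3 = 15.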
'''
If the gesture is the same as the last one, increment the counter. If the gesture is different from the last gesture,
update the variable, lastGesture, and reset the counter. This allows us to wait for n counts of the same gesture before
considering a gesture valid.
'''
def confirmGesture(gesture):
    global CONFIRM_COUNTER
    if ( confirmGesture.lastGesture != gesture ):
        # gesture changed: remember the new gesture and restart the count (see comment above)
        confirmGesture.lastGesture = gesture
        confirmGesture.counter = 0
        confirmGesture.flag = False
    elif ( confirmGesture.counter < CONFIRM_COUNTER ):
        confirmGesture.counter += 1
        confirmGesture.flag = False
    else:
        confirmGesture.counter = 0
        confirmGesture.flag = True
    return confirmGesture.flag
confirmGesture.flag = False # static variable initialization for the above function
confirmGesture.counter = 0
confirmGesture.lastGesture = REST
if __name__ == '__main__':
m = MyoRaw(sys.argv[1] if len(sys.argv) >= 2 else None) # this has to come first, and proc_emg() second (see below)
def proc_emg(emg, moving, times = []): # data is sent in packets of two samples at a time. I *think* we only save half of these
global calMode; global emg_data
global fistCalData; global piwiCalData; global piwoCalData;
emg = list(emg) # convert tuple to list
emg_data = emg
if ( mode == GETTINGCALDATA ): # write calibration data to a global array
if (calMode == FIST):
fistCalData.append(emg_data)
if (calMode == PIWI):
piwiCalData.append(emg_data)
if (calMode == PIWO):
piwoCalData.append(emg_data)
'''
INITIALIZATION
this code is only executed once
'''
m.add_emg_handler(proc_emg)
m.connect()
global GETTINGCALDATA; global CALIBRATING; global SLEEP; global WAITING; global DISPLAYCONTROL; global MOTORCONTROL;
global REST; global FIST; global PIWI; global PIWO; global calMode; global curPi; global CONFIRM_COUNTER;
os.system("python displayControl.py &") # initializes the display on every run
confidenceArray = []
curPi = 0
gesture = REST
isResting = 0
BUFFER_SIZE = 100 # size of circular buffer
emg_buffer = ringBuffer(BUFFER_SIZE)
counter = 0 # counter
CONFIDENCE_LEVEL = 10 # allows for tuning. Max = 20. Min = 0. See getConfidence()
CONFIRM_COUNTER = 150 # number of samples of same gesture required to confirm a gesture
SENSITIVITY = 75 # upper and lower threshold = minValueFromCal +/- SENSITIVITY
NUM_CALS = 4 # this is always 1 greater than the number of calibrations
CALIBRATION_SIZE = 500
n = CALIBRATION_SIZE
CSVFILE = "./adityaCal.csv" # file to write/read calibration data from
minValueFromCal = 9999 # initially an arbitrarily large value
iWantToCal = 0 # set to '1' when switching users or when recalibration is needed
calibrateFlag = 1
if ( iWantToCal == 1 ):
mode = GETTINGCALDATA
else:
mode = SLEEP # skip GETTINGCALDATA and CALIBRATING states
os.system("ssh emgPi_1 'python /home/pi/scripts/initMotor.py 50' &") # The ampersand is essential here. If this does not run in the background ...
os.system("ssh emgPi_2 'python /home/pi/scripts/initMotor.py 50' &") # the bluetooth protocol fails and the system is frozen.
print("MOTORS INITIALIZED")
os.system("clear")
while True: # run the program indefinitely, or until user interruption
m.run()
emg_buffer.append(emg_data)
if (counter >= BUFFER_SIZE * 2): # there was an undiagnosed issue with 7 null data points causing havoc.
# this ensures that those are gone before proceeding
average = emg_buffer.getAvg() # average value of each sensor in the buffer. [ 1 x 8 ]
bufferAvg = np.mean(np.array(average)) # average value of the whole buffer. type: float, [1 x 1]
maxGrouping = emg_buffer.getMaxGrouping()
if ( mode >= SLEEP ): # where the main gesture detection and control happens
if ( calibrateFlag == 1 ): # load saved cal data
with open(CSVFILE, 'rb') as csvfile: # Example: [ 7, 6, 1]; [ 4, 2, 5]; [ 0, 2, 7]; [ 157.6, 157.6, 157.6]
CalReader = csv.reader(csvfile, delimiter=',')
i = 0
for row in CalReader:
savedCalData = np.genfromtxt(CSVFILE, delimiter=',')
print("Calibration Data: \n", savedCalData)
print("MODE = SLEEP")
calibrateFlag = 0
fistGrouping = savedCalData[0]
piwiGrouping = savedCalData[1]
piwoGrouping = savedCalData[2]
minValueFromCal = savedCalData[3,1]
fistConfidence = getConfidence(maxGrouping, fistGrouping)
piwiConfidence = getConfidence(maxGrouping, piwiGrouping)
piwoConfidence = getConfidence(maxGrouping, piwoGrouping)
confidenceArray = [fistConfidence, piwiConfidence, piwoConfidence]
maxMatch = np.argmax(confidenceArray) # index of the gesture that returned the most confidence
maxConfidence = confidenceArray[maxMatch] # confidence level of the most confident gesture
if ( ( bufferAvg >= ( minValueFromCal + SENSITIVITY ) ) ):
if ( maxMatch == 0 ) and ( fistConfidence >= CONFIDENCE_LEVEL) :
if ( confirmGesture(FIST) ): # if we saw FIST for n times
gesture = FIST
print("\tFIST CONFIRMED\t\t\t\tConfidence Level: ", fistConfidence)
isResting = 0
elif ( maxMatch == 1 ) and ( piwiConfidence >= CONFIDENCE_LEVEL ):
if ( confirmGesture(PIWI) ): # if we saw PIWI for n times
gesture = PIWI
print("\tPIWI CONFIRMED\t\t\t\tConfidence Level: ", piwiConfidence)
isResting = 0
elif ( maxMatch == 2 ) and ( piwoConfidence >= CONFIDENCE_LEVEL ):
if ( confirmGesture(PIWO) ): # if we saw PIWO for n times
gesture = PIWO
print("\tPIWO CONFIRMED\t\t\t\tConfidence Level: ", piwoConfidence)
isResting = 0
else:
if ( confirmGesture(REST) ): # if we saw REST for n times
gesture = REST
print("\n\n\tMOTION DETECTED BUT NO GESTURE MATCH: REST ASSUMED")
print("\n\tMinimum Accepted Confidence: ", CONFIDENCE_LEVEL)
print("\tFIST Confidence: ",fistConfidence, "\tPIWI Confidence: ",piwiConfidence, "\tPIWO Confidence: ",piwoConfidence)
print("\tStill in mode: ", modeString[mode])
print("\n\n")
elif ( (bufferAvg < (minValueFromCal - SENSITIVITY)) ): #isResting or
#print("REST CONFIRMED")
gesture = REST
isResting = 1
#else:
# print("UNKNOWN")
# print("Sensitivity: ", SENSITIVITY)
# print("minValueFromCal: ", minValueFromCal)
# print("Buffer average: ", bufferAvg)
mode = controlLogic(mode, gesture, maxConfidence) # get new mode
'''
CALIBRATION
note: this can probably be put into a function later. Maybe not all of it, but enough that it becomes a little easier to follow
'''
if ( ( mode == GETTINGCALDATA ) and ( calMode < NUM_CALS ) ):
if (n >= CALIBRATION_SIZE):
n = 0 # reset calibration timer
print("Cal Mode = " + gestureString[calMode])
print("Hold a " + gestureString[calMode] + " until told otherwise")
calMode += 1
# time.sleep(2) # WARNING: THIS BREAKS THE CODE! # sleep to give user time to switch to next gesture
n += 1
if (bufferAvg < minValueFromCal): # this gets the minimum 8-sensor average from the time that calibration was run
minValueFromCal = bufferAvg # it sets the threshold that separates gestures from resting.
else:
if ( calibrateFlag == 1 ):
mode = CALIBRATING
gesture = REST
mode = controlLogic(mode, gesture, 0)
if ( mode == CALIBRATING ) :
print("mode = CALIBRATING")
fistCal = Calibrate()
fistGrouping = fistCal.getMaxGrouping(fistCalData)
piwiCal = Calibrate()
piwiGrouping = piwiCal.getMaxGrouping(piwiCalData)
piwoCal = Calibrate()
piwoGrouping = piwoCal.getMaxGrouping(piwoCalData)
minValueFromCalArray = [minValueFromCal,minValueFromCal,minValueFromCal]
with open(CSVFILE, 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(fistGrouping)
writer.writerow(piwiGrouping)
writer.writerow(piwoGrouping)
writer.writerow(minValueFromCalArray)
calibrateFlag = 0
mode = SLEEP
print("Fist Group: ", fistGrouping)
# print(fistCalData)
print("Piwi Group: ", piwiGrouping)
# print(piwiCalData)
print("Piwo Group: ", piwoGrouping)
else: # Runs until data is guaranteed to be good
counter += 1
# print(counter, "Data contains null values\n")
| [
"[email protected]"
]
| |
76f987a556f889a22ab85458df2b31d53eed1ef0 | 51d602577affebc8d91ffe234f926469d389dc75 | /lis/specimen/lab_receive/models/__init__.py | ba84a225c445689edf2c1adcd3ea012ec62b08af | []
| no_license | botswana-harvard/lis | 5ac491373f74eaf3855f173580b000539d7f4740 | 48dc601ae05e420e8f3ebb5ea398f44f02b2e5e7 | refs/heads/master | 2020-12-29T01:31:07.821681 | 2018-06-24T06:06:57 | 2018-06-24T06:06:57 | 35,820,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | from .base_receive import BaseReceive
from .receive import Receive
from .receive_identifier import ReceiveIdentifier
| [
"[email protected]"
]
| |
ff4d51fae8a38111bc47df0a15cfa915e98ddaff | 02e23da0431623db86c8138bda350a1d526d4185 | /Archivos Python Documentos/Graficas/.history/TRABAJO_SPT_v3_20200223200731.py | e172ba3b981e7ffe2cd9bfa36a01f100e4a315d1 | []
| no_license | Jaamunozr/Archivos-python | d9996d3d10ff8429cd1b4c2b396016a3a5482889 | 1f0af9ba08f12ac27e111fcceed49bbcf3b39657 | refs/heads/master | 2022-08-05T14:49:45.178561 | 2022-07-13T13:44:39 | 2022-07-13T13:44:39 | 244,073,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,247 | py | import os
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
#------------
#from mpl_toolkits.mplot3d.axes3d import get_test_data
#-------------
os.system("clear")
fig = pl.figure()
axx = Axes3D(fig)
raiz=np.sqrt
ln=np.log
puntoX=float(0)
puntoY=float(0)
#puntoX=float(input("Select the X coordinate where you want to compute the potential: "))
#puntoY=float(input("Select the Y coordinate where you want to compute the potential: "))
print("Calculando ...")
#------------------------------------------------------------------------------
Xa = np.arange(-10, 10, 0.1) #X coordinate range
Ya = np.arange(-10, 10, 0.1) #Y coordinate range
l = 2 #Electrode length [m]
rho= 100 #Soil resistivity [Ohm*m]
Ik=200 #Fault current [A] (total)
Rad=0.01 #Electrode radius [m]
Electrodos=8 #Number of electrodes
Pos1=4 #Y position 1 for the 2D plot analysis
Pos2=0 #Y position 2 for the 2D plot analysis
#Electrode positions
P=np.array([
[-4,-4], #Electrode A
[0,-4], #Electrode B
[4,-4], #Electrode C
[-4,0], #Electrode D
[4,0], #Electrode E
[-4,4], #Electrode F
[0,4], #Electrode G
[4,4] #Electrode H
])
#------------------------------------------------------------------------------
E=Electrodos-1
ik=Ik/Electrodos
Vt=np.zeros((np.count_nonzero(Xa),np.count_nonzero(Ya)))
m=np.zeros((Electrodos,1))
V=np.zeros((Electrodos,1))
k=0
m2=np.zeros((Electrodos,1))
V2=np.zeros((Electrodos,1))
#------------------------------------------------------------------------------
#Potential at the user-entered point (puntoX, puntoY)
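#The loops below evaluate, for each electrode, the vertical ground-rod potential formula
#used throughout this script: V = (rho*ik/(2*pi*l)) * ln((l + sqrt(m**2 + l**2)) / m),
#where m is the horizontal distance from the electrode to the point of interest
#(clamped to the rod radius Rad when the point coincides with an electrode).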
i=0
while i<=E:
m2[i][0] =round(raiz((((P[i][0])-puntoX)**2)+(((P[i][1])-puntoY)**2)),4)
o,u=((P[i][0])-puntoX),((P[i][1])-puntoY)
if ((o ==0) and (u==0)) or (m2[i][0]==0):
#print("Elementos de matriz",k,t, "x,y",P[i][0],P[i][1],"punto de eje",X,Y )
m2[i][0]=Rad
V2[i][0] =ln((l+raiz((m2[i][0])**2+l**2))/(m2[i][0]))
i += 1
Vt2=(np.sum(V2)*(rho*ik))/(2*np.pi*l)
print("El potencial en el punto (",puntoX,",",puntoY,"), es de",round(Vt2,3),"[V]")
print("Calculando el resto de operaciones..")
#------------------------------------------------------------------------------
#Grid (mesh) potential calculation over the whole study area
Vxy = [0] * (np.count_nonzero(Ya))
while k<np.count_nonzero(Ya):
Y=round(Ya[k],3)
t=0
while t<np.count_nonzero(Xa):
X=round(Xa[t],3)
i=0
while i<=E:
m[i][0] =round(raiz((((P[i][0])-X)**2)+(((P[i][1])-Y)**2)),4)
o,u=((P[i][0])-X),((P[i][1])-Y)
if ((o ==0) and (u==0)) or (m[i][0]==0):
#print("Elementos de matriz",k,t, "x,y",P[i][0],P[i][1],"punto de eje",X,Y )
m[i][0]=Rad
V[i][0] =ln((l+raiz((m[i][0])**2+l**2))/(m[i][0]))
i += 1
Vt[k][t]=np.sum(V)
if Y==Pos1:
Vxa=Vt[k]
if Y==Pos2:
Vxb=Vt[k]
if Y==X:
Vxy.insert(k,Vt[k][t])
t +=1
k +=1
Vtt=(Vt*(rho*ik))/(2*np.pi*l)
Vxa=(Vxa*(rho*ik))/(2*np.pi*l)
Vxb=(Vxb*(rho*ik))/(2*np.pi*l)
print ("Número de elementos por eje:",np.count_nonzero(Xa))
aa=np.where(np.amax(Vtt) == Vtt)
print ("Valor máximo de tensión:",round(Vtt[::].max(),3),"[V], en posición: (",round(Xa[aa[0][0]],2),",",round(Ya[aa[1][0]],2),")")
bb=np.where(np.amin(Vtt) == Vtt)
print ("Valor mínimo de tensión:",round(Vtt[::].min(),3),"[V], en posición: (",round(Xa[bb[0][0]],2),",",round(Ya[bb[1][0]],2),")")
print ("Número de elemmentos de Vt:",np.count_nonzero(Vtt))
print ("Elementos de Xa al cuadrado:",np.count_nonzero(Xa)**2)
X, Y = np.meshgrid(Xa, Ya)
print ("Nuevo número de elementos de X y Y:",np.count_nonzero(X))
#------------------------------------------------------------------------------
# PLOTS
# set up a figure twice as wide as it is tall
fig = plt.figure(figsize=plt.figaspect(0.5))
# First subplot to draw (3D surface)
ax1 = fig.add_subplot(1, 2, 1, projection='3d')
surf = ax1.plot_surface(X, Y, Vtt, cmap = cm.get_cmap("Spectral"))#, antialiased=False)
# Customize the z axis.
ax1.set_zlim(300, 1800)
fig.colorbar(surf)
#------------------------------------------------------------------------------
#2D plots
#------------------------------------------------------------------------------
#X1 axis:
# Second subplot
ax2 = fig.add_subplot(1, 2, 2)
x1=Xa
ax2.title.set_text('Curvas de Nivel.')
ax2.plot(x1, Vxa, color="blue", linewidth=1.0, linestyle="-")
ax2.plot(x1, Vxb, color="red", linewidth=1.0, linestyle="-")
ax2.plot(x1, Vxy, color="green", linewidth=1.0, linestyle="-")
plt.show()
"""
pl.figure(figsize=(16, 8), dpi=100)
pl.subplot(1, 1, 1)
pl.plot(x1, Vxa, color="blue", linewidth=1.0, linestyle="-")
pl.plot(x1, Vxb, color="green", linewidth=1.0, linestyle="-")
pl.xlim(-10, 10)
pl.xticks(np.linspace(-10, 10, 17, endpoint=True))
pl.ylim(200.0, 1700.0)
pl.yticks(np.linspace(200, 1700, 5, endpoint=True))
#pl.pause(100)
""" | [
"[email protected]"
]
| |
938f1654252a9fbc25a0c686035dd7efd7cbc660 | 2d4380518d9c591b6b6c09ea51e28a34381fc80c | /CIM16/IEC61970/Protection/RecloseSequence.py | 549c094a7cdff30fe983ecb50cfff2a768e9c918 | [
"MIT"
]
| permissive | fran-jo/PyCIM | 355e36ae14d1b64b01e752c5acd5395bf88cd949 | de942633d966bdf2bd76d680ecb20517fc873281 | refs/heads/master | 2021-01-20T03:00:41.186556 | 2017-09-19T14:15:33 | 2017-09-19T14:15:33 | 89,480,767 | 0 | 1 | null | 2017-04-26T12:57:44 | 2017-04-26T12:57:44 | null | UTF-8 | Python | false | false | 3,250 | py | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM16.IEC61970.Core.IdentifiedObject import IdentifiedObject
class RecloseSequence(IdentifiedObject):
"""A reclose sequence (open and close) is defined for each possible reclosure of a breaker.A reclose sequence (open and close) is defined for each possible reclosure of a breaker.
"""
def __init__(self, recloseStep=0, recloseDelay=0.0, ProtectedSwitch=None, *args, **kw_args):
"""Initialises a new 'RecloseSequence' instance.
@param recloseStep: Indicates the ordinal position of the reclose step relative to other steps in the sequence.
@param recloseDelay: Indicates the time lapse before the reclose step will execute a reclose.
@param ProtectedSwitch: A breaker may have zero or more automatic reclosures after a trip occurs.
"""
#: Indicates the ordinal position of the reclose step relative to other steps in the sequence.
self.recloseStep = recloseStep
#: Indicates the time lapse before the reclose step will execute a reclose.
self.recloseDelay = recloseDelay
self._ProtectedSwitch = None
self.ProtectedSwitch = ProtectedSwitch
super(RecloseSequence, self).__init__(*args, **kw_args)
_attrs = ["recloseStep", "recloseDelay"]
_attr_types = {"recloseStep": int, "recloseDelay": float}
_defaults = {"recloseStep": 0, "recloseDelay": 0.0}
_enums = {}
_refs = ["ProtectedSwitch"]
_many_refs = []
def getProtectedSwitch(self):
"""A breaker may have zero or more automatic reclosures after a trip occurs.
"""
return self._ProtectedSwitch
def setProtectedSwitch(self, value):
if self._ProtectedSwitch is not None:
filtered = [x for x in self.ProtectedSwitch.RecloseSequences if x != self]
self._ProtectedSwitch._RecloseSequences = filtered
self._ProtectedSwitch = value
if self._ProtectedSwitch is not None:
if self not in self._ProtectedSwitch._RecloseSequences:
self._ProtectedSwitch._RecloseSequences.append(self)
ProtectedSwitch = property(getProtectedSwitch, setProtectedSwitch)
| [
"[email protected]"
]
| |
5c132a7c5a5c2219a7172b0390e26e3c0c6df9b1 | aaa762ce46fa0347cdff67464f56678ea932066d | /AppServer/lib/django-1.5/django/utils/timezone.py | a56ff0b6ab03a1662f1958daf56110b7e41e7f3e | [
"Apache-2.0",
"BSD-3-Clause",
"LGPL-2.1-or-later",
"MIT",
"GPL-2.0-or-later",
"MPL-1.1"
]
| permissive | obino/appscale | 3c8a9d8b45a6c889f7f44ef307a627c9a79794f8 | be17e5f658d7b42b5aa7eeb7a5ddd4962f3ea82f | refs/heads/master | 2022-10-01T05:23:00.836840 | 2019-10-15T18:19:38 | 2019-10-15T18:19:38 | 16,622,826 | 1 | 0 | Apache-2.0 | 2022-09-23T22:56:17 | 2014-02-07T18:04:12 | Python | UTF-8 | Python | false | false | 8,344 | py | """Timezone helper functions.
This module uses pytz when it's available and falls back to local implementations when it isn't.
"""
from datetime import datetime, timedelta, tzinfo
from threading import local
import time as _time
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
from django.utils import six
__all__ = [
'utc', 'get_default_timezone', 'get_current_timezone',
'activate', 'deactivate', 'override',
'is_naive', 'is_aware', 'make_aware', 'make_naive',
]
# UTC and local time zones
ZERO = timedelta(0)
class UTC(tzinfo):
"""
UTC implementation taken from Python's docs.
Used only when pytz isn't available.
"""
def __repr__(self):
return "<UTC>"
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
class LocalTimezone(tzinfo):
"""
Local time implementation taken from Python's docs.
Used only when pytz isn't available, and most likely inaccurate. If you're
having trouble with this class, don't waste your time, just install pytz.
"""
def __init__(self):
# This code is moved in __init__ to execute it as late as possible
# See get_default_timezone().
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
tzinfo.__init__(self)
def __repr__(self):
return "<LocalTimezone>"
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
utc = pytz.utc if pytz else UTC()
"""UTC time zone as a tzinfo instance."""
# In order to avoid accessing the settings at compile time,
# wrap the expression in a function and cache the result.
_localtime = None
def get_default_timezone():
"""
Returns the default time zone as a tzinfo instance.
This is the time zone defined by settings.TIME_ZONE.
See also :func:`get_current_timezone`.
"""
global _localtime
if _localtime is None:
if isinstance(settings.TIME_ZONE, six.string_types) and pytz is not None:
_localtime = pytz.timezone(settings.TIME_ZONE)
else:
# This relies on os.environ['TZ'] being set to settings.TIME_ZONE.
_localtime = LocalTimezone()
return _localtime
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
"""
Returns the name of the default time zone.
"""
return _get_timezone_name(get_default_timezone())
_active = local()
def get_current_timezone():
"""
Returns the currently active time zone as a tzinfo instance.
"""
return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
"""
Returns the name of the currently active time zone.
"""
return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
local_now = datetime.now(timezone)
return timezone.tzname(local_now)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
"""
Sets the time zone for the current thread.
The ``timezone`` argument must be an instance of a tzinfo subclass or a
time zone name. If it is a time zone name, pytz is required.
"""
if isinstance(timezone, tzinfo):
_active.value = timezone
elif isinstance(timezone, six.string_types) and pytz is not None:
_active.value = pytz.timezone(timezone)
else:
raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
"""
Unsets the time zone for the current thread.
Django will then use the time zone defined by settings.TIME_ZONE.
"""
if hasattr(_active, "value"):
del _active.value
class override(object):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses ``~django.utils.timezone.activate()``
to set the timezone on entry, and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
time zone name, or ``None``. If is it a time zone name, pytz is required.
If it is ``None``, Django enables the default time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
self.old_timezone = getattr(_active, 'value', None)
def __enter__(self):
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is None:
deactivate()
else:
_active.value = self.old_timezone
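# Example usage (illustrative only; with pytz installed a time zone name string also works):
#
#   with override("Europe/Paris"):
#       ...  # code in this block sees Europe/Paris as the current time zone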
# Templates
def template_localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (isinstance(value, datetime)
and (settings.USE_TZ if use_tz is None else use_tz)
and not is_naive(value)
and getattr(value, 'convert_to_local_time', True))
return localtime(value) if should_convert else value
# Utilities
def localtime(value, timezone=None):
"""
Converts an aware datetime.datetime to local time.
Local time is defined by the current time zone, unless another time zone
is specified.
"""
if timezone is None:
timezone = get_current_timezone()
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value
def now():
"""
Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
if settings.USE_TZ:
# timeit shows that datetime.now(tz=utc) is 24% slower
return datetime.utcnow().replace(tzinfo=utc)
else:
return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
def is_naive(value):
"""
Determines if a given datetime.datetime is naive.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is None or value.tzinfo.utcoffset(value) is None
def make_aware(value, timezone):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if hasattr(timezone, 'localize'):
# available for pytz time zones
return timezone.localize(value, is_dst=None)
else:
# may be wrong around DST changes
return value.replace(tzinfo=timezone)
def make_naive(value, timezone):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value.replace(tzinfo=None)
| [
"[email protected]"
]
| |
0aa2a6a5d89aa348a47de4346965acb29112a90e | 0dcce6da7adc3df08038fba39ec663aa2d6d62ab | /ch2-library-app-and-api/api/views.py | 128114ffe645895bd9ad2f34635d9ae726974476 | [
"MIT"
]
| permissive | anectto/apidjangostudy | 1e82e1dc8a7775ad18841372cfdad8d0408ad83f | c8636d54b65e3fbd74e4b3949951cc8a5d681870 | refs/heads/master | 2023-05-29T04:58:54.884817 | 2023-03-15T23:06:48 | 2023-03-15T23:06:48 | 167,802,649 | 0 | 0 | MIT | 2023-05-22T21:47:50 | 2019-01-27T11:47:31 | Python | UTF-8 | Python | false | false | 221 | py | from rest_framework import generics
from books.models import Book
from .serializers import BookSerializer
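# BookSerializer lives in api/serializers.py (not shown here); a minimal sketch of what it
# presumably looks like -- the field names are assumptions, not taken from this file:
#
#   from rest_framework import serializers
#   from books.models import Book
#
#   class BookSerializer(serializers.ModelSerializer):
#       class Meta:
#           model = Book
#           fields = ("title", "subtitle", "author", "isbn")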
class BookAPIView(generics.ListAPIView):
queryset = Book.objects.all()
serializer_class = BookSerializer | [
"[email protected]"
]
| |
f4f635b18dc05411dfc2d9abc7e11b209f9c777c | ef998914b19f34f8f85cd784b6470147d73a93e0 | /job_task/Value_Pitch_Assingment/scrapy-selenium-splash-python/SCI/spiders/scrapy_sel.py | 95feaebe678a6566ab3909da624ae97e22af5941 | []
| no_license | dilipksahu/Web_Scraping_Using_Python | 9f586704a779fda551c5e3d15902591648663870 | e6d3031f89b1b5d4bfa0377c11e87037772c460d | refs/heads/master | 2023-01-30T14:36:00.702440 | 2020-12-17T16:10:25 | 2020-12-17T16:10:25 | 316,200,913 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,433 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy.selector import Selector
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from shutil import which
class CoinSpiderSelenium(scrapy.Spider):
name = 'case_status'
allowed_domains = ['www.main.sci.gov.in']
start_urls = [
'http://main.sci.gov.in/case-status'
]
def __init__(self):
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_path = which("chromedriver")
driver = webdriver.Chrome(executable_path=chrome_path, options=chrome_options)
driver.set_window_size(1920, 1080)
driver.implicitly_wait(0.5)
driver.get("https://main.sci.gov.in/case-status")
captcha_text = driver.find_element_by_xpath("(//font)[4]").text
print("Captcha===========>",captcha_text)
captcha_input = driver.find_element_by_id("ansCaptcha")
captcha_input.send_keys(captcha_text)
diary_input = driver.find_element_by_xpath("//input[@id='CaseDiaryNumber']")
diary_input.send_keys("1")
year_input = driver.find_element_by_xpath("//select[@id='CaseDiaryYear']")
year_input.send_keys(f"2020")
submit_btn = driver.find_element_by_xpath("//input[@id='getCaseDiary']")
submit_btn.click()
self.html = driver.page_source
driver.close()
def parse(self, response):
resp = Selector(text=self.html)
print(resp)
case_details = resp.xpath("(//table)[3]/tbody/tr")
for case_detail in case_details:
print(case_detail)
yield {
'Diary No.': case_detail.xpath(".//td[2]/div/font/text()").get(),
'Case No.': case_detail.xpath(".//td[2]/div/text()").get(),
'Present/Last Listed On': case_detail.xpath(".//td[2]/b/font/text()").get(),
'Status/Stage': case_detail.xpath(".//td[2]/font/text()").get(),
'Category': case_detail.xpath(".//td[2]/text()").get(),
'Act': case_detail.xpath(".//td[2]/text()").get(),
'Petitioner(s)': case_detail.xpath(".//td[2]/p/text()").get(),
'Respondent(s)': case_detail.xpath(".//td[2]/p/text()").get(),
'Pet. Advocate(s)': case_detail.xpath(".//td[2]/p/text()").get(),
} | [
"[email protected]"
]
| |
2c6e39a7ee4093454d57327e3b3e54a5024a56b0 | c34cd98b9126cc3b03add40bd6464a7a76270088 | /PythonTutorial/77PythonRegEx.py | fc53970f18d24d81fe19d49afa97f75a2b49d081 | []
| no_license | riyadhswe/pythonw3schools | 749e1e53e55c35cbd36f0eb08dcbbab80d40b245 | 35f4008a612c931784fb9d5fef1262f7d41e9764 | refs/heads/master | 2023-03-05T11:24:16.304462 | 2021-02-14T16:01:36 | 2021-02-14T16:01:36 | 324,746,990 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | import re
txt = "The rain in Spain"
x = re.search("^The.*Spain$", txt)
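# re.search returns a match object here because the string starts with "The" and ends
# with "Spain"; it would return None if the pattern did not match. The exact printed
# repr varies by Python version.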
print(x) | [
"[email protected]"
]
| |
ff10e9f2710cb3dc53121976e7b8d53856854d66 | 3784495ba55d26e22302a803861c4ba197fd82c7 | /venv/lib/python3.6/site-packages/nltk/ccg/logic.py | b89bea9d8a08b7fccd7c58dfcdcb431f27dcecea | [
"MIT"
]
| permissive | databill86/HyperFoods | cf7c31f5a6eb5c0d0ddb250fd045ca68eb5e0789 | 9267937c8c70fd84017c0f153c241d2686a356dd | refs/heads/master | 2021-01-06T17:08:48.736498 | 2020-02-11T05:02:18 | 2020-02-11T05:02:18 | 241,407,659 | 3 | 0 | MIT | 2020-02-18T16:15:48 | 2020-02-18T16:15:47 | null | UTF-8 | Python | false | false | 1,806 | py | # Natural Language Toolkit: Combinatory Categorial Grammar
#
# Copyright (C) 2001-2019 NLTK Project
# Author: Tanin Na Nakorn (@tanin)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Helper functions for CCG semantics computation
"""
from nltk.sem.logic import *
def compute_type_raised_semantics(semantics):
core = semantics
parent = None
while isinstance(core, LambdaExpression):
parent = core
core = core.term
var = Variable("F")
while var in core.free():
var = unique_variable(pattern=var)
core = ApplicationExpression(FunctionVariableExpression(var), core)
if parent is not None:
parent.term = core
else:
semantics = core
return LambdaExpression(var, semantics)
def compute_function_semantics(function, argument):
return ApplicationExpression(function, argument).simplify()
def compute_composition_semantics(function, argument):
assert isinstance(argument, LambdaExpression), (
"`" + str(argument) + "` must be a lambda expression"
)
return LambdaExpression(
argument.variable, ApplicationExpression(function, argument.term).simplify()
)
def compute_substitution_semantics(function, argument):
assert isinstance(function, LambdaExpression) and isinstance(
function.term, LambdaExpression
), ("`" + str(function) + "` must be a lambda expression with 2 arguments")
assert isinstance(argument, LambdaExpression), (
"`" + str(argument) + "` must be a lambda expression"
)
new_argument = ApplicationExpression(
argument, VariableExpression(function.variable)
).simplify()
new_term = ApplicationExpression(function.term, new_argument).simplify()
return LambdaExpression(function.variable, new_term)
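# Example (illustrative only; assumes the expressions are parsed with
# nltk.sem.logic.Expression.fromstring): composing \x.F(x) with \y.G(y)
# should simplify to \y.F(G(y)).
if __name__ == '__main__':
    from nltk.sem.logic import Expression
    f = Expression.fromstring(r'\x.F(x)')
    g = Expression.fromstring(r'\y.G(y)')
    print(compute_function_semantics(f, Expression.fromstring('a')))  # -> F(a)
    print(compute_composition_semantics(f, g))  # -> \y.F(G(y))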
| [
"[email protected]"
]
| |
c128b869ed0e8a8ddf5844d2ad95701ad963f881 | 8090fe014aad86878636a6d8a1ccc59a81d5d6d0 | /EPROM_Emulator/directory_node.py | 7ecec3164461e4eaf8db92ae08b089e586c381d8 | [
"MIT"
]
| permissive | hubs2545/Adafruit_Learning_System_Guides | ad5593027a9403f48c3b9fc820ba73e7ea2bd5af | 1f743401b2dc18724d867a31aaf49273abd5ec0d | refs/heads/master | 2020-03-18T17:11:40.107775 | 2018-05-25T20:14:02 | 2018-05-25T20:14:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,330 | py | """
The MIT License (MIT)
Copyright (c) 2018 Dave Astels
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
--------------------------------------------------------------------------------
Manage a directory in the file system.
"""
import os
class DirectoryNode(object):
"""Display and navigate the SD card contents"""
def __init__(self, display, parent=None, name="/"):
"""Initialize a new instance.
        :param adafruit_ssd1306.SSD1306 display: the OLED instance to display on
        :param DirectoryNode parent: optional parent directory node
        :param string name: the optional name of the new node
"""
self.display = display
self.parent = parent
self.name = name
self.files = []
self.top_offset = 0
self.old_top_offset = -1
self.selected_offset = 0
self.old_selected_offset = -1
def __cleanup(self):
"""Dereference things for speedy gc."""
self.display = None
self.parent = None
self.name = None
self.files = None
return self
def __is_dir(self, path):
"""Determine whether a path identifies a machine code bin file.
:param string path: path of the file to check
"""
if path[-2:] == "..":
return False
try:
os.listdir(path)
return True
except OSError:
return False
def __sanitize(self, name):
"""Nondestructively strip off a trailing slash, if any, and return the result.
:param string name: the filename
"""
if name[-1] == "/":
return name[:-1]
return name
def __path(self):
"""Return the result of recursively follow the parent links, building a full
path to this directory."""
if self.parent:
return self.parent.__path() + os.sep + self.__sanitize(self.name)
return self.__sanitize(self.name)
def __make_path(self, filename):
"""Return a full path to the specified file in this directory.
:param string filename: the name of the file in this directory
"""
return self.__path() + os.sep + filename
def __number_of_files(self):
"""The number of files in this directory, including the ".." for the parent
directory if this isn't the top directory on the SD card."""
self.__get_files()
return len(self.files)
def __get_files(self):
"""Return a list of the files in this directory.
If this is not the top directory on the SD card, a ".." entry is the first element.
Any directories have a slash appended to their name."""
if len(self.files) == 0:
self.files = os.listdir(self.__path())
self.files.sort()
if self.parent:
self.files.insert(0, "..")
for index, name in enumerate(self.files, start=1):
if self.__is_dir(self.__make_path(name)):
self.files[index] = name + "/"
def __update_display(self):
"""Update the displayed list of files if required."""
if self.top_offset != self.old_top_offset:
self.__get_files()
self.display.fill(0)
for i in range(self.top_offset, min(self.top_offset + 4, self.__number_of_files())):
self.display.text(self.files[i], 10, (i - self.top_offset) * 8)
self.display.show()
self.old_top_offset = self.top_offset
def __update_selection(self):
"""Update the selected file lighlight if required."""
if self.selected_offset != self.old_selected_offset:
if self.old_selected_offset > -1:
self.display.text(">", 0, (self.old_selected_offset - self.top_offset) * 8, 0)
self.display.text(">", 0, (self.selected_offset - self.top_offset) * 8, 1)
self.display.show()
self.old_selected_offset = self.selected_offset
def __is_directory_name(self, filename):
"""Is a filename the name of a directory.
:param string filename: the name of the file
"""
return filename[-1] == '/'
@property
def selected_filename(self):
"""The name of the currently selected file in this directory."""
self.__get_files()
return self.files[self.selected_offset]
@property
def selected_filepath(self):
"""The full path of the currently selected file in this directory."""
return self.__make_path(self.selected_filename)
def force_update(self):
"""Force an update of the file list and selected file highlight."""
self.old_selected_offset = -1
self.old_top_offset = -1
self.__update_display()
self.__update_selection()
def down(self):
"""Move down in the file list if possible, adjusting the selected file indicator
and scrolling the display as required."""
if self.selected_offset < self.__number_of_files() - 1:
self.selected_offset += 1
if self.selected_offset == self.top_offset + 4:
self.top_offset += 1
self.__update_display()
self.__update_selection()
def up(self):
"""Move up in the file list if possible, adjusting the selected file indicator
and scrolling the display as required."""
if self.selected_offset > 0:
self.selected_offset -= 1
if self.selected_offset < self.top_offset:
self.top_offset -= 1
self.__update_display()
self.__update_selection()
def click(self):
"""Handle a selection and return the new current directory.
If the selected file is the parent, i.e. "..", return to the parent directory.
If the selected file is a directory, go into it."""
if self.selected_filename == "..":
if self.parent:
p = self.parent
p.force_update()
self.__cleanup()
return p
elif self.__is_directory_name(self.selected_filename):
new_node = DirectoryNode(self.display, self, self.selected_filename)
new_node.force_update()
return new_node
return self
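# Example usage sketch (illustrative only; assumes `oled` is an initialized
# adafruit_ssd1306.SSD1306 display and the SD card is mounted at "/sd"):
#
#     current = DirectoryNode(oled, name="/sd")
#     current.force_update()
#     # in the main loop, wire buttons to navigation:
#     current.down()                 # move the highlight down one entry
#     current.up()                   # move the highlight up one entry
#     current = current.click()      # enter a directory or pop back to the parent
#     chosen = current.selected_filepath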
| [
"[email protected]"
]
| |
7ec35b7afd7de673969f2d0e09a32e4e2ed7e5be | ceead28beb1ea6cb56a2bb4472bc1d2396b39e6f | /gen_basis_helpers/lammps_interface/unit_tests/utests_misc_objs.py | 3b444bb7d9e73c210ba707c582f3bce01cb96999 | []
| no_license | RFogarty1/plato_gen_basis_helpers | 9df975d4198bff7bef80316527a8086b6819d8ab | 8469a51c1580b923ca35a56811e92c065b424d68 | refs/heads/master | 2022-06-02T11:01:37.759276 | 2022-05-11T12:57:40 | 2022-05-11T12:57:40 | 192,934,403 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,448 | py |
import collections
import unittest
import unittest.mock as mock
import gen_basis_helpers.lammps_interface.misc_objs as tCode
class TestNVTEnsemble(unittest.TestCase):
def setUp(self):
self.startTemp = 300
self.finalTemp = 500
self.dampTime = 200
self.thermostat = "Nose-Hoover"
self.numbFmt = "{:.1f}"
self.createTestObjs()
def createTestObjs(self):
currKwargs = {"thermostat":self.thermostat, "endTemp":self.finalTemp, "dampTime":self.dampTime,
"numbFmt":self.numbFmt}
self.testObjA = tCode.NVTEnsemble(self.startTemp, **currKwargs)
def testExpStrFromSimpleOptsA(self):
expStr = "all nvt temp 300.0 500.0 200.0"
actStr = self.testObjA.fixStr
self.assertEqual(expStr, actStr)
def testRaisesIfDampTimeNotSet(self):
self.dampTime = None
self.createTestObjs()
with self.assertRaises(ValueError):
self.testObjA.fixStr
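# Note: the expected strings in these tests mirror LAMMPS "fix" arguments,
# e.g. "all nvt temp Tstart Tstop Tdamp" for a Nose-Hoover NVT thermostat.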
class TestNPTEnsemble(unittest.TestCase):
def setUp(self):
self.startTemp = 300
self.finalTemp = 500
self.dampTimeTemp = 200
self.pressureDims = "z"
self.startPressure = 10
self.endPressure = 20
self.dampTimePressure = 2000
self.numbFmtTemp = "{:.2f}"
self.numbFmtPressure = "{:.2f}"
self.numbFmtTime = "{:.2f}"
self.numbFmtAll = "{:.2f}"
self.createTestObjs()
def createTestObjs(self):
kwargDict = {"pressureDims":self.pressureDims, "endTemp":self.finalTemp,
"endPressure":self.endPressure, "dampTimeTemp":self.dampTimeTemp,
"dampTimePressure":self.dampTimePressure, "numbFmtTime":self.numbFmtTime,
"numbFmtPressure":self.numbFmtPressure, "numbFmtTemp":self.numbFmtTemp}
self.testObjA = tCode.NPTEnsembleNoseHooverStandard(self.startTemp, self.startPressure, **kwargDict)
def testExpStrFromSimpleOptsA(self):
expStr = "all npt temp 300.00 500.00 200.00 z 10.00 20.00 2000.00"
actStr = self.testObjA.fixStr
self.assertEqual(expStr, actStr)
def testRaiseIfPressureDampTimeNotSet(self):
self.dampTimePressure = None
self.createTestObjs()
with self.assertRaises(ValueError):
self.testObjA.fixStr
def testRaisesIfTempDampTimeNotSet(self):
self.dampTimeTemp = None
self.createTestObjs()
with self.assertRaises(ValueError):
self.testObjA.fixStr
class TestVelocityRescalingObj(unittest.TestCase):
def setUp(self):
self.groupId = "groupA"
self.nEvery = 5
self.tempStart = 340
self.tempEnd = None
self.maxDeviation = 50
self.fraction = 1
self.createTestObjs()
def createTestObjs(self):
currKwargs = {"groupId":self.groupId, "nEvery":self.nEvery, "tempStart":self.tempStart,
"tempEnd":self.tempEnd, "maxDeviation":self.maxDeviation, "fraction":self.fraction}
self.testObjA = tCode.RescaleVelocitiesSimple(**currKwargs)
def testExpectedA(self):
expStr = "groupA temp/rescale 5 340.00 340.00 50.00 1.00"
actStr = self.testObjA.fixStr
self.assertEqual(expStr, actStr)
class TestCombChargeEquilibrationObj(unittest.TestCase):
def setUp(self):
self.groupId = "Mg"
self.nEvery = 75
self.precision = 1e-3
self.createTestObjs()
def createTestObjs(self):
kwargDict = {"groupId":self.groupId, "nEvery":self.nEvery, "precision":self.precision}
self.testObjA = tCode.CombChargeNeutralisationOpts(**kwargDict)
def testExpStrFromSimpleOptsA(self):
expStr = "Mg qeq/comb 75 0.00100"
actStr = self.testObjA.fixStr
self.assertEqual(expStr,actStr)
class TestAtomGroupByTypesObj(unittest.TestCase):
def setUp(self):
self.groupId = "water"
self.atomTypes = [1,2]
self.createTestObjs()
def createTestObjs(self):
currArgs = [self.groupId, self.atomTypes]
self.testObjA = tCode.AtomGroupByType(*currArgs)
def testExpStrFromSimpleOptsA(self):
expStr = "water type 1 2"
actStr = self.testObjA.groupStr
self.assertEqual(expStr,actStr)
class TestCreateVelocityObj(unittest.TestCase):
def setUp(self):
self.temp = 400
self.seed = 300
self.dist = "fake_dist"
self.group = "fake_group"
self.createTestObjs()
def createTestObjs(self):
currKwargs = {"seed":self.seed, "group":self.group, "dist":self.dist}
self.testObjA = tCode.VelocityCreateCommObj(self.temp, **currKwargs)
def testExpectedStrFromSimpleOptsA(self):
expStr = "fake_group create 400.0 300 dist fake_dist"
actStr = self.testObjA.commandStr
self.assertEqual(expStr,actStr)
class TestDumpObjStandard(unittest.TestCase):
def setUp(self):
self.everyNSteps = 30
self.groupId = "fake_group"
self.dumpType = "atom"
self.fileExt = "lammpstrj"
self.scale = True
self.createTestObjs()
def createTestObjs(self):
currKwargs = {"groupId":self.groupId, "dumpType":self.dumpType, "fileExt":self.fileExt,
"scale":self.scale}
self.testObjA = tCode.DumpCommObjStandard(self.everyNSteps, **currKwargs)
def testExpectedDictFromSimpleOptsA(self):
currArgs = [ ["dump","myDump fake_group atom 30 dump.lammpstrj"],
["dump_modify", "myDump scale yes"] ]
expDict = collections.OrderedDict(currArgs)
actDict = self.testObjA.commandDict
self.assertEqual(expDict,actDict)
class TestReflectiveWallFaceObj(unittest.TestCase):
def setUp(self):
self.face = "xlo"
self.groupId = "fake_group"
self.createTestObjs()
def createTestObjs(self):
self.testObjA = tCode.ReflectiveWallFace(self.face, groupId=self.groupId)
def testExpectedFixCommandFromDictA(self):
expStr = "fake_group wall/reflect xlo EDGE"
actStr = self.testObjA.fixStr
self.assertEqual(expStr,actStr)
| [
"[email protected]"
]
| |
6aab3ee55ffd45abd7f2608eec839c1371b7d460 | a702fb476539272b78328f64a3a49c1012ac3ed4 | /django_slack/utils.py | ee02e313e45081ff2fab63a801ba9d3356d3ca78 | [
"BSD-3-Clause"
]
| permissive | lamby/django-slack | 7f8dea40e5b3cad93f2e207b2815327993743774 | 5b92410fadc1a91b9415c0991f0ff2547cd633c7 | refs/heads/master | 2023-03-10T23:54:17.789584 | 2023-03-02T08:07:43 | 2023-03-02T08:07:43 | 27,838,503 | 250 | 88 | BSD-3-Clause | 2023-03-01T15:08:41 | 2014-12-10T20:34:18 | Python | UTF-8 | Python | false | false | 1,626 | py | import json
from django.utils.module_loading import import_string
from .exceptions import LABEL_TO_EXCEPTION, SlackException
from .app_settings import app_settings
class Backend(object):
def send(self, url, message_data):
raise NotImplementedError()
def validate(self, content_type, content, message_data):
if content_type.startswith('application/json'):
result = json.loads(content)
if not result['ok']:
klass = LABEL_TO_EXCEPTION.get(result['error'], SlackException)
raise klass(result['error'], message_data)
return result
elif content != 'ok':
raise SlackException(content, message_data)
return content
def get_backend(name=None):
"""
Wrap the backend in a function to not load it at import time.
``get_backend()`` caches the backend on first call.
:param name: optional string name for backend, otherwise take from settings
:type name: str or unicode or None
"""
# Function's backend is initially NoneType on module import (see below)
loaded_backend = get_backend.backend
# Load the backend if we have a provided name, or if this function's
# backend is still NoneType
if name or not loaded_backend:
loaded_backend = import_string(name or app_settings.BACKEND)()
# If the backend hasn't been cached yet, and we didn't get a custom
# name passed in, cache the backend
if not (get_backend.backend or name):
get_backend.backend = loaded_backend
return loaded_backend
get_backend.backend = None
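# Example usage sketch (illustrative only; the dotted path below is a
# placeholder, not a backend shipped with this package):
#
#     backend = get_backend()                             # from settings, cached
#     backend = get_backend('myproject.slack.MyBackend')  # explicit name, never cached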
| [
"[email protected]"
]
| |
4b921124dd541cdc0e677d64df53898e563848b5 | 00fe1823bbadc9300e4fec42ca1d12dfbd4bcde9 | /Dictionary/11.py | d7986a4e30747730ee497810d2d9c9568de2029b | []
| no_license | monteua/Python | 6b36eb01959f34ccaa2bb9044e2e660383ed7695 | 64b6154d9f59e1e2dbe033e5b9f246734b7d4064 | refs/heads/master | 2020-07-05T07:29:51.250343 | 2018-02-09T11:09:51 | 2018-02-09T11:09:51 | 74,122,698 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | '''
Write a Python program to multiply all the items in a dictionary.
'''
dict1 = {1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49}
print ("Old dictionary:", dict1)
for key, value in dict1.items():
dict1[key] = value * 10
print ("New dictionary:", dict1) | [
"[email protected]"
]
| |
ce50ec568accdb678d6f38382a3e15cb9984e4ee | fc678a0a5ede80f593a29ea8f43911236ed1b862 | /380-InsertDeleteGetRandomO(1).py | 7f654b9e925fa8143a6b015241c31f51b8b8f50a | []
| no_license | dq-code/leetcode | 4be0b1b154f8467aa0c07e08b5e0b6bd93863e62 | 14dcf9029486283b5e4685d95ebfe9979ade03c3 | refs/heads/master | 2020-12-13T15:57:30.171516 | 2017-11-07T17:43:19 | 2017-11-07T17:43:19 | 35,846,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py | import random
class RandomizedSet(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.map = dict()
self.size = 0
def insert(self, val):
"""
Inserts a value to the set. Returns true if the set did not already contain the specified element.
:type val: int
:rtype: bool
"""
if val not in self.map:
self.size += 1
            self.map[val] = self.size
return True
return False
def remove(self, val):
"""
Removes a value from the set. Returns true if the set contained the specified element.
:type val: int
:rtype: bool
"""
if val in self.map:
del self.map[val]
self.size -= 1
return True
return False
def getRandom(self):
"""
Get a random element from the set.
:rtype: int
"""
        # list(...) so random.choice works on Python 3, where keys() is a view
        return random.choice(list(self.map))
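# Alternative sketch (illustrative only): the usual way to make getRandom
# average O(1) as well is to pair a list of values with a dict mapping each
# value to its index in that list.
class RandomizedSetListDict(object):
    def __init__(self):
        self.values = []   # elements, supports O(1) random indexing
        self.index = {}    # value -> position of that value in self.values
    def insert(self, val):
        if val in self.index:
            return False
        self.index[val] = len(self.values)
        self.values.append(val)
        return True
    def remove(self, val):
        if val not in self.index:
            return False
        pos = self.index.pop(val)
        last = self.values.pop()
        if pos < len(self.values):
            self.values[pos] = last   # move the old last element into the freed slot
            self.index[last] = pos
        return True
    def getRandom(self):
        return random.choice(self.values)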
# Your RandomizedSet object will be instantiated and called as such:
# obj = RandomizedSet()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom() | [
"[email protected]"
]
| |
766be0bacab93658e605892e12eb75d5ac244ee7 | 0c656371d4d38b435afb7e870c719fe8bed63764 | /levels/migrations/0003_space_variety.py | 1efda5ef876ba0f4c06e864425969a5e529e9b6e | []
| no_license | enias-oliveira/parking-lot | cfbe5ca91dcb949e60f04cd26137260f5a9dba36 | a54f5ca7b7d78bfc1a9b3c389729da14899d4048 | refs/heads/master | 2023-05-07T08:48:59.288672 | 2021-05-18T19:52:46 | 2021-05-18T19:52:46 | 369,363,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | # Generated by Django 3.2.2 on 2021-05-13 21:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('levels', '0002_rename_motorcylce_spaces_level_motorcycle_spaces'),
]
operations = [
migrations.AddField(
model_name='space',
name='variety',
field=models.CharField(choices=[('car', 'car'), ('motorcycle', 'motorcycle')], default='car', max_length=255),
preserve_default=False,
),
]
| [
"[email protected]"
]
| |
424ca5b9877e60a4512eeca85195255714fa43eb | bc6b561958649c391c159d4dd3363c60eeabc7e4 | /mayan/apps/documents/migrations/0051_documentpage_enabled.py | 5835be769bcd26e022c28f01b95c5af6790dcfc5 | [
"Apache-2.0"
]
| permissive | chrisranjana/Mayan-EDMS | 37deb105cda268768fea502491ae875ff905e0e9 | 34b414ce49a2eb156e27dc1a2915e52121c9d1b7 | refs/heads/master | 2020-12-22T13:50:41.263625 | 2020-01-28T18:45:24 | 2020-01-28T18:45:24 | 236,804,825 | 0 | 1 | NOASSERTION | 2020-01-28T18:12:53 | 2020-01-28T18:12:52 | null | UTF-8 | Python | false | false | 411 | py | from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('documents', '0050_auto_20190725_0451'),
]
operations = [
migrations.AddField(
model_name='documentpage',
name='enabled',
field=models.BooleanField(default=True, verbose_name='Enabled'),
),
]
| [
"[email protected]"
]
| |
277ea298bd69c7999327b07b0015aa1e11d93b0e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03402/s642521958.py | 7fcbd198121b2c295ef6f8a6e34080150121ad63 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | a,b = map(int, input().split())
# 100x100 grid: left half white ('.'), right half black ('#'), i.e. one white
# and one black connected component to start from.
ans = [['.' for i in range(50)]+['#' for i in range(50)]for j in range(100)]
# Carve a-1 isolated white cells inside the black half, on every other row and
# column so they never touch each other or the white half.
x,y = 0,51
while a>1:
    ans[x][y] = '.'
    y+=2
    if y>99:
        x+=2
        y=51
    a-=1
# Likewise carve b-1 isolated black cells inside the white half.
x,y = 0,0
while b>1:
    ans[x][y] = '#'
    y+=2
    if y>48:
        x+=2
        y=0
    b-=1
print(100,100)
for i in range(100):
print(''.join(ans[i]))
| [
"[email protected]"
]
| |
840e0ebac008e76101216598b6ca7a9eb40a58eb | 76f59c245744e468577a293a0b9b078f064acf07 | /287.find-the-duplicate-number.py | 0d410826d8b60f50737d2b511468b0a6a9a88f79 | []
| no_license | satoshun-algorithm-example/leetcode | c3774f07e653cf58640a6e7239705e58c5abde82 | 16b39e903755dea86f9a4f16df187bb8bbf835c5 | refs/heads/master | 2020-07-01T10:24:05.343283 | 2020-01-13T03:27:27 | 2020-01-13T03:27:27 | 201,144,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | #
# @lc app=leetcode id=287 lang=python3
#
# [287] Find the Duplicate Number
#
from typing import List
class Solution:
def findDuplicate(self, nums: List[int]) -> int:
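        # Binary search over the value range [1, n], not over the array:
        # count the entries that fall in (mid, high]; if that count exceeds
        # the number of distinct values in the interval (high - mid), the
        # duplicate must lie in (mid, high], otherwise it lies in (low, mid].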
low = 0
high = len(nums) - 1
mid = (high + low) // 2
while high - low > 1:
count = 0
for num in nums:
if mid < num <= high:
count += 1
if high - mid >= count:
high = mid
else:
low = mid
mid = (high + low) // 2
return high
| [
"[email protected]"
]
| |
05cd52ca349b42d73c8bb6552629c6c5a911bb15 | 20044db9ab2c773cc80caa4a5a1175ee8148269d | /test/test_rma_nb.py | 3498e98c2a687e11e0ac02e8076670280d7038cf | []
| no_license | senganal/mpi4py | a4e5dbf2d7e6cf9b6eb783f1e5f1c523326d667f | 28a2e95506844a32efb6b14238ca60173fe7c5a2 | refs/heads/master | 2021-01-13T08:02:43.439643 | 2017-06-16T15:03:44 | 2017-06-16T15:03:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,219 | py | from mpi4py import MPI
import mpiunittest as unittest
import arrayimpl
def mkzeros(n):
import sys
if not hasattr(sys, 'pypy_version_info'):
return bytearray(n)
return b'\0' * n
def memzero(m):
try:
m[:] = 0
except IndexError: # cffi buffer
m[0:len(m)] = b'\0'*len(m)
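# These tests exercise MPI-3 request-based RMA: Rput/Rget/Raccumulate return
# an MPI.Request that has to be completed (e.g. with Wait) inside the epoch.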
class BaseTestRMA(object):
COMM = MPI.COMM_NULL
INFO = MPI.INFO_NULL
COUNT_MIN = 0
def setUp(self):
nbytes = 100*MPI.DOUBLE.size
try:
self.mpi_memory = MPI.Alloc_mem(nbytes)
self.memory = self.mpi_memory
memzero(self.memory)
except MPI.Exception:
from array import array
self.mpi_memory = None
self.memory = array('B',[0]*nbytes)
self.WIN = MPI.Win.Create(self.memory, 1, self.INFO, self.COMM)
def tearDown(self):
self.WIN.Free()
if self.mpi_memory:
MPI.Free_mem(self.mpi_memory)
def testPutGet(self):
group = self.WIN.Get_group()
size = group.Get_size()
group.Free()
for array in arrayimpl.ArrayTypes:
for typecode in arrayimpl.TypeMap:
for count in range(self.COUNT_MIN, 10):
for rank in range(size):
sbuf = array([rank]*count, typecode)
rbuf = array(-1, typecode, count+1)
self.WIN.Fence()
self.WIN.Lock(rank)
r = self.WIN.Rput(sbuf.as_mpi(), rank)
r.Wait()
self.WIN.Flush(rank)
r = self.WIN.Rget(rbuf.as_mpi_c(count), rank)
r.Wait()
self.WIN.Unlock(rank)
for i in range(count):
self.assertEqual(sbuf[i], rank)
self.assertEqual(rbuf[i], rank)
self.assertEqual(rbuf[-1], -1)
def testAccumulate(self):
group = self.WIN.Get_group()
size = group.Get_size()
group.Free()
for array in arrayimpl.ArrayTypes:
for typecode in arrayimpl.TypeMap:
for count in range(self.COUNT_MIN, 10):
for rank in range(size):
ones = array([1]*count, typecode)
sbuf = array(range(count), typecode)
rbuf = array(-1, typecode, count+1)
for op in (MPI.SUM, MPI.PROD,
MPI.MAX, MPI.MIN,
MPI.REPLACE):
self.WIN.Lock(rank)
self.WIN.Put(ones.as_mpi(), rank)
self.WIN.Flush(rank)
r = self.WIN.Raccumulate(sbuf.as_mpi(),
rank, op=op)
r.Wait()
self.WIN.Flush(rank)
r = self.WIN.Rget(rbuf.as_mpi_c(count), rank)
r.Wait()
self.WIN.Unlock(rank)
#
for i in range(count):
self.assertEqual(sbuf[i], i)
self.assertEqual(rbuf[i], op(1, i))
self.assertEqual(rbuf[-1], -1)
def testGetAccumulate(self):
group = self.WIN.Get_group()
size = group.Get_size()
group.Free()
for array in arrayimpl.ArrayTypes:
for typecode in arrayimpl.TypeMap:
for count in range(self.COUNT_MIN, 10):
for rank in range(size):
ones = array([1]*count, typecode)
sbuf = array(range(count), typecode)
rbuf = array(-1, typecode, count+1)
gbuf = array(-1, typecode, count+1)
for op in (MPI.SUM, MPI.PROD,
MPI.MAX, MPI.MIN,
MPI.REPLACE, MPI.NO_OP):
self.WIN.Lock(rank)
self.WIN.Put(ones.as_mpi(), rank)
self.WIN.Flush(rank)
r = self.WIN.Rget_accumulate(sbuf.as_mpi(),
rbuf.as_mpi_c(count),
rank, op=op)
r.Wait()
self.WIN.Flush(rank)
r = self.WIN.Rget(gbuf.as_mpi_c(count), rank)
r.Wait()
self.WIN.Unlock(rank)
#
for i in range(count):
self.assertEqual(sbuf[i], i)
self.assertEqual(rbuf[i], 1)
self.assertEqual(gbuf[i], op(1, i))
self.assertEqual(rbuf[-1], -1)
self.assertEqual(gbuf[-1], -1)
def testPutProcNull(self):
rank = self.COMM.Get_rank()
self.WIN.Lock(rank)
r = self.WIN.Rput(None, MPI.PROC_NULL, None)
r.Wait()
self.WIN.Unlock(rank)
def testGetProcNull(self):
rank = self.COMM.Get_rank()
self.WIN.Lock(rank)
r = self.WIN.Rget(None, MPI.PROC_NULL, None)
r.Wait()
self.WIN.Unlock(rank)
def testAccumulateProcNullReplace(self):
rank = self.COMM.Get_rank()
zeros = mkzeros(8)
self.WIN.Lock(rank)
r = self.WIN.Raccumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.REPLACE)
r.Wait()
r = self.WIN.Raccumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.REPLACE)
r.Wait()
self.WIN.Unlock(rank)
def testAccumulateProcNullSum(self):
rank = self.COMM.Get_rank()
zeros = mkzeros(8)
self.WIN.Lock(rank)
r = self.WIN.Raccumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.SUM)
r.Wait()
r = self.WIN.Raccumulate([None, MPI.INT], MPI.PROC_NULL, None, MPI.SUM)
r.Wait()
self.WIN.Unlock(rank)
class TestRMASelf(BaseTestRMA, unittest.TestCase):
COMM = MPI.COMM_SELF
class TestRMAWorld(BaseTestRMA, unittest.TestCase):
COMM = MPI.COMM_WORLD
try:
MPI.Win.Create(None, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free()
except NotImplementedError:
del TestRMASelf, TestRMAWorld
else:
name, version = MPI.get_vendor()
if name == 'Open MPI':
if version[:2] == (1,10):
def SKIP(*t, **k): pass
TestRMAWorld.testAccumulate = SKIP
TestRMAWorld.testGetAccumulate = SKIP
if version < (1,8,1):
del TestRMASelf, TestRMAWorld
elif name == 'MPICH2':
if version < (1,5,0):
del TestRMASelf, TestRMAWorld
elif version >= (2,0,0) and MPI.VERSION < 3: # Intel MPI
del TestRMASelf, TestRMAWorld
elif MPI.Get_version() < (3,0):
del TestRMASelf, TestRMAWorld
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
df01f0fb63ebfc8068c07b21de2c887bd683c017 | d697c1d45e96bd440be9c17ab14243a5882b1f52 | /qianfeng/常用模块/urllib/4-goujianheader.py | 3b39a5b80031109ac0fc8fb0b27c5a333fcabcab | []
| no_license | ithjl521/python | 9eeda2e60dda97ee36e8764c06400eb12818689f | f4fe50799501c483cb64445fd05ee0f30f56576c | refs/heads/master | 2020-07-12T23:10:53.608276 | 2019-11-08T08:59:35 | 2019-11-08T08:59:35 | 204,931,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import urllib.request
import urllib.parse
url = 'http://www.baidu.com'
# response = urllib.request.urlopen(url)
# print(response.read().decode())
# Spoof the request headers ourselves
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"
}
# Build the request object
request = urllib.request.Request(url=url,headers=headers)
# Send the request
response = urllib.request.urlopen(request)
print(response.read())
| [
"[email protected]"
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.