blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 3-616) | content_id (stringlengths 40-40) | detected_licenses (listlengths 0-112) | license_type (stringclasses 2) | repo_name (stringlengths 5-115) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses 777) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (stringclasses 22) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (stringclasses 149) | src_encoding (stringclasses 26) | language (stringclasses 1) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3 to 10.2M) | extension (stringclasses 188) | content (stringlengths 3-10.2M) | authors (listlengths 1-1) | author_id (stringlengths 1-132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9117f9f2cce95c3f9c960a40127f7cde6384a932 | d21864a26233d32913c44fd87d6f6e67ca9aabd8 | /prosodic/lib/Phoneme.py | 876171217cb508068e7a472fe4fc487bf116ba6c | [
"MIT"
]
| permissive | quadrismegistus/litlab-poetry | 7721a8849667f2130bb6fa6b9f18a7f6beb9912e | 28fff4c73344ed95d19d7e9a14e5a20697599605 | refs/heads/master | 2021-01-23T20:14:05.537155 | 2018-11-19T08:56:55 | 2018-11-19T08:56:55 | 27,054,260 | 16 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,454 | py |
from ipa import ipa,ipakey,ipa2cmu,formantd
from entity import entity
class Phoneme(entity):
def __init__(self,phons,ipalookup=True):
self.feats = {}
self.children = [] # should remain empty unless dipthong
self.featpaths={}
self.phon=None
if type(phons)==type([]):
for phon in phons:
if type(phon)==type(""):
self.children.append(Phoneme(phon))
else:
self.children.append(phon)
self.feat('dipthong',True)
else:
self.phon=phons.strip()
if ipalookup and self.phon:
if(self.phon in ipa):
k=-1
for v in ipa[self.phon]:
k+=1
self.feat(ipakey[k],v)
self.finished = True
if self.isLong() or self.isDipthong():
self.len=2
else:
self.len=1
def str_cmu(self):
strself=str(self)
if strself in ipa2cmu:
return ipa2cmu[strself].lower()
else:
print "<error> no cmu transcription for phoneme: ["+strself+"]"
return strself
def __str__(self):
if self.children:
return self.u2s(u"".join([x.phon for x in self.children]))
else:
return self.u2s(self.phon)
def __repr__(self):
#return "["+str(self)+"]"
return str(self)
def isConsonant(self):
return self.feature('cons')
def isVowel(self):
return (self.isDipthong() or self.isPeak())
def isPeak(self):
return self.feature('syll')
def isDipthong(self):
return self.feature('dipthong')
def isLong(self):
return self.feature('long')
def isHigh(self):
return self.feature('high')
@property
def phon_str(self):
if self.phon: return self.phon
return u''.join(phon.phon for phon in self.children)
@property
def featset(self):
if self.children:
featset=set()
for child in self.children:
featset|=child.featset
return featset
else:
return {feat for feat in self.feats if self.feats[feat]}
@property
def featspace(self):
fs={}
if self.children:
for child in self.children:
#print "CHILD:",child,child.featset
for f,v in child.feats.items():
fs[f]=int(v) if v!=None else 0
else:
for f,v in self.feats.items():
fs[f]=int(v) if v!=None else 0
return fs
def CorV(self):
if self.isDipthong() or self.isLong():
return "VV"
if self.isPeak():
return "V"
else:
return "C"
def distance(self,other):
lfs1=[self.featspace] if not self.children else [c.featspace for c in self.children]
lfs2=[other.featspace] if not other.children else [c.featspace for c in other.children]
dists=[]
for fs1 in lfs1:
for fs2 in lfs2:
                allkeys = set(fs1.keys()) | set(fs2.keys())
f=sorted(list(allkeys))
v1=[float(fs1.get(fx,0)) for fx in f]
v2=[float(fs2.get(fx,0)) for fx in f]
from scipy.spatial import distance
dists+=[distance.euclidean(v1,v2)]
return sum(dists)/float(len(dists))
def distance0(self,other):
import math
feats1=self.featset
feats2=other.featset
jc=len(feats1&feats2) / float(len(feats1 | feats2))
vdists=[]
if not 'cons' in feats1 and not 'cons' in feats2:
## ADD VOWEL F1,F2 DIST
v1=[p for p in self.phon_str if p in formantd]
v2=[p for p in other.phon_str if p in formantd]
if not v1 or not v2:
vdists+=[2]
for v1x in v1:
for v2x in v2:
#print v1x,v2x
vdist=math.sqrt( (formantd[v1x][0] - formantd[v2x][0])**2 + (formantd[v1x][1] - formantd[v2x][1])**2)
#print "ADDING",vdist
vdists+=[vdist]
#print self,other,feats1,feats2
return jc + sum(vdists)
def __eq__(self,other):
        return self.feats == other.feats
| [
"[email protected]"
]
| |
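A minimal standalone sketch of the feature-vector comparison used in Phoneme.distance above; the feature dicts here are hypothetical stand-ins, and only the scipy.spatial.distance.euclidean call mirrors the file:

from scipy.spatial import distance

fs1 = {'cons': 1, 'voice': 1, 'high': 0}  # hypothetical feature space of one phoneme
fs2 = {'cons': 1, 'voice': 0, 'high': 0}  # hypothetical feature space of another
f = sorted(set(fs1.keys()) | set(fs2.keys()))
v1 = [float(fs1.get(fx, 0)) for fx in f]
v2 = [float(fs2.get(fx, 0)) for fx in f]
print(distance.euclidean(v1, v2))  # 1.0 -- the two sketches differ in exactly one feature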
16fa0a4b39d17c4ece50384f657fc65fb6ee0fef | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02271/s666225963.py | a1e305d0cdb1bc4f6641e39bb56d1f7301cd5a82 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py |
# ALDS_5_A - brute force (exhaustive search)
import sys
n = int(input())
A = list(map(int, sys.stdin.readline().strip().split()))
q = int(input())
m = list(map(int, sys.stdin.readline().strip().split()))
sum_set = set()
for i in range(2 ** n):
bit = [(i >> j) & 1 for j in range(n)]
combined = [x * y for (x, y) in zip(A, bit)]
sum_set.add(sum(combined))
for target in m:
if target in sum_set:
print('yes')
else:
print('no')
| [
"[email protected]"
]
| |
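The solution above enumerates every bitmask over the n elements and caches all 2**n subset sums in a set before answering the queries. The same idea in isolation, with a made-up input:

A = [1, 5, 7, 10, 21]  # made-up input, not from the dataset row
sums = set()
for mask in range(2 ** len(A)):
    sums.add(sum(A[j] for j in range(len(A)) if (mask >> j) & 1))
print('yes' if 17 in sums else 'no')  # yes: 7 + 10
print('yes' if 2 in sums else 'no')   # no: 2 is not a subset sum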
9db73616056bed06a9c8484c5aea2920e6c7b81e | 421b0ae45f495110daec64ed98c31af525585c2c | /PythonProgramsTraining/graphics/frame1.py | c0c8e6a93f60c197702ad936f518643ad8a67d1b | []
| no_license | Pradeepsuthar/pythonCode | a2c87fb64c79edd11be54c2015f9413ddce246c4 | 14e2b397f69b3fbebde5b3af98898c4ff750c28c | refs/heads/master | 2021-02-18T05:07:40.402466 | 2020-03-05T13:14:15 | 2020-03-05T13:14:15 | 245,163,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py |
import tkinter as tk
from tkinter import messagebox
def area():
'to calculate area'
len = float(tfLen.get())
wid = float(tfWidth.get())
result = len*wid
tfArea.insert(0,result)
# Showing massage box
messagebox.showinfo("Info MAss ", "Area is : "+str(result)+" CM")
# creating a frame
frame = tk.Tk()
frame.geometry("200x200")
#Creating controls
tfLen = tk.Entry(frame)
tfWidth = tk.Entry(frame)
tfArea = tk.Entry(frame)
btn = tk.Button(frame, text="Calculate Area", command=area)
# Adding components on frame
tfLen.pack()
tfWidth.pack()
tfArea.pack()
btn.pack()
# Showing frame
frame.mainloop()
| [
"[email protected]"
]
| |
9cd66536cdc51a43bf901eccb7e2154f2e6368ec | 768058e7f347231e06a28879922690c0b6870ed4 | /venv/lib/python3.7/site-packages/numba/cuda/simulator/compiler.py | 5a88a649e47d11efe9887678a7397e77376673b8 | []
| no_license | jciech/HeisenbergSpinChains | 58b4238281d8c158b11c6c22dd0da82025fd7284 | e43942bbd09f6675e7e2ff277f8930dc0518d08e | refs/heads/master | 2022-12-18T08:04:08.052966 | 2020-09-29T12:55:00 | 2020-09-29T12:55:00 | 258,476,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py |
"""
The compiler is not implemented in the simulator. This module provides a stub
to allow tests to import successfully.
"""
compile_kernel = None
| [
"[email protected]"
]
| |
a02d45d50426a72b18991c0c25da0082ba9e835f | 1886065d10342822b10063cd908a690fccf03d8b | /appengine/findit/crash/loglinear/changelist_classifier.py | 96277a04aefab650a935aa33a7cf08c3b48f7e7a | [
"BSD-3-Clause"
]
| permissive | TrellixVulnTeam/chromium-infra_A6Y5 | 26af0dee12f89595ebc6a040210c9f62d8ded763 | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | refs/heads/master | 2023-03-16T15:33:31.015840 | 2017-01-31T19:55:59 | 2017-01-31T20:06:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,932 | py |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
import logging
from common.chrome_dependency_fetcher import ChromeDependencyFetcher
from crash import changelist_classifier
from crash.changelist_classifier import StackInfo
from crash.crash_report_with_dependencies import CrashReportWithDependencies
from crash.loglinear.model import UnnormalizedLogLinearModel
class LogLinearChangelistClassifier(object):
"""A ``LogLinearModel``-based implementation of CL classification."""
def __init__(self, get_repository, meta_feature, meta_weight,
top_n_frames=7, top_n_suspects=3):
"""
Args:
get_repository (callable): a function from DEP urls to ``Repository``
objects, so we can get changelogs and blame for each dep. Notably,
to keep the code here generic, we make no assumptions about
which subclass of ``Repository`` this function returns. Thus,
it is up to the caller to decide what class to return and handle
any other arguments that class may require (e.g., an http client
for ``GitilesRepository``).
meta_feature (MetaFeature): All features.
meta_weight (MetaWeight): All weights. the weights for the features.
The keys of the dictionary are the names of the feature that weight is
for. We take this argument as a dict rather than as a list so that
callers needn't worry about what order to provide the weights in.
top_n_frames (int): how many frames of each callstack to look at.
top_n_suspects (int): maximum number of suspects to return.
"""
self._dependency_fetcher = ChromeDependencyFetcher(get_repository)
self._get_repository = get_repository
self._top_n_frames = top_n_frames
self._top_n_suspects = top_n_suspects
self._model = UnnormalizedLogLinearModel(meta_feature, meta_weight)
def __call__(self, report):
"""Finds changelists suspected of being responsible for the crash report.
Args:
report (CrashReport): the report to be analyzed.
Returns:
List of ``Suspect``s, sorted by probability from highest to lowest.
"""
annotated_report = CrashReportWithDependencies(
report, self._dependency_fetcher)
if annotated_report is None:
logging.warning('%s.__call__: '
'Could not obtain dependencies for report: %s',
self.__class__.__name__, str(report))
return []
suspects = self.GenerateSuspects(annotated_report)
if not suspects:
logging.warning('%s.__call__: Found no suspects for report: %s',
self.__class__.__name__, str(annotated_report))
return []
return self.RankSuspects(annotated_report, suspects)
def GenerateSuspects(self, report):
"""Generate all possible suspects for the reported crash.
Args:
report (CrashReportWithDependencies): the crash we seek to explain.
Returns:
A list of ``Suspect``s who may be to blame for the
``report``. Notably these ``Suspect`` instances do not have
all their fields filled in. They will be filled in later by
``RankSuspects``.
"""
# Look at all the frames from any stack in the crash report, and
# organize the ones that come from dependencies we care about.
dep_to_file_to_stack_infos = defaultdict(lambda: defaultdict(list))
for stack in report.stacktrace:
for frame in stack:
if frame.dep_path in report.dependencies:
dep_to_file_to_stack_infos[frame.dep_path][frame.file_path].append(
StackInfo(frame, stack.priority))
dep_to_file_to_changelogs, ignore_cls = (
changelist_classifier.GetChangeLogsForFilesGroupedByDeps(
report.dependency_rolls, report.dependencies,
self._get_repository))
# Get the possible suspects.
return changelist_classifier.FindSuspects(
dep_to_file_to_changelogs,
dep_to_file_to_stack_infos,
report.dependencies,
self._get_repository,
ignore_cls)
def RankSuspects(self, report, suspects):
"""Returns a lineup of the suspects in order of likelihood.
Suspects with a discardable score or lower ranking than top_n_suspects
will be filtered.
Args:
report (CrashReportWithDependencies): the crash we seek to explain.
suspects (iterable of Suspect): the CLs to consider blaming for the crash.
Returns:
A list of suspects in order according to their likelihood. This
list contains elements of the ``suspects`` list, where we mutate
some of the fields to store information about why that suspect
is being blamed (e.g., the ``confidence``, ``reasons``, and
``changed_files`` fields are updated). In addition to sorting the
suspects, we also filter out those which are exceedingly unlikely
or don't make the ``top_n_suspects`` cut.
"""
# Score the suspects and organize them for outputting/returning.
features_given_report = self._model.Features(report)
score_given_report = self._model.Score(report)
scored_suspects = []
for suspect in suspects:
score = score_given_report(suspect)
if self._model.LogZeroish(score):
logging.debug('Discarding suspect because it has zero probability: %s'
% str(suspect.ToDict()))
continue
suspect.confidence = score
# features is ``MetaFeatureValue`` object containing all feature values.
features = features_given_report(suspect)
suspect.reasons = features.reason
suspect.changed_files = [changed_file.ToDict()
for changed_file in features.changed_files]
scored_suspects.append(suspect)
scored_suspects.sort(key=lambda suspect: suspect.confidence)
return scored_suspects[:self._top_n_suspects]
| [
"[email protected]"
]
| |
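The docstrings above describe the ranking step: each suspect CL gets an unnormalized log-linear score, suspects with effectively zero probability are dropped, and the rest are sorted by confidence. A generic sketch of that scoring idea (the weights and feature values below are invented for illustration, not Findit's):

weights = {'touched_file': 2.0, 'frame_distance': -0.5}  # invented weights
suspects = [
    {'cl': 'cl_a', 'features': {'touched_file': 1.0, 'frame_distance': 3.0}},
    {'cl': 'cl_b', 'features': {'touched_file': 1.0, 'frame_distance': 1.0}},
]
for s in suspects:
    s['score'] = sum(weights[f] * v for f, v in s['features'].items())
for s in sorted(suspects, key=lambda s: s['score'], reverse=True):
    print(s['cl'], s['score'])  # cl_b (1.5) outranks cl_a (0.5)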
5e02976a619cb1e6ada32cf79cbd4ed879067ae8 | 4b69b5dd4b1b3cf81b996065831226a243abb332 | /articles/admin.py | 45fafe2207a9eb4a089c73b9557ee149401c8418 | []
| no_license | cui0519/myBlog | d8ebd601ac5bf5a3fe0dc16e2c703cdbaa055ab9 | c0852b6e42bfa93820d330e8f9e547be229344e8 | refs/heads/master | 2023-02-09T06:33:13.641351 | 2021-01-05T00:18:21 | 2021-01-05T00:18:21 | 326,308,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py |
from django.contrib import admin
from .models import Articles
# Register your models here.
class ArticlesAdmin(admin.ModelAdmin):
list_display = ('title','author','img','abstract','visited','created_at')
    search_fields = ('title','author','abstract','content')
list_filter = list_display
admin.site.register(Articles,ArticlesAdmin)
| [
"[email protected]"
]
| |
c5b193fb983b5e4d663f93a6485499e152a180c1 | e5cf5fd657b28d1c01d8fd954a911d72526e3112 | /tide_teach/tide_time_windows.py | b54f5fcebaccedcc95ffb40b903d76d6c69a1cd4 | []
| no_license | parkermac/ptools | 6b100f13a44ff595de03705a6ebf14a2fdf80291 | a039261cd215fe13557baee322a5cae3e976c9fd | refs/heads/master | 2023-01-09T11:04:16.998228 | 2023-01-02T19:09:18 | 2023-01-02T19:09:18 | 48,205,248 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,730 | py |
"""
Code to plot observed tide time series.
"""
import os
import sys
import pytz
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
import numpy as np
from importlib import reload
import ephem_functions as efun
reload(efun)
import tractive_functions as tfun
reload(tfun)
alp = os.path.abspath('../../LiveOcean/alpha')
if alp not in sys.path:
sys.path.append(alp)
import zfun
indir = os.environ.get('HOME') + '/Documents/ptools_data/tide/'
zone='US/Pacific'
tz_local = pytz.timezone(zone)
def read_tide(in_fn):
df = pd.read_csv(in_fn, index_col='Date Time', parse_dates = True)
for k in df.keys():
df = df.rename(columns={k: k.strip()})
df = df.drop(['Sigma', 'I', 'L'], axis=1)
df = df.rename(columns={'Water Level': 'Tide Obs'})
# find the mean water level
eta0 = df['Tide Obs'].mean()
# Assumes time is UTC
df.index.name = 'Date UTC'
df = df.tz_localize('UTC')
return df, eta0
# READ IN OBSERVED TIDE DATA
fn = 'CO-OPS__9447130__hr.csv' # Seattle 2016 observed data
city = 'Seattle'
obs_fn = indir + fn
obs_df, eta0 = read_tide(obs_fn)
obs_df = obs_df.tz_convert(tz_local)
obs_df.index.name = 'Date (local time)'
obs_df['Tide Obs'] = obs_df['Tide Obs'] * 3.28084
# and set related time limits
year = 2016
#tzinfo = pytz.timezone('UTC')
tzinfo = tz_local
dt0_day = datetime(year,6,10,tzinfo=tzinfo)
dt1_day = datetime(year,6,11,tzinfo=tzinfo)
dt0_month = datetime(year,6,1,tzinfo=tzinfo)
dt1_month = datetime(year,7,1,tzinfo=tzinfo)
dt0_year = datetime(year,1,1,tzinfo=tzinfo)
dt1_year = datetime(year+1,1,1,tzinfo=tzinfo)
# PLOTTING
plt.close('all')
lw0 = 0.5
lw1 = 1
lw2 = 3
fsz=18
ylim=(-5, 15)
fig = plt.figure(figsize=(14,8))
ax = fig.add_subplot(221)
obs_df.plot(y='Tide Obs',
legend=False, style='-b', ax=ax, ylim=ylim,
lw=lw2, grid=True, xlim=(dt0_day,dt1_day))
ax.text(.05,.05,'One Day', transform=ax.transAxes, fontweight='bold', fontsize=fsz)
ax.text(.05,.9,'Observed Tide Height (ft) ' + city,
transform=ax.transAxes, fontsize=fsz)
ax.set_xticklabels('')
ax.set_xlabel('')
ax = fig.add_subplot(222)
obs_df.plot(y='Tide Obs',
legend=False, style='-b', ax=ax, ylim=ylim,
lw=lw1, grid=True, xlim=(dt0_month,dt1_month))
ax.text(.05,.05,'One Month', transform=ax.transAxes, fontweight='bold', fontsize=fsz)
ax.set_xticklabels('')
ax.set_xlabel('')
ax = fig.add_subplot(212)
obs_df.plot(y='Tide Obs',
legend=False, style='-b', ax=ax, ylim=ylim,
lw=lw0, grid=True, xlim=(dt0_year,dt1_year))
ax.text(.05,.05,'One Year', transform=ax.transAxes, fontweight='bold', fontsize=fsz)
ax.set_xticklabels('')
ax.set_xlabel('')
fig.set_tight_layout(True)
plt.show()
| [
"[email protected]"
]
| |
6b1d1fdaa602c7768fb7a668612821ad314b4395 | 52d797a1a9f853f691d2d6fb233434cf9cc9e12b | /Implementation Challenges/Append and Delete.py | 1e2622a5816301cb9b83c0a56d915bdfe4639df0 | []
| no_license | harshildarji/Algorithms-HackerRank | f1c51fedf2be9e6fbac646d54abccb7e66800e22 | 96dab5a76b844e66e68a493331eade91541fd873 | refs/heads/master | 2022-05-21T06:57:59.362926 | 2020-04-19T14:05:19 | 2020-04-19T14:05:19 | 114,212,208 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 328 | py |
# Append and Delete
# https://www.hackerrank.com/challenges/append-and-delete/problem
s, t = input().strip(), input().strip()
k = int(input().strip())
for i in reversed(range(1, k + 1)):
if s == t[:len(s)] and len(t) - len(s) == i or len(s) == 0:
break
s = s[:-1]
print("Yes" if len(t) - len(s) <= i else "No")
| [
"[email protected]"
]
| |
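To see why the move bookkeeping above works, here is a hedged standalone re-statement of the same task (not the submitted solution; the sample inputs are the classic HackerRank ones): strip both strings to their common prefix, spend one move per deleted or appended character, and absorb leftover moves either in delete/append pairs or by wiping s entirely and rebuilding t.

def can_convert(s, t, k):  # illustrative re-statement, assumptions as above
    p = 0
    while p < min(len(s), len(t)) and s[p] == t[p]:
        p += 1
    moves = (len(s) - p) + (len(t) - p)  # delete s's tail, append t's tail
    if moves <= k and ((k - moves) % 2 == 0 or k >= len(s) + len(t)):
        return 'Yes'
    return 'No'

print(can_convert('hackerhappy', 'hackerrank', 9))  # Yes: 5 deletions + 4 appends
print(can_convert('aba', 'aba', 7))                 # Yes: deleting from an empty string is allowed
print(can_convert('ashley', 'ash', 2))              # No: the 3 deletions alone exceed k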
c305892b8de9942ba1433b2aa00240da71b7b0bc | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayCloudCloudbaseHttpaccessBindQueryResponse.py | ebc27653df46ebfce5e7c7e7b22f0e76998f3f54 | [
"Apache-2.0"
]
| permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,902 | py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.DomainBind import DomainBind
class AlipayCloudCloudbaseHttpaccessBindQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayCloudCloudbaseHttpaccessBindQueryResponse, self).__init__()
self._domain_binds = None
self._page_index = None
self._page_size = None
self._total = None
@property
def domain_binds(self):
return self._domain_binds
@domain_binds.setter
def domain_binds(self, value):
if isinstance(value, list):
self._domain_binds = list()
for i in value:
if isinstance(i, DomainBind):
self._domain_binds.append(i)
else:
self._domain_binds.append(DomainBind.from_alipay_dict(i))
@property
def page_index(self):
return self._page_index
@page_index.setter
def page_index(self, value):
self._page_index = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def total(self):
return self._total
@total.setter
def total(self, value):
self._total = value
def parse_response_content(self, response_content):
response = super(AlipayCloudCloudbaseHttpaccessBindQueryResponse, self).parse_response_content(response_content)
if 'domain_binds' in response:
self.domain_binds = response['domain_binds']
if 'page_index' in response:
self.page_index = response['page_index']
if 'page_size' in response:
self.page_size = response['page_size']
if 'total' in response:
self.total = response['total']
| [
"[email protected]"
]
| |
b4f391918f30a778d049bd168cb1ca4154c0b42a | 3a4fbde06794da1ec4c778055dcc5586eec4b7d2 | /@lib/12-13-2011-01/vyperlogix/decorators/addto.py | 979a905e9a18fdcddf2620939aec919f9baa031a | []
| no_license | raychorn/svn_python-django-projects | 27b3f367303d6254af55c645ea003276a5807798 | df0d90c72d482b8a1e1b87e484d7ad991248ecc8 | refs/heads/main | 2022-12-30T20:36:25.884400 | 2020-10-15T21:52:32 | 2020-10-15T21:52:32 | 304,455,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py |
__copyright__ = """\
(c). Copyright 2008-2014, Vyper Logix Corp., All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.,
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
def addto(instance):
'''
alias for inject_method_into(instance)
'''
from inject import inject_method_into
return inject_method_into(instance)
| [
"[email protected]"
]
| |
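The decorator above is a thin alias for inject_method_into, which presumably supports the classic method-injection pattern. A usage sketch under that assumption (the Thing class and greet function are hypothetical, and the import path is inferred from the file path above):

# assuming: from vyperlogix.decorators.addto import addto
class Thing(object):
    pass

obj = Thing()

@addto(obj)  # assumed to bind greet onto obj via inject_method_into
def greet(self):
    return 'hello from %s' % type(self).__name__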
7ca223afe5153d45121ca9011ccb886e87b49eb5 | 99fddc8762379bcb707ad53081cd342efa7a5d89 | /test/pinocchio_frame_test.py | fa17c45921833826190201d02cca144b699b6959 | [
"MIT"
]
| permissive | zhilinxiong/PyPnC | ef19a4bcc366666d2550466b07cd8ec8f098c0c4 | abf9739c953d19ca57fd4bd37be43415f3d5e4a7 | refs/heads/master | 2023-07-04T19:09:26.115526 | 2021-08-03T04:29:10 | 2021-08-03T04:29:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,565 | py |
import os
import sys
cwd = os.getcwd()
sys.path.append(cwd)
import pinocchio as pin
import numpy as np
urdf_file = cwd + "/robot_model/manipulator/three_link_manipulator.urdf"
model = pin.buildModelFromUrdf(urdf_file)
data = model.createData()
print(model)
q = np.array([np.pi / 2., 0., 0.])
# q = np.zeros(3)
qdot = np.ones(3)
pin.forwardKinematics(model, data, q, qdot)
## Print Frame Names
print([frame.name for frame in model.frames])
## Calculate j2 placement
j2_frame = model.getFrameId('j1')
j2_translation = pin.updateFramePlacement(model, data, j2_frame)
print("j2 translation")
print(j2_translation)
## Calculate l2 placement
l2_frame = model.getFrameId('l2')
l2_translation = pin.updateFramePlacement(model, data, l2_frame)
print("l2 translation")
print(l2_translation)
## Calculate j2 jacobian
pin.computeJointJacobians(model, data, q)
j2_jacobian = pin.getFrameJacobian(model, data, j2_frame,
pin.ReferenceFrame.LOCAL_WORLD_ALIGNED)
print("j2 jacobian")
print(j2_jacobian)
## Calculate l2 jacobian
l2_jacobian = pin.getFrameJacobian(model, data, l2_frame,
pin.ReferenceFrame.LOCAL_WORLD_ALIGNED)
print("l2 jacobian")
print(l2_jacobian)
## Calculate j2 spatial velocity
j2_vel = pin.getFrameVelocity(model, data, j2_frame)
print("j2 vel")
print(j2_vel)
## Calculate l2 spatial velocity
l2_vel = pin.getFrameVelocity(model, data, l2_frame,
pin.ReferenceFrame.LOCAL_WORLD_ALIGNED)
print("l2 vel")
print(l2_vel)
print(np.dot(l2_jacobian, qdot))
| [
"[email protected]"
]
| |
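The last two prints above juxtapose getFrameVelocity with the Jacobian-velocity product. Relying on Pinocchio's (linear, angular) ordering of motion vectors, a hedged numeric check one could append to the script reads:

v6 = np.dot(l2_jacobian, qdot)  # 6-vector: linear part stacked over angular part
assert np.allclose(v6[:3], l2_vel.linear) and np.allclose(v6[3:], l2_vel.angular)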
2e808d917489faf59e65fb3ab6a7e999316ec019 | 14a853584c0c1c703ffd8176889395e51c25f428 | /sem1/fop/lab5/static/strings.py | 2f47c15c3b3c7d3bd361c700be9a29ee4f30b077 | []
| no_license | harababurel/homework | d0128f76adddbb29ac3d805c235cdedc9af0de71 | 16919f3b144de2d170cd6683d54b54bb95c82df9 | refs/heads/master | 2020-05-21T12:25:29.248857 | 2018-06-03T12:04:45 | 2018-06-03T12:04:45 | 43,573,199 | 6 | 4 | null | null | null | null | UTF-8 | Python | false | false | 662 | py |
"""
Most long messages displayed by the UI will be found here.
"""
from util.Color import bold
STRINGS = {
'helpPrompt':
'Commands:\n' +
'\t%s - displays this prompt.\n' % bold('help') +
'\t%s - adds a new student or assignment.\n' % bold('add') +
'\t%s - displays all students or assignments.\n' % bold('list') +
'\t%s - goes to previous state.\n' % bold('undo') +
'\t%s - goes to next state.\n' % bold('redo') +
'\t%s - clears the screen.\n' % bold('clear') +
'\t%s - saves the work session and exits the application.' % bold('exit')
}
| [
"[email protected]"
]
| |
cdbfa1646185540c98eb700f25ced9365adf3ea5 | 2359121ebcebba9db2cee20b4e8f8261c5b5116b | /configs_pytorch/f92-all_pt.py | 24f40fe7c10ebd9bd5510af10002262937b2188d | []
| no_license | EliasVansteenkiste/plnt | 79840bbc9f1518c6831705d5a363dcb3e2d2e5c2 | e15ea384fd0f798aabef04d036103fe7af3654e0 | refs/heads/master | 2021-01-20T00:34:37.275041 | 2017-07-20T18:03:08 | 2017-07-20T18:03:08 | 89,153,531 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,436 | py |
#copy of j25
import numpy as np
from collections import namedtuple
from functools import partial
from PIL import Image
import data_transforms
import data_iterators
import pathfinder
import utils
import app
import torch
import torchvision
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import math
restart_from_save = None
rng = np.random.RandomState(42)
# transformations
p_transform = {'patch_size': (256, 256),
'channels': 3,
'n_labels': 17}
#only lossless augmentations
p_augmentation = {
'rot90_values': [0,1,2,3],
'flip': [0, 1]
}
# mean and std values for imagenet
mean=np.asarray([0.485, 0.456, 0.406])
mean = mean[:, None, None]
std = np.asarray([0.229, 0.224, 0.225])
std = std[:, None, None]
# data preparation function
def data_prep_function_train(x, p_transform=p_transform, p_augmentation=p_augmentation, **kwargs):
x = x.convert('RGB')
x = np.array(x)
x = np.swapaxes(x,0,2)
x = x / 255.
x -= mean
x /= std
x = x.astype(np.float32)
x = data_transforms.random_lossless(x, p_augmentation, rng)
return x
def data_prep_function_valid(x, p_transform=p_transform, **kwargs):
x = x.convert('RGB')
x = np.array(x)
x = np.swapaxes(x,0,2)
x = x / 255.
x -= mean
x /= std
x = x.astype(np.float32)
return x
def label_prep_function(x):
#cut out the label
return x
# data iterators
batch_size = 32
nbatches_chunk = 1
chunk_size = batch_size * nbatches_chunk
folds = app.make_stratified_split(no_folds=5)
print(len(folds))
train_ids = folds[0] + folds[1] + folds[2] + folds[3]
valid_ids = folds[4]
all_ids = folds[0] + folds[1] + folds[2] + folds[3] + folds[4]
bad_ids = []
train_ids = [x for x in train_ids if x not in bad_ids]
valid_ids = [x for x in valid_ids if x not in bad_ids]
test_ids = np.arange(40669)
test2_ids = np.arange(20522)
train_data_iterator = data_iterators.DataGenerator(dataset='train-jpg',
batch_size=chunk_size,
img_ids = all_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_train,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=True, random=True, infinite=True)
feat_data_iterator = data_iterators.DataGenerator(dataset='train-jpg',
batch_size=chunk_size,
img_ids = all_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=True, infinite=False)
valid_data_iterator = data_iterators.DataGenerator(dataset='train-jpg',
batch_size=chunk_size,
img_ids = valid_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=True, infinite=False)
test_data_iterator = data_iterators.DataGenerator(dataset='test-jpg',
batch_size=chunk_size,
img_ids = test_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
test2_data_iterator = data_iterators.DataGenerator(dataset='test2-jpg',
batch_size=chunk_size,
img_ids = test2_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
import tta
tta = tta.LosslessTTA(p_augmentation)
tta_test_data_iterator = data_iterators.TTADataGenerator(dataset='test-jpg',
tta = tta,
duplicate_label = False,
img_ids = test_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
tta_test2_data_iterator = data_iterators.TTADataGenerator(dataset='test2-jpg',
tta = tta,
duplicate_label = False,
img_ids = test2_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
tta_valid_data_iterator = data_iterators.TTADataGenerator(dataset='train-jpg',
tta = tta,
duplicate_label = True,
batch_size=chunk_size,
img_ids = valid_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
nchunks_per_epoch = train_data_iterator.nsamples / chunk_size
max_nchunks = nchunks_per_epoch * 40
validate_every = int(0.5 * nchunks_per_epoch)
save_every = int(10 * nchunks_per_epoch)
learning_rate_schedule = {
0: 5e-2,
int(max_nchunks * 0.3): 2e-2,
int(max_nchunks * 0.6): 1e-2,
int(max_nchunks * 0.8): 3e-3,
int(max_nchunks * 0.9): 1e-3
}
# model
from collections import OrderedDict
class MyDenseNet(nn.Module):
def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
super(MyDenseNet, self).__init__()
# First convolution
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
('norm0', nn.BatchNorm2d(num_init_features)),
('relu0', nn.ReLU(inplace=True)),
('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]))
# Each denseblock
num_features = num_init_features
self.blocks = []
final_num_features = 0
for i, num_layers in enumerate(block_config):
block = torchvision.models.densenet._DenseBlock(num_layers=num_layers, num_input_features=num_features,
bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
self.features.add_module('denseblock%d' % (i + 1), block)
self.blocks.append(block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = torchvision.models.densenet._Transition(num_input_features=num_features, num_output_features=num_features // 2)
self.features.add_module('transition%d' % (i + 1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module('norm5', nn.BatchNorm2d(num_features))
self.classifier_drop = nn.Dropout(p=0.75)
# Linear layer
self.classifier = nn.Linear(num_features, num_classes)
def forward(self, x):
features = self.features(x)
out = F.relu(features, inplace=True)
out = self.classifier_drop(out)
out = F.avg_pool2d(out, kernel_size=7).view(features.size(0), -1)
out = self.classifier(out)
return out
def my_densenet169(pretrained=False, **kwargs):
r"""Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MyDenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32))
if pretrained:
model.load_state_dict(torch.utils.model_zoo.load_url(torchvision.models.densenet.model_urls['densenet169']))
return model
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.densenet = my_densenet169(pretrained=True)
self.densenet.classifier = nn.Linear(self.densenet.classifier.in_features, p_transform["n_labels"])
self.densenet.classifier.weight.data.zero_()
def forward(self, x):
x = self.densenet(x)
return F.sigmoid(x)
def build_model():
net = Net()
return namedtuple('Model', [ 'l_out'])( net )
# loss
class MultiLoss(torch.nn.modules.loss._Loss):
def __init__(self, weight):
super(MultiLoss, self).__init__()
self.weight = weight
def forward(self, input, target):
torch.nn.modules.loss._assert_no_grad(target)
weighted = (self.weight*target)*(input-target)**2 +(1-target)*(input-target)**2
return torch.mean(weighted)
def build_objective():
return MultiLoss(5.0)
def build_objective2():
return MultiLoss(1.0)
def score(gts, preds):
return app.f2_score_arr(gts, preds)
# updates
def build_updates(model, learning_rate):
return optim.SGD(model.parameters(), lr=learning_rate,momentum=0.9,weight_decay=0.0002)
| [
"[email protected]"
]
| |
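MultiLoss above is a squared error whose positive-label terms are scaled by weight, so errors on positive labels cost weight times more than the same errors on negative labels. A quick sanity check of that expression outside the class (the tensor values are mine):

import torch

weight = 5.0
target = torch.tensor([[1.0, 0.0]])
pred = torch.tensor([[0.5, 0.5]])
weighted = (weight * target) * (pred - target) ** 2 + (1 - target) * (pred - target) ** 2
print(weighted.mean())  # tensor(0.7500): (5*0.25 + 1*0.25) / 2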
007da86134bd9cf81656b9de3a4b00e9262caadf | 0bce7412d58675d6cc410fa7a81c294ede72154e | /Python3/0983. Minimum Cost For Tickets.py | 67eeee126a10f3fbd09cd9f37ac9a746033d4c3f | []
| no_license | yang4978/LeetCode | 9ddf010b0f1dda32cddc7e94c3f987509dea3214 | 6387d05b619d403414bad273fc3a7a2c58668db7 | refs/heads/master | 2022-01-15T04:21:54.739812 | 2021-12-28T12:28:28 | 2021-12-28T12:28:28 | 182,653,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 727 | py |
from typing import List  # needed for the List[int] annotations below

class Solution:
def mincostTickets(self, days: List[int], costs: List[int]) -> int:
# end = days[-1] + 1
# dp = [0]*end
# for d in range(1,end):
# temp = dp[d-1] + costs[0]
# temp = min(temp,min(dp[max(0,d-7):d])+costs[1])
# temp = min(temp,min(dp[max(0,d-30):d])+costs[2])
# if d not in days:
# temp = min(temp,dp[d-1])
# dp[d] = temp
# return dp[-1]
ans = [0]*(days[-1]+30)
for d in range(len(ans)):
if d in days:
ans[d] = min(ans[d-1]+costs[0],ans[d-7]+costs[1],ans[d-30]+costs[2])
else:
ans[d] = ans[d-1]
return ans[-1]
| [
"[email protected]"
]
| |
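Tracing the padded DP above on the classic LeetCode sample: a 7-day pass bought for day 1 covers travel days 1, 4, 6 and 7, and 1-day passes cover days 8 and 20, for 7 + 2 + 2 = 11.

print(Solution().mincostTickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11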
7112580637970329d57785ff0bc48507d4d081ea | 08cfc4fb5f0d2f11e4e226f12520a17c5160f0a2 | /kubernetes/client/apis/storage_v1_api.py | 89b1839daafcce6aca004b352e8dd5927d723d95 | [
"Apache-2.0"
]
| permissive | ex3cv/client-python | 5c6ee93dff2424828d064b5a2cdbed3f80b74868 | 2c0bed9c4f653472289324914a8f0ad4cbb3a1cb | refs/heads/master | 2021-07-12T13:37:26.049372 | 2017-10-16T20:19:01 | 2017-10-16T20:19:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52,194 | py |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class StorageV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_storage_class(self, body, **kwargs):
"""
create a StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_storage_class(body, async=True)
>>> result = thread.get()
:param async bool
:param V1StorageClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_storage_class_with_http_info(body, **kwargs)
else:
(data) = self.create_storage_class_with_http_info(body, **kwargs)
return data
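    # Hedged usage sketch (not from the generated source): the method blocks by
    # default; with async=True it returns a thread whose get() yields the result.
    # The metadata/provisioner values below are invented for illustration.
    #   api = StorageV1Api()
    #   sc = kubernetes.client.V1StorageClass(
    #       metadata=kubernetes.client.V1ObjectMeta(name='fast'),
    #       provisioner='kubernetes.io/gce-pd')
    #   created = api.create_storage_class(sc)
    #   thread = api.create_storage_class(sc, async=True); result = thread.get()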
def create_storage_class_with_http_info(self, body, **kwargs):
"""
create a StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_storage_class_with_http_info(body, async=True)
>>> result = thread.get()
:param async bool
:param V1StorageClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'pretty']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_storage_class" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_storage_class`")
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClass',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_storage_class(self, **kwargs):
"""
delete collection of StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_collection_storage_class(async=True)
>>> result = thread.get()
:param async bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_collection_storage_class_with_http_info(**kwargs)
else:
(data) = self.delete_collection_storage_class_with_http_info(**kwargs)
return data
def delete_collection_storage_class_with_http_info(self, **kwargs):
"""
delete collection of StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_collection_storage_class_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['pretty', '_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_storage_class" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_storage_class(self, name, body, **kwargs):
"""
delete a StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_storage_class(name, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the StorageClass (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_storage_class_with_http_info(name, body, **kwargs)
else:
(data) = self.delete_storage_class_with_http_info(name, body, **kwargs)
return data
def delete_storage_class_with_http_info(self, name, body, **kwargs):
"""
delete a StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_storage_class_with_http_info(name, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the StorageClass (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'body', 'pretty', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_storage_class" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_storage_class`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `delete_storage_class`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_api_resources(async=True)
>>> result = thread.get()
:param async bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_api_resources_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_storage_class(self, **kwargs):
"""
list or watch objects of kind StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_storage_class(async=True)
>>> result = thread.get()
:param async bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StorageClassList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.list_storage_class_with_http_info(**kwargs)
else:
(data) = self.list_storage_class_with_http_info(**kwargs)
return data
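    # A hedged pagination sketch (not generated code): `api` stands for an
    # instance of this API class, and the page size is an arbitrary assumption.
    #
    #   _continue = None
    #   while True:
    #       page = api.list_storage_class(limit=50, _continue=_continue)
    #       for item in page.items:
    #           print(item.metadata.name)
    #       _continue = page.metadata._continue
    #       if not _continue:
    #           break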
def list_storage_class_with_http_info(self, **kwargs):
"""
list or watch objects of kind StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_storage_class_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StorageClassList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['pretty', '_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_storage_class" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClassList',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_storage_class(self, name, body, **kwargs):
"""
partially update the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.patch_storage_class(name, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the StorageClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.patch_storage_class_with_http_info(name, body, **kwargs)
else:
(data) = self.patch_storage_class_with_http_info(name, body, **kwargs)
return data
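    # A hedged usage sketch (not generated code): the body below is a merge
    # patch, one of the Content-Types accepted by this endpoint; the object
    # name and annotation are made-up values.
    #
    #   patch = {"metadata": {"annotations": {"example/owner": "team-a"}}}
    #   api.patch_storage_class(name="standard", body=patch)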
def patch_storage_class_with_http_info(self, name, body, **kwargs):
"""
partially update the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.patch_storage_class_with_http_info(name, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the StorageClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'body', 'pretty']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_storage_class" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_storage_class`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_storage_class`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClass',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_storage_class(self, name, **kwargs):
"""
read the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.read_storage_class(name, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the StorageClass (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.read_storage_class_with_http_info(name, **kwargs)
else:
(data) = self.read_storage_class_with_http_info(name, **kwargs)
return data
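    # A hedged usage sketch (not generated code): reading a single object with
    # the optional export flag documented above; the name is an assumption.
    #
    #   sc = api.read_storage_class(name="standard", export=True)
    #   print(sc.provisioner)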
def read_storage_class_with_http_info(self, name, **kwargs):
"""
read the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.read_storage_class_with_http_info(name, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the StorageClass (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'pretty', 'exact', 'export']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_storage_class" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_storage_class`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClass',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_storage_class(self, name, body, **kwargs):
"""
replace the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_storage_class(name, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the StorageClass (required)
:param V1StorageClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.replace_storage_class_with_http_info(name, body, **kwargs)
else:
(data) = self.replace_storage_class_with_http_info(name, body, **kwargs)
return data
def replace_storage_class_with_http_info(self, name, body, **kwargs):
"""
replace the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_storage_class_with_http_info(name, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the StorageClass (required)
:param V1StorageClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'body', 'pretty']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_storage_class" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_storage_class`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_storage_class`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClass',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
"[email protected]"
]
| |
82271a49c22deb170f63fd3232c33d3a7f82602e | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/inspections/PyDunderSlotsInspection/inheritedClassAttrAssignmentAndOwnWithAttrAndInheritedSlots.py | 307acdbdb1c8afd488293f4deb2a1b2e092d9960 | [
"Apache-2.0"
]
| permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 243 | py | class B(object):
attr = 'baz'
__slots__ = ['f', 'b']
class C(B):
__slots__ = ['attr', 'bar']
C.attr = 'spam'
print(C.attr)
c = C()
<warning descr="'C' object attribute 'attr' is read-only">c.attr</warning> = 'spam'
print(c.attr) | [
"[email protected]"
]
| |
93f3d82a3dbde659163043e13cd766201e977797 | 6b05bddf2e294c8e1b39846aecadfa06b4ff805d | /test/test_v1_guest_agent_ping.py | b5518c61004a78ef0ce9d3cb39339b04acf71066 | [
"Apache-2.0"
]
| permissive | kubevirt/client-python | 5ca82fe55d48c07f62796d2bed3605a7c189922c | 235fe17f58d41165010be7e4122cb67bdc866fe7 | refs/heads/master | 2023-09-03T12:25:27.272479 | 2023-08-17T00:33:31 | 2023-08-17T00:33:31 | 105,017,761 | 29 | 25 | Apache-2.0 | 2022-10-20T13:52:10 | 2017-09-27T12:51:32 | Python | UTF-8 | Python | false | false | 911 | py | # coding: utf-8
"""
KubeVirt API
    This is the KubeVirt API, an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.v1_guest_agent_ping import V1GuestAgentPing
class TestV1GuestAgentPing(unittest.TestCase):
""" V1GuestAgentPing unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1GuestAgentPing(self):
"""
Test V1GuestAgentPing
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubevirt.models.v1_guest_agent_ping.V1GuestAgentPing()
pass
if __name__ == '__main__':
unittest.main()
| [
"kubevirt-bot"
]
| kubevirt-bot |
2ff528d76ec3c032363cc59f587b0f6da4f410dc | 6e373b40393fb56be4437c37b9bfd218841333a8 | /Level_3/Lecture_21/enroll/models.py | 82f2f10c875633e48efc381b7887773f0c960169 | []
| no_license | mahto4you/Django-Framework | 6e56ac21fc76b6d0352f004a5969f9d4331defe4 | ee38453d9eceea93e2c5f3cb6895eb0dce24dc2b | refs/heads/master | 2023-01-22T01:39:21.734613 | 2020-12-04T03:01:17 | 2020-12-04T03:01:17 | 318,383,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | from django.db import models
# Create your models here.
class User(models.Model):
name = models.CharField(max_length=70)
email = models.EmailField(max_length=100)
    password = models.CharField(max_length=100) | [
"[email protected]"
]
| |
a84829ae8a55aa1d175e4dcacd447f99e538bea7 | 49201afc8c3515d9f5cb569f45cd34ba291e84ca | /autobahntestsuite/autobahntestsuite/caseset.py | 2611fd0aadbb7fe5e8808a6db96dedfd3862fc7f | [
"Apache-2.0"
]
| permissive | crossbario/autobahn-testsuite | 2f3fe9a46a806550dddb23ed7bc98a94c47d5bd8 | 09cfbf74b0c8e335c6fc7df88e5c88349ca66879 | refs/heads/master | 2023-09-06T01:16:06.357182 | 2022-11-02T18:00:25 | 2022-11-02T18:00:25 | 3,762,517 | 718 | 74 | Apache-2.0 | 2022-01-26T11:07:29 | 2012-03-19T09:59:18 | Python | UTF-8 | Python | false | false | 5,570 | py | ###############################################################################
##
## Copyright (c) Crossbar.io Technologies GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ("CaseSet",)
import re
class CaseSet:
def __init__(self, CaseSetName, CaseBaseName, Cases, CaseCategories, CaseSubCategories):
self.CaseSetName = CaseSetName
self.CaseBaseName = CaseBaseName
self.Cases = Cases
self.CaseCategories = CaseCategories
self.CaseSubCategories = CaseSubCategories
## Index:
## "1.2.3" => Index (1-based) of Case1_2_3 in Cases
##
self.CasesIndices = {}
i = 1
for c in self.Cases:
self.CasesIndices[self.caseClasstoId(c)] = i
i += 1
## Index:
## "1.2.3" => Case1_2_3
##
self.CasesById = {}
for c in self.Cases:
self.CasesById[self.caseClasstoId(c)] = c
def caseClasstoId(self, klass):
"""
Class1_2_3 => '1.2.3'
"""
l = len(self.CaseBaseName)
return '.'.join(klass.__name__[l:].split("_"))
def caseClasstoIdTuple(self, klass):
"""
Class1_2_3 => (1, 2, 3)
"""
l = len(self.CaseBaseName)
return tuple([int(x) for x in klass.__name__[l:].split("_")])
def caseIdtoIdTuple(self, id):
"""
'1.2.3' => (1, 2, 3)
"""
return tuple([int(x) for x in id.split('.')])
def caseIdTupletoId(self, idt):
"""
(1, 2, 3) => '1.2.3'
"""
return '.'.join([str(x) for x in list(idt)])
def caseClassToPrettyDescription(self, klass):
"""
Truncates the rest of the description after the first HTML tag
and coalesces whitespace
"""
return ' '.join(klass.DESCRIPTION.split('<')[0].split())
def resolveCasePatternList(self, patterns):
"""
Return list of test cases that match against a list of case patterns.
"""
specCases = []
for c in patterns:
if c.find('*') >= 0:
s = c.replace('.', '\.').replace('*', '.*')
p = re.compile(s)
t = []
for x in self.CasesIndices.keys():
if p.match(x):
t.append(self.caseIdtoIdTuple(x))
for h in sorted(t):
specCases.append(self.caseIdTupletoId(h))
else:
specCases.append(c)
return specCases
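    # Illustrative expansion (a sketch only: `cs` stands for a CaseSet
    # instance, and the concrete ids depend on the Cases list handed to
    # __init__, so the values below are assumptions):
    #
    #   cs.resolveCasePatternList(["1.1.*", "2.3"])
    #   # -> ["1.1.1", "1.1.2", ..., "2.3"]   (wildcards expanded, sorted)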
def parseSpecCases(self, spec):
"""
Return list of test cases that match against case patterns, minus exclude patterns.
"""
specCases = self.resolveCasePatternList(spec["cases"])
if spec.has_key("exclude-cases"):
excludeCases = self.resolveCasePatternList(spec["exclude-cases"])
else:
excludeCases = []
c = list(set(specCases) - set(excludeCases))
cases = [self.caseIdTupletoId(y) for y in sorted([self.caseIdtoIdTuple(x) for x in c])]
return cases
def parseExcludeAgentCases(self, spec):
"""
Parses "exclude-agent-cases" from the spec into a list of pairs
of agent pattern and case pattern list.
"""
if spec.has_key("exclude-agent-cases"):
ee = spec["exclude-agent-cases"]
pats1 = []
for e in ee:
s1 = "^" + e.replace('.', '\.').replace('*', '.*') + "$"
p1 = re.compile(s1)
pats2 = []
for z in ee[e]:
s2 = "^" + z.replace('.', '\.').replace('*', '.*') + "$"
p2 = re.compile(s2)
pats2.append(p2)
pats1.append((p1, pats2))
return pats1
else:
return []
def checkAgentCaseExclude(self, patterns, agent, case):
"""
Check if we should exclude a specific case for given agent.
"""
for p in patterns:
if p[0].match(agent):
for pp in p[1]:
if pp.match(case):
return True
return False
def getCasesByAgent(self, spec):
caseIds = self.parseSpecCases(spec)
epats = self.parseExcludeAgentCases(spec)
res = []
for server in spec['testees']:
agent = server['name']
res2 = []
for caseId in caseIds:
if not self.checkAgentCaseExclude(epats, agent, caseId):
res2.append(self.CasesById[caseId])
if len(res2) > 0:
o = {}
o['name'] = str(server['name'])
o['url'] = str(server['url'])
o['auth'] = server.get('auth', None)
o['cases'] = res2
res.append(o)
return res
def generateCasesByTestee(self, spec):
caseIds = self.parseSpecCases(spec)
epats = self.parseExcludeAgentCases(spec)
res = {}
for obj in spec['testees']:
testee = obj['name']
res[testee] = []
for caseId in caseIds:
if not self.checkAgentCaseExclude(epats, testee, caseId):
res[testee].append(self.CasesById[caseId])
return res
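    # For reference, a minimal spec dict in the shape consumed above; the key
    # names match what parseSpecCases/parseExcludeAgentCases/getCasesByAgent
    # read, but every value is a made-up example:
    #
    #   spec = {
    #       "cases": ["1.*", "2.1.1"],
    #       "exclude-cases": ["1.2.*"],
    #       "exclude-agent-cases": {"AutobahnPython*": ["2.*"]},
    #       "testees": [{"name": "AutobahnPython", "url": "ws://127.0.0.1:9001"}],
    #   }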
| [
"[email protected]"
]
| |
3c37470e6687cc51f01b3bfb39c7f931f854f693 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/Gio/SocketServiceClass.py | 8c18c95238ae487ac715dd801bd46c959b88b0ce | []
| no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 5,419 | py | # encoding: utf-8
# module gi.repository.Gio
# from /usr/lib64/girepository-1.0/Gio-2.0.typelib
# by generator 1.147
# no doc
# imports
import gi as __gi
import gi.overrides as __gi_overrides
import gi.overrides.Gio as __gi_overrides_Gio
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.GObject as __gi_repository_GObject
import gobject as __gobject
class SocketServiceClass(__gi.Struct):
"""
:Constructors:
::
SocketServiceClass()
"""
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
incoming = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
parent_class = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved1 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved2 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved3 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved4 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved5 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved6 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(SocketServiceClass), '__module__': 'gi.repository.Gio', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'SocketServiceClass' objects>, '__weakref__': <attribute '__weakref__' of 'SocketServiceClass' objects>, '__doc__': None, 'parent_class': <property object at 0x7f4b87fc8810>, 'incoming': <property object at 0x7f4b87fc8900>, '_g_reserved1': <property object at 0x7f4b87fc89f0>, '_g_reserved2': <property object at 0x7f4b87fc8ae0>, '_g_reserved3': <property object at 0x7f4b87fc8bd0>, '_g_reserved4': <property object at 0x7f4b87fc8cc0>, '_g_reserved5': <property object at 0x7f4b87fc8db0>, '_g_reserved6': <property object at 0x7f4b87fc8ea0>})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(SocketServiceClass)
| [
"[email protected]"
]
| |
816ae873b0b90fcf3321f06f6a70489ed6eaeaa6 | c07380914a44df334194f234c33858f357365c19 | /ENV/lib/python2.7/site-packages/theano/tensor/tests/test_gc.py | d1304de7e268985aa6ba0543e87bf76860c9f26b | []
| no_license | damianpolan/Music-Genre-Classification | 318952ae7de5d0b0bdf5676e28071c7b38d0e1c5 | acd723ae1432ce798866ebb97ef3c484db37e971 | refs/heads/master | 2022-12-24T09:23:55.514337 | 2016-03-22T14:49:28 | 2016-03-22T14:49:28 | 42,965,899 | 4 | 4 | null | 2022-12-12T20:26:24 | 2015-09-22T23:05:37 | Python | UTF-8 | Python | false | false | 4,130 | py | import cPickle
import sys
import numpy
import theano
from theano import tensor as T
import time
def test_no_reuse():
x = T.lvector()
y = T.lvector()
f = theano.function([x, y], x + y)
#provide both inputs in the first call
f(numpy.ones(10, dtype='int64'), numpy.ones(10, dtype='int64'))
try:
f(numpy.ones(10))
except TypeError:
return
assert not 'should not get here'
def test_gc_never_pickles_temporaries():
x = T.dvector()
#print >> sys.stderr, 'BUILDING GRAPH'
    for i in xrange(2): # TODO: 30 causes a very LONG compilation due to MERGE
if i:
r = r + r/10
else:
r = x
optimizer = None
optimizer = 'fast_run'
for f_linker, g_linker in [
(theano.PerformLinker(allow_gc=True),
theano.PerformLinker(allow_gc=False)),
(theano.OpWiseCLinker(allow_gc=True),
theano.OpWiseCLinker(allow_gc=False))]:
#f_linker has garbage collection
#g_linker has no garbage collection
#print >> sys.stderr, 'COMPILING'
f = theano.function([x], r, mode=theano.Mode(optimizer=optimizer,
linker=f_linker))
g = theano.function([x], r, mode=theano.Mode(optimizer=optimizer,
linker=g_linker))
len_pre_f = len(cPickle.dumps(f))
len_pre_g = len(cPickle.dumps(g))
        # We can't compare the content or the length of the strings
        # between f and g, for two reasons: we store some timing
        # information as floats, which won't be the same on each run,
        # and different floats can have different lengths when printed.
def a(fn):
return len(cPickle.dumps(fn.maker))
assert a(f) == a(f) # some sanity checks on the pickling mechanism
assert a(g) == a(g) # some sanity checks on the pickling mechanism
def b(fn):
return len(
cPickle.dumps(
theano.compile.function_module._pickle_Function(
fn)))
assert b(f) == b(f) # some sanity checks on the pickling mechanism
def c(fn):
return len(cPickle.dumps(fn))
assert c(f) == c(f) # some sanity checks on the pickling mechanism
assert c(g) == c(g) # some sanity checks on the pickling mechanism
# now run the function once to create temporaries within the no-gc
# linker
f(numpy.ones(100, dtype='float64'))
g(numpy.ones(100, dtype='float64'))
# serialize the functions again
post_f = cPickle.dumps(f)
post_g = cPickle.dumps(g)
len_post_f = len(post_f)
len_post_g = len(post_g)
# assert that f() didn't cause the function to grow
# allow_gc should leave the function un-changed by calling
assert len_pre_f == len_post_f
# assert that g() didn't cause g to grow because temporaries
# that weren't collected shouldn't be pickled anyway
# Allow for a couple of bytes of difference, since timing info,
# for instance, can be represented as text of varying size.
assert abs(len_post_f - len_post_g) < 16, (
f_linker, len_post_f, len_post_g)
def test_merge_opt_runtime():
"""In the original merge optimization, the following graph took
like caused the MERGE optimizer to exhibit really bad performance
(quadratic? exponential?)
Ironically, there is actually no merging to do in this graph.
"""
x = T.dvector()
for i in xrange(50):
if i:
r = r + r/10
else:
r = x
t = time.time()
f = theano.function([x], r, mode='FAST_COMPILE')
# FAST_RUN does in-place optimizer which requires a lot of
# toposorting, which is actually pretty slow at the moment. This
# test was designed to test MergeOptimizer... so I'm leaving
# toposort optimizations for a later date.
dt = time.time() - t
# it should never take longer than 5 seconds to compile this graph
assert dt < 5.0
| [
"[email protected]"
]
| |
a2c2e07a8afdcf2c8f91018caceb18c216081b48 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Kivy/kivy/examples/canvas/fbo_canvas.py | dd06928bdb98fedb7c9f34cb533e75a733227641 | [
"MIT"
]
| permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:0cc3f5272ba46eb262d440a5c297b24905c455a2aa930e0baaa5f7f37b3486e6
size 2544
| [
"[email protected]"
]
| |
3a89353fe1bf9bc2c3a18a54b8aa626d89c3dc77 | 15978aacf0e44a890e36ff94c305aca5a056e5e8 | /13day/10-有返回的装饰器和通用的装饰器.py | 49f8d4065d8cba8ccf18b0da1614f1193e0a14d8 | []
| no_license | ittoyou/1805_python_2 | ffbe613d893208b2454ef4f25cc2b8a9951ff047 | 1d6331a83598863042912bb26205d34417abed73 | refs/heads/master | 2020-03-24T13:58:12.276827 | 2018-07-27T07:58:57 | 2018-07-27T07:58:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | def w1(fun):
def inner(*args,**kwargs):
print("验证登录")
return fun(*args,**kwargs)
return inner
@w1
def play(a,b):
print("------------%s-----%s----------"%(a,b))
return "hehe"
ret = play("1","2")
print(ret)
@w1
def play1():
print("哈哈哈")
play1()
@w1
def play2(a):
print("哈哈哈2%s"%a)
play2("嘎嘎")
@w1
def play3():
return "hahah3"
ret = play3()
print(ret)
| [
"[email protected]"
]
| |
3679dbbc8bc44685045edec9a6d71a1e00d53833 | 45ee96b582d7b3e045819db510088d2cb640dfde | /BOJ/Previous/Implementation/완전제곱수.py | e78f92dafcc73f0e1bfc49baa5f3d15bd4298468 | []
| no_license | tom9744/Algorithms | e54b649014f3b478bfbc7a0f9e8e56ad5dbc1304 | 4496b1c992ab4322289e5a200567f3df00478917 | refs/heads/master | 2023-05-06T00:59:12.767655 | 2021-05-26T16:26:50 | 2021-05-26T16:26:50 | 330,401,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | # BOJ 1977: Perfect Squares (완전제곱수)
import math
M = int(input())
N = int(input())
perfect_square_numbers = []
for number in range(M, N + 1):
if math.sqrt(number).is_integer():
perfect_square_numbers.append(number)
if len(perfect_square_numbers) == 0:
print(-1)
else:
print(sum(perfect_square_numbers))
print(perfect_square_numbers[0]) | [
"[email protected]"
]
| |
f2257a66a17f8b82a0bb0a42260b553d534f2889 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/valid_20200616203432.py | d8f31778d62ce6a0b3ed7002c575bb9870500ea2 | []
| no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | # IPv4 --> 4 decimal numbers, each between 0 and 255
# leading zeros are invalid
# check whether each part is a digit between 0 and 255
def valid(address_string):
    parts = address_string.split(".")
    if len(parts) != 4:
        return False
    for part in parts:
        # reject non-digits, leading zeros, and values above 255
        if not part.isdigit() or (len(part) > 1 and part[0] == "0") or int(part) > 255:
            return False
    return True
print(valid("172.16.254.01")) | [
"[email protected]"
]
| |
f695b79388c07e89cfa05c0175e698eadc9d3daa | 8523daaf19e0250962b454d9c4f87f4c7d71ab9d | /models.py | d70630bbfa64fe60497c69c7bc15cf28c945160d | []
| no_license | madtyn/cris | ad2fd35a05efb6829e96bd1aa39c86a0efa8102f | a45410e6a67f589ac7d392bebc1ee9725ff4cd1b | refs/heads/master | 2020-11-30T17:42:09.675319 | 2020-01-19T10:36:32 | 2020-01-19T10:36:32 | 230,450,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,617 | py |
import datetime as dt
from enum import Enum
from collections import namedtuple
from indexes import FIRST_MONTH_COL, COLS_PER_MONTH
StudentMonth = namedtuple('StudentMonth', ['quota', 'number', 'processed'])
class Months(Enum):
OCTOBER = ('Octubre', 10)
NOVEMBER = ('Noviembre', 11)
DECEMBER = ('Diciembre', 12)
JANUARY = ('Enero', 1)
FEBRUARY = ('Febrero', 2)
MARCH = ('Marzo', 3)
APRIL = ('Abril', 4)
MAY = ('Mayo', 5)
JUNE = ('Junio', 6)
JULY = ('Julio', 7)
AUGUST = ('Agosto', 8)
SEPTEMBER = ('Septiembre', 9)
def __new__(cls, *args, **kwargs):
idx = FIRST_MONTH_COL + (len(cls.__members__) * COLS_PER_MONTH)
obj = object.__new__(cls)
obj._value_ = idx
obj.quota_idx = idx
obj.number_idx = idx + 1
obj.processed_idx = idx + 2
obj.trans = args[0]
obj.ordinal = args[1]
return obj
@classmethod
def get_month(cls, ordinal):
for m in cls:
if ordinal == m.ordinal:
return f'{m!s}'
def get_student_month(self, row):
return StudentMonth(row[self.quota_idx], row[self.number_idx], row[self.processed_idx])
def __ge__(self, other):
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
def __str__(self):
return self.trans
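# Illustrative layout implied by __new__ above (FIRST_MONTH_COL and
# COLS_PER_MONTH come from the indexes module, so the concrete numbers here
# are assumptions): with FIRST_MONTH_COL == 4 and COLS_PER_MONTH == 3,
# Months.OCTOBER maps to columns 4/5/6 (quota/number/processed),
# Months.NOVEMBER to 7/8/9, and so on through SEPTEMBER.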
class CommonInfo(object):
def __init__(self, teacher, nif, school_year, activity):
self.teacher = teacher
self.nif = nif
self.school_year = school_year
self.activity = activity
class Receipt(object):
header_tag = [
"Nombre del escolar: {student}",
"Número de recibo: {number}",
"Precio mensualidad: {quota}",
]
body_tag = [
"{teacher}, con NIF {nif}, ha recibido de los responsables del alumno / a anteriormente citado las",
"cantidades que se desglosan en este recibo en concepto de pago de la actividad \"{activity}\",",
"realizada durante el curso {school_year}",
]
sign_tag = ["A Coruña, {day} de {month} del {year}", ]
def __init__(self, info, student, student_month):
self.info = info
self.student = student
self.number = student_month.number
self.quota = student_month.quota
def header(self):
d = {
'student': self.student,
'number': self.number,
'quota': self.quota,
}
for line in self.header_tag:
yield line.format(**d)
def body(self):
d = {
'teacher': self.info.teacher,
'nif': self.info.nif,
'activity': self.info.activity,
'school_year': self.info.school_year,
}
for line in self.body_tag:
yield line.format(**d)
def sign(self):
d = {
'day': dt.datetime.today().day,
'month': Months.get_month(dt.datetime.today().month),
'year': dt.datetime.today().year
}
for line in self.sign_tag:
yield line.format(**d)
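# A hedged usage sketch of the classes above (every value is made up):
#
#   info = CommonInfo('Jane Doe', '12345678Z', '2019/2020', 'Painting')
#   month = StudentMonth(quota='30', number='7', processed=True)
#   receipt = Receipt(info, 'John Smith', month)
#   for line in receipt.header():
#       print(line)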
if __name__ == '__main__':
print()
print()
print()
| [
"[email protected]"
]
| |
249d0fc847698e8656f69bffdac9648ab002c339 | 45614a944ffbdb75a0bef955582a722da5ce7492 | /python/selenium/delta_time.py | f3a2a4edc43929e36dcdc6408809e7ed0457801f | []
| no_license | wccgoog/pass | 1c8ab5393547634a27c7543556a75dec771a9e3d | 0ec01536ae10b3d99707002c0e726072acb50231 | refs/heads/2 | 2023-01-15T13:27:26.312648 | 2019-10-23T09:30:45 | 2019-10-23T09:30:45 | 122,595,075 | 0 | 2 | null | 2023-01-07T10:42:38 | 2018-02-23T08:38:36 | JavaScript | UTF-8 | Python | false | false | 1,938 | py | # -*- coding: utf-8 -*-
import datetime,time
from selenium.webdriver.common.action_chains import ActionChains
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def write_delta_time(n):
driver=webdriver.Chrome()
driver.get('http://192.168.0.138:9998')
driver.maximize_window()
WebDriverWait(driver,15).until(EC.presence_of_all_elements_located((By.ID,'account')))
    driver.find_element_by_id('account').send_keys('5815') # account
    driver.find_element_by_id('password').send_keys('WW5815') # password
start=driver.find_element_by_css_selector('div.handler.handler_bg')
action=ActionChains(driver)
action.drag_and_drop_by_offset(start,250,0)
    action.perform() # drag the slider
driver.find_element_by_id('loginbutton').click()
WebDriverWait(driver,15).until(EC.presence_of_all_elements_located((By.ID,"tabwindow_0")))
f=open('C:/Users/Administrator/Desktop/time.txt','a')
for i in range(n):
for tab in driver.find_elements_by_css_selector('div.tab_close'):
tab.click()
driver.find_element_by_xpath("//ul[@id='jMenu']/li/a/span").click()
driver.find_element_by_css_selector("li.jmenu-level-0 > ul > li > a > span").click()
time_start=datetime.datetime.now()
WebDriverWait(driver,30).until(EC.frame_to_be_available_and_switch_to_it(0))
        time.sleep(1) # an error is raised without this pause
WebDriverWait(driver,15).until(EC.presence_of_all_elements_located((By.XPATH,"//div[@id='ListTable']/div[5]/div/div[5]/div[8]")))
time_end=datetime.datetime.now()
time_total=time_end-time_start
f.write(str(time_total)+'\n')
driver.switch_to.default_content()
f.close()
if __name__=='__main__':
    n=input('Enter the number of runs: ')
write_delta_time(int(n)) | [
"[email protected]"
]
| |
11d91f7682d807291ec8c6d20fa64f3166ad3a77 | f682c74fb65f0d951821b77bf96cee28d00ae3dd | /博物馆网数据采集子系统/展览爬取/广东省博物馆展览爬取.py | a267ae318e81038908bb00ebc4349ddfeb6944bd | []
| no_license | 1806-1/Software-engineering | 7e5add7b40d123dca0daa39d83a8fc4c16f8cb0d | 0a75ed857410bb8e1f882bd8e49504c43590ffd8 | refs/heads/main | 2023-05-13T00:07:58.579811 | 2021-06-06T08:09:41 | 2021-06-06T08:09:41 | 354,178,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,947 | py | # -*- coding: utf-8 -*-
"""
Created on Sun May 16 09:21:11 2021
@author: lenovo
"""
import requests
import pandas as pd
import csv
from bs4 import BeautifulSoup
hdrs = {'User-Agent':'Mozilla/5.0 (X11; Fedora; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)'}
# URL of the museum's activity list page
url = "http://www.gdmuseum.com/"
r = requests.get(url, headers = hdrs)
soup = BeautifulSoup(r.content.decode('utf8', 'ignore'), 'lxml')
# body section of the activity list; adjust class_ to match the site's tags
div_list = soup.find_all('div', class_ ='tz_r_first')
# find the next-level URLs, i.e. the detail page of each activity
anchors = soup.findAll('a')
links = [] # stores the detail page URL of each activity
for tag in soup.find_all('ul', class_='xwdt'):
anchors = tag.findAll('a')
#print("anchors",anchors)
for a in anchors:
links.append(a['href'])
#print(links)
# scrape the activity names from the activity list page
TitleList = []  # stores activity names; the last ten characters of each scraped entry happen to be the activity time
k = 0
for tag in soup.find_all('ul', class_='xwdt'):
k = k+1
title = tag.get_text()
TitleList.append(title)
#print(TitleList)
#
IntroList = []  # stores introductions (raw scraping leaves long passages here; later only the first sentence, split on the full stop, is uploaded to the database)
ImgList = []  # stores image URLs (ends up identical to the Photo list written to the CSV, copied directly)
for kk in links:  # iterate over the detail page links
Detailurl = kk
Detailr = requests.get(Detailurl, headers = hdrs)
Detailsoup = BeautifulSoup(Detailr.content.decode('utf8', 'ignore'), 'lxml')
    for tag in Detailsoup.findAll('div', class_ = 'yknr_mav'):  # body of the activity introduction on the detail page
        img_link = tag.findAll('img')  # find all img tags
        print(img_link)
        for a in img_link:  # iterate over the img tags
            ImgList.append("http://www.gdmuseum.com/" + a['href'])  # the page's img links lack the host prefix, so add it ourselves
            print("http://www.gdmuseum.com/" + a['href'])
            break  # take only the first image
    i = 0  # counter
    for tag in Detailsoup.select('p'):  # iterate over the page's <p> tags, which hold the textual introduction
i = i+1
        if(i <= 2):  # the first two are the time and miscellany and are not needed; the third is the first sentence of the introduction, stored in IntroList
continue
Introduce = tag.get_text()
# print(Introduce)
        if(len(Introduce) > 5):  # keep anything longer than 5 characters and stop (i.e. save only the first one)
IntroList.append(Introduce)
break
else:
            continue  # probably just whitespace; too short to keep
# print(IntroList)
# =============================================================================
# scraping finished
# start reshaping the data for output
# =============================================================================
# the lists finally written to the CSV
Name_Act_List = [] # activity names
Time_Act_List = [] # activity times
Intro_Act_List = [] # activity introductions
Photo_Act_List = [] # activity image URLs
newTitleList = TitleList[0].split('\n')  # TitleList holds a single long string whose activities are separated by '\n'; this splits it into one element per activity
print(newTitleList)
for name in newTitleList:
    length = len(name)
    if(length < 2):  # split may yield whitespace-only elements; skip anything too short
        continue
    Time = name[length-10:]  # the last ten characters are exactly the time
    # if(len(Time) == 10):
    #     Time_Act_List.append(Time)
    Time_Act_List.append(Time)
    Title = name[:length-10]  # everything before the last ten characters is the activity name
Name_Act_List.append(Title)
print(Time_Act_List)
print(Name_Act_List)
for intro in IntroList:
    a = intro.find('。')  # position of the first full stop
    intro = intro[:a+1]  # keep the text up to the first full stop as the introduction
    out = "".join(intro.split())  # strip format-control characters such as '\xa0' and keep only the text
Intro_Act_List.append(out)
print(out)
print(Intro_Act_List)
Photo_Act_List = ImgList
help_x_list = []
Museum_list = []
for i in range(0, len(Name_Act_List)):
help_x_list.append(str(i))
Museum_list.append("广东省博物馆
")
# =============================================================================
# start writing the data to the CSV
# =============================================================================
dataframe = pd.DataFrame({
'博物馆名称':Museum_list,
'活动名字':Name_Act_List,
'活动时间':Time_Act_List,
'活动介绍':Intro_Act_List,
'活动图片地址':Photo_Act_List
})
dataframe.to_csv(r"广东省博物馆活动.csv",sep=',')
| [
"[email protected]"
]
| |
7ffa82f194c3ea745e4353afbfb80085484f5606 | dd256415176fc8ab4b63ce06d616c153dffb729f | /aditya-works-feature-python_programming (1)/aditya-works-feature-python_programming/23-Jul-2019/method_examples/class_methods_2.py | 05e1064eb9949454b9956604a1def6df3fba359e | []
| no_license | adityapatel329/python_works | 6d9c6b4a64cccbe2717231a7cfd07cb350553df3 | 6cb8b2e7f691401b1d2b980f6d1def848b0a71eb | refs/heads/master | 2020-07-24T17:15:39.839826 | 2019-09-12T07:53:28 | 2019-09-12T07:53:28 | 207,993,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | class DecoratorExample:
def __init__(self):
print('Hello, world')
@classmethod
def example_function(cls):
print("In a class method ")
cls.some_other_function()
@staticmethod
def some_other_function():
print('Hello')
de = DecoratorExample()
de.example_function()
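# A classmethod can also be invoked on the class itself, without an instance:
DecoratorExample.example_function()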
| [
"[email protected]"
]
| |
14e8824bedd651f4e64c978ea76622167087b5e4 | 7c9dfab9ee71de58544190fcdb8c145fcc73be20 | /keras_style_transfer/library/style_transfer.py | 412031162ead989af4fe10510b4a7548f6218b10 | [
"MIT"
]
| permissive | javad-sajady/keras-style-transfer | 1b7b2258729d90fa9716b20aafa3a759ec64fb87 | 2cb755498bc64d26bedc2e660604eee48fa15aa3 | refs/heads/master | 2021-09-07T00:17:00.014549 | 2018-02-14T02:32:06 | 2018-02-14T02:32:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,004 | py | from keras_style_transfer.library.nst_utils import *
from keras_style_transfer.library.download_utils import download_vgg19
import numpy as np
import tensorflow as tf
def compute_content_cost(a_C, a_G):
"""
Computes the content cost
Arguments:
    a_C -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image C
    a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image G
Returns:
J_content -- scalar that you compute using equation 1 above.
"""
# Retrieve dimensions from a_G (≈1 line)
m, n_H, n_W, n_C = a_G.get_shape().as_list()
# Reshape a_C and a_G (≈2 lines)
a_C_unrolled = tf.reshape(tf.transpose(a_C, (3, 1, 2, 0)), shape=(n_H * n_W, n_C))
a_G_unrolled = tf.reshape(tf.transpose(a_G, (3, 1, 2, 0)), shape=(n_H * n_W, n_C))
# compute the cost with tensorflow (≈1 line)
J_content = tf.divide(tf.reduce_sum(tf.square(tf.subtract(a_C_unrolled, a_G_unrolled))), 4 * n_H * n_W * n_C)
return J_content
def compute_content_cost_test():
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)
a_C = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
J_content = compute_content_cost(a_C, a_G)
print("J_content = " + str(J_content.eval()))
def gram_matrix(A):
"""
Argument:
A -- matrix of shape (n_C, n_H*n_W)
Returns:
GA -- Gram matrix of A, of shape (n_C, n_C)
"""
GA = tf.matmul(A, tf.transpose(A))
return GA
def gram_matrix_test():
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)
A = tf.random_normal([3, 2 * 1], mean=1, stddev=4)
GA = gram_matrix(A)
print("GA = " + str(GA.eval()))
def compute_layer_style_cost(a_S, a_G):
"""
Arguments:
    a_S -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image S
    a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image G
Returns:
J_style_layer -- tensor representing a scalar value, style cost defined above by equation (2)
"""
# Retrieve dimensions from a_G (≈1 line)
m, n_H, n_W, n_C = a_G.get_shape().as_list()
# Reshape the images to have them of shape (n_C, n_H*n_W) (≈2 lines)
a_S = tf.reshape(tf.transpose(a_S, (3, 1, 2, 0)), shape=(n_C, n_H * n_W))
a_G = tf.reshape(tf.transpose(a_G, (3, 1, 2, 0)), shape=(n_C, n_H * n_W))
# Computing gram_matrices for both images S and G (≈2 lines)
GS = gram_matrix(a_S)
GG = gram_matrix(a_G)
# Computing the loss (≈1 line)
J_style_layer = tf.divide(tf.reduce_sum(tf.square(tf.subtract(GS, GG))), 4 * n_C * n_C * (n_H * n_W) * (n_H * n_W))
return J_style_layer
def compute_layer_style_cost_test():
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)
a_S = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
J_style_layer = compute_layer_style_cost(a_S, a_G)
print("J_style_layer = " + str(J_style_layer.eval()))
def compute_style_cost(sess, model, STYLE_LAYERS):
"""
Computes the overall style cost from several chosen layers
Arguments:
model -- our tensorflow model
STYLE_LAYERS -- A python list containing:
- the names of the layers we would like to extract style from
- a coefficient for each of them
Returns:
J_style -- tensor representing a scalar value, style cost defined above by equation (2)
"""
# initialize the overall style cost
J_style = 0
for layer_name, coeff in STYLE_LAYERS:
# Select the output tensor of the currently selected layer
out = model[layer_name]
# Set a_S to be the hidden layer activation from the layer we have selected, by running the session on out
a_S = sess.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model[layer_name]
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute style_cost for the current layer
J_style_layer = compute_layer_style_cost(a_S, a_G)
# Add coeff * J_style_layer of this layer to overall style cost
J_style += coeff * J_style_layer
return J_style
def total_cost(J_content, J_style, alpha=10, beta=40):
"""
Computes the total cost function
Arguments:
J_content -- content cost coded above
J_style -- style cost coded above
alpha -- hyperparameter weighting the importance of the content cost
beta -- hyperparameter weighting the importance of the style cost
Returns:
J -- total cost as defined by the formula above.
"""
J = J_content * alpha + J_style * beta
return J
def total_cost_test():
tf.reset_default_graph()
with tf.Session() as test:
np.random.seed(3)
J_content = np.random.randn()
J_style = np.random.randn()
J = total_cost(J_content, J_style)
print("J = " + str(J))
class StyleTransfer(object):
def __init__(self, vgg19_model_path):
self.model = None
self.vgg19_model_path = vgg19_model_path
def fit_and_transform(self, content_image, style_image, output_dir_path, num_iterations=200):
STYLE_LAYERS = [
('conv1_1', 0.2),
('conv2_1', 0.2),
('conv3_1', 0.2),
('conv4_1', 0.2),
('conv5_1', 0.2)]
content_image = reshape_and_normalize_image(content_image)
style_image = reshape_and_normalize_image(style_image)
input_image = generate_noise_image(content_image)
generated_image = input_image
# Reset the graph
tf.reset_default_graph()
sess = tf.InteractiveSession()
download_vgg19(self.vgg19_model_path)
self.model = load_vgg_model(self.vgg19_model_path)
print(self.model)
# Assign the content image to be the input of the VGG model.
sess.run(self.model['input'].assign(content_image))
# Select the output tensor of layer conv4_2
out = self.model['conv4_2']
# Set a_C to be the hidden layer activation from the layer we have selected
a_C = sess.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model['conv4_2']
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute the content cost
J_content = compute_content_cost(a_C, a_G)
# Assign the input of the model to be the "style" image
sess.run(self.model['input'].assign(style_image))
# Compute the style cost
J_style = compute_style_cost(sess, self.model, STYLE_LAYERS)
J = total_cost(J_content, J_style)
# define optimizer (1 line)
optimizer = tf.train.AdamOptimizer(2.0)
# define train_step (1 line)
train_step = optimizer.minimize(J)
# Initialize global variables (you need to run the session on the initializer)
sess.run(tf.global_variables_initializer())
# Run the noisy input image (initial generated image) through the model. Use assign().
sess.run(self.model['input'].assign(input_image))
for i in range(num_iterations):
# Run the session on the train_step to minimize the total cost
sess.run(train_step)
# Compute the generated image by running the session on the current model['input']
generated_image = sess.run(self.model['input'])
            # Print costs every 2 iterations.
if i % 2 == 0:
Jt, Jc, Js = sess.run([J, J_content, J_style])
print("Iteration " + str(i) + " :")
print("total cost = " + str(Jt))
print("content cost = " + str(Jc))
print("style cost = " + str(Js))
            # Save the image every 20 iterations.
if i % 20 == 0:
                # save the current generated image in the output directory
save_image(output_dir_path + "/" + str(i) + ".png", generated_image)
# save last generated image
save_image(output_dir_path + '/generated_image.jpg', generated_image)
return generated_image
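# --- Editor's usage sketch (illustrative; not part of the original file) ---
# The image paths, the VGG-19 .mat checkpoint location and the imageio
# loader below are assumptions -- substitute your own.
def _style_transfer_usage_sketch():
    import imageio
    st = StyleTransfer(vgg19_model_path='pretrained-model/imagenet-vgg-verydeep-19.mat')
    content = imageio.imread('images/content.jpg')
    style = imageio.imread('images/style.jpg')
    # Writes intermediate images plus generated_image.jpg into 'output'
    st.fit_and_transform(content, style, 'output', num_iterations=40)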
def main():
compute_content_cost_test()
gram_matrix_test()
compute_layer_style_cost_test()
total_cost_test()
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
199693aef1523a92dec280c913acd26fa75b684e | e21e7623d99312dc8a4c0eedc0febb22d24c7918 | /venv/bin/futurize | fa21932432d4e2703fbdc5920b7e4f2b8d2c7dd4 | []
| no_license | axelonet/E-voting-system-on-blockchain | 49aa9b2b45f75e85ed9de4d113849c1f3d95dd1d | 2651bab50f29a2b68ad17b2d2240279af2f24419 | refs/heads/master | 2023-01-04T04:03:44.817356 | 2020-04-15T06:06:36 | 2020-04-15T06:06:36 | 255,822,230 | 1 | 0 | null | 2020-10-25T11:52:19 | 2020-04-15T06:12:39 | null | UTF-8 | Python | false | false | 446 | #!"/Users/anmolpanwar/Documents/PycharmProjects/python practice/venv/bin/python"
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.18.2','console_scripts','futurize'
__requires__ = 'future==0.18.2'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('future==0.18.2', 'console_scripts', 'futurize')()
)
| [
"[email protected]"
]
| ||
722e532abb9d183c9faeb239a798949f7cbb32e0 | a75b7fd002a9f8b4823dcc9cd6c2c5291ea31fe8 | /ir_datasets/datasets/wikir.py | cfa056b832e3aa089533038d543bd5ee028d47f4 | [
"Apache-2.0"
]
| permissive | FRD898/ir_datasets | 3edadc3859eb3c3c7a3f7c33c14aebe709aad2f2 | e4bfec64d41cc09c84315f675f2af768ea26f5b4 | refs/heads/master | 2023-06-16T10:32:12.367257 | 2021-07-18T10:41:20 | 2021-07-18T10:41:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,173 | py | import contextlib
from pathlib import Path
from typing import NamedTuple
import ir_datasets
from ir_datasets.util import ZipExtractCache, DownloadConfig
from ir_datasets.datasets.base import Dataset, YamlDocumentation
from ir_datasets.formats import CsvQueries, CsvDocs, TrecQrels, TrecScoredDocs
NAME = 'wikir'
_logger = ir_datasets.log.easy()
QRELS_DEFS = {
2: "Query is the article title",
1: "There is a link to the article with the query as its title in the first sentence",
0: "Otherwise",
}
class File:
def __init__(self, dlc, relative_path):
self.dlc = dlc
self.relative_path = relative_path
def path(self):
return str(next(Path(self.dlc.path()).glob(self.relative_path)))
@contextlib.contextmanager
def stream(self):
with open(self.path(), 'rb') as f:
yield f
def _init():
base_path = ir_datasets.util.home_path()/NAME
dlc = DownloadConfig.context(NAME, base_path)
documentation = YamlDocumentation(f'docs/{NAME}.yaml')
subsets = {}
sources = [
('en1k', 369721),
('en59k', 2454785),
('fr14k', 736616),
('es13k', 645901),
('it16k', 503012),
]
for source, count_hint in sources:
source_dlc = ZipExtractCache(dlc[source], base_path/source)
docs = CsvDocs(File(source_dlc, "*/documents.csv"), namespace=source, lang=source[:2], count_hint=count_hint)
subsets[source] = Dataset(docs, documentation(source))
for split in ['training', 'validation', 'test']:
subsets[f'{source}/{split}'] = Dataset(
docs,
CsvQueries(File(source_dlc, f"*/{split}/queries.csv"), lang=source[:2]),
TrecQrels(File(source_dlc, f"*/{split}/qrels"), qrels_defs=QRELS_DEFS),
TrecScoredDocs(File(source_dlc, f"*/{split}/BM25.res")),
documentation(f'{source}/{split}')
)
base = Dataset(documentation('_'))
ir_datasets.registry.register(NAME, base)
for s in sorted(subsets):
ir_datasets.registry.register(f'{NAME}/{s}', subsets[s])
return base, subsets
collection, subsets = _init()
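# --- Editor's usage sketch (illustrative; not part of the original module) ---
# Loads one registered subset through the standard ir_datasets entry point;
# iterator names follow ir_datasets conventions (queries_iter / docs_iter /
# qrels_iter / scoreddocs_iter). Triggers a download on first use.
def _wikir_usage_sketch():
    import ir_datasets as irds
    dataset = irds.load('wikir/en1k/training')
    for query in dataset.queries_iter():
        print(query.query_id, query.text)  # CsvQueries yields (query_id, text)
        break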
| [
"[email protected]"
]
| |
93d973806b72476402c087079c684e78920c1e44 | ee8c4c954b7c1711899b6d2527bdb12b5c79c9be | /assessment2/amazon/run/core/controllers/letters.py | 4d6a222a879f80298b4d6ad5f5d5743deb44e15d | []
| no_license | sqlconsult/byte | 02ac9899aebea4475614969b594bfe2992ffe29a | 548f6cb5038e927b54adca29caf02c981fdcecfc | refs/heads/master | 2021-01-25T14:45:42.120220 | 2018-08-11T23:45:31 | 2018-08-11T23:45:31 | 117,135,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | #!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for
controller = Blueprint('letters', __name__, url_prefix='/letters')
# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
# if title == 'Republic': # TODO 2
# return render_template('republic.html') # TODO 2
# else:
# pass
| [
"[email protected]"
]
| |
71a1d35afe3081aaa5e44192447c7494b4a5050e | 0a2cc497665f2a14460577f129405f6e4f793791 | /sdk/containerregistry/azure-containerregistry/azure/containerregistry/_generated/models/_container_registry_enums.py | 8ca5cfea37c17dd1bd1b22ec0ca9d9f1a79ba8bd | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
]
| permissive | hivyas/azure-sdk-for-python | 112158aa9e1dd6e30cf6b3dde19f5db6ea2a577b | 8b3258fa45f5dc25236c22ad950e48aa4e1c181c | refs/heads/master | 2023-06-17T12:01:26.392186 | 2021-05-18T19:56:01 | 2021-05-18T19:56:01 | 313,761,277 | 1 | 1 | MIT | 2020-12-02T17:48:22 | 2020-11-17T22:42:00 | Python | UTF-8 | Python | false | false | 2,910 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.4.1, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class ArtifactArchitecture(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
I386 = "386"
AMD64 = "amd64"
ARM = "arm"
ARM64 = "arm64"
MIPS = "mips"
MIPS_LE = "mipsle"
MIPS64 = "mips64"
MIPS64_LE = "mips64le"
PPC64 = "ppc64"
PPC64_LE = "ppc64le"
RISC_V64 = "riscv64"
S390_X = "s390x"
WASM = "wasm"
class ArtifactOperatingSystem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
AIX = "aix"
ANDROID = "android"
DARWIN = "darwin"
DRAGONFLY = "dragonfly"
FREE_BSD = "freebsd"
ILLUMOS = "illumos"
I_OS = "ios"
JS = "js"
LINUX = "linux"
NET_BSD = "netbsd"
OPEN_BSD = "openbsd"
PLAN9 = "plan9"
SOLARIS = "solaris"
WINDOWS = "windows"
class ManifestOrderBy(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Sort options for ordering manifests in a collection.
"""
#: Do not provide an orderby value in the request.
NONE = "none"
#: Order manifests by LastUpdatedOn field, from most recently updated to least recently updated.
LAST_UPDATED_ON_DESCENDING = "timedesc"
#: Order manifest by LastUpdatedOn field, from least recently updated to most recently updated.
LAST_UPDATED_ON_ASCENDING = "timeasc"
class TagOrderBy(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
#: Do not provide an orderby value in the request.
NONE = "none"
#: Order tags by LastUpdatedOn field, from most recently updated to least recently updated.
LAST_UPDATED_ON_DESCENDING = "timedesc"
#: Order tags by LastUpdatedOn field, from least recently updated to most recently updated.
LAST_UPDATED_ON_ASCENDING = "timeasc"
class TokenGrantType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Grant type is expected to be refresh_token
"""
REFRESH_TOKEN = "refresh_token"
PASSWORD = "password"
| [
"[email protected]"
]
| |
c9a499e0b0d202e5ea52f5ef6a9c4580d811345f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_45/72.py | 8cf783ea15df40bf45a0fc4e0429b4f48fca706b | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,556 | py | #!/usr/bin/env python
#
# jam.py
#
# Copyright 2009 Denis <denis@denis-desktop>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#import re
import itertools
tor = []
p = 0
q = 0
def solve(left, right):
global tor
if not tor:
return 0
if left > right:
return 0
i = 0
middle = float(right + left)/float(2)
'''
goon = True
l = len(tor)
while goon:
if i >= l:
goon = False
i -= 1
if tor[i] > middle:
goon = False
i += 1
i -= 1
if i > 0 and abs(middle - tor[i-1]) <= abs(middle - tor[i]) and tor[i-1] >= left:
i -= 1
'''
min = {'diff': 99999, 'pos': -1}
for i in xrange(0,len(tor)):
newdiff = abs(middle-tor[i])
if newdiff < min['diff']:
min['diff'] = newdiff
min['pos'] = i
released = tor[min['pos']]
if released < left or released > right:
return 0
#print left,' ',middle,' ',right
#print 'of',tor,'choose',released
del tor[min['pos']]
answer = right-left
answer += solve(left, released-1)
answer += solve(released+1, right)
return answer
def force(to, left, right):
aaa = 99999
if not to:
return 0
if left == right:
return 0
i = 0
#print 'Got',to,left,right
l = len(to)
while i < l and to[i] < left:
i += 1
#print 'Skipped to',i,'(',to[i],')'
while i < l and to[i] <= right:
answer = right-left
if i > 0:
answer += force(to[:i], left, to[i]-1)
if i < l:
answer += force(to[i+1:], to[i]+1, right)
aaa = min(aaa, answer)
i += 1
return aaa
def main():
global tor, p, q
with open("C-small-attempt5.in") as f:
n = f.readline()
n = int(n)
for case in xrange(1, n+1):
p, q = map(int, f.readline().strip().split(' '))
tor = map(int, f.readline().strip().split(' '))
#answer = solve(1, p)
answer = force(tor, 1, p)
print "Case #%d: %d" % (case, answer)
return 0
if __name__ == '__main__': main()
| [
"[email protected]"
]
| |
137982ad4fabf053ac21d39abd872101e3ece56c | b124d99a5d7a139d31405aefdbfed09f6eb3d55b | /beebcn/spiders/beebcn.py | 0ee6535f8b6015585dac04bef036e47860cb503b | []
| no_license | daniel-kanchev/beebcn | 26efaab276e525b919b4fbeb06251a2850573de4 | d7e8142b41501b2586e0f8e01f8a690355701268 | refs/heads/main | 2023-04-04T05:08:37.732275 | 2021-04-15T11:13:28 | 2021-04-15T11:13:28 | 358,230,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,320 | py | import scrapy
from scrapy.loader import ItemLoader
from itemloaders.processors import TakeFirst
from datetime import datetime
from beebcn.items import Article
import requests
import json
import re
class beebcnSpider(scrapy.Spider):
name = 'beebcn'
start_urls = ['http://www.beeb.com.cn/#/home/banknews']
def parse(self, response):
json_response = json.loads(requests.get(
"http://www.beeb.com.cn/beebPortal/data/content/banknews.json?MmEwMD=5RroZJL4EsQSA_im0lwzRvTmJYy8PJ4cOClXNiNribCHRHjumBO3uBMMxoJzIJ3r62_9HrN9.tr70HIghQ5aKUXz1cuP4ESFycL1xKjK_Na4.JFV_a8PKOxBOF0DcMGoWbpFpqiVpl2aZy2VGwcostDBYt9hUkpu3u7a7ICHNf_K32mxnn0_.wxIMLtrYIf7PM3bZt993kiMI8Nyen.9unNqhUhblx0ILi5cJrPveYNJPVtvuppJobjGdG6nFKcBtQ_nFPjWN0kounYjSEQWn0O.t.BuCKWKbuGZkMNlyziFmT02JgsR0BLc4tfTEvv36").text)
articles = json_response["articleList"]
for article in articles:
item = ItemLoader(Article())
item.default_output_processor = TakeFirst()
title = article["title"]
date = article["createTime"]
p = re.compile(r'<.*?>')
content = p.sub('', article["content"])
item.add_value('title', title)
item.add_value('date', date)
item.add_value('content', content)
yield item.load_item()
| [
"[email protected]"
]
| |
4d4c3f3cfe74ab3c276e571e2d0a0a5b1a44d225 | 0e3a9758175f37e4d702ff6ccd6d2ee2e91f727f | /deepiu/util/input_flags.py | dc4a03cedbc127ecd0561e778b87fc24b49be5f1 | []
| no_license | hitfad/hasky | 94d7248f21a1ec557a838b77987e34b77fb9a0c7 | c1d2d640643037c62d64890c40de36ba516eb167 | refs/heads/master | 2021-01-20T22:55:36.778378 | 2017-08-29T13:23:50 | 2017-08-29T13:23:50 | 101,830,092 | 1 | 0 | null | 2017-08-30T02:48:35 | 2017-08-30T02:48:35 | null | UTF-8 | Python | false | false | 5,849 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# \file input_flags.py
# \author chenghuige
# \date 2016-12-25 00:17:18.268341
# \Description
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
#--------- read data
flags.DEFINE_integer('batch_size', 32, 'Batch size. default as im2text default')
flags.DEFINE_integer('eval_batch_size', 100, 'Batch size.')
flags.DEFINE_integer('fixed_eval_batch_size', 30, """must be >= num_fixed_evaluate_examples.
                                                  If it equals the real dataset length, the same fixed
                                                  sequence is shown each time; otherwise a different
                                                  fixed subset can be shown each run.
                                                  Useful if you only want to see 2 examples that
                                                  differ each time: to see them 2 by 2, set
                                                  num_fixed_evaluate_examples = 2 and
                                                  fixed_eval_batch_size = 2
                                                  """)
flags.DEFINE_integer('num_fixed_evaluate_examples', 30, '')
flags.DEFINE_integer('num_evaluate_examples', 1, '')
flags.DEFINE_integer('num_threads', 12, """threads for reading input tfrecords;
                                        setting this to 1 may be faster but gives less randomness
                                        """)
flags.DEFINE_boolean('shuffle_files', True, '')
flags.DEFINE_boolean('batch_join', True, '')
flags.DEFINE_boolean('shuffle_batch', True, '')
flags.DEFINE_boolean('shuffle_then_decode', True,
                     """ actually this is decided by is_sequence_example:
                     if is_sequence_example then False; if a plain example
                     (not a sequence) then True, since the data is sparse.
                     TODO remove this
                     """)
flags.DEFINE_boolean('is_sequence_example', False, '')
flags.DEFINE_string('buckets', '', 'empty meaning not use, other wise looks like 5,10,15,30')
flags.DEFINE_boolean('dynamic_batch_length', True,
                     """very important: False means all batches are the same size!
                     Otherwise a dynamic batch size is used.
                     Currently only non-sequence_example data supports dynamic_batch_length=False.
                     Also, for cnn you might need to set this to False so that
                     batches of all-equal length are used.
                     """)
flags.DEFINE_integer('num_negs', 1, '0 means no neg')
flags.DEFINE_boolean('feed_dict', False, 'deprecated, too complex; just prepare your data up front to keep it simple')
#---------- input dirs
#@TODO will not use input pattern but use dir since hdfs now can not support glob well
flags.DEFINE_string('train_input', '/tmp/train/train_*', 'must provide')
flags.DEFINE_string('valid_input', '', 'if empty will train only')
flags.DEFINE_string('fixed_valid_input', '', 'if empty wil not eval fixed images')
flags.DEFINE_string('num_records_file', '', '')
flags.DEFINE_integer('min_records', 12, '')
flags.DEFINE_integer('num_records', 0, 'if not 0, will check equal')
#---------- input reader
flags.DEFINE_integer('min_after_dequeue', 0, """by default will be 500,
                     set to a large number for production training
                     for better randomness""")
flags.DEFINE_integer('num_prefetch_batches', 0, '')
#----------eval
flags.DEFINE_boolean('legacy_rnn_decoder', False, '')
flags.DEFINE_boolean('experiment_rnn_decoder', False, '')
flags.DEFINE_boolean('show_eval', True, '')
flags.DEFINE_boolean('eval_shuffle_files', True, '')
flags.DEFINE_boolean('eval_fix_random', True, '')
flags.DEFINE_integer('eval_seed', 1024, '')
flags.DEFINE_integer('seed', 1024, '')
flags.DEFINE_boolean('fix_sequence', False, '')
#----------strategy
flags.DEFINE_string('seg_method', 'default', '')
flags.DEFINE_boolean('feed_single', False, '')
flags.DEFINE_boolean('gen_predict', True, '')
flags.DEFINE_string('decode_name', 'text', '')
flags.DEFINE_string('decode_str_name', 'text_str', '')
#--------for image caption TODO move to image_caption/input.py ?
flags.DEFINE_boolean('pre_calc_image_feature', True, '')
flags.DEFINE_boolean('distort_image', False, '')
flags.DEFINE_string('image_model_name', 'InceptionV3', '')
flags.DEFINE_integer('image_width', 299, 'default width of inception v3')
flags.DEFINE_integer('image_height', 299, 'default height of inception v3')
flags.DEFINE_string('image_checkpoint_file', '/home/gezi/data/inceptionv3/inception_v3.ckpt', '')
#---in melt.apps.image_processing.py
#flags.DEFINE_string('image_model_name', 'InceptionV3', '')
flags.DEFINE_string('one_image', '/home/gezi/data/flickr/flickr30k-images/1000092795.jpg', '')
flags.DEFINE_string('image_feature_name', 'image_feature', '')
#---------negative sampling
flags.DEFINE_boolean('neg_left', False, 'ltext or image')
flags.DEFINE_boolean('neg_right', True, 'rtext or text')
#---------discriminant trainer
flags.DEFINE_string('activation', 'relu',
                    """relu/tanh/sigmoid; sigmoid does not seem to converge here,
                    and relu is slightly better than tanh with faster convergence""")
flags.DEFINE_boolean('bias', False, 'whether to use bias. Not using bias can speed things up a bit')
flags.DEFINE_boolean('elementwise_predict', False, '')
flags.DEFINE_float('keep_prob', 1., 'or 0.9 0.8 0.5')
flags.DEFINE_float('dropout', 0., 'or 0.9 0.8 0.5') | [
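# --- Editor's usage sketch (illustrative; not part of the original module) ---
# Once this module is imported, parsed values are read straight off FLAGS,
# typically inside a main() launched via tf.app.run(). Flag names match the
# DEFINE_* calls above.
def _flags_usage_sketch():
    print('batch_size=%d num_negs=%d dynamic_batch_length=%s' % (
        FLAGS.batch_size, FLAGS.num_negs, FLAGS.dynamic_batch_length))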
"[email protected]"
]
| |
047d839364b362aa6a76bfe9643bcb4b78963590 | ab1c920583995f372748ff69d38a823edd9a06af | /shultais_courses/dictionaries/intro_to_dictionaries/digits_rus_eng.py | 8c5217d96a9dfe04a252496ac2455eacff1ddcc8 | []
| no_license | adyadyat/pyprojects | 5e15f4e33892f9581b8ebe518b82806f0cd019dc | c8f79c4249c22eb9e3e19998d5b504153faae31f | refs/heads/master | 2022-11-12T16:59:17.482303 | 2020-07-04T09:08:18 | 2020-07-04T09:08:18 | 265,461,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,568 | py | import sys
key = int(sys.argv[1])
value = sys.argv[2]
digits = {
1: {"ru": "один", "en": "one"},
2: {"ru": "два", "en": "two"},
3: {"ru": "три", "en": "three"},
4: {"ru": "четыре", "en": "four"},
5: {"ru": "пять", "en": "five"},
6: {"ru": "шесть", "en": "six"},
7: {"ru": "семь", "en": "seven"},
8: {"ru": "восемь", "en": "eight"},
9: {"ru": "девять", "en": "nine"},
0: {"ru": "ноль", "en": "zero"}
}
print(digits[key][value])
"""
ЧИСЛА НА РУССКОМ И АНГЛИЙСКОМ
Ниже в редакторе находится словарь digits,
который содержит набор чисел и их названия
на русском и английском языках.
Обратите внимание,
что ключами словаря выступают целые числа (так тоже можно),
а значениями вложенные словари.
Напишите программу,
которая принимает из аргументов командной строки два параметра:
цифру и язык, а затем выводит название цифры на этом языке.
Учитывайте, что если ключ словаря задан числом,
то при доступе по ключу,
в квадратных скобках нужно также указывать число.
Пример использования:
> python program.py 4 ru
> четыре
""" | [
"[email protected]"
]
| |
53a9e7b485281b9e04dc2f024cb312a1c0bfe6fa | cd21d80241deeb96f4acf16e865cef439b3158d1 | /manage.py | bba21b475398de4476bf5a91b9cbc71c682bea8d | []
| no_license | sankha555/DBPortal | 72cac7118334337fc653ce2c0c133598c4f783d1 | 6e8354df09f34e0a6708630524e10f6949301de7 | refs/heads/master | 2020-12-02T07:26:42.624213 | 2019-12-30T15:00:42 | 2019-12-30T15:00:42 | 230,722,294 | 0 | 0 | null | 2019-12-29T08:14:33 | 2019-12-29T08:14:33 | null | UTF-8 | Python | false | false | 628 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dbportal.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
661213532518f79f4fbd1621693e6a80ee48a239 | 86206b05a6e0a425ba5401de50b8645bddf77780 | /Oper Python/Oper Cobra/Sandbox/SFDCLib.py | 774e5e469ac749c3fe50e5a661a4ac709b6b7eff | []
| no_license | QuestTestAutomation/PersistentDesktop1python | 2e626ea16ce0fd4c697b156fdc2f9b3ca85bbd7b | ece25957edb6f87b2777b261b31914d22ebd99ad | refs/heads/master | 2021-03-10T21:55:25.450872 | 2020-03-27T09:45:14 | 2020-03-27T09:45:14 | 246,488,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,048 | py | from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from robot.libraries.BuiltIn import BuiltIn
from selenium.webdriver.support.ui import Select
import time
import re
import CPQLib
# import Libs/CPQLib.py
# def get_webdriver_instance():
# se2lib = BuiltIn().get_library_instance('Selenium2Library')
# return se2lib._current_browser()
def Open_Browser_And_Launch_Application(brow,url):
if brow == 'ff':
driver = webdriver.Firefox()
driver.get(url)
if brow == 'gc':
driver = webdriver.Chrome(executable_path='C:\Selenium\Selenium 3.4\Drivers\chromedriver.exe')
#driver = webdriver.Chrome()
# driver.get(url)
# driver = webdriver.Remote(command_executor='http://localhost:9515/',desired_capabilities=DesiredCapabilities.CHROME)
# driver.get(url)
if brow == 'ie':
driver = webdriver.Ie()
return driver
def get_text_column_value(driver,label,Index):
id = "-1"
tempindex = 0
i = 0
tindex = 0
lists = driver.find_elements_by_tag_name("TD")
for list in lists:
i = int(i) + int(1)
# print "label : " + str(label)
if list.get_attribute("class") == "labelCol" and list.text == label:
# print "hurr : " + str(list.text)
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
tindex = int(i) + int(1)
if tindex == i:
# idiv = list.find_element_by_tag_name("div")
id = list.text
# print "hurr hurr : " + str(list.text)
# id = idiv.text
# # print "hurr hurr : " + str(idiv.text)
break
return id
def get_Select_id(driver,title,Index,visibletext):
id = "-1"
tempindex = 0
i = 0
tindex = 0
lists = driver.find_elements_by_tag_name("SELECT")
for list in lists:
if list.get_attribute("title") == title:
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
id = list.get_attribute("id")
select = Select(list)
select.select_by_visible_text(visibletext)
return id
def Select_value_from_list(driver,title,Index,visibletext):
id = get_Select_id(driver,title,Index,visibletext)
def set_input_column_value(driver,label,Index,fieldvalue):
id = "-1"
tempindex = 0
i = 0
tindex = 0
lists = driver.find_elements_by_tag_name("TD")
for list in lists:
i = int(i) + int(1)
# labelCol
# requiredInput
if list.get_attribute("class") == "labelCol" and list.text == label:
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
tindex = int(i) + int(1)
if tindex == i:
id = list.get_attribute("id")
webele = list.find_element_by_id(list.get_attribute("id"))
driver.execute_script("return arguments[0].scrollIntoView(true);", webele)
webele.click()
webele.send_keys(fieldvalue)
print "id is " + str(id)
return id
def click_SFDCbutton(driver,btntitle,btnindex):
id = "-1"
tempindex = 0
i = 0
tindex = 0
lists = driver.find_elements_by_tag_name("INPUT")
# print len(lists)
for list in lists:
# print " class :" + str(list.get_attribute("class"))
print " Title :" + str(list.get_attribute("title"))
if list.get_attribute("class") is not None and list.get_attribute("title") is not None :
if list.get_attribute("class").upper() == ("btn").upper() and list.get_attribute("title").upper() == (btntitle).upper() :
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(btnindex):
print " Title :" + str(list.get_attribute("title"))
id = list.get_attribute("value")
driver.execute_script("return arguments[0].scrollIntoView(true);", list)
list.click()
break
return id
def set_required_input_column_value(driver,label,Index,fieldvalue):
id = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_tag_name("TD")
print "text " + str(len(lists))
for list in lists:
print str(i)
i = int(i) + int(1)
if tindex == i:
inpelements = list.find_elements_by_tag_name("INPUT")
for inpele in inpelements:
if len(inpelements) == int(1):
driver.execute_script("return arguments[0].scrollIntoView(true);", inpele)
inpele.click()
inpele.send_keys(fieldvalue)
break
if list.get_attribute("class") == "labelCol requiredInput":
lbllists = list.find_elements_by_tag_name("LABEL")
# print "label : " + str(len(lbllists))
for lbllist in lbllists:
if list.get_attribute("class") == "labelCol requiredInput" and int((lbllist.text).find(label)) != int('-1'):
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
tindex = int(i) + int(1)
return id
def Search_Account_Country_Lookup(driver) :
handle = []
parent_h = driver.current_window_handle
print "The current page title : " + driver.title
Open_Required_Loopup_Window(driver,'Country','1')
# driver.find_element_by_xpath("/html/body/div/div[2]/table/tbody/tr/td[2]/form/div/div[2]/div[3]/table/tbody/tr[10]/td[2]/div/span/a/img").click()
handles = driver.window_handles # before the pop-up window closes
# driver.remove(parent_h)
handles.remove(parent_h)
driver.switch_to_window(handles.pop())
popup_h = driver.current_window_handle
driver.implicitly_wait(10) # seconds
print "The current popup page title : " + driver.title
time.sleep(20)
driver.switch_to.frame(0)
# ele = driver.find_element_by_id('searchFrame')
# ele.click()
# lists = driver.find_elements_by_tag_name("input")
# # print "*********---- " + str(len(lists))
# # print "*********----frame " + str(len(driver.find_elements_by_tag_name("frame")))
# # print "*********----form " + str(len(driver.find_elements_by_tag_name("form")))
# # print "*********----div " + str(len(driver.find_elements_by_tag_name("div")))
# # print "*********----input " + str(len(driver.find_elements_by_tag_name("input")))
# for list in lists:
# print "*********----framelistinput " + str(len(list.find_elements_by_tag_name("input")))
# print "*********----framelistdiv " + str(len(list.find_elements_by_tag_name("div")))
# print "*********----framelistform " + str(len(list.find_elements_by_tag_name("form")))
# print "*********----framelisthtml " + str(len(list.find_elements_by_tag_name("html")))
# print "*********" + list.get_attribute("id")
# print "*********" + list.get_attribute("class")
element = driver.find_element_by_id('lksrch')
driver.execute_script("return arguments[0].scrollIntoView(true);", element)
driver.find_element_by_id('lksrch').click()
driver.find_element_by_id('lksrch').send_keys('India')
lists = driver.find_elements_by_tag_name("input")
# driver.find_element_by_id(_tag_name("INPUT")'lksrch').click()
# driver.find_element_by_id('lksrch').sendkeys("India")
# driver.find_element_by_xpath('/html/body/form/div/div[2]/input[1]').click()
# driver.find_element_by_xpath('/html/body/form/div/div[2]/input[1]').sendkeys("India")
driver.find_element_by_xpath("/html/body/form/div/div[2]/input[2]").click()
time.sleep(10)
driver.switch_to_window(popup_h)
driver.implicitly_wait(10) # seconds
driver.switch_to.frame(1)
time.sleep(5)
driver.find_element_by_link_text('India').click()
time.sleep(10)
driver.switch_to_window(parent_h)
def Handle_Lookup_Frame(driver) :
element = driver.find_element_by_xpath("/html/body/div/div[2]/table/tbody/tr/td[2]/form/div/div[2]/div[3]/table/tbody/tr[10]/td[2]/div/span/a/img")
driver.execute_script("return arguments[0].scrollIntoView(true);", element)
driver.find_element_by_xpath("/html/body/div/div[2]/table/tbody/tr/td[2]/form/div/div[2]/div[3]/table/tbody/tr[10]/td[2]/div/span/a/img").click()
driver.switch_to_frame("searchFrame")
driver.implicitly_wait(10) # seconds
    driver.find_element_by_xpath('/html/body/form/div/div[2]/input[1]').click()
    driver.find_element_by_xpath('/html/body/form/div/div[2]/input[1]').send_keys("India")
def set_text_input_column_value(driver,label,Index,fieldvalue):
id = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_tag_name("TD")
# print "text " + str(len(lists))
for list in lists:
# print str(i)
i = int(i) + int(1)
# labelCol
# requiredInpu t
if tindex == i:
# id = list.get_attribute("id")
# webele = list.find_element_by_id(list.get_attribute("id"))
# webele.click()
# webele.send_keys(fieldvalue)
inpelements = list.find_elements_by_tag_name("INPUT")
for inpele in inpelements:
if len(inpelements) == int(1):
driver.execute_script("return arguments[0].scrollIntoView(true);", inpele)
inpele.click()
inpele.send_keys(fieldvalue)
break
if list.get_attribute("class") == "labelCol":
lbllists = list.find_elements_by_tag_name("LABEL")
for lbllist in lbllists:
if list.get_attribute("class") == "labelCol" and lbllist.text == label:
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
tindex = int(i) + int(1)
return id
def Open_Required_Loopup_Window(driver,label,Index):
id = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_tag_name("TD")
# print "text " + str(len(lists))
for list in lists:
# print str(i)
i = int(i) + int(1)
if tindex == i:
inpelements = list.find_elements_by_tag_name("A")
for inpele in inpelements:
if len(inpelements) == int(1):
driver.execute_script("return arguments[0].scrollIntoView(true);", inpele)
inpele.click()
break
if list.get_attribute("class") == "labelCol requiredInput":
lbllists = list.find_elements_by_tag_name("LABEL")
# print "label : " + str(len(lbllists))
for lbllist in lbllists:
if list.get_attribute("class") == "labelCol requiredInput" and int((lbllist.text).find(label)) != int('-1'):
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
tindex = int(i) + int(1)
return id
def search_SFDC_Entity(driver,searchvalue):
print "searchvalue : " + searchvalue
element = driver.find_element_by_id("phSearchInput")
driver.execute_script("return arguments[0].scrollIntoView(true);", element)
driver.find_element_by_id("phSearchInput").click()
driver.find_element_by_id("phSearchInput").send_keys(searchvalue)
driver.find_element_by_id("phSearchButton").click()
def search_SFDC_Entity_Sidebar(driver,searchvalue) :
element = driver.find_element_by_id("sbstr")
driver.execute_script("return arguments[0].scrollIntoView(true);", element)
driver.find_element_by_id("sbstr").click()
driver.find_element_by_id("sbstr").send_keys(searchvalue)
# driver.find_element_by_name("search").click()
btntitle = 'Go!'
click_SFDCbutton(driver, btntitle, 1)
def Navigate_to_Header(driver,searchvalue,Index):
id = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_tag_name("H3")
print "text " + str(len(lists))
for list in lists:
print list.text
if (list.text).upper() == searchvalue.upper() :
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
driver.execute_script("return arguments[0].scrollIntoView(true);", list)
list.click()
def Click_Section_Button(driver,btntitle,Index) :
id = "-1"
tempindex = 0
lists = driver.find_elements_by_class_name("pbButton")
# print "pbbtn : " + str(len(lists))
for list in lists:
btnlists = list.find_elements_by_tag_name("INPUT")
# print "btnlists : " + str(len(btnlists))
if int(id) == int(1) :
break
for btnlist in btnlists :
print btnlist.get_attribute("title")
if btnlist.get_attribute("class") is not None and btnlist.get_attribute("title") is not None:
if btnlist.get_attribute("class").upper() == ("btn").upper() and btnlist.get_attribute("title").upper() == (btntitle).upper():
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
id = "1"
# print btnlist.get_attribute("title")
driver.execute_script("return arguments[0].scrollIntoView(true);", btnlist)
btnlist.click()
break
def Click_Section_Button1(driver,btntitle,Index) :
id = "-1"
tempindex = 0
btnlists = driver.find_elements_by_tag_name("INPUT")
# print "btnlists : " + str(len(btnlists))
for btnlist in btnlists :
print btnlist.get_attribute("title")
if btnlist.get_attribute("class") is not None and btnlist.get_attribute("title") is not None:
if btnlist.get_attribute("class").upper() == ("btn").upper() and btnlist.get_attribute("title").upper() == (btntitle).upper():
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
id = "1"
# print btnlist.get_attribute("title")
driver.execute_script("return arguments[0].scrollIntoView(true);", btnlist)
btnlist.click()
break
def Search_Required_Lookup(driver,label,Index,lookupvalue) :
handle = []
parent_h = driver.current_window_handle
print "The current page title : " + driver.title
# Open_Loopup_Window(driver,label,Index)
Open_Required_Loopup_Window(driver,label,Index)
# driver.find_element_by_xpath("/html/body/div/div[2]/table/tbody/tr/td[2]/form/div/div[2]/div[3]/table/tbody/tr[10]/td[2]/div/span/a/img").click()
handles = driver.window_handles # before the pop-up window closes
# driver.remove(parent_h)
handles.remove(parent_h)
driver.switch_to_window(handles.pop())
popup_h = driver.current_window_handle
driver.implicitly_wait(5) # seconds
print "The current popup page title : " + driver.title
time.sleep(5)
driver.switch_to.frame(0)
driver.find_element_by_id('lksrch').click()
driver.find_element_by_id('lksrch').send_keys(lookupvalue)
click_SFDCbutton(driver, 'Go!', 1)
driver.switch_to_window(popup_h)
driver.implicitly_wait(3) # seconds
driver.switch_to.frame(1)
time.sleep(2)
driver.find_element_by_link_text(lookupvalue).click()
time.sleep(3)
driver.switch_to_window(parent_h)
def Search_Lookup(driver,label,Index,lookupvalue) :
handle = []
parent_h = driver.current_window_handle
print "The current page title : " + driver.title
Open_Lookup_Window(driver,label,Index)
# Open_Required_Loopup_Window(driver,label,Index)
# driver.find_element_by_xpath("/html/body/div/div[2]/table/tbody/tr/td[2]/form/div/div[2]/div[3]/table/tbody/tr[10]/td[2]/div/span/a/img").click()
handles = driver.window_handles # before the pop-up window closes
handles.remove(parent_h)
driver.switch_to_window(handles.pop())
popup_h = driver.current_window_handle
driver.implicitly_wait(5) # seconds
print "The current popup page title : " + driver.title
time.sleep(5)
driver.switch_to.frame(0)
driver.find_element_by_id('lksrch').click()
driver.find_element_by_id('lksrch').send_keys(lookupvalue)
click_SFDCbutton(driver, 'Go!', 1)
driver.switch_to_window(popup_h)
driver.implicitly_wait(3) # seconds
driver.switch_to.frame(1)
time.sleep(2)
driver.find_element_by_link_text(lookupvalue).click()
time.sleep(3)
driver.switch_to_window(parent_h)
def Open_Lookup_Window(driver,label,Index):
id = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_tag_name("TD")
print "text " + str(len(lists))
for list in lists:
print str(i)
i = int(i) + int(1)
if tindex == i:
inpelements = list.find_elements_by_tag_name("A")
for inpele in inpelements:
if len(inpelements) == int(1):
driver.execute_script("return arguments[0].scrollIntoView(true);", inpele)
inpele.click()
break
if list.get_attribute("class") == "labelCol":
lbllists = list.find_elements_by_tag_name("LABEL")
# print "label : " + str(len(lbllists))
for lbllist in lbllists:
if list.get_attribute("class") == "labelCol" and lbllist.text == label:
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
tindex = int(i) + int(1)
return id
def Click_SFDCLink(driver,label,Index):
id = "-1"
tempindex = 0
lists = driver.find_elements_by_tag_name("A")
print "text " + str(len(lists))
print " label " + str(label)
for list in lists:
# if id != "-1" :
# break
# print "**********************************************"
# print "if : " + str(int((list.text).find(label)))
# print "if : " + str(((list.text).strip()).upper() == (label.strip()).upper())
# print "list.text : " + str(list.text)
# print " label " + str(label)
if (((list.text).strip()).upper() == (label.strip()).upper()):
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
id = list.get_attribute("Id")
driver.execute_script("return arguments[0].scrollIntoView(true);", list)
list.click()
break
def select_required_input_column_value(driver, label, Index, fieldvalue):
id = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_tag_name("TD")
print "text " + str(len(lists))
for list in lists:
print str(i)
i = int(i) + int(1)
if tindex == i:
inpelements = list.find_elements_by_tag_name("Select")
for inpele in inpelements:
if len(inpelements) == int(1):
driver.execute_script("return arguments[0].scrollIntoView(true);", inpele)
inpele.click()
select = Select(inpele)
select.select_by_visible_text(fieldvalue)
break
if list.get_attribute("class") == "labelCol requiredInput":
lbllists = list.find_elements_by_tag_name("LABEL")
# print "label : " + str(len(lbllists))
for lbllist in lbllists:
if list.get_attribute("class") == "labelCol requiredInput" and int((lbllist.text).find(label)) != int('-1'):
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
tindex = int(i) + int(1)
return id
def select_input_column_value(driver,label,Index,fieldvalue):
id = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_class_name("labelCol")
print "lenlists " + str(len(lists))
for list in lists:
if list is not None and list.get_attribute("class") is not None :
lbllists = list.find_elements_by_tag_name("LABEL")
print "Class : " + str(list.get_attribute("class"))
print "lenlists " + str(len(lbllists))
if int(len(lbllists)) > 0 :
for lbllist in lbllists:
if lbllist is not None and lbllist.get_attribute("for") is not None:
print "Class : " + str(list.get_attribute("class"))
print "lbllist.text : " + str(lbllist.text)
if list.get_attribute("class") == "labelCol" and lbllist.text == label:
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
print "for : " + str(list.get_attribute("for"))
def Get_Input_FieldID(driver,label,Index) :
forid = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_class_name("labelCol")
# print "lenlists " + str(len(lists))
for list in lists:
if list is not None and list.get_attribute("class") is not None :
lbllists = list.find_elements_by_tag_name("LABEL")
# print "lenlists " + str(len(lbllists))
if int(len(lbllists)) > 0 :
for lbllist in lbllists:
if lbllist is not None and lbllist.get_attribute("for") is not None and lbllist.text is not None :
if list.get_attribute("class") == "labelCol" and lbllist.text == label:
# print "Class : " + str(list.get_attribute("class"))
# print "lbllist.text : " + str(lbllist.text)
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
# print "for : " + str(lbllist.get_attribute("for"))
forid = lbllist.get_attribute("for")
break
return forid
def Get_Required_Input_FieldID(driver,label,Index) :
forid = "-1"
tempindex = 0
i = 0
tindex = "-10000"
classname = 'labelCol requiredInput'
lists = driver.find_elements_by_css_selector('td[class="labelCol requiredInput"]')
# print "lenlists " + str(len(lists))
for list in lists:
# print "lenlclass " + str(list.get_attribute("class"))
if list is not None and list.get_attribute("class") is not None :
lbllists = list.find_elements_by_tag_name("LABEL")
# print "lenlists " + str(len(lbllists))
if int(len(lbllists)) > 0 :
for lbllist in lbllists:
# print "lbllist.text : " + str(lbllist.text)
if lbllist is not None and lbllist.get_attribute("for") is not None and lbllist.text is not None :
if list.get_attribute("class") == "labelCol requiredInput" and int((lbllist.text).find(label)) != int('-1'):
# print "Class : " + str(list.get_attribute("class"))
# print "lbllist.text : " + str(lbllist.text)
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
# print "for : " + str(lbllist.get_attribute("for"))
forid = lbllist.get_attribute("for")
break
return forid
def select_Required_SFDC_Dropdown_Value(driver,label,Index,vFieldvalue):
myid = Get_Required_Input_FieldID(driver, label, Index)
# print "Hello : " + str(myid)
element = driver.find_element_by_id(myid)
driver.execute_script("return arguments[0].scrollIntoView(true);", element)
driver.find_element_by_id(myid).click()
select = Select(driver.find_element_by_id(myid))
# select by visible text
select.select_by_visible_text(vFieldvalue)
def select_SFDC_Dropdown_Value(driver,label,Index,vFieldvalue):
myid = Get_Input_FieldID(driver, label, Index)
print "Hello : " + str(myid)
element = driver.find_element_by_id(myid)
driver.execute_script("return arguments[0].scrollIntoView(true);", element)
driver.find_element_by_id(myid).click()
select = Select(driver.find_element_by_id(myid))
# select by visible text
select.select_by_visible_text(vFieldvalue)
def Select_Steel_Brick_Dropdown_Value(driver,label,Index) :
tempindex = 0
i = 0
tindex = "-10000"
# driver.find_element_by_id("tsidButton").click()
print "Hu " + str(driver.find_element_by_id("tsidLabel").text)
if driver.find_element_by_id("tsidLabel").text != label :
element = driver.find_element_by_id("tsidLabel")
driver.execute_script("return arguments[0].scrollIntoView(true);", element)
driver.find_element_by_id("tsidLabel").click()
driver.find_element_by_link_text(label).click()
def click_CPQ_Buttom(driver,btntitle,Index) :
bid = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_tag_name("PAPER-BUTTON")
print "lenlists " + str(len(lists))
for list in lists:
print list.text
if list.text is not None :
if list.text == btntitle :
print list.text
print list.text == btntitle
tempindex = int(tempindex) + int(1)
if tempindex == Index:
list.click()
bid = "1"
break
return bid
def click_CPQ_Link(driver,label,Index) :
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_class_name("list")
print "lenlists " + str(len(lists))
for list in lists:
alists = list.find_elements_by_tag_name("A")
for alist in alists:
if alist.text is not None:
if alist.text == label:
tempindex = int(tempindex) + int(1)
if tempindex == Index:
list.click()
break
def wait_CPQ_button(driver,btntitle,Index) :
for x in range(0,20,1) :
btnid = click_CPQ_Buttom(driver,btntitle,Index)
if btnid == "1" :
break
elif btnid == "-1" :
time.sleep(10)
def wait_for_opty_sbl_rowid(driver,label,index) :
id = "-1"
val = get_text_column_value(driver,label,index)
val11 = isNotBlank (val)
# print "is none : " + val is None
# print "is none val : " + str(val)
# print "is none : " + str(val11)
for i in range(0,900,10):
driver.refresh()
time.sleep(10)
val = get_text_column_value(driver,label,index)
# print "iThe value of index : " + str(i)
# print "iThe value of val : " + str(val)
# if val == "-1":
if isBlank(val):
time.sleep(10)
val = get_text_column_value(driver, label, index)
else :
# print "tu nee amma true" + str(val)
id = val
break
# print "Siebel row id is : " + str(val)
return id
def isNotBlank (myString):
if myString and myString.strip():
#myString is not None AND myString is not empty or blank
return True
#myString is None OR myString is empty or blank
return False
def isBlank (myString):
if myString and myString.strip():
#myString is not None AND myString is not empty or blank
return False
#myString is None OR myString is empty or blank
return True
def Handle_CPQQuote_Popup(driver) :
qtext = "-1"
quoteno = "-1"
parent_h = driver.current_window_handle
for i in range(0, 600, 10):
handles = driver.window_handles
if len(handles) == 2 :
handles.remove(parent_h)
driver.switch_to_window(handles.pop())
popup_h = driver.current_window_handle
# driver.implicitly_wait(10) # seconds
print "The current popup page title : " + driver.title
# time.sleep(20)
driver.switch_to_window(popup_h)
qtext = driver.find_element_by_css_selector('html body span').text
print "qtext value is : " + str(qtext)
element = driver.find_element_by_css_selector('html body center input')
driver.execute_script("return arguments[0].scrollIntoView(true);", element)
driver.find_element_by_css_selector('html body center input').click()
time.sleep(5)
driver.switch_to_window(parent_h)
re_prodId = re.compile(r'Quote is successfully created.Quote Number is([^"]+).')
for m in re_prodId.findall(qtext):
# print(m)
quoteno = m
quoteno = quoteno.strip()
break
else :
time.sleep(10)
print " Handles Length : " + str(len(handles))
time.sleep(6)
driver.refresh()
return quoteno
def browser_navigate_back(driver) :
driver.execute_script("window.history.go(-1)")
time.sleep(5)
def click_SFDCbutton_new(driver,btntitle,btnindex):
id = "-1"
tempindex = 0
i = 0
tindex = 0
lists = driver.find_elements_by_class_name("btn")
# print len(lists)
for list in lists:
# print " class :" + str(list.get_attribute("class"))
print " Title :" + str(list.get_attribute("title"))
if list.get_attribute("class") is not None and list.get_attribute("title") is not None :
if list.get_attribute("class").upper() == ("btn").upper() and list.get_attribute("title").upper() == (btntitle).upper() :
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(btnindex):
print " Title :" + str(list.get_attribute("title"))
id = list.get_attribute("value")
driver.execute_script("return arguments[0].scrollIntoView(true);", list)
list.click()
break
return id
def get_quote_number(driver,Quotelink) :
id = "-1"
vTarindex = 0
vSourceindex = 0
driver.refresh()
time.sleep(5)
elelink = driver.find_element_by_link_text(Quotelink)
ahref = elelink.get_attribute("href")
ele_splits = ahref.split("/")
vTarindex = len(ele_splits) - int(1)
tarstr = str((ele_splits[int(vTarindex)]))
print "tarstr " + str(tarstr)
elems = driver.find_elements_by_xpath("//a[@href]")
# print "length : " + str(len(elems))
for elem in elems:
print "href : " + str(elem.get_attribute("href"))
# # print "text : " + str(elem.get_attribute("text"))
print "text : " + str(elem.text)
ahref1 = elem.get_attribute("href")
ele_splits1 = ahref1.split("/")
vSourceindex = len(ele_splits1) - int(1)
sourcestr = str((ele_splits1[int(vSourceindex)]))
# print "sourcestr " + str(sourcestr)
if sourcestr == tarstr :
if elem.text == Quotelink:
id = "-1'"
else :
id = elem.text
# print "href ++ : " + str(elem.get_attribute("href"))
#
#
# print "ele len : " + str(len(ele_splits))
# print "ele len : " + str((ele_splits[3]))
# linkxpath = "//a[@href=/" \
# "" + ele_splits[3] + "]"
# print "linkxpath : " + str(linkxpath)
# elements = driver.find_elements_by_xpath(linkxpath)
# print "linlen : " + str(len(elements))
# for element in elements:
# if element.text == Quotelink:
# id = "-1'"
# else :
# id = element.text
return id
def scroll_to_bottom(driver) :
lastHeight = driver.execute_script("return document.body.scrollHeight")
while True:
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(5)
newHeight = driver.execute_script("return document.body.scrollHeight")
if newHeight == lastHeight:
break
lastHeight = newHeight
def switch_to_new_tab(driver) :
main_window = driver.current_window_handle
driver.switch_to.window(driver.window_handles[1])
return main_window
def CPQ_Add_Products_Handle_Errors(driver) :
id = "-1"
errors = CPQLib.pgselectproduct_checkfor_errors(driver)
print "errors : " + str(errors)
if errors == "-1" :
print "No Errors"
else :
browser_navigate_back(driver)
time.sleep(30)
CPQLib.pgconfigureproduts_click_add_products(driver)
# Wait Until Page Contains Product Selection
CPQLib.pgselectproduct_waitfor_Select_Button(driver)
| [
"[email protected]"
]
| |
cf53ef5ed08b07917f1bafebfd98837aa6df5e39 | 36957a9ce540846d08f151b6a2c2d582cff1df47 | /VR/Python/Python36/Lib/site-packages/django/contrib/auth/migrations/0004_alter_user_username_opts.py | 8f8441f88f5e0f3b2074e39c01c7ef863cb3c28a | []
| no_license | aqp1234/gitVR | 60fc952307ef413e396d31e0d136faffe087ed2b | e70bd82c451943c2966b8ad1bee620a0ee1080d2 | refs/heads/master | 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 | C# | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:68dd281e8039ab66caa4937c4a723c4fd18db5304accb445a332fceed361f3f2
size 785
| [
"[email protected]"
]
| |
4dafd2675375326d00071f92b91080bea9677ef3 | 1498148e5d0af365cd7fd16197174174a7fa9800 | /t001481.py | 4bca6b1596cd106695153b484bdcabd65c9b8121 | []
| no_license | feiyanshiren/myAcm | 59a2b80fe7e02787defcb152eee3eae26135322a | 00c7082d5143ddf87aeeafbdb6ce29da46dc8a12 | refs/heads/master | 2023-09-01T12:12:19.866447 | 2023-09-01T09:09:56 | 2023-09-01T09:09:56 | 148,560,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | for T in range(int(input())):
b = list(bin(int(input())).replace("0b", ""))
if b[-1] == "1":
for i in range(len(b) - 1, -1, -1):
if b[i] == "1":
b[i] = "0"
else:
break
print("".join(b)) | [
"[email protected]"
]
| |
6f3f18539c8923851681793d40f4dcb3f50d3d64 | 60d2212eb2e287a0795d58c7f16165fd5315c441 | /app01/migrations/0001_initial.py | 3a9dc3818912587831f59c416cdcc28191857ff6 | []
| no_license | zhouf1234/mysite2 | 29145ceb470273f39fc11dd91945203db7fe0238 | 63747c789d39cf752f2b80509d8e3db9145b3492 | refs/heads/master | 2020-05-05T03:11:31.696639 | 2019-04-05T10:41:42 | 2019-04-05T10:41:42 | 179,663,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | # Generated by Django 2.1.2 on 2018-11-01 09:05
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=32)),
],
),
]
| [
"="
]
| = |
6409ffff6a083b3c48d050cf0b0da4cd4e24c754 | 98811c0c149c1873c12322f20345dab1488a1870 | /nnet/hue/split_data.py | 421bc33d6fd8d8aa179440fa714ee4c730371b24 | []
| no_license | mverleg/kaggle_otto | 682d5f83a070b7e88054401e6fba221d8e1b6227 | b23beb58a1a0652e9eb98f5db31eae52303b6f85 | refs/heads/main | 2021-01-17T08:54:39.096781 | 2016-04-12T09:25:26 | 2016-04-12T09:25:26 | 37,781,556 | 0 | 1 | null | 2016-04-12T09:25:27 | 2015-06-20T18:47:45 | Python | UTF-8 | Python | false | false | 226 | py |
def split_data(data, labels, test_frac = 0.1):
N = int(len(labels) * test_frac)
train = data[N:, :]
test = data[:N, :]
train_labels = labels[N:]
test_labels = labels[:N]
return train, train_labels, test, test_labels
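# --- Editor's usage sketch (illustrative; not part of the original module) ---
# A 10-sample toy set with the default 10% test fraction yields 1 test row
# and 9 training rows.
def _split_data_sketch():
    import numpy as np
    data = np.arange(20).reshape(10, 2)
    labels = np.arange(10)
    train, train_labels, test, test_labels = split_data(data, labels)
    assert test.shape == (1, 2) and train.shape == (9, 2)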
| [
"mark@rafiki"
]
| mark@rafiki |
b8212889d6b20712d6dc7e09b2af346ddbf3babd | 13a954fed4bced90c325e5508900b0f8665d0f08 | /day_2/list_iterate.py | 80d62d57c8fcf03d73a7af678bee6a0920c0d396 | [
"MIT"
]
| permissive | anishLearnsToCode/ml-workshop-wac-2 | 64b84589fa0b45057bf36bd1f073f12a17a8eba2 | 9992acd30f4b74ce2debf0d5ff3d8a1b78b1163f | refs/heads/main | 2023-02-21T02:36:06.542630 | 2021-01-19T17:38:48 | 2021-01-19T17:38:48 | 327,333,663 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | numbers = [2, 3, 5, 7, 11, 13, 19, 23]
for number in numbers:
print(number)
| [
"[email protected]"
]
| |
3dfe193901cf9aa28f6a4d72adcdaf72d7bf3727 | d27af2880f61e8e4b1848559dd06155a456874fe | /dev_reference_lines/ourmatplotlib.py | d93c5bd1a724a796269d21222c281372ce67e6d7 | []
| no_license | CINF/cinfdata | 6c60371cdca69409be139dd1f0eab9288da422c0 | d91b0e292095ee2ba748ebd803b794c00be37d43 | refs/heads/master | 2023-08-17T11:35:01.321323 | 2023-08-11T06:54:37 | 2023-08-11T06:54:37 | 58,733,415 | 0 | 3 | null | 2023-08-11T06:54:38 | 2016-05-13T11:23:33 | Python | UTF-8 | Python | false | false | 16,407 | py | #!/usr/bin/python
"""
This file is part of the CINF Data Presentation Website
Copyright (C) 2012 Robert Jensen, Thomas Andersen and Kenneth Nielsen
The CINF Data Presentation Website is free software: you can
redistribute it and/or modify it under the terms of the GNU
General Public License as published by the Free Software
Foundation, either version 3 of the License, or
(at your option) any later version.
The CINF Data Presentation Website is distributed in the hope
that it will be useful, but WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License
along with The CINF Data Presentation Website. If not, see
<http://www.gnu.org/licenses/>.
"""
from optparse import OptionParser
import sys
import hashlib
# set HOME environment variable to a directory the httpd server can write to
import os
os.environ[ 'HOME' ] = '/var/www/cinfdata/figures'
# System-wide ctypes cannot be run by apache... strange...
sys.path.insert(1, '/var/www/cinfdata')
from pytz import timezone
import numpy as np
# Matplotlib must be imported before MySQLdb (in dataBaseBackend), otherwise we
# get an ugly error
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
import matplotlib.dates as mdates
# Import our own classes
#from databasebackend import dataBaseBackend
from common import Color
class Plot():
"""This class is used to generate the figures for the plots."""
def __init__(self, options, ggs):
""" Description of init """
self.o = options
self.ggs = ggs
# Set the image format to standard, overwite with ggs value and again
# options value if it exits
if self.o['image_format'] == '':
self.image_format = self.ggs['image_format']
else:
self.image_format = self.o['image_format']
# Default values for matplotlib plots (names correspond to ggs names)
mpl_settings = {'width': 900,
'height': 600,
'title_size': '24',
'xtick_labelsize': '12',
'ytick_labelsize': '12',
'legend_fontsize': '10',
'label_fontsize': '16',
'linewidth': 1.0,
'grid': False}
# Owerwrite defaults with gs values and convert to appropriate types
for key, value in mpl_settings.items():
try:
mpl_settings[key] = type(value)(self.ggs['matplotlib_settings'][key])
except KeyError:
pass
# Write some settings to pyplot
rc_temp = {'figure.figsize': [float(mpl_settings['width'])/100,
float(mpl_settings['height'])/100],
'axes.titlesize': mpl_settings['title_size'],
'xtick.labelsize': mpl_settings['xtick_labelsize'],
'ytick.labelsize': mpl_settings['ytick_labelsize'],
'legend.fontsize': mpl_settings['legend_fontsize'],
'axes.labelsize': mpl_settings['label_fontsize'],
'lines.linewidth': mpl_settings['linewidth'],
'axes.grid': mpl_settings['grid']
}
plt.rcParams.update(rc_temp)
# Plotting options
self.maxticks=15
self.tz = timezone('Europe/Copenhagen')
self.right_yaxis = None
self.measurement_count = None
# Colors object, will be filled in at new_plot
self.c = None
def new_plot(self, data, plot_info, measurement_count):
""" Form a new plot with the given data and info """
self.c = Color(data, self.ggs)
self.measurement_count = sum(measurement_count)
self._init_plot(data)
# _plot returns True or False to indicate whether the plot is good
if self._plot(data):
self._zoom_and_flip(data)
self._title_and_labels(plot_info)
self._add_reference_lines(data)
self._save(plot_info)
def _init_plot(self, data):
""" Initialize plot """
self.fig = plt.figure(1)
self.ax1 = self.fig.add_subplot(111)
        # We only activate the right y-axis if there are points to put on it
self.right_yaxis = sum([len(dat['data']) for dat in data['right']]) > 0
if self.right_yaxis:
self.ax2 = self.ax1.twinx()
if self.o['left_logscale']:
self.ax1.set_yscale('log')
if self.right_yaxis and self.o['right_logscale']:
self.ax2.set_yscale('log')
def _plot(self, data):
""" Determine the type of the plot and make the appropriate plot by use
of the functions:
_plot_dateplot
_plot_xyplot
"""
if self.ggs['default_xscale'] == 'dat':
return self._plot_dateplot(data)
else:
return self._plot_xyplot(data)
def _plot_dateplot(self, data):
""" Make the date plot """
# Rotate datemarks on xaxis
self.ax1.set_xticklabels([], rotation=25, horizontalalignment='right')
        # Test for unworkable plot configurations
error_msg = None
# Test if there is data on the left axis
if sum([len(dat['data']) for dat in data['left']]) == 0:
error_msg = 'There must\nbe data on\nthe left y-axis'
# Test if there is any data at all
if self.measurement_count == 0:
error_msg = 'No data'
# No data
if error_msg:
y = 0.00032 if self.o['left_logscale'] is True else 0.5
self.ax1.text(0.5, y, error_msg, horizontalalignment='center',
verticalalignment='center', color='red', size=60)
return False
# Left axis
for dat in data['left']:
# Form legend
if dat['lgs'].has_key('legend'):
legend = dat['lgs']['legend']
else:
legend = None
# Plot
if len(dat['data']) > 0:
self.ax1.plot_date(mdates.epoch2num(dat['data'][:,0]),
dat['data'][:,1],
label=legend,
xdate=True,
color=self.c.get_color(),
tz=self.tz,
fmt='-')
# Right axis
if self.right_yaxis:
for dat in data['right']:
# Form legend
if dat['lgs'].has_key('legend'):
legend = dat['lgs']['legend']
else:
legend = None
# Plot
if len(dat['data']) > 0:
self.ax2.plot_date(mdates.epoch2num(dat['data'][:,0]),
dat['data'][:,1],
label=legend,
xdate=True,
color=self.c.get_color(),
tz=self.tz,
fmt='-')
# Set xtick formatter (only if we have points)
if self.measurement_count > 0:
xlim = self.ax1.set_xlim()
diff = max(xlim) - min(xlim) # in days
format_out = '%H:%M:%S' # Default
            # Diff-limit-to-date-format translation; picks the format of the
            # largest limit the diff is larger than. Limits are in days
            # (matplotlib date-axis units).
            formats = [
                [1.0, '%a %H:%M'],  # Larger than 1 day
                [7.0, '%Y-%m-%d'],  # Larger than 7 days
                [7 * 30., '%Y-%m'],  # Larger than ~7 months
            ]
]
for limit, format in formats:
if diff > limit:
format_out = format
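            # For example (illustrative values): diff = 0.5 days keeps the
            # '%H:%M:%S' default, diff = 3 days picks '%a %H:%M', and
            # diff = 30 days picks '%Y-%m-%d'.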
fm = mdates.DateFormatter(format_out, tz=self.tz)
self.ax1.xaxis.set_major_formatter(fm)
# Indicate that the plot is good
return True
def _plot_xyplot(self, data):
# Left axis
for dat in data['left']:
# Form legend
if dat['lgs'].has_key('legend'):
legend = dat['lgs']['legend']
else:
legend = None
# Plot
if len(dat['data']) > 0:
self.ax1.plot(dat['data'][:,0],
dat['data'][:,1],
'-',
label=legend,
color=self.c.get_color(dat['lgs']['id']),
)
# Right axis
for dat in data['right']:
# Form legend
if dat['lgs'].has_key('legend'):
legend = dat['lgs']['legend']
else:
legend = None
# Plot
if len(dat['data']) > 0:
self.ax2.plot(dat['data'][:,0],
dat['data'][:,1],
'-',
label=legend,
color=self.c.get_color(dat['lgs']['id'])
)
# No data
if self.measurement_count == 0:
y = 0.00032 if self.o['left_logscale'] is True else 0.5
self.ax1.text(0.5, y, 'No data', horizontalalignment='center',
verticalalignment='center', color='red', size=60)
# Indicate that the plot is good
return True
def _zoom_and_flip(self, data):
""" Apply the y zooms.
NOTE: self.ax1.axis() return a list of bounds [xmin,xmax,ymin,ymax] and
we reuse x and replace y)
"""
left_yscale_inferred = self.o['left_yscale_bounding']
right_yscale_inferred = self.o['right_yscale_bounding']
# X-axis zoom and infer y-axis zoom implications
if self.o['xscale_bounding'] is not None and\
self.o['xscale_bounding'][1] > self.o['xscale_bounding'][0]:
# Set the x axis scaling, unsure if we should do it for ax2 as well
self.ax1.set_xlim(self.o['xscale_bounding'])
# With no specific left y-axis zoom, infer it from x-axis zoom
if left_yscale_inferred is None:
left_yscale_inferred = self._infer_y_on_x_zoom(
data['left'], self.o['left_logscale'])
# With no specific right y-axis zoom, infer it from x-axis zoom
if right_yscale_inferred is None and self.right_yaxis:
right_yscale_inferred = self._infer_y_on_x_zoom(
data['right'])
# Left axis
if left_yscale_inferred is not None:
self.ax1.set_ylim(left_yscale_inferred)
# Right axis
if self.right_yaxis and right_yscale_inferred is not None:
self.ax2.set_ylim(right_yscale_inferred)
if self.o['flip_x']:
self.ax1.set_xlim((self.ax1.set_xlim()[1],self.ax1.set_xlim()[0]))
def _infer_y_on_x_zoom(self, list_of_data_sets, log=None):
"""Infer the implied Y axis zoom with an X axis zoom, for one y axis"""
yscale_inferred = None
min_candidates = []
max_candidates = []
for dat in list_of_data_sets:
# Make mask that gets index for points where x is within bounds
mask = (dat['data'][:, 0] > self.o['xscale_bounding'][0]) &\
(dat['data'][:, 0] < self.o['xscale_bounding'][1])
# Gets all the y values from that mask
reduced = dat['data'][mask, 1]
# Add min/max candidates
if len(reduced) > 0:
min_candidates.append(np.min(reduced))
max_candidates.append(np.max(reduced))
# If there are min/max candidates, set the inferred left y bounding
if len(min_candidates) > 0 and len(max_candidates) > 0:
min_, max_ = np.min(min_candidates), np.max(max_candidates)
height = max_ - min_
yscale_inferred = (min_ - height*0.05, max_ + height*0.05)
return yscale_inferred
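    # Illustration of the inference above (hypothetical numbers): with
    # xscale_bounding = (0, 10) and one data set with the points
    # (2, 1.0), (5, 3.0), (12, 9.0), the mask keeps y = [1.0, 3.0], so
    # min_ = 1.0, max_ = 3.0, height = 2.0 and the returned bounds are
    # (0.9, 3.1), i.e. the y range of the visible points plus 5% headroom.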
def _title_and_labels(self, plot_info):
""" Put title and labels on the plot """
# xlabel
if plot_info.has_key('xlabel'):
label = plot_info['xlabel']
if plot_info['xlabel_addition'] != '':
label += '\n' + plot_info['xlabel_addition']
self.ax1.set_xlabel(label)
if self.o['xlabel'] != '': # Manual override
self.ax1.set_xlabel(r'{0}'.format(self.o['xlabel']))
# Left ylabel
if plot_info.has_key('left_ylabel'):
label = plot_info['left_ylabel']
if plot_info['y_left_label_addition'] != '':
label += '\n' + plot_info['y_left_label_addition']
self.ax1.set_ylabel(label, multialignment='center')
if self.o['left_ylabel'] != '': # Manual override
self.ax1.set_ylabel(self.o['left_ylabel'], multialignment='center')
# Right ylabel
if self.right_yaxis and plot_info.has_key('right_ylabel'):
label = plot_info['right_ylabel']
if plot_info['y_right_label_addition'] != '':
label += '\n' + plot_info['y_right_label_addition']
self.ax2.set_ylabel(label, multialignment='center', rotation=270)
if self.o['right_ylabel'] != '': # Manual override
self.ax2.set_ylabel(self.o['right_ylabel'],
multialignment='center', rotation=270)
# Title
if plot_info.has_key('title'):
self.ax1.set_title(plot_info['title'], y=1.03)
if self.o['title'] != '':
# experiment with 'r{0}'.form .. at some time
self.ax1.set_title('{0}'.format(self.o['title']), y=1.03)
# Legends
if self.measurement_count > 0:
ax1_legends = self.ax1.get_legend_handles_labels()
if self.right_yaxis:
ax2_legends = self.ax2.get_legend_handles_labels()
for color, text in zip(ax2_legends[0], ax2_legends[1]):
ax1_legends[0].append(color)
ax1_legends[1].append(text)
            # loc for location, 0 means 'best'. Why that isn't the default I
            # have no idea
legends = self.ax1.legend(ax1_legends[0], ax1_legends[1], loc=0)
# Make legend lines thicker
for legend_handle in legends.legendHandles:
legend_handle.set_linewidth(6)
def _add_reference_lines(self, data):
"""Add reference lines to the plot"""
for value in data['reference_line_info']:
            plt.axvline(x=value[0], ymax=(value[1] / 10000))
# KARL TODO. In this method, actually draw the reference lines
# onto the graph. Remember, the data argument is the dict you
        # wrote information to in databasebackend, and the left axes
        # (which I think is the one you want to use) is called self.ax1
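        # A minimal sketch of what the TODO above could look like, assuming
        # each entry in data['reference_line_info'] is a (position, height)
        # pair as in the loop above; the styling keywords are illustrative
        # choices, not part of the original code:
        #
        #     for position, height in data['reference_line_info']:
        #         self.ax1.axvline(x=position, ymax=height / 10000.0,
        #                          color='black', linestyle='--')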
def _save(self, plot_info):
""" Save the figure """
        # The tight method only works if there is a title (it cuts off parts of
        # the axis numbers, hence this hack; this may also become a problem
        # for the other edges of the figure if there are no labels)
tight = ''
if plot_info.has_key('title'):
tight = 'tight'
        # For some weird reason we cannot write directly to sys.stdout when it
        # is a pdf file, so we use the StringIO object workaround
if self.o['image_format'] == 'pdf':
import StringIO
out = StringIO.StringIO()
self.fig.savefig(out, bbox_inches=tight, pad_inches=0.03,
format=self.o['image_format'])
sys.stdout.write(out.getvalue())
else:
self.fig.savefig(sys.stdout, bbox_inches=tight, pad_inches=0.03,
format=self.o['image_format'])
| [
"[email protected]"
]
| |
c477ff81c9b1feba08d0ef6621a1c2c2e4a1acac | b5c5c27d71348937322b77b24fe9e581cdd3a6c4 | /tests/pyutils/test_is_invalid.py | d39c12e2c935eb05fc776988bbe838d3d98d9059 | [
"MIT"
]
| permissive | dfee/graphql-core-next | 92bc6b4e5a39bd43def8397bbb2d5b924d5436d9 | 1ada7146bd0510171ae931b68f6c77dbdf5d5c63 | refs/heads/master | 2020-03-27T10:30:43.486607 | 2018-08-30T20:26:42 | 2018-08-30T20:26:42 | 146,425,198 | 0 | 0 | MIT | 2018-08-28T09:40:09 | 2018-08-28T09:40:09 | null | UTF-8 | Python | false | false | 865 | py | from math import inf, nan
from graphql.error import INVALID
from graphql.pyutils import is_invalid
def describe_is_invalid():
def null_is_not_invalid():
assert is_invalid(None) is False
def falsy_objects_are_not_invalid():
assert is_invalid('') is False
assert is_invalid(0) is False
assert is_invalid([]) is False
assert is_invalid({}) is False
def truthy_objects_are_not_invalid():
assert is_invalid('str') is False
assert is_invalid(1) is False
assert is_invalid([0]) is False
assert is_invalid({None: None}) is False
def inf_is_not_invalid():
assert is_invalid(inf) is False
assert is_invalid(-inf) is False
def undefined_is_invalid():
assert is_invalid(INVALID) is True
def nan_is_invalid():
assert is_invalid(nan) is True
| [
"[email protected]"
]
| |
5628b540ad53bf7290b179cb3f6de1f245706da2 | bd3528cc321dc37f8c47ac63e57561fd6432c7cc | /transformer/tensor2tensor/models/xception.py | 2452a7d4ff23d06b687e61f5eea6106e13c22930 | [
"MIT",
"Apache-2.0"
]
| permissive | oskopek/cil | 92bbf52f130a1ed89bbe93b74eef74027bb2b37e | 4c1fd464b5af52aff7a0509f56e21a2671fb8ce8 | refs/heads/master | 2023-04-15T10:23:57.056162 | 2021-01-31T14:51:51 | 2021-01-31T14:51:51 | 139,629,560 | 2 | 5 | MIT | 2023-03-24T22:34:39 | 2018-07-03T19:35:24 | Python | UTF-8 | Python | false | false | 5,857 | py | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Xception."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from six.moves import range # pylint: disable=redefined-builtin
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
def residual_block(x, hparams):
"""A stack of convolution blocks with residual connection."""
k = (hparams.kernel_height, hparams.kernel_width)
dilations_and_kernels = [((1, 1), k) for _ in range(3)]
y = common_layers.subseparable_conv_block(
x,
hparams.hidden_size,
dilations_and_kernels,
padding="SAME",
separability=0,
name="residual_block")
x = common_layers.layer_norm(x + y, hparams.hidden_size, name="lnorm")
return tf.nn.dropout(x, 1.0 - hparams.dropout)
def xception_internal(inputs, hparams):
"""Xception body."""
with tf.variable_scope("xception"):
cur = inputs
if cur.get_shape().as_list()[1] > 200:
# Large image, Xception entry flow
cur = xception_entry(cur, hparams.hidden_size)
else:
# Small image, conv
cur = common_layers.conv_block(
cur,
hparams.hidden_size, [((1, 1), (3, 3))],
first_relu=False,
padding="SAME",
force2d=True,
name="small_image_conv")
for i in range(hparams.num_hidden_layers):
with tf.variable_scope("layer_%d" % i):
cur = residual_block(cur, hparams)
return xception_exit(cur)
def xception_entry(inputs, hidden_dim):
"""Xception entry flow."""
with tf.variable_scope("xception_entry"):
def xnet_resblock(x, filters, res_relu, name):
"""Resblock."""
with tf.variable_scope(name):
y = common_layers.separable_conv_block(
x,
filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],
first_relu=True,
padding="SAME",
force2d=True,
name="sep_conv_block")
y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2))
return y + common_layers.conv_block(
x,
filters, [((1, 1), (1, 1))],
padding="SAME",
strides=(2, 2),
first_relu=res_relu,
force2d=True,
name="res_conv0")
tf.summary.image("inputs", inputs, max_outputs=2)
x = common_layers.conv_block(
inputs,
32, [((1, 1), (3, 3))],
first_relu=False,
padding="SAME",
strides=(2, 2),
force2d=True,
name="conv0")
x = common_layers.conv_block(
x, 64, [((1, 1), (3, 3))], padding="SAME", force2d=True, name="conv1")
x = xnet_resblock(x, min(128, hidden_dim), True, "block0")
x = xnet_resblock(x, min(256, hidden_dim), False, "block1")
return xnet_resblock(x, hidden_dim, False, "block2")
def xception_exit(inputs):
"""Xception exit flow."""
with tf.variable_scope("xception_exit"):
x = inputs
x_shape = x.get_shape().as_list()
if x_shape[1] is None or x_shape[2] is None:
length_float = tf.to_float(tf.shape(x)[1])
length_float *= tf.to_float(tf.shape(x)[2])
spatial_dim_float = tf.sqrt(length_float)
spatial_dim = tf.to_int32(spatial_dim_float)
x_depth = x_shape[3]
x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])
elif x_shape[1] != x_shape[2]:
spatial_dim = int(math.sqrt(float(x_shape[1] * x_shape[2])))
if spatial_dim * spatial_dim != x_shape[1] * x_shape[2]:
raise ValueError("Assumed inputs were square-able but they were "
"not. Shape: %s" % x_shape)
x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])
x = common_layers.conv_block_downsample(x, (3, 3), (2, 2), "SAME")
return tf.nn.relu(x)
@registry.register_model
class Xception(t2t_model.T2TModel):
def body(self, features):
return xception_internal(features["inputs"], self._hparams)
@registry.register_hparams
def xception_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.batch_size = 128
hparams.hidden_size = 768
hparams.dropout = 0.2
hparams.symbol_dropout = 0.2
hparams.label_smoothing = 0.1
hparams.clip_grad_norm = 2.0
hparams.num_hidden_layers = 8
hparams.kernel_height = 3
hparams.kernel_width = 3
hparams.learning_rate_decay_scheme = "exp"
hparams.learning_rate = 0.05
hparams.learning_rate_warmup_steps = 3000
hparams.initializer_gain = 1.0
hparams.weight_decay = 3.0
hparams.num_sampled_classes = 0
hparams.sampling_method = "argmax"
hparams.optimizer_adam_epsilon = 1e-6
hparams.optimizer_adam_beta1 = 0.85
hparams.optimizer_adam_beta2 = 0.997
return hparams
@registry.register_hparams
def xception_tiny():
hparams = xception_base()
hparams.batch_size = 2
hparams.hidden_size = 64
hparams.num_hidden_layers = 2
hparams.learning_rate_decay_scheme = "none"
return hparams
@registry.register_hparams
def xception_tiny_tpu():
hparams = xception_base()
hparams.batch_size = 2
hparams.num_hidden_layers = 2
hparams.hidden_size = 128
hparams.optimizer = "TrueAdam"
return hparams
| [
"[email protected]"
]
| |
2f816bb890383cc7f178bf5be4d2290e2fbdfa61 | 4e81512b34223788559ea1c84acb2ef0aa4d899d | /booktracker/settings.py | 6831cbc70ea526b979e29eb6fc1e105511cae832 | []
| no_license | arsummers/book-tracker-django | 75a2e559c5dd05be67287a40514533a699889368 | 012fa821288ee99f45665e017bc8b7ab4db54a1f | refs/heads/master | 2022-12-11T16:28:27.393199 | 2019-10-15T21:28:37 | 2019-10-15T21:28:37 | 209,672,800 | 0 | 0 | null | 2022-12-08T06:38:29 | 2019-09-20T00:37:11 | Python | UTF-8 | Python | false | false | 3,428 | py | """
Django settings for booktracker project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'v9j%)jsi$x1sp8oqfgln@m0a^1*0%z&4defyjpd#0ld@=^5vdx'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'books.apps.BooksConfig',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'booktracker.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'booktracker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
django_heroku.settings(locals())
| [
"[email protected]"
]
| |
5ac388b69a2ab3c163d4dd86e79293977f264fc7 | aa1e637de90f69f9ae742d42d5b777421617d10c | /nitro/resource/config/cs/csvserver_cspolicy_binding.py | e6035fde5b4e240edfc80048627a63609ec4ab92 | [
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | km0420j/nitro-python | db7fcb49fcad3e7a1ae0a99e4fc8675665da29ba | d03eb11f492a35a2a8b2a140322fbce22d25a8f7 | refs/heads/master | 2021-10-21T18:12:50.218465 | 2019-03-05T14:00:15 | 2019-03-05T15:35:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,684 | py | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class csvserver_cspolicy_binding(base_resource) :
"""Binding class showing the cspolicy that can be bound to csvserver."""
def __init__(self) :
self._policyname = ""
self._targetlbvserver = ""
self._priority = 0
self._gotopriorityexpression = ""
self._bindpoint = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self._hits = 0
self._pipolicyhits = 0
self._rule = ""
self._name = ""
self.___count = 0
@property
def priority(self) :
"""Priority for the policy."""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""Priority for the policy.
:param priority:
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def bindpoint(self) :
"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE."""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE
:param bindpoint:
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def policyname(self) :
"""Policies bound to this vserver."""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""Policies bound to this vserver.
:param policyname:
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labelname(self) :
"""Name of the label invoked."""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
"""Name of the label invoked.
:param labelname:
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def name(self) :
"""Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1."""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1
:param name:
"""
try :
self._name = name
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE."""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
:param gotopriorityexpression:
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def targetlbvserver(self) :
"""target vserver name."""
try :
return self._targetlbvserver
except Exception as e:
raise e
@targetlbvserver.setter
def targetlbvserver(self, targetlbvserver) :
"""target vserver name.
:param targetlbvserver:
"""
try :
self._targetlbvserver = targetlbvserver
except Exception as e:
raise e
@property
def invoke(self) :
"""Invoke flag."""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
"""Invoke flag.
:param invoke:
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def labeltype(self) :
"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel."""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel
:param labeltype:
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def rule(self) :
"""Rule."""
try :
return self._rule
except Exception as e:
raise e
@property
def hits(self) :
"""Number of hits."""
try :
return self._hits
except Exception as e:
raise e
@property
def pipolicyhits(self) :
"""Number of hits."""
try :
return self._pipolicyhits
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(csvserver_cspolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.csvserver_cspolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
updateresource = csvserver_cspolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.targetlbvserver = resource.targetlbvserver
updateresource.priority = resource.priority
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [csvserver_cspolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].targetlbvserver = resource[i].targetlbvserver
updateresources[i].priority = resource[i].priority
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
deleteresource = csvserver_cspolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
deleteresource.priority = resource.priority
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [csvserver_cspolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].bindpoint = resource[i].bindpoint
deleteresources[i].priority = resource[i].priority
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
"""Use this API to fetch csvserver_cspolicy_binding resources.
:param service:
:param name:
"""
try :
obj = csvserver_cspolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
"""Use this API to fetch filtered set of csvserver_cspolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param service:
:param name:
:param filter_:
"""
try :
obj = csvserver_cspolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
"""Use this API to count csvserver_cspolicy_binding resources configued on NetScaler.
:param service:
:param name:
"""
try :
obj = csvserver_cspolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
"""Use this API to count the filtered set of csvserver_cspolicy_binding resources.
        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
:param service:
:param name:
:param filter_:
"""
try :
obj = csvserver_cspolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
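    # Hypothetical usage sketch (the nitro client setup is not shown in this
    # file; the vserver name and filter value below are illustrative):
    #
    #     bindings = csvserver_cspolicy_binding.get(client, "cs_vip1")
    #     n = csvserver_cspolicy_binding.count_filtered(
    #         client, "cs_vip1", "bindpoint:REQUEST")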
class Bindpoint:
""" """
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
""" """
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class csvserver_cspolicy_binding_response(base_response) :
""" """
def __init__(self, length=1) :
self.csvserver_cspolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.csvserver_cspolicy_binding = [csvserver_cspolicy_binding() for _ in range(length)]
| [
"[email protected]"
]
| |
5748492e1ac68fbb9456a149c63bf5d73cb70cb7 | 1edb8304c6429729ffc2bab8a13f4123e19d2b32 | /azure-export/settings.py | c35f8f6b2df73b7a2759d73c4be11b093fe95853 | []
| no_license | watchdogpolska/docker-images | d8292fc03df806f5be3a976cf87272f2d46e0b13 | 7a569e1d0cef4a4f57517daeac0456a59a25d021 | refs/heads/master | 2021-09-22T00:26:11.317526 | 2021-08-14T02:40:43 | 2021-08-14T02:41:33 | 157,301,522 | 0 | 4 | null | 2021-07-15T23:54:21 | 2018-11-13T01:26:54 | Python | UTF-8 | Python | false | false | 1,023 | py | import os
import dataset
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.resource import ResourceManagementClient
from msrestazure.azure_active_directory import ServicePrincipalCredentials
# $ az ad sp create-for-rbac --name "MY-PRINCIPAL-NAME2" --password "XXX" --verbose
# $ az role assignment create --assignee {app_id} --role Reader
tenant_id = os.environ.get('AZURE_TENANT_ID', '7dbd59e5-e4d9-499b-b5cb-005289cc158a')
app_id = os.environ.get('AZURE_APP_ID', 'bfeb6f69-5a18-4d0c-a669-2e7eb3798fdd')
password = os.environ['AZURE_APP_PASSWORD']
subscription_id = os.environ.get('AZURE_SUBSCRIPTION_ID', 'efeb9457-bf38-460f-a1e5-bb5ecc817987')
credentials = ServicePrincipalCredentials(
client_id=app_id,
secret=password,
tenant=tenant_id
)
storage_client = StorageManagementClient(
credentials,
subscription_id
)
resource_client = ResourceManagementClient(
credentials,
subscription_id
)
db = dataset.connect(os.environ.get('DATABASE_URL', 'sqlite:///:memory:'))
| [
"[email protected]"
]
| |
410f343e06b5a2e46e0ac58189f5fc2337669859 | 15a992391375efd487b6442daf4e9dd963167379 | /monai/networks/nets/__init__.py | cd9329f61baf93158a6a3aa20992150937c07ed3 | [
"Apache-2.0"
]
| permissive | Bala93/MONAI | b0e68e1b513adcd20eab5158d4a0e5c56347a2cd | e0a7eff5066da307a73df9145077f6f1fec7a514 | refs/heads/master | 2022-08-22T18:01:25.892982 | 2022-08-12T18:13:53 | 2022-08-12T18:13:53 | 259,398,958 | 2 | 0 | null | 2020-04-27T17:09:12 | 2020-04-27T17:09:11 | null | UTF-8 | Python | false | false | 2,805 | py | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .ahnet import AHnet, Ahnet, AHNet
from .attentionunet import AttentionUnet
from .autoencoder import AutoEncoder
from .basic_unet import BasicUNet, BasicUnet, Basicunet, basicunet
from .classifier import Classifier, Critic, Discriminator
from .densenet import (
DenseNet,
Densenet,
DenseNet121,
Densenet121,
DenseNet169,
Densenet169,
DenseNet201,
Densenet201,
DenseNet264,
Densenet264,
densenet121,
densenet169,
densenet201,
densenet264,
)
from .dints import DiNTS, TopologyConstruction, TopologyInstance, TopologySearch
from .dynunet import DynUNet, DynUnet, Dynunet
from .efficientnet import (
BlockArgs,
EfficientNet,
EfficientNetBN,
EfficientNetBNFeatures,
drop_connect,
get_efficientnet_image_size,
)
from .flexible_unet import FlexibleUNet
from .fullyconnectednet import FullyConnectedNet, VarFullyConnectedNet
from .generator import Generator
from .highresnet import HighResBlock, HighResNet
from .hovernet import Hovernet, HoVernet, HoVerNet, HoverNet
from .milmodel import MILModel
from .netadapter import NetAdapter
from .regressor import Regressor
from .regunet import GlobalNet, LocalNet, RegUNet
from .resnet import ResNet, resnet10, resnet18, resnet34, resnet50, resnet101, resnet152, resnet200
from .segresnet import SegResNet, SegResNetVAE
from .senet import (
SENet,
SEnet,
Senet,
SENet154,
SEnet154,
Senet154,
SEResNet50,
SEresnet50,
Seresnet50,
SEResNet101,
SEresnet101,
Seresnet101,
SEResNet152,
SEresnet152,
Seresnet152,
SEResNext50,
SEResNeXt50,
SEresnext50,
Seresnext50,
SEResNext101,
SEResNeXt101,
SEresnext101,
Seresnext101,
senet154,
seresnet50,
seresnet101,
seresnet152,
seresnext50,
seresnext101,
)
from .swin_unetr import PatchMerging, PatchMergingV2, SwinUNETR
from .torchvision_fc import TorchVisionFCModel
from .transchex import BertAttention, BertMixedLayer, BertOutput, BertPreTrainedModel, MultiModal, Pooler, Transchex
from .unet import UNet, Unet
from .unetr import UNETR
from .varautoencoder import VarAutoEncoder
from .vit import ViT
from .vitautoenc import ViTAutoEnc
from .vnet import VNet
| [
"[email protected]"
]
| |
2b3c16897a3b35cc9e66306da93eacb32c23e5ef | 0d5de943909877c01b485d8a918d8bef0cf9e196 | /plugins/CompleteLikeEclipse/scribes/edit/complete_like_eclipse/__init__.py | 1bf51577114ffc6ae996e45832814ec68d606743 | [
"MIT"
]
| permissive | baverman/scribes-goodies | 31e2017d81f04cc01e9738e96ceb19f872a3d280 | f6ebfe62e5103d5337929648109b4e610950bced | refs/heads/master | 2021-01-21T10:13:08.397980 | 2013-09-25T16:33:05 | 2013-09-25T16:33:05 | 854,207 | 2 | 1 | null | 2013-09-25T16:33:05 | 2010-08-22T03:12:39 | Python | UTF-8 | Python | false | false | 4,949 | py | from gettext import gettext as _
from string import whitespace
from scribes.helpers import TriggerManager, Trigger, connect_external, connect_all
from signals import Signals
from IndexerProcessManager import Manager as IndexerProcessManager
from DictionaryManager import Manager as DictionaryManager
from ProcessCommunicator import Communicator as ProcessCommunicator
from TextExtractor import Extractor as TextExtractor
from BufferMonitor import Monitor as BufferMonitor
trigger = Trigger('complete-word', '<alt>slash',
    'Eclipse-like word completion', 'Text Operations')
class Plugin(object):
def __init__(self, editor):
self.editor = editor
self.signals = Signals()
self.triggers = TriggerManager(editor)
connect_all(self, self.signals, self.triggers, textbuffer=self.editor.textbuffer)
self.block_word_reset = False
self.words = None
self.start_word = None
self.start_offset = None
self.indexer = IndexerProcessManager(self.signals.sender, editor)
self.dictionary_manager = DictionaryManager(self.signals.sender, editor)
self.communicator = ProcessCommunicator(self.signals.sender, editor)
self.extractor = TextExtractor(self.signals.sender, editor)
self.buffer_monitor = BufferMonitor(self.signals.sender, editor)
def unload(self):
self.signals.destroy.emit()
return False
def is_valid_character(self, c):
if c in whitespace:
return False
return c.isalpha() or c.isdigit() or (c in ("-", "_"))
def backward_to_word_begin(self, iterator):
if iterator.starts_line(): return iterator
iterator.backward_char()
while self.is_valid_character(iterator.get_char()):
iterator.backward_char()
if iterator.starts_line(): return iterator
iterator.forward_char()
return iterator
def forward_to_word_end(self, iterator):
if iterator.ends_line(): return iterator
if not self.is_valid_character(iterator.get_char()): return iterator
while self.is_valid_character(iterator.get_char()):
iterator.forward_char()
if iterator.ends_line(): return iterator
return iterator
def get_word_before_cursor(self):
iterator = self.editor.cursor.copy()
# If the cursor is in front of a valid character we ignore
# word completion.
if self.is_valid_character(iterator.get_char()):
return None, None
if iterator.starts_line():
return None, None
iterator.backward_char()
if not self.is_valid_character(iterator.get_char()):
return None, None
start = self.backward_to_word_begin(iterator.copy())
end = self.forward_to_word_end(iterator.copy())
word = self.editor.textbuffer.get_text(start, end).strip()
return word, start
def get_matches(self, string):
if not self.words:
return None
result = []
for word, count in self.words.iteritems():
if word != string and word.startswith(string):
result.append((word.encode('utf8'), count))
result.sort(key=lambda r: r[1], reverse=True)
return [r[0] for r in result]
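    # Example (hypothetical word index): with self.words = {'foo': 3,
    # 'food': 5, 'bar': 1}, get_matches('fo') returns ['food', 'foo'] --
    # the candidates that extend the prefix, most frequent first.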
@trigger
def cycle(self, *args):
word_to_complete, start = self.get_word_before_cursor()
if not word_to_complete:
return False
if not self.start_word or self.start_offset != start.get_offset():
self.start_word = word_to_complete
self.start_offset = start.get_offset()
matches = self.get_matches(self.start_word)
if matches:
idx = 0
try:
idx = matches.index(word_to_complete)
idx = (idx + 1) % len(matches)
except ValueError:
pass
if matches[idx] == word_to_complete:
self.editor.update_message(_("Word completed already"), "yes", 1)
return False
self.buffer_changed_handler.block()
end = self.editor.cursor.copy()
self.editor.textbuffer.delete(start, end)
self.editor.textbuffer.insert(start, matches[idx])
self.editor.response()
self.buffer_changed_handler.unblock()
else:
self.editor.update_message(_("No word to complete"), "no", 1)
return False
@Signals.dictionary
def word_list_updated(self, sender, words):
self.words = words
return False
@connect_external('textbuffer', 'changed')
def buffer_changed(self, *args):
self.start_word = None
        self.start_offset = None
return False
| [
"[email protected]"
]
| |
d8e032b3398ca8b4d5089d70996f8278fc086e9d | 123cf58c5dc4800d5d50fd2934cc63be1080e093 | /models/string_cluster_model/encoder_network.py | 33989eff917210d26d9f229c5dc93a45db8912b7 | []
| no_license | nitishgupta/char-encode-decode | dd303a9aa77a3af9000e275bcb86abb18d0b7d84 | eb4bbb8be701c3cbb4476a779094c45458a1daef | refs/heads/master | 2021-04-30T23:25:49.895472 | 2017-09-03T06:37:55 | 2017-09-03T06:37:55 | 66,794,519 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,167 | py | import time
import tensorflow as tf
import numpy as np
from models.base import Model
class EncoderModel(Model):
"""Unsupervised Clustering using Discrete-State VAE"""
def __init__(self, num_layers, batch_size, h_dim, input_batch, input_lengths,
char_embeddings, scope_name, dropout_keep_prob=1.0):
self.num_layers = num_layers # Num of layers in the encoder and decoder network
# Size of hidden layers in the encoder and decoder networks. This will also
# be the dimensionality in which each string is represented when encoding
self.h_dim = h_dim
self.batch_size = batch_size
with tf.variable_scope(scope_name) as scope:
encoder_cell = tf.nn.rnn_cell.BasicLSTMCell(h_dim, state_is_tuple=True)
encoder_dropout_cell = tf.nn.rnn_cell.DropoutWrapper(
cell=encoder_cell,
input_keep_prob=dropout_keep_prob,
output_keep_prob=1.0)
self.encoder_network = tf.nn.rnn_cell.MultiRNNCell(
[encoder_dropout_cell] * self.num_layers, state_is_tuple=True)
#[batch_size, decoder_max_length, embed_dim]
self.embedded_encoder_sequences = tf.nn.embedding_lookup(char_embeddings,
input_batch)
self.encoder_outputs, self.encoder_states = tf.nn.dynamic_rnn(
cell=self.encoder_network, inputs=self.embedded_encoder_sequences,
sequence_length=input_lengths, dtype=tf.float32)
# To get the last output of the encoder_network
reverse_output = tf.reverse_sequence(input=self.encoder_outputs,
seq_lengths=tf.to_int64(input_lengths),
seq_dim=1,
batch_dim=0)
en_last_output = tf.slice(input_=reverse_output,
begin=[0,0,0],
size=[self.batch_size, 1, -1])
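            # Example of the reverse-and-slice trick (hypothetical shapes):
            # for a batch of 2 sequences with input_lengths [3, 5],
            # tf.reverse_sequence reverses each sequence over its own valid
            # length, moving output t=2 (resp. t=4) to position 0, so slicing
            # position 0 yields the last *valid* output of every sequence
            # rather than the output at the padded maximum length.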
# [batch_size, h_dim]
self.encoder_last_output = tf.reshape(en_last_output,
shape=[self.batch_size, -1],
name="encoder_last_output") | [
"[email protected]"
]
| |
50757308714e1748bc154ae1b6b8a9944dfd0fca | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/7/sxj.py | 95c3a20eebf162044696cda87af0444abb3afeb2 | []
| no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'sxJ':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
]
| |
5a4102380ceda801c33ba27df61c91998ba24ab0 | c4943748c504f26e197ce391c747bb5a4c146be2 | /trade_data_get/future_daily_point_data.py | 242777b92ad8080f11fc2e523a24c024c3dba7a1 | []
| no_license | NewLanded/security_data_store | 88919c233d6bd22b20d0d9918c8e2ffcafc33c3e | d23c68777e6ecb0641cb5c6f7061b1c11d208886 | refs/heads/master | 2021-07-21T12:55:47.650454 | 2021-06-30T07:32:00 | 2021-06-30T07:32:00 | 133,665,767 | 1 | 0 | null | 2018-05-16T13:03:35 | 2018-05-16T12:56:05 | null | UTF-8 | Python | false | false | 1,715 | py | import datetime
import time
import tushare as ts
from conf import PRO_KEY
from util_base.date_util import convert_datetime_to_str, convert_str_to_datetime, get_date_range
from util_base.db_util import engine
from util_base.db_util import store_failed_message
from util_data.date import Date
ts.set_token(PRO_KEY)
pro = ts.pro_api()
def get_all_future_daily_point_data(data_date_str):
time.sleep(2)
all_future_daily_point_data = pro.fut_daily(trade_date=data_date_str)
time.sleep(2)
return all_future_daily_point_data
def store_future_daily_point_data(future_daily_point_data):
future_daily_point_data["update_date"] = datetime.datetime.now()
future_daily_point_data["trade_date"] = future_daily_point_data["trade_date"].apply(convert_str_to_datetime)
future_daily_point_data.to_sql("future_daily_point_data", engine, index=False, if_exists="append")
def start(date_now=None):
date_now = datetime.datetime.now() if date_now is None else date_now
date_now = datetime.datetime(date_now.year, date_now.month, date_now.day)
if Date().is_workday(date_now):
try:
all_future_daily_point_data = get_all_future_daily_point_data(convert_datetime_to_str(date_now))
store_future_daily_point_data(all_future_daily_point_data)
except Exception as e:
store_failed_message("", "future_daily_point_data", str(e), date_now)
if __name__ == "__main__":
pass
for date_now in get_date_range(datetime.datetime(2015, 1, 1), datetime.datetime(2021, 6, 18)):
print(date_now)
start(date_now)
# start(datetime.datetime(2020, 5, 19))
# all_future_daily_point_data = pro.daily(trade_date="20181008")
pass
| [
"[email protected]"
]
| |
658188357a420a967626a633ab73119b6a6a95f5 | f89b26d9c53b1d5cc6b14d7f20c57772c98fb53d | /plus minus.py | 3620c88e652db9cf30d344d0e8462e9fc3708813 | []
| no_license | Mityun/Analitic_of_my_book | 9be73824b0d218f87619e938ef0b0ceeb57e1310 | dd9842925205b3ec55179ae00df798031dcf8c26 | refs/heads/main | 2023-08-14T10:41:33.105877 | 2021-10-10T07:32:23 | 2021-10-10T07:32:23 | 326,292,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | q = float(input())
w = float(input())
e = input()
if e == "-" and w != 0:
print(q - w)
elif e == "+" and w != 0:
print(q + w)
elif e == "*" and w != 0:
print(q * w)
elif e == "/" and w != 0:
print(q / w)
elif e != "+" and e != "-" and e != "*" and e != "/":
print(888888)
elif w == 0:
print(888888)
| [
"[email protected]"
]
| |
c74942de61e4a32ff2a0a0be62da3f16bf3c27a3 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/BuildLinks1.10/test_input/CJ_16_2/16_2_1_anthrocoder_digits.py | 595c608a01ecda0f5fcd93bfb768e0ff0aab1314 | []
| no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 944 | py | import sys
try:
f = open(sys.argv[1])
out = open(sys.argv[1].rpartition("\\")[2]+".out", 'w')
numTests = int(f.readline())
for i in range (0, numTests):
note = f.readline()
# print (note)
phoneNo = ""
zeros = note.count("Z")
# print ("found zeros: " + str(zeros))
twos = note.count("W")
# print ("found twos: " + str(twos))
fours = note.count("U")
sixes = note.count("X")
eights = note.count("G")
ones = note.count("O") - twos - fours - zeros
threes = note.count("H") - eights
fives = note.count("F") - fours
sevens = note.count("V") - fives
nines = note.count("I") - fives - sixes - eights
phoneNo = ("0" * zeros) + ("1" * ones) + ("2" * twos) + ("3"*threes)+("4"*fours)+("5"*fives)+("6"*sixes)+("7"*sevens)+("8"*eights)+("9"*nines)
out.write("Case #" + str(i+1) +": " + phoneNo + "\n")
except IOError as e:
    print ('Error:', e)
| [
"[[email protected]]"
]
| |
df05476a55d74eac175c02cf47d0431568781b2d | a84e1ed67ef2592cf22f7d19cdddaf16700d6a8e | /graveyard/web/VNET/branches/vnf/vnf/components/NeutronExperiment.py | 8fd30a324c74a91f7ace04f9b8a10a6528a0f084 | []
| no_license | danse-inelastic/inelastic-svn | dda998d7b9f1249149821d1bd3c23c71859971cc | 807f16aa9510d45a45360d8f59f34f75bb74414f | refs/heads/master | 2016-08-11T13:40:16.607694 | 2016-02-25T17:58:35 | 2016-02-25T17:58:35 | 52,544,337 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 24,209 | py | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2007 All Rights Reserved
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from Actor import actionRequireAuthentication, action_link, AuthenticationError
from FormActor import FormActor as base
class NeutronExperiment(base):
class Inventory(base.Inventory):
import pyre.inventory
id = pyre.inventory.str("id", default=None)
id.meta['tip'] = "the unique identifier of the experiment"
ncount = pyre.inventory.float( 'ncount', default = 1e6 )
ncount.meta['tip'] = 'number of neutrons'
pass # end of Inventory
def default(self, director):
try:
page = director.retrieveSecurePage( 'neutronexperiment' )
except AuthenticationError, err:
return err.page
main = page._body._content._main
# populate the main column
document = main.document(title='Neutron Experiment')
document.description = ''
document.byline = 'byline?'
p = document.paragraph()
action = actionRequireAuthentication(
actor = 'neutronexperimentwizard', sentry = director.sentry,
label = 'this wizard', routine = 'start',
)
wizard_link = action_link( action, director.cgihome )
action = actionRequireAuthentication(
actor = 'neutronexperiment', sentry = director.sentry,
label = 'experiments', routine = 'listall',
)
list_link = action_link( action, director.cgihome )
p.text = [
'In this virtual neutron facility, you can set up',
'a new experiment by using %s.' % wizard_link,
'Or you can select from one of the %s you have run' % list_link,
'and rerun it with new settings.',
]
return page
def delete(self, director):
try:
page = director.retrieveSecurePage( 'neutronexperiment' )
except AuthenticationError, error:
return error.page
record = director.clerk.getNeutronExperiment( self.inventory.id )
director.clerk.deleteRecord( record )
return self.listall(director)
def listall(self, director):
try:
page = director.retrieveSecurePage( 'neutronexperiment' )
except AuthenticationError, err:
return err.page
main = page._body._content._main
# populate the main column
document = main.document(title='Experiments')
document.description = ''
document.byline = 'byline?'
#
p = document.paragraph()
action = actionRequireAuthentication(
label = 'this wizard',
actor = 'neutronexperimentwizard',
routine = 'start',
sentry = director.sentry,
)
link = action_link( action, director.cgihome )
p.text = [
'You can perform various kinds of neutron experiments in',
'this virtual neutron facility.',
'To start, you can plan a new experiment by following %s.' % link,
]
# retrieve id:record dictionary from db
clerk = director.clerk
experiments = clerk.indexNeutronExperiments()
# make a list of all experiments
listexperiments( experiments.values(), document, director )
return page
def view(self, director):
try:
page = director.retrieveSecurePage( 'neutronexperiment' )
except AuthenticationError, err:
return err.page
# the record we are working on
id = self.inventory.id
experiment = director.clerk.getNeutronExperiment( id )
#see if the experiment is constructed or not. if not
#ask the wizard to do the editing.
if experiment.status in ['started', 'partially configured']:
director.routine = 'submit_experiment'
actor = director.retrieveActor( 'neutronexperimentwizard')
director.configureComponent( actor )
actor.inventory.id = self.inventory.id
return actor.submit_experiment( director )
main = page._body._content._main
# populate the main column
document = main.document(
title='Experiment %r' % experiment.short_description )
document.description = ( '')
document.byline = 'byline?'
status = experiment.status
method = '_view_%s' % status
method = getattr(self, method)
method( document, director )
return page
def edit(self, director):
try:
page, document = self._head( director )
except AuthenticationError, error:
return error.page
self.processFormInputs( director )
#see if the experiment is constructed or not. if not
#ask the wizard to do the editing.
experiment = director.clerk.getNeutronExperiment( self.inventory.id )
if experiment.status != 'constructed':
director.routine = 'start'
actor = director.retrieveActor( 'neutronexperimentwizard')
director.configureComponent( actor )
actor.inventory.id = self.inventory.id
return actor.start( director )
formcomponent = self.retrieveFormToShow( 'run_neutron_experiment' )
formcomponent.inventory.id = self.inventory.id
formcomponent.director = director
# create form
form = document.form(
name='neutronexperiment',
legend= formcomponent.legend(),
action=director.cgihome)
# specify action
action = actionRequireAuthentication(
actor = 'job', sentry = director.sentry,
label = '', routine = 'edit',
arguments = {'form-received': formcomponent.name } )
from vnf.weaver import action_formfields
action_formfields( action, form )
# expand the form with fields of the data object that is being edited
formcomponent.expand( form )
# run button
submit = form.control(name="submit", type="submit", value="Run")
return page
def run(self, director):
try:
page = director.retrieveSecurePage( 'neutronexperiment' )
except AuthenticationError, err:
return err.page
experiment = director.clerk.getNeutronExperiment(
self.inventory.id)
job_id = experiment.job_id
if empty_id(job_id):
raise RuntimeError, "job not yet established"
job_id = experiment.job_id
job = director.clerk.getJob( job_id )
try:
Scheduler.schedule(job, director)
experiment.status = 'submitted'
except Exception, err:
import traceback
experiment.status = 'submissionfailed'
job.error = traceback.format_exc()
# update db
director.clerk.updateRecord( job )
director.clerk.updateRecord( experiment )
# check status of job
Scheduler.check( job, director )
return self.view( director )
def selectinstrument(self, director):
try:
page, document = self._head( director )
except AuthenticationError, error:
return error.page
experiment = director.clerk.getNeutronExperiment(
self.inventory.id )
# create form to set scatterer type
formcomponent = self.retrieveFormToShow( 'selectneutroninstrument' )
formcomponent.inventory.experiment_id = experiment.id
formcomponent.director = director
# create form
form = document.form(
name='selectneutroninstrument',
legend= formcomponent.legend(),
action=director.cgihome)
# specify action
action = actionRequireAuthentication(
actor = 'neutronexperiment', sentry = director.sentry,
label = '', routine = 'edit',
arguments = { 'id': experiment.id,
'form-received': formcomponent.name } )
from vnf.weaver import action_formfields
action_formfields( action, form )
# expand the form with fields of the data object that is being edited
formcomponent.expand( form )
# ok button
submit = form.control(name="submit", type="submit", value="OK")
return page
def __init__(self, name=None):
if name is None:
name = "neutronexperiment"
super(NeutronExperiment, self).__init__(name)
return
def _add_review(self, document, director):
experiment = director.clerk.getNeutronExperiment(self.inventory.id)
experiment = director.clerk.getHierarchy( experiment )
from TreeViewCreator import create
view = create( experiment )
document.contents.append( view )
return
def _add_revision_sentence(self, document, director):
p = document.paragraph()
action = actionRequireAuthentication(
label = 'here',
actor = 'neutronexperimentwizard',
routine = 'start',
sentry = director.sentry,
id = self.inventory.id)
link = action_link( action, director.cgihome )
p.text = [
'If you need to make changes to this experiment,',
'please click %s.' % link,
]
return
def _add_run_sentence(self, document, director):
p = document.paragraph()
action = actionRequireAuthentication(
label = 'here',
actor = 'neutronexperiment',
routine = 'run',
sentry = director.sentry,
id = self.inventory.id)
link = action_link( action, director.cgihome )
p.text = [
'If you are done with experiment configuration,',
'please click %s to start this experiment.' % link,
]
return
def _add_delete_sentence(self, document, director):
p = document.paragraph()
action = actionRequireAuthentication(
label = 'here',
actor = 'neutronexperiment',
routine = 'delete',
sentry = director.sentry,
id = self.inventory.id)
link = action_link( action, director.cgihome )
p.text = [
'To delete this experiment, please click %s.' % link,
]
return
def _view_constructed(self, document, director):
experiment = director.clerk.getNeutronExperiment(self.inventory.id)
p = document.paragraph()
p.text = [
'Experiment %r has been constructed.' % experiment.short_description,
]
p.text += [
'Configuration details of this experiment can be',
'found out in the following tree view.',
'Please review them before you start the experiment.',
]
self._add_review( document, director )
self._add_revision_sentence( document, director )
self._add_run_sentence( document, director )
self._add_delete_sentence( document, director )
return
def _view_submissionfailed(self, document, director):
        experiment = director.clerk.getNeutronExperiment(self.inventory.id)
        experiment = director.clerk.getHierarchy( experiment )
        p = document.paragraph( )
        p.text = [
            'We have tried to start experiment %r for you but failed.' % experiment.short_description,
            'This could be due to network error.',
            'The error message returned from computation server is:',
        ]
p = document.paragraph(cls = 'error' )
p.text = [ experiment.job.error ]
p = document.paragraph()
p.text += [
'Configuration details of this experiment can be',
'found out in the following tree view.',
]
self._add_review( document, director )
self._add_revision_sentence( document, director )
self._add_run_sentence( document, director )
self._add_delete_sentence( document, director )
return
def _view_submitted(self, document, director):
experiment = director.clerk.getNeutronExperiment(self.inventory.id)
experiment = director.clerk.getHierarchy( experiment )
#refresh script
p = document.paragraph()
p.text = [
'''
<script>
<!--
/*
Auto Refresh Page with Time script
By JavaScript Kit (javascriptkit.com)
Over 200+ free scripts here!
*/
//enter refresh time in "minutes:seconds" Minutes should range from 0 to inifinity. Seconds should range from 0 to 59
var limit="0:10"
var parselimit=limit.split(":")
parselimit=parselimit[0]*60+parselimit[1]*1
function beginrefresh(){
if (parselimit==1)
window.location.reload()
else{
parselimit-=1
curmin=Math.floor(parselimit/60)
cursec=parselimit%60
if (curmin!=0)
curtime=curmin+" minutes and "+cursec+" seconds left until page refresh!"
else
curtime=cursec+" seconds left until page refresh!"
window.status=curtime
setTimeout("beginrefresh()",1000)
}
}
window.onload=beginrefresh
//-->
</script>
''',
]
panel = document.form(
name='null',
legend= 'Summary',
action='')
p = panel.paragraph()
p.text = [
'Experiment %r was started %s on server %r, using %s nodes.' % (
experiment.short_description, experiment.job.timeStart,
experiment.job.computation_server.short_description,
experiment.job.numprocessors,
),
]
p.text += [
'Configuration details of this experiment can be',
            'found in the following tree view.',
]
self._add_review( panel, director )
self._add_results( document, director )
#update status
if experiment.job.status == 'finished': experiment.status = 'finished'
director.clerk.updateRecord( experiment )
return
def _view_finished(self, document, director):
experiment = director.clerk.getNeutronExperiment(self.inventory.id)
experiment = director.clerk.getHierarchy( experiment )
panel = document.form(
name='null',
legend= 'Summary',
action='')
p = panel.paragraph()
p.text = [
'Experiment %r was started %s on server %r, using %s nodes.' % (
experiment.short_description, experiment.job.timeStart,
experiment.job.computation_server.short_description,
experiment.job.numprocessors,
),
]
p.text += [
'Configuration details of this experiment can be',
            'found in the following tree view.',
]
self._add_review( panel, director )
self._add_results( document, director )
#update status
if experiment.job.status == 'finished': experiment.status = 'finished'
director.clerk.updateRecord( experiment )
return
def _add_results(self, document, director):
experiment = director.clerk.getNeutronExperiment( self.inventory.id )
# data path
job_id = experiment.job_id
job = director.clerk.getJob( job_id )
from JobDataManager import JobDataManager
jobdatamanager = JobDataManager( job, director )
path = jobdatamanager.localpath()
server = job.computation_server
# list entries in the job directory in the remote server
output_files = jobdatamanager.listremotejobdir()
document = document.form(
name='null',
legend= 'Data',
action='')
# loop over expected results and see if any of them is available
# and post it
expected = experiment.expected_results
import os
for item in expected:
filename = item
if filename in output_files:
#f = os.path.join( path, item )
                #retrieve file from computation server
localcopy = jobdatamanager.makelocalcopy( filename )
self._post_result( localcopy, document, director )
continue
return
def _post_result(self, resultfile, document, director):
drawer = ResultDrawer( )
experiment = director.clerk.getNeutronExperiment( self.inventory.id )
drawer.draw( experiment, resultfile, document, director )
return
def _head(self, director):
page = director.retrieveSecurePage( 'neutronexperiment' )
main = page._body._content._main
# the record we are working on
id = self.inventory.id
experiment = director.clerk.getNeutronExperiment( id )
# populate the main column
document = main.document(
title='Neutron Experiment: %s' % experiment.short_description )
document.description = ( '')
document.byline = '<a href="http://danse.us">DANSE</a>'
return page, document
def _configure(self):
base._configure(self)
self.id = self.inventory.id
return
pass # end of NeutronExperiment
from wording import plural, present_be
def listexperiments( experiments, document, director ):
p = document.paragraph()
n = len(experiments)
p.text = [ 'Here is a list of experiments you have planned or run:' ]
formatstr = '%(index)s: %(viewlink)s (%(status)s) is a measurement of %(sample)r in %(instrument)r (%(deletelink)s)'
actor = 'neutronexperiment'
container = experiments
for i, element in enumerate( container ):
p = document.paragraph()
name = element.short_description
if name in ['', None, 'None'] : name = 'undefined'
action = actionRequireAuthentication(
actor, director.sentry,
routine = 'view',
label = name,
id = element.id,
)
viewlink = action_link( action, director.cgihome )
action = actionRequireAuthentication(
actor, director.sentry,
routine = 'delete',
label = 'delete',
id = element.id,
)
deletelink = action_link( action, director.cgihome )
element = director.clerk.getHierarchy( element )
if element.instrument is None \
or element.instrument.instrument is None:
action = actionRequireAuthentication(
'neutronexperimentwizard', sentry = director.sentry,
label = 'select instrument',
routine = 'select_instrument',
id = element.id,
)
link = action_link( action, director.cgihome )
instrument = link
else:
instrument = element.instrument.instrument
instrument = instrument.short_description
pass # end if
subs = {'index': i+1,
'viewlink': viewlink,
'deletelink': deletelink,
'status': element.status,
'instrument': instrument,
'sample': 'sample',
}
p.text += [
formatstr % subs,
]
continue
return
def view_instrument(instrument, form):
p = form.paragraph()
p.text = [
'This experiment is to be performed in instrument %s' % instrument.short_description,
]
from TreeViewCreator import create
view = create( instrument )
form.contents.append( view )
return
def view_sampleassembly(sampleassembly, form):
p = form.paragraph()
p.text = [
'The sample to study: %s' % sampleassembly.short_description,
]
from TreeViewCreator import create
view = create( sampleassembly )
form.contents.append( view )
return
def view_instrument_plain(instrument, form):
p = form.paragraph()
p.text = [
'This experiment is to be performed in instrument %s' % instrument.short_description,
]
p = form.paragraph()
geometer = instrument.geometer
components = instrument.componentsequence
p.text = [
'Instrument %r has %s components: %s' % (
instrument.short_description, len(components),
            ', '.join( components ) ),
]
excluded_cols = [
'id', 'creator', 'date', 'short_description',
]
p = form.paragraph()
p.text = [ '<UL>' ]
for component in components:
if component != 'sample':
component_record = getattr( instrument, component ).realcomponent
component_type = component_record.__class__.__name__
else:
component_type = ''
pass # endif
p.text.append( '<li>%s: %s' % (component, component_type) )
p.text.append( '<UL>' )
record = geometer[ component ]
p.text.append( '<li>Position: %s' % (record.position,) )
p.text.append( '<li>Orientation: %s' % (record.orientation,) )
if component == 'sample':
p.text.append( '</UL>' )
continue
columns = component_record.getColumnNames()
for col in columns:
if col in excluded_cols: continue
value = getattr( component_record, col )
p.text.append('<li>%s: %s' % (col, value) )
continue
p.text.append( '</UL>' )
continue
p.text.append( '</UL>' )
return
class ResultDrawer:
def draw(self, experiment, result, document, director):
#special place to save plots
plots_path = 'images/plots'
#
results = director.clerk.getSimulationResults( experiment )
labels = [ r.label for r in results ]
if result in labels:
#if result already saved, just fetch that
id = filter( lambda r: r.label == result, results )[0].id
else:
            #otherwise, we need to have a new record in the simulationresults table
#and also need to save result in the special place
src = result
#simulationresults record
from vnf.dom.SimulationResult import SimulationResult
result_record = director.clerk.new_dbobject(SimulationResult)
result_record.label = result
result_record.simulation_type = 'NeutronExperiment'
result_record.simulation_id = experiment.id
director.clerk.updateRecord( result_record )
id = result_record.id
# copy file to a special place
filepath1 = os.path.join( plots_path, '%s.png' % id )
dest = os.path.join( 'html', filepath1 )
#copy
import shutil
shutil.copyfile( src, dest )
filepath1 = os.path.join( plots_path, '%s.png' % id )
#create view
#hack
path, name = os.path.split( result )
name, ext = os.path.splitext( name )
p = document.paragraph()
p.text = [
name,
]
p = document.paragraph()
p.text = [
'<img src="%s/%s">' % ( director.home, filepath1 ),
]
return
#switch pylab backend to ps so that it does not need interactivity
import os, spawn
import Scheduler
from misc import empty_id
# version
__id__ = "$Id$"
# End of file
| [
"[email protected]"
]
| |
189234dba477920e20978a90104fe63bbe85f33a | ccce57307a499b49b14c8b16706166b08df1c5c1 | /database.py | e454dfecf3c38702f9373d274b585f469e9ff64e | [
"MIT"
]
| permissive | simrit1/CubeTimer | 6ea1ca4549865317c947a3a91d3a57f1786f198c | b226ae875cde35fb573c618d70a408421e0e9f07 | refs/heads/master | 2023-07-01T20:38:20.983300 | 2021-07-18T02:04:33 | 2021-07-18T02:04:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,306 | py | import sqlite3
from CubeUtilities import Time, MultiPhaseTime
class Database:
def __init__(self, table_name, db_dir):
self.database_path = db_dir
self.table_name = table_name
self.closed = False
try:
self.conn = sqlite3.connect(self.database_path)
except sqlite3.Error:
raise Exception(f"'{self.database_path}' doesn't exist.")
self.cursor = self.conn.cursor()
self.create_table()
def create_table(self):
"""
Attempts to create the table
:returns: bool
"""
with self.conn:
if self.table_name == "times":
try:
self.cursor.execute("""CREATE TABLE times ( time float, scramble text, date text, DNF integer, multiphase text )""")
except sqlite3.OperationalError:
return False
elif self.table_name == "settings":
try:
self.cursor.execute("""CREATE TABLE settings ( inspection integer, display_time integer, scramble_len integer, multiphase integer, puzzle_type text )""")
except sqlite3.OperationalError:
return False
else:
raise ValueError(f"Invalid table name, couldn't create table with name '{self.table_name}'")
return True
def insert_record(self, record):
"""
Adds a new record to the database
:param record: Time, MultiPhaseTime, dict
        :returns: None
"""
if self.table_name == "settings":
with self.conn:
self.cursor.execute("INSERT INTO settings VALUES (:inspection, :display_time, :scramble_len, :multiphase, :puzzle_type)", record)
elif self.table_name == "times" and isinstance(record, MultiPhaseTime):
with self.conn:
times = record.get_times()
for index in range(len(times)):
times[index] = str(times[index])
times = ", ".join(times)
with self.conn:
self.cursor.execute("INSERT INTO times VALUES (?, ?, ?, ?, ?)", (record.time, record.scramble, record.date, int(record.DNF), times))
elif self.table_name == "times" and isinstance(record, Time):
print ("saving")
with self.conn:
self.cursor.execute("INSERT INTO times VALUES (?, ?, ?, ?, ?)",
(record.time, record.scramble, record.date, int(record.DNF), ""))
def delete_record(self, oid=None):
"""
        Deletes the record with the given oid. If oid is None and the table
        name is 'settings', all records in the database are deleted.
        :param oid: int, None
        :returns: bool
"""
if self.table_name == "settings":
self.delete_all_records()
return True
elif self.table_name == "times" and oid is not None:
with self.conn:
self.cursor.execute("DELETE FROM times WHERE oid = :oid",
{"oid": oid})
self.cursor.execute("VACUUM")
return True
return False
def update_record(self, record_attr, new_value, identifier):
"""
Updates a record in the database with the attribute record_attr, to new_value.
        Identifier can be an oid, or a dictionary with a separate record attribute along with its known value.
:param record_attr: str
:param new_value: str, int
:param identifier: int, dict
:returns: bool
"""
if self.table_name == "times":
with self.conn:
try:
self.cursor.execute(f"UPDATE times SET {record_attr}=:new_value WHERE oid=:oid", {"oid": identifier, "new_value": str(new_value)})
except sqlite3.Error as e:
return False
return True
elif self.table_name == "settings":
with self.conn:
try:
known_attr, known_val = list(identifier.keys())[0], identifier.get(list(identifier.keys())[0])
try:
known_val = int(known_val)
except ValueError:
pass
self.cursor.execute(f"UPDATE settings SET {record_attr}=:new_value WHERE {known_attr}=:known_val",
{"new_value": str(new_value), "known_val": known_val})
except sqlite3.Error:
return False
except (AttributeError, TypeError):
raise Exception("identifier argument must be a dictionary with a key of a seperate record attribute, and it's value is the record attributes known value. Ex: identifier={'puzzle_type': '3x3'}")
return True
return False
def get_record(self, oid=None):
"""
Gets the record with the specified oid, if no oid is specified,
then all records are returned
:param oid: int, None
:return: list[record_tuple]
"""
if self.table_name == "settings":
return self.get_all_records()
self.cursor.execute("SELECT * FROM times WHERE oid=:oid", {"oid": oid})
return self.cursor.fetchall()
def get_all_records(self):
"""
Gets every record in the database
:returns: list[record_tuple]
"""
with self.conn:
try:
self.cursor.execute(f"SELECT * FROM {self.table_name}")
except sqlite3.Error:
return []
return self.cursor.fetchall()
def delete_all_records(self):
"""
Deletes every record in the database
:returns: bool
"""
with self.conn:
try:
self.cursor.execute(f"DELETE FROM {self.table_name}")
self.create_table()
except sqlite3.Error:
return False
else:
self.cursor.execute("VACUUM")
return True
def close_connection(self):
"""
        Closes the connection to the database.
:returns: None
"""
self.conn.close()
self.closed = True
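
# A minimal usage sketch (not part of the original module). It assumes the
# CubeUtilities.Time constructor takes (time, scramble, date, DNF), matching
# the attributes insert_record reads above; "cubetimer.db" is a hypothetical
# database file.
if __name__ == "__main__":
    db = Database("times", "cubetimer.db")
    db.insert_record(Time(12.34, "R U R' U' F2", "2021-07-18", False))
    for row in db.get_all_records():
        print(row)
    db.close_connection()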
| [
"[email protected]"
]
| |
3a9baf4f9122069e89d3d3e9c447adba687d8866 | 7942342d457276bb266228d0236af647b3d55477 | /django/contrib/gis/gdal/geomtype.pyi | 4d825dbc2a0344758cb103a9b71335753e67e32a | [
"MIT"
]
| permissive | AsymmetricVentures/mypy-django | 847c4e521ce4dec9a10a1574f9c32b234dafd00b | f6e489f5cf5672ecede323132665ccc6306f50b8 | refs/heads/master | 2020-06-30T01:53:44.434394 | 2016-12-22T22:45:50 | 2016-12-22T22:45:50 | 74,397,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | pyi | # Stubs for django.contrib.gis.gdal.geomtype (Python 3.6)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any
class OGRGeomType:
wkb25bit = ... # type: int
num = ... # type: Any
def __init__(self, type_input) -> None: ...
def __eq__(self, other): ...
def __ne__(self, other): ...
@property
def name(self): ...
@property
def django(self): ...
def to_multi(self): ...
| [
"[email protected]"
]
| |
5f20947d37c40b225caf658aa24de35a3409eda0 | 1e9ad304868c2bda918c19eba3d7b122bac3923b | /kubernetes/client/models/v1_scale_spec.py | 4cbe43889993ed0f39cd92d9f358c3267a860626 | [
"Apache-2.0"
]
| permissive | pineking/client-python | c77e5bd3d476ac852e6dffa96056008baa0f597f | 74a64d7325518f4298600d4bb300f92843c29347 | refs/heads/master | 2021-01-22T22:16:27.368406 | 2017-03-15T08:21:21 | 2017-03-15T08:21:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,994 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.1-660c2a2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ScaleSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, replicas=None):
"""
V1ScaleSpec - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'replicas': 'int'
}
self.attribute_map = {
'replicas': 'replicas'
}
self._replicas = replicas
@property
def replicas(self):
"""
Gets the replicas of this V1ScaleSpec.
desired number of instances for the scaled object.
:return: The replicas of this V1ScaleSpec.
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""
Sets the replicas of this V1ScaleSpec.
desired number of instances for the scaled object.
:param replicas: The replicas of this V1ScaleSpec.
:type: int
"""
self._replicas = replicas
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
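
# A brief usage sketch (not part of the generated client): construct a spec
# and serialize it to the dict form submitted to the API.
if __name__ == "__main__":
    spec = V1ScaleSpec(replicas=3)
    print(spec.to_dict())                    # {'replicas': 3}
    print(spec == V1ScaleSpec(replicas=3))   # True: equality compares __dict__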
| [
"[email protected]"
]
| |
b3d3277c535eaa6f706a071f5b547c8b412419d8 | c1d03f41b6c80ef1e0a42b1bb710ba90d680e4c2 | /tests/unit/test_xmlgen.py | f4224d6cded6be0fe94660d2a9f52f5f3283b56e | [
"BSD-3-Clause"
]
| permissive | boxingbeetle/softfab | 4f96fc389dec5cd3dc987a427c2f491a19cbbef4 | 0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14 | refs/heads/master | 2021-06-22T15:42:38.857018 | 2020-11-23T22:53:21 | 2020-11-23T22:53:21 | 169,245,088 | 20 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,294 | py | # SPDX-License-Identifier: BSD-3-Clause
"""Test XML generation module."""
from pytest import raises
from softfab.xmlgen import parseHTML, xhtml
# Test text inside the <script> XHTML element:
def testScriptNoEscape():
"""Check that no escaping is performed when it is not necessary."""
text = 'if (a > b) return c[3];'
assert xhtml.script[text].flattenXML() == (
f'<script xmlns="http://www.w3.org/1999/xhtml">{text}</script>'
)
def testScriptCDATA():
"""Check that a CDATA block is used when necessary."""
text = 'if (a < b) return c[3];'
assert xhtml.script[text].flattenXML() == (
f'<script xmlns="http://www.w3.org/1999/xhtml">'
f'/*<![CDATA[*/{text}/*]]>*/'
f'</script>'
)
text = 'if (a = b) return c & 3;'
assert xhtml.script[text].flattenXML() == (
f'<script xmlns="http://www.w3.org/1999/xhtml">'
f'/*<![CDATA[*/{text}/*]]>*/'
f'</script>'
)
def testScriptCDATAEnd():
"""Check that a CDATA block is not closed too early."""
text = 'var f = x[y[i]]>0 && z<0;'
# ^^^-- CDATA end marker
assert xhtml.script[text].flattenXML() == (
'<script xmlns="http://www.w3.org/1999/xhtml">'
'/*<![CDATA[*/var f = x[y[i]]\\>0 && z<0;/*]]>*/'
'</script>'
)
def testScriptTagEnd():
"""Check that a <script> tag is not closed too early."""
text = 'var s = "</script>";'
assert xhtml.script[text].flattenXML() == (
'<script xmlns="http://www.w3.org/1999/xhtml">'
'/*<![CDATA[*/var s = "<\\/script>";/*]]>*/'
'</script>'
)
# Test text inside the <style> XHTML element.
# Since <script> is handled in the same way, we test fewer scenarios here.
def testStyleNoEscape():
"""Check that no escaping is performed when it is not necessary."""
text = '.nav > a[href] { color: #FFC000 }'
assert xhtml.style[text].flattenXML() == (
f'<style xmlns="http://www.w3.org/1999/xhtml">{text}</style>'
)
def testStyleCDATA():
"""Check that a CDATA block is used when necessary."""
text = 'book.c /* K&R */'
assert xhtml.style[text].flattenXML() == (
f'<style xmlns="http://www.w3.org/1999/xhtml">'
f'/*<![CDATA[*/{text}/*]]>*/'
f'</style>'
)
def testStyleTagEnd():
"""Check that a <style> tag is not closed too early."""
text = '@import url(more.css); /* </StyLe */'
# HTML tags are case-insensitive: ^^^^^
assert xhtml.style[text].flattenXML() == (
'<style xmlns="http://www.w3.org/1999/xhtml">'
'/*<![CDATA[*/@import url(more.css); /* <\\/StyLe *//*]]>*/'
'</style>'
)
# Test parsing of HTML fragments:
def testBasic():
"""Check whether basic functionality works."""
parsed = parseHTML('<h1>Hello!</h1>')
assert parsed.flattenXML() == (
'<h1 xmlns="http://www.w3.org/1999/xhtml">Hello!</h1>'
)
def testMultiTopLevel():
"""Check whether we can handle multiple top-level tags."""
parsed = parseHTML('<h1>Hello!</h1><h1>Goodbye!</h1>')
assert parsed.flattenXML() == (
'<h1 xmlns="http://www.w3.org/1999/xhtml">Hello!</h1>'
'<h1 xmlns="http://www.w3.org/1999/xhtml">Goodbye!</h1>'
)
def testNested():
"""Check handling of nested content."""
parsed = parseHTML('<p>Text with <i>nested</i> tags.</p>')
assert parsed.flattenXML() == (
'<p xmlns="http://www.w3.org/1999/xhtml">'
'Text with <i>nested</i> tags.'
'</p>'
)
def testVoid():
"""Check handling of void elements."""
parsed = parseHTML('<p>Text with<br/>a void element.</p>')
assert parsed.flattenXML() == (
'<p xmlns="http://www.w3.org/1999/xhtml">'
'Text with<br/>a void element.'
'</p>'
)
def testIgnorePI():
"""Check parsing of processing instruction with no handlers."""
parsed = parseHTML('<p>A processing <?jump> instruction.</p>')
assert parsed.flattenXML() == (
'<p xmlns="http://www.w3.org/1999/xhtml">'
'A processing instruction.'
'</p>'
)
def testRaisePI():
"""Check propagation of handler exceptions."""
def handler(name, arg):
raise KeyError(f'unknown PI: {name}')
with raises(KeyError):
parseHTML(
'<p>A processing <?jump> instruction.</p>',
piHandler=handler
)
def testNoArgPI():
"""Check parsing of processing instruction with no arguments."""
def handler(name, arg):
assert name == 'jump'
assert arg == ''
return xhtml.br
parsed = parseHTML(
'<p>A processing <?jump> instruction.</p>',
piHandler=handler
)
assert parsed.flattenXML() == (
'<p xmlns="http://www.w3.org/1999/xhtml">'
'A processing <br/> instruction.'
'</p>'
)
def testArgPI():
"""Check parsing of processing instruction with an argument."""
def handler(name, arg):
assert name == 'jump'
return xhtml.span[arg]
parsed = parseHTML(
'<p>A processing <?jump a little higher> instruction.</p>',
piHandler=handler
)
assert parsed.flattenXML() == (
'<p xmlns="http://www.w3.org/1999/xhtml">'
'A processing <span>a little higher</span> instruction.'
'</p>'
)
def testIgnoreXMLDecl():
"""Check parsing of XML declaration."""
def handler(name, arg):
assert False
parsed = parseHTML(
'<?xml version="1.0" encoding="UTF-8" ?>'
'<html><body><p>XHTML document.</p></body></html>',
piHandler=handler
)
assert parsed.flattenXML() == (
'<html xmlns="http://www.w3.org/1999/xhtml">'
'<body><p>XHTML document.</p></body>'
'</html>'
)
def testIgnoreXMLSyntax():
"""Check parsing of a PI using XML syntax (question mark at end)."""
def handler(name, arg):
assert name == 'jump'
return arg.upper()
parsed = parseHTML(
'<p>A processing <?jump lazy fox?> instruction.</p>',
piHandler=handler
)
assert parsed.flattenXML() == (
'<p xmlns="http://www.w3.org/1999/xhtml">'
'A processing LAZY FOX instruction.'
'</p>'
)
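
# A usage note (assuming the standard pytest layout the file path suggests):
# the suite can be run from the repository root with
#   pytest tests/unit/test_xmlgen.py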
| [
"[email protected]"
]
| |
9074795f04fffda1859ceabffe3265b9dad61ac4 | c7cba1dad777f461ea546d0437528c985be3c051 | /client.py | 559f6546c5344baecc2df329d11dee988617cc63 | [
"MIT"
]
| permissive | elliotthwang/NLU | 000127b561c5b99340b04bf78aa65ff6ea28c79a | 0e6a96e4c2f363beb4241b4371244a5229e72811 | refs/heads/master | 2022-01-12T06:51:00.036787 | 2018-10-07T21:56:15 | 2018-10-07T21:56:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,038 | py | ############################################################################################
#
# The MIT License (MIT)
#
# GeniSys NLU Engine API Client
# Copyright (C) 2018 Adam Milton-Barker (AdamMiltonBarker.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Title: GeniSys NLU Engine API Client
# Description: API client for communicating with the GeniSys AI NLU API endpoint
# Configuration: required/confs.json
# Last Modified: 2018-09-08
#
# Example Usage:
#
# $ python3 client.py CLASSIFY 1 "Do you know what I am saying?"
#
############################################################################################
import sys, requests, json
from tools.Helpers import Helpers
from tools.Logging import Logging
class Client():
def __init__(self, user):
self.Helpers = Helpers()
self.Logging = Logging()
self._confs = self.Helpers.loadConfigs()
self.LogFile = self.Logging.setLogFile(self._confs["AI"]["Logs"]+"Client/")
self.apiUrl = self._confs["AI"]["FQDN"] + "/communicate/infer/"+user
self.headers = {"content-type": 'application/json'}
self.Logging.logMessage(
self.LogFile,
"CLIENT",
"INFO",
"GeniSys AI Client Ready")
if __name__ == "__main__":
if sys.argv[1] == "CLASSIFY":
Client = Client(sys.argv[2])
data = {"query": str(sys.argv[3])}
Client.Logging.logMessage(
Client.LogFile,
"CLIENT",
"INFO",
"Sending string for classification...")
response = requests.post(
Client.apiUrl,
data=json.dumps(data),
headers=Client.headers)
Client.Logging.logMessage(
Client.LogFile,
"CLIENT",
"OK",
"Response: "+str(response)) | [
"[email protected]"
]
| |
ca8705cc1f1359d399708435066d644118c8025c | eba283c7b7d07c9ff15abee322da8fea460ea6be | /__init__.py | a81e1e6e897d836c409125c7fc0208faa64f920a | []
| no_license | ROB-Seismology/layeredbasemap | 5bfa3daad9b2e47a1fea35c652309541ac88ac23 | 122464656d5534798c4bba38cdda2638e7d8948f | refs/heads/master | 2021-01-20T17:33:02.596090 | 2020-12-16T10:30:54 | 2020-12-16T10:30:54 | 90,877,746 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py | """
layeredbasemap
Module to create maps with Basemap using the GIS layer philosophy,
where each layer is defined by a dataset and style.
Author: Kris Vanneste, Royal Observatory of Belgium
"""
from __future__ import absolute_import, division, print_function, unicode_literals
## Make relative imports work in Python 3
import importlib
## Reloading mechanism
try:
reloading
except NameError:
## Module is imported for the first time
reloading = False
else:
## Module is reloaded
reloading = True
try:
## Python 3
from importlib import reload
except ImportError:
## Python 2
pass
## Test GDAL environment
import os
#gdal_keys = ["GDAL_DATA", "GDAL_DRIVER_PATH"]
gdal_keys = ["GDAL_DATA"]
for key in gdal_keys:
if not key in os.environ.keys():
print("Warning: %s environment variable not set. This may cause errors" % key)
elif not os.path.exists(os.environ[key]):
print("Warning: %s points to non-existing directory %s" % (key, os.environ[key]))
## Import submodules
## styles
if not reloading:
styles = importlib.import_module('.styles', package=__name__)
else:
reload(styles)
from .styles import *
## data_types
if not reloading:
data_types = importlib.import_module('.data_types', package=__name__)
else:
reload(data_types)
from .data_types import *
## cm
if not reloading:
cm = importlib.import_module('.cm', package=__name__)
else:
reload(cm)
## layered_basemap
if not reloading:
layered_basemap = importlib.import_module('.layered_basemap', package=__name__)
else:
reload(layered_basemap)
from .layered_basemap import *
| [
"[email protected]"
]
| |
b20d17916565894c0ad9d4c6695c25d8b0ded9b1 | 5b5d46b4a47ab365688af03afdbec24e885a2c90 | /21/21.py | 19a6901a33b382a6d732eace82edb63fc3f53e03 | []
| no_license | CA2528357431/python-base--Data-Structures | e9e24717ae016c4ca4a15805f261fd48f377ac6b | dccbcb27d82f2264947458686900addf2b83faad | refs/heads/main | 2023-07-04T08:32:52.551200 | 2021-07-30T16:21:31 | 2021-07-30T16:21:31 | 386,671,623 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,829 | py | # Binary tree
# non-recursive traversal is covered in 27
class tree:
def __init__(self, root, left=None, right=None):
self.nodes = []
self.root = root
self.left = left
self.right = right
self.data = None
        # root serves as the ordering key
        # data holds the payload
        # the binary-tree examples that follow carry no data
@property
def lisp(self):
lisp = [self.root, None, None]
if self.left is not None:
lisp[1] = self.left.lisp
if self.right is not None:
lisp[2] = self.right.lisp
return lisp
    # Lisp-style notation
def __str__(self):
return str(self.lisp)
    # Three depth-first traversals,
    # i.e. the three tour orders.
    # A tour is always some combination of root, left tour, and right tour.
def first(self):
l = []
r = []
if self.left is not None:
l = self.left.first()
if self.right is not None:
r = self.right.first()
res = [self.root] + l + r
return res
'''
def first(self):
res = []
cur = self
def do(cur):
if cur is not None:
res.append(cur.root)
do(cur.left)
do(cur.right)
do(cur)
return res
'''
def middle(self):
l = []
r = []
if self.left is not None:
l = self.left.middle()
if self.right is not None:
r = self.right.middle()
res = l + [self.root] + r
return res
'''
def middle(self):
res = []
cur = self
def do(cur):
if cur is not None:
do(cur.left)
res.append(cur.root)
do(cur.right)
do(cur)
return res
'''
def last(self):
l = []
r = []
if self.left is not None:
l = self.left.last()
if self.right is not None:
r = self.right.last()
res = l + r + [self.root]
return res
'''
def last(self):
res = []
cur = self
def do(cur):
if cur is not None:
do(cur.left)
do(cur.right)
res.append(cur.root)
do(cur)
return res
'''
    # One breadth-first (level-order) traversal
def layer(self):
res = []
queue = [self]
# queue中同层的数据相连
while queue:
cur = queue[0]
queue.pop(0)
res.append(cur.root)
for x in (cur.left,cur.right):
if x is not None:
queue.append(x)
return res
a = tree(1)
b = tree(2)
c = tree(3, a, b)
d = tree(6)
e = tree(4)
f = tree(10, d, e)
g = tree(13, c, f)
print(g.first())
print(g.middle())
print(g.last())
print(g.layer())
| [
"[email protected]"
]
| |
7b610bf2dc37263d332476c74cca4f006e5c126c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02628/s221826573.py | 8d9cc91fbf2407d05ab712842180953fe7ae11f1 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | N, K = map(int, input().split())
p_list = list(map(int, input().split()))
p_list = sorted(p_list)
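# Greedy: with prices sorted ascending, the K cheapest items minimize the total.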
print(sum(p_list[:K])) | [
"[email protected]"
]
| |
39d9972ace9b2b675fc010522f98d7a0c2e20feb | e7a5e140ccacc10a4c51b66fa5942974330cce2c | /py_insightvm_sdk/models/vulnerability.py | 5ca229c4badf76227f09dee1e06eaf8e7fb2b306 | [
"Apache-2.0"
]
| permissive | greenpau/py_insightvm_sdk | 38864c7e88000181de5c09302b292b01d90bb88c | bd881f26e14cb9f0f9c47927469ec992de9de8e6 | refs/heads/master | 2020-04-21T08:22:31.431529 | 2020-02-27T02:25:46 | 2020-02-27T02:25:46 | 169,417,392 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65,459 | py | # coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. 
| `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. 
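As an illustration of creating a resource on a collection (hypothetical endpoint and request body; the real payload depends on the resource type): ```bash curl -k -u username:password -H \"Content-Type: application/json\" -X POST -d '{ \"name\": \"Example Site\" }' https://<host>:<port>/api/3/sites ``` A successful create returns `201 CREATED` with a `Location` header pointing at the new resource, as described above.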
### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). 
The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. 
| | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. | Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. 
This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-like` ` not-like` | | `container-status` | `is` ` is-not` | | `containers` | `are` | | `criticality-tag` | `is` ` is-not` ` is-greater-than` ` is-less-than` ` is-applied` ` is-not-applied` | | `custom-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `cve` | `is` ` is-not` ` contains` ` does-not-contain` | | `cvss-access-complexity` | `is` ` is-not` | | `cvss-authentication-required` | `is` ` is-not` | | `cvss-access-vector` | `is` ` is-not` | | `cvss-availability-impact` | `is` ` is-not` | | `cvss-confidentiality-impact` | `is` ` is-not` | | `cvss-integrity-impact` | `is` ` is-not` | | `cvss-v3-confidentiality-impact` | `is` ` is-not` | | `cvss-v3-integrity-impact` | `is` ` is-not` | | `cvss-v3-availability-impact` | `is` ` is-not` | | `cvss-v3-attack-vector` | `is` ` is-not` | | `cvss-v3-attack-complexity` | `is` ` is-not` | | `cvss-v3-user-interaction` | `is` ` is-not` | | `cvss-v3-privileges-required` | `is` ` is-not` | | `host-name` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-empty` ` is-not-empty` ` is-like` ` not-like` | | `host-type` | `in` ` not-in` | | `ip-address` | `is` ` is-not` ` in-range` ` not-in-range` ` is-like` ` not-like` | | `ip-address-type` | `in` ` not-in` | | `last-scan-date` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `location-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `open-ports` | `is` ` is-not` ` in-range` | | `operating-system` | `contains` ` does-not-contain` ` is-empty` ` is-not-empty` | | `owner-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is-not` ` in-range` ` greater-than` ` less-than` | | `service-name` | `contains` ` does-not-contain` | | `site-id` | `in` ` not-in` | | `software` | `contains` ` does-not-contain` | | `vAsset-cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | 
`vAsset-datacenter` | `is` ` is-not` | | `vAsset-host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `vAsset-power-state` | `in` ` not-in` | | `vAsset-resource-pool-path` | `contains` ` does-not-contain` | | `vulnerability-assessed` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vulnerability-category` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` | | `vulnerability-cvss-v3-score` | `is` ` is-not` | | `vulnerability-cvss-score` | `is` ` is-not` ` in-range` ` is-greater-than` ` is-less-than` | | `vulnerability-exposures` | `includes` ` does-not-include` | | `vulnerability-title` | `contains` ` does-not-contain` ` is` ` is-not` ` starts-with` ` ends-with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | `cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | 
----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `numeric` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate, link objects may also contain additional properties beyond the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
OpenAPI spec version: 3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from py_insightvm_sdk.models.content_description import ContentDescription # noqa: F401,E501
from py_insightvm_sdk.models.link import Link # noqa: F401,E501
from py_insightvm_sdk.models.pci import PCI # noqa: F401,E501
from py_insightvm_sdk.models.vulnerability_cvss import VulnerabilityCvss # noqa: F401,E501
class Vulnerability(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'added': 'str',
'categories': 'list[str]',
'cves': 'list[str]',
'cvss': 'VulnerabilityCvss',
'denial_of_service': 'bool',
'description': 'ContentDescription',
'exploits': 'int',
'id': 'str',
'links': 'list[Link]',
'malware_kits': 'int',
'modified': 'str',
'pci': 'PCI',
'published': 'str',
'risk_score': 'float',
'severity': 'str',
'severity_score': 'int',
'title': 'str'
}
attribute_map = {
'added': 'added',
'categories': 'categories',
'cves': 'cves',
'cvss': 'cvss',
'denial_of_service': 'denialOfService',
'description': 'description',
'exploits': 'exploits',
'id': 'id',
'links': 'links',
'malware_kits': 'malwareKits',
'modified': 'modified',
'pci': 'pci',
'published': 'published',
'risk_score': 'riskScore',
'severity': 'severity',
'severity_score': 'severityScore',
'title': 'title'
}
def __init__(self, added=None, categories=None, cves=None, cvss=None, denial_of_service=None, description=None, exploits=None, id=None, links=None, malware_kits=None, modified=None, pci=None, published=None, risk_score=None, severity=None, severity_score=None, title=None): # noqa: E501
"""Vulnerability - a model defined in Swagger""" # noqa: E501
self._added = None
self._categories = None
self._cves = None
self._cvss = None
self._denial_of_service = None
self._description = None
self._exploits = None
self._id = None
self._links = None
self._malware_kits = None
self._modified = None
self._pci = None
self._published = None
self._risk_score = None
self._severity = None
self._severity_score = None
self._title = None
self.discriminator = None
if added is not None:
self.added = added
if categories is not None:
self.categories = categories
if cves is not None:
self.cves = cves
if cvss is not None:
self.cvss = cvss
if denial_of_service is not None:
self.denial_of_service = denial_of_service
if description is not None:
self.description = description
if exploits is not None:
self.exploits = exploits
if id is not None:
self.id = id
if links is not None:
self.links = links
if malware_kits is not None:
self.malware_kits = malware_kits
if modified is not None:
self.modified = modified
if pci is not None:
self.pci = pci
if published is not None:
self.published = published
if risk_score is not None:
self.risk_score = risk_score
if severity is not None:
self.severity = severity
if severity_score is not None:
self.severity_score = severity_score
if title is not None:
self.title = title
@property
def added(self):
"""Gets the added of this Vulnerability. # noqa: E501
The date the vulnerability coverage was added. The format is an ISO 8601 date, `YYYY-MM-DD`. # noqa: E501
:return: The added of this Vulnerability. # noqa: E501
:rtype: str
"""
return self._added
@added.setter
def added(self, added):
"""Sets the added of this Vulnerability.
The date the vulnerability coverage was added. The format is an ISO 8601 date, `YYYY-MM-DD`. # noqa: E501
:param added: The added of this Vulnerability. # noqa: E501
:type: str
"""
self._added = added
@property
def categories(self):
"""Gets the categories of this Vulnerability. # noqa: E501
All vulnerability categories assigned to this vulnerability. # noqa: E501
:return: The categories of this Vulnerability. # noqa: E501
:rtype: list[str]
"""
return self._categories
@categories.setter
def categories(self, categories):
"""Sets the categories of this Vulnerability.
All vulnerability categories assigned to this vulnerability. # noqa: E501
:param categories: The categories of this Vulnerability. # noqa: E501
:type: list[str]
"""
self._categories = categories
@property
def cves(self):
"""Gets the cves of this Vulnerability. # noqa: E501
All <a target=\"_blank\" href=\"https://cve.mitre.org/\">CVE</a>s assigned to this vulnerability. # noqa: E501
:return: The cves of this Vulnerability. # noqa: E501
:rtype: list[str]
"""
return self._cves
@cves.setter
def cves(self, cves):
"""Sets the cves of this Vulnerability.
All <a target=\"_blank\" href=\"https://cve.mitre.org/\">CVE</a>s assigned to this vulnerability. # noqa: E501
:param cves: The cves of this Vulnerability. # noqa: E501
:type: list[str]
"""
self._cves = cves
@property
def cvss(self):
"""Gets the cvss of this Vulnerability. # noqa: E501
The CVSS vector(s) for the vulnerability. # noqa: E501
:return: The cvss of this Vulnerability. # noqa: E501
:rtype: VulnerabilityCvss
"""
return self._cvss
@cvss.setter
def cvss(self, cvss):
"""Sets the cvss of this Vulnerability.
The CVSS vector(s) for the vulnerability. # noqa: E501
:param cvss: The cvss of this Vulnerability. # noqa: E501
:type: VulnerabilityCvss
"""
self._cvss = cvss
@property
def denial_of_service(self):
"""Gets the denial_of_service of this Vulnerability. # noqa: E501
Whether the vulnerability can lead to Denial of Service (DoS). # noqa: E501
:return: The denial_of_service of this Vulnerability. # noqa: E501
:rtype: bool
"""
return self._denial_of_service
@denial_of_service.setter
def denial_of_service(self, denial_of_service):
"""Sets the denial_of_service of this Vulnerability.
Whether the vulnerability can lead to Denial of Service (DoS). # noqa: E501
:param denial_of_service: The denial_of_service of this Vulnerability. # noqa: E501
:type: bool
"""
self._denial_of_service = denial_of_service
@property
def description(self):
"""Gets the description of this Vulnerability. # noqa: E501
The description of the vulnerability. # noqa: E501
:return: The description of this Vulnerability. # noqa: E501
:rtype: ContentDescription
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Vulnerability.
The description of the vulnerability. # noqa: E501
:param description: The description of this Vulnerability. # noqa: E501
:type: ContentDescription
"""
self._description = description
@property
def exploits(self):
"""Gets the exploits of this Vulnerability. # noqa: E501
The exploits that can be used to exploit a vulnerability. # noqa: E501
:return: The exploits of this Vulnerability. # noqa: E501
:rtype: int
"""
return self._exploits
@exploits.setter
def exploits(self, exploits):
"""Sets the exploits of this Vulnerability.
The exploits that can be used to exploit a vulnerability. # noqa: E501
:param exploits: The exploits of this Vulnerability. # noqa: E501
:type: int
"""
self._exploits = exploits
@property
def id(self):
"""Gets the id of this Vulnerability. # noqa: E501
The identifier of the vulnerability. # noqa: E501
:return: The id of this Vulnerability. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Vulnerability.
The identifier of the vulnerability. # noqa: E501
:param id: The id of this Vulnerability. # noqa: E501
:type: str
"""
self._id = id
@property
def links(self):
"""Gets the links of this Vulnerability. # noqa: E501
Hypermedia links to corresponding or related resources. # noqa: E501
:return: The links of this Vulnerability. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this Vulnerability.
Hypermedia links to corresponding or related resources. # noqa: E501
:param links: The links of this Vulnerability. # noqa: E501
:type: list[Link]
"""
self._links = links
@property
def malware_kits(self):
"""Gets the malware_kits of this Vulnerability. # noqa: E501
The malware kits that are known to be used to exploit the vulnerability. # noqa: E501
:return: The malware_kits of this Vulnerability. # noqa: E501
:rtype: int
"""
return self._malware_kits
@malware_kits.setter
def malware_kits(self, malware_kits):
"""Sets the malware_kits of this Vulnerability.
The malware kits that are known to be used to exploit the vulnerability. # noqa: E501
:param malware_kits: The malware_kits of this Vulnerability. # noqa: E501
:type: int
"""
self._malware_kits = malware_kits
@property
def modified(self):
"""Gets the modified of this Vulnerability. # noqa: E501
The last date the vulnerability was modified. The format is an ISO 8601 date, `YYYY-MM-DD`. # noqa: E501
:return: The modified of this Vulnerability. # noqa: E501
:rtype: str
"""
return self._modified
@modified.setter
def modified(self, modified):
"""Sets the modified of this Vulnerability.
The last date the vulnerability was modified. The format is an ISO 8601 date, `YYYY-MM-DD`. # noqa: E501
:param modified: The modified of this Vulnerability. # noqa: E501
:type: str
"""
self._modified = modified
@property
def pci(self):
"""Gets the pci of this Vulnerability. # noqa: E501
Details the <a target=\"_blank\" href=\"https://www.pcisecuritystandards.org/\">Payment Card Industry (PCI)</a> details of the vulnerability. # noqa: E501
:return: The pci of this Vulnerability. # noqa: E501
:rtype: PCI
"""
return self._pci
@pci.setter
def pci(self, pci):
"""Sets the pci of this Vulnerability.
Details the <a target=\"_blank\" href=\"https://www.pcisecuritystandards.org/\">Payment Card Industry (PCI)</a> details of the vulnerability. # noqa: E501
:param pci: The pci of this Vulnerability. # noqa: E501
:type: PCI
"""
self._pci = pci
@property
def published(self):
"""Gets the published of this Vulnerability. # noqa: E501
The date the vulnerability was first published or announced. The format is an ISO 8601 date, `YYYY-MM-DD`. # noqa: E501
:return: The published of this Vulnerability. # noqa: E501
:rtype: str
"""
return self._published
@published.setter
def published(self, published):
"""Sets the published of this Vulnerability.
The date the vulnerability was first published or announced. The format is an ISO 8601 date, `YYYY-MM-DD`. # noqa: E501
:param published: The published of this Vulnerability. # noqa: E501
:type: str
"""
self._published = published
@property
def risk_score(self):
"""Gets the risk_score of this Vulnerability. # noqa: E501
        The risk score of the vulnerability, rounded to a maximum of two digits of precision. If using the default Rapid7 Real Risk™ model, this value ranges from 0-1000. # noqa: E501
:return: The risk_score of this Vulnerability. # noqa: E501
:rtype: float
"""
return self._risk_score
@risk_score.setter
def risk_score(self, risk_score):
"""Sets the risk_score of this Vulnerability.
        The risk score of the vulnerability, rounded to a maximum of two digits of precision. If using the default Rapid7 Real Risk™ model, this value ranges from 0-1000. # noqa: E501
:param risk_score: The risk_score of this Vulnerability. # noqa: E501
:type: float
"""
self._risk_score = risk_score
@property
def severity(self):
"""Gets the severity of this Vulnerability. # noqa: E501
The severity of the vulnerability, one of: `\"Moderate\"`, `\"Severe\"`, `\"Critical\"`. # noqa: E501
:return: The severity of this Vulnerability. # noqa: E501
:rtype: str
"""
return self._severity
@severity.setter
def severity(self, severity):
"""Sets the severity of this Vulnerability.
The severity of the vulnerability, one of: `\"Moderate\"`, `\"Severe\"`, `\"Critical\"`. # noqa: E501
:param severity: The severity of this Vulnerability. # noqa: E501
:type: str
"""
self._severity = severity
@property
def severity_score(self):
"""Gets the severity_score of this Vulnerability. # noqa: E501
The severity score of the vulnerability, on a scale of 0-10. # noqa: E501
:return: The severity_score of this Vulnerability. # noqa: E501
:rtype: int
"""
return self._severity_score
@severity_score.setter
def severity_score(self, severity_score):
"""Sets the severity_score of this Vulnerability.
The severity score of the vulnerability, on a scale of 0-10. # noqa: E501
:param severity_score: The severity_score of this Vulnerability. # noqa: E501
:type: int
"""
self._severity_score = severity_score
@property
def title(self):
"""Gets the title of this Vulnerability. # noqa: E501
The title (summary) of the vulnerability. # noqa: E501
:return: The title of this Vulnerability. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this Vulnerability.
The title (summary) of the vulnerability. # noqa: E501
:param title: The title of this Vulnerability. # noqa: E501
:type: str
"""
self._title = title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Vulnerability, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Vulnerability):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
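# --- Editor's sketch (not part of the generated file): minimal usage of the
# model above. All field values are made up for illustration.
if __name__ == '__main__':
    example = Vulnerability(
        id='example-vuln-id',            # hypothetical identifier
        title='Example vulnerability',
        severity='Severe',
        severity_score=7,
        risk_score=512.25,
    )
    print(example.to_str())
    # to_dict() keys mirror the __init__ keyword arguments, so a dict
    # round-trip reconstructs an equal instance.
    assert example == Vulnerability(**example.to_dict())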
| [
"[email protected]"
]
| |
5fdd01c76510a26587a3b1a59f24fc573d6df8f5 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/7dbcaa7c22297fe1b303/snippet.py | 2378306d1ede8dd7979bb02a73d1b3106a44283a | [
"MIT"
]
| permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 4,969 | py | #!/usr/bin/env python
"""
Pandoc filter to parse CriticMarkup into Spans for
Insertion and Deletion. The Docx writer will convert
these into Tracked Changes.
A comment immediately after a change will be parsed
for "author: The Author" and "date: 12-21-12", which
will be inserted into the Span as appropriate.
"""
from pandocfilters import Span, Str, RawInline, walk, attributes, stringify
import re
import sys
import json
regexes = {
'all': re.compile(r"([-+=~]{2}\}\{>>|\{[-+~>=]{2}|[-+=~<]{2}\}|~>)"),
# 'all': re.compile(r"(\{[-+~>=]{2}|[-+=~<]{2}\}|~>)"),
}
def parseMarks (key, value, format, meta):
if key == 'Str':
if regexes['all'].search(value):
items = regexes['all'].split(value, 1)
result = [
Str(items[0]),
RawInline('critic', items[1])]
result.extend(walk([Str(items[2])], parseMarks, format, meta))
return result
spanstart = {
'{++' : 'insertion',
'{--' : 'deletion',
'{==' : 'hilite',
'{>>' : 'comment',
'{~~' : 'subdelete'
}
spanend = {
'insertion' : '++}',
'deletion' : '--}',
'hilite' : '==}',
# 'comment' : '<<}',
}
spancomment = {
'insertion' : '++}{>>',
'deletion' : '--}{>>',
'hilite' : '==}{>>',
'subadd' : '~~}{>>',
}
def makeSpan (contents, classes = "", author = "", date = ""):
attrs = {'classes' : classes.split(), 'author' : author, 'date' : date}
return Span (attributes(attrs), contents)
def findAuthor (comment):
author = re.search(r"(author:|@)\s*([\w\s]+)", comment)
if author:
return author.group(2)
else:
return ""
def findDate (comment):
date = re.search(r"date:\s*(\S+)", comment)
if date:
return date.group(1)
else:
return ""
inspan = False
spantype = None
lasttype = None
spancontents = []
priorspan = []
def spanify (key, value, format, meta):
global inspan
global spantype
global lasttype
global spancontents
global priorspan
if inspan:
# pass
if key == 'RawInline' and value[0] == 'critic':
if value[1] == spanend.get(spantype, ""):
newspan = makeSpan(spancontents, spantype)
inspan = False
spantype = None
spancontents = []
return walk([newspan], spanify, format, meta)
elif spantype == 'subdelete' and value[1] == '~>':
priorspan.append({'type': 'deletion', 'contents': spancontents})
spancontents = []
spantype = 'subadd'
return []
elif spantype == 'subadd' and value[1] == '~~}':
delspan = makeSpan(priorspan[0]['contents'], 'deletion')
addspan = makeSpan(spancontents, 'insertion')
inspan = False
spantype = None
priorspan = []
spancontents = []
return walk([delspan, addspan], spanify, format, meta)
elif value[1] == spancomment.get(spantype, ""):
thistype = spantype
if thistype == 'subadd': thistype = 'insertion'
priorspan.append({'type': thistype, 'contents': spancontents})
spancontents = []
spantype = 'comment'
return []
elif value[1] == '<<}' and spantype == 'comment':
commentstring = stringify(spancontents)
result = []
# if len(priorspan) > 0:
author = findAuthor(commentstring)
date = findDate(commentstring)
for item in priorspan:
result.append(makeSpan(item['contents'], item['type'], author, date))
comment = "<!-- %s -->" % commentstring
result.append(RawInline('html', comment))
priorspan = []
spancontents = []
spantype = None
inspan = False
return walk(result, spanify, format, meta)
else:
spancontents.append({'t': key, 'c': value})
return []
else:
spancontents.append({'t': key, 'c': value})
return []
else:
if key == 'RawInline' and value[0] == 'critic':
thetype = spanstart.get(value[1], "")
if thetype:
spantype = thetype
inspan = True
spancontents = []
return []
else:
#this is a user error, do not parse
pass
else:
pass
if __name__ == "__main__":
doc = json.loads(sys.stdin.read())
if len(sys.argv) > 1:
format = sys.argv[1]
else:
format = ""
meta = doc[0]['unMeta']
parsed = walk(doc, parseMarks, format, meta)
altered = walk(parsed, spanify, format, meta)
json.dump(altered, sys.stdout)
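# Editor's sketch: how this filter is typically wired into pandoc (assumes the
# file is saved as an executable `criticmarkup.py`; names are illustrative):
#
#   pandoc draft.md --filter ./criticmarkup.py -o tracked.docx
#
# pandoc passes the writer name ("docx") as sys.argv[1] and streams the JSON
# AST through stdin/stdout, which is exactly what the __main__ block above
# expects. An equivalent manual pipeline:
#   pandoc -t json draft.md | ./criticmarkup.py docx | pandoc -f json -o tracked.docx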
| [
"[email protected]"
]
| |
4f388037513dc7157edd78c95a929b1b7d5c1ed8 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/6/usersdata/131/2399/submittedfiles/investimento.py | b734cf524df522049516f8e80f2ef98958d66a91 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | # -*- coding: utf-8 -*-
from __future__ import division
#START YOUR CODE HERE
#INPUT
a=input ('enter your 2016 balance: ')
#processing
b = float(a*0.045 + a)
c = float(b*0.045 + b)
d = float(c*0.045 + c)
e = float(d*0.045 + d)
f = float(e*0.045 + e)
g = float(f*0.045 + f)
h = float(g*0.045 + g)
i = float(h*0.045 + h)
j = float(i*0.045 + i)
k = float(j*0.045 + j)
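# Editor's sketch: the ten assignments above apply 4.5% compound growth once
# per year; a loop expresses the same computation more compactly:
# saldo = a
# for year in range(2017, 2027):
#     saldo = saldo * 1.045
#     print('your balance in %d %.2f' % (year, saldo))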
#output
print('your balance in 2017 %.2f' %(b))
print('your balance in 2018 %.2f' %(c))
print('your balance in 2019 %.2f' %(d))
print('your balance in 2020 %.2f' %(e))
print('your balance in 2021 %.2f' %(f))
print('your balance in 2022 %.2f' %(g))
print('your balance in 2023 %.2f' %(h))
print('your balance in 2024 %.2f' %(i))
print('your balance in 2025 %.2f' %(j))
print('your balance in 2026 %.2f' %(k)) | [
"[email protected]"
]
| |
3c327c89f0de7bec82025164c968faf2df12d343 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4191/codes/1716_2497.py | 08cd8e7141262da535ce7f98751d1c4b82b7ce4d | []
| no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | # Instituto de Computacao - UFAM
# Lab 04 - Ex 04
# 20 / 06 / 2016
qi = float(input("Initial amount: "))
tempo = int(input("Investment time: "))
juros = 4.0
saldo = qi # Accumulator variable
# Initial value of the counter variable
t = 0
rend=0
# Balance update
while(t<tempo):
rend = saldo * (juros/100)
saldo = saldo + rend
t =t+1
print(round(saldo, 2))
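# Editor's cross-check (compound-interest closed form): after the loop, saldo
# should equal qi * (1 + juros/100) ** tempo up to floating-point rounding:
# assert abs(saldo - qi * (1 + juros / 100) ** tempo) < 1e-6 * max(1.0, saldo)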
| [
"[email protected]"
]
| |
e9e6b193ada49c07eeba439047839ed6c513a166 | 7a31597f1359be11d2cc05d8107963f3dbe9e204 | /Image_recognition/utils/model_dict.py | c4a7e7c37d00cc8aae670e50e091d41bc1d6d1b9 | []
| no_license | LIMr1209/machine-learn | 9aac2b51a928a864ac3cf82368b3fe9694644cb2 | 56453dce6ae8ba5e7298dab99d5e6a6d114e4860 | refs/heads/master | 2022-07-12T14:17:07.536535 | 2021-12-20T06:57:54 | 2021-12-20T06:57:54 | 163,064,915 | 5 | 2 | null | 2020-08-31T03:09:10 | 2018-12-25T08:48:00 | Python | UTF-8 | Python | false | false | 252 | py | import torch as t
def save_oplaus():
    # Re-save only the model weights from the training checkpoint, dropping
    # optimizer state and other bookkeeping so the exported file is smaller.
    state_dict = {}
    checkpoint = t.load('../checkpoint/EfficientNet.pth.tar')
    state_dict['state_dict'] = checkpoint['state_dict']
    t.save(state_dict, '/opt/checkpoint/EfficientNet.pth')
save_oplaus()
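# Editor's sketch of how the slimmed checkpoint might be consumed later; the
# EfficientNet constructor below is hypothetical, substitute the project's
# actual model class:
# model = EfficientNet()
# model.load_state_dict(t.load('/opt/checkpoint/EfficientNet.pth')['state_dict'])
# model.eval()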
| [
"[email protected]"
]
| |
c5023ecc348a5f6d754ae717b924597515d9e466 | c24fa89450cccb48fcd481c3cfa475ee0e412e09 | /PythonTools/accToMatAcc.py | 9b41f081bd69b214a00fd824ead8d6cca2702378 | []
| no_license | PhoenixYanrongLi/CareEcoSystem_ServerCodeNew | e95d1c552cdcc70aac09482dfda63e253e01fcb0 | b627484694863c425483a04391eedc2ec2ec1098 | refs/heads/master | 2021-01-01T04:34:51.858543 | 2016-04-14T17:57:30 | 2016-04-14T17:57:30 | 56,258,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | __author__ = 'Brad'
import csv
import datetime
import scipy.io
import numpy
def writeFile(filename):
    # Parse one space-delimited accelerometer log and collect the timestamps
    # (milliseconds since the Unix epoch) and the raw XYZ samples.
    timeAr=[]
    accAr=[]
    with open(filename, 'r') as f:
reader = csv.reader(f, delimiter=" ")
for time, x, y, z, azimuth, pitch, roll in reader:
formatStr="%y-%m-%dT%H:%M:%S.%f"
timeC=datetime.datetime.strptime(time,formatStr)
epoch = datetime.datetime.utcfromtimestamp(0)
delta=timeC-epoch
delta=delta.total_seconds()*1000
if len(timeAr)==0:
timeAr=[delta]
accAr=numpy.array([float(x),float(y),float(z)])
else:
timeAr=numpy.vstack((timeAr,delta))
accAr=numpy.vstack([accAr,[float(x),float(y),float(z)]])
writeDict={'AccData':accAr,'UnixTime_ms':timeAr}
print accAr
scipy.io.savemat(filename+'_AccelerometerData.mat',writeDict)
filename='99000213875160_20141113-193740_MM_ACC_1103.txt'
writeFile(filename)
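# Editor's sketch: the export can be verified by loading the .mat file back.
# scipy.io returns 2-D arrays, so UnixTime_ms comes back as an (N, 1) column:
# mat = scipy.io.loadmat(filename + '_AccelerometerData.mat')
# print mat['AccData'].shape, mat['UnixTime_ms'].shape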
| [
"[email protected]"
]
| |
d81d21379e5af810c27b2b1d3e4c8f32d8faec6d | 9d454ae0d5dd1d7e96e904ced80ca502019bb659 | /198_rob.py | 9c17186c04b2ad05f74577de361aeef0ece28d64 | []
| no_license | zzz686970/leetcode-2018 | dad2c3db3b6360662a90ea709e58d7facec5c797 | 16e4343922041929bc3021e152093425066620bb | refs/heads/master | 2021-08-18T08:11:10.153394 | 2021-07-22T15:58:52 | 2021-07-22T15:58:52 | 135,581,395 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | def rob(nums):
    ## too naive
    # return max(sum(nums[0::2]), sum(nums[1::2]), sum(nums[0::3], sum(nums[1::3])))
    # Dynamic programming: l is the best loot up to two houses back, r is the
    # best loot up to the previous house.
    l = r = 0
    for n in nums:
        l, r = r, max(n + l, r)  # rob this house (n + l) or skip it (r)
    return r
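# Editor's addition: two more sanity checks for the DP above.
assert 12 == rob([2, 7, 9, 3, 1])  # rob houses 2 + 9 + 1
assert 4 == rob([1, 2, 3, 1])      # rob houses 1 + 3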
assert 4 == rob([2,1,1,2]) | [
"[email protected]"
]
| |
1eb4ea943bb10ccda036a8f2bbafcef91c5855ed | efd6a277c2d5bffdfba6ccb4d5efd555e652d29e | /chap2/2.12.py | f427f3f0b66eeba917d8798655d76ae107eb82bf | []
| no_license | CavalcanteLucas/cookbook | dd57583c8b5271879bb086783c12795d1c0a7ee8 | 09ac71e291571e3add8d23d79b1684b356702a40 | refs/heads/master | 2020-03-25T03:09:39.608599 | 2019-09-13T04:43:23 | 2019-09-13T04:43:23 | 143,325,952 | 0 | 0 | null | 2020-09-25T05:46:30 | 2018-08-02T17:32:08 | Python | UTF-8 | Python | false | false | 885 | py | # Sanitizing and Cleaning Up Text
s = 'pýtĥöñ\fis\tawesome\r\n'
s
remap = {
ord('\t') : ' ',
ord('\f') : ' ',
ord('\r') : None # Deleted
}
a = s.translate(remap)
a
import unicodedata
import sys
sys.maxunicode
cmb_chrs = dict.fromkeys(c for c in range(sys.maxunicode) if unicodedata.combining(chr(c)))
b = unicodedata.normalize('NFD', a)
b
b.translate(cmb_chrs)  # -> 'python is awesome\n' (combining accents removed; result not stored)
digitmap = { c: ord('0') + unicodedata.digit(chr(c))
for c in range(sys.maxunicode)
if unicodedata.category(chr(c)) == 'Nd'}
len(digitmap)
# Arabic digits
x = '\u0661\u0662\u0663'
x
x.translate(digitmap)  # -> '123'
a
b = unicodedata.normalize('NFD', a)
b
b.encode('ascii', 'ignore').decode('ascii')
# Discussion
# On text processing, the simpler the approach, the faster it runs: for plain
# substitutions like clean_space() below, str.replace() beats translate().
def clean_space(s):
s = s.replace('\r', '')
s = s.replace('\t', ' ')
s = s.replace('\f', ' ')
return s
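# Editor's check: for this input the replace()-based cleaner matches the
# remap translation table defined above.
assert clean_space(s) == s.translate(remap)  # both give 'pýtĥöñ is awesome\n'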
| [
"[email protected]"
]
| |
435ce25fccf4bd20dbf5ae423dd02ada727c70e2 | b07ea8c5a075e3c7e7a0f9aca6bec73a22cdb7df | /PART 1/ch03/10_is_anagram_using_ord.py | 469076cacf26d0facbbfc5e8a9ede66cabd8f11c | []
| no_license | jaeehooon/data_structure_and_algorithm_python | bb721bdbcff1804c04b944b4a01ed6be93124462 | 6d07438bfaaa1ec5283cb350ef4904eb94826c48 | refs/heads/master | 2023-02-21T10:08:20.765399 | 2021-01-22T13:37:11 | 2021-01-22T13:37:11 | 323,367,191 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | # 3.4.2 Anagram (2)
"""
ord() returns the integer representing the character's Unicode code point when
its argument is a Unicode object, and the byte value when the argument is an
8-bit (byte) string.
If the sums of ord() over all characters of two strings are equal, the two
strings are treated as anagrams.
"""
import string
def hash_func(astring):
"""
    :param astring: word to reduce to an integer hash
    :return: sum of ord() over the non-whitespace characters
"""
s = 0
for one in astring:
if one in string.whitespace:
continue
s += ord(one)
return s
def find_anagram_hash_function(word1, word2):
return hash_func(word1) == hash_func(word2)
def test_find_anagram_hash_function():
word1 = "buffy"
word2 = "bffyu"
word3 = "bffya"
assert(find_anagram_hash_function(word1, word2) is True)
assert(find_anagram_hash_function(word1, word3) is False)
    print("Test passed!")
if __name__ == '__main__':
test_find_anagram_hash_function()
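    # Editor's caveat: an ord() sum is not a reliable anagram test, because
    # different letters can add up to the same value:
    assert hash_func("ad") == hash_func("bc")  # 97+100 == 98+99, not anagrams
    # A collision-free alternative is sorted(word1) == sorted(word2).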
| [
"[email protected]"
]
| |
abec0a4a92dc068a00f9f27d0c21709406b6641f | e47b87905872d92458512b0eda435f53f90b19cf | /movies/migrations/0003_alter_movie_author.py | f15bf19bee735f007ed42db65755c2622c2f495c | []
| no_license | ephremworkeye/drf_demo | e08e2f2049b427497bad815e51247e27784b1f29 | 9f5ce84edd7841fd0456107d99485d2af44e1c49 | refs/heads/master | 2023-07-31T16:24:12.400218 | 2021-09-25T05:56:05 | 2021-09-25T05:56:05 | 409,107,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | # Generated by Django 3.2.7 on 2021-09-23 00:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('movies', '0002_alter_movie_author'),
]
operations = [
migrations.AlterField(
model_name='movie',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
]
| |
9ff318e046b87d76579e6d5b06d8f22e909203d4 | 1b596568ef6ced06173e60c71f01141682329ac4 | /version-example | 0c6ba046e0d538c2d3d1a402526ebec6ad7fb3c5 | []
| no_license | pfuntner/gists | 4eb1847ef22d3d9cb1e17e870a8434c376c4dbfc | 3322c922bd43480b4cc2759b1c31e5c76668c7ef | refs/heads/master | 2020-04-17T08:40:29.444378 | 2019-01-18T16:23:49 | 2019-01-18T16:23:49 | 166,421,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,957 | #! /usr/bin/env python
import os
import re
import sys
import logging
import argparse
import datetime
import subprocess
def run(cmd):
(rc, stdout, stderr) = (None, '', '')
if isinstance(cmd, basestring):
cmd = cmd.split()
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except Exception as e:
log.debug('Ignoring `{e!s}` from {cmd}'.format(**locals()))
else:
(stdout, stderr) = p.communicate()
rc = p.wait()
log.debug('{cmd}: {rc}, {stdout!r}, {stderr!r}'.format(**locals()))
if (rc == 0) and (not stdout):
rc = None
return (rc, stdout, stderr)
def get_version():
git_used = False
ret = '?'
dir = os.path.dirname(sys.argv[0])
base = os.path.basename(sys.argv[0])
cwd = os.getcwd()
try:
os.chdir(dir)
except:
pass
else:
(rc, stdout, stderr) = run(['git', 'log', '-1', base])
"""
commit {SHA1}
Author: {FIRST_NAME} {LAST_NAME} <{EMAIL_ADDRESS}>
Date: Wed Jan 16 09:32:03 2019 -0500
.
.
.
"""
match = re.search(r'^commit\s+(\S+).*\nDate:\s+(([A-Z][a-z]{2} ){2}[ 0123]\d (\d{2}:){2}\d{2} \d{4})', stdout, re.DOTALL)
log.debug('`git log -1` search groups: {groups}'.format(groups=match.groups() if match else None))
if match:
commit = match.group(1)[:6]
timestamp = datetime.datetime.strptime(match.group(2), '%a %b %d %H:%M:%S %Y')
log.debug('timestamp: {timestamp!s}'.format(**locals()))
(rc, stdout, stderr) = run('git branch')
match = re.search(r'\*\s(\S+)', stdout, re.DOTALL)
log.debug('`git branch` search groups: {groups}'.format(groups=match.groups() if match else None))
if match:
branch = match.group(1)
(rc, stdout, stderr) = run('git remote -v')
"""
origin https://github.com/pfuntner/gists.git (fetch)
"""
hits = list(re.finditer(r'(\S+)\s(https?://\S+)\s\(fetch\)', stdout))
log.debug('`git remote -v` hits: {hits}'.format(hits=[hit.groups() for hit in hits]))
if hits:
hits = ['{name}:{url}'.format(name=hit.group(1), url=hit.group(2)) for hit in hits]
ret = '{commit}, {branch}, {timestamp!s}, {hits}'.format(**locals())
git_used = True
os.chdir(cwd)
if not git_used:
ret = str(datetime.datetime.fromtimestamp(os.path.getmtime(sys.argv[0])))
return ret
logging.basicConfig(format='%(asctime)s %(levelname)s %(pathname)s:%(lineno)d %(msg)s')
log = logging.getLogger()
log.setLevel(logging.WARNING)
parser = argparse.ArgumentParser(description='Example of doing a nifty --version')
parser.add_argument('-v', '--verbose', dest='verbose', action='count', help='Print more messages')
parser.add_argument('--version', action='version', version=get_version(), help='See wonderful version information')
args = parser.parse_args()
log.setLevel(logging.WARNING - (args.verbose or 0) * 10)
# print repr(get_version())
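# Editor's sketch of the expected behaviour (values are illustrative):
#   $ ./version-example --version
#   1a2b3c, master, 2019-01-16 09:32:03, ['origin:https://github.com/pfuntner/gists.git']
# Outside a git work tree, get_version() falls back to the script's mtime.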
| [
"[email protected]"
]
| ||
d944222d39aa2c0f4eb6c53856e08e6f051fae7a | df541a802b2dfa89d3aab14af627358dc7c76e6e | /接口自动化/Frame5/httpUnittest.py | 21a2012f8446211b06c3e9b5b336e248861a73a5 | []
| no_license | gupan2018/PyAutomation | de966aff91f750c7207c9d3f3dfb488698492342 | 230aebe3eca5799c621673afb647d35a175c74f1 | refs/heads/master | 2021-09-07T19:44:20.710574 | 2017-12-22T15:58:23 | 2017-12-22T15:58:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,024 | py | __author__ = 'Administrator'
import unittest
import mysql.connector
class Http_Unittest(unittest.TestCase):
    def __init__(self, test_case_id, test_method, http_method, http, test_url, test_data, cursor):
super(Http_Unittest,self).__init__(test_method)
self.test_case_id = test_case_id
self.test_method = test_method
self.http = http
self.test_url = test_url
self.test_data = test_data
self.http_method = http_method
self.mobilephone = test_data["mobilephone"]
self.regname = test_data["regname"]
        self.cursor = cursor
def test_register(self):
if self.http_method == "GET":
response = self.http.get_req(self.test_url, self.test_data)
elif self.http_method == "POST":
response = self.http.post_req(self.test_url, self.test_data)
else:
print("error in class Http_Unittest")
try:
            # Store the execution result in the database
sql_insert = 'INSERT INTO test_result ' \
'(case_id, http_method, request_name, request_url, mobilephone, regname, test_method, test_desc, status, code, msg) ' \
'VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
            insert_data = (self.test_case_id, self.http_method ,'register',self.test_url, self.mobilephone, self.regname, self.test_method, "test the register API",response["status"], response["code"], response["msg"])
self.cursor.execute(sql_insert, insert_data)
self.cursor.execute("commit")
except mysql.connector.Error as e:
print(e)
self.cursor.execute("rollback")
try:
            self.assertEqual(response["code"], "10001", "register request failed")
except AssertionError as e:
print(str(e))
#pass
#Below is the test code
'''
path_http = "http.conf"
http = HttpRequest(path_http)
test_Demo = Http_Unittest("test_register", "GET", http)
test_Demo.test_register()'''
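# Editor's note: the commented demo above predates the current __init__
# signature, which takes (test_case_id, test_method, http_method, http,
# test_url, test_data, cursor). A matching call would look like (values
# hypothetical):
# test_demo = Http_Unittest(1, "test_register", "GET", http,
#                           "http://test.example/api/member/register",
#                           {"mobilephone": "13800000000", "regname": "tester"},
#                           cursor)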
| [
"[email protected]"
]
| |
518bc4aa64f4e5aac711a4ed163b4a5f8f2a09f8 | 0cf269af0e6f8266c26b3bc68e57368e8c3d9edb | /src/outpost/django/thesis/migrations/0002_discipline_doctoralschool_thesis.py | 4dd83c63267a93ce7139bdb7ee8f8290691ea608 | [
"BSD-3-Clause",
"BSD-2-Clause"
]
| permissive | medunigraz/outpost.django.thesis | c1518aa516d2177b0cacf381432bcdde41f2b5e1 | 1f0dbaa6edb6d91216d9bd97c79ee8b3bbc153cc | refs/heads/master | 2021-09-25T16:47:59.469921 | 2020-08-04T19:16:07 | 2020-08-04T19:16:07 | 184,580,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,819 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-08 09:47
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [("thesis", "0001_initial")]
operations = [
migrations.CreateModel(
name="Discipline",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("name", models.CharField(blank=True, max_length=256, null=True)),
("number", models.CharField(blank=True, max_length=256, null=True)),
("thesistype", models.CharField(blank=True, max_length=256, null=True)),
],
options={"db_table": "thesis_discipline", "managed": False},
),
migrations.CreateModel(
name="DoctoralSchool",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("name", models.CharField(blank=True, max_length=256, null=True)),
(
"emails",
django.contrib.postgres.fields.ArrayField(
base_field=models.EmailField(
blank=True, max_length=254, null=True
),
size=None,
),
),
],
options={"db_table": "thesis_doctoralschool", "managed": False},
),
migrations.CreateModel(
name="Thesis",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("topic", models.CharField(blank=True, max_length=256, null=True)),
("created", models.DateTimeField(blank=True, null=True)),
("description", models.TextField(blank=True, null=True)),
("prerequisites", models.TextField(blank=True, null=True)),
("processstart", models.DateTimeField(blank=True, null=True)),
("goals", models.TextField(blank=True, null=True)),
("hypothesis", models.TextField(blank=True, null=True)),
("methods", models.TextField(blank=True, null=True)),
("schedule", models.TextField(blank=True, null=True)),
(
"milestones",
django.contrib.postgres.fields.ArrayField(
base_field=models.TextField(blank=True, null=True), size=None
),
),
],
options={
"db_table": "thesis_thesis",
"permissions": (("view_thesis", "View thesis"),),
"managed": False,
},
),
]
| [
"[email protected]"
]
| |
0b697bf8ee814996d74fb061231aeabb70a184c9 | 70fa6468c768d4ec9b4b14fc94fa785da557f1b5 | /lib/surface/compute/ssl_policies/describe.py | 0546d3f6604bd3a747040e4520dae448783faf92 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | kylewuolle/google-cloud-sdk | d43286ef646aec053ecd7eb58566ab2075e04e76 | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | refs/heads/master | 2020-04-20T22:10:41.774132 | 2019-01-26T09:29:26 | 2019-01-26T09:29:26 | 169,131,028 | 0 | 0 | NOASSERTION | 2019-02-04T19:04:40 | 2019-02-04T18:58:36 | Python | UTF-8 | Python | false | false | 2,150 | py | # -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to describe SSL policies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute.ssl_policies import ssl_policies_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.ssl_policies import flags
_SSL_POLICY_ARG = flags.GetSslPolicyArgument()
class Describe(base.DescribeCommand):
"""Describe a Google Compute Engine ssl policy.
*{command}* is used to display all data associated with a Google Compute
Engine SSL policy in a project.
An SSL policy specifies the server-side support for SSL features. An SSL
policy can be attached to a TargetHttpsProxy or a TargetSslProxy. This affects
connections between clients and the HTTPS or SSL proxy load balancer. SSL
policies do not affect the connection between the load balancers and the
backends.
"""
@staticmethod
def Args(parser):
_SSL_POLICY_ARG.AddArgument(parser, operation_type='describe')
def Run(self, args):
"""Issues the request to describe a SSL policy."""
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
helper = ssl_policies_utils.SslPolicyHelper(holder)
ref = _SSL_POLICY_ARG.ResolveAsResource(
args,
holder.resources,
scope_lister=compute_flags.GetDefaultScopeLister(holder.client))
return helper.Describe(ref)
| [
"[email protected]"
]
| |
065afde0ad990602c145f176bbbaf950115db7e7 | 4d03e487b60afc85d1f3372fe43f2a7b081f0e41 | /file_list/thumbnail_cache.py | 88b1134b24f906a6286c8193055960e31d5d945b | []
| no_license | hal1932/Explorer | d051bd0bb09b0952bad35deeeec0d4ad00947666 | 869ce3323aee499048f98f33910fc05126947942 | refs/heads/master | 2021-01-19T13:27:22.485124 | 2017-04-18T14:03:17 | 2017-04-18T14:03:17 | 82,392,096 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,203 | py | # encoding: utf-8
from lib import *
import cv2
import os
import threading
import Queue
class ThumbnailCache(QObject):
load_item_async = Signal()
def __init__(self, enable_load_async=False):
super(ThumbnailCache, self).__init__()
self.__items_dic = {}
if enable_load_async:
self.__load_queue = Queue.Queue()
self.__items_lock = threading.Lock()
self.__load_thread = threading.Thread(target=self.__load_async_impl)
self.__load_thread.daemon = True
self.__load_thread.start()
self.__enable_async = enable_load_async
def get_cached_pixmap(self, path):
if self.__enable_async:
with self.__items_lock:
if path not in self.__items_dic:
return None
image = self.__items_dic[path]
if isinstance(image, QPixmap):
return image
height, width, dim = image.shape
image = QImage(
image.data,
width, height, dim * width,
QImage.Format_RGB888)
pixmap = QPixmap.fromImage(image)
with self.__items_lock:
self.__items_dic[path] = pixmap
return pixmap
else:
if path not in self.__items_dic:
return None
return self.__items_dic[path]
def load(self, path, size):
if self.__enable_async:
raise ValueError('load_sync is not enabled')
if os.path.splitext(path)[1].lower() in ThumbnailCache.__image_exts:
pixmap = QPixmap(path)
pixmap_size = qt.fitting_scale_down(size, pixmap.size())
pixmap = pixmap.scaled(pixmap_size)
else:
icon = qt.get_file_icon(path)
size = icon.actualSize(size)
pixmap = icon.pixmap(size)
self.__items_dic[path] = pixmap
return pixmap
def load_async(self, path, size):
if not self.__enable_async:
raise ValueError('load_async is not enabled')
if os.path.splitext(path)[1].lower() in ThumbnailCache.__image_exts:
self.__load_queue.put((path, size))
else:
icon = qt.get_file_icon(path)
size = icon.actualSize(size)
pixmap = icon.pixmap(size)
with self.__items_lock:
self.__items_dic[path] = pixmap
def __load_async_impl(self):
while True:
path, size = self.__load_queue.get()
image = cv2.imread(path)
height, width = image.shape[:2]
if width != size.width() or height != size.height():
size = qt.fitting_scale_down(size, QSize(width, height))
image = cv2.resize(image, (size.width(), size.height()))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
with self.__items_lock:
self.__items_dic[path] = image
self.__load_queue.task_done()
self.load_item_async.emit()
print(path)
__initialized = False
__directory_thumbnail = None
__image_exts = (u'.png', u'.jpg', u'.jpeg', u'.gif', u'.bmp')
| [
"[email protected]"
]
| |
a666762fd34411a901f443d2ec06dd10658e150c | e787a46d354e3bf9666cb0d8b0c7d5f8ed0a8169 | /ccdproc/tests/make_mef.py | a871eaab6869b53192f486e57ffb6a99680fc3eb | []
| permissive | astropy/ccdproc | 25270fec41e64e635f7f22bcf340b2dee9ef88ac | 5af6ee5eee16a99591dd9fcbe81735e70c1cc681 | refs/heads/main | 2023-09-01T11:48:06.969582 | 2023-06-08T18:01:43 | 2023-06-08T18:01:43 | 13,384,007 | 81 | 88 | BSD-3-Clause | 2023-06-08T18:01:45 | 2013-10-07T13:05:51 | Python | UTF-8 | Python | false | false | 2,156 | py | import numpy as np
from astropy.utils.misc import NumpyRNGContext
from astropy.io import fits
from astropy.nddata import CCDData
from ccdproc import flat_correct
def make_sample_mef(science_name, flat_name, size=10, dtype='float32'):
"""
Make a multi-extension FITS image with random data
and a MEF flat.
Parameters
----------
science_name : str
Name of the science image created by this function.
flat_name : str
Name of the flat image created by this function.
size : int, optional
Size of each dimension of the image; images created are square.
dtype : str or numpy dtype, optional
dtype of the generated images.
"""
with NumpyRNGContext(1234):
number_of_image_extensions = 3
science_image = [fits.PrimaryHDU()]
flat_image = [fits.PrimaryHDU()]
for _ in range(number_of_image_extensions):
# Simulate a cloudy night, average pixel
# value of 100 with a read_noise of 1 electron.
data = np.random.normal(100., 1.0, [size, size]).astype(dtype)
hdu = fits.ImageHDU(data=data)
# Make a header that is at least somewhat realistic
hdu.header['unit'] = 'electron'
hdu.header['object'] = 'clouds'
hdu.header['exptime'] = 30.0
hdu.header['date-obs'] = '1928-07-23T21:03:27'
hdu.header['filter'] = 'B'
hdu.header['imagetyp'] = 'LIGHT'
science_image.append(hdu)
# Make a perfect flat
flat = np.ones_like(data, dtype=dtype)
flat_hdu = fits.ImageHDU(data=flat)
flat_hdu.header['unit'] = 'electron'
flat_hdu.header['filter'] = 'B'
flat_hdu.header['imagetyp'] = 'FLAT'
flat_hdu.header['date-obs'] = '1928-07-23T21:03:27'
flat_image.append(flat_hdu)
science_image = fits.HDUList(science_image)
science_image.writeto(science_name)
flat_image = fits.HDUList(flat_image)
flat_image.writeto(flat_name)
if __name__ == '__main__':
make_sample_mef('data/science-mef.fits', 'data/flat-mef.fits')
| [
"[email protected]"
]
| |
1433ed9a66cf8f030d0107507d432670a7d51f0f | 58baf0dd6a9aa51ef5a7cf4b0ee74c9cb0d2030f | /tools/testrunner/standard_runner.py | a59fe0839665fe1699fff41e3e9e4b837c952af2 | [
"bzip2-1.0.6",
"BSD-3-Clause",
"SunPro"
]
| permissive | eachLee/v8 | cce8d6e620625c97a2e969ee8a52cc5eb77444ce | 1abeb0caa21301f5ace7177711c4f09f2d6447d9 | refs/heads/master | 2021-08-14T08:21:44.549890 | 2017-11-14T20:35:38 | 2017-11-14T23:06:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,667 | py | #!/usr/bin/env python
#
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import OrderedDict
from os.path import join
import multiprocessing
import os
import random
import shlex
import subprocess
import sys
import time
# Adds testrunner to the path, hence it has to be imported at the beginning.
import base_runner
from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.local.variants import ALL_VARIANTS
from testrunner.objects import context
TIMEOUT_DEFAULT = 60
# Variants ordered by expected runtime (slowest first).
VARIANTS = ["default"]
MORE_VARIANTS = [
"stress",
"stress_incremental_marking",
"nooptimization",
"stress_asm_wasm",
"wasm_traps",
]
EXHAUSTIVE_VARIANTS = MORE_VARIANTS + VARIANTS
VARIANT_ALIASES = {
# The default for developer workstations.
"dev": VARIANTS,
# Additional variants, run on all bots.
"more": MORE_VARIANTS,
# TODO(machenbach): Deprecate this after the step is removed on infra side.
# Additional variants, run on a subset of bots.
"extra": [],
}
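# Editor's example: with the aliases above, --variants=dev,more expands to
# ["default"] + MORE_VARIANTS before deduplication in _process_options, i.e.
# default, stress, stress_incremental_marking, nooptimization,
# stress_asm_wasm and wasm_traps.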
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
"--concurrent-recompilation-queue-length=64",
"--concurrent-recompilation-delay=500",
"--concurrent-recompilation"]
# Double the timeout for these:
SLOW_ARCHS = ["arm",
"mips",
"mipsel",
"mips64",
"mips64el",
"s390",
"s390x",
"arm64"]
class StandardTestRunner(base_runner.BaseTestRunner):
def __init__(self):
super(StandardTestRunner, self).__init__()
self.sancov_dir = None
def _do_execute(self, options, args):
if options.swarming:
      # Swarming doesn't print how isolated commands are called. Let's make
# this less cryptic by printing it ourselves.
print ' '.join(sys.argv)
if utils.GuessOS() == "macos":
# TODO(machenbach): Temporary output for investigating hanging test
# driver on mac.
print "V8 related processes running on this host:"
try:
print subprocess.check_output(
"ps -e | egrep 'd8|cctest|unittests'", shell=True)
except Exception:
pass
suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
# Use default tests if no test configuration was provided at the cmd line.
if len(args) == 0:
args = ["default"]
# Expand arguments with grouped tests. The args should reflect the list
# of suites as otherwise filters would break.
def ExpandTestGroups(name):
if name in base_runner.TEST_MAP:
return [suite for suite in base_runner.TEST_MAP[name]]
else:
return [name]
args = reduce(lambda x, y: x + y,
[ExpandTestGroups(arg) for arg in args],
[])
args_suites = OrderedDict() # Used as set
for arg in args:
args_suites[arg.split('/')[0]] = True
suite_paths = [ s for s in args_suites if s in suite_paths ]
suites = []
for root in suite_paths:
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(base_runner.BASE_DIR, "test", root))
if suite:
suites.append(suite)
for s in suites:
s.PrepareSources()
try:
return self._execute(args, options, suites)
except KeyboardInterrupt:
return 2
def _add_parser_options(self, parser):
parser.add_option("--sancov-dir",
help="Directory where to collect coverage data")
parser.add_option("--cfi-vptr",
help="Run tests with UBSAN cfi_vptr option.",
default=False, action="store_true")
parser.add_option("--novfp3",
help="Indicates that V8 was compiled without VFP3"
" support",
default=False, action="store_true")
parser.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
parser.add_option("--slow-tests",
help="Regard slow tests (run|skip|dontcare)",
default="dontcare")
parser.add_option("--pass-fail-tests",
help="Regard pass|fail tests (run|skip|dontcare)",
default="dontcare")
parser.add_option("--gc-stress",
help="Switch on GC stress mode",
default=False, action="store_true")
parser.add_option("--command-prefix",
help="Prepended to each shell command used to run a"
" test",
default="")
parser.add_option("--extra-flags",
help="Additional flags to pass to each test command",
action="append", default=[])
parser.add_option("--isolates", help="Whether to test isolates",
default=False, action="store_true")
parser.add_option("-j", help="The number of parallel tasks to run",
default=0, type="int")
parser.add_option("--no-harness", "--noharness",
help="Run without test harness of a given suite",
default=False, action="store_true")
parser.add_option("--no-presubmit", "--nopresubmit",
help='Skip presubmit checks (deprecated)',
default=False, dest="no_presubmit", action="store_true")
parser.add_option("--no-sorting", "--nosorting",
help="Don't sort tests according to duration of last"
" run.",
default=False, dest="no_sorting", action="store_true")
parser.add_option("--no-variants", "--novariants",
help="Don't run any testing variants",
default=False, dest="no_variants", action="store_true")
parser.add_option("--variants",
help="Comma-separated list of testing variants;"
" default: \"%s\"" % ",".join(VARIANTS))
parser.add_option("--exhaustive-variants",
default=False, action="store_true",
help="Use exhaustive set of default variants:"
" \"%s\"" % ",".join(EXHAUSTIVE_VARIANTS))
parser.add_option("-p", "--progress",
help=("The style of progress indicator"
" (verbose, dots, color, mono)"),
choices=progress.PROGRESS_INDICATORS.keys(),
default="mono")
parser.add_option("--quickcheck", default=False, action="store_true",
help=("Quick check mode (skip slow tests)"))
parser.add_option("--report", help="Print a summary of the tests to be"
" run",
default=False, action="store_true")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
parser.add_option("--flakiness-results",
help="Path to a file for storing flakiness json.")
parser.add_option("--rerun-failures-count",
help=("Number of times to rerun each failing test case."
" Very slow tests will be rerun only once."),
default=0, type="int")
parser.add_option("--rerun-failures-max",
help="Maximum number of failing test cases to rerun.",
default=100, type="int")
parser.add_option("--shard-count",
help="Split testsuites into this number of shards",
default=1, type="int")
parser.add_option("--shard-run",
help="Run this shard from the split up tests.",
default=1, type="int")
parser.add_option("--dont-skip-slow-simulator-tests",
help="Don't skip more slow tests when using a"
" simulator.",
default=False, action="store_true",
dest="dont_skip_simulator_slow_tests")
parser.add_option("--swarming",
help="Indicates running test driver on swarming.",
default=False, action="store_true")
parser.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
parser.add_option("-t", "--timeout", help="Timeout in seconds",
default=TIMEOUT_DEFAULT, type="int")
parser.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
parser.add_option("--junitout", help="File name of the JUnit output")
parser.add_option("--junittestsuite",
help="The testsuite name in the JUnit output file",
default="v8tests")
parser.add_option("--random-seed", default=0, dest="random_seed",
help="Default seed for initializing random generator",
type=int)
parser.add_option("--random-seed-stress-count", default=1, type="int",
dest="random_seed_stress_count",
help="Number of runs with different random seeds")
def _process_options(self, options):
global VARIANTS
if options.sancov_dir:
self.sancov_dir = options.sancov_dir
if not os.path.exists(self.sancov_dir):
print("sancov-dir %s doesn't exist" % self.sancov_dir)
raise base_runner.TestRunnerError()
options.command_prefix = shlex.split(options.command_prefix)
options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
if options.gc_stress:
options.extra_flags += GC_STRESS_FLAGS
if self.build_config.asan:
options.extra_flags.append("--invoke-weak-callbacks")
options.extra_flags.append("--omit-quit")
if options.novfp3:
options.extra_flags.append("--noenable-vfp3")
if options.exhaustive_variants:
# This is used on many bots. It includes a larger set of default
# variants.
# Other options for manipulating variants still apply afterwards.
VARIANTS = EXHAUSTIVE_VARIANTS
# TODO(machenbach): Figure out how to test a bigger subset of variants on
# msan.
if self.build_config.msan:
VARIANTS = ["default"]
if options.j == 0:
options.j = multiprocessing.cpu_count()
if options.random_seed_stress_count <= 1 and options.random_seed == 0:
options.random_seed = self._random_seed()
def excl(*args):
"""Returns true if zero or one of multiple arguments are true."""
return reduce(lambda x, y: x + y, args) <= 1
if not excl(options.no_variants, bool(options.variants)):
print("Use only one of --no-variants or --variants.")
raise base_runner.TestRunnerError()
if options.quickcheck:
VARIANTS = ["default", "stress"]
options.slow_tests = "skip"
options.pass_fail_tests = "skip"
if options.no_variants:
VARIANTS = ["default"]
if options.variants:
VARIANTS = options.variants.split(",")
# Resolve variant aliases.
VARIANTS = reduce(
list.__add__,
(VARIANT_ALIASES.get(v, [v]) for v in VARIANTS),
[],
)
if not set(VARIANTS).issubset(ALL_VARIANTS):
print "All variants must be in %s" % str(ALL_VARIANTS)
raise base_runner.TestRunnerError()
if self.build_config.predictable:
VARIANTS = ["default"]
options.extra_flags.append("--predictable")
options.extra_flags.append("--verify_predictable")
options.extra_flags.append("--no-inline-new")
# Dedupe.
VARIANTS = list(set(VARIANTS))
def CheckTestMode(name, option):
      if option not in ["run", "skip", "dontcare"]:
        print("Unknown %s mode %s" % (name, option))
raise base_runner.TestRunnerError()
CheckTestMode("slow test", options.slow_tests)
CheckTestMode("pass|fail test", options.pass_fail_tests)
if self.build_config.no_i18n:
base_runner.TEST_MAP["bot_default"].remove("intl")
base_runner.TEST_MAP["default"].remove("intl")
def _setup_env(self):
super(StandardTestRunner, self)._setup_env()
symbolizer_option = self._get_external_symbolizer_option()
if self.sancov_dir:
os.environ['ASAN_OPTIONS'] = ":".join([
'coverage=1',
'coverage_dir=%s' % self.sancov_dir,
symbolizer_option,
"allow_user_segv_handler=1",
])
def _random_seed(self):
seed = 0
while not seed:
seed = random.SystemRandom().randint(-2147483648, 2147483647)
return seed
def _execute(self, args, options, suites):
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name))
# Populate context object.
# Simulators are slow, therefore allow a longer timeout.
if self.build_config.arch in SLOW_ARCHS:
options.timeout *= 2
options.timeout *= self.mode_options.timeout_scalefactor
if self.build_config.predictable:
# Predictable mode is slower.
options.timeout *= 2
ctx = context.Context(self.build_config.arch,
self.mode_options.execution_mode,
self.outdir,
self.mode_options.flags,
options.verbose,
options.timeout,
options.isolates,
options.command_prefix,
options.extra_flags,
self.build_config.no_i18n,
options.random_seed,
options.no_sorting,
options.rerun_failures_count,
options.rerun_failures_max,
self.build_config.predictable,
options.no_harness,
use_perf_data=not options.swarming,
sancov_dir=self.sancov_dir)
# TODO(all): Combine "simulator" and "simulator_run".
# TODO(machenbach): In GN we can derive simulator run from
# target_arch != v8_target_arch in the dumped build config.
simulator_run = (
not options.dont_skip_simulator_slow_tests and
self.build_config.arch in [
'arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', 'ppc',
'ppc64', 's390', 's390x'] and
bool(base_runner.ARCH_GUESS) and
self.build_config.arch != base_runner.ARCH_GUESS)
# Find available test suites and read test cases from them.
variables = {
"arch": self.build_config.arch,
"asan": self.build_config.asan,
"byteorder": sys.byteorder,
"dcheck_always_on": self.build_config.dcheck_always_on,
"deopt_fuzzer": False,
"gc_fuzzer": False,
"gc_stress": options.gc_stress,
"gcov_coverage": self.build_config.gcov_coverage,
"isolates": options.isolates,
"mode": self.mode_options.status_mode,
"msan": self.build_config.msan,
"no_harness": options.no_harness,
"no_i18n": self.build_config.no_i18n,
"no_snap": self.build_config.no_snap,
"novfp3": options.novfp3,
"predictable": self.build_config.predictable,
"simulator": utils.UseSimulator(self.build_config.arch),
"simulator_run": simulator_run,
"system": utils.GuessOS(),
"tsan": self.build_config.tsan,
"ubsan_vptr": self.build_config.ubsan_vptr,
}
all_tests = []
num_tests = 0
for s in suites:
s.ReadStatusFile(variables)
s.ReadTestCases(ctx)
if len(args) > 0:
s.FilterTestCasesByArgs(args)
all_tests += s.tests
# First filtering by status applying the generic rules (independent of
# variants).
s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
options.pass_fail_tests)
if options.cat:
verbose.PrintTestSource(s.tests)
continue
variant_gen = s.CreateVariantGenerator(VARIANTS)
variant_tests = [ t.CopyAddingFlags(v, flags)
for t in s.tests
for v in variant_gen.FilterVariantsByTest(t)
for flags in variant_gen.GetFlagSets(t, v) ]
if options.random_seed_stress_count > 1:
# Duplicate test for random seed stress mode.
def iter_seed_flags():
for _ in range(0, options.random_seed_stress_count):
# Use given random seed for all runs (set by default in
# execution.py) or a new random seed if none is specified.
if options.random_seed:
yield []
else:
yield ["--random-seed=%d" % self._random_seed()]
s.tests = [
t.CopyAddingFlags(t.variant, flags)
for t in variant_tests
for flags in iter_seed_flags()
]
else:
s.tests = variant_tests
# Second filtering by status applying the variant-dependent rules.
s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
options.pass_fail_tests, variants=True)
s.tests = self._shard_tests(s.tests, options)
num_tests += len(s.tests)
if options.cat:
return 0 # We're done here.
if options.report:
verbose.PrintReport(all_tests)
# Run the tests.
start_time = time.time()
progress_indicator = progress.IndicatorNotifier()
progress_indicator.Register(
progress.PROGRESS_INDICATORS[options.progress]())
if options.junitout:
progress_indicator.Register(progress.JUnitTestProgressIndicator(
options.junitout, options.junittestsuite))
if options.json_test_results:
progress_indicator.Register(progress.JsonTestProgressIndicator(
options.json_test_results,
self.build_config.arch,
self.mode_options.execution_mode,
ctx.random_seed))
if options.flakiness_results:
progress_indicator.Register(progress.FlakinessTestProgressIndicator(
options.flakiness_results))
runner = execution.Runner(suites, progress_indicator, ctx)
exit_code = runner.Run(options.j)
overall_duration = time.time() - start_time
if options.time:
verbose.PrintTestDurations(suites, overall_duration)
if num_tests == 0:
print("Warning: no tests were run!")
if exit_code == 1 and options.json_test_results:
print("Force exit code 0 after failures. Json test results file "
"generated with failure information.")
exit_code = 0
if self.sancov_dir:
# If tests ran with sanitizer coverage, merge coverage files in the end.
try:
print "Merging sancov files."
subprocess.check_call([
sys.executable,
join(
base_runner.BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
"--coverage-dir=%s" % self.sancov_dir])
      except Exception:
        sys.stderr.write("Error: Merging sancov files failed.\n")
exit_code = 1
return exit_code
def _shard_tests(self, tests, options):
# Read gtest shard configuration from environment (e.g. set by swarming).
# If none is present, use values passed on the command line.
shard_count = int(
os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
shard_run = os.environ.get('GTEST_SHARD_INDEX')
if shard_run is not None:
# The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
shard_run = int(shard_run) + 1
else:
shard_run = options.shard_run
if options.shard_count > 1:
# Log if a value was passed on the cmd line and it differs from the
# environment variables.
if options.shard_count != shard_count:
print("shard_count from cmd line differs from environment variable "
"GTEST_TOTAL_SHARDS")
if options.shard_run > 1 and options.shard_run != shard_run:
print("shard_run from cmd line differs from environment variable "
"GTEST_SHARD_INDEX")
if shard_count < 2:
return tests
if shard_run < 1 or shard_run > shard_count:
print "shard-run not a valid number, should be in [1:shard-count]"
print "defaulting back to running all tests"
return tests
count = 0
shard = []
for test in tests:
if count % shard_count == shard_run - 1:
shard.append(test)
count += 1
return shard
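  # Worked example of the sharding arithmetic above (illustrative only):
  # with shard_count=3 and shard_run=2, the kept tests are those whose
  # position satisfies count % shard_count == shard_run - 1, i.e. positions
  # 1, 4, 7, ...; for tests [t0, t1, t2, t3, t4, t5] the shard is [t1, t4].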
if __name__ == '__main__':
sys.exit(StandardTestRunner().execute())
| [
"[email protected]"
]
| |
c81f32fd9551171eca3f5765147895606e3573ff | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2759/60610/245707.py | 0d1aca295915e41bef3bdf5a5262c94f0f29f52f | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | num=input();
# For each of the queries, count the integers in [lo, hi] divisible by a or b.
for i in range(int(num)):
    numList = raw_input().split()
    lo, hi, a, b = (int(x) for x in numList)
    count = 0
    for j in range(lo, hi + 1):
        if j % a == 0 or j % b == 0:
            count += 1
    print(count) | [
"[email protected]"
]
| |
842240a63093b1ea755d9ef1824ad3d6792f4177 | 9e658976a6fdfbe031fc3452c69243dc66359f6a | /pythonExercise/four.py | b4fadd64058df0da705a77f23dd57f2e54e2cff1 | []
| no_license | zyyxydwl/Python-Learning | b2ed0f80121b284e5fb65cc212ccb84a0eb14cb6 | 6a5d36aa8805da647229fa747fa96452638d830e | refs/heads/master | 2018-10-04T23:42:21.076668 | 2018-06-08T03:19:33 | 2018-06-08T03:19:33 | 107,348,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#@Time    :2017/12/2 9:57
#@Author  :zhouyuyao
#@File    :four.py
# Problem: given a year, a month and a day, determine which day of the
# year that date is.
# Approach: taking March 5 as an example, add up the days of the months
# before March, then add the 5 days. Special case: in a leap year, one
# extra day must be added when the input month is greater than 2.
year = int(input('year:\n'))
month = int(input('month:\n'))
day = int(input('day:\n'))
# Cumulative days before the first of each month (non-leap year).
months = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334)
if 0 < month <= 12:
    total = months[month - 1] + day
    # Leap year: divisible by 400, or by 4 but not by 100.
    if ((year % 400 == 0) or ((year % 4 == 0) and (year % 100 != 0))) and month > 2:
        total += 1
    print('it is the %dth day.' % total)
else:
    print('data error')
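# Sample run (hypothetical input): year=2016, month=3, day=5 gives
# 59 + 5 = 64, plus one extra day because 2016 is a leap year and month > 2,
# so the script prints "it is the 65th day."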
| [
"[email protected]"
]
| |
5215a084044fb39cce1d96120767a0cf0684d3fe | 72fd9d49d89a9fc23ca896154fa54cba836c41ca | /tasks.py | 0ea3f55768a7233a886cb6707e616c923561b8c6 | [
"MIT"
]
| permissive | envobe/pydash | 15066046fbc07458c29b6b33b1489aaadda5d074 | 6c0f778f6a2535397706aab68636485702ff3565 | refs/heads/master | 2023-01-05T18:14:09.923169 | 2020-10-29T02:16:34 | 2020-10-29T02:16:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,593 | py | """
This module provides the CLI interface for invoke tasks.
All tasks can be executed from this file's directory using:
$ inv <task>
Where <task> is a function defined below with the @task decorator.
"""
from __future__ import print_function
from functools import partial
from invoke import Exit, UnexpectedExit, run as _run, task
PACKAGE_SOURCE = "src/pydash"
TEST_TARGETS = "{} tests".format(PACKAGE_SOURCE)
LINT_TARGETS = "{} tasks.py".format(PACKAGE_SOURCE)
EXIT_EXCEPTIONS = (Exit, UnexpectedExit, SystemExit)
# Set pty=True to enable colored output when available.
run = partial(_run, pty=True)
@task
def black(ctx, quiet=False):
"""Autoformat code using black."""
run("black {}".format(LINT_TARGETS), hide=quiet)
@task
def isort(ctx, quiet=False):
"""Autoformat Python imports."""
run("isort {}".format(LINT_TARGETS), hide=quiet)
@task
def docformatter(ctx):
"""Autoformat docstrings using docformatter."""
run(
"docformatter -r {} "
"--in-place --pre-summary-newline --wrap-descriptions 100 --wrap-summaries 100".format(
LINT_TARGETS
)
)
@task
def fmt(ctx):
"""Autoformat code and docstrings."""
print("Running docformatter")
docformatter(ctx)
print("Running isort")
isort(ctx, quiet=True)
print("Running black")
black(ctx, quiet=True)
@task
def flake8(ctx):
"""Check code for PEP8 violations using flake8."""
run("flake8 --format=pylint {}".format(LINT_TARGETS))
@task
def pylint(ctx):
"""Check code for static errors using pylint."""
run("pylint {}".format(LINT_TARGETS))
@task
def lint(ctx):
"""Run linters."""
linters = {"flake8": flake8, "pylint": pylint}
failures = []
for name, linter in linters.items():
print("Running {}".format(name))
try:
linter(ctx)
except EXIT_EXCEPTIONS:
failures.append(name)
result = "FAILED"
else:
result = "PASSED"
print("{}\n".format(result))
if failures:
failed = ", ".join(failures)
raise Exit("ERROR: Linters that failed: {}".format(failed))
@task(help={"args": "Override default pytest arguments"})
def unit(ctx, args="--cov={} {}".format(PACKAGE_SOURCE, TEST_TARGETS)):
"""Run unit tests using pytest."""
run("pytest {}".format(args))
@task
def test(ctx):
"""Run linters and tests."""
print("Building package")
build(ctx)
print("Building docs")
docs(ctx)
print("Running unit tests")
unit(ctx)
@task
def docs(ctx, serve=False, bind="127.0.0.1", port=8000):
"""Build docs."""
run("rm -rf docs/_build")
run("sphinx-build -q -W -b html docs docs/_build/html")
if serve:
print(
"Serving docs on {bind} port {port} (http://{bind}:{port}/) ...".format(
bind=bind, port=port
)
)
run(
"python -m http.server -b {bind} --directory docs/_build/html {port}".format(
bind=bind, port=port
),
hide=True,
)
@task
def build(ctx):
"""Build Python package."""
run("rm -rf dist build docs/_build")
run("python setup.py -q sdist bdist_wheel")
@task
def clean(ctx):
"""Remove temporary files related to development."""
run("find . -type f -name '*.py[cod]' -delete -o -type d -name __pycache__ -delete")
run("rm -rf .tox .coverage .cache .pytest_cache **/.egg* **/*.egg* dist build")
@task(pre=[build])
def release(ctx):
"""Release Python package."""
run("twine upload dist/*")
| [
"[email protected]"
]
| |
979286ffb46a102ab49df74f8383e498329ab818 | e5eec1428da1d24d3e9b86f5723c51cd2ca636cd | /dynamic_programming/백준/가장큰정사각형_백준.py | 4db92f7d4eee1a5199ea97cc10a52e85fa483fca | []
| no_license | jamwomsoo/Algorithm_prac | 3c36c381f59277721517d331a8f1640399d80c1d | 8393f3cc2f950214c47f3cf0b2c1271791f115d0 | refs/heads/master | 2023-06-09T06:49:14.739255 | 2021-06-18T06:41:01 | 2021-06-18T06:41:01 | 325,227,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | # Baekjoon DP problem, difficulty Gold 5
# A classic DP problem.
# dp[i][j] is the cell's own value plus the minimum of the cells above,
# to the left, and diagonally up-left
# -> because all sides of a square must have equal length
# 1 1 1      1 1 1
# 1 1 1  ->  1 2 2
# 1 1 1      1 2 3
n, m = map(int, input().split())
arr = []
dp = [[0]*(m+1) for _ in range(n+1)]
for i in range(n):
arr.append(list(map(int, input())))
for j in range(m):
dp[i+1][j+1] = arr[i][j]
for i in range(n+1):
for j in range(m+1):
if dp[i][j] != 0:
dp[i][j] += min(dp[i-1][j-1], dp[i][j-1], dp[i-1][j])
res = 0
for row in dp:
res = max(res, max(row))
print(res**2)
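# Worked example (hypothetical input):
#   3 3
#   111
#   111
#   111
# The bottom-right dp cell becomes 3, matching the diagram above, so the
# program prints 3**2 = 9.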
| [
"[email protected]"
]
| |
c5c561e0a70c1027a7c149cd7ffb4e4f5bb38d0f | 9a9f31265c65bec0060271cd337580e7b4f3a7e9 | /project/pokupka.py | 81819a11dce3c4e08f65498d21c98238d72d5f98 | []
| no_license | AnatolyDomrachev/1kurs | efaabde4852172b61d3584237611fe19b9faa462 | 84ed0dceb670ec64c958bf1901636a02baf8f533 | refs/heads/master | 2023-02-19T21:42:53.286190 | 2021-01-19T07:41:15 | 2021-01-19T07:41:15 | 292,637,199 | 0 | 1 | null | 2020-09-16T02:29:14 | 2020-09-03T17:32:29 | Python | UTF-8 | Python | false | false | 586 | py | import magazin
import etc

# The Magazin instance below shadows the imported module name; from here on,
# magazin.tovary refers to the instance's stock list.
magazin = magazin.Magazin('magazin.conf')
korzina = []         # shopping basket
net_v_magazine = []  # items that were not found in the store

def pokupka(spisok):
    """Buy every item in `spisok` that the store has in stock."""
    for slovar in spisok:
        est_v_mag = False
        for tovar in magazin.tovary:
            if slovar['name'] == tovar['name']:
                kupil = etc.beru(slovar, tovar)
                korzina.append(kupil)
                est_v_mag = True
        if not est_v_mag:
            print(slovar, "is not in the store")
    print("Bought:", korzina)
    print()
    print("Remaining:", magazin.tovary)
| [
"[email protected]"
]
| |
070d2ffacad8dbdcc16c98b9921ba3c9c2b5c0ca | 3a21eac318260972a0f50aa6517bebd62d9634f3 | /minimarket/settings.py | a6e40467c5437d3caa279c03850dc038c10d6db9 | []
| no_license | alviandk/ahp | adaf735c2ad14cfffee41eca37df5ff2452e8812 | 60764c12bb30cd134bbce53d62cda835503191d2 | refs/heads/master | 2016-09-05T19:16:02.907235 | 2015-03-27T09:42:43 | 2015-03-27T09:42:43 | 32,963,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,267 | py | """
Django settings for minimarket project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4z+05z^%!e=1p&*2uyz^_tel^5($l##z8f80t^@=60%*4z$#4m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ahp_aps',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'minimarket.urls'
WSGI_APPLICATION = 'minimarket.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'ahp',
'USER': 'root',
'PASSWORD':'',
'HOST':'localhost'
}
}
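# A common variation (sketch, not part of the original project): pull the
# database credentials from environment variables so they stay out of
# version control, e.g.
#   'USER': os.environ.get('AHP_DB_USER', 'root'),
#   'PASSWORD': os.environ.get('AHP_DB_PASSWORD', ''),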
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_PATH = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
STATIC_PATH,
)
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
| [
"[email protected]"
]
| |
3d5664d5e503269e5557e6b98623f3cb0c80edbc | e211000d0d843fd944266892f49a7649c7e8918d | /abc/065/python/code_c.py | fc52911095e41bda42258728f4b59ac2a5a9d1b0 | []
| no_license | habroptilus/atcoder-src | 63dfa16c6d4b80d1e36618377d3201888183281f | 4cd54202037996b3f4a4442b1bd19d42d8a46db1 | refs/heads/master | 2020-04-26T07:14:38.322156 | 2019-06-08T14:44:26 | 2019-06-08T14:44:26 | 173,388,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | import math
# Two kinds of animals, N of one and M of the other, line up so that no two
# of the same kind are adjacent; this is possible only when |N - M| <= 1.
N, M = map(int, input().split())
if abs(N - M) == 1:
    # The alternating pattern is fixed, so permute within each kind.
    print(math.factorial(N) * math.factorial(M) % (10**9 + 7))
elif N == M:
    # Either kind may start, doubling the count.
    print(math.factorial(N) * math.factorial(M) * 2 % (10**9 + 7))
else:
    print(0)
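# Worked check (hypothetical input "2 2"): N == M, so the answer is
# 2! * 2! * 2 = 8 -- the patterns ABAB and BABA, with the two animals of
# each kind permuted.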
| [
"[email protected]"
]
| |
84e8e2a34adc392dbabc3541f6defc2c829bdb23 | a40f749cb8e876f49890ab8fbbbbf2c07a0dd210 | /examples/ad_manager/v201902/adjustment_service/update_traffic_adjustments.py | 60a54bd299660da60a8ece16a64cfb2643030b0a | [
"Apache-2.0"
]
| permissive | ale180192/googleads-python-lib | 77afff4c352ac3f342fc8b3922ec08873d6da5be | 783a2d40a49956fb16ed73280708f6f9e322aa09 | refs/heads/master | 2020-08-10T15:20:06.051974 | 2019-10-11T07:06:58 | 2019-10-11T07:06:58 | 214,367,074 | 0 | 0 | Apache-2.0 | 2019-10-11T07:04:21 | 2019-10-11T07:04:20 | null | UTF-8 | Python | false | false | 3,009 | py | #!/usr/bin/env python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a historical adjustment of 110% for New Years Day traffic.
"""
from __future__ import print_function
import datetime
# Import appropriate modules from the client library.
from googleads import ad_manager
ADJUSTMENT_ID = 'INSERT_ADJUSTMENT_ID_HERE'
def main(client, adjustment_id):
# Initialize the adjustment service.
adjustment_service = client.GetService('AdjustmentService', version='v201902')
# Create a statement to select a single traffic forecast adjustment by id.
statement = (
ad_manager.StatementBuilder(
version='v201902').Where('id = :id').WithBindVariable(
'id', adjustment_id))
# Get the forecast traffic adjustment.
response = adjustment_service.getTrafficAdjustmentsByStatement(
statement.ToStatement())
# Create a new historical adjustment segment for New Year's Day.
this_new_years = datetime.date(datetime.date.today().year, 12, 31)
next_new_years = datetime.date(datetime.date.today().year + 1, 12, 31)
new_years_segment = {
'basisType': 'HISTORICAL',
'historicalAdjustment': {
'targetDateRange': {
'startDate': next_new_years,
'endDate': next_new_years
},
'referenceDateRange': {
'startDate': this_new_years,
'endDate': this_new_years
},
'milliPercentMultiplier': 110000
}
}
if 'results' in response and len(response['results']):
# Update each local traffic adjustment.
updated_adjustments = []
for adjustment in response['results']:
adjustment['forecastAdjustmentSegments'].append(new_years_segment)
updated_adjustments.append(adjustment)
# Update traffic adjustments remotely.
adjustments = adjustment_service.updateTrafficAdjustments(
updated_adjustments)
# Display the results.
if adjustments:
for adjustment in adjustments:
print('Traffic forecast adjustment with id %d and %d segments was '
'created.' % (adjustment['id'],
len(adjustment['forecastAdjustmentSegments'])))
else:
print('No traffic adjustments were updated.')
else:
print('No traffic adjustments found to update.')
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, ADJUSTMENT_ID)
| [
"[email protected]"
]
| |
866b56f8009fd7f8a034eff87f37008c86df78d1 | c79bc3b25aac5f958da011119bf71fcca534bd1a | /hostedpi/cli.py | 9b5ae581de90ba5a42336748f8ef5c6dfe1b4a90 | [
"BSD-3-Clause"
]
| permissive | gnuchu/hostedpi | c4ff4d398bcc8fde0d2d421f8a67b315c40fcc33 | 325e8035e0bf671daeabb4d696eb5b36a6daa12d | refs/heads/main | 2023-03-31T04:07:17.604847 | 2021-03-28T00:10:22 | 2021-03-28T00:10:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,699 | py | import os
import sys
import argparse
from .picloud import PiCloud
from .utils import read_ssh_key, ssh_import_id
from .exc import HostedPiException
from .__version__ import __version__
class CLI:
def __init__(self):
self._args = None
self._commands = None
self._config = None
self._parser = None
self._output = None
self._store = None
self._cloud = None
self._pis = None
def __call__(self, args=None):
self._args = self.parser.parse_args(args)
try:
return self._args.func()
except HostedPiException as e:
sys.stderr.write("hostedpi error: {e}\n".format(e=e))
return 2
except KeyboardInterrupt:
print("Operation cancelled during process")
@property
def cloud(self):
if self._cloud is None:
API_ID = os.environ.get('HOSTEDPI_ID')
API_SECRET = os.environ.get('HOSTEDPI_SECRET')
if API_ID is None or API_SECRET is None:
print("HOSTEDPI_ID and HOSTEDPI_SECRET environment variables "
"must be set")
self._cloud = PiCloud(API_ID, API_SECRET)
return self._cloud
@property
def pis(self):
if self._pis is None:
self._pis = self.cloud.pis
return self._pis
@property
def parser(self):
"""
The parser for all the sub-commands that the script accepts. Returns the
newly constructed argument parser.
"""
if self._parser is None:
self._parser, self._commands = self._get_parser()
return self._parser
@property
def commands(self):
"A dictionary mapping command names to their sub-parser."
if self._commands is None:
self._parser, self._commands = self._get_parser()
return self._commands
def _get_parser(self):
parser = argparse.ArgumentParser(
description=(
"hostedpi is a tool for provisioning and managing Raspberry Pis "
"in the Mythic Beasts Pi Cloud"))
parser.add_argument(
'--version', action='version', version=__version__)
parser.set_defaults(func=self.do_help, cmd=None)
commands = parser.add_subparsers(title=("commands"))
help_cmd = commands.add_parser(
"help", aliases=["h"],
description=(
"With no arguments, displays the list of hostedpi "
"commands. If a command name is given, displays the "
"description and options for the named command. If a "
"setting name is given, displays the description and "
"default value for that setting."),
help=("Displays help about the specified command or setting"))
help_cmd.add_argument(
"cmd", metavar="cmd", nargs='?',
help=("The name of the command to output help for")
)
help_cmd.set_defaults(func=self.do_help)
test_cmd = commands.add_parser(
"test", aliases=["connect"],
description=(
"Test a connection to the Mythic Beasts API using API ID and "
"secret in environment variables."),
help=("Test a connection to the Mythic Beasts API"))
test_cmd.set_defaults(func=self.do_test)
get_images_cmd = commands.add_parser(
"images",
description=("Retrieve the list of operating system images available for the given Pi model."),
help=("Retrieve the list of operating system images available for the given Pi model"))
get_images_cmd.add_argument(
"model", metavar="model", type=int,
help=("The Pi model number (3 or 4) to get operating systems for")
)
get_images_cmd.set_defaults(func=self.do_get_images)
list_cmd = commands.add_parser(
"list", aliases=["ls"],
description=("List all Pis in the account"),
help=("List all Pis in the account"))
list_cmd.set_defaults(func=self.do_list)
show_cmd = commands.add_parser(
"show", aliases=["cat"],
description=("Show the information about one or more Pis in the account"),
help=("Show the information about one or more Pis in the account"))
show_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The names of the Pis to show information for")
)
show_cmd.set_defaults(func=self.do_show_pis)
create_cmd = commands.add_parser(
"create",
description=("Provision a new Pi in the account"),
help=("Provision a new Pi in the account"))
create_cmd.add_argument(
"name", metavar="name",
help=("The name of the new Pi to provision")
)
create_cmd.add_argument(
"--model", metavar="model", type=int, nargs='?',
help=("The model of the new Pi to provision (3 or 4)")
)
create_cmd.add_argument(
"--disk", metavar="disk", type=int, nargs='?',
help=("The disk size in GB")
)
create_cmd.add_argument(
"--image", metavar="image", type=str, nargs='?',
help=("The operating system image to use")
)
create_cmd.add_argument(
"--ssh-key-path", metavar="ssh_key_path", nargs='?',
help=("The path to an SSH public key file to add to the Pi")
)
create_cmd.set_defaults(func=self.do_create)
provision_status_cmd = commands.add_parser(
"status",
description=("Get the provision status of one or more Pis"),
help=("Get the provision status of one or more Pis"))
provision_status_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The names of the Pis to get the provision status for")
)
provision_status_cmd.set_defaults(func=self.do_provision_status)
power_status_cmd = commands.add_parser(
"power",
description=("Get the power status for one or more Pis"),
help=("Get the power status (on/off) for one or more Pis"))
power_status_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The names of the Pis to get the power status for")
)
power_status_cmd.set_defaults(func=self.do_power_status)
reboot_cmd = commands.add_parser(
"reboot",
description=("Reboot one or more Pis in the account"),
help=("Reboot one or more Pis in the account"))
reboot_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The name of the Pi to reboot")
)
reboot_cmd.set_defaults(func=self.do_reboot)
power_on_cmd = commands.add_parser(
"on", aliases=["poweron"],
description=("Power on one or more Pis in the account"),
help=("Power on one or more Pis in the account"))
power_on_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The name of the Pi to power on")
)
power_on_cmd.set_defaults(func=self.do_power_on)
power_off_cmd = commands.add_parser(
"off", aliases=["poweroff"],
description=("Power off one or more Pis in the account"),
help=("Power off one or more Pis in the account"))
power_off_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The name of the Pi to power off")
)
power_off_cmd.set_defaults(func=self.do_power_off)
cancel_cmd = commands.add_parser(
"cancel",
description=("Cancel one or more Pis in the account"),
help=("Cancel one or more Pis in the account"))
cancel_cmd.add_argument(
"names", metavar="names", nargs='+',
help=("The names of the Pis to cancel")
)
cancel_cmd.add_argument(
"-y", "--yes",
action="store_true",
help=("Proceed without confirmation")
)
cancel_cmd.set_defaults(func=self.do_cancel)
count_keys_cmd = commands.add_parser(
"count-keys", aliases=["num-keys"],
description=("Show the number of SSH keys currently on one or more Pis"),
help=("Show the number of SSH keys currently on one or more Pis"))
count_keys_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The names of the Pis to get keys for")
)
count_keys_cmd.set_defaults(func=self.do_count_keys)
show_keys_cmd = commands.add_parser(
"keys",
description=("Show the SSH keys currently on a Pi"),
help=("Show the SSH keys currently on a Pi"))
show_keys_cmd.add_argument(
"name", metavar="name",
help=("The name of the Pi to get keys for")
)
show_keys_cmd.set_defaults(func=self.do_show_keys)
add_key_cmd = commands.add_parser(
"add-key",
description=("Add an SSH key from a public key file to one or more Pis"),
help=("Add an SSH key from a public key file to one or more Pis"))
add_key_cmd.add_argument(
"ssh_key_path", metavar="ssh_key_path", nargs='?',
help=("The path to an SSH public key file to add to the Pi")
)
add_key_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The name of the Pis to add keys to")
)
add_key_cmd.set_defaults(func=self.do_add_key)
copy_keys_cmd = commands.add_parser(
"copy-keys", aliases=["cp"],
description=("Copy all SSH keys from one Pi to one or more others"),
help=("Copy all SSH keys from one Pi to one or more others"))
copy_keys_cmd.add_argument(
"name_src", metavar="name_src",
help=("The name of the Pi to copy keys from")
)
copy_keys_cmd.add_argument(
"names_dest", metavar="names_dest", nargs='*',
help=("The name of the Pis to copy keys to")
)
copy_keys_cmd.set_defaults(func=self.do_copy_keys)
remove_keys_cmd = commands.add_parser(
"remove-keys",
description=("Remove all SSH keys from one or more Pis"),
help=("Remove all SSH keys from one or more Pis"))
remove_keys_cmd.add_argument(
"names", metavar="names", nargs='+',
help=("The names of the Pis to remove keys from")
)
remove_keys_cmd.set_defaults(func=self.do_remove_keys)
ssh_import_id_cmd = commands.add_parser(
"ssh-import-id",
description=("Import SSH keys from GitHub or Launchpad and add them to one or more Pis"),
help=("Import SSH keys from GitHub or Launchpad and add them to one or more Pis"))
ssh_import_id_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The names of the Pis to import keys onto")
)
ssh_import_id_cmd.add_argument(
"--gh", metavar="github username", nargs='?',
help=("The GitHub username to import keys from")
)
ssh_import_id_cmd.add_argument(
"--lp", metavar="launchpad username", nargs='?',
help=("The Launchpad username to import keys from")
)
ssh_import_id_cmd.set_defaults(func=self.do_ssh_import_id)
ssh_command_cmd = commands.add_parser(
"ssh-command",
description=("Output the SSH command for one or more Pis in the account"),
help=("Output the (IPv4 or IPv6) SSH command for one or more Pis in the account"))
ssh_command_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The names of the Pis to get SSH commands for")
)
ssh_command_cmd.add_argument(
"--ipv6",
action="store_true",
help=("Show IPv6 command")
)
ssh_command_cmd.set_defaults(func=self.do_ssh_command)
ssh_config_cmd = commands.add_parser(
"ssh-config",
description=("Output the SSH config for one or more Pis in the account"),
help=("Output the (IPv4 or IPv6) SSH config for one or more Pis in the account"))
ssh_config_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The names of the Pis to get SSH config for")
)
ssh_config_cmd.add_argument(
"--ipv6",
action="store_true",
help=("Show IPv6 command")
)
ssh_config_cmd.set_defaults(func=self.do_ssh_config)
return parser, commands.choices
def get_pi(self, name):
pi = self.pis.get(name)
if not pi:
self.print_not_found(name)
return
return pi
def get_pis(self, names):
if not names:
return self.pis.items()
return {name: self.pis.get(name) for name in names}.items()
def print_not_found(self, name):
sys.stderr.write("{name} not found\n".format(name=name))
def do_help(self):
if self._args.cmd:
self.parser.parse_args([self._args.cmd, '-h'])
else:
self.parser.parse_args(['-h'])
def do_test(self):
if self.cloud:
print("Connected to the Mythic Beasts API")
return
return 2
def do_get_images(self):
images = self.cloud.get_operating_systems(model=self._args.model)
col_width = max(len(name) for name in images.values()) + 1
for id, name in images.items():
print("{name:{col_width}}: {id}".format(name=name, id=id, col_width=col_width))
def do_list(self):
for name in self.pis:
print(name)
def do_show_pis(self):
for name, pi in self.get_pis(self._args.names):
if pi:
print(pi, end='\n\n')
else:
self.print_not_found(name)
def do_create(self):
name = self._args.name
model = self._args.model
disk_size = self._args.disk
ssh_key_path = self._args.ssh_key_path
        os_image = self._args.image
args = {
'model': model,
'disk_size': disk_size,
'ssh_key_path': ssh_key_path,
'os_image': os_image,
}
kwargs = {k: v for k, v in args.items() if v is not None}
pi = self.cloud.create_pi(name, **kwargs)
print("Pi {} provisioned successfully".format(name))
print()
print(pi)
def do_reboot(self):
for name, pi in self.get_pis(self._args.names):
if pi:
pi.reboot()
print("{name} rebooted".format(name=name))
else:
self.print_not_found(name)
def do_power_on(self):
for name, pi in self.get_pis(self._args.names):
if pi:
pi.on()
print("{name} powered on".format(name=name))
else:
self.print_not_found(name)
def do_power_off(self):
for name, pi in self.get_pis(self._args.names):
if pi:
pi.off()
print("{name} powered off".format(name=name))
else:
self.print_not_found(name)
def do_cancel(self):
if not self._args.yes:
num_pis = len(self._args.names)
try:
s = '' if num_pis == 1 else 's'
y = input("Cancelling {n} Pi{s}. Proceed? [Y/n]".format(n=num_pis, s=s))
except KeyboardInterrupt:
print()
print("Not cancelled")
return
            if y.strip().lower() not in ('', 'y', 'yes'):
print("Not cancelled")
return
for name, pi in self.get_pis(self._args.names):
if pi:
pi.cancel()
print("{name} cancelled".format(name=name))
else:
self.print_not_found(name)
def do_show_keys(self):
pi = self.get_pi(self._args.name)
if not pi:
return 2
print(*pi.ssh_keys, sep='\n')
    def do_count_keys(self):
        for name, pi in self.get_pis(self._args.names):
            if pi:
                num_keys = len(pi.ssh_keys)
                s = '' if num_keys == 1 else 's'
                print("{name}: {n} key{s}".format(name=name, n=num_keys, s=s))
            else:
                self.print_not_found(name)
    def do_add_key(self):
        ssh_key = read_ssh_key(self._args.ssh_key_path)
        for name, pi in self.get_pis(self._args.names):
            if pi:
                keys_before = len(pi.ssh_keys)
                pi.ssh_keys |= {ssh_key}
                keys_after = len(pi.ssh_keys)
                num_keys = keys_after - keys_before
                s = '' if num_keys == 1 else 's'
                print("{n} key{s} added to {name}".format(n=num_keys, name=name, s=s))
            else:
                self.print_not_found(name)
def do_copy_keys(self):
src_pi = self.get_pi(self._args.name_src)
if not src_pi:
return 2
ssh_keys = src_pi.ssh_keys
for name, pi in self.get_pis(self._args.names_dest):
if pi:
keys_before = len(pi.ssh_keys)
pi.ssh_keys |= ssh_keys
keys_after = len(pi.ssh_keys)
num_keys = keys_after - keys_before
s = '' if num_keys == 1 else 's'
print("{n} key{s} added to {name}".format(n=num_keys, name=name, s=s))
def do_remove_keys(self):
for name, pi in self.get_pis(self._args.names):
if pi:
num_keys = len(pi.ssh_keys)
pi.ssh_keys = set()
s = '' if num_keys == 1 else 's'
print("{n} key{s} removed from {name}".format(n=num_keys, name=name, s=s))
else:
self.print_not_found(name)
def do_ssh_import_id(self):
github = self._args.gh
launchpad = self._args.lp
github_keys = set()
launchpad_keys = set()
if github:
github_keys |= ssh_import_id(github=github)
s = '' if len(github_keys) == 1 else 's'
print("{n} key{s} retrieved from GitHub".format(n=len(github_keys), s=s))
if launchpad:
launchpad_keys |= ssh_import_id(launchpad=launchpad)
s = '' if len(launchpad_keys) == 1 else 's'
print("{n} key{s} retrieved from Launchpad".format(n=len(launchpad_keys), s=s))
print()
new_keys = github_keys | launchpad_keys
if len(new_keys) < (len(github_keys) + len(launchpad_keys)):
s = '' if len(new_keys) == 1 else 's'
print("{n} key{s} to add".format(n=len(new_keys), s=s))
if new_keys:
for name, pi in self.get_pis(self._args.names):
if pi:
keys_before = len(pi.ssh_keys)
pi.ssh_keys |= new_keys
keys_after = len(pi.ssh_keys)
num_keys = keys_after - keys_before
s = '' if num_keys == 1 else 's'
print("{n} key{s} added to {name}".format(n=num_keys, name=name, s=s))
else:
self.print_not_found(name)
else:
print("No keys to add")
def do_ssh_command(self):
for name, pi in self.get_pis(self._args.names):
if pi:
if self._args.ipv6:
print(pi.ipv6_ssh_command)
else:
print(pi.ipv4_ssh_command)
else:
self.print_not_found(name)
def do_ssh_config(self):
for name, pi in self.get_pis(self._args.names):
if pi:
if self._args.ipv6:
print(pi.ipv6_ssh_config)
else:
print(pi.ipv4_ssh_config)
else:
self.print_not_found(name)
def do_provision_status(self):
for name, pi in self.get_pis(self._args.names):
if pi:
print("{pi.name}: {pi.provision_status}".format(pi=pi))
else:
self.print_not_found(name)
def do_power_status(self):
for name, pi in self.get_pis(self._args.names):
if pi:
on_off = "on" if pi.power else "off"
print("{name}: powered {on_off}".format(name=name, on_off=on_off))
else:
self.print_not_found(name)
main = CLI()
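# Example invocations (a sketch, assuming a console entry point is wired to
# `main` and HOSTEDPI_ID/HOSTEDPI_SECRET are exported):
#
#   hostedpi test
#   hostedpi create mypi --model 3 --disk 10 --ssh-key-path ~/.ssh/id_rsa.pub
#   hostedpi ssh-import-id mypi --gh some-github-user
#   hostedpi cancel mypi --yes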
| [
"[email protected]"
]
| |
132158a21c498725862cc23ae626f36d7f28db28 | 0c41f2fd4c1ad9b954097b0662e556b3eb288987 | /cellbender/remove_background/data/priors.py | 3989769165ab538647ccca8e672a97fca80bd06d | []
| permissive | broadinstitute/CellBender | e884a5520fc3e0fc2f422f8cd6dcdc6c594b5094 | 4990df713f296256577c92cab3314daeeca0f3d7 | refs/heads/master | 2023-08-21T14:55:33.619290 | 2023-08-08T18:40:14 | 2023-08-08T18:40:14 | 171,951,233 | 207 | 40 | BSD-3-Clause | 2023-08-30T05:27:18 | 2019-02-21T21:53:57 | Python | UTF-8 | Python | false | false | 15,821 | py | """Functionality for estimating various priors from the data"""
import numpy as np
import torch
from scipy.stats import gaussian_kde
from cellbender.remove_background import consts
from typing import Dict, Tuple, Union
import logging
logger = logging.getLogger('cellbender')
def _threshold_otsu(umi_counts: np.ndarray, n_bins: int = 256) -> float:
"""Return threshold value based on fast implementation of Otsu's method.
From skimage, with slight modifications:
https://github.com/scikit-image/scikit-image/blob/
a4e533ea2a1947f13b88219e5f2c5931ab092413/skimage/filters/thresholding.py#L312
Args:
umi_counts: Array of UMI counts
n_bins: Number of bins used to calculate histogram
Returns:
threshold: Upper threshold value. All droplets with UMI counts greater
than this value are assumed to contain cells.
References
----------
.. [1] Wikipedia, https://en.wikipedia.org/wiki/Otsu's_Method
.. [2] https://scikit-image.org/docs/stable/auto_examples/applications/plot_thresholding.html
Notes
-----
The input image must be grayscale.
"""
# create a UMI count histogram
counts, bin_centers = _create_histogram(umi_counts=umi_counts, n_bins=n_bins)
# class probabilities for all possible thresholds
weight1 = np.cumsum(counts)
weight2 = np.cumsum(counts[::-1])[::-1]
# class means for all possible thresholds
mean1 = np.cumsum(counts * bin_centers) / weight1
mean2 = (np.cumsum((counts * bin_centers)[::-1]) / weight2[::-1])[::-1]
# Clip ends to align class 1 and class 2 variables:
# The last value of ``weight1``/``mean1`` should pair with zero values in
# ``weight2``/``mean2``, which do not exist.
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
idx = np.argmax(variance12)
threshold = bin_centers[idx]
return threshold
def _create_histogram(umi_counts: np.ndarray, n_bins: int) -> Tuple[np.ndarray, np.ndarray]:
"""Return a histogram.
Args:
umi_counts: Array of UMI counts
n_bins: Number of bins used to calculate histogram
Returns:
counts: Each element is the number of droplets falling in each UMI
count bin
bin_centers: Each element is the value corresponding to the center of
each UMI count bin
"""
counts, bin_edges = np.histogram(umi_counts.reshape(-1), n_bins)
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
return counts.astype('float32', copy=False), bin_centers
def _peak_density_given_cutoff(umi_counts: np.ndarray,
cutoff: float,
cell_count_low_limit: float) -> Tuple[float, float]:
"""Run scipy.stats gaussian_kde on part of the UMI curve"""
# get the UMI count values we are including
noncell_counts = umi_counts[umi_counts <= cutoff]
# resample them: the magic of looking at a log log plot
n_putative_cells = (umi_counts > cell_count_low_limit).sum()
n_putative_empties = len(noncell_counts)
inds = np.logspace(np.log10(n_putative_cells),
np.log10(n_putative_cells + n_putative_empties),
num=1000,
base=10)
inds = [max(0, min(int(ind - n_putative_cells), len(noncell_counts) - 1)) for ind in inds]
noncell_counts = np.sort(noncell_counts)[::-1][inds]
# find the peak density: that is the empty count prior
# calculate range of data, rounding out to make sure we cover everything
log_noncell_counts = np.log(noncell_counts)
x = np.arange(
np.floor(log_noncell_counts.min()) - 0.01,
np.ceil(log_noncell_counts.max()) + 0.01,
0.1
)
# fit a KDE to estimate density
k = gaussian_kde(log_noncell_counts)
density = k.evaluate(x)
# the density peak is almost surely the empty droplets
log_peak_ind = np.argmax(density)
log_peak = x[log_peak_ind]
empty_count_prior = np.exp(log_peak)
# try to go about 1 stdev up from the peak
peak_density = np.max(density)
one_std_density = 0.6 * peak_density
one_std_inds = np.where(density[log_peak_ind:] < one_std_density)[0]
if len(one_std_inds) > 0:
one_std_ind = one_std_inds[0]
else:
one_std_ind = len(density[log_peak_ind:]) - 1
empty_count_upper_limit = np.exp(x[log_peak_ind:][one_std_ind])
return empty_count_prior, empty_count_upper_limit
def get_cell_count_given_expected_cells(umi_counts: np.ndarray,
expected_cells: int) -> Dict[str, float]:
"""In the case where a prior is passed in as input, use it
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
expected_cells: Input by user
Returns:
Dict with keys ['cell_counts']
"""
order = np.argsort(umi_counts)[::-1]
cell_counts = np.exp(np.mean(np.log(umi_counts[order][:expected_cells]))).item()
return {'cell_counts': cell_counts}
def get_empty_count_given_expected_cells_and_total_droplets(
umi_counts: np.ndarray,
expected_cells: int,
total_droplets: int,
) -> Dict[str, float]:
"""In the case where a prior is passed in as input, use it
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
expected_cells: Input by user, or prior estimate
total_droplets: Input by user
Returns:
Dict with keys ['empty_counts', 'empty_count_upper_limit']
"""
order = np.argsort(umi_counts)[::-1]
starting_point = max(expected_cells, total_droplets - 500)
empty_counts = np.median(umi_counts[order]
[int(starting_point):int(total_droplets)]).item()
    # cell counts must also be estimated here, to place the crossover point
cell_counts = np.exp(np.mean(np.log(umi_counts[order][:expected_cells]))).item()
middle = np.sqrt(cell_counts * empty_counts)
empty_count_upper_limit = min(middle, 1.5 * empty_counts)
return {'empty_counts': empty_counts,
'empty_count_upper_limit': empty_count_upper_limit}
def get_cell_count_empty_count(umi_counts: np.ndarray,
low_count_threshold: float = 15) -> Dict[str, float]:
"""Obtain priors on cell counts and empty droplet counts from a UMI curve
using heuristics, and without applying any other prior information.
Heuristics:
0. Ignore droplets with counts below low_count_threshold
1. Use Otsu's method to threshold the log UMI count data (ignoring droplets
past 1/4 of the total droplets above low_count_threshold, as we go down
the UMI curve). This is used as a lower limit on cell counts.
It seems quite robust.
2. Use the following iterative approach, until converged:
a. Establish an upper cutoff on possible empty droplets, using the
current estimate of empty counts and our cell count prior (the
estimate is 3/4 of the geometric mean of the two).
b. Use gaussian_kde from scipy.stats to create a smooth histogram of
the log UMI counts, for droplets with counts below the cutoff.
- A trick is used to resample the droplets before creating the
histogram, so that it looks more like a log-log plot
c. Identify the peak density of the histogram as the empty count
estimate.
- Convergence happens when our estimate of empty counts stops changing.
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
low_count_threshold: Ignore droplets with counts below this value
Returns:
Dict with keys ['cell_counts', 'empty_counts']
"""
logger.debug('Beginning priors.get_cell_count_empty_count()')
reverse_sorted_umi_counts = np.sort(umi_counts)[::-1]
umi_counts_for_otsu = reverse_sorted_umi_counts[:(umi_counts > low_count_threshold).sum() // 4]
log_cell_count_low_limit = _threshold_otsu(np.log(umi_counts_for_otsu))
cell_count_low_limit = np.exp(log_cell_count_low_limit)
logger.debug(f'cell_count_low_limit is {cell_count_low_limit}')
cell_count_prior = np.mean(umi_counts[umi_counts > cell_count_low_limit])
umi_counts_for_kde = reverse_sorted_umi_counts[reverse_sorted_umi_counts > low_count_threshold]
# initial conditions for the loop
# start low, but have a failsafe (especially for simulated data)
cutoff = max(0.1 * cell_count_low_limit, umi_counts_for_kde[-100])
empty_count_prior = -100
empty_count_upper_limit = None
delta = np.inf
a = 0
# iterate to convergence, at most 5 times
while delta > 10:
logger.debug(f'cutoff = {cutoff}')
# use gaussian_kde to find the peak in the histogram
new_empty_count_prior, empty_count_upper_limit = _peak_density_given_cutoff(
umi_counts=umi_counts_for_kde,
cutoff=cutoff,
cell_count_low_limit=cell_count_low_limit,
)
logger.debug(f'new_empty_count_prior = {new_empty_count_prior}')
# 3/4 of the geometric mean is our new upper cutoff
cutoff = 0.75 * np.sqrt(cell_count_prior * new_empty_count_prior)
delta = np.abs(new_empty_count_prior - empty_count_prior)
logger.debug(f'delta = {delta}')
empty_count_prior = new_empty_count_prior
a += 1
if a >= 5:
logger.debug('Heuristics for determining empty counts exceeded 5 '
'iterations without converging')
break
# do a final estimation of cell counts:
# go to the halfway point and then take the median of the droplets above
count_crossover = np.sqrt(cell_count_prior * empty_count_prior)
cell_count_prior = np.median(umi_counts[umi_counts > count_crossover])
logger.debug(f'cell_count_prior is {cell_count_prior}')
logger.debug(f'empty_count_prior is {empty_count_prior}')
logger.debug('End of priors.get_cell_count_empty_count()')
return {'cell_counts': cell_count_prior,
'empty_counts': empty_count_prior,
'empty_count_upper_limit': empty_count_upper_limit}
def get_expected_cells_and_total_droplets(umi_counts: np.ndarray,
cell_counts: float,
empty_counts: float,
empty_count_upper_limit: float,
max_empties: int = consts.MAX_EMPTIES_TO_INCLUDE) \
-> Dict[str, int]:
"""Obtain priors on cell counts and empty droplet counts from a UMI curve
using heuristics, and without applying any other prior information.
NOTE: to be run using inputs from get_cell_count_empty_count()
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
cell_counts: Prior from get_cell_count_empty_count()
empty_counts: Prior from get_cell_count_empty_count()
empty_count_upper_limit: Prior from get_cell_count_empty_count()
max_empties: Do not include more putative empty droplets than this
Returns:
Dict with keys ['expected_cells', 'total_droplets', 'transition_point']
Example:
>>> priors = get_cell_count_empty_count(umi_counts)
>>> priors.update(get_expected_cells_and_total_droplets(umi_counts, **priors))
"""
# expected cells does well when you give it a very conservative estimate
expected_cells = (umi_counts >= cell_counts).sum()
# total droplets will be between empty_count_prior and its upper limit
total_droplets_count_value = np.sqrt(empty_counts * empty_count_upper_limit)
total_droplets = (umi_counts >= total_droplets_count_value).sum()
# find the transition point
count_crossover = np.sqrt(cell_counts * empty_counts)
transition_point = (umi_counts >= count_crossover).sum()
logger.debug(f'In get_expected_cells_and_total_droplets(), found transition '
f'point at droplet {transition_point}')
    # ensure our heuristics don't go too far on datasets with many cells
total_droplets = min(total_droplets, transition_point + max_empties)
return {'expected_cells': expected_cells,
'total_droplets': total_droplets,
'transition_point': transition_point}
def get_priors(umi_counts: np.ndarray,
low_count_threshold: float,
max_total_droplets: int = consts.MAX_TOTAL_DROPLETS_GUESSED) \
-> Dict[str, Union[int, float]]:
"""Get all priors using get_cell_count_empty_count() and
get_expected_cells_and_total_droplets(), employing a failsafe if
total_droplets is improbably large.
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
low_count_threshold: Ignore droplets with counts below this value
max_total_droplets: If the initial heuristics come up with a
total_droplets value greater than this, we re-run the heuristics
with higher low_count_threshold
Returns:
Dict with keys ['cell_counts', 'empty_counts',
'empty_count_upper_limit', 'surely_empty_counts',
'expected_cells', 'total_droplets', 'log_counts_crossover']
"""
logger.debug("Computing priors from the UMI curve")
priors = get_cell_count_empty_count(
umi_counts=umi_counts,
low_count_threshold=low_count_threshold,
)
priors.update(get_expected_cells_and_total_droplets(umi_counts=umi_counts, **priors))
logger.debug(f'Automatically computed priors: {priors}')
a = 0
while priors['total_droplets'] > max_total_droplets:
logger.debug(f'Heuristics for estimating priors resulted in '
f'{priors["total_droplets"]} total_droplets, which is '
f'typically too large. Recomputing with '
f'low_count_threshold = {priors["empty_count_upper_limit"]:.0f}')
priors = get_cell_count_empty_count(
umi_counts=umi_counts,
low_count_threshold=priors['empty_count_upper_limit'],
)
priors.update(get_expected_cells_and_total_droplets(umi_counts=umi_counts, **priors))
logger.debug(f'Automatically computed priors: {priors}')
a += 1
if a > 5:
break
# compute a few last things
compute_crossover_surely_empty_and_stds(umi_counts=umi_counts, priors=priors)
return priors
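# End-to-end sketch of the estimation entry point above (illustrative only;
# `count_matrix` is a hypothetical droplets-by-genes sparse count matrix):
#
#   umi_counts = np.asarray(count_matrix.sum(axis=1)).squeeze()
#   priors = get_priors(umi_counts, low_count_threshold=15)
#   print(priors['expected_cells'], priors['total_droplets'])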
def compute_crossover_surely_empty_and_stds(umi_counts, priors):
"""Given cell_counts and total_droplets, compute a few more quantities
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
priors: Dict of priors
Returns:
None. Modifies priors dict in place.
"""
assert 'total_droplets' in priors.keys(), \
'Need total_droplets in priors to run compute_crossover_surely_empty_and_stds()'
assert 'cell_counts' in priors.keys(), \
'Need cell_counts in priors to run compute_crossover_surely_empty_and_stds()'
# Compute a crossover point in log count space.
reverse_sorted_counts = np.sort(umi_counts)[::-1]
surely_empty_counts = reverse_sorted_counts[priors['total_droplets']]
log_counts_crossover = (np.log(surely_empty_counts) + np.log(priors['cell_counts'])) / 2
priors.update({'log_counts_crossover': log_counts_crossover,
'surely_empty_counts': surely_empty_counts})
# Compute several other priors.
log_nonzero_umi_counts = np.log(umi_counts[umi_counts > 0])
d_std = np.std(log_nonzero_umi_counts[log_nonzero_umi_counts > log_counts_crossover]).item() / 5.
d_empty_std = 0.01 # this is basically turned off in favor of epsilon
priors.update({'d_std': d_std, 'd_empty_std': d_empty_std})
| [
"[email protected]"
]
| |
2ffdda96a873aba49978b503a61bf9f7d102c380 | aabe7008e0eb77617f1a76cddb98e4b17fd5ce27 | /nni/algorithms/compression/v2/pytorch/base/pruner.py | 730b9c749493d56b5adb0b6fab1fccd139408f77 | [
"MIT"
]
| permissive | penghouwen/nni | a09a374a81be46fe246c425275585d5fe79404af | 2e6a2fd2df0d5700cb028b25156bb535a3fc227a | refs/heads/master | 2021-12-21T14:02:32.228973 | 2021-12-13T16:54:39 | 2021-12-13T16:54:39 | 435,926,123 | 1 | 0 | MIT | 2021-12-07T15:09:36 | 2021-12-07T15:09:35 | null | UTF-8 | Python | false | false | 6,529 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
from typing import Dict, List, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import Module
from .compressor import Compressor, LayerInfo
_logger = logging.getLogger(__name__)
__all__ = ['Pruner']
class PrunerModuleWrapper(Module):
def __init__(self, module: Module, module_name: str, config: Dict, pruner: Compressor):
"""
        Wrap a module to enable data parallel, forward method customization and buffer registration.
Parameters
----------
module
The module user wants to compress.
config
The configurations that users specify for compression.
module_name
The name of the module to compress, wrapper module shares same name.
pruner
The pruner used to calculate mask.
"""
super().__init__()
# origin layer information
self.module = module
self.name = module_name
# config and pruner
self.config = config
self.pruner = pruner
# register buffer for mask
self.register_buffer("weight_mask", torch.ones(self.module.weight.shape))
if hasattr(self.module, 'bias') and self.module.bias is not None:
self.register_buffer("bias_mask", torch.ones(self.module.bias.shape))
else:
self.register_buffer("bias_mask", None)
def forward(self, *inputs):
# apply mask to weight, bias
self.module.weight.data = self.module.weight.data.mul_(self.weight_mask)
if hasattr(self.module, 'bias') and self.module.bias is not None:
self.module.bias.data = self.module.bias.data.mul_(self.bias_mask)
return self.module(*inputs)
class Pruner(Compressor):
"""
The abstract class for pruning algorithm. Inherit this class and implement the `_reset_tools` to customize a pruner.
"""
def reset(self, model: Optional[Module] = None, config_list: Optional[List[Dict]] = None):
super().reset(model=model, config_list=config_list)
def _wrap_modules(self, layer: LayerInfo, config: Dict):
"""
Create a wrapper module to replace the original one.
Parameters
----------
layer
The layer to instrument the mask.
config
The configuration for generating the mask.
"""
_logger.debug("Module detected to compress : %s.", layer.name)
wrapper = PrunerModuleWrapper(layer.module, layer.name, config, self)
assert hasattr(layer.module, 'weight'), "module %s does not have 'weight' attribute" % layer.name
# move newly registered buffers to the same device of weight
wrapper.to(layer.module.weight.device)
return wrapper
def load_masks(self, masks: Dict[str, Dict[str, Tensor]]):
"""
Load existing masks onto the wrappers. You can train the model with the existing masks after loading them.
Parameters
----------
masks
The masks dict with format {'op_name': {'weight': mask, 'bias': mask}}.
"""
wrappers = self.get_modules_wrapper()
for name, layer_mask in masks.items():
assert name in wrappers, '{} is not in wrappers of this pruner, cannot apply the mask.'.format(name)
if layer_mask.get('weight') is not None:
assert hasattr(wrappers[name], 'weight_mask'), 'There is no attribute weight_mask in wrapper.'
setattr(wrappers[name], 'weight_mask', layer_mask.get('weight'))
if layer_mask.get('bias') is not None:
assert hasattr(wrappers[name], 'bias_mask'), 'There is no attribute bias_mask in wrapper.'
setattr(wrappers[name], 'bias_mask', layer_mask.get('bias'))
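# Illustrative mask dict (hedged; mask shapes must match the wrapped layers):
#   masks = {'conv1': {'weight': torch.ones(8, 3, 3, 3), 'bias': None}}
#   pruner.load_masks(masks)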
def compress(self) -> Tuple[Module, Dict[str, Dict[str, Tensor]]]:
"""
Returns
-------
Tuple[Module, Dict]
Return the wrapped model and mask.
"""
return self.bound_model, {}
# NOTE: dim needs refactoring to support a list of dimensions
def show_pruned_weights(self, dim: int = 0):
"""
Log the simulated prune sparsity.
Parameters
----------
dim
The pruned dim.
"""
for _, wrapper in self.get_modules_wrapper().items():
weight_mask = wrapper.weight_mask
mask_size = weight_mask.size()
if len(mask_size) == 1:
index = torch.nonzero(weight_mask.abs() != 0, as_tuple=False).tolist()
else:
sum_idx = list(range(len(mask_size)))
sum_idx.remove(dim)
index = torch.nonzero(weight_mask.abs().sum(sum_idx) != 0, as_tuple=False).tolist()
_logger.info(f'simulated prune {wrapper.name} remain/total: {len(index)}/{weight_mask.size(dim)}')
def export_model(self, model_path: str, mask_path: Optional[str] = None):
"""
Export pruned model weights, masks and (optionally) an ONNX model.
Parameters
----------
model_path
Path to save the pruned model state_dict. The weights and biases have already been multiplied by the masks.
mask_path
Path to save mask dict.
"""
assert self.bound_model is not None, 'The bound model reference has been cleared.'
assert model_path is not None, 'model_path must be specified.'
mask_dict = {}
self._unwrap_model()
for name, wrapper in self.get_modules_wrapper().items():
weight_mask = wrapper.weight_mask
bias_mask = wrapper.bias_mask
if weight_mask is not None:
mask_sum = weight_mask.sum().item()
mask_num = weight_mask.numel()
_logger.debug('Layer: %s Sparsity: %.4f', name, 1 - mask_sum / mask_num)
wrapper.module.weight.data = wrapper.module.weight.data.mul(weight_mask)
if bias_mask is not None:
wrapper.module.bias.data = wrapper.module.bias.data.mul(bias_mask)
# save mask to dict
mask_dict[name] = {"weight": weight_mask, "bias": bias_mask}
torch.save(self.bound_model.state_dict(), model_path)
_logger.info('Model state_dict saved to %s', model_path)
if mask_path is not None:
torch.save(mask_dict, mask_path)
_logger.info('Mask dict saved to %s', mask_path)
self._wrap_model()
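# Hedged end-to-end sketch (illustrative):
#   model, masks = pruner.compress()
#   pruner.show_pruned_weights()
#   pruner.export_model('pruned_model.pth', mask_path='masks.pth')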
| [
"[email protected]"
]
| |
55303a17c04c8a0efbd951d112b3225f0d9cb8b7 | 48983b88ebd7a81bfeba7abd6f45d6462adc0385 | /MOG/50.py | 4f0d7d569452389c938806754ec6d5d1f0269de2 | []
| no_license | lozdan/oj | c6366f450bb6fed5afbaa5573c7091adffb4fa4f | 79007879c5a3976da1e4713947312508adef2e89 | refs/heads/master | 2018-09-24T01:29:49.447076 | 2018-06-19T14:33:37 | 2018-06-19T14:33:37 | 109,335,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | # author: Daniel Lozano
# source: MatcomOnlineGrader (MOG) ( http://matcomgrader.com )
# problem name: El Numero Decodificado
# problem url: http://matcomgrader.com/problem/50/el-numero-decodificado/
n = int(input())
count = 1
def digits_sum(num):
add = 0
while num != 0:
add += num % 10
num = num // 10
return add
while count != digits_sum(n - count):
count += 1
print(n - count)
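# Worked example (illustrative): for n = 21 the loop stops at count = 6,
# because digits_sum(21 - 6) = digits_sum(15) = 6, so it prints 15
# (and indeed 15 + 1 + 5 = 21).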
| [
"[email protected]"
]
| |
08842649a48eb36c8cf0554d9be65a5eb137f4a6 | 006ff11fd8cfd5406c6f4318f1bafa1542095f2a | /CondTools/L1Trigger/test/L1ConfigWriteRSOnline_cfg.py | 64fa0d8ce4623c21378f8483b5e825899465cb4e | []
| permissive | amkalsi/cmssw | 8ac5f481c7d7263741b5015381473811c59ac3b1 | ad0f69098dfbe449ca0570fbcf6fcebd6acc1154 | refs/heads/CMSSW_7_4_X | 2021-01-19T16:18:22.857382 | 2016-08-09T16:40:50 | 2016-08-09T16:40:50 | 262,608,661 | 0 | 0 | Apache-2.0 | 2020-05-09T16:10:07 | 2020-05-09T16:10:07 | null | UTF-8 | Python | false | false | 8,202 | py | # This script doesn't work yet. PoolDBESSource does not see the IOV updates made earlier in the
# same event.
import FWCore.ParameterSet.Config as cms
process = cms.Process("L1ConfigWriteRSOnline")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cout.placeholder = cms.untracked.bool(False)
process.MessageLogger.cout.threshold = cms.untracked.string('DEBUG')
process.MessageLogger.debugModules = cms.untracked.vstring('*')
import FWCore.ParameterSet.VarParsing as VarParsing
options = VarParsing.VarParsing()
options.register('runNumber',
0, #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.int,
"Run number")
options.register('outputDBConnect',
'sqlite_file:l1config.db', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Connection string for output DB")
options.register('outputDBAuth',
'.', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Authentication path for outputDB")
options.register('keysFromDB',
1, #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.int,
"1 = read keys from OMDS, 0 = read keys from command line")
options.register('overwriteKeys',
0, #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.int,
"Overwrite existing keys")
options.register('logTransactions',
1, #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.int,
"Record transactions in log DB")
# arguments for setting object keys by hand
options.register('L1MuDTTFMasksRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1MuGMTChannelMaskRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1RCTChannelMaskRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GctChannelMaskRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GtPrescaleFactorsAlgoTrigRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GtPrescaleFactorsTechTrigRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GtTriggerMaskAlgoTrigRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GtTriggerMaskTechTrigRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GtTriggerMaskVetoTechTrigRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.parseArguments()
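# Hedged usage sketch: VarParsing options are passed as key=value pairs on the
# cmsRun command line (values below are illustrative):
#   cmsRun L1ConfigWriteRSOnline_cfg.py runNumber=123456 \
#       outputDBConnect=sqlite_file:l1config.db keysFromDB=1 logTransactions=0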
# Define CondDB tags
from CondTools.L1Trigger.L1CondEnum_cfi import L1CondEnum
from CondTools.L1Trigger.L1O2OTags_cfi import initL1O2OTags
initL1O2OTags()
if options.keysFromDB == 1:
process.load("CondTools.L1Trigger.L1ConfigRSKeys_cff")
else:
process.load("CondTools.L1Trigger.L1TriggerKeyDummy_cff")
from CondTools.L1Trigger.L1RSSubsystemParams_cfi import initL1RSSubsystems
initL1RSSubsystems( tagBaseVec = initL1O2OTags.tagBaseVec,
L1MuDTTFMasksRcdKey = options.L1MuDTTFMasksRcdKey,
L1MuGMTChannelMaskRcdKey = options.L1MuGMTChannelMaskRcdKey,
L1RCTChannelMaskRcdKey = options.L1RCTChannelMaskRcdKey,
L1GctChannelMaskRcdKey = options.L1GctChannelMaskRcdKey,
L1GtPrescaleFactorsAlgoTrigRcdKey = options.L1GtPrescaleFactorsAlgoTrigRcdKey,
L1GtPrescaleFactorsTechTrigRcdKey = options.L1GtPrescaleFactorsTechTrigRcdKey,
L1GtTriggerMaskAlgoTrigRcdKey = options.L1GtTriggerMaskAlgoTrigRcdKey,
L1GtTriggerMaskTechTrigRcdKey = options.L1GtTriggerMaskTechTrigRcdKey,
L1GtTriggerMaskVetoTechTrigRcdKey = options.L1GtTriggerMaskVetoTechTrigRcdKey,
includeL1RCTNoisyChannelMask = False )
process.L1TriggerKeyDummy.objectKeys = initL1RSSubsystems.params.recordInfo
# Get L1TriggerKeyList from DB
process.load("CondCore.DBCommon.CondDBCommon_cfi")
process.outputDB = cms.ESSource("PoolDBESSource",
process.CondDBCommon,
toGet = cms.VPSet(cms.PSet(
record = cms.string('L1TriggerKeyListRcd'),
tag = cms.string('L1TriggerKeyList_' + initL1O2OTags.tagBaseVec[ L1CondEnum.L1TriggerKeyList ] )
)),
RefreshEachRun=cms.untracked.bool(True)
)
process.outputDB.connect = options.outputDBConnect
process.outputDB.DBParameters.authenticationPath = options.outputDBAuth
# Generate configuration data
process.load("CondTools.L1Trigger.L1ConfigRSPayloads_cff")
# writer modules
from CondTools.L1Trigger.L1CondDBPayloadWriter_cff import initPayloadWriter
initPayloadWriter( process,
outputDBConnect = options.outputDBConnect,
outputDBAuth = options.outputDBAuth,
tagBaseVec = initL1O2OTags.tagBaseVec )
process.L1CondDBPayloadWriter.writeL1TriggerKey = cms.bool(False)
if options.logTransactions == 1:
# initPayloadWriter.outputDB.logconnect = cms.untracked.string('oracle://cms_orcon_prod/CMS_COND_31X_POPCONLOG')
initPayloadWriter.outputDB.logconnect = cms.untracked.string('sqlite_file:l1o2o-log.db')
process.L1CondDBPayloadWriter.logTransactions = True
if options.overwriteKeys == 0:
process.L1CondDBPayloadWriter.overwriteKeys = False
else:
process.L1CondDBPayloadWriter.overwriteKeys = True
from CondTools.L1Trigger.L1CondDBIOVWriter_cff import initIOVWriter
initIOVWriter( process,
outputDBConnect = options.outputDBConnect,
outputDBAuth = options.outputDBAuth,
tagBaseVec = initL1O2OTags.tagBaseVec,
tscKey = '' )
process.L1CondDBIOVWriter.logKeys = True
if options.logTransactions == 1:
# initIOVWriter.outputDB.logconnect = cms.untracked.string('oracle://cms_orcon_prod/CMS_COND_31X_POPCONLOG')
initIOVWriter.outputDB.logconnect = cms.untracked.string('sqlite_file:l1o2o-log.db')
process.L1CondDBIOVWriter.logTransactions = True
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptyIOVSource",
timetype = cms.string('runnumber'),
firstValue = cms.uint64(options.runNumber),
lastValue = cms.uint64(options.runNumber),
interval = cms.uint64(1)
)
# CORAL debugging
#process.outputDB.DBParameters.messageLevel = cms.untracked.int32(3)
process.p = cms.Path(process.L1CondDBPayloadWriter*process.L1CondDBIOVWriter)
| [
"[email protected]"
]
| |
5399e23352d99fa49189fb77253df88e8639566e | eb82022c0cfc7c8747661cff9624ad2099fa1c3f | /dev_accounting_report/report/sales_delivery_out_rekap_xls.py | 195552276cf5b3294ff059aa939ef9c184ff83a4 | []
| no_license | dadysuarsa/Odoo | 8d026a066c390cc8f72805d2672212e61260c1cb | c9becd0c192fa239520ad3e1a11d81f70832eddf | refs/heads/master | 2023-03-11T06:02:06.011575 | 2021-02-26T02:17:37 | 2021-02-26T02:17:37 | 276,346,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,299 | py | import time
import xlwt, operator
from odoo.report import report_sxw
from report_engine_xls import report_xls
from odoo.tools.translate import _
from datetime import datetime
import pytz
class ReportStatus(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context=None):
super(ReportStatus, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'cr': cr,
'uid': uid,
'time': time,
})
_xs = report_xls.xls_styles
style_title = xlwt.easyxf(_xs['xls_title'])
style_blue = xlwt.easyxf(_xs['wrap'] + _xs['bold'] + _xs['fill_blue'] + _xs['borders_all'], num_format_str=report_xls.decimal_format)
style_blue_center = xlwt.easyxf(_xs['bold'] + _xs['fill_blue'] + _xs['center'] + _xs['borders_all'])
style_blue_center.alignment.middle = 1
style_yellow = xlwt.easyxf(_xs['bold'] + _xs['fill'] + _xs['borders_all'], num_format_str=report_xls.decimal_format)
style_yellow_right = xlwt.easyxf(_xs['bold'] + _xs['fill'] + _xs['borders_all'] + _xs['right'], num_format_str=report_xls.decimal_format)
style_yellow_percent = xlwt.easyxf(_xs['bold'] + _xs['fill'] + _xs['borders_all'], num_format_str=report_xls.percentage_format)
style_normal_bold = xlwt.easyxf(_xs['bold'] + _xs['borders_all'], num_format_str=report_xls.decimal_format)
style_normal = xlwt.easyxf(_xs['borders_all'], num_format_str=report_xls.decimal_format)
style_normal_date = xlwt.easyxf(_xs['borders_all'], num_format_str=report_xls.date_format)
style_normal_center = xlwt.easyxf(_xs['wrap'] + _xs['top'] + _xs['center'] + _xs['borders_all'])
style_normal_italic = xlwt.easyxf(_xs['italic'] + _xs['borders_all'])
style_normal_percent = xlwt.easyxf(_xs['borders_all'], num_format_str=report_xls.percentage_format)
columns = [
['Tanggal Kirim', 13],
['No SJ/DO', 15],
['Satuan', 8],
['QTY Kirim', 17],
['Buyer', 45],
['No SC', 15],
['No Invoice', 15],
['Tgl Invoice', 12],
['Mata Uang', 10],
['Qty Invoice', 17],
['PPN VALAS', 17],
['PPN IDR', 17],
['DPP VALAS', 17],
['DPP IDR', 17],
['TOTAL VALAS', 17],
['TOTAL IDR', 17],
]
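# Note: each width above is in characters; xlwt's col().width is measured in
# 1/256ths of a character's width, hence the `256 * column[1]` scaling below.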
class sales_delivery_out_rekap_xls(report_xls):
def generate_xls_report(self, parser, _xs, data, obj, wb):
# import ipdb;ipdb.set_trace()
ws = wb.add_sheet(('Rekap Sales Detail Delivery'))
ws.panes_frozen = True
ws.remove_splits = True
ws.portrait = 0 # Landscape
ws.fit_width_to_pages = 1
ws.set_horz_split_pos(7)
ws.write_merge(0, 0, 0, 5, 'REKAP SALES DELIVERY', style_title)
ws.write_merge(1, 1, 0, 3, (('Downloaded Date : %s') %(datetime.strptime(str(datetime.now(pytz.timezone('Asia/Jakarta')))[:18], "%Y-%m-%d %H:%M:%S").strftime("%d-%m-%Y %H:%M:%S"))), style_normal_date)
ws.write_merge(2, 2, 0, 3, 'Tanggal', style_blue_center)
ws.write_merge(2, 2, 4, 4, 'Divisi', style_blue_center)
ws.row(3).height_mismatch = True
ws.row(3).height = 20 * 28
ws.write_merge(3, 3, 0, 3, data['date_from'] + ' - ' + data['date_to'], style_normal_center)
ws.write_merge(3, 3, 4, 4, data['divisi'], style_normal_center)
ws.write_merge(5, 5, 0, 4, 'Delivery', style_blue_center)
ws.write_merge(5, 5, 5, 15, 'SO & Invoice', style_blue_center)
c_hdr_cell_style = xlwt.easyxf(_xs['bold'] + _xs['fill'] + _xs['borders_all'],
num_format_str=report_xls.decimal_format)
c_hdr_cell_style_right = xlwt.easyxf(_xs['bold'] + _xs['fill'] + _xs['borders_all'] + _xs['right'],
num_format_str=report_xls.decimal_format)
c_cell_style = xlwt.easyxf(_xs['borders_all'],
num_format_str=report_xls.decimal_format)
c_hdr_cell_style_grey = xlwt.easyxf(_xs['bold'] + _xs['fill_grey'] + _xs['borders_all'],
num_format_str=report_xls.decimal_format)
row_count = 6
col_count = 0
for column in columns:
ws.col(col_count).width = 256 * column[1]
ws.write(row_count, col_count, column[0], c_hdr_cell_style)
col_count += 1
row_count += 1
col_count = 0
row_start = row_count
for lines in data['csv']:
for line in lines:
ws.write(row_count, col_count, line, c_cell_style)
col_count += 1
row_count += 1
col_count = 0
row_count += 1
ws.write_merge(row_count, row_count, 6, 8, 'GRAND TOTAL', c_hdr_cell_style_grey)
col_count = 9
while col_count <= 15:
sum_cell_start = xlwt.Utils.rowcol_to_cell(row_start, col_count)
sum_cell_end = xlwt.Utils.rowcol_to_cell(row_count - 2, col_count)
ws.write(row_count, col_count, xlwt.Formula('sum(' + sum_cell_start + ':' + sum_cell_end + ')'), c_hdr_cell_style_grey)
col_count += 1
sales_delivery_out_rekap_xls('report.sales.delivery.out.rekap.xls','stock.picking','addons/dev_accounting_report/report/report_excel.mako', parser=ReportStatus, header=False) | [
"[email protected]"
]
| |
71240c639014721fc67dd2c7ff9f05d6c32de443 | 095a1c126ffaf703d923431ce5279a0dac384740 | /timecard/views/auth_views.py | f01ca3273a967bcb926fb3d487993405f8ebdcb9 | []
| no_license | patpio/timecard | 8bc5c6dbfc3877157dc8bfca7f9f5debd1e7b486 | f4a2f2db69410a2b98d9815fbac5048ba8c47126 | refs/heads/master | 2023-03-22T15:51:06.658738 | 2021-01-12T22:42:44 | 2021-01-12T22:42:44 | 321,773,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,926 | py | from flask import Blueprint, render_template, url_for, flash, request, abort
from flask_login import login_user, logout_user, login_required, current_user
from werkzeug.utils import redirect
from timecard import db
from ..models import User
from ..forms import SignUpForm, LoginForm
bp_auth = Blueprint('auth', __name__, url_prefix='/auth')
@bp_auth.route('/signup', methods=['GET', 'POST'])
@login_required
def signup():
if current_user != User.query.filter_by(username='admin').first():
abort(403)
form = SignUpForm()
if form.validate_on_submit():
user = User(username=form.username.data, email=form.email.data, password=form.password.data)
db.session.add(user)
db.session.commit()
return redirect(url_for('main.home'))
return render_template('signup.html', form=form)
@bp_auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.get_by_username(form.username.data)
if user is not None and user.check_password(form.password.data):
login_user(user, form.remember_me.data)
flash(f'Logged in successfully as {user.username}', 'success')
return redirect(request.args.get('next') or url_for('main.home'))
return render_template('login.html', form=form)
@bp_auth.route('/logout', methods=['GET'])
def logout():
logout_user()
flash('Logged out successfully.', 'success')
return redirect(url_for('main.home'))
@bp_auth.route('/admin', methods=['GET', 'POST'])
def admin():
if User.query.all():
abort(403)
form = SignUpForm()
if form.validate_on_submit():
user = User(username='admin', email=form.email.data, password=form.password.data)
db.session.add(user)
db.session.commit()
return redirect(url_for('auth.login'))
return render_template('signup.html', form=form)
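# Hedged route summary (the blueprint is registered under its /auth prefix):
#   /auth/signup -> admin-only user creation
#   /auth/login  -> session login via Flask-Login
#   /auth/logout -> session logout
#   /auth/admin  -> one-time bootstrap of the 'admin' account (only if no users exist)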
| [
"[email protected]"
]
| |
9f64b6fde8ce5918644f9e426104b18db422e7c5 | f881c10e0d654da82218403dbd2adbdc606dc455 | /apps/user_login/models.py | 96fff17dd9d564cfa7fed5ca4f762658b6b74462 | []
| no_license | alialwahish/restfull_users | 1732dceeddf4367d678ff6cdf2668dbc95463182 | 24d00811b2b46b33e5cf5c311367bd153344dc70 | refs/heads/master | 2020-03-17T15:37:13.562082 | 2018-05-16T20:05:21 | 2018-05-16T20:05:21 | 133,717,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | from __future__ import unicode_literals
from django.db import models
class dojo(models.Model):
name=models.CharField(max_length=255)
city=models.CharField(max_length=255)
state=models.CharField(max_length=2)
class ninjas(models.Model):
first_name=models.CharField(max_length=255)
last_name=models.CharField(max_length=255)
    dojo = models.ForeignKey(dojo, on_delete=models.CASCADE, related_name="ninjas")  # on_delete must be a deletion handler such as models.CASCADE, not a bool
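# Illustrative ORM usage (hedged; assumes migrations have been applied):
#   d = dojo.objects.create(name='CodingDojo', city='Seattle', state='WA')
#   ninjas.objects.create(first_name='Ada', last_name='Lovelace', dojo=d)
#   d.ninjas.all()  # reverse lookup enabled by related_name="ninjas"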
| [
"[email protected]"
]
|