Dataset schema (one entry per column; ranges are the observed min-max shown by the dataset viewer):

| Column | Type | Observed range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3-281 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-57 |
| license_type | string | 2 classes |
| repo_name | string | length 6-116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k-668M, nullable |
| star_events_count | int64 | 0-102k |
| fork_events_count | int64 | 0-38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4-6.02M |
| extension | string | 78 classes |
| content | string | length 2-6.02M |
| authors | list | length 1 |
| author | string | length 0-175 |
c47b2a335796c963552a994e83d36361618a343d | 7073c53c421018bd1a2c8a14e55c61bcf7c8ed04 | /ask_1.py | c1ef86a87a8fd62f35b2730de5a77d797f38205a | []
| no_license | AntonisEkatommatis/1o_Eksamino_PYTHON | 0a0e075a2bff9705bca34ab3064ae58eab3305e1 | d9c019d62bed1ed71f9c4a441c0fb814ffa3a730 | refs/heads/master | 2020-04-21T14:40:08.915040 | 2019-02-12T19:32:55 | 2019-02-12T19:32:55 | 169,642,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py |
# Assignment 1
# Antonis Ekatommatis
# Introduction to Computer Science
# 1st Semester
# Function definition
def sumIntervals(L):
    a = []
    b = []
    asin = 0
    bsin = 0
    apot = 0
    # Put every interval's start point into list a
    for i in range(len(L)):
        a.append(L[i][0])
    # Put every interval's end point into list b
    for i in range(len(L)):
        b.append(L[i][1])
    # Bubble sort the intervals by their start points
    N = len(a)
    for i in range(1, N, 1):
        for j in range(N - 1, i - 1, -1):
            if a[j] < a[j - 1]:
                a[j], a[j - 1] = a[j - 1], a[j]
                b[j], b[j - 1] = b[j - 1], b[j]
    # Adjust the endpoints so overlapping stretches are not counted twice
    for i in range(1, len(a)):
        while a[i] < b[i - 1]:
            a[i] = a[i] + 1
    for i in range(len(a)):
        while a[i] > b[i]:
            b[i] = b[i] + 1
    for item in a:
        asin += item
    for item in b:
        bsin += item
    apot = bsin - asin
    return apot


print(sumIntervals([[1, 2], [6, 10], [11, 15]]))
print(sumIntervals([[1, 4], [7, 10], [3, 5]]))
print(sumIntervals([[1, 5], [10, 20], [1, 6], [16, 19], [5, 11]]))
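# Hand-checked expected output of the three calls above: 9, 7 and 19,
# i.e. the total covered length after merging overlapping intervals.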
| [
"[email protected]"
]
| |
9dae9e1cb02e03ac83133c64c0010ed526601e15 | bd9c74247381121f71f3dde6b55c67856f58e124 | /编程题/第4章-6 输出前 n 个Fibonacci数 (15分).py | d9428ceda6a9128e079ff4764f63ec641e09169e | []
| no_license | Redomeliu/Python | 302cd5abd89f7040911c8afb1db6faee6d43de64 | 9f5568ec59d30ce0f7d572d072b86088e933abc8 | refs/heads/master | 2023-01-05T19:51:00.795864 | 2020-10-29T02:42:36 | 2020-10-29T02:42:36 | 308,293,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py |
def Fibonacci(i):
    lis = [1, 1]
    n = 1
    while i > n:
        lis.append(lis[n] + lis[n - 1])
        n += 1
    return lis[i]


x = int(input())
count = 0
for i in range(x):
    count += 1
    print(f'{Fibonacci(i):>11d}', end="")
    if count == 5 or i == x - 1:
        print('\n')
        count = 0
if x < 1:
    print('Invalid.')
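# Sample run (hand-checked): input 7 prints the first seven Fibonacci numbers
# (1 1 2 3 5 8 13), right-aligned in 11-character fields, five per line, with
# a blank line after each full row (print('\n') emits two newlines).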
| [
"[email protected]"
]
| |
14c97cc76c6333d459e2b615402d70304853e1d8 | 520a9b3d11f4a4ce93d0927a8fd5c575252b3559 | /lib/python2.7/site-packages/sphinx/registry.py | cdae7722411e7bcd78f8a786996f665d8862a229 | [
"Apache-2.0"
]
| permissive | Larsende/F5_Agility_2018_Security_in_AWS | 90c7404962313b13cec63321e6fc38bdc9516dd0 | 1bebcf9d441a3e3b7348757fcbc83844fbb0132e | refs/heads/master | 2020-03-20T02:23:59.099742 | 2018-08-12T15:28:50 | 2018-08-12T15:28:50 | 137,111,587 | 0 | 2 | Apache-2.0 | 2018-08-03T21:19:48 | 2018-06-12T18:21:58 | Python | UTF-8 | Python | false | false | 15,315 | py |
# -*- coding: utf-8 -*-
"""
    sphinx.registry
    ~~~~~~~~~~~~~~~

    Sphinx component registry.

    :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
from __future__ import print_function

import traceback

from pkg_resources import iter_entry_points
from six import iteritems, itervalues, string_types

from sphinx.domains import ObjType
from sphinx.domains.std import GenericObject, Target
from sphinx.errors import ExtensionError, SphinxError, VersionRequirementError
from sphinx.extension import Extension
from sphinx.locale import __
from sphinx.parsers import Parser as SphinxParser
from sphinx.roles import XRefRole
from sphinx.util import import_object
from sphinx.util import logging
from sphinx.util.console import bold  # type: ignore
from sphinx.util.docutils import directive_helper

if False:
    # For type annotation
    from typing import Any, Callable, Dict, Iterator, List, Type, Union  # NOQA
    from docutils import nodes  # NOQA
    from docutils.io import Input  # NOQA
    from docutils.parsers import Parser  # NOQA
    from docutils.transforms import Transform  # NOQA
    from sphinx.application import Sphinx  # NOQA
    from sphinx.builders import Builder  # NOQA
    from sphinx.domains import Domain, Index  # NOQA
    from sphinx.environment import BuildEnvironment  # NOQA
    from sphinx.ext.autodoc import Documenter  # NOQA
    from sphinx.util.typing import RoleFunction  # NOQA

logger = logging.getLogger(__name__)

# list of deprecated extensions. Keys are extension name.
# Values are Sphinx version that merge the extension.
EXTENSION_BLACKLIST = {
    "sphinxjp.themecore": "1.2"
}  # type: Dict[unicode, unicode]


class SphinxComponentRegistry(object):
    def __init__(self):
        self.autodoc_attrgettrs = {}  # type: Dict[Type, Callable[[Any, unicode, Any], Any]]
        self.builders = {}  # type: Dict[unicode, Type[Builder]]
        self.documenters = {}  # type: Dict[unicode, Type[Documenter]]
        self.domains = {}  # type: Dict[unicode, Type[Domain]]
        self.domain_directives = {}  # type: Dict[unicode, Dict[unicode, Any]]
        self.domain_indices = {}  # type: Dict[unicode, List[Type[Index]]]
        self.domain_object_types = {}  # type: Dict[unicode, Dict[unicode, ObjType]]
        self.domain_roles = {}  # type: Dict[unicode, Dict[unicode, Union[RoleFunction, XRefRole]]]  # NOQA
        self.post_transforms = []  # type: List[Type[Transform]]
        self.source_parsers = {}  # type: Dict[unicode, Parser]
        self.source_inputs = {}  # type: Dict[unicode, Input]
        self.translators = {}  # type: Dict[unicode, nodes.NodeVisitor]
        self.transforms = []  # type: List[Type[Transform]]

    def add_builder(self, builder):
        # type: (Type[Builder]) -> None
        logger.debug('[app] adding builder: %r', builder)
        if not hasattr(builder, 'name'):
            raise ExtensionError(__('Builder class %s has no "name" attribute') % builder)
        if builder.name in self.builders:
            raise ExtensionError(__('Builder %r already exists (in module %s)') %
                                 (builder.name, self.builders[builder.name].__module__))
        self.builders[builder.name] = builder

    def preload_builder(self, app, name):
        # type: (Sphinx, unicode) -> None
        if name is None:
            return

        if name not in self.builders:
            entry_points = iter_entry_points('sphinx.builders', name)
            try:
                entry_point = next(entry_points)
            except StopIteration:
                raise SphinxError(__('Builder name %s not registered or available'
                                     ' through entry point') % name)

            self.load_extension(app, entry_point.module_name)

    def create_builder(self, app, name):
        # type: (Sphinx, unicode) -> Builder
        if name not in self.builders:
            raise SphinxError(__('Builder name %s not registered') % name)

        return self.builders[name](app)

    def add_domain(self, domain):
        # type: (Type[Domain]) -> None
        logger.debug('[app] adding domain: %r', domain)
        if domain.name in self.domains:
            raise ExtensionError(__('domain %s already registered') % domain.name)
        self.domains[domain.name] = domain

    def has_domain(self, domain):
        # type: (unicode) -> bool
        return domain in self.domains

    def create_domains(self, env):
        # type: (BuildEnvironment) -> Iterator[Domain]
        for DomainClass in itervalues(self.domains):
            domain = DomainClass(env)

            # transplant components added by extensions
            domain.directives.update(self.domain_directives.get(domain.name, {}))
            domain.roles.update(self.domain_roles.get(domain.name, {}))
            domain.indices.extend(self.domain_indices.get(domain.name, []))
            for name, objtype in iteritems(self.domain_object_types.get(domain.name, {})):
                domain.add_object_type(name, objtype)

            yield domain

    def override_domain(self, domain):
        # type: (Type[Domain]) -> None
        logger.debug('[app] overriding domain: %r', domain)
        if domain.name not in self.domains:
            raise ExtensionError(__('domain %s not yet registered') % domain.name)
        if not issubclass(domain, self.domains[domain.name]):
            raise ExtensionError(__('new domain not a subclass of registered %s '
                                    'domain') % domain.name)
        self.domains[domain.name] = domain

    def add_directive_to_domain(self, domain, name, obj,
                                has_content=None, argument_spec=None, **option_spec):
        # type: (unicode, unicode, Any, bool, Any, Any) -> None
        logger.debug('[app] adding directive to domain: %r',
                     (domain, name, obj, has_content, argument_spec, option_spec))
        if domain not in self.domains:
            raise ExtensionError(__('domain %s not yet registered') % domain)
        directives = self.domain_directives.setdefault(domain, {})
        directives[name] = directive_helper(obj, has_content, argument_spec, **option_spec)

    def add_role_to_domain(self, domain, name, role):
        # type: (unicode, unicode, Union[RoleFunction, XRefRole]) -> None
        logger.debug('[app] adding role to domain: %r', (domain, name, role))
        if domain not in self.domains:
            raise ExtensionError(__('domain %s not yet registered') % domain)
        roles = self.domain_roles.setdefault(domain, {})
        roles[name] = role

    def add_index_to_domain(self, domain, index):
        # type: (unicode, Type[Index]) -> None
        logger.debug('[app] adding index to domain: %r', (domain, index))
        if domain not in self.domains:
            raise ExtensionError(__('domain %s not yet registered') % domain)
        indices = self.domain_indices.setdefault(domain, [])
        indices.append(index)

    def add_object_type(self, directivename, rolename, indextemplate='',
                        parse_node=None, ref_nodeclass=None, objname='',
                        doc_field_types=[]):
        # type: (unicode, unicode, unicode, Callable, nodes.Node, unicode, List) -> None
        logger.debug('[app] adding object type: %r',
                     (directivename, rolename, indextemplate, parse_node,
                      ref_nodeclass, objname, doc_field_types))

        # create a subclass of GenericObject as the new directive
        directive = type(directivename,  # type: ignore
                         (GenericObject, object),
                         {'indextemplate': indextemplate,
                          'parse_node': staticmethod(parse_node),
                          'doc_field_types': doc_field_types})

        self.add_directive_to_domain('std', directivename, directive)
        self.add_role_to_domain('std', rolename, XRefRole(innernodeclass=ref_nodeclass))

        object_types = self.domain_object_types.setdefault('std', {})
        object_types[directivename] = ObjType(objname or directivename, rolename)

    def add_crossref_type(self, directivename, rolename, indextemplate='',
                          ref_nodeclass=None, objname=''):
        # type: (unicode, unicode, unicode, nodes.Node, unicode) -> None
        logger.debug('[app] adding crossref type: %r',
                     (directivename, rolename, indextemplate, ref_nodeclass, objname))

        # create a subclass of Target as the new directive
        directive = type(directivename,  # type: ignore
                         (Target, object),
                         {'indextemplate': indextemplate})

        self.add_directive_to_domain('std', directivename, directive)
        self.add_role_to_domain('std', rolename, XRefRole(innernodeclass=ref_nodeclass))

        object_types = self.domain_object_types.setdefault('std', {})
        object_types[directivename] = ObjType(objname or directivename, rolename)

    def add_source_parser(self, suffix, parser):
        # type: (unicode, Type[Parser]) -> None
        logger.debug('[app] adding search source_parser: %r, %r', suffix, parser)
        if suffix in self.source_parsers:
            raise ExtensionError(__('source_parser for %r is already registered') % suffix)
        self.source_parsers[suffix] = parser

    def get_source_parser(self, filename):
        # type: (unicode) -> Type[Parser]
        for suffix, parser_class in iteritems(self.source_parsers):
            if filename.endswith(suffix):
                break
        else:
            # use special parser for unknown file-extension '*' (if exists)
            parser_class = self.source_parsers.get('*')

        if parser_class is None:
            raise SphinxError(__('source_parser for %s not registered') % filename)
        else:
            if isinstance(parser_class, string_types):
                parser_class = import_object(parser_class, 'source parser')  # type: ignore
            return parser_class

    def get_source_parsers(self):
        # type: () -> Dict[unicode, Parser]
        return self.source_parsers

    def create_source_parser(self, app, filename):
        # type: (Sphinx, unicode) -> Parser
        parser_class = self.get_source_parser(filename)
        parser = parser_class()
        if isinstance(parser, SphinxParser):
            parser.set_application(app)
        return parser

    def add_source_input(self, input_class):
        # type: (Type[Input]) -> None
        for filetype in input_class.supported:
            if filetype in self.source_inputs:
                raise ExtensionError(__('source_input for %r is already registered') %
                                     filetype)
            self.source_inputs[filetype] = input_class

    def get_source_input(self, filename):
        # type: (unicode) -> Type[Input]
        parser = self.get_source_parser(filename)
        for filetype in parser.supported:
            if filetype in self.source_inputs:
                input_class = self.source_inputs[filetype]
                break
        else:
            # use special source_input for unknown file-type '*' (if exists)
            input_class = self.source_inputs.get('*')

        if input_class is None:
            raise SphinxError(__('source_input for %s not registered') % filename)
        else:
            return input_class

    def add_translator(self, name, translator):
        # type: (unicode, Type[nodes.NodeVisitor]) -> None
        logger.info(bold(__('Change of translator for the %s builder.') % name))
        self.translators[name] = translator

    def get_translator_class(self, builder):
        # type: (Builder) -> Type[nodes.NodeVisitor]
        return self.translators.get(builder.name,
                                    builder.default_translator_class)

    def create_translator(self, builder, document):
        # type: (Builder, nodes.Node) -> nodes.NodeVisitor
        translator_class = self.get_translator_class(builder)
        return translator_class(builder, document)

    def add_transform(self, transform):
        # type: (Type[Transform]) -> None
        logger.debug('[app] adding transform: %r', transform)
        self.transforms.append(transform)

    def get_transforms(self):
        # type: () -> List[Type[Transform]]
        return self.transforms

    def add_post_transform(self, transform):
        # type: (Type[Transform]) -> None
        logger.debug('[app] adding post transform: %r', transform)
        self.post_transforms.append(transform)

    def get_post_transforms(self):
        # type: () -> List[Type[Transform]]
        return self.post_transforms

    def add_documenter(self, objtype, documenter):
        # type: (unicode, Type[Documenter]) -> None
        self.documenters[objtype] = documenter

    def add_autodoc_attrgetter(self, typ, attrgetter):
        # type: (Type, Callable[[Any, unicode, Any], Any]) -> None
        self.autodoc_attrgettrs[typ] = attrgetter

    def load_extension(self, app, extname):
        # type: (Sphinx, unicode) -> None
        """Load a Sphinx extension."""
        if extname in app.extensions:  # alread loaded
            return
        if extname in EXTENSION_BLACKLIST:
            logger.warning(__('the extension %r was already merged with Sphinx since '
                              'version %s; this extension is ignored.'),
                           extname, EXTENSION_BLACKLIST[extname])
            return

        # update loading context
        app._setting_up_extension.append(extname)

        try:
            mod = __import__(extname, None, None, ['setup'])
        except ImportError as err:
            logger.verbose(__('Original exception:\n') + traceback.format_exc())
            raise ExtensionError(__('Could not import extension %s') % extname, err)

        if not hasattr(mod, 'setup'):
            logger.warning(__('extension %r has no setup() function; is it really '
                              'a Sphinx extension module?'), extname)
            metadata = {}  # type: Dict[unicode, Any]
        else:
            try:
                metadata = mod.setup(app)
            except VersionRequirementError as err:
                # add the extension name to the version required
                raise VersionRequirementError(
                    __('The %s extension used by this project needs at least '
                       'Sphinx v%s; it therefore cannot be built with this '
                       'version.') % (extname, err)
                )

        if metadata is None:
            metadata = {}
            if extname == 'rst2pdf.pdfbuilder':
                metadata['parallel_read_safe'] = True
        elif not isinstance(metadata, dict):
            logger.warning(__('extension %r returned an unsupported object from '
                              'its setup() function; it should return None or a '
                              'metadata dictionary'), extname)
            metadata = {}

        app.extensions[extname] = Extension(extname, mod, **metadata)
        app._setting_up_extension.pop()
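# Illustrative sketch (added, not part of the original module): this registry
# is populated indirectly through the Sphinx application API, e.g. by an
# extension's setup() function:
#
#     def setup(app):
#         app.add_builder(MyBuilder)  # forwarded to SphinxComponentRegistry.add_builder
#         return {'version': '0.1', 'parallel_read_safe': True}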
| [
"[email protected]"
]
| |
fd6eff07502fb4045b9c9bea91c6e2e5360f0a6c | dfac09701ae836ca8ff682ac741535eb84fec3af | /Dasha/modules/info.py | e291873d2b028cd6961d07b17aaaa10885835902 | []
| no_license | Srinath2006/Dasha | a166c2274e15e0b7a73a7216ae0a533843647f1d | 54a2025c2cea0f89c322249578c271d132b90fd0 | refs/heads/main | 2023-08-23T02:23:26.245367 | 2021-11-02T14:28:39 | 2021-11-02T14:28:39 | 423,876,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,235 | py |
from Dasha import ubot
from Dasha.events import dasha
from . import get_user
from telethon.tl.functions.users import GetFullUserRequest


@dasha(pattern="^/info ?(.*)")
async def new(event):
    if not event.reply_to_msg_id and not event.pattern_match.group(1):
        user = await ubot.get_entity(event.sender_id)
    else:
        try:
            user, extra = await get_user(event)
        except TypeError as e:
            print(e)
            pass
    user_id = user.id
    first_name = user.first_name
    last_name = user.last_name
    username = user.username
    text = "╒═══「<b>User info</b>:\n"
    if first_name:
        text += f"<b>First Name:</b> {first_name}\n"
    if last_name:
        text += f"<b>Last Name:</b> {last_name}\n"
    ups = None
    if username:
        text += f"<b>Username:</b> @{username}\n"
        ups = await event.client(GetFullUserRequest(user.username))
    text += f"<b>ID:</b> <code>{user_id}</code>\n"
    text += f'<b>User link:</b> <a href="tg://user?id={user_id}">{first_name}</a>'
    if ups:
        text += f"\n\n<b>Bio:</b> <code>{ups.about}</code>"
        text += f"\n\n<b>Gbanned: No</b>"
        text += f"\n\n╘══「 <b>Groups count:</b> {ups.common_chats_count} 」"
    await event.edit(text, parse_mode='html')


@dasha(pattern="^/id ?(.*)")
async def _t(event):
    if not event.reply_to_msg_id and not event.pattern_match.group(1):
        user = await ubot.get_entity(event.sender_id)
    else:
        try:
            user, extra = await get_user(event)
        except TypeError as e:
            print(e)
            pass
    user_id = user.id
    chat_id = event.chat_id
    msg_id = event.id
    event_id = event.id
    c_id = str(chat_id).replace('-100', '')
    if event.reply_to_msg_id:
        event_id = event.reply_to_msg_id
    text = f"**[Chat ID]**(http://t.me/{event.chat.username}) : `{chat_id}`\n"
    text += f"**[Message ID]**(http://t.me/c/{c_id}/{event_id}) : `{event_id}`\n"
    text += f"**[User ID]**(tg://user?id={user_id}) : `{user_id}`"
    if event.reply_to_msg_id:
        msg = await event.get_reply_message()
        if msg.sticker:
            type = "Sticker"
        elif msg.audio:
            type = "Audio"
        elif msg.gif:
            type = "Gif"
        elif msg.video:
            type = "Video"
        elif msg.media:
            type = "Media"
        if msg.media:
            file_id = msg.file.id
            text += f"\n\n**Media Type:** `{type}`\n"
            text += f"**Fid:** `{file_id}`"
    await event.edit(text)
| [
"[email protected]"
]
| |
88842d784deeecde1c87e82ab837462e8ead03f9 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /ZZsnGAjYLyosG9zmH_12.py | aa9e707c08223592b4481ac84b90ac438ecda630 | []
| no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py |
def flash(fc):
var1, oper, var2 = fc
return var1 + var2 if oper == '+' else var1 - var2 if oper == '-' else var1 * var2 if oper == 'x' else round(var1 / var2, 2) if oper == '/' and var2 else None
| [
"[email protected]"
]
| |
da9bfaca059b92580491b71e850ef87666555b90 | 9baa968c71efebb42c70c130de930cb4810ded31 | /OS_IMUDataConversion.py | 05e2d59dff4bb385de0ff7c99c81dc6fa852ff76 | []
| no_license | jravenhorst909/MoveShelf_IMU_analyser | 02cdc0e7f54f9120611106314ad8c055dfb7ad22 | cadc3c405b4f61f4d44a29783824889e1f204360 | refs/heads/main | 2023-02-12T20:46:16.826991 | 2021-01-06T10:21:20 | 2021-01-06T10:21:20 | 311,619,156 | 8 | 1 | null | 2021-01-06T10:21:21 | 2020-11-10T10:12:03 | Python | UTF-8 | Python | false | false | 1,582 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 8 13:28:40 2020
@author: Joris Ravenhorst
"""
import opensim as osim
def IMUdata_conversion(trial_dir_path,TrialName):
# Build an Xsens Settings Object.
# Instantiate the Reader Settings Class
xsensSettings = osim.XsensDataReaderSettings('myIMUMappings.xml')
# Instantiate an XsensDataReader
xsens = osim.XsensDataReader(xsensSettings)
# Read in seprate tables of data from the specified IMU file(s)
tables = xsens.read(trial_dir_path)
# get the trial name from the settings
trialID = xsensSettings.get_trial_prefix()
# Get Orientation Data as quaternions
quatTable = xsens.getOrientationsTable(tables)
# Write to file
osim.STOFileAdapterQuaternion.write(quatTable, trial_dir_path +'/'+ trialID + '_orientations.sto')
# OPTIONAL:
# # Get Acceleration Data
# accelTable = xsens.getLinearAccelerationsTable(tables)
# # Write to file
# osim.STOFileAdapterVec3.write(accelTable, trial_dir_path +'/'+ trialID + '_linearAccelerations.sto')
# # Get Magnetic (North) Heading Data
# magTable = xsens.getMagneticHeadingTable(tables)
# # Write to file
# osim.STOFileAdapterVec3.write(magTable, trial_dir_path +'/'+ trialID + '_magneticNorthHeadings.sto')
# # Get Angular Velocity Data
# angVelTable = xsens.getAngularVelocityTable(tables)
# # Write to file
# osim.STOFileAdapterVec3.write(angVelTable, trial_dir_path +'/'+ trialID + '_angularVelocities.sto')
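    # Example call (hypothetical paths; note the TrialName argument is
    # accepted but never used by this function):
    # IMUdata_conversion('IMUData/trial01', 'trial01')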
    return trialID
| [
"[email protected]"
]
| |
bec5d5fbb09b6260d514209bc438f344d215832b | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp/sblp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=30/sched.py | a85202e958d39e172c17afa700742b708255c6d6 | []
| no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py |
-S 0 -X RUN -Q 0 -L 2 84 250
-S 1 -X RUN -Q 0 -L 2 80 250
-S 0 -X RUN -Q 0 -L 2 74 250
-S 0 -X RUN -Q 0 -L 2 59 250
-S 2 -X RUN -Q 1 -L 1 57 200
-S 2 -X RUN -Q 1 -L 1 48 175
-S 2 -X RUN -Q 1 -L 1 40 125
-S 2 -X RUN -Q 1 -L 1 33 300
-S 3 -X RUN -Q 2 -L 1 29 100
-S 3 -X RUN -Q 2 -L 1 27 125
-S 3 -X RUN -Q 2 -L 1 21 100
-S 3 -X RUN -Q 2 -L 1 19 150
-S 4 -X RUN -Q 3 -L 1 19 100
-S 4 -X RUN -Q 3 -L 1 15 100
-S 4 -X RUN -Q 3 -L 1 14 100
| [
"[email protected]"
]
| |
74be8d4d0984d5ac6a6f034cc22a1d68d58228f7 | 369e4a831387a477a1e37b995b264504e6ed91bd | /nomadiq_app/run.py | 66275b06c174c1dd71971b802216f699235dbd0b | []
| no_license | geneahn/nomadiq_repo | 2544762c507e0121c284b979240c52fca36a4f68 | baba67426f940f9de5f3b7cb0519a496467cd087 | refs/heads/master | 2020-04-21T22:38:43.380259 | 2019-04-16T07:08:21 | 2019-04-16T07:08:21 | 169,919,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py |
from nomadiq_app import app as application

if __name__ == '__main__':
    application.run(debug=True)
| [
"[email protected]"
]
| |
6dcb83acc4bf4107889ebbe1f7af8432518e95eb | 3a022998c1fa28408e6d38845f29730579d22a20 | /rootsofcube.py | 88f22babfd0f3217b9e57c2e5a715534a2a49d56 | []
| no_license | nandishaivalli/APS2020 | 1cd69fbade6fd2069f6b72dc5e8815d905c73836 | 57bcbe919c92692c61093e01304815c0f29222dc | refs/heads/master | 2020-12-20T20:58:23.523106 | 2020-05-16T07:44:33 | 2020-05-16T07:44:33 | 236,208,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py |
# Using a formula, find the roots of a cubic equation
a = 3
b = 4
c = 4
d = 3
# ax^3 + bx^2 + cx + d = 0
p = (-1*b)/(3*a)
q = p**3 + (b*c-3*a*d)/(6*a**2)
r = c/(3*a)
x = (q+(q**2 + (r-p**2)**3)**0.5)**(1/3) + (q-(q**2 + (r-p**2)**3)**0.5)**(1/3) + p
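# Note (added): this is Cardano's closed-form solution for the cubic; in
# Python 3, raising a negative float to the power (1/3) yields a complex
# number, so x can come out complex even when the cubic has a real root.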
print(x)
| [
"[email protected]"
]
| |
5893049dfab4f9e7702c5a3117f4468d5c72a98f | 27bd7769798502bccbbc4b1bbc34e22d17f17d98 | /regressao_linear.py | 6dd8db7e2851e9a8717ad91bfc2f4b32d1eb00d7 | []
| no_license | jcclark/regressao_linear | 03f3bfd759de3e629788d7ba6891f081ae41a667 | bcf27dd810eb3916809b3683098ae2c3bd4dc619 | refs/heads/master | 2020-08-01T09:29:57.254610 | 2019-10-05T22:35:14 | 2019-10-05T22:35:14 | 210,948,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,757 | py |
import csv
import math
from random import randint
import matplotlib.pyplot as plt


# Fit and plot a simple linear regression of exam average against one feature
def modelo(data, tipo, i):
    dados = dv_dados(data, tipo)
    b_treino, b_teste = dv_bases(dados, i)
    b_0, b_1 = regressao_linear(b_treino, tipo)
    x = [d[0] for d in dados]
    y = [(b_0 + (d[0] * b_1)) for d in dados]
    desvio = desvio_padrao(b_teste, b_0, b_1)
    print("Desvio padrão: " + str(round(desvio, 2)))
    plt.title('Média Provas x ' + tipo)
    plt.xlabel(tipo.title())
    plt.ylabel('Média provas')
    plt.scatter(x, y)
    plt.plot(x, y)
    plt.show()


# Sum of squared errors of the fitted line over the test set
def desvio_padrao(b_teste, b_0, b_1):
    desvio = 0
    for d in b_teste:
        y = d[1]
        fx = (b_0 + (d[0] * b_1))
        desvio += (y - fx) ** 2
    return desvio


# Ordinary least-squares coefficients fitted on the training set
def regressao_linear(b_treino, type):
    N = len(b_treino)
    x = somatorio(b_treino, 'x')
    y = somatorio(b_treino, 'y')
    xy = somatorio(b_treino, 'xy')
    x1 = somatorio(b_treino, 'x2')
    b_1 = ((x * y) - (N * xy)) / ((x ** 2) - (N * x1))
    b_0 = (y - (b_1 * x)) / N
    return b_0, b_1


# Summation over the pairs: x, y, x*y or x^2 depending on 'tipo'
def somatorio(l_n, tipo):
    numeros = []
    for t in l_n:
        if tipo == 'x':
            a = t[0]
        elif tipo == 'y':
            a = t[1]
        elif tipo == 'xy':
            a = t[0] * t[1]
        elif tipo == 'x2':
            a = t[0] ** 2
        else:
            a = 1
            print('Erro')
        numeros.append(a)
    return sum(numeros)


# Extract (feature, exam average) pairs for the chosen feature
def dv_dados(data, tipo):
    res = []
    for item in data:
        if tipo == "Idade":
            x = item.get("Idade")
        elif tipo == "Tempo de Estudo":
            x = item.get("Tempo de Estudo")
        elif tipo == "Faltas":
            x = item.get("Faltas")
        y = item.get("MediaProvas")
        res.append((int(x), int(y)))
    return res


# Random 70/30 train/test split
def dv_bases(dados, i):
    p_treino = []
    while len(p_treino) < round(i * 0.7):
        posicao = randint(0, i - 1)
        if posicao not in p_treino:
            p_treino.append(posicao)
    d_treino = [dados[p] for p in p_treino]
    # Bug fix: the original rebound d_treino here and returned it twice,
    # discarding the training rows; the held-out rows are the test set.
    d_teste = [dados[p] for p in range(len(dados)) if p not in p_treino]
    return d_treino, d_teste


def excutar():
    data = []
    with open("AnaliseEstudo.csv") as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=";")
        for row in csv_reader:
            if row[0] != "Idade":
                media = (int(row[3]) + int(row[4]) + int(row[5])) / 3
                aux = {
                    "Idade": row[0],
                    "Tempo de Estudo": row[1],
                    "Faltas": row[2],
                    "MediaProvas": media
                }
                data.append(aux)
    for tipo in ["Idade", "Tempo de Estudo", "Faltas"]:
        modelo(data, tipo, len(data))
excutar()
| [
"[email protected]"
]
| |
da11437adf2aba52e01ffabe242c48711dbfe401 | d0a54183ad20c3e1bfb3d70d118b3a2ccf9256be | /pylearn2/pylearn2/training_algorithms/bgd.py | 86cdd585642ea1c5ac01de3c8ab7785692360024 | [
"BSD-3-Clause"
]
| permissive | julius506/pylearn2 | 93973fafb80ccd724c9ec16d6f0dcab0544acbcb | 9134a6438e954cf5d8a1684ef8f5e2767549d6bc | refs/heads/master | 2020-05-18T08:52:43.499030 | 2014-12-03T05:26:25 | 2014-12-03T05:26:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,764 | py |
"""
Module for performing batch gradient methods.

Technically, SGD and BGD both work with any batch size, but SGD has no line
search functionality and is thus best suited to small batches, while BGD
supports line searches and thus works best with large batches.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"

import logging
import warnings

import numpy as np

from theano import config

from pylearn2.compat import OrderedDict
from pylearn2.monitor import Monitor
from pylearn2.optimization.batch_gradient_descent import BatchGradientDescent
from pylearn2.utils.iteration import is_stochastic
from pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm
from pylearn2.utils import safe_zip
from pylearn2.train_extensions import TrainExtension
from pylearn2.termination_criteria import TerminationCriterion
from pylearn2.utils import sharedX
from pylearn2.space import CompositeSpace, NullSpace
from pylearn2.utils.data_specs import DataSpecsMapping
from pylearn2.utils.rng import make_np_rng

logger = logging.getLogger(__name__)


class BGD(TrainingAlgorithm):
    """
    Batch Gradient Descent training algorithm class

    Parameters
    ----------
    cost : pylearn2.costs.Cost, optional
        A pylearn2 Cost, or None, in which case model.get_default_cost()
        will be used
    batch_size : int, optional
        Like the SGD TrainingAlgorithm, this TrainingAlgorithm still
        iterates over minibatches of data. The difference is that this
        class uses partial line searches to choose the step size along
        each gradient direction, and can do repeated updates on the same
        batch. The assumption is that you use big enough minibatches with
        this algorithm that a large step size will generalize reasonably
        well to other minibatches. To implement true Batch Gradient
        Descent, set the batch_size to the total number of examples
        available. If batch_size is None, it will revert to the model's
        force_batch_size attribute.
    batches_per_iter : int, optional
        WRITEME
    updates_per_batch : int, optional
        Passed through to the optimization.BatchGradientDescent's
        `max_iters parameter`
    monitoring_batch_size : int
        Size of monitoring batches.
    monitoring_batches : WRITEME
    monitoring_dataset : Dataset or dict, optional
        A Dataset or a dictionary mapping string dataset names to Datasets
    termination_criterion : WRITEME
    set_batch_size : bool, optional
        If True, BGD will attempt to override the model's
        `force_batch_size` attribute by calling set_batch_size on it.
    reset_alpha : bool, optional
        Passed through to the optimization.BatchGradientDescent's
        `reset_alpha` parameter
    conjugate : bool, optional
        Passed through to the optimization.BatchGradientDescent's
        `conjugate` parameter
    min_init_alpha : float, optional
        WRITEME
    reset_conjugate : bool, optional
        Passed through to the optimization.BatchGradientDescent's
        `reset_conjugate` parameter
    line_search_mode : WRITEME
    verbose_optimization : bool, optional
        WRITEME
    scale_step : float, optional
        WRITEME
    theano_function_mode : WRITEME
    init_alpha : WRITEME
    seed : WRITEME
    """

    def __init__(self, cost=None, batch_size=None, batches_per_iter=None,
                 updates_per_batch=10, monitoring_batch_size=None,
                 monitoring_batches=None, monitoring_dataset=None,
                 termination_criterion=None, set_batch_size=False,
                 reset_alpha=True, conjugate=False, min_init_alpha=.001,
                 reset_conjugate=True, line_search_mode=None,
                 verbose_optimization=False, scale_step=1.,
                 theano_function_mode=None, init_alpha=None, seed=None):
        self.__dict__.update(locals())
        del self.self

        if monitoring_dataset is None:
            assert monitoring_batches is None
            assert monitoring_batch_size is None

        self._set_monitoring_dataset(monitoring_dataset)

        self.bSetup = False
        self.termination_criterion = termination_criterion
        self.rng = make_np_rng(seed, [2012, 10, 16],
                               which_method=["randn", "randint"])

    def setup(self, model, dataset):
        """
        Allows the training algorithm to do some preliminary configuration
        *before* we actually start training the model. The dataset is provided
        in case other derived training algorithms need to modify model based on
        the dataset.

        Parameters
        ----------
        model : object
            A Python object representing the model to train. Loosely
            implementing the interface of models.model.Model.
        dataset : pylearn2.datasets.dataset.Dataset
            Dataset object used to draw training data
        """
        self.model = model

        if self.cost is None:
            self.cost = model.get_default_cost()

        try:
            if self.cost.is_stochastic():
                raise TypeError("BGD is not compatible with stochastic "
                                "costs.")
        except NotImplementedError:
            warnings.warn("BGD is not compatible with stochastic costs "
                          "and cannot determine whether the current cost is "
                          "stochastic.")

        if self.batch_size is None:
            self.batch_size = model.force_batch_size
        else:
            batch_size = self.batch_size
            if self.set_batch_size:
                model.set_batch_size(batch_size)
            elif hasattr(model, 'force_batch_size'):
                if not (model.force_batch_size <= 0 or batch_size ==
                        model.force_batch_size):
                    raise ValueError("batch_size is %d but " +
                                     "model.force_batch_size is %d" %
                                     (batch_size, model.force_batch_size))

        self.monitor = Monitor.get_monitor(model)
        self.monitor.set_theano_function_mode(self.theano_function_mode)

        data_specs = self.cost.get_data_specs(model)
        mapping = DataSpecsMapping(data_specs)
        space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
        source_tuple = mapping.flatten(data_specs[1], return_tuple=True)

        # Build a flat tuple of Theano Variables, one for each space,
        # named according to the sources.
        theano_args = []
        for space, source in safe_zip(space_tuple, source_tuple):
            name = 'BGD_[%s]' % source
            arg = space.make_theano_batch(name=name)
            theano_args.append(arg)
        theano_args = tuple(theano_args)

        # Methods of `self.cost` need args to be passed in a format compatible
        # with their data_specs
        nested_args = mapping.nest(theano_args)
        fixed_var_descr = self.cost.get_fixed_var_descr(model, nested_args)
        self.on_load_batch = fixed_var_descr.on_load_batch

        cost_value = self.cost.expr(model, nested_args,
                                    ** fixed_var_descr.fixed_vars)
        grads, grad_updates = self.cost.get_gradients(
            model, nested_args, ** fixed_var_descr.fixed_vars)

        assert isinstance(grads, OrderedDict)
        assert isinstance(grad_updates, OrderedDict)

        if cost_value is None:
            raise ValueError("BGD is incompatible with " + str(self.cost) +
                             " because it is intractable, but BGD uses the " +
                             "cost function value to do line searches.")

        # obj_prereqs has to be a list of function f called with f(*data),
        # where data is a data tuple coming from the iterator.
        # this function enables capturing "mapping" and "f", while
        # enabling the "*data" syntax
        def capture(f, mapping=mapping):
            new_f = lambda *args: f(mapping.flatten(args, return_tuple=True))
            return new_f

        obj_prereqs = [capture(f) for f in fixed_var_descr.on_load_batch]

        if self.monitoring_dataset is not None:
            if (self.monitoring_batch_size is None and
                    self.monitoring_batches is None):
                self.monitoring_batch_size = self.batch_size
                self.monitoring_batches = self.batches_per_iter
            self.monitor.setup(
                dataset=self.monitoring_dataset,
                cost=self.cost,
                batch_size=self.monitoring_batch_size,
                num_batches=self.monitoring_batches,
                obj_prereqs=obj_prereqs,
                cost_monitoring_args=fixed_var_descr.fixed_vars)

        params = model.get_params()

        self.optimizer = BatchGradientDescent(
            objective=cost_value,
            gradients=grads,
            gradient_updates=grad_updates,
            params=params,
            param_constrainers=[model.modify_updates],
            lr_scalers=model.get_lr_scalers(),
            inputs=theano_args,
            verbose=self.verbose_optimization,
            max_iter=self.updates_per_batch,
            reset_alpha=self.reset_alpha,
            conjugate=self.conjugate,
            reset_conjugate=self.reset_conjugate,
            min_init_alpha=self.min_init_alpha,
            line_search_mode=self.line_search_mode,
            theano_function_mode=self.theano_function_mode,
            init_alpha=self.init_alpha)

        # These monitoring channels keep track of shared variables,
        # which do not need inputs nor data.
        if self.monitoring_dataset is not None:
            self.monitor.add_channel(
                name='ave_step_size',
                ipt=None,
                val=self.optimizer.ave_step_size,
                data_specs=(NullSpace(), ''),
                dataset=self.monitoring_dataset.values()[0])
            self.monitor.add_channel(
                name='ave_grad_size',
                ipt=None,
                val=self.optimizer.ave_grad_size,
                data_specs=(NullSpace(), ''),
                dataset=self.monitoring_dataset.values()[0])
            self.monitor.add_channel(
                name='ave_grad_mult',
                ipt=None,
                val=self.optimizer.ave_grad_mult,
                data_specs=(NullSpace(), ''),
                dataset=self.monitoring_dataset.values()[0])

        self.first = True
        self.bSetup = True

    def train(self, dataset):
        """
        .. todo::

            WRITEME
        """
        assert self.bSetup
        model = self.model

        rng = self.rng
        train_iteration_mode = 'shuffled_sequential'
        if not is_stochastic(train_iteration_mode):
            rng = None

        data_specs = self.cost.get_data_specs(self.model)
        # The iterator should be built from flat data specs, so it returns
        # flat, non-redundent tuples of data.
        mapping = DataSpecsMapping(data_specs)
        space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
        source_tuple = mapping.flatten(data_specs[1], return_tuple=True)
        if len(space_tuple) == 0:
            # No data will be returned by the iterator, and it is impossible
            # to know the size of the actual batch.
            # It is not decided yet what the right thing to do should be.
            raise NotImplementedError("Unable to train with BGD, because "
                                      "the cost does not actually use data "
                                      "from the data set. "
                                      "data_specs: %s" % str(data_specs))
        flat_data_specs = (CompositeSpace(space_tuple), source_tuple)

        iterator = dataset.iterator(mode=train_iteration_mode,
                                    batch_size=self.batch_size,
                                    num_batches=self.batches_per_iter,
                                    data_specs=flat_data_specs,
                                    return_tuple=True,
                                    rng=rng)

        mode = self.theano_function_mode
        for data in iterator:
            if ('targets' in source_tuple and mode is not None
                    and hasattr(mode, 'record')):
                Y = data[source_tuple.index('targets')]
                stry = str(Y).replace('\n', ' ')
                mode.record.handle_line('data Y ' + stry + '\n')

            for on_load_batch in self.on_load_batch:
                on_load_batch(mapping.nest(data))

            self.before_step(model)
            self.optimizer.minimize(*data)
            self.after_step(model)
            actual_batch_size = flat_data_specs[0].np_batch_size(data)
            model.monitor.report_batch(actual_batch_size)

    def continue_learning(self, model):
        """
        .. todo::

            WRITEME
        """
        if self.termination_criterion is None:
            return True
        else:
            rval = self.termination_criterion.continue_learning(self.model)
            assert rval in [True, False, 0, 1]
            return rval

    def before_step(self, model):
        """
        .. todo::

            WRITEME
        """
        if self.scale_step != 1.:
            self.params = list(model.get_params())
            self.value = [param.get_value() for param in self.params]

    def after_step(self, model):
        """
        .. todo::

            WRITEME
        """
        if self.scale_step != 1:
            for param, value in safe_zip(self.params, self.value):
                value = (1. - self.scale_step) * value + self.scale_step \
                    * param.get_value()
                param.set_value(value)


class StepShrinker(TrainExtension, TerminationCriterion):
    """
    .. todo::

        WRITEME
    """

    def __init__(self, channel, scale, giveup_after, scale_up=1.,
                 max_scale=1.):
        self.__dict__.update(locals())
        del self.self
        self.continue_learning = True
        self.first = True
        self.prev = np.inf

    def on_monitor(self, model, dataset, algorithm):
        """
        .. todo::

            WRITEME
        """
        monitor = model.monitor

        if self.first:
            self.first = False
            self.monitor_channel = sharedX(algorithm.scale_step)
            # TODO: make monitor accept channels not associated with any
            # dataset,
            # so this hack won't be necessary
            hack = monitor.channels.values()[0]
            monitor.add_channel('scale_step', hack.graph_input,
                                self.monitor_channel, dataset=hack.dataset,
                                data_specs=hack.data_specs)
        channel = monitor.channels[self.channel]
        v = channel.val_record
        if len(v) == 1:
            return
        latest = v[-1]
        logger.info("Latest {0}: {1}".format(self.channel, latest))
        # Only compare to the previous step, not the best step so far
        # Another extension can be in charge of saving the best parameters ever
        # seen. We want to keep learning as long as we're making progress. We
        # don't want to give up on a step size just because it failed to undo
        # the damage of the bigger one that preceded it in a single epoch
        logger.info("Previous is {0}".format(self.prev))
        cur = algorithm.scale_step
        if latest >= self.prev:
            logger.info("Looks like using {0} "
                        "isn't working out so great for us.".format(cur))
            cur *= self.scale
            if cur < self.giveup_after:
                logger.info("Guess we just have to give up.")
                self.continue_learning = False
                cur = self.giveup_after
            logger.info("Let's see how {0} does.".format(cur))
        elif latest <= self.prev and self.scale_up != 1.:
            logger.info("Looks like we're making progress "
                        "on the validation set, let's try speeding up")
            cur *= self.scale_up
            if cur > self.max_scale:
                cur = self.max_scale
            logger.info("New scale is {0}".format(cur))
        algorithm.scale_step = cur
        self.monitor_channel.set_value(np.cast[config.floatX](cur))
        self.prev = latest

    def __call__(self, model):
        """
        .. todo::

            WRITEME
        """
        return self.continue_learning


class ScaleStep(TrainExtension):
    """
    .. todo::

        WRITEME
    """

    def __init__(self, scale, min_value):
        self.scale = scale
        self.min_value = min_value
        self.first = True

    def on_monitor(self, model, dataset, algorithm):
        """
        .. todo::

            WRITEME
        """
        if self.first:
            monitor = model.monitor
            self.first = False
            self.monitor_channel = sharedX(algorithm.scale_step)
            # TODO: make monitor accept channels not associated with any
            # dataset,
            # so this hack won't be necessary
            hack = monitor.channels.values()[0]
            monitor.add_channel('scale_step', hack.graph_input,
                                self.monitor_channel, dataset=hack.dataset)
        cur = algorithm.scale_step
        cur *= self.scale
        cur = max(cur, self.min_value)
        algorithm.scale_step = cur
        self.monitor_channel.set_value(np.cast[config.floatX](cur))


class BacktrackingStepShrinker(TrainExtension, TerminationCriterion):
    """
    .. todo::

        WRITEME
    """

    def __init__(self, channel, scale, giveup_after, scale_up=1.,
                 max_scale=1.):
        self.__dict__.update(locals())
        del self.self
        self.continue_learning = True
        self.first = True
        self.prev = np.inf

    def on_monitor(self, model, dataset, algorithm):
        """
        .. todo::

            WRITEME
        """
        monitor = model.monitor

        if self.first:
            self.first = False
            self.monitor_channel = sharedX(algorithm.scale_step)
            # TODO: make monitor accept channels not associated with any
            # dataset,
            # so this hack won't be necessary
            hack = monitor.channels.values()[0]
            monitor.add_channel('scale_step', hack.graph_input,
                                self.monitor_channel, dataset=hack.dataset)
        channel = monitor.channels[self.channel]
        v = channel.val_record
        if len(v) == 1:
            return
        latest = v[-1]
        logger.info("Latest {0}: {1}".format(self.channel, latest))
        # Only compare to the previous step, not the best step so far
        # Another extension can be in charge of saving the best parameters ever
        # seen. We want to keep learning as long as we're making progress. We
        # don't want to give up on a step size just because it failed to undo
        # the damage of the bigger one that preceded it in a single epoch
        logger.info("Previous is {0}".format(self.prev))
        cur = algorithm.scale_step
        if latest >= self.prev:
            logger.info("Looks like using {0} "
                        "isn't working out so great for us.".format(cur))
            cur *= self.scale
            if cur < self.giveup_after:
                logger.info("Guess we just have to give up.")
                self.continue_learning = False
                cur = self.giveup_after
            logger.info("Let's see how {0} does.".format(cur))
            logger.info("Reloading saved params from last call")
            for p, v in safe_zip(model.get_params(), self.stored_values):
                p.set_value(v)
            latest = self.prev
        elif latest <= self.prev and self.scale_up != 1.:
            logger.info("Looks like we're making progress "
                        "on the validation set, let's try speeding up")
            cur *= self.scale_up
            if cur > self.max_scale:
                cur = self.max_scale
            logger.info("New scale is {0}".format(cur))
        algorithm.scale_step = cur
        self.monitor_channel.set_value(np.cast[config.floatX](cur))
        self.prev = latest
        self.stored_values = [param.get_value() for param in
                              model.get_params()]

    def __call__(self, model):
        """
        .. todo::

            WRITEME
        """
        return self.continue_learning
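# Illustrative sketch (added, not part of the original module): BGD is
# typically constructed for a pylearn2 Train loop with arguments matching
# the constructor documented above, e.g.:
#
#     algorithm = BGD(batch_size=1000, updates_per_batch=10,
#                     conjugate=True, line_search_mode='exhaustive')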
| [
"[email protected]"
]
| |
e97a029febec768754bf9e0c325316f93500cf73 | 8025929108bb70599cc5079b0f32d35c556dfe33 | /font_batch_subset.py | 4166574af64597447c4f5d871beec315011070c0 | []
| no_license | waivek/website | 1b0a0c49c3d28f6736362251a184cf48db4063b7 | 82a5f91ef9335de2dde86af934e5305895cd160d | refs/heads/master | 2023-07-11T08:10:32.109086 | 2023-07-02T06:47:19 | 2023-07-02T06:47:19 | 113,569,477 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,715 | py |
from columnar import columnar
import argparse
import subprocess
import os.path
import re
parser = argparse.ArgumentParser(description='Batch subsets a font to an accepted unicode range')
parser.add_argument("directory")
parser.add_argument("unicode_range")
args = parser.parse_args()
headers = [ "Input", "Output", "Input Size", "Output Size"]
data = []
# correct - fonts\Vollkorn\Vollkorn.400italic.ttf
filenames = os.listdir(args.directory)
layout_features="lnum,tnum"
directory_save = os.getcwd()
os.chdir(args.directory)
selected_filenames = [ filename for filename in filenames if re.match(r"(\w+)\.(\d)00(italic)?\.ttf", filename) ]
total_selected_filenames = len(selected_filenames)
padding_length = len(str(total_selected_filenames))
for i, filename in enumerate(selected_filenames, start=1):
    # Bug fix: the original format string had three placeholders but only two
    # arguments; the current filename was missing.
    status_message = "[%s/%s] %s" % (str(i).rjust(padding_length), total_selected_filenames, filename)
    print(status_message)
    m = re.match(r"(\w+)\.(\d)00(italic)?\.ttf", filename)
    font_name_with_underscores, font_weight, font_style = m.groups()
    font_style = font_style if font_style else "normal"
    input_filename_root, extension = os.path.splitext(filename)
    output_filename = input_filename_root + "-subset"
    ttf_subset_command = 'pyftsubset {input_filename} --unicodes={unicode_range} --output-file={output_filename}.ttf --layout-features={layout_features}'.format(
        input_filename=filename, unicode_range=args.unicode_range, output_filename=output_filename, layout_features=layout_features)
    woff_subset_command = 'pyftsubset {input_filename} --unicodes={unicode_range} --output-file={output_filename}.woff --layout-features={layout_features} --flavor=woff'.format(
        input_filename=filename, unicode_range=args.unicode_range, output_filename=output_filename, layout_features=layout_features)
    woff2_subset_command = 'pyftsubset {input_filename} --unicodes={unicode_range} --output-file={output_filename}.woff2 --layout-features={layout_features} --flavor=woff2'.format(
        input_filename=filename, unicode_range=args.unicode_range, output_filename=output_filename, layout_features=layout_features)

    subprocess.run(ttf_subset_command)
    input_byte_size = os.path.getsize(filename)
    input_kilobyte_size_string = str(round(input_byte_size / 1024, 2)) + " KB"

    # Subsetting fonts/Vollkorn/Vollkorn.400.ttf to Vollkorn.400-subset.ttf (was 313.88 KB, now 5.72 KB)
    # Subsetting fonts/Vollkorn/Vollkorn.400.ttf to Vollkorn.400-subset.zopfli.woff (was 313.88 KB, now 3.44 KB)
    # Subsetting fonts/Vollkorn/Vollkorn.400.ttf to Vollkorn.400-subset.woff2 (was 313.88 KB, now 2.8 KB)
    output_ttf_byte_size = os.path.getsize("%s.ttf" % output_filename)
    output_ttf_kilboyte_size_string = str(round(output_ttf_byte_size / 1024, 2)) + " KB"
    data.append([ filename, "%s.ttf" % output_filename, input_kilobyte_size_string, output_ttf_kilboyte_size_string ])

    subprocess.run(woff_subset_command)
    output_woff_byte_size = os.path.getsize("%s.woff" % output_filename)
    output_woff_kilboyte_size_string = str(round(output_woff_byte_size / 1024, 2)) + " KB"
    data.append([ filename, "%s.woff" % output_filename, input_kilobyte_size_string, output_woff_kilboyte_size_string ])

    subprocess.run(woff2_subset_command)
    output_woff2_byte_size = os.path.getsize("%s.woff2" % output_filename)
    output_woff2_kilboyte_size_string = str(round(output_woff2_byte_size / 1024, 2)) + " KB"
    data.append([ filename, "%s.woff2" % output_filename, input_kilobyte_size_string, output_woff2_kilboyte_size_string ])
os.chdir(directory_save)
print()
table = columnar(data, headers, no_borders=True, justify=["l", "l", "r", "r"] )
print(table)
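# Example invocation (hypothetical directory and range):
#   python font_batch_subset.py fonts/Vollkorn "U+0000-00FF"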
| [
"[email protected]"
]
| |
c2abb820a33634fbd4d2baa8cc40894fd5ffc9db | afea9757be324c8def68955a12be11d71ce6ad35 | /willyanealves/customer_service/migrations/0018_remove_customerservice_serviceitem.py | 5389c0887e5e5598bfdb43884190c5126c6d8681 | []
| no_license | bergpb/willyane-alves | c713cac3ec3a68005f3b8145985693d2477ba706 | 8b2b9922ba35bf2043f2345228f03d80dbd01098 | refs/heads/master | 2023-02-10T19:57:50.893172 | 2021-01-11T16:17:14 | 2021-01-11T16:17:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py |
# Generated by Django 3.1.2 on 2020-11-16 14:18
from django.db import migrations
class Migration(migrations.Migration):

    dependencies = [
        ('customer_service', '0017_auto_20201116_1115'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='customerservice',
            name='serviceitem',
        ),
    ]
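# Applied like any Django migration, e.g.: python manage.py migrate customer_service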
| [
"[email protected]"
]
| |
bfed24c5b9f51dc5e88f33ea662a3f1e253a3474 | 39c7137821880e504608a4508299ed5883e3ebe3 | /website/run.py | d10df72b10721b85de47d5e8500058bead40b612 | []
| no_license | tharrington923/rCheckUp | 081a6b806e41f2b0d2a1b82e35f402b0ed9c932d | 80e13d9b79b069f01dc83cb43d282557d5f78787 | refs/heads/master | 2020-12-10T03:27:05.944264 | 2017-06-29T16:02:15 | 2017-06-29T16:02:15 | 95,575,694 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py |
#!/usr/bin/env python
from rcheckup import app
app.run(host='0.0.0.0', port=5000)
| [
"[email protected]"
]
| |
ba55baa802dc3a6b2fe5b7b479d00888a5eec426 | 728be4e1d3ab5a63e2d6900637fdfb822d965a43 | /blobing_photo.py | a41716f876d2c4ec077f3e39a3870d46e285f6db | []
| no_license | Reveta/openCV_Python | 7ea7aeb5da5243b4359cb75ed9e14489051d917f | 6b88a9c1ba459f2810c54b91da77e0f0914da894 | refs/heads/master | 2023-07-16T00:26:17.752429 | 2021-08-23T19:33:10 | 2021-08-23T19:33:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,054 | py |
#!/usr/bin/python
# Standard imports
import cv2
import numpy as np
# Read image
# image_original = cv2.imread("media/blobs/blob.jpg", cv2.IMREAD_GRAYSCALE)
image_original = cv2.imread("media/blobs/microchips_1.png", cv2.IMREAD_COLOR)
# image_original = cv2.imread("media/blobs/microchips_2.png", cv2.IMREAD_COLOR)
th, image = cv2.threshold(image_original, 150, 255, cv2.THRESH_BINARY)
image = cv2.medianBlur(src=image, ksize=3)
image = cv2.bitwise_not(image)
# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 150
params.maxThreshold = 255
# params.filterByColor = True
# params.blobColor = 203
# Filter by Area.
# params.filterByArea = True
# params.minArea = 100
#
# Filter by Circularity
# params.filterByCircularity = True
# params.minCircularity = 0.7
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0
# Filter by Inertia
params.filterByInertia = True
params.minInertiaRatio = 0
# Create a detector with the parameters
ver = (cv2.__version__).split('.')
if int(ver[0]) < 3:
    detector = cv2.SimpleBlobDetector(params)
else:
    detector = cv2.SimpleBlobDetector_create(params)
# Detect blobs.
keypoints = detector.detect(image)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures
# the size of the circle corresponds to the size of blob
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
th, image = cv2.threshold(image, 150, 255, cv2.THRESH_BINARY_INV)
im_with_keypoints = cv2.drawKeypoints(image_original, keypoints, np.array([]), (0, 0, 255),
                                      cv2.DrawMatchesFlags_DRAW_RICH_KEYPOINTS)
down_width = 800
down_height = 800
down_points = (down_width, down_height)
image_original = cv2.resize(image_original, down_points, interpolation=cv2.INTER_LINEAR)
im_with_keypoints = cv2.resize(im_with_keypoints, down_points, interpolation=cv2.INTER_LINEAR)
# Show blobs
cv2.imshow("original", image_original)
cv2.imshow("Keypoints", im_with_keypoints)
cv2.waitKey(0)
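# Typical cleanup step (not in the original script): cv2.destroyAllWindows()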
| [
"[email protected]"
]
| |
07030cbb64db6488b93f8e7f03c975d1d39c099d | df5cd640098a10e754a9552187fc5ad8c50df90c | /colour/examples/algebra/examples_interpolation.py | 4acf509db6a9fd00459d7e4bce455a3a20c6b8ca | [
"BSD-3-Clause"
]
| permissive | ofek/colour | d4963c9b77b0d119cf3ef3296dbf5369167472df | 04f4863ef49093a93244c1fedafd1d5e2b1b76da | refs/heads/develop | 2021-07-08T05:33:14.220392 | 2017-09-29T22:34:14 | 2017-09-29T22:34:14 | 105,406,461 | 0 | 0 | null | 2017-09-30T23:06:18 | 2017-09-30T23:06:18 | null | UTF-8 | Python | false | false | 3,265 | py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Showcases interpolation computations.
"""

import pylab

import colour
from colour.plotting import *  # noqa
from colour.utilities.verbose import message_box

message_box('Interpolation Computations')

message_box(('Comparing "Sprague (1880)" and "Cubic Spline" recommended '
             'interpolation methods to "Pchip" method.'))

uniform_spd_data = {
    340: 0.0000,
    360: 0.0000,
    380: 0.0000,
    400: 0.0641,
    420: 0.0645,
    440: 0.0562,
    460: 0.0537,
    480: 0.0559,
    500: 0.0651,
    520: 0.0705,
    540: 0.0772,
    560: 0.0870,
    580: 0.1128,
    600: 0.1360,
    620: 0.1511,
    640: 0.1688,
    660: 0.1996,
    680: 0.2397,
    700: 0.2852,
    720: 0.0000,
    740: 0.0000,
    760: 0.0000,
    780: 0.0000,
    800: 0.0000,
    820: 0.0000
}

non_uniform_spd_data = {
    340.1: 0.0000,
    360: 0.0000,
    380: 0.0000,
    400: 0.0641,
    420: 0.0645,
    440: 0.0562,
    460: 0.0537,
    480: 0.0559,
    500: 0.0651,
    520: 0.0705,
    540: 0.0772,
    560: 0.0870,
    580: 0.1128,
    600: 0.1360,
    620: 0.1511,
    640: 0.1688,
    660: 0.1996,
    680: 0.2397,
    700: 0.2852,
    720: 0.0000,
    740: 0.0000,
    760: 0.0000,
    780: 0.0000,
    800: 0.0000,
    820.9: 0.0000
}

base_spd = colour.SpectralPowerDistribution('Reference', uniform_spd_data)
uniform_interpolated_spd = colour.SpectralPowerDistribution(
    'Uniform - Sprague Interpolation', uniform_spd_data)
uniform_pchip_interpolated_spd = colour.SpectralPowerDistribution(
    'Uniform - Pchip Interpolation', uniform_spd_data)
non_uniform_interpolated_spd = colour.SpectralPowerDistribution(
    'Non Uniform - Cubic Spline Interpolation', non_uniform_spd_data)

uniform_interpolated_spd.interpolate(colour.SpectralShape(interval=1))
uniform_pchip_interpolated_spd.interpolate(
    colour.SpectralShape(interval=1), method='Pchip')
non_uniform_interpolated_spd.interpolate(colour.SpectralShape(interval=1))

shape = base_spd.shape
x_limit_min, x_limit_max, y_limit_min, y_limit_max = [], [], [], []

pylab.plot(
    base_spd.wavelengths,
    base_spd.values,
    'ro-',
    label=base_spd.name,
    linewidth=2)
pylab.plot(
    uniform_interpolated_spd.wavelengths,
    uniform_interpolated_spd.values,
    label=uniform_interpolated_spd.name,
    linewidth=2)
pylab.plot(
    uniform_pchip_interpolated_spd.wavelengths,
    uniform_pchip_interpolated_spd.values,
    label=uniform_pchip_interpolated_spd.name,
    linewidth=2)
pylab.plot(
    non_uniform_interpolated_spd.wavelengths,
    non_uniform_interpolated_spd.values,
    label=non_uniform_interpolated_spd.name,
    linewidth=2)

x_limit_min.append(shape.start)
x_limit_max.append(shape.end)
y_limit_min.append(min(base_spd.values))
y_limit_max.append(max(base_spd.values))

settings = {
    'x_label': 'Wavelength $\\lambda$ (nm)',
    'y_label': 'Spectral Power Distribution',
    'x_tighten': True,
    'legend': True,
    'legend_location': 'upper left',
    'x_ticker': True,
    'y_ticker': True,
    'limits': (min(x_limit_min), max(x_limit_max), min(y_limit_min),
               max(y_limit_max))
}

boundaries(**settings)
decorate(**settings)
display(**settings)
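# Note (added): 'Sprague (1880)' is the method recommended for uniformly
# spaced spectral data, which is why the non-uniformly sampled set above
# falls back to cubic spline interpolation.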
| [
"[email protected]"
]
| |
dfa355b0aa14dd76ed80003014bdb0d64e25c20b | 84fe91caa2209022b8300a7f42dea0b78db41e12 | /action/test/test_watermark.py | acb7ae6abfbc2f7e2d4e76c4d1d2bf2991c3c0c8 | [
"MIT"
]
| permissive | mshemanskyi/image-batch-processor | 6b579f1e3442262a9abb5a0cd8f6731a23ebf13b | dd7e0d9bc77fd06fe0f94c4f4fd025323633d9f7 | refs/heads/master | 2023-01-10T10:03:52.523962 | 2020-11-07T21:18:47 | 2020-11-07T21:18:47 | 303,668,070 | 0 | 0 | MIT | 2020-11-07T20:27:36 | 2020-10-13T10:33:10 | Python | UTF-8 | Python | false | false | 677 | py |
import unittest

from action.watermark import watermarkImage, watermarkText
import cv2 as cv
import numpy as np


class WatermarkTest(unittest.TestCase):

    @classmethod
    def setUpClass(self):
        self.testImage = cv.imread('examples/img1.jpg')
        self.watermarkLogo = cv.imread('examples/wm-logo.png')

    def setUp(self):
        pass

    def test_watermak__image_action(self):
        wmi = watermarkImage(self.testImage, self.watermarkLogo)
        self.assertTrue(np.any(self.testImage != wmi))

    def test_watarmark_text_action(self):
        wmi = watermarkText(self.testImage, 'watermarkLogo')
        self.assertTrue(np.any(self.testImage != wmi))
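# Run with, assuming the examples/ assets are present:
#   python -m unittest action.test.test_watermark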
| [
"[email protected]"
]
| |
e5473522c26a9ba7d2654c9fa7a954d97bb4ed88 | f855be082bd55159cf6f77be034a78ecf2020232 | /actions/actions.py | 9501206f8fdb9634b4fe7561bdb7409868ff48ea | []
| no_license | athenasaurav/venkat | 64e3b9522bbcceb141d87f4b04498dcfd7f10969 | bfb7a767793483a6a8663e1e112a20c03433b2df | refs/heads/main | 2023-03-27T11:32:52.246192 | 2021-03-16T18:01:55 | 2021-03-16T18:01:55 | 348,443,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,129 | py |
# This file contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these actions:
# https://rasa.com/docs/rasa/custom-actions

# This is a simple example for a custom action which utters "Hello World!"
from typing import Any, Text, Dict, List

from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.events import Form, AllSlotsReset, Restarted


class ActionHelloWorld(Action):

    def name(self) -> Text:
        return "action_greet"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:

        dispatcher.utter_message(text="Hi human, I'm sure you want something 😃 I'm sure you are gonna ask me something")

        return []
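# Note (added): for Rasa to dispatch these actions, each name returned by
# name() (e.g. "action_greet") must also be declared under `actions:` in the
# assistant's domain.yml, with the action server enabled in endpoints.yml.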
class ActionPostAccommodationServicesHotel(Action):

    def name(self) -> Text:
        return "action_post_Accommodation_Services_Hotel"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        dispatcher.utter_message(text="Hotel")
        return []


class ActionPostCityAttractionSpa(Action):

    def name(self) -> Text:
        return "action_post_city_attraction_spa"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        dispatcher.utter_message(text="Spa")
        return []


class ActionPostEducationalSuppliesDealers(Action):

    def name(self) -> Text:
        return "action_post_Educational_Supplies_Dealers"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        dispatcher.utter_message(text="Education supplies dealers")
        return []


class ActionPostEducationalSuppliesDealersBookShop(Action):

    def name(self) -> Text:
        return "action_post_Educational_Supplies_Dealers_book_shop"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        dispatcher.utter_message(text="Book Services")
        return []


class ActionPostAccountant(Action):

    def name(self) -> Text:
        return "action_post_Accountant"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        dispatcher.utter_message(text="Accountant")
        return []


class ActionPostInsurance(Action):

    def name(self) -> Text:
        return "action_post_Insurance"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        dispatcher.utter_message(text="Insurance")
        return []


class ActionPostATM(Action):

    def name(self) -> Text:
        return "action_post_ATM"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        dispatcher.utter_message(text="ATM")
        return []


class ActionPostGovernmentOffices(Action):

    def name(self) -> Text:
        return "action_post_Government_Offices"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        dispatcher.utter_message(text="GOVT OFFICE")
        return []


class ActionPostNightlife(Action):

    def name(self) -> Text:
        return "action_post_Nightlife"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        dispatcher.utter_message(text="Nightlife")
        return []


class ActionPostNightlifeHookah(Action):

    def name(self) -> Text:
        return "action_post_Nightlife_Hookah"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        dispatcher.utter_message(text="Nightlife Hookah")
        return []


class ActionPostSchools(Action):

    def name(self) -> Text:
        return "action_post_Schools"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        dispatcher.utter_message(text="Schools")
        return []


class ActionPostSchoolsRetakers(Action):

    def name(self) -> Text:
        return "action_post_Schools_Retakers"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        dispatcher.utter_message(text="Schools Retakers")
        return []


class ActionPostGamesCenters(Action):

    def name(self) -> Text:
        return "action_post_Games_Centers"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        dispatcher.utter_message(text="Game Centers")
        return []


class ActionPostFoodandDininglocation(Action):

    def name(self) -> Text:
        return "action_post_Food_and_Dining_location"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        dispatcher.utter_message(text="Food_and_Dining_location")
        return []
class ActionPostFoodandDiningFoodProductSuppliers(Action):
def name(self) -> Text:
return "action_post_Food_and_Dining_Food_Product_Suppliers"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="Food_and_Dining_Food_Product_Suppliers")
return []
class ActionPostFoodandDiningRestaurant(Action):
def name(self) -> Text:
return "action_post_Food_and_Dining_Restaurant"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="Food_and_Dining_Restaurant")
return []
class ActionPostFoodandDiningSeafood(Action):
def name(self) -> Text:
return "action_post_Food_and_Dining_Seafood"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="Food_and_Dining_Seafood")
return []
class ActionPostFoodandDiningSweet(Action):
def name(self) -> Text:
return "action_post_Food_and_Dining_Sweet"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="Food_and_Dining_Sweet")
return []
class ActionPostGrocery(Action):
def name(self) -> Text:
return "action_post_Grocery"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="Grocery")
return []
class ActionPostClinicsAndDoctorsType(Action):
def name(self) -> Text:
return "action_post_Clinics_and_Doctors_type"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="action_post_Clinics_and_Doctors_type")
return []
class ActionPostClinicsandDoctorstypeChiropractors(Action):
def name(self) -> Text:
return "action_post_Clinics_and_Doctors_type_Chiropractors"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="action_post_Clinics_and_Doctors_type_Chiropractors")
return []
class ActionPostClinicsandDoctorstypeDentists(Action):
def name(self) -> Text:
return "action_post_Clinics_and_Doctors_type_Dentists"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="action_post_Clinics_and_Doctors_type_Dentists")
return []
class ActionPostEmergencyServices(Action):
def name(self) -> Text:
return "action_post_Emergency_Services"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="action_post_Emergency_Services")
return []
class ActionPostLegalService(Action):
def name(self) -> Text:
return "action_post_Legal_Service"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="action_post_Legal_Service")
return []
class ActionPostLegalServiceAdvocates(Action):
def name(self) -> Text:
return "action_post_Legal_Service_Advocates"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="action_post_Legal_Service_Advocates")
return []
class ActionPostLegalServiceImmigration(Action):
def name(self) -> Text:
return "action_post_Legal_Service_Immigration"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="action_post_Legal_Service_Immigration")
return []
class ActionPostLegalServicePassport(Action):
def name(self) -> Text:
return "action_post_Legal_Service_Passport"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="action_post_Legal_Service_Passport")
return []
class ActionPostFuneralsandMemorials(Action):
def name(self) -> Text:
return "action_post_Funerals_and_Memorials"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="action_post_Funerals_and_Memorials")
return []
class ActionPostGardenandLawn(Action):
def name(self) -> Text:
return "action_post_Garden_and_Lawn"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="action_post_Garden_and_Lawn")
return []
class ActionPostGardenandLawnGarden(Action):
def name(self) -> Text:
return "action_post_Garden_and_Lawn_Garden"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="action_post_Garden_and_Lawn_Garden")
return []
class ActionPostGardenandLawnIrrigation(Action):
def name(self) -> Text:
return "action_post_Garden_and_Lawn_Irrigation"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="action_post_Garden_and_Lawn_Irrigation")
return []
class ActionRestarted(Action):
def name(self):
return "action_restart"
def run(self, dispatcher, tracker, domain):
dispatcher.utter_template("utter_reset_full", tracker)
return [Form(None), AllSlotsReset(None), Restarted(None)]
| [
"[email protected]"
]
| |
563f625c719041548b3b0dee923cd3f05db82e19 | 498328d283bbcfa2aed7780cc2ccd1ab1435998c | /validation.py | 7edbc1a87dd4a75e441e312bb3351b880ee9201b | []
| no_license | ktan8/AM205-Project | 51a336921314b5e977dd9682a60f8c73adc64598 | e697afe74f0b9fb8d2b177191e7d8472a3167031 | refs/heads/master | 2020-04-11T01:01:34.481178 | 2018-12-16T15:01:19 | 2018-12-16T15:01:19 | 161,403,438 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,987 | py | '''Reads suppulemeal data'''
import pandas as pd
import os
import network_matrices
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
def label_active(row):
    '''Label whether a given knockout showed higher expression; the heuristic is
    whether both shRNA ratios are more than 20% above/below baseline'''
if(row['RNA1_percent'] >1.2 and row["RNA2_percent"] > 1.2):
return(1)
elif(row["RNA1_percent"] < 0.8 and row["RNA2_percent"] < 0.8):
return(-1)
return(0)
def get_data_jak2():
'''Read the expression data for jak2'''
path = os.path.join("data", "data_jak2_knockdown.txt")
data = pd.read_csv(path, sep = '\t')
data["RNA1_percent"] = data["sh-JAK2-shRNA1"] / data["shRNA-control"]
data["RNA2_percent"] = data["sh-JAK2-shRNA2"] / data["shRNA-control"]
data['mat_val'] = data.apply(label_active, axis = 1)
return(data)
def get_data_stat5():
'''Get stat5 expression data'''
path3 = os.path.join("data", "data_stat5_knockdown.txt")
d4 = pd.read_csv(path3, sep = '\t')
d4['percent_change' ] = d4['data.expr.anno.ctl'] / d4['data.expr.anno.kdSTAT5']
return(d4)
def get_pathway(path):
    '''Read in hand-curated network'''
pathway = pd.read_csv(path)
pathway = pathway.set_index("Unnamed: 0")
pathway.index.name = "Gene"
conversion_path = os.path.join("data", "convertModelToExprNames.txt")
conversions = pd.read_table(conversion_path)
return(pathway, conversions)
def compute_expressions(conversions, df, gene_col = "Gene Symbol", control_col = "shRNA-control", experiment_col = "sh-JAK2-shRNA1"):
    '''Create a dictionary of the expression data percent change per gene'''
expression_levels_change = {}
for elem in conversions:
model_name = elem[0]
targets = elem[1].split(',')
total_control = 0
total_expression = 0
for gene in targets:
avg_df = df[df[gene_col] == gene].mean() # We may have multiple expression
total_control += avg_df[experiment_col]
total_expression += avg_df[control_col]
change = total_control / total_expression - 1
if(not np.isnan(change)):
expression_levels_change[model_name] = change
return(expression_levels_change)
def get_val(df, index, gene_names):
a = (df[df["Gene Symbol"] == gene_names[index]]['mat_val']).sum()
return(np.sign(a))
def quick_correlation(jak2_index, indicies, mat1, exp):
'''Compute the correlation at a given index pair in mat1'''
l1 = np.squeeze(np.asarray(mat1[jak2_index, indicies]))
return(scipy.stats.spearmanr(np.abs(l1), np.abs(exp)))
def wrapper_corr(jak2_index, indicies, exp):
'''Dummy wrapper for syntax'''
return(lambda x : quick_correlation(jak2_index, indicies, x, exp))
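# Compare each network model's predicted influence of the knocked-down gene on
# the other measured genes against the observed expression changes, using rank
# correlation and sign agreement, and plot the results.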
def run_example(data, pathway, conversions, elc, knockdown_gene ):
path2 = os.path.join("data", "PDL1_pathway", "PDL1_pathway.matrix.csv")
tran = True
rem = True
'''Get the models'''
(prop, dist, neighbors) = network_matrices.run_many(path2, tran, rem)
(_, gene_names, _) = network_matrices.get_data(path2, tran, rem)
genes_have_all = list(elc.items())
genes_have = [x[0] for x in genes_have_all]
genes_have_val = [x[1] for x in genes_have_all]
indicies = []
jak2_index = -1
pdl1_index = pathway.index.get_loc("PD-L1")
for i in range(0, len(gene_names)):
if(gene_names[i] == knockdown_gene):
jak2_index = i
if(gene_names[i] in genes_have):
indicies.append(i)
genes_have_val = np.array(genes_have_val)
f = wrapper_corr(jak2_index, indicies, genes_have_val)
#Correlation data
corr_data = [f(prop[0]), f(prop[1]), f(prop[2]), f(dist[0]), f(dist[1]) ]
'''Plotting code'''
blues = [plt.cm.Blues(300), plt.cm.Blues(200), plt.cm.Blues(100)]
oranges= [plt.cm.Oranges(200), plt.cm.Oranges(100)]
greys= [plt.cm.Greys(300), plt.cm.Greys(200), plt.cm.Greys(100)]
reds= [
plt.cm.Reds(200), plt.cm.Reds(175), plt.cm.Reds(150),
plt.cm.Reds(125), plt.cm.Reds(100)
]
colors = blues + oranges + greys + reds
objects = ('Propogation (d+s)', 'Propogation (d)',
'Propogation (u)', 'Distance (d)',
'Adjacency (d+s)')
y_pos = np.arange(len(objects))
blues = [plt.cm.Blues(300), plt.cm.Blues(200), plt.cm.Blues(100)]
oranges= [plt.cm.Oranges(200), plt.cm.Oranges(100)]
greys= [plt.cm.Greys(300)]
colors = blues + oranges + greys + reds
corr_data = [x[0] for x in corr_data]
plt.bar(y_pos, corr_data, align='center', alpha=0.5, color = colors)
plt.xticks(y_pos, objects, rotation='vertical')
plt.ylabel('Correlation')
plt.title('Strength with experiment')
plt.show()
def isactive(val):
'''Look at the direction. Up is more than 10%, down is < 10%'''
if(val > 0.1):
return(1)
elif(val < -0.1):
return(-1)
return(0)
vfunc = np.vectorize(isactive)
genes_have_val_ind = vfunc(genes_have_val)
    '''Get the percent sign match'''
q = lambda inp : np.sum(np.sign(inp[jak2_index, indicies]) == np.sign(genes_have_val_ind)) / (len(indicies))
sign_match = [q(prop[0]), q(prop[1]), q(prop[2]), q(dist[0]), q(dist[1]),
]
plt.bar(y_pos, sign_match, align='center', alpha=0.5, color = colors)
plt.xticks(y_pos, objects, rotation='vertical')
plt.ylabel('Percent agreement')
plt.title('Percent agrement in sign with different models')
plt.show()
all_data_names = ["Propogation (d+s)", "Progation (d)", "Propogation (u)", "Distance (d)", "Distance (u)", "neighbors (signed)"]
all_data_models = [prop[0], prop[1], prop[0], dist[0], dist[1], neighbors[0]]
def write_data(index, all_data, all_data_names, pathway):
list_arr = []
for model in all_data:
list_arr.append((np.squeeze(np.asarray(model[:, index]))))
df_res = pd.DataFrame(list_arr, index = all_data_names, columns = pathway.columns[1:])
df_res.index.name = "method"
return(df_res)
'''Can write this data to a csv if needed'''
d = write_data(pdl1_index , all_data_models, all_data_names, pathway)
return(d)
jak_2 = get_data_jak2()
(pathway, conversions) = get_pathway(os.path.join("data", "PDL1_pathway", "PDL1_pathway.matrix.csv"))
elc = compute_expressions(conversions.values, jak_2)
knockdown_gene = "JAK2"
run_example(jak_2, pathway, conversions, elc, knockdown_gene)
#stat5 = get_data_stat5()
#elc2 = compute_expressions(conversions.values, stat5, 'data.expr.anno.GENE_SYMBOL','data.expr.anno.ctl', 'data.expr.anno.kdSTAT5')
#knockdown_gene = "STAT5"
#run_example(stat5, pathway, conversions, elc2, knockdown_gene)
#
#
#f(neighbors[2].T) | [
"[email protected]"
]
| |
aea1aef16acb0e35be9835c1de7d957ef061384a | a55bf36b8c58cb5b430734a9e5c35fec3a757b46 | /Pygame/Maps/Attempts/Map01/app.py | 4eab769097b0c2c65af739eb60bbae8eaf628217 | []
| no_license | alexetsnyder/OuroborosPython | 5547a6f925263430f024b5fd1d1d797600ec5f9d | 3c5e93fb90368c30c2748218a56a4d184e144caa | refs/heads/master | 2021-06-28T22:37:07.555035 | 2021-03-01T23:45:10 | 2021-03-01T23:45:10 | 225,740,980 | 1 | 0 | null | 2021-03-01T23:45:10 | 2019-12-04T00:03:14 | Python | UTF-8 | Python | false | false | 1,680 | py | #app.py
import pygame, island_map
import imp, go, events, config
from pygame import freetype
from structs import *
class Screen:
def __init__(self, size):
self.set_size(size)
self.surface = pygame.display.set_mode(self.size, pygame.RESIZABLE)
def wire_events(self):
imp.IMP().add_listener(events.WindowResizedEvent().listen(self.on_resize))
def on_resize(self, event):
self.surface = pygame.display.set_mode((event.w, event.h), pygame.RESIZABLE)
def set_size(self, size):
self.w, self.h = self.size = size
def set_title(self, title):
pygame.display.set_caption(title)
def fill(self, color=Color.BLACK):
self.surface.fill(color)
def flip(self):
pygame.display.flip()
class App:
def __init__(self):
pygame.init()
self.island_map = island_map.IslandMap((0, 0), imp.IMP().screen.size, 320, 200)
self.wire_events()
def wire_events(self):
imp.IMP().add_listener(events.WindowResizedEvent().listen(self.on_resize))
def on_resize(self, event):
self.island_map.refresh((0, 0), (event.w, event.h))
def update(self):
pass
def draw(self):
imp.IMP().screen.fill(Color.BLACK)
imp.IMP().draw(self.island_map)
imp.IMP().screen.flip()
def free(self):
pygame.quit()
def run(self):
while imp.IMP().running:
for event in pygame.event.get():
imp.IMP().dispatch(event)
self.update()
self.draw()
self.free()
if __name__=='__main__':
data = config.Config('data/config_data.txt')
screen = Screen((640, 400))
event_dispatcher = events.EventDispatcher()
imp.IMP().init(screen, data, event_dispatcher, data.try_get('DEBUG', False))
app = App()
app.run() | [
"[email protected]"
]
| |
a3686500cf1b87f08d8a3d467cb0f56dead33719 | 44cfc32ae35487314cbf2ea83fbc96de9ac3e693 | /Trees/post_iterative.py | f989a9dbf228047512a35fb120c7ca73c34c63c3 | []
| no_license | Imonymous/Practice | 738fdf2099a74127460a647fee8db97c95f322fc | ff6138d858777e6a669a5a387cd2f775c60c9aff | refs/heads/master | 2020-03-18T16:53:15.029553 | 2019-11-09T23:29:27 | 2019-11-09T23:29:27 | 134,991,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | #!/usr/bin/env python
from isBST import TreeNode, BinaryTree, readBinaryTree
# Using one stack
def build_stack(node, s):
while node:
if node.right_ptr:
s.append(node.right_ptr)
s.append(node)
node = node.left_ptr
def peek(s):
if len(s) > 0:
return s[-1]
return None
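# Single-stack postorder traversal: build_stack() pushes each node on top of
# its right child while walking down the left spine. When the popped node's
# right child is still on top of the stack, its right subtree is unvisited,
# so the two are swapped and the right subtree is expanded first.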
def post_iterative_1_stack(root):
s = []
build_stack(root, s)
while s:
curr = s.pop()
if curr.right_ptr == peek(s):
temp = s.pop()
s.append(curr)
build_stack(temp, s)
else:
print(curr.val)
# Using two stacks
def dfs(node):
if node is None:
return
print(node.val)
dfs(node.left_ptr)
dfs(node.right_ptr)
def post_iterative_2_stack(root):
# dfs(root)
s1 = []
s2 = []
s1.append(root)
while s1:
curr = s1.pop()
s2.append(curr)
if curr.right_ptr:
s1.append(curr.right_ptr)
if curr.left_ptr:
s1.append(curr.left_ptr)
while s2:
curr = s2.pop()
print(curr.val)
def main():
root = readBinaryTree()
post_iterative_2_stack(root)
post_iterative_1_stack(root)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
1f20faa1db5d79da116d4b083125c2a17a098fc2 | 2485df18e7584efc0652ab5693ca299a29bf315a | /Finance/Monte-Carlo/Python/Pricing.py | 443d07a10067c4ceebbe99b11a406d8ee136e2d2 | []
| no_license | lesibius/Programming-For-Fun | c16a576dcb91ff12e7ae8116386d4ac89c5e40c0 | ae7e19e6ba78f4f95bbbf64f0d222f16d6b22b1d | refs/heads/master | 2020-05-29T08:41:19.938610 | 2017-02-17T10:50:23 | 2017-02-17T10:50:23 | 69,820,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,485 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 18 10:28:21 2017
@author: clem
"""
import math
import numpy.random
import scipy.stats
import matplotlib.pyplot as plt
import sklearn.utils as sku
class Path:
"""
Class to represent the path of a random variable following a Brownian motion (e.g. stock price)
"""
def __init__(self,s0,nPeriod,deltaT):
"""
Parameters
----------
s0: float
Initial value of the random variable
nPeriod: int
Number of period to generate
deltaT: float
Inter-period time interval
Returns
-------
None
"""
self.NPeriod = nPeriod
self.DeltaT = deltaT
self.Values = [s0 for i in range(nPeriod)]
def GetLastItem(self):
"""
Returns the last value of the Path instance
Parameters
----------
None
Returns
-------
type: float
Last value of the Path instance
"""
return self.Values[self.NPeriod-1]
def __setitem__(self,ind,item):
"""
Set the value of an item in the Path value list
Parameters
----------
ind: int
Index in the random variable path list
item:
Value to set
Returns
-------
None
"""
self.Values[ind] = item
def __getitem__(self,ind):
"""
Returns a value of an item from the Path value list with the index provided
Parameters
----------
ind: int
Index of the item to return
Returns
-------
type: float
Value of the random variable at the given index
"""
return self.Values[ind]
def GetItemByDate(self,date):
"""
Return the value of the random variable at a given date
Parameters
----------
date: float
Date for which the random variable value should be returned
Returns
-------
type: float
Value of the random variable at the selected date
"""
frac, ind = math.modf(date / self.DeltaT)
ind=int(ind)
if frac < 0.000001:
return self[ind]
else:
            return (1 - frac) * self[ind] + frac * self[ind + 1]  # linear interpolation between the bracketing samples
def Plot(self):
"""
Plot a graph of the path
Parameters
----------
None
Returns
-------
None
"""
plt.plot([self.DeltaT * period for period in range(self.NPeriod)],self.Values)
class PathGenerator:
"""
Class generating Path instances to price derivatives
"""
def __init__(self,nPath,totaltime,deltaT,model, drift, vol,**kwargs):
"""
Parameters
----------
nPath: int
Number of path to generate
totaltime: float
Total time of each path
deltaT: float
Time interval between two periods
drift: float/function
Drift of the Brownian motion
model: str
Name of the model to use. Currently, only "black-scholes" supported
vol: float/function
Volatility of the Brownian motion
**kwargs: optional arguments to provide for certain model
"""
self.TotalTime = totaltime
self.NPath = nPath
self.NPeriod = int(totaltime/deltaT)
self.DeltaT = deltaT
self.Drift = drift
self.Vol = vol
if model.lower() in ['bs','black-scholes','black scholes','merton','black-scholes-merton','black scholes merton']:
self.DriftFun = self._BSDriftFun
self.VolFun = self._BSVolFun
def _BSDriftFun(self,S,t,rt,sigmat):
"""
Drift function used by the Black-Scholes-Merton model
"""
return self.Drift * S
def _BSVolFun(self,S,t,r,sigmat):
"""
Volatility function used by the Black-Scholes-Merton model
"""
return self.Vol * S * numpy.random.normal(0.0,1.0)
def _initPaths(self,s0):
"""
Init the attribute Paths as a list of Path instance
Parameters
----------
s0: float
Initial value of the random variable
Returns
-------
None
"""
self.Paths = [Path(s0,self.NPeriod,self.DeltaT) for i in range(self.NPath)]
def GeneratePaths(self,s0):
"""
Generate the Path instance to price derivatives
Parameters
----------
s0: float
Initial value of the random variable
Returns
-------
None
"""
self._initPaths(s0)
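        # Euler-Maruyama discretisation of dS = drift(S,t) dt + vol(S,t) dW:
        # each step adds the drift scaled by dt plus the diffusion term (which
        # already contains a standard normal draw) scaled by sqrt(dt).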
for i in range(self.NPath):
S = s0
for j in range(self.NPeriod-1):
t = (j+1)*self.DeltaT
S = S + self.DriftFun(S,t,0.01,0) * self.DeltaT + self.VolFun(S,t,0.01,0) * math.sqrt(self.DeltaT)
self.Paths[i][j+1] = S
def Discount(self,date):
"""
Returns the discount factor for a given date
Warnings
--------
Only provide the discount factor, and do not implement r=f(t) yet
Parameters
----------
date: float
Date of the cash flow
Returns
-------
type: float
Discount factor for the selected date
"""
return math.exp(-date * 0.01)
def __getitem__(self,ind):
"""
Return a Path instance from the path list
Parameters
----------
ind: int
Index of the Path instance
Returns
-------
type: Path
Path at the ind index
"""
return self.Paths[ind]
def __iter__(self):
"""
Returns an iterable list of the Path instance
Parameters
----------
None
Returns
-------
type: list
List of Path instance
"""
for path in self.Paths:
yield path
class Option:
"""
Option class
"""
def __init__(self,payoff,underlying,expiry=None):
"""
Parameters
----------
payoff: function(Path -> float)
Payoff function
underlying: PathGenerator
Underlying PathGenerator instance
expiry: float (optional)
Expiry date (or last expirty date) of the derivative
"""
self.Underlying = underlying
self.Payoff = payoff
if expiry is None:
self.Expiry = underlying.TotalTime
else:
self.Expiry = expiry
def _GetValue(self,path):
"""
Compute the value at expiry date of the option"
Parameters
----------
path: Path
Path for which the value should be computed
Returns
-------
type: float
Value at expiry
"""
return self.Payoff(path) * self.Underlying.Discount(self.Expiry)
def Price(self,nbootstrap = 1000):
"""
Compute the option price using simulations
Parameters
----------
None
Returns
-------
type: float
Option price at t = 0
"""
tmpval = []
for path in self.Underlying:
tmpval.append(self._GetValue(path))
#av = mean(tmpval)
#st = (1.0/sqrt(self.Underlying.NPath))*numpy.std(tmpval)
#Dirty bootstrap procedure. Fall far from the closed form solution for OTM put
#But still better than without actually
        bootstrap = [numpy.mean(sku.resample(tmpval)) for _ in range(nbootstrap)]
        av = numpy.mean(bootstrap)
st = numpy.std(bootstrap)
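        # Report a 95% confidence interval (mean +/- 1.96 bootstrap standard errors)
        # around the discounted expected payoff.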
return [av - 1.96 * st, av ,av + 1.96 * st]
#Filled with some data ~ today
libor = 0.76944/100.0
S0 = 2267.89
r = 12.0 * math.log(1+libor/12.0)
sigma =0.06 #0.1177
K = 2250.0
t=1.0/12.0
nperiod = 1000
npath = 2000
pg = PathGenerator(npath,t,t/nperiod,'bs',r,sigma)
pg.GeneratePaths(S0)
plainvanillacall = Option(lambda x: max(x.GetLastItem() - K,0),pg)
plainvanillaput = Option(lambda x: max(K - x.GetLastItem(),0),pg)
print("Call price data: {}".format(plainvanillacall.Price()))
print("Put price data: {}".format(plainvanillaput.Price()))
N = lambda x: scipy.stats.norm.cdf(x)
d1 = (1.0/(sigma *math.sqrt(t))) * (math.log(S0/K)+(r+(sigma**2.0)/2.0)*t)
d2 = d1 - sigma * math.sqrt(t)
print("Closed form call: {}".format(S0 * N(d1) - K * math.exp(-r*t) * N(d2)))
print("Closed form put: {}".format(-S0 * N(-d1) + K * math.exp(-r*t) * N(-d2)))
from matplotlib.ticker import FuncFormatter
"""
plt.hist([p.GetLastItem() for p in pg.Paths],bins=10)
def to_percent(y,position):
return str(100*y/npath) + '%'
formatter = FuncFormatter(to_percent)
plt.gca().yaxis.set_major_formatter(formatter)
"""
print(numpy.mean([p.GetLastItem() for p in pg.Paths]))
print(S0 * math.exp(r * plainvanillacall.Expiry))
pg.Paths[0].Plot()
#plt.gca().set_ylim([0,2500])
| [
"[email protected]"
]
| |
c5764734108e5118eb033f9417b70073be8ac9a0 | 28541d61368a14a0d5003db4cc07fed21b40c41f | /Chapter-4/maze3.py | 2a2bcf9a00c029002b258874bd88cd10f9fc123a | []
| no_license | eizin6389/python_algorithm | 390861f9342ce907f2cda0b45b84d364bcba7541 | abf3588ed97a343b6559eb5d69156708d42bc243 | refs/heads/master | 2022-12-06T20:48:49.470312 | 2020-08-14T13:29:26 | 2020-08-14T13:29:26 | 282,905,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | maze = [
[9,9,9,9,9,9,9,9,9,9,9,9],
[9,0,0,0,9,0,0,0,0,0,0,9],
[9,0,9,0,0,0,9,9,0,9,9,9],
[9,0,9,9,0,9,0,0,0,9,0,9],
[9,0,0,0,9,0,0,9,9,0,9,9],
[9,9,9,0,0,9,0,9,0,0,0,9],
[9,0,0,0,9,0,9,0,0,9,1,9],
[9,0,9,0,0,0,0,9,0,0,9,9],
[9,0,0,9,0,9,0,0,9,0,0,9],
[9,0,9,0,9,0,9,0,0,9,0,9],
[9,0,0,0,0,0,0,9,0,0,0,9],
[9,9,9,9,9,9,9,9,9,9,9,9]
]
dir = [[1,0],[0,1],[-1,0],[0,-1]]
x, y, depth, d = 1, 1, 0, 0
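# Wall-follower search: from the current heading d, try directions starting
# one step back in the dir list ((d + i - 1) % 4). Open cells (0) and the
# goal (1) are stepped into and marked 2, increasing depth; re-entering an
# already-visited cell (2) is a backtrack and decreases depth, so depth ends
# up as the length of the path to the goal.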
while maze[x][y] != 1:
maze[x][y] = 2
for i in range(len(dir)):
j = (d + i - 1) % len(dir)
if maze[x + dir[j][0]][y + dir[j][1]] < 2:
x += dir[j][0]
y += dir[j][1]
d = j
depth += 1
break
elif maze[x + dir[j][0]][y + dir[j][1]] == 2:
x += dir[j][0]
y += dir[j][1]
d = j
depth -= 1
break
print(depth)
| [
"[email protected]"
]
| |
3eaa1551407f554655a52f1b22c4d721669fa579 | 3e6e18edfe81bb19e298ae4e1831cb76c2c6069d | /src/lpcshop/models/bottles.py | a4dba719454dd661eebe4d48daada55e5b64e9f8 | []
| no_license | libertalia/lpc | 2e72de7eee36cd92d62e4d250186bda2353c179a | 972343abdcffffc2bec0cac4e2057c91edfa1716 | refs/heads/master | 2023-01-07T08:13:02.708844 | 2016-05-13T01:34:57 | 2016-05-13T01:34:57 | 58,680,165 | 0 | 1 | null | 2022-12-26T19:59:29 | 2016-05-12T22:02:24 | JavaScript | UTF-8 | Python | false | false | 2,117 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.six.moves.urllib.parse import urljoin
from django.utils.encoding import python_2_unicode_compatible
from djangocms_text_ckeditor.fields import HTMLField
from shop.money.fields import MoneyField
from shop.models.product import BaseProduct, BaseProductManager
from shop.models.defaults.mapping import ProductPage, ProductImage
@python_2_unicode_compatible
class Bottle(BaseProduct):
# common product fields
product_name = models.CharField(max_length=255, verbose_name=_("Product Name"))
slug = models.SlugField(verbose_name=_("Slug"))
unit_price = MoneyField(_("Unit price"), decimal_places=3,
help_text=_("Net price for this product"))
description = HTMLField(verbose_name=_("Description"),
help_text=_("Description for the list view of products."))
# controlling the catalog
order = models.PositiveIntegerField(verbose_name=_("Sort by"), db_index=True)
cms_pages = models.ManyToManyField('cms.Page', through=ProductPage,
help_text=_("Choose list view this product shall appear on."))
images = models.ManyToManyField('filer.Image', through=ProductImage)
objects = BaseProductManager()
# filter expression used to search for a product item using the Select2 widget
lookup_fields = ('product_name__icontains',)
class Meta:
verbose_name = _("Bottle")
ordering = ('order',)
def __str__(self):
return self.product_name
@property
def sample_image(self):
return self.images.first()
def get_price(self, request):
return self.unit_price
def get_absolute_url(self):
# sorting by highest level, so that the canonical URL associates with the
# most generic category
cms_page = self.cms_pages.order_by('depth').last()
if cms_page is None:
return urljoin('category-not-assigned', self.slug)
return urljoin(cms_page.get_absolute_url(), self.slug)
| [
"[email protected]"
]
| |
0486cb46bc1ae6e72dae5915d07ace26bbe31c92 | ebc478ba6921afc9a3f613eda29942e3434512c6 | /snpfc/snpCompare.py | 4b454241b380e1545d7cabec6d16716f95c013b5 | [
"Python-2.0"
]
| permissive | TeamMacLean/snpFC | 704c24947bf13a476c733845324061a7e12e4b2f | 6f3f90ebba4aa527bca80381d7d0002438011d22 | refs/heads/master | 2020-04-28T02:28:44.062833 | 2019-04-26T11:36:11 | 2019-04-26T11:36:11 | 174,899,809 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 14,318 | py | import os
import vcf
class SnpCompare:
"class to compare SNPs from multiple VCFs."
def __init__(self, vcffiles):
"""Initializes the object with vcf files and data structure variables to compare snps."""
self.vcffilenames = vcffiles
self.snpsites = {}
self.snp_positions = {}
def _record_all_snp_positions(self, chromosome, position):
"""
creates datastructure to store snp positions.
Records all chromosome and positions in global variable (dictionary data structure) and initializes with an array of False boolean values for each vcf input file
Each boolean value is a positional value of a snp in input vcf files in an array.
E.g. if input vcf files are ["test1.vcf", "test2.vcf", "test3.vcf"]
snpsites["chr1"]["1"] = [False, False, False]
snpsites["chr1"]["10"] = [False, False, False]
snpsites["chr2"]["1"] = [False, False, False]
:type chromosome: string
:param chromosome: name of the chromosome
:type position: int
:param position: position of SNP call in the chromosome
return:
None
"""
if chromosome in self.snpsites.keys():
if str(position) in self.snpsites[chromosome].keys():
return
else:
self.snpsites[chromosome][str(position)] = [False] * len(
self.vcffilenames
)
else:
self.snpsites.update(
{chromosome: {str(position): [False] * len(self.vcffilenames)}}
)
def _record_all_snps(self, filename, chromosome, position, ref, alt):
"""
creates datastructure to store all snps and it's relevant information.
append the snp records to the dictionary data structure once they passed the filter
:type filename: string
:param filename: vcf filename
:type chromosome: string
:param chromosome: chromosome name on which SNP was call
:type position: int
:param position: base position in the chromosome
:type ref: String
:param ref: reference base in SNP call
:type alt: String
:param alt: alternate base in SNP call
return:
None
"""
if filename in self.snp_positions.keys():
if chromosome in self.snp_positions[filename].keys():
self.snp_positions[filename][chromosome].update(
{
str(position): {
"ref": ref,
"alt": str(alt)
.replace("[", "")
.replace("]", "")
.replace(" ", ""),
}
}
)
else:
self.snp_positions[filename].update(
{
chromosome: {
str(position): {
"ref": ref,
"alt": str(alt)
.replace("[", "")
.replace("]", "")
.replace(" ", ""),
}
}
}
)
else:
self.snp_positions.update(
{
filename: {
chromosome: {
str(position): {
"ref": ref,
"alt": str(alt)
.replace("[", "")
.replace("]", "")
.replace(" ", ""),
}
}
}
}
)
def _get_snp_data(self):
""" reads chromosome, position, reference and alternative columns for SNPs and store in dict data structure."""
vcf_counter = 0
for filename in self.vcffilenames:
vcf_reader = vcf.Reader(open(filename), "rb")
samplename = vcf_reader.samples[0]
for record in vcf_reader:
chromosome, position, ref, alt = (
record.CHROM,
record.POS,
record.REF,
record.ALT,
)
position = str(position)
## code to build all snps position
self._record_all_snp_positions(chromosome, position)
self._record_all_snps(filename, chromosome, position, ref, alt)
# self.snp_positions.update({str(vcf_counter) + "_" + chromosome + "_" + str(position):{"ref": str(ref), "alt":str(alt).replace("[","").replace("]", "")}})
self.snpsites[chromosome][str(position)][vcf_counter] = True
vcf_counter += 1
def count_list_elements_occurrences(self, alt_bases):
"""
counts number of each element of input array.
:type alt_bases: Array
:param alt_bases: alternate bases from all VCF files for same chromosome and position. e.g. ["A", "T", "A", "T,C"]
return:
array with count of each element in the input array. e.g for above array it retuns [2, 1, 2, 1]
"""
counts = []
for x in alt_bases:
counts.append(alt_bases.count(x))
return counts
def get_unique_snps(self):
""" records a unique snps in a vcf file """
for chromosome in self.snpsites.keys():
for position in self.snpsites[chromosome].keys():
for filenumber in range(len(self.vcffilenames)):
if (
self.snpsites[chromosome][position][filenumber] == True
and sum(self.snpsites[chromosome][position]) == 1
                    ):  # exactly one input VCF calls a SNP at this position
self.snp_positions[self.vcffilenames[filenumber]][chromosome][
position
].update({"unique": True})
elif (
sum(self.snpsites[chromosome][position]) >= 2
): # there might be snp at same position but with different alt base
snp_index = [
i
for i, j in enumerate(self.snpsites[chromosome][position])
if j == True
]
totalindex = len(snp_index)
# Lets check the alt base in these vcf files using index
# lets get array of alt bases from each file
alt_snps = []
for index in snp_index:
alt_snps.append(
self.snp_positions[self.vcffilenames[index]][
chromosome
][position]["alt"]
)
# get the counts of the elements
counts = self.count_list_elements_occurrences(alt_snps)
for index in range(len(counts)):
if counts[index] == 1:
# this is unique, so occurred once
self.snp_positions[self.vcffilenames[snp_index[index]]][
chromosome
][position].update(
{"unique": True}
) # vcffilenames[snp_index[index]] = this will be the filename
# print("this is unique", vcffilenames[snp_index[index]], chromosome, position, self.snp_positions[vcffilenames[snp_index[index]]][chromosome][position])
# else:
# vcf_database["self.snp_positions"][chromosome + "_" + position].update({"unique":False})
return
def get_common_snps(self):
""" records SNPs common to all VCF input files. """
for chromosome in self.snpsites.keys():
for position in self.snpsites[chromosome].keys():
if all(self.snpsites[chromosome][position]) == True:
# lets check if all alt bases are same
alt_snps = []
for index in range(len(self.snpsites[chromosome][position])):
alt_snps.append(
self.snp_positions[self.vcffilenames[index]][chromosome][
position
]["alt"]
)
counts = self.count_list_elements_occurrences(alt_snps)
for countindex in range(len(counts)):
if counts[countindex] == len(self.vcffilenames):
self.snp_positions[self.vcffilenames[countindex]][
chromosome
][position].update({"common": True})
def compare(self, outdir, save=True, display=False):
"""
save the common/unique snps to files and/or display the results.
:type outdir: string
:param outdir: output directory to save the output files
:type save: boolean
:param save: save the results to output files. Default True
:type display: boolean
:param display: display the results on the screen. Default False
returns:
None
"""
self._get_snp_data()
self.get_unique_snps()
self.get_common_snps()
outfiles = []
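        # One report per input VCF; each output line lists:
        # chromosome position ref alt common|unique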
for filename in self.vcffilenames:
outfile = (
outdir
+ "/"
+ os.path.basename(filename).replace(".vcf", "")
+ "_snpcompare.txt"
)
outfh = open(outfile, "w")
outfiles.append(outfile)
if display == True:
print("Common and Unique SNPS in vcf File : ", filename)
else:
print(
"Common and Unique SNPs from file ",
filename,
" are saved in :",
outfile,
)
for chromosome in self.snp_positions[filename].keys():
for position in self.snp_positions[filename][chromosome].keys():
if (
"common"
in self.snp_positions[filename][chromosome][position].keys()
and self.snp_positions[filename][chromosome][position]["common"]
== True
):
if save == True:
outfh.write(
" ".join(
[
chromosome,
position,
self.snp_positions[filename][chromosome][
position
]["ref"],
self.snp_positions[filename][chromosome][
position
]["alt"],
"common",
]
)
+ "\n"
)
if display == True:
print(
" ".join(
[
chromosome,
position,
self.snp_positions[filename][chromosome][
position
]["ref"],
self.snp_positions[filename][chromosome][
position
]["alt"],
"common",
]
)
)
elif (
"unique"
in self.snp_positions[filename][chromosome][position].keys()
and self.snp_positions[filename][chromosome][position]["unique"]
== True
):
if save == True:
outfh.write(
" ".join(
[
chromosome,
position,
self.snp_positions[filename][chromosome][
position
]["ref"],
self.snp_positions[filename][chromosome][
position
]["alt"],
"unique",
]
)
+ "\n"
)
if display == True:
print(
" ".join(
[
chromosome,
position,
self.snp_positions[filename][chromosome][
position
]["ref"],
self.snp_positions[filename][chromosome][
position
]["alt"],
"unique",
]
)
)
outfh.close()
if display == True:
print("The outputs are saved in these files :", " ".join(outfiles))
return
| [
"[email protected]"
]
| |
4ac2e03f18c59c4cf2e602bb3b261c600978f096 | c7575775b41d1435f33131eb5fdb1b5ec9066bba | /heyapp/settings.py | e259df91898c04a481dccc46c69e0c51a5722d51 | []
| no_license | Wambuilucy/nostalgia | 12a5c951769b2d4dc0e4e68fce65bb53d360e0e7 | 5daf50d9e37e263a40c948ffcb155d85e03bd5b2 | refs/heads/master | 2023-01-06T08:00:33.547694 | 2020-11-03T17:00:44 | 2020-11-03T17:00:44 | 309,750,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,094 | py | """
Django settings for heyapp project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x5a(^4i%79m302-_w6^0&v*pa--xyf&=&=dr8j#2v5lu1%-^l-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'heyapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'heyapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
]
| |
7bd8c0b70b4ffef14ae6fc96a939fd776a871a5f | c4b852ce48ddef75f8c57bd2e70cc8ae7dc10693 | /ToDo/console/migrations/0006_auto_20150309_1629.py | cbad7de01caa9cfa4fe39921f022ec9d123245ed | []
| no_license | MMX13/todo | a2d063b3872579faa4b2ac4e1df4dfab74fe6788 | b303fcec6e44f93223e6dab0de5a6d359c72cc70 | refs/heads/master | 2020-06-07T11:36:13.255423 | 2015-03-20T05:00:11 | 2015-03-20T05:00:11 | 31,998,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('console', '0005_auto_20150309_1627'),
]
operations = [
migrations.AlterField(
model_name='task',
name='scheduledDate',
field=models.DateField(null=True, blank=True),
preserve_default=True,
),
]
| [
"[email protected]"
]
| |
396451adf046ae9a1e9a93d08c731002c02b4a78 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AntMerchantExpandIndirectOnlineModifyResponse.py | deb575d50e426359ce6993ae14225946249fa464 | [
"Apache-2.0"
]
| permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 805 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AntMerchantExpandIndirectOnlineModifyResponse(AlipayResponse):
def __init__(self):
super(AntMerchantExpandIndirectOnlineModifyResponse, self).__init__()
self._sub_merchant_id = None
@property
def sub_merchant_id(self):
return self._sub_merchant_id
@sub_merchant_id.setter
def sub_merchant_id(self, value):
self._sub_merchant_id = value
def parse_response_content(self, response_content):
response = super(AntMerchantExpandIndirectOnlineModifyResponse, self).parse_response_content(response_content)
if 'sub_merchant_id' in response:
self.sub_merchant_id = response['sub_merchant_id']
| [
"[email protected]"
]
| |
4b664002f3b91925204f95cf5afde92db89ca9f4 | 154e563104144721865a90987db0332bef08a4c3 | /rh_aligner/plotting/__init__.py | 8187d6023c6db7b60a29f9fbf00456387099c256 | [
"MIT"
]
| permissive | Rhoana/rh_aligner | 565572d645769053c74a36ddf0f53ecc20d997fe | baab698f6520b9b999bccf423dc510b0c8f4b9bb | refs/heads/master | 2021-01-01T05:29:25.406459 | 2016-05-09T15:34:58 | 2016-05-09T15:34:58 | 56,165,015 | 3 | 3 | null | 2016-05-05T20:00:26 | 2016-04-13T15:43:33 | Python | UTF-8 | Python | false | false | 281 | py | """
Plotting of the stitching and alignment steps library
- to debug the steps
"""
from .view_pre_pmcc_mfov import view_pre_pmcc_mfov
from .view_post_pmcc_mfov import view_post_pmcc_mfov
__all__ = [
'view_pre_pmcc_mfov',
'view_post_pmcc_mfov'
]
| [
"[email protected]"
]
| |
467775b4bd0bdc529f7af369a772db9776c3f4d4 | 0b793bce2da8c3d09b7956c0672ddbffd46feaed | /atcoder/corp/dwacon6_a.py | 0f60c706492fb0f7e55329255dd53fcbe06cb6d9 | [
"MIT"
]
| permissive | knuu/competitive-programming | c6c4e08fb231937d988bdc5a60a8ad6b31b97616 | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | refs/heads/master | 2021-01-17T09:39:02.647688 | 2020-11-07T03:17:22 | 2020-11-07T03:17:22 | 27,886,732 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | N = int(input())
titles, times = [], []
for _ in range(N):
s, t = input().split()
titles.append(s)
times.append(int(t))
idx = titles.index(input())
ans = 0
for i in range(idx+1, N):
ans += times[i]
print(ans)
| [
"[email protected]"
]
| |
c16c35fcddd7c0a797b27db56f807ce47716da1a | 7bc03c4f3fc28a9cebc9ebd3581060129a112416 | /ogb/ogbn-products/mlp.py | 3a6a307fffdcbe5fdff3be599899997a804eddba | [
"BSD-3-Clause"
]
| permissive | cornell-zhang/GraphZoom | 468f6a03cf8009d133345f267425933421054e6a | 5156f86fa22efa853e864553de4225a9122d8062 | refs/heads/master | 2023-04-04T19:06:33.155239 | 2021-04-20T23:21:35 | 2021-04-20T23:21:35 | 238,811,116 | 106 | 16 | BSD-3-Clause | 2023-03-24T23:42:18 | 2020-02-07T00:10:40 | Python | UTF-8 | Python | false | false | 4,345 | py | import argparse
import numpy as np
import torch
import torch.nn.functional as F
from ogb.nodeproppred import PygNodePropPredDataset, Evaluator
from logger import Logger
from csv import writer
class MLP(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
dropout):
super(MLP, self).__init__()
self.lins = torch.nn.ModuleList()
self.lins.append(torch.nn.Linear(in_channels, hidden_channels))
for _ in range(num_layers - 2):
self.lins.append(torch.nn.Linear(hidden_channels, hidden_channels))
self.lins.append(torch.nn.Linear(hidden_channels, out_channels))
self.dropout = dropout
def reset_parameters(self):
for lin in self.lins:
lin.reset_parameters()
def forward(self, x):
for lin in self.lins[:-1]:
x = lin(x)
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.lins[-1](x)
return torch.log_softmax(x, dim=-1)
def train(model, x, y_true, train_idx, optimizer):
model.train()
optimizer.zero_grad()
out = model(x[train_idx])
loss = F.nll_loss(out, y_true.squeeze(1)[train_idx])
loss.backward()
optimizer.step()
return loss.item()
@torch.no_grad()
def test(model, x, y_true, split_idx, evaluator):
model.eval()
out = model(x)
y_pred = out.argmax(dim=-1, keepdim=True)
train_acc = evaluator.eval({
'y_true': y_true[split_idx['train']],
'y_pred': y_pred[split_idx['train']],
})['acc']
valid_acc = evaluator.eval({
'y_true': y_true[split_idx['valid']],
'y_pred': y_pred[split_idx['valid']],
})['acc']
test_acc = evaluator.eval({
'y_true': y_true[split_idx['test']],
'y_pred': y_pred[split_idx['test']],
})['acc']
return train_acc, valid_acc, test_acc
def main():
parser = argparse.ArgumentParser(description='OGBN-Products (MLP)')
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--log_steps', type=int, default=1)
parser.add_argument('--use_node_embedding', action='store_true')
parser.add_argument('--num_layers', type=int, default=3)
parser.add_argument('--hidden_channels', type=int, default=256)
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--epochs', type=int, default=300)
parser.add_argument('--runs', type=int, default=10)
args = parser.parse_args()
print(args)
device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
device = torch.device(device)
dataset = PygNodePropPredDataset(name='ogbn-products')
split_idx = dataset.get_idx_split()
data = dataset[0]
x = data.x
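    # Optionally concatenate precomputed node embeddings (here loaded from
    # embed_results/, e.g. produced by the GraphZoom pipeline) onto the raw
    # node features before feeding them to the MLP.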
if args.use_node_embedding:
embedding = np.load('./embed_results/embeddings.npy')
embedding = torch.from_numpy(embedding).float()
x = torch.cat([x, embedding], dim=-1)
x = x.to(device)
y_true = data.y.to(device)
train_idx = split_idx['train'].to(device)
model = MLP(x.size(-1), args.hidden_channels, dataset.num_classes, args.num_layers,
args.dropout).to(device)
evaluator = Evaluator(name='ogbn-products')
logger = Logger(args.runs, args)
for run in range(args.runs):
model.reset_parameters()
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
for epoch in range(1, 1 + args.epochs):
loss = train(model, x, y_true, train_idx, optimizer)
result = test(model, x, y_true, split_idx, evaluator)
logger.add_result(run, result)
if epoch % args.log_steps == 0:
train_acc, valid_acc, test_acc = result
print(f'Run: {run + 1:02d}, '
f'Epoch: {epoch:02d}, '
f'Loss: {loss:.4f}, '
f'Train: {100 * train_acc:.2f}%, '
f'Valid: {100 * valid_acc:.2f}%, '
f'Test: {100 * test_acc:.2f}%')
logger.print_statistics(run)
logger.print_statistics()
total_params = sum(p.numel() for p in model.parameters())
print(f'mlp total params are {total_params}')
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
1517524505fa74f594aa60a8d208dff423017646 | 2ce15189f888f9823469d7eaca69c7c1e06d9a01 | /apps/login_app/migrations/0001_initial.py | 0d97fff9e69a4eec96bb77b7913c10519bb3f726 | []
| no_license | paulfranco/cards | 6cbcbe3e71b889cb8fbbbbc0fe9a3a4b229087d0 | 5250b2526d37b9993355fc2f5afed3df778a2812 | refs/heads/master | 2021-07-04T16:49:41.182133 | 2017-09-27T06:20:44 | 2017-09-27T06:20:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-27 01:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=200)),
('last_name', models.CharField(max_length=200)),
('email', models.CharField(max_length=200)),
('password', models.CharField(max_length=200)),
],
),
]
| [
"[email protected]"
]
| |
36ef3a599bd8eeffa53ece08e08283a8b626e2f6 | 29a0ce2e17aa6a092d3c5bae2da9a3a4fcd7fd22 | /pandas_practice/basic5.5_clean.py | 0b15b414ec7a4ed81aeffe658385351a63fed922 | []
| no_license | varun0308/python_practice | 0a94c7c5789665e6f930166a424499647b83ba55 | 6badc59b871b610ef407054128bc1ed482ec148f | refs/heads/main | 2023-08-25T12:56:22.404006 | 2021-10-02T13:59:47 | 2021-10-02T13:59:47 | 330,429,476 | 0 | 0 | null | 2021-01-20T09:22:17 | 2021-01-17T16:02:00 | Python | UTF-8 | Python | false | false | 380 | py | import pandas as pd
df = pd.read_csv('D:\Varun\python_practice\pandas_practice\dirtydata.csv')
print(df.head())
# We can drop any unwanted column as
df1 = df.drop(['Calories'], axis = 1)  # or axis = 'columns'
print(df1.head())
# We can concatenate 2 dataframes, column-wise or row-wise
df2 = pd.concat([df1, df['Calories']], axis = 1)  # or axis = 0 for row concatenation
print(df2.head()) | [
"[email protected]"
]
| |
47cf0fd8aeef98b4cf379b295d0c198188f4b2da | 22a671da954e3c6ecc83f7d24bb4a7c6c22cdeb9 | /flask/lib/python3.6/io.py | 153ad1e1b39a6a6b1fc5224983d5dbaa58d11d97 | []
| no_license | areibman/Kaggle-Renthop | 62b7b0066bd6e980f10447681d749f183e8552b9 | 553aadca9ba1faa1696df048fb46e3843dda0113 | refs/heads/master | 2021-09-03T14:22:09.400118 | 2018-01-09T18:50:13 | 2018-01-09T18:50:13 | 116,855,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | /Users/alex/anaconda3/lib/python3.6/io.py | [
"[email protected]"
]
| |
97550ee9088bd44771d6985a9fa127e9f57fd623 | 9fb8221849511024fa74a7a9f40c89075e06d7b1 | /node_modules/bcrypt/build/config.gypi | e53b910c93cae2e01ae79dba6454d5dd5ee46d45 | [
"MIT"
]
| permissive | ericajlittle/tinyURL | d94f6598ef49c95177bc84fa52351672dd9b9c86 | b1f167da5a5665ab93598594389ac7f3a665b534 | refs/heads/master | 2021-01-11T21:13:21.703523 | 2017-01-31T21:51:28 | 2017-01-31T21:51:28 | 79,271,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,489 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"debug_devtools": "node",
"gas_version": "2.23",
"host_arch": "x64",
"icu_data_file": "icudt57l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt57l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "57",
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 48,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"openssl_fips": "",
"openssl_no_asm": 0,
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_inspector": "true",
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "/home/vagrant/.node-gyp/6.3.1",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/vagrant/week2day2/express_tinyURL/node_modules/bcrypt/lib/binding/bcrypt_lib.node",
"module_name": "bcrypt_lib",
"module_path": "/vagrant/week2day2/express_tinyURL/node_modules/bcrypt/lib/binding",
"cache_lock_stale": "60000",
"legacy_bundling": "",
"sign_git_tag": "",
"user_agent": "npm/3.10.3 node/v6.3.1 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "1000",
"force": "",
"only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"userconfig": "/home/vagrant/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"progress": "true",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"dry_run": "",
"prefix": "/home/vagrant/.nvm/versions/node/v6.3.1",
"browser": "",
"cache_lock_wait": "10000",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"scope": "",
"searchopts": "",
"versions": "",
"cache": "/home/vagrant/.npm",
"global_style": "",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"maxsockets": "50",
"umask": "0002",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"save": "true",
"access": "",
"also": "",
"unicode": "",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "6.3.1",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/home/vagrant/.nvm/versions/node/v6.3.1/etc/npmrc",
"init_module": "/home/vagrant/.npm-init.js",
"parseable": "",
"globalignorefile": "/home/vagrant/.nvm/versions/node/v6.3.1/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
| [
"[email protected]"
]
| |
25824908e100267109197ad1c04cca8d349a6f10 | 8cf0cf9b71b7c5fbaa150e9893bf461ef661045e | /ownblock/ownblock/apps/parking/models.py | 84c75498d4e8e94365b81a282ee43d877a925a7d | [
"MIT"
]
| permissive | danjac/ownblock | 676b27a5aa0d4ce2ac2cd924a632489cd6fc21ee | ac662fb7efb2f04567e2f85638c1250286452611 | refs/heads/master | 2016-08-02T21:51:56.055598 | 2015-05-02T12:54:47 | 2015-05-02T12:54:47 | 34,940,828 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | from django.conf import settings
from django.db import models
from django_countries.fields import CountryField
class Vehicle(models.Model):
description = models.CharField(max_length=100)
registration_number = models.CharField(max_length=12)
country = CountryField(default="FI")
resident = models.ForeignKey(settings.AUTH_USER_MODEL)
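    # hedged note: on Django >= 2.0 this ForeignKey would also need an
    # explicit on_delete argument (e.g. models.CASCADE); this project
    # predates that requirement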
reserved_place = models.CharField(max_length=12, blank=True)
def __str__(self):
return self.registration_number
def get_groups(self):
return [self.resident,
self.resident.apartment.building.site.group]
def has_permission(self, user, perm):
if user.role == 'resident':
return user == self.resident
if user.role == 'manager':
return (self.resident.apartment.building.site_id ==
user.site_id)
return False
| [
"[email protected]"
]
| |
2c04318b0b4fa9de6ab094de0b250b0fad83f7ad | 3ee88f6e7c427eef6dd7bb177f4b3a727bccbcd2 | /pbnh/conf.py | 90a4b7dcbb4ef4decd192aada7dd76669fc36f77 | [
"MIT"
]
| permissive | bhanderson/pbnh | 67a4132adf366dafda3d01cfcfae92f14d84266c | 59bcb27de1a2300f14476349f3182c651a8f88a1 | refs/heads/master | 2022-11-05T13:45:17.674023 | 2022-10-24T23:21:24 | 2022-10-24T23:21:24 | 55,452,403 | 17 | 10 | MIT | 2022-10-24T23:21:25 | 2016-04-04T23:21:09 | JavaScript | UTF-8 | Python | false | false | 580 | py | import yaml
import copy
DEFAULTS = {
"server": {
"bind_ip": "0.0.0.0",
"bind_port": 5001,
"debug": True,
},
"database": {
"dbname": "pastedb",
"dialect": "sqlite",
"driver": None,
"host": None,
"password": None,
"port": None,
"username": None
}
}
def get_config():
try:
with open('secrets.yml') as config:
            # safe_load: a config file never needs arbitrary Python objects
            return yaml.safe_load(config)
except IOError:
        # deepcopy: DEFAULTS is nested, so a shallow copy would hand the
        # caller the shared inner dicts
        return copy.deepcopy(DEFAULTS)
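# Minimal usage sketch (hedged): note that a partial secrets.yml replaces
# DEFAULTS wholesale -- get_config() does not deep-merge the two dicts.
if __name__ == '__main__':
    print(get_config()['server']['bind_port'])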
| [
"[email protected]"
]
| |
caef2901988d4c4ef7134aa4f7bd5d6bc5e027cf | e180e68c468557b186d083869c005c98abdf539a | /Testing Hardcoded format/test12.py | 45c8d0b0c94ebea29eb0ddab7264b94cf3c404aa | []
| no_license | singhalshubh/Notification-system-Testing-using-selenium | 8a58977d7d63c1216e420363f408826e9bfccf7a | e460e7ceeb63e5eea9a914be0ed84febaebe47c7 | refs/heads/master | 2020-03-21T21:33:03.046748 | 2020-01-17T09:53:24 | 2020-01-17T09:53:24 | 139,069,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,514 | py | __author__= 'shubh'
import unittest
from selenium import webdriver
class signup(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
def test_unsubscribe_community(self):
user ="raghav"
pwd= "sha123#56su"
        # reuse the driver from setUp; a second Firefox instance here would
        # never be quit by tearDown
        driver = self.driver
driver.maximize_window() #For maximizing window
driver.implicitly_wait(20) #gives an implicit wait for 20 seconds
driver.get("http://127.0.0.1:8000/")
driver.find_element_by_xpath('//a [@href="/login/?next=/"]').click()
driver.get("http://localhost:8000/login/?next=/")
elem = driver.find_element_by_id("id_username")
elem.send_keys(user)
elem = driver.find_element_by_id("id_password")
elem.send_keys(pwd)
driver.find_element_by_class_name('btn-block').click()
driver.find_element_by_xpath('//a [@href="/communities/"]').click()
driver.find_element_by_xpath('//a [@href="/community-view/1/"]').click()
driver.find_element_by_id("join-us").click()
driver.find_element_by_xpath('//a [@href="/logout/"]').click()
driver.get("http://127.0.0.1:8000/")
user ="shubh"
pwd= "sha123#56su"
driver.find_element_by_xpath('//a [@href="/login/?next=/"]').click()
driver.get("http://localhost:8000/login/?next=/")
elem = driver.find_element_by_id("id_username")
elem.send_keys(user)
elem = driver.find_element_by_id("id_password")
elem.send_keys(pwd)
driver.find_element_by_class_name('btn-block').click()
driver.find_element_by_xpath('//a [@href="/communities/"]').click()
driver.find_element_by_xpath('//a [@href="/community-view/2/"]').click()
driver.find_element_by_xpath('//a [@href="/manage_community/2/"]').click()
name = "raghav"
Role = "author"
elem = driver.find_element_by_id("username")
elem.send_keys(name)
elem = driver.find_element_by_id("role")
elem.send_keys(Role)
#specify the class
driver.find_element_by_class_name('btn-block').click()
driver.find_element_by_xpath('//a [@href="/logout/"]').click()
driver.get("http://127.0.0.1:8000/")
user ="raghav"
pwd= "sha123#56su"
driver.find_element_by_xpath('//a [@href="/login/?next=/"]').click()
driver.get("http://localhost:8000/login/?next=/")
elem = driver.find_element_by_id("id_username")
elem.send_keys(user)
elem = driver.find_element_by_id("id_password")
elem.send_keys(pwd)
driver.find_element_by_class_name('btn-block').click()
driver.find_element_by_xpath('//a [@href="/notifications/"]').click()
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
]
| |
4f1cde96439f98545e5230a27a9a466a7284b8c1 | 2d39d2dbfc4d54c79899caf6fab83d23b1823b9a | /extract_info.py | f2316188355dc7286dc2172aa53a4455fba34d35 | []
| no_license | josephthurman/nyt-reviews | 77db49ab131cbbb2664a45cf4d1625b2ea79f5c8 | 15067020ecb96acfc89a2b14c38ebd3e0dbf7841 | refs/heads/master | 2020-03-24T11:51:41.936973 | 2018-10-03T19:17:44 | 2018-10-03T19:17:44 | 142,697,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,327 | py | from bs4 import BeautifulSoup
import re
import json
import os
# Some helper functions - mostly used in testing
def get_review(counter):
with open('./reviews/review' + str(counter) + '.html', 'r') as file:
parsed = BeautifulSoup(file, 'html.parser')
return(parsed)
# Extract the text of the review
def find_review(bs):
tag_searches = [('p', re.compile('story-body-text story-content')),
('p', re.compile('css-1i0edl6'))]
for (tag, regex) in tag_searches:
result = bs.find_all(tag, {'class': regex})
if len(result) > 0:
review_text = ''
for p in result:
review_text += p.get_text()
review_text = re.sub(r'\s+', ' ', review_text)
return(review_text)
# Return EMPTY if review text cannot be found
return("EMPTY")
# Extract the rating from the review
def find_stars(bs):
# Newer reviews have the rating set off from the story in special html tag. Find those first
tag_searches = [('span', re.compile('ReviewFooter-stars')),
('div', re.compile('ReviewFooter-rating')),
('li', re.compile('critic-star-rating')),
('li', re.compile('critic-word-rating'))]
stars = "NA"
for (tag, regex) in tag_searches:
result = bs.find_all(tag, {'class': regex})
if len(result) > 0:
text = result[0].get_text()
stars = re.sub(r'\s+', ' ', text).strip()
if stars in ['Satisfactory', 'Fair', 'Poor']:
return(stars)
else:
return(str(len(stars)))
# Older stories have the rating just sitting in a plain paragraph. We can also find those
if re.search('<p.*?>\s*[Ss]atisfactory\s*</p>', str(bs)):
return('Satisfactory')
if re.search('<p.*?>\s*[Ff]air\s*</p>', str(bs)):
return('Fair')
if re.search('<p.*?>\s*[Pp]oor\s*</p>', str(bs)):
return('Poor')
direct_search = re.search('<p.*?>\s*★+\s*</p>', str(bs))
if direct_search:
just_stars = re.search('★+', direct_search.group()).group()
return(str(len(just_stars)))
# If all else fails, return 'NA' to show we couldn't find a rating
return('NA')
# Extract the number of recommended dishes in the review
def find_rec_dishes(bs):
tag_searches = [('div', 'class', re.compile('ReviewFooter-recommendedDishes')),
('span', 'itemprop', re.compile('[Mm]enu'))]
rec_dish_text = ''
for (tag, property, regex) in tag_searches:
result = bs.find_all(tag, {property: regex})
if result:
if len(result) > 1:
rec_dish_text = result[1].get_text()
else:
rec_dish_text = result[0].get_text()
break
    # Two reviews list the recommended dishes in a totally different format. We pick those up here
if rec_dish_text == '':
regex = re.compile(r'RECOMMENDED\s*</strong>(.*?)</p>', flags = re.DOTALL)
rec_dish_text = re.search(regex, str(bs)).group(1)
# Return the number of recommended dishes
rec_dish_list = re.split('; |\. ', rec_dish_text)
return(len(rec_dish_list))
# Convert numeric price to price category
# This is a best guess - they don't have a current key for this.
# But we're actually only doing this for 2 reviews anyway
def price_to_category(price):
if price < 25:
return(1)
elif price < 50:
return(2)
elif price < 100:
return(3)
else:
return(4)
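# e.g. price_to_category(20) -> 1 ("$"), price_to_category(30) -> 2 ("$$"),
# price_to_category(120) -> 4 ("$$$$"); thresholds are the best guess noted above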
# Extract the price rating in the review
def find_price(bs):
tag_searches = [('dd', 'class', 'price'),
('span', 'itemprop', re.compile('[Pp]rice[Rr]ange'))]
price_text = ''
for (tag, property, regex) in tag_searches:
result = bs.find_all(tag, {property: regex})
if len(result) > 0:
price_text = result[0].get_text()
break
# Two reviews have the prices listed in a totally different format. We pick those up here
if price_text == '':
regex = re.compile(r'PRICES\s*</strong>(.*?)</p>', flags = re.DOTALL)
price_text = re.search(regex, str(bs)).group(1)
# Read price description and get the dollar sign rating
dollar_regex = re.compile('(\$+)\s')
dollarsigns = re.search(dollar_regex, price_text)
if dollarsigns:
return(len(dollarsigns.group(1)))
else:
# Extract actual price numbers - finds numbers preceded by $, or by hyphens for ranges,
        # e.g. return 5, 17 and 45 from something like "apps $5-17, entrees $45"
price_regex = re.compile('(?<=[-\$])[0-9]+')
list_of_prices = re.findall(price_regex, price_text)
if list_of_prices:
max_price = max(map(int,list_of_prices))
return(price_to_category(max_price))
else:
# Error - couldn't find the price category.
return(0)
if __name__ == '__main__':
with open('./reviews/url_list.txt', 'r') as url_file:
urls = json.load(url_file)
cleaned_reviews = []
unprocessed_URLS = []
for counter, review_url in enumerate(urls):
# progress counter for debugging
if counter % 10 == 0:
print(counter)
# Read review
parsed = get_review(counter)
rating = find_stars(parsed)
if rating != 'NA':
restaurant_info = {'id': counter,
'review_url': review_url,
'rating': rating,
'review_text': find_review(parsed),
'price': find_price(parsed),
'rec_dishes' : find_rec_dishes(parsed)}
cleaned_reviews.append(restaurant_info)
else:
unprocessed_URLS.append(review_url)
# There are still some articles for which we can't find a star rating. The list of such articles is saved here
# It ends up being short enough to inspect by hand and see that none of these articles are real reviews with stars
os.makedirs('data', exist_ok=True)
with open('./data/unprocessed_URLs.txt', 'w') as outfile:
json.dump(unprocessed_URLS, outfile)
# Save cleaned reviews for further analysis
with open('./data/cleaned_reviews.json', 'w') as outfile:
json.dump(cleaned_reviews, outfile) | [
"[email protected]"
]
| |
12554a6f358810f3d7bcf732d99807639d1b65bf | 22ebdd6881730a9474ede8e8167c615990c4e275 | /prob17a.py | e5cae5301008b8a21864cb95ac76154a72222942 | []
| no_license | MMohan1/eueler | a96a465b265334b03645f2e2bb66c85395c54e75 | 05a88f1c9b41fbc3d6bcd95b38b83a6510b3b50a | refs/heads/master | 2021-01-18T15:14:35.320214 | 2015-02-02T11:02:06 | 2015-02-02T11:02:06 | 15,935,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | def prob17():
list1=[]
total_char=0
dict1={0:'',1:'one',2:'two',3:'three',4:'four',5:'five',6:'six',7:'seven',8:'eight',9:'nine',10:'ten'}
dict2={11:'eleven',12:'twelve',13:'thirteen',14:'fourteen',15:'fifteen',16:'sixteen',17:'seventeen',18:'eighteen',19:'nineteen'}
    # note: 'forty' and 'ninety' (not 'fourty'/'ninty') -- the misspellings
    # would throw the letter counts off
    dict3={0:'',1:'ten',2:'twenty',3:'thirty',4:'forty',5:'fifty',6:'sixty',7:'seventy',8:'eighty',9:'ninety'}
for i in range(1,100):
x=str(i)
if len(x) == 1:
list1.append(dict1[i])
elif len(x) == 2 and x[1] == '0':
list1.append(dict3[int(x[0])])
elif len(x) == 2 and x[0] == '1':
list1.append(dict2[i])
elif len(x) == 2:
list1.append(dict3[int(x[0])]+dict1[int(x[1])])
p = sum([len(i) for i in list1])
print list1,p
k = 3*((13*99)+p) + 3*((14*99)+p) + 3*((15*99)+p) + len('onethousand') + p + 99
print k
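# Worked letter counts from the problem statement (British usage, no
# spaces or hyphens):
#   342 -> "threehundredandfortytwo"  (23 letters)
#   115 -> "onehundredandfifteen"     (20 letters)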
if __name__ == '__main__':
prob17()
| [
"[email protected]"
]
| |
564f224574f406c1a966ab5582a316627e5a9ae1 | 2cfa657fd119a23de2a5c2ae6d55e6d2516bae2d | /test/functional/wallet_keypool_topup.py | 1c1aa4fe3a776fdc70d840768a3b9deacdbccf53 | [
"MIT"
]
| permissive | vivuscoin/vivuscoin | 640b10ae3a72c03b501e03b07caae09ce6c87c81 | ba0db89712234bf68b2d6b63ef2c420d65c7c25d | refs/heads/master | 2023-05-07T06:26:26.241247 | 2021-05-25T03:54:32 | 2021-05-25T03:54:32 | 362,198,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,779 | py | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Copyright (c) 2021 The Vivuscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test HD Wallet keypool restore function.
Four nodes. Nodes 1-3 are under test (one per address type). Node0 is providing transactions and generating blocks.
- Start node1, shutdown and backup wallet.
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110.
- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1.
- connect node1 to node0. Verify that they sync and node1 receives its funds."""
import os
import shutil
from test_framework.test_framework import VivuscoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
sync_blocks,
)
class KeypoolRestoreTest(VivuscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 4
self.extra_args = [[], ['-keypool=100'], ['-keypool=100'], ['-keypool=100']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
wallet_path = os.path.join(self.nodes[1].datadir, "regtest", "wallets", "wallet.dat")
wallet_backup_path = os.path.join(self.nodes[1].datadir, "wallet.bak")
self.nodes[0].generate(101)
self.log.info("Make backup of wallet")
self.stop_node(1)
shutil.copyfile(wallet_path, wallet_backup_path)
self.start_node(1, self.extra_args[1])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
for i, output_type in enumerate(["legacy", "p2sh-segwit", "bech32"]):
self.log.info("Generate keys for wallet with address type: {}".format(output_type))
idx = i+1
for _ in range(90):
addr_oldpool = self.nodes[idx].getnewaddress(address_type=output_type)
for _ in range(20):
addr_extpool = self.nodes[idx].getnewaddress(address_type=output_type)
# Make sure we're creating the outputs we expect
address_details = self.nodes[idx].validateaddress(addr_extpool)
if i == 0:
assert(not address_details["isscript"] and not address_details["iswitness"])
elif i == 1:
assert(address_details["isscript"] and not address_details["iswitness"])
else:
assert(not address_details["isscript"] and address_details["iswitness"])
self.log.info("Send funds to wallet")
self.nodes[0].sendtoaddress(addr_oldpool, 10)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(addr_extpool, 5)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.log.info("Restart node with wallet backup")
self.stop_node(idx)
shutil.copyfile(wallet_backup_path, wallet_path)
self.start_node(idx, self.extra_args[idx])
connect_nodes_bi(self.nodes, 0, idx)
self.sync_all()
self.log.info("Verify keypool is restored and balance is correct")
assert_equal(self.nodes[idx].getbalance(), 15)
assert_equal(self.nodes[idx].listtransactions()[0]['category'], "receive")
# Check that we have marked all keys up to the used keypool key as used
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress())['hdkeypath'], "m/0'/0'/110'")
if __name__ == '__main__':
KeypoolRestoreTest().main()
| [
"[email protected]"
]
| |
dada702324b30a4d4a00d067c7b3c97d8b05129b | b8ef1a5cd3856a8e9134c3313a4e23522f199df7 | /Baekjoon/1966_프린터 큐/1966_프린터 큐.py | 73d38dbd9d6bdf4902738765108954a7e7151128 | []
| no_license | scl2589/Algorithm_problem_solving | 910623d9675ae0219320abfd1fefc7d576027544 | 80db697cdd0180a7d4dbcfae4944d4a54191bddf | refs/heads/master | 2023-07-29T10:56:38.225206 | 2021-09-11T13:50:46 | 2021-09-11T13:50:46 | 235,363,353 | 0 | 0 | null | 2021-03-04T15:39:41 | 2020-01-21T14:36:41 | Python | UTF-8 | Python | false | false | 548 | py | from collections import deque
tc = int(input())
for _ in range(tc):
N, M = map(int, input().split())
impt = list(map(int, input().split()))
q = deque()
for idx, value in enumerate(impt):
q.append([idx, value])
count = 0
while True:
max_num = sorted(q, key = lambda x: x[1], reverse=True)[0][1]
num = q.popleft()
if num[0] == M and num[1] == max_num:
break
elif num[1] == max_num:
count += 1
else:
q.append(num)
print(count + 1)
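# Note: only the maximum is needed each round, so
# max(q, key=lambda x: x[1])[1] would avoid re-sorting the whole queue
# on every pop (O(n) instead of O(n log n) per iteration).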
| [
"[email protected]"
]
| |
fd6eb4ffc23f7389f26fd2d60442434609b29286 | 5f814192b19721dc9c06e0e9595738b0f8561233 | /OCR/east_text_detection.py | f8bee5a5e3d5d1b813617866d1b192837295a2ef | []
| no_license | irischo/civil_translation | 7b3c5c58e201f74547d5ae21123fdfd9d4bc5e64 | 240638a434957ea25cfac262da93fc23e292f6f2 | refs/heads/master | 2022-11-23T15:32:50.503095 | 2020-07-29T00:40:06 | 2020-07-29T00:40:06 | 283,387,321 | 0 | 0 | null | 2020-07-29T03:21:50 | 2020-07-29T03:21:49 | null | UTF-8 | Python | false | false | 2,829 | py | from imutils.object_detection import non_max_suppression
import numpy as np
import argparse
import time
import cv2
# argument parse
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--image', type=str, help='path to input image')
ap.add_argument('-east', '--east', type=str, help='path to input EAST text detector')
ap.add_argument('-c', '--min-confidence', type=float, default=0.5, help='minimum probability required to inspect a region')
ap.add_argument('-w', '--width', type=int, default=320, help='resized image width (should be multiple of 32)')
ap.add_argument('-e', '--height', type=int, default=320, help='resized image height (should be multiple of 32)')
args = vars(ap.parse_args())
# load image
image = cv2.imread(args['image'])
orig = image.copy()
(H, W) = image.shape[:2]
# set new width & height
(newW, newH) = (args['width'], args['height'])
rW = W / float(newW)
rH = H / float(newH)
# resize image
image = cv2.resize(image, (newW, newH))
(H, W) = image.shape[:2]
layerNames = [
'feature_fusion/Conv_7/Sigmoid', # text or not check
'feature_fusion/concat_3' # image geometry
]
# load pre-trained EAST text detector (from frozen_east_text_detection.pb)
print('[INFO] loading EAST text detector ...')
net = cv2.dnn.readNet(args['east'])
# swapRB (not swapPB) is the OpenCV keyword; it swaps the R and B channels
blob = cv2.dnn.blobFromImage(image, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB=True, crop=False)
start = time.time()
net.setInput(blob)
(scores, geometry) = net.forward(layerNames)
end = time.time()
# show timing information on text prediction
print("[INFO] text detection took {:.6f} seconds".format(end - start))
(numRows, numCols) = scores.shape[2:4]
rects = []
confidences = []
for y in range(0, numRows):
scoresData = scores[0, 0, y]
xData0 = geometry[0, 0, y]
xData1 = geometry[0, 1, y]
xData2 = geometry[0, 2, y]
xData3 = geometry[0, 3, y]
anglesData = geometry[0, 4, y]
for x in range(0, numCols):
if scoresData[x] < args['min_confidence']:
continue
(offsetX, offsetY) = (x * 4.0, y * 4.0)
angle = anglesData[x]
cos = np.cos(angle)
sin = np.sin(angle)
h = xData0[x] + xData2[x]
w = xData1[x] + xData3[x]
endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
startX = int(endX - w)
startY = int(endY - h)
rects.append((startX, startY, endX, endY))
confidences.append(scoresData[x])
boxes = non_max_suppression(np.array(rects), probs=confidences)
for (startX, startY, endX, endY) in boxes:
startX = int(startX * rW)
startY = int(startY * rH)
endX = int(endX * rW)
endY = int(endY * rH)
cv2.rectangle(orig, (startX, startY), (endX, endY), (0, 255, 0), 2)
cv2.imshow('Text Detection', orig)
cv2.waitKey(0)
| [
"[email protected]"
]
| |
2054a0b722aa2fa72a01cff5884700470b6bbf49 | edcccb46c663d7ef52b72c40c2b93c1f2b61c2d2 | /Sample Work/Remove Char.py | f3ddb784e1ebb06aec5b34fd384a31d52f733336 | []
| no_license | mre9798/Python_Anlin | 235292e80fe38181098706f733dba78614f38e7a | 83c1e0c994fa55e9bb8cd5de1097b2de7345813e | refs/heads/master | 2023-02-24T22:31:38.279969 | 2021-02-01T09:17:36 | 2021-02-01T09:17:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | # Remove nth character
s = input("Enter a string : ")
pos = int(input("Enter position of character to be removed : "))
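# e.g. s = "hello", pos = 3 (1-indexed) -> prints "helo"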
print(s[:pos-1] + s[pos:]) | [
"[email protected]"
]
| |
39b251cf54d0848c796c624fffa0a173acf39360 | 8d5ff31efba68cfc3e833d74ee2e1baa20c2ae87 | /example/petstore/apis/pet/updatePet.py | 905f31823115094929116457528d605abd007d7b | []
| no_license | avanov/swagger_codegen | 8fd3f08f0e0d7aff5beb5602e755836009b0494f | b0fcc84cfd12091ef443fc000d4ff6a547a48d86 | refs/heads/master | 2022-12-11T04:21:24.066731 | 2020-08-28T13:56:34 | 2020-08-28T13:56:34 | 292,118,282 | 0 | 0 | null | 2020-09-01T22:10:54 | 2020-09-01T22:10:53 | null | UTF-8 | Python | false | false | 1,585 | py | from __future__ import annotations
import pydantic
import datetime
import asyncio
import typing
from pydantic import BaseModel
from swagger_codegen.api.base import BaseApi
from swagger_codegen.api.request import ApiRequest
class Tag(BaseModel):
id: typing.Optional[int] = None
name: typing.Optional[str] = None
class Category(BaseModel):
id: typing.Optional[int] = None
name: typing.Optional[str] = None
class Pet(BaseModel):
category: typing.Optional[Category] = None
id: typing.Optional[int] = None
name: str
photoUrls: typing.List[str]
status: typing.Optional[str] = None
tags: typing.Optional[typing.List[Tag]] = None
def make_request(self: BaseApi, __request__: Pet,) -> Pet:
"""Update an existing pet"""
def serialize_item(item):
if isinstance(item, pydantic.BaseModel):
return item.dict()
return item
if isinstance(__request__, (list, tuple, set)):
body = [serialize_item(item) for item in __request__]
else:
body = __request__.dict()
m = ApiRequest(
method="PUT",
path="/api/v3/pet".format(),
content_type="application/json",
body=body,
headers=self._only_provided({}),
query_params=self._only_provided({}),
cookies=self._only_provided({}),
)
return self.make_request(
{
"200": {"application/json": Pet, "application/xml": Pet,},
"400": {"default": None,},
"404": {"default": None,},
"405": {"default": None,},
},
m,
)
| [
"[email protected]"
]
| |
fc07829f755d7e6cdcfbb45f1595dfd39618bdaa | 6d69b249a81e076d79787dd08eb8957908052052 | /libs/parse/sections/grouper_mixins/blocks.py | 154423ab5e2c3ebe6544db079d4af4ebccbedda5 | []
| no_license | 2vitalik/wiktionary | 02ee1f1327c3b82fc7b4d7da12083b1431b1eb8b | 8edae2f7dcf9089084c5ce7033c4fb0b454f4dfa | refs/heads/master | 2023-02-06T11:28:41.554604 | 2023-02-05T22:49:01 | 2023-02-05T22:49:01 | 121,025,447 | 7 | 2 | null | 2021-10-13T17:36:32 | 2018-02-10T15:06:24 | Lua | UTF-8 | Python | false | false | 486 | py | from libs.parse.groupers.sections.blocks.any_blocks import AnyBlocksGrouper
from libs.parse.groupers.sections.blocks.blocks import BlocksGrouper
from libs.parse.sections.grouper_mixins.sub_blocks import SubBlocksGroupersMixin
from libs.parse.utils.decorators import parsed
class BlocksGroupersMixin(SubBlocksGroupersMixin):
@property
@parsed
def blocks(self):
return BlocksGrouper(self)
@parsed
def any_blocks(self):
return AnyBlocksGrouper(self)
| [
"[email protected]"
]
| |
5df906375ee0c7d24ede8dd570122ce0cbdd1251 | 9bdc5bd0b6195761fbceed17c0725bc48a5941a1 | /testing/keras_taylor_1D.py | f24a460e11be29c65a55abbe497af20fe014f122 | []
| no_license | lapsintra/htt-ml | bc6bbb12eda4a3f0abbc5c0db13940a31b667a08 | ce07cad6fcc8625b1595157de6486759b74f6d62 | refs/heads/master | 2020-04-05T16:29:29.858916 | 2018-12-04T19:32:10 | 2018-12-04T19:32:10 | 157,015,043 | 0 | 0 | null | 2018-11-10T19:38:56 | 2018-11-10T19:38:56 | null | UTF-8 | Python | false | false | 7,871 | py | #!/usr/bin/env python
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True # disable ROOT internal argument parser
import argparse
from array import array
import yaml
import pickle
import numpy as np
import os
import sys
import matplotlib as mpl
mpl.use('Agg')
mpl.rcParams['font.size'] = 16
import matplotlib.pyplot as plt
from matplotlib import cm
from keras.models import load_model
import tensorflow as tf
from tensorflow_derivative.inputs import Inputs
from tensorflow_derivative.outputs import Outputs
from tensorflow_derivative.derivatives import Derivatives
import logging
logger = logging.getLogger("keras_taylor_1D")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
formatter = logging.Formatter("%(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def parse_arguments():
    parser = argparse.ArgumentParser(description="Produce 1D Taylor coefficient plot")
parser.add_argument("config_training", help="Path to training config file")
parser.add_argument("config_testing", help="Path to testing config file")
parser.add_argument("fold", type=int, help="Trained model to be tested.")
parser.add_argument(
"--no-abs",
action="store_true",
default=False,
help="Do not use abs for metric.")
parser.add_argument(
"--normalize",
action="store_true",
default=False,
help="Normalize rows.")
return parser.parse_args()
def parse_config(filename):
logger.debug("Load config %s.", filename)
    with open(filename, "r") as f:
        return yaml.safe_load(f)
def main(args, config_test, config_train):
# Load preprocessing
path = os.path.join(config_train["output_path"],
config_test["preprocessing"][args.fold])
logger.info("Load preprocessing %s.", path)
preprocessing = pickle.load(open(path, "rb"))
# Load Keras model
path = os.path.join(config_train["output_path"],
config_test["model"][args.fold])
logger.info("Load keras model %s.", path)
model_keras = load_model(path)
# Get TensorFlow graph
inputs = Inputs(config_train["variables"])
try:
sys.path.append("htt-ml/training")
import keras_models
except:
logger.fatal("Failed to import Keras models.")
raise Exception
try:
name_keras_model = config_train["model"]["name"]
model_tensorflow_impl = getattr(
keras_models, config_train["model"]["name"] + "_tensorflow")
except:
logger.fatal(
"Failed to load TensorFlow version of Keras model {}.".format(
name_keras_model))
raise Exception
model_tensorflow = model_tensorflow_impl(inputs.placeholders, model_keras)
outputs = Outputs(model_tensorflow, config_train["classes"])
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Get operations for first-order derivatives
deriv_ops = {}
derivatives = Derivatives(inputs, outputs)
for class_ in config_train["classes"]:
deriv_ops[class_] = []
for variable in config_train["variables"]:
deriv_ops[class_].append(derivatives.get(class_, [variable]))
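    # deriv_ops[class_][i] now holds the graph op for the first-order
    # partial derivative d f_class / d x_i, evaluated per fed event below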
# Loop over testing dataset
path = os.path.join(config_train["datasets"][(1, 0)[args.fold]])
logger.info("Loop over test dataset %s to get model response.", path)
file_ = ROOT.TFile(path)
mean_abs_deriv = {}
for i_class, class_ in enumerate(config_train["classes"]):
logger.debug("Process class %s.", class_)
tree = file_.Get(class_)
if tree == None:
logger.fatal("Tree %s does not exist.", class_)
raise Exception
values = []
for variable in config_train["variables"]:
typename = tree.GetLeaf(variable).GetTypeName()
if typename == "Float_t":
values.append(array("f", [-999]))
elif typename == "Int_t":
values.append(array("i", [-999]))
else:
logger.fatal("Variable {} has unknown type {}.".format(variable, typename))
raise Exception
tree.SetBranchAddress(variable, values[-1])
        # check the weight branch itself, not the last loop variable
        if tree.GetLeaf(config_test["weight_branch"]).GetTypeName() != "Float_t":
            logger.fatal("Weight branch has unknown type.")
raise Exception
weight = array("f", [-999])
tree.SetBranchAddress(config_test["weight_branch"], weight)
deriv_class = np.zeros((tree.GetEntries(),
len(config_train["variables"])))
weights = np.zeros((tree.GetEntries()))
for i_event in range(tree.GetEntries()):
tree.GetEntry(i_event)
# Preprocessing
values_stacked = np.hstack(values).reshape(1, len(values))
values_preprocessed = preprocessing.transform(values_stacked)
# Keras inference
response = model_keras.predict(values_preprocessed)
response_keras = np.squeeze(response)
# Tensorflow inference
response = sess.run(
model_tensorflow,
feed_dict={
inputs.placeholders: values_preprocessed
})
response_tensorflow = np.squeeze(response)
# Check compatibility
mean_error = np.mean(np.abs(response_keras - response_tensorflow))
if mean_error > 1e-5:
logger.fatal(
"Found mean error of {} between Keras and TensorFlow output for event {}.".
format(mean_error, i_event))
raise Exception
# Calculate first-order derivatives
deriv_values = sess.run(
deriv_ops[class_],
feed_dict={
inputs.placeholders: values_preprocessed
})
deriv_values = np.squeeze(deriv_values)
deriv_class[i_event, :] = deriv_values
# Store weight
weights[i_event] = weight[0]
if args.no_abs:
mean_abs_deriv[class_] = np.average((deriv_class), weights=weights, axis=0)
else:
mean_abs_deriv[class_] = np.average(np.abs(deriv_class), weights=weights, axis=0)
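        # the 1D "Taylor coefficient" per variable is the event-weighted mean
        # of |d f_class / d x_i| over the test sample (plain mean with --no-abs)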
# Normalize rows
classes = config_train["classes"]
matrix = np.vstack([mean_abs_deriv[class_] for class_ in classes])
if args.normalize:
for i_class, class_ in enumerate(classes):
matrix[i_class, :] = matrix[i_class, :] / np.sum(
matrix[i_class, :])
# Plotting
variables = config_train["variables"]
plt.figure(0, figsize=(len(variables), len(classes)))
axis = plt.gca()
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1]):
axis.text(
j + 0.5,
i + 0.5,
'{:.2f}'.format(matrix[i, j]),
ha='center',
va='center')
q = plt.pcolormesh(matrix, cmap='Wistia')
#cbar = plt.colorbar(q)
#cbar.set_label("mean(abs(Taylor coefficients))", rotation=270, labelpad=20)
plt.xticks(
np.array(range(len(variables))) + 0.5, variables, rotation='vertical')
plt.yticks(
np.array(range(len(classes))) + 0.5, classes, rotation='horizontal')
plt.xlim(0, len(config_train["variables"]))
plt.ylim(0, len(config_train["classes"]))
output_path = os.path.join(config_train["output_path"],
"fold{}_keras_taylor_1D.png".format(args.fold))
logger.info("Save plot to {}.".format(output_path))
plt.savefig(output_path, bbox_inches='tight')
if __name__ == "__main__":
args = parse_arguments()
config_test = parse_config(args.config_testing)
config_train = parse_config(args.config_training)
main(args, config_test, config_train)
| [
"[email protected]"
]
| |
4e6dc77e570b5419eef0fc74fd16710afdfd3235 | 190d03cf370844548b9e8c89952dfbaec4d0c5c8 | /p103.py | 467aee99fa0ff340b0a00d481a047ab36a7d0d52 | []
| no_license | alainlou/leetcode | 446d101a9fd2f9eaa2229252e5909e7df36b4a74 | fe500bcb067be59aa048259e3860e9da6f98344d | refs/heads/master | 2022-10-16T12:20:44.726963 | 2022-09-18T15:29:05 | 2022-09-18T15:29:05 | 178,775,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | from DS.TreeNode import TreeNode
from typing import List  # needed for the List[List[int]] annotation
class Solution:
def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
if root is None:
return []
ans = []
q = [(root, 0)]
while len(q) > 0:
curr = q.pop(0)
if len(ans) <= curr[1]:
ans.append([])
ans[-1].append(curr[0].val)
if curr[0].left:
q.append((curr[0].left, curr[1]+1))
if curr[0].right:
q.append((curr[0].right, curr[1]+1))
for i in range(1, len(ans), 2):
ans[i] = ans[i][::-1]
return ans
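# Minimal usage sketch (hedged): assumes DS.TreeNode exposes `val`, `left`
# and `right` attributes. For the tree
#        3
#       / \
#      9  20
#         / \
#        15  7
# zigzagLevelOrder returns [[3], [20, 9], [15, 7]].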
| [
"[email protected]"
]
| |
55579000cd5ed23906233808f75b3a4fae18bda1 | 7e9e76ccb9a8132f875ff8e3279eeb95b4f9b424 | /houseDjango/loggings/urls.py | fbc79cdd86349e0b486fc3cb7c5f10e4b586d14a | []
| no_license | IPingStudio/houseDjango | 9f282273beb80ab4ec2e741bcba28c10c59880c5 | 55aff03fb7febb106304f4555ecdce254539cfa6 | refs/heads/master | 2023-01-13T02:04:00.362837 | 2020-11-02T02:30:37 | 2020-11-02T02:30:37 | 302,852,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | """houseDjango URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path, include
from . import views
urlpatterns = [
re_path(r'^$', views.searchLoginByHouse, name='showLog'),
]
| [
"[email protected]"
]
| |
68a88475ce22d1c7e3b750255bbf7cee712a0ee7 | e8d927d593e2d8d08004e12ec11988062b9df8b3 | /typeidea/blog/adminx.py | d5363e831143e4fac4e100c666967e13751ed032 | []
| no_license | choupihuan/typeidea | 9c4f6a90dfb53f94dcaeb15e5e6e915f9f9e0ee6 | f8e13b1e4afd0f67bf365bdbb04e35cc4f56a0da | refs/heads/master | 2020-06-05T19:51:29.757681 | 2019-06-29T11:32:46 | 2019-06-29T11:32:46 | 192,530,809 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,567 | py | import xadmin
from django.urls import reverse
from django.utils.html import format_html
from django.contrib.admin.models import LogEntry
from xadmin.layout import Row,Fieldset,Container
from xadmin.filters import manager
from xadmin.filters import RelatedFieldListFilter
from .models import Post,Category,Tag
from .adminforms import PostAdminForm
from typeidea.base_admin import BaseOwnerAdmin
class PostInline:
    form_layout = (
        Container(Row('title', 'desc'))
    )
extra = 1
model = Post
@xadmin.sites.register(Category)
class CategoryAdmin(BaseOwnerAdmin):
list_display = ('name','status','is_nav','owner','create_time','post_count')
fields = ('name','status','is_nav')
inlines = [PostInline,]
def post_count(self,obj):
return obj.post_set.count()
post_count.short_description = '文章数量'
@xadmin.sites.register(Tag)
class TagAdmin(BaseOwnerAdmin):
list_display = ('name','status','create_time')
fields = ('name','status')
class CategoryOwnerFilter(RelatedFieldListFilter):
""" 自定义过滤器只展示当前用户分类 """
@classmethod
def test(cls, field, request, params, model, admin_view, field_path):
return field.name == 'category'
def __init__(self,field,request,params,model,model_admin,field_path):
super().__init__(field,request,params,model,model_admin,field_path)
self.lookup_choices = Category.objects.filter(owner=request.user).values_list('id','name')
manager.register(CategoryOwnerFilter,take_priority=True)
@xadmin.sites.register(Post)
class PostAdmin(BaseOwnerAdmin):
form = PostAdminForm
list_display = [
'title','category','status',
'create_time','owner','operator'
]
list_display_links = []
exclude = ('owner',)
list_filter = ['category']
search_fields = ['title','category__name']
actions_on_top = True
actions_on_bottom = False
    # edit-page options
save_on_top = False
filter_horizontal = ('tag',)
# fields = (
# ('category','title'),
# 'desc',
# 'status',
# 'content',
# 'tag',
# )
# fieldsets = (
# ('基础配置',{
# 'description':'基础配置',
# 'fields': (
# ('title','category','tag'),
# 'status',
# ),
# }),
# ('内容',{
# 'fields': (
# 'desc',
# 'content',
# ),
# }),
#
# )
    # layout rewritten with xadmin's form_layout
form_layout = (
Fieldset(
'基础信息',
Row('title','category'),
'status',
'tag',
),
Fieldset(
'内容信息',
'desc',
'is_md',
'content_ck',
'content_md',
'content',
)
)
# @property
# def media(self):
# media = super().media
# media.add_js(['https://cdn.bootcss.com/bootstrap/4.0.0-beta.2/js/bootstrap.bundle.js'])
# media.add_css({
# 'all':('https://cdn.bootcss.com/bootstrap/4.0.0-beta.2/css/bootstrap.min.css')
# })
# return media
def operator(self,obj):
return format_html(
'<a href="{}">编辑</a>',
reverse('xadmin:blog_post_change',args=(obj.id,))
)
operator.short_description = '操作'
@xadmin.sites.register(LogEntry)
class LogEntryAdmin():
list_display = ['object_repr','object_id','action_flag','user','change_message']
| [
"[email protected]"
]
| |
fcd899d0868ace0b83ecc9fb302a4fa7c51e35ea | a20707ab33beba0b0aac6401d7baf91c135af4c6 | /Lab/Notes/Embed MatPlotLib in Tkinter.py | a5538e3c87caac7cdc899dbd0792cc75179dc895 | []
| no_license | ffarassati/Metherate-Lab | 21fa34fadf165c6ba0d1862db5e4097fb047da4f | 4f34d59d9168c150f7ae578a38e8b11674275c54 | refs/heads/master | 2020-05-17T05:17:13.318713 | 2019-06-17T09:37:41 | 2019-06-17T09:37:41 | 183,529,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,885 | py | # The code for changing pages was derived from: http://stackoverflow.com/questions/7546050/switch-between-two-frames-in-tkinter
# License: http://creativecommons.org/licenses/by-sa/3.0/
# https://pythonprogramming.net/how-to-embed-matplotlib-graph-tkinter-gui/
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
import tkinter as tk
from tkinter import ttk
LARGE_FONT= ("Verdana", 12)
class SeaofBTCapp(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
tk.Tk.iconbitmap(self, default="")
tk.Tk.wm_title(self, "Sea of BTC client")
container = tk.Frame(self)
container.pack(side="top", fill="both", expand = True)
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
self.frames = {}
for F in (StartPage, PageOne, PageTwo, PageThree):
frame = F(container, self)
self.frames[F] = frame
frame.grid(row=0, column=0, sticky="nsew")
self.show_frame(StartPage)
def show_frame(self, cont):
frame = self.frames[cont]
frame.tkraise()
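    # All four pages share the same grid cell; switching pages just raises
    # the requested frame to the top of the stacking order.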
class StartPage(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self,parent)
label = tk.Label(self, text="Start Page", font=LARGE_FONT)
label.pack(pady=10,padx=10)
button = ttk.Button(self, text="Visit Page 1",
command=lambda: controller.show_frame(PageOne))
button.pack()
button2 = ttk.Button(self, text="Visit Page 2",
command=lambda: controller.show_frame(PageTwo))
button2.pack()
button3 = ttk.Button(self, text="Graph Page",
command=lambda: controller.show_frame(PageThree))
button3.pack()
class PageOne(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Page One!!!", font=LARGE_FONT)
label.pack(pady=10,padx=10)
button1 = ttk.Button(self, text="Back to Home",
command=lambda: controller.show_frame(StartPage))
button1.pack()
button2 = ttk.Button(self, text="Page Two",
command=lambda: controller.show_frame(PageTwo))
button2.pack()
class PageTwo(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Page Two!!!", font=LARGE_FONT)
label.pack(pady=10,padx=10)
button1 = ttk.Button(self, text="Back to Home",
command=lambda: controller.show_frame(StartPage))
button1.pack()
button2 = ttk.Button(self, text="Page One",
command=lambda: controller.show_frame(PageOne))
button2.pack()
class PageThree(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Graph Page!", font=LARGE_FONT)
label.pack(pady=10,padx=10)
button1 = ttk.Button(self, text="Back to Home",
command=lambda: controller.show_frame(StartPage))
button1.pack()
f = Figure(figsize=(5,5), dpi=100)
a = f.add_subplot(111)
a.plot([1,2,3,4,5,6,7,8],[5,6,1,3,8,9,3,5])
canvas = FigureCanvasTkAgg(f, self)
canvas.draw()
canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
toolbar = NavigationToolbar2Tk(canvas, self)
toolbar.update()
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
app = SeaofBTCapp()
app.mainloop()
| [
"[email protected]"
]
| |
18369325e71e025ee816fd5e5d7ce741992af937 | 399765d71fbc931125fe75cc6c71eabdac4dc58f | /app-tornado.py | bced06bd7f1d0304a5654802440c2e6409be3fa6 | []
| no_license | G-Guillard/NEO | 98e989952c7a15de630962c44d65998e979f7efd | 3317b228fbeead14b1546290e9e7a7a7abbd7ff3 | refs/heads/master | 2020-12-10T09:16:47.845598 | 2020-01-16T15:29:19 | 2020-01-16T15:29:19 | 233,555,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,208 | py | import os
from tornado.web import Application, RequestHandler
from tornado.ioloop import IOLoop
from pymongo import MongoClient
from bson import Code
import pandas as pd
from plotly.utils import PlotlyJSONEncoder
import plotly.graph_objects as go
import plotly.express as px
import json
from sklearn.cluster import KMeans
#Connect to Mongo client and DB
client = MongoClient('db', 27017)
db = client.nasadb
coll = db.neo
collname = "neo" # Near Earth Objects
def get_keys(collection, numeric=False):
"""Get all documents keys, or only numerical keys"""
if numeric:
mapper = Code("function() { for (var key in this) { if (typeof(this[key]) == 'number') emit(key, null); } }")
else:
mapper = Code("function() { for (var key in this) { emit(key, null); } }")
reducer = Code("function(key, stuff) { return null; }")
result = db[collection].map_reduce(mapper, reducer, collection + "_keys")
return result.distinct("_id")
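# e.g. for a document {"a": 1, "b": "x"} the numeric mapper emits only "a"
# (typeof 1 == 'number'), so get_keys(collname, numeric=True) returns just
# the numeric field names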
def get_features(f1, f2=''):
"""Get features from Mongo DB"""
cursor = coll.find()
if f2:
return pd.DataFrame(list(cursor), columns=[f1, f2])
else:
return pd.DataFrame(list(cursor), columns=[f1])
class Index(RequestHandler):
def get(self):
"""Index page contains links to plots of single distributions"""
items = [key for key in get_keys(collname, numeric=True)]
self.render('templates/index-tornado.html', items = items)
class Plot1D(RequestHandler):
def get(self, feature):
"""Plot distribution of a feature"""
if not feature:
self.set_status(400)
return self.render('templates/error-tornado.html', code=400)
else:
feature = feature.split("/")[0]
if not feature in get_keys(collname):
self.set_status(404)
return self.render('templates/error-tornado.html', code=404, key=feature)
df = get_features(feature)
fig = go.Figure(data=[go.Histogram(x=df[feature])])
graphJSON = json.dumps(fig, cls=PlotlyJSONEncoder)
return self.render('templates/plot-tornado.html', plot=json.dumps(graphJSON))
class Plot2D(RequestHandler):
def get(self, query=''):
"""GET request for 2D plot"""
        features = query.split("/")
        # check the length first: a query without "/" would otherwise raise IndexError
        if len(features) >= 2 and features[0] and features[1]:
self.plot(features[0], features[1])
else:
self.set_status(400)
return self.render('templates/error-tornado.html', code=400)
def post(self):
"""POST request for 2D plot"""
feature1 = self.get_argument('x')
feature2 = self.get_argument('y')
if feature1 and feature2:
self.plot(feature1, feature2)
else:
self.set_status(400)
return self.render('templates/error-tornado.html', code=400)
def plot(self, feature1, feature2):
"""Plot 2D distributions of two features"""
if not feature1 in get_keys(collname):
self.set_status(404)
return self.render('templates/error-tornado.html', code=404, key=feature1)
if not feature2 in get_keys(collname):
self.set_status(404)
return self.render('templates/error-tornado.html', code=404, key=feature2)
df = get_features(feature1, feature2)
fig = px.scatter(df, x=feature1, y=feature2)
graphJSON = json.dumps(fig, cls=PlotlyJSONEncoder)
return self.render('templates/plot-tornado.html', plot=json.dumps(graphJSON))
class KMeansClassifier(RequestHandler):
def get(self, n_clusters=2):
"""Unsupervised classification attempt"""
        # the route regex passes '' for an empty URL segment, which overrides
        # the Python default, so fall back to 2 explicitly
if not n_clusters:
n_clusters = 2
cursor = coll.find()
df = pd.DataFrame(list(cursor), columns=[
'Absolute Magnitude',
'Est Dia in M(min)',
'Est Dia in M(max)',
'Relative Velocity km per hr',
# 'Miss Dist (Astronomical)',
'Minimum Orbit Intersection',
'Jupiter Tisserand Invariant',
'Epoch Osculation',
'Eccentricity',
'Semi Major Axis',
'Inclination',
'Asc Node Longitude',
'Orbital Period',
'Perihelion Distance',
'Perihelion Arg',
'Aphelion Dist',
'Perihelion Time',
'Mean Anomaly',
'Mean Motion'
])
normalized_df = (df - df.mean()) / df.std()
normalized_df = normalized_df.dropna()
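        # z-score standardization: each column is rescaled to mean 0 / std 1
        # so no single feature dominates the Euclidean distances used by
        # k-means; dropna() then discards rows left with NaNs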
pred = KMeans(n_clusters=int(n_clusters)).fit_predict(normalized_df)
df["pred"] = pred
fig = px.scatter(df, x="Mean Anomaly", y="Eccentricity", color="pred")
graphJSON = json.dumps(fig, cls=PlotlyJSONEncoder)
return self.render('templates/plot-tornado.html', plot=json.dumps(graphJSON))
# Launch the app
if __name__ == "__main__":
app = Application([
('/',Index),
(r'/plots/1D/(.*)',Plot1D),
(r'/plots/2D/',Plot2D),
(r'/plots/2D/(.*)',Plot2D),
(r'/classify/kmeans/(.*)',KMeansClassifier)
])
app.listen(5000)
IOLoop.current().start()
| [
"[email protected]"
]
| |
d3e48f69c086cadbd11c7ae49adb48301ab31324 | 62669fa98cfba28653f97adfaf169c166e02d69c | /yi_parser.py | b829e653e392286b80afb190dd2a9d2223f8e028 | []
| no_license | Kevin-Xi/Yi_Analysis_Tools | d8e7850275572bc00c59a46fe10c0ae2a584670d | 2191948795f1c6df621ec1ab535d43c35b3067d7 | refs/heads/master | 2020-04-18T11:47:18.576014 | 2013-06-05T08:26:39 | 2013-06-05T08:26:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,719 | py | # -- coding=utf-8 -- #
"""Parser of Yi
Analyse specified Yao of a Gua
Usage:
python yi_parser.py [options]
Options:
-s ... use specified xml file
-g ... specify a Gua, between 1 and 64
-y ... specify a Yao, between 1 and 6
-a analyse all
-h show help document
Examples:
yi_parser.py -s new_yi.xml use 'new_yi.xml' as source xml file
yi_parser.py -g 32 analyse all Yao of Gua no.32
yi_parser.py -y 5 analyse Yao no.5 of every Gua
yi_parser.py -g 16 -y 3 analyse the third Yao of Gua no.16
yi_parser.py -a analyse all
Visit https://github.com/Kevin-Xi/Yi_Analysis_Tools for the latest version.
"""
from xml.dom import minidom
import sys
import getopt
import time
class Yi_parser():
    def __init__(self, xmlfile, gflag, yflag):
        # per-instance result dict (a class-level dict would be shared
        # between all Yi_parser objects)
        self.result = {}
xmldoc = minidom.parse(xmlfile)
self.xmlroot = xmldoc.childNodes[0]
target_gua = self.xmlroot.childNodes[gflag*2-1]
self.yao_bucket = target_gua.childNodes
self.target_yao = self.yao_bucket[yflag*2-1]
self.gflag = gflag
self.yflag = yflag
self.parse()
def parse(self):
self.target_p = int(self.target_yao.attributes['p'].value)
self.dangwei()
self.dezhong()
self.ying()
        self.cheng_up()    # 乘 (cheng, "riding")
        self.cheng_down()  # 承 (cheng, "supporting")
def dangwei(self):
if self.yflag % 2 == self.target_p % 2:
self.result['当位'] = 1
else:
self.result['当位'] = 0
def dezhong(self):
if self.yflag == 2 or self.yflag == 5:
self.result['得中'] = 1
else:
self.result['得中'] = 0
def ying(self):
ying_no = self.yflag-3 > 0 and self.yflag-3 or self.yflag+3
ying_p = int(self.yao_bucket[ying_no*2-1].attributes['p'].value)
if self.target_p != ying_p:
self.result['应'] = 1
else:
self.result['应'] = 0
def cheng_up(self):
try:
cheng_p = int(self.yao_bucket[self.yflag*2-3].attributes['p'].value)
if self.target_p == 1 and cheng_p == 0:
self.result['乘'] = 1
else:
self.result['乘'] = 0
except:
self.result['乘'] = -1
def cheng_down(self):
try:
cheng_p = int(self.yao_bucket[self.yflag*2+1].attributes['p'].value)
if self.target_p == 0 and cheng_p == 1:
self.result['承'] = 1
else:
self.result['承'] = 0
except:
self.result['承'] = -1
def output(self):
return ', '.join(['%s = %s' % (k, v) for k, v in self.result.items() if self.result[k] != -1]) + '\n'
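    # Illustrative output line (values depend on the xml):
    #   当位 = 1, 得中 = 0, 应 = 1, 乘 = 0, 承 = 1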
class ArgOutofRangeError(Exception): pass
def usage():
print __doc__
def main(argv):
xmlfile = 'yi.xml'
gflag = 0
yflag = 0
try:
opts, args = getopt.getopt(argv, 's:g:y:ah') #the args should be here to take the junk
for opt, arg in opts:
            if opt == '-h':  # getopt yields options with their leading dash
usage()
sys.exit()
elif opt == '-s':
xmlfile = arg
elif opt == '-g':
gflag = int(arg)
if gflag > 64 or gflag < 1:
raise ArgOutofRangeError
elif opt == '-y':
yflag = int(arg)
if yflag > 6 or yflag < 1:
raise ArgOutofRangeError
elif opt == '-a':
(gflag, yflag) = (0, 0) #HOW TO PROCESS OVERWRITE CONFLICT BETWEEN g, y and a
except getopt.GetoptError:
usage()
sys.exit(2)
    except (ArgOutofRangeError, ValueError):
        # catching several exception types needs a tuple; the old
        # "except A, B:" syntax binds B as the alias instead
usage()
sys.exit(3)
if gflag != 0 and yflag != 0:
run(xmlfile, gflag, yflag)
elif gflag == 0 and yflag != 0:
        for i in range(64): # A big memory consumer, but letting the class parse one yao at a time keeps it simple. Cache later?
run(xmlfile, i+1, yflag)
elif gflag != 0 and yflag == 0:
for i in range(6):
run(xmlfile, gflag, i+1)
else:
for i in range(64):
for j in range(6):
run(xmlfile, i+1, j+1)
def run(xmlfile, gflag, yflag):
y = Yi_parser(xmlfile, gflag, yflag)
print y.output()
if __name__ == '__main__':
if len(sys.argv) <= 1:
usage()
sys.exit()
main(sys.argv[1:])
| [
"[email protected]"
]
| |
552d7737321351084598b558d7543b946c890425 | 4f989b9e1e9c1caa70b5d013d95b95f3c3f0cb3c | /classbasedapiview/urls.py | e0fddc3de7513210995950c8b9d2832b0757ac90 | []
| no_license | vikashtiwary118/class-based-apiview | 4cdb9bc786c16d51dbc5e80060c2b92579659bea | bccaa57a8c7ca07e17402b6d9a12e7f32ba2546f | refs/heads/main | 2023-05-17T00:47:38.819526 | 2021-06-07T05:09:35 | 2021-06-07T05:09:35 | 374,537,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | from django.contrib import admin
from django.urls import path
from api import views
urlpatterns = [
path('admin/', admin.site.urls),
path('studentapi/', views.StudentAPI.as_view()),
path('studentapi/<int:pk>', views.StudentAPI.as_view()),
]
| [
"[email protected]"
]
| |
2e6669b326e3edda7be0bf7c377f290405bcf0c3 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2671/60590/241334.py | 25e5fee553d7db62878bfa4165d65ebdfde7331c | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | def cal(w):
if w==1:
return 2
if w==2:
return 3
elif w==3:
return 5
else:
w2=2
w3=3
for i in range(w-2):
temp=w2+w3
w2=w3
w3=temp
return temp
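# Hedged reading: cal(w) follows the Fibonacci recurrence (2, 3, 5, 8, ...),
# which counts length-w binary strings with no two adjacent zeros, so
# 2**w - cal(w) counts the strings that do contain "00".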
t = int(input())
for i in range(t):
w=int(input())
print(2**w-cal(w)) | [
"[email protected]"
]
| |
9e81266bf495049477857e380675d6fdda9605c5 | 6d35c28f907b6ce999522ae53b98fa6ded98df6f | /server/corpsoftportal/corpsoftportal/wsgi.py | dee886c41e13032dd34908158d6bf79ad0948be8 | []
| no_license | Rassoliny/csp | 7a5f22c0b6c969ef566b1871c097aad5fc5a38d1 | 815aa0abcf0796e67ac4a81d065d4d99243881f1 | refs/heads/master | 2020-04-18T06:54:56.747455 | 2019-03-11T12:24:02 | 2019-03-11T12:24:02 | 167,342,026 | 0 | 0 | null | 2019-02-11T10:14:42 | 2019-01-24T09:38:40 | JavaScript | UTF-8 | Python | false | false | 405 | py | """
WSGI config for corpsoftportal project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'corpsoftportal.settings')
application = get_wsgi_application()
| [
"[email protected]"
]
| |
8c055cb19494e422d0d5bb1e656c1a374f646cbb | 6da108db00c294a55410bde285fa7851358331df | /mobycity/carpooling/forms.py | 78599ffb37b78a430fdc9db6e0e32a0c46f8eed0 | [
"MIT"
]
| permissive | LucienD/Mobct | 5639eb108922dc03dd88d62b93b8eaac0c989dae | 1c8422b30c205bdb2dc21c988f74280194980ec4 | refs/heads/master | 2020-07-04T18:55:49.784534 | 2016-11-20T13:30:48 | 2016-11-20T13:30:48 | 74,147,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,259 | py | from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from carpooling.models import Carpool
class OfferCarpoolForm(forms.ModelForm):
class Meta:
model = Carpool
fields = [
'departure_latitude',
'departure_longitude',
'arrival_latitude',
'arrival_longitude',
'frequency',
'occ_departure_datetime',
'occ_arrival_datetime',
'reg_departure_time',
'reg_arrival_time',
'seats_number',
'free',
'comment',
]
exclude = (
'organizer',
)
widgets = {
'frequency': forms.RadioSelect(),
'departure_latitude': forms.HiddenInput(),
'departure_longitude': forms.HiddenInput(),
'arrival_latitude': forms.HiddenInput(),
'arrival_longitude': forms.HiddenInput(),
'occ_departure_datetime': forms.SplitDateTimeWidget(),
'occ_arrival_datetime': forms.SplitDateTimeWidget(),
}
def clean_occ_departure_datetime(self):
if self.cleaned_data['frequency'] == 'REG':
return None
else:
if (self.cleaned_data['occ_departure_datetime']):
return self.cleaned_data['occ_departure_datetime']
else:
raise ValidationError(_('This field is required.'))
def clean_occ_arrival_datetime(self):
if self.cleaned_data['frequency'] == 'REG':
return None
else:
if (self.cleaned_data['occ_arrival_datetime']):
return self.cleaned_data['occ_arrival_datetime']
else:
raise ValidationError(_('This field is required.'))
def clean_reg_departure_time(self):
if self.cleaned_data['frequency'] == 'OCC':
return None
else:
if (self.cleaned_data['reg_departure_time']):
return self.cleaned_data['reg_departure_time']
else:
raise ValidationError(_('This field is required.'))
def clean_reg_arrival_time(self):
if self.cleaned_data['frequency'] == 'OCC':
return None
else:
if (self.cleaned_data['reg_arrival_time']):
return self.cleaned_data['reg_arrival_time']
else:
raise ValidationError(_('This field is required.'))
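    # Pattern note: the four clean_* hooks above make the occasional (occ_*)
    # and regular (reg_*) schedule fields mutually exclusive -- whichever set
    # does not match `frequency` is nulled out, the other becomes required.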
class SearchCarpoolForm(forms.ModelForm):
class Meta:
model = Carpool
fields = [
'departure_latitude',
'departure_longitude',
'arrival_latitude',
'arrival_longitude',
'frequency',
'occ_arrival_datetime',
'reg_arrival_time',
]
exclude = (
'organizer',
)
widgets = {
'frequency': forms.RadioSelect(),
'departure_latitude': forms.HiddenInput(),
'departure_longitude': forms.HiddenInput(),
'arrival_latitude': forms.HiddenInput(),
'arrival_longitude': forms.HiddenInput(),
'occ_arrival_datetime': forms.SplitDateTimeWidget(),
} | [
"[email protected]"
]
| |
b5c26aa9fc4c568ff3effe6c26bb5dbcdedb889b | 9d027df3b1295ecc49be75b2902c9e0f1089405b | /ingest/stuart_lab_remove_all.py | 6594820647844b6aa97c529cc4ea3026d27fc4e6 | [
"MIT"
]
| permissive | mrJeppard/clusterdb-ingest | 5e3681f0712f830e3ffd34c802cc7221d463c0d6 | f52f3ee03a1071ef15a63412e1e2085fdf74e584 | refs/heads/master | 2022-12-12T03:56:44.097278 | 2019-05-14T21:06:13 | 2019-05-14T21:06:13 | 174,733,533 | 0 | 0 | MIT | 2022-12-08T01:41:16 | 2019-03-09T18:49:35 | Python | UTF-8 | Python | false | false | 2,222 | py | """
Works with a directory of anndata objects which are the result of the stuart lab
runs from October 2018
Update the data values common across the cluster solution, such as
cluster_description, method...
To run, change the DATADIR and FULLDBPATH globals and run the script from the repo's venv:
python stuart_lab_remove_all.py
"""
import os, csv
import scanpy as sc
import pandas as pd
from sqlalchemy import create_engine, Table, MetaData, select
"""
# Full path to the sqlite db on bop
FULLDBPATH = "/soe/swat/cellAtlas/data/cluster.swat.db"
# Path to the data directory filled with anndata objects.
DATADIR = "/soe/swat/cellAtlas/data/cluster"
# Path to the dataset tsv file.
DATASETPATH = "/soe/swat/clusterdb-ingest/dataset.tsv"
"""
# Full path to the sqlite db for swat
FULLDBPATH = "/Users/swat/dev/cdb/cluster.db"
# Path to the data directory filled with anndata objects.
DATADIR = "/Users/swat/dev/cdbIngest/dataIn"
# Path to the dataset tsv file.
DATASETPATH = "/Users/swat/dev/cdbIngest/dataIn/dataset.tsv"
# Connection to the database.
dbstartstr = "sqlite:///%s" % FULLDBPATH
engine = create_engine(dbstartstr, echo=False)
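# ("sqlite:///%s" plus an absolute path yields four slashes in total,
# e.g. sqlite:////Users/swat/dev/cdb/cluster.db -- the form SQLAlchemy
# expects for absolute sqlite paths)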
metadata = MetaData()
conn = engine.connect()
# Accessor for each of the tables.
dataset = Table('dataset', metadata, autoload=True, autoload_with=engine)
cluster_solution_table = Table('cluster_solution', metadata, autoload=True, autoload_with=engine)
cluster = Table('cluster', metadata, autoload=True, autoload_with=engine)
cell_of_cluster = Table('cell_of_cluster', metadata, autoload=True, autoload_with=engine)
# Remove obsolete datasets from the db.
with open(DATASETPATH, newline='') as fin:  # 'rU' is deprecated; the csv module wants newline=''
fin = csv.DictReader(fin, delimiter='\t')
for row in fin:
dataset_delete = dataset.delete().where(dataset.c.name == row['name'])
result = conn.execute(dataset_delete)
print('deleted:', row['name'])
for name in [
'Tabula Muris facs',
'Tabula Muris droplet',
'Quake Brain',
'UCSC Human Cortex',
'Immune Bone',
'Immune Cord']:
dataset_delete = dataset.delete().where(dataset.c.name == name)
result = conn.execute(dataset_delete)
print('deleted:', name)
# Remove obsolete cluster solutions from the db
| [
""
]
| |
d078bea59e3fd25f0fb187ea727e6ef3f081d3f2 | a6db31c3f9264c7b0ad5c3307dee4afd7ce689c1 | /Python/MyPythonTest/zy/com/python3/Demo003.py | f0fcd0b4de6ffc722baa962d301009f4d34b9480 | []
| no_license | aJanefish/Work | 2933fa5437b3c216ff1d76aa641af26226e5295c | 96e585d6208e9d6bab5576a25853c0fdb54fbcd8 | refs/heads/master | 2021-06-29T15:53:28.458468 | 2020-02-23T08:23:44 | 2020-02-23T08:23:44 | 141,233,308 | 0 | 0 | null | 2020-10-13T10:42:43 | 2018-07-17T04:58:08 | Python | UTF-8 | Python | false | false | 317 | py | # -*- coding: UTF-8 -*-
# Filename : test.py
# author by : www.runoob.com
# File operations
# Write to a file
with open("test.txt", "wt") as out_file:
out_file.write("该文本会写入到文件中\n看到我了吧!")
# Read a file
with open("test.txt", "rt") as in_file:
text = in_file.read()
print(text)
| [
"[email protected]"
]
| |
42912ac022fad380ab3b2d80fe7989268ffc6c88 | a1858b1eeef112531b52f977b644a83fc3436688 | /second/protos/train_pb2.py | d23b33889b3ebd11208cdc61c940a72a4d0ed2f7 | []
| no_license | NorthCross/CLOCs | 2086ab2fe7fdce0c93ee7f181caf1b94774506a5 | 4cef59b23990a35656853bb16c4e897164b2ef02 | refs/heads/master | 2023-08-16T08:09:42.479641 | 2021-10-25T22:47:50 | 2021-10-25T22:47:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 5,945 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: second/protos/train.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from second.protos import optimizer_pb2 as second_dot_protos_dot_optimizer__pb2
from second.protos import preprocess_pb2 as second_dot_protos_dot_preprocess__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='second/protos/train.proto',
package='second.protos',
syntax='proto3',
serialized_options=None,
serialized_pb=b'\n\x19second/protos/train.proto\x12\rsecond.protos\x1a\x1dsecond/protos/optimizer.proto\x1a\x1esecond/protos/preprocess.proto\"\x95\x02\n\x0bTrainConfig\x12+\n\toptimizer\x18\x01 \x01(\x0b\x32\x18.second.protos.Optimizer\x12\r\n\x05steps\x18\x02 \x01(\r\x12\x16\n\x0esteps_per_eval\x18\x03 \x01(\r\x12\x1d\n\x15save_checkpoints_secs\x18\x04 \x01(\r\x12\x1a\n\x12save_summary_steps\x18\x05 \x01(\r\x12\x1e\n\x16\x65nable_mixed_precision\x18\x06 \x01(\x08\x12\x19\n\x11loss_scale_factor\x18\x07 \x01(\x02\x12!\n\x19\x63lear_metrics_every_epoch\x18\x08 \x01(\x08\x12\x19\n\x11\x64\x65tection_2d_path\x18\t \x01(\tb\x06proto3'
,
dependencies=[second_dot_protos_dot_optimizer__pb2.DESCRIPTOR,second_dot_protos_dot_preprocess__pb2.DESCRIPTOR,])
_TRAINCONFIG = _descriptor.Descriptor(
name='TrainConfig',
full_name='second.protos.TrainConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='optimizer', full_name='second.protos.TrainConfig.optimizer', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='steps', full_name='second.protos.TrainConfig.steps', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='steps_per_eval', full_name='second.protos.TrainConfig.steps_per_eval', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='save_checkpoints_secs', full_name='second.protos.TrainConfig.save_checkpoints_secs', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='save_summary_steps', full_name='second.protos.TrainConfig.save_summary_steps', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_mixed_precision', full_name='second.protos.TrainConfig.enable_mixed_precision', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='loss_scale_factor', full_name='second.protos.TrainConfig.loss_scale_factor', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clear_metrics_every_epoch', full_name='second.protos.TrainConfig.clear_metrics_every_epoch', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='detection_2d_path', full_name='second.protos.TrainConfig.detection_2d_path', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=108,
serialized_end=385,
)
_TRAINCONFIG.fields_by_name['optimizer'].message_type = second_dot_protos_dot_optimizer__pb2._OPTIMIZER
DESCRIPTOR.message_types_by_name['TrainConfig'] = _TRAINCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TrainConfig = _reflection.GeneratedProtocolMessageType('TrainConfig', (_message.Message,), {
'DESCRIPTOR' : _TRAINCONFIG,
'__module__' : 'second.protos.train_pb2'
# @@protoc_insertion_point(class_scope:second.protos.TrainConfig)
})
_sym_db.RegisterMessage(TrainConfig)
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
]
| |
808c37dcb29ca35c6d9c2b58002ea569528d3997 | f82728b6f868a2275e63848f936d2c22354dc024 | /Data Structure/Array Or Vector/Search An Element in Sorted Array/SolutionByYashika.py | 08248acbf202fda8812505c30c4bb5b29244da21 | [
"MIT"
]
| permissive | shoaibrayeen/Programmers-Community | 0b0fa4ba1427fa472126e640cda52dff3416b939 | cd0855e604cfd54101aaa9118f01a3e3b4c48c1c | refs/heads/master | 2023-03-16T03:13:48.980900 | 2023-03-12T00:25:37 | 2023-03-12T00:25:37 | 209,997,598 | 285 | 424 | MIT | 2023-03-12T00:25:38 | 2019-09-21T14:17:24 | C++ | UTF-8 | Python | false | false | 381 | py | def search(array,ele):
if len(array)==0: #if array is empty
return False
if array[0]==ele: # if ele is found at 0th index
return True
return search(array[1:],ele) # recursively search for the element in the array
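# e.g. search([4, 8, 15], 8) -> True, search([4, 8, 15], 7) -> False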
size=int(input())
array=[int(i) for i in input().split()]
ele=int(input())
if search(array,ele):
print("Yes")
else:
print("No") | [
"[email protected]"
]
| |
866c1d3d680c6beedd8c74466cc4d708fc411925 | 58a392f4c4bae3fa1c99e3d081eda0701bb9c93f | /comment/migrations/0001_initial.py | 02c7a01063db0d61442f5fc9a3fe66b88b72b2b4 | []
| no_license | KD233233/kd | 84e6b60457c70b0eb603049762a3e17c808701f9 | d0c298aefdc4b0712afa27604f832459b97a8edf | refs/heads/master | 2020-05-19T07:21:08.312106 | 2019-05-05T06:03:42 | 2019-05-05T06:03:42 | 184,895,175 | 1 | 0 | null | 2019-05-04T13:09:01 | 2019-05-04T12:58:51 | null | UTF-8 | Python | false | false | 864 | py | # Generated by Django 2.1.7 on 2019-02-26 01:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('blog', '0006_auto_20190226_0940'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=52)),
('email', models.CharField(max_length=50)),
('content', models.CharField(max_length=1000)),
('time', models.DateTimeField(auto_now_add=True)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='blog.Post')),
],
),
]
| [
"[email protected]"
]
| |
e1267a54f015e66aaf57df060a0ebb302d36b67e | 8d4f26bccc3b016cf45e8270df617cea73d1a741 | /utils/transforms.py | 4a3b38f5fe3802f8719e83a981a8f9c1740e3a2c | []
| no_license | krylatov-pavel/aibolit-ECG | 3b6e4fc8d87ada6a615038c7fb94048570af2d43 | 27bad875981547ea93ac0088518eb29149078988 | refs/heads/master | 2022-12-26T05:49:30.827061 | 2019-08-19T10:47:20 | 2019-08-19T10:47:20 | 191,343,111 | 1 | 0 | null | 2022-12-08T05:56:08 | 2019-06-11T09:53:21 | Python | UTF-8 | Python | false | false | 631 | py | import torch
from torchvision import transforms
def squeeze(x):
return torch.squeeze(x, dim=0)
def clip_fn(min, max):
def clip(x):
x = torch.clamp(x, min, max)
return x
return clip
def scale_fn(min, max, a, b):
def scale(x):
x = ((b - a) * (x - min) / (max - min)) + a
return x
return scale
def get_transform():
clip = clip_fn(-19, 21)
scale = scale_fn(-19, 21, 0, 5)
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(clip),
transforms.Lambda(scale),
transforms.Lambda(squeeze)
])
return transform | [
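# Minimal usage sketch (input shape assumed for illustration only):
# import numpy as np
# t = get_transform()
# x = t(np.zeros((1, 200), dtype=np.float32))
# The pipeline clips to [-19, 21], rescales into [0, 5], then squeezes the
# channel dim ToTensor adds, so zero inputs map to 5 * 19 / 40 = 2.375.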
"[email protected]"
]
| |
eabdc4dfe6fbd29641364cd7beabe54e1c5611c7 | 49facbcd74c2ca6adbca447a45e30a4190b35bc7 | /EMAN2/2.06/lib/emitem3d.py | 4fb979d45e44e783786926fe49428f77efc44768 | []
| no_license | CVL-dev/StructuralBiology | 801c37bfa5664e74c36f9f97478d59016f1f61fc | 1f1eaa42ae565c2032e245c6a874c8439d2719b9 | refs/heads/master | 2023-02-25T10:28:00.700774 | 2013-03-02T08:29:43 | 2013-03-02T08:29:43 | 5,609,380 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,273 | py | #!/usr/local/eman/2.06/Python/bin/python
# Copyright (c) 2011- Baylor College of Medicine
#
# This software is issued under a joint BSD/GNU license. You may use the
# source code in this file under either license. However, note that the
# complete EMAN2 and SPARX software packages have some GPL dependencies,
# so you are responsible for compliance with the licenses of these packages
# if you opt to use BSD licensing. The warranty disclaimer below holds
# in either instance.
#
# This complete copyright notice must be included in any revised version of the
# source code. Additional authorship citations may be added, but existing
# author citations must be preserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston MA 02111-1307 USA
#
#
from OpenGL import GL
from PyQt4 import QtCore, QtGui
from EMAN2 import Transform, Vec4f, Vec3f
from libpyGLUtils2 import GLUtil
from valslider import ValSlider, EMSpinWidget
import weakref
import math
import numpy
class EMVertexBuffers(object):
""" Class to hold vertex buffers """
vbuffers = []
numbuffers = 2
@staticmethod
def getBuffers():
if not len(EMVertexBuffers.vbuffers):
EMVertexBuffers.vbuffers = GL.glGenBuffers(EMVertexBuffers.numbuffers)
return EMVertexBuffers.vbuffers
def drawBoundingBox(width, height, depth):
""" Draw bounding box use C side for easy of vertex arrays """
GL.glPushAttrib(GL.GL_LIGHTING_BIT)
GL.glMaterialfv(GL.GL_FRONT, GL.GL_EMISSION, [1.0,1.0,1.0,1.0])
GLUtil.glDrawBoundingBox(width, height, depth)
GL.glPopAttrib()
class EMItem3D(object): #inherit object for new-style class (new-stype classes required for super() and Python properties)
"""
The base class for nodes in our scene graph, which is used in our 3D scene viewer.
In our case, the scene graph is a tree data structure.
"""
# Class attrib to connect openGL int identifiers to class instances
vbuffers = 0
selection_idx_dict = {}
selection_recycle = []
selection_intname = -1
name = "General 3D Item"
nodetype = "BaseNode"
@staticmethod
def get_transformlayout(layout, idx, attribdict):
"""
Adds a transform layout to a dialog
@param layout, the layout to append to
@param idx, the row to being appnding to
"""
font = QtGui.QFont()
font.setBold(True)
translatelabel = QtGui.QLabel("Translation")
translatelabel.setFont(font)
translatelabel.setAlignment(QtCore.Qt.AlignCenter)
layout.addWidget(translatelabel, idx, 0, 1, 4)
txlabel = QtGui.QLabel("Tx")
tylabel = QtGui.QLabel("Ty")
txlabel.setAlignment(QtCore.Qt.AlignRight)
tylabel.setAlignment(QtCore.Qt.AlignRight)
attribdict["tx"] = QtGui.QLineEdit("0.0")
attribdict["ty"] = QtGui.QLineEdit("0.0")
attribdict["tx"].setMinimumWidth(100.0)
attribdict["ty"].setMinimumWidth(100.0)
layout.addWidget(txlabel, idx+1, 0, 1, 1)
layout.addWidget(attribdict["tx"], idx+1, 1, 1, 1)
layout.addWidget(tylabel, idx+1, 2, 1, 1)
layout.addWidget(attribdict["ty"], idx+1, 3, 1, 1)
tzlabel = QtGui.QLabel("Tz")
zoomlabel = QtGui.QLabel("Zm")
tylabel.setAlignment(QtCore.Qt.AlignRight)
zoomlabel.setAlignment(QtCore.Qt.AlignRight)
attribdict["tz"] = QtGui.QLineEdit("0.0")
attribdict["zoom"] = QtGui.QLineEdit("1.0")
attribdict["tz"].setMinimumWidth(100.0)
attribdict["zoom"].setMinimumWidth(100.0)
layout.addWidget(tzlabel, idx+2, 0, 1, 1)
layout.addWidget(attribdict["tz"], idx+2, 1, 1, 1)
layout.addWidget(zoomlabel, idx+2, 2, 1, 1)
layout.addWidget(attribdict["zoom"], idx+2, 3, 1, 1)
rotatelabel = QtGui.QLabel("EMAN Rotation")
rotatelabel.setFont(font)
rotatelabel.setAlignment(QtCore.Qt.AlignCenter)
layout.addWidget(rotatelabel, idx+3, 0, 1, 4)
azlabel = QtGui.QLabel("Az")
azlabel.setAlignment(QtCore.Qt.AlignRight)
attribdict["az"] = QtGui.QLineEdit("0.0")
altlabel = QtGui.QLabel("Alt")
altlabel.setAlignment(QtCore.Qt.AlignRight)
attribdict["alt"] = QtGui.QLineEdit("0.0")
attribdict["az"] .setMinimumWidth(100.0)
attribdict["alt"].setMinimumWidth(100.0)
layout.addWidget(azlabel, idx+4, 0, 1, 1)
layout.addWidget(attribdict["az"], idx+4, 1, 1, 1)
layout.addWidget(altlabel, idx+4, 2, 1, 1)
layout.addWidget(attribdict["alt"], idx+4, 3, 1, 1)
philabel = QtGui.QLabel("Phi")
philabel.setAlignment(QtCore.Qt.AlignRight)
attribdict["phi"] = QtGui.QLineEdit("0.0")
layout.addWidget(philabel, idx+5, 0, 1, 1)
layout.addWidget(attribdict["phi"], idx+5, 1, 1, 1)
@staticmethod
def getTransformFromDict(attribdict):
""" Return a transform using a dict created using the above function"""
return Transform({"type":"eman","az":float(attribdict["az"].text()),"alt":float(attribdict["alt"].text()),"phi":float(attribdict["phi"].text()),"tx":float(attribdict["tx"].text()),"ty":float(attribdict["ty"].text()),"tz":float(attribdict["tz"].text()),"scale":float(attribdict["zoom"].text())})
@staticmethod
def getNodeDialogWidget(attribdict):
"""
Get Data Widget
"""
nodewidget = QtGui.QWidget()
grid = QtGui.QGridLayout()
node_name_label = QtGui.QLabel("Node Name")
attribdict["node_name"] = QtGui.QLineEdit()
grid.addWidget(node_name_label , 0, 0, 1, 2)
grid.addWidget(attribdict["node_name"], 0, 2, 1, 2)
EMItem3D.get_transformlayout(grid, 2, attribdict)
nodewidget.setLayout(grid)
return nodewidget
@staticmethod
def getNodeForDialog(attribdict):
"""
Create a new node using a attribdict
"""
return EMItem3D(attribdict["parent"], transform=EMItem3D.getTransformFromDict(attribdict))
def __init__(self, parent = None, children = [], transform=None):
"""
@type parent: EMItem3D
@param parent: the parent node to the current node or None for the root node
@type children: list
@param children: the child nodes
@type transform: Transform or None
@param transform: The transformation (rotation, scaling, translation) that should be applied before rendering this node and its children
"""
self.label = None # Customisable label, used to label the inspector in the tree
self.setParent(parent)
self.setChildren(children)
if not transform: transform = Transform()
self.transform = transform
self.is_visible = True
self.is_selected = False
self.hide_selection = False
self.item_inspector = None # This is an inspector widget
self.EMQTreeWidgetItem = None # This is an inspector tree item
self.boundingboxsize = None
self.getAndSetUniqueInteger()
def getChildren(self): return self.children
def setChildren(self, children):
self.children = list(children)
for child in children:
child.parent = self
def getParent(self): return self.parent
def setParent(self, parent):
self.parent = parent
#if parent: # Why is this here? It causes chaos b/c things get added twice. You can only implement this if you check to ensure self is not already a child of parent!
#parent.addChild(self)
def isSelectedItem(self): return self.is_selected
def setSelectedItem(self, is_selected): self.is_selected = is_selected
def isSelectionHidded(self): return self.hide_selection
def getHiddenSelected(self): return self.hide_selection
def setHiddenSelected(self, hide_selection): self.hide_selection = hide_selection
def getTransform(self): return self.transform
def setTransform(self, transform): self.transform = transform
def isVisibleItem(self): return self.is_visible
def setVisibleItem(self, is_visible): self.is_visible = is_visible
def setLabel(self, label): self.label = label
def getLabel(self): return self.label
def __del__(self):
EMItem3D.selection_recycle.append(self.intname)
def getEvalString(self):
"""
Return a string that, after eval, can reinstantiate the object
"""
return "EMItem3D()"
def getAndSetUniqueInteger(self):
"""
Stuff for the selection mechanism; returns a unique int for each instance of EMItem3D
"""
if len(EMItem3D.selection_recycle) > 0:
self.intname = EMItem3D.selection_recycle.pop()
else:
EMItem3D.selection_intname += 1
self.intname = EMItem3D.selection_intname
EMItem3D.selection_idx_dict[self.intname] = weakref.ref(self)
def addChild(self, node):
"""
Adds a child node, if not already in the list of child nodes.
@type node: EMItem3D
@param node: the child node to add
"""
self.children.append(node)
node.parent = self
def insertChild(self, node, nodeindex):
"""
Inserts a child node, if not already in the list of child nodes.
@type node: EMItem3D
@param node: the child node to add
@param nodeindex: the index for insertion
"""
self.children.insert(nodeindex, node)
node.parent = self
def addChildren(self, nodes):
"""
Adds all the provided child nodes which are not already in the list of child nodes.
@type nodes: an iterable collection of EMItem3D (or subclass) objects
@param nodes: the nodes which will be added as child nodes
"""
for node in nodes:
if not node in self.children:
self.children.append(node)
node.parent = self
def hasChild(self, node):
"""
Tests whether the supplied node is a child of the current node.
@type node: EMItem3D
@param node: test whether this node is a child node of self
"""
return node in self.children
def getChildIndex(self, node):
"""
Returns the index of child in the list of children. Raises an execption if not found
"""
return self.children.index(node)
def displayTree(self, level = 1):
"""
This prints a representation of the subtree rooted at this node to standard output. Useful for debugging.
"""
indent = "\t"*(level-1)
marker = "<-->" if self.parent else "-->"
print indent, marker, self.intname
for child in self.children:
child.displayTree(level+1)
def addParentReferences(self):
"""
For the subtree rooted at self, give each child node a reference to its parent node.
"""
for child in self.children:
child.parent = self
child.addParentReferences()
def removeParentReferences(self):
"""
For the subtree rooted at self, break reference cycles by setting self.parent to None.
"""
self.parent = None
for child in self.children:
child.removeParentReferences()
def removeChild(self, node):
"""
Remove the supplied node from the list of child nodes. This also removes all its descendant nodes.
@type node: EMItem3D
@param node: the node to remove
"""
if node in self.children:
node.removeParentReferences()
while node in self.children: #In case a duplicate got in, somehow
self.children.remove(node)
def getSelectedAncestorNodes(self):
"""
Return a list of selected node ancestors
"""
selected_ancestors = []
node = self
while node.parent:
if node.parent.is_selected:
selected_ancestors.append(node.parent)
node = node.parent
return selected_ancestors
def getRootNode(self):
"""
Returns the root node
"""
node = self
while node.parent:
node = node.parent
return node
def getAllSelectedNodes(self):
"""
For the tree rooted at self, this recursive method returns a list of all the selected nodes.
@return: a list of selected nodes
"""
selected_list = []
if self.is_selected:
selected_list.append(self)
for child in self.children: #Recursion ends on leaf nodes here
selected_list.extend(child.getAllSelectedNodes()) #Recursion
return selected_list
def getAllNodes(self):
"""
For the tree rooted at self, this recursive method returns a list of all the nodes.
@return: a list of selected nodes
"""
nodelist = []
nodelist.append(self)
for child in self.children: #Recursion ends on leaf nodes here
nodelist.extend(child.getAllNodes()) #Recursion
return nodelist
def getNearbySelectedNodes(self):
"""
For the tree rooted at self, this recursive method returns a list of the selected nodes that are near self.
A selected node will not be in the returned list if one of its ancestor nodes is also selected.
@return: a list of selected nodes
"""
selected_list = []
if self.is_selected:
return [self]
else:
for child in self.children:
selected_list.extend(child.getNearbySelectedNodes())
return selected_list
def getDistantSelectedNodes(self):
"""
For the tree rooted at self, this recursive method returns a list of the selected nodes that are distant from self.
A selected node will not be in the returned list if one of its descendant nodes is also selected.
@return: a list of selected nodes
"""
selected_list = []
for child in self.children:
selected_list.extend(child.getDistantSelectedNodes())
if not selected_list: #either this is a leaf node, or there are no selected nodes in the child subtrees
if self.is_selected:
selected_list.append(self)
return selected_list
def getItemInspector(self):
"""
Return a Qt widget that controls the scene item
"""
if not self.item_inspector: self.item_inspector = EMItem3DInspector("Node", self)
return self.item_inspector
def setEMQTreeWidgetItem(self, node):
"""
Relate a QtreeItem to this node
"""
self.EMQTreeWidgetItem = node
def getTransformStdCoord(self):
"""
This returns the transform in the standard coordinate system, not one changed by parent matrices
"""
tt = t = self.transform
tp = self.getParentMatrixProduct()
if tp: tt = tp*t
return tt
def getParentMatrixProduct(self):
"""
Get the product of all parent matrices
This is a recursive function
"""
if self.parent:
if self.parent.getParentMatrixProduct():
return self.parent.getParentMatrixProduct()*self.parent.getTransform()
else:
return self.parent.getTransform()
else:
return None
def updateMatrices(self, params, xformtype):
"""
The matrices are updated in such a way that each is done in the standard coordinate system and the std coord system is not perturbed by the others
@type params: List
@param params: A list defining how the transform in each active node is modified
@type xformtype: string
@param xformtype: The sort of transform we wish to do
"""
if self.is_selected:
if xformtype == "rotate":
if self.parent:
self.transform.rotate_origin_newbasis(self.getParentMatrixProduct(), params[0], params[1], params[2], params[3])
else:
self.transform.rotate_origin(Transform({"type":"spin","Omega":params[0],"n1":params[1],"n2":params[2],"n3":params[3]}))
elif xformtype == "translate":
if self.parent:
self.transform.translate_newbasis(self.getParentMatrixProduct(), params[0], params[1], params[2])
else:
self.transform.translate(params[0], params[1], params[2])
elif xformtype == "scale":
self.transform.scale(params[0])
else:
raise Exception,"Invalid transformation type"
# Now tell all children to update
# TODO: we MIGHT want to get rid of the recursive part of this algorithm
# instead, the calling function would get a list of all selected nodes, and apply transformations to each
for child in self.children:
child.updateMatrices(params, xformtype) #Note: the transformation is only applied to SELECTED nodes
def getItemDictionary(self):
"""
Return a dictionary of item parameters (used for restoring sessions
"""
return {"TRANSFORMATION":self.transform.get_params("eman"),"CONSTRUCTOR":self.getEvalString(),"NAME":str(self.getLabel()),"VISIBLE":self.isVisibleItem(),"SELECTED":self.isSelectedItem(),"NODETYPE":self.nodetype,"HIDDENSEL":self.getHiddenSelected()}
def setUsingDictionary(self, dictionary):
"""
Set item attributes using a dictionary, used in session restoration
"""
self.setTransform(Transform(dictionary["TRANSFORMATION"]))
self.setVisibleItem(dictionary["VISIBLE"])
self.setSelectedItem(dictionary["SELECTED"])
try:
self.setHiddenSelected(dictionary["HIDDENSEL"])
except:
pass
self.setLabel(dictionary["NAME"])
def render(self):
"""
This is the method to call to render the node and its child nodes.
It calls self.renderNode() to render the current node.
Usually, this method is unchanged in subclasses.
"""
if not self.is_visible:
return #Also applies to subtree rooted at this node
if self.transform.is_identity():
GL.glPushName(self.intname)
self.renderNode()
for child in self.children:
child.render()
GL.glPopName()
else:
GL.glPushMatrix()
GL.glPushName(self.intname)
GLUtil.glMultMatrix(self.transform) #apply the transformation
self.renderNode()
for child in self.children:
child.render()
GL.glPopName()
GL.glPopMatrix()
if self.item_inspector != None and self.is_selected:
self.item_inspector.updateItemControls()
def renderNode(self):
"""
This method, which is called by self.render(), renders the current node.
It should be implemented in subclasses that represent visible objects.
"""
pass
def keyPressEvent(self, event): pass
def keyReleaseEvent(self, event): pass
def mouseDoubleClickEvent(self, event): pass
def mouseMoveEvent(self, event): pass
def mousePressEvent(self, event): pass
def mouseReleaseEvent(self, event): pass
def wheelEvent(self, event): pass
class EMItem3DInspector(QtGui.QTabWidget):
"""
Class to make the EMItem GUI controls
"""
def __init__(self, name, item3d):
QtGui.QTabWidget.__init__(self)
self.item3d = weakref.ref(item3d)
self.name = name
self.inspector = None
self.transfromboxmaxheight = 400 # This might be problematic
self.addTabs()
def setInspector(self, inspector):
""" This is a reference back to the main inspector, which holds all the item inspectors"""
self.inspector = weakref.ref(inspector)
def addTabs(self):
""" Add a tab for each 'column' """
tabwidget = QtGui.QWidget()
gridbox = QtGui.QGridLayout()
EMItem3DInspector.addControls(self, gridbox)
tabwidget.setLayout(gridbox)
self.addTab(tabwidget, "basic")
def addControls(self, gridbox):
""" Construct all the widgets in this Item Inspector """
# selection box and label
font = QtGui.QFont()
font.setBold(True)
label = QtGui.QLabel(self.name,self)
label.setFont(font)
label.setAlignment(QtCore.Qt.AlignCenter)
gridbox.addWidget(label, 0, 0, 1, 1)
databox = QtGui.QHBoxLayout()
self.boundingbox = None
if self.item3d().boundingboxsize:
self.boundingbox = QtGui.QLabel("Size: "+self.item3d().boundingboxsize,self)
databox.addWidget(self.boundingbox)
gridbox.addLayout(databox, 1, 0, 1, 1)
# angular controls
xformframe = QtGui.QFrame()
xformframe.setFrameShape(QtGui.QFrame.StyledPanel)
xformbox = QtGui.QGridLayout()
xformlabel = QtGui.QLabel("Transformation", xformframe)
xformlabel.setFont(font)
xformlabel.setAlignment(QtCore.Qt.AlignCenter)
xformbox.addWidget(xformlabel, 0, 0, 1, 2)
# Rotations
self.rotcombobox = QtGui.QComboBox()
xformbox.addWidget(self.rotcombobox, 1, 0, 1, 2)
self.rotstackedwidget = QtGui.QStackedWidget()
self.addRotationWidgets()
xformbox.addWidget(self.rotstackedwidget, 2, 0, 1, 2)
#translations
txlabel = QtGui.QLabel("TX",xformframe)
txlabel.setAlignment(QtCore.Qt.AlignCenter)
xformbox.addWidget(txlabel, 3, 0, 1, 1)
tylabel = QtGui.QLabel("TY",xformframe)
tylabel.setAlignment(QtCore.Qt.AlignCenter)
xformbox.addWidget(tylabel, 3, 1, 1, 1)
self.tx = EMSpinWidget(0.0, 1.0)
self.ty = EMSpinWidget(0.0, 1.0)
xformbox.addWidget(self.tx, 4, 0, 1, 1)
xformbox.addWidget(self.ty, 4, 1, 1, 1)
tzlabel = QtGui.QLabel("TZ",xformframe)
tzlabel.setAlignment(QtCore.Qt.AlignCenter)
xformbox.addWidget(tzlabel, 5, 0, 1, 1)
zoomlabel = QtGui.QLabel("Zoom",xformframe)
zoomlabel.setAlignment(QtCore.Qt.AlignCenter)
xformbox.addWidget(zoomlabel, 5, 1, 1, 1)
self.tz = EMSpinWidget(0.0, 1.0)
self.zoom = EMSpinWidget(1.0, 0.1, postivemode=True, wheelstep=0.1)
xformbox.addWidget(self.tz, 6, 0, 1, 1)
xformbox.addWidget(self.zoom, 6, 1, 1, 1)
self.resetbuttontx = QtGui.QPushButton("Reset Tx")
self.resetbuttonrot = QtGui.QPushButton("Reset Rot")
xformbox.addWidget(self.resetbuttontx, 7, 0, 1, 1)
xformbox.addWidget(self.resetbuttonrot, 7, 1, 1, 1)
xformframe.setLayout(xformbox)
xformframe.setMaximumWidth(350)
xformframe.setMaximumHeight(self.transfromboxmaxheight)
xformframe.setLayout(xformbox)
gridbox.addWidget(xformframe, 2, 0, 1, 1)
# set controls to defaults, but only when run as the base class
if type(self) == EMItem3DInspector: self.updateItemControls()
QtCore.QObject.connect(self.tx,QtCore.SIGNAL("valueChanged(int)"),self._on_translation)
QtCore.QObject.connect(self.ty,QtCore.SIGNAL("valueChanged(int)"),self._on_translation)
QtCore.QObject.connect(self.tz,QtCore.SIGNAL("valueChanged(int)"),self._on_translation)
QtCore.QObject.connect(self.zoom,QtCore.SIGNAL("valueChanged(int)"),self._on_scale)
QtCore.QObject.connect(self.resetbuttontx,QtCore.SIGNAL("clicked()"),self._on_resettx)
QtCore.QObject.connect(self.resetbuttonrot,QtCore.SIGNAL("clicked()"),self._on_resetrot)
def _on_translation(self, value):
"""
Needs to use the right coords, and to do the translation in the correct coord system
"""
tt = t = Transform({"tx":self.tx.getValue(),"ty":self.ty.getValue(),"tz":self.tz.getValue()})
tp = self.item3d().getParentMatrixProduct()
if tp: tt = tp.inverse()*t
self.item3d().getTransform().set_trans(tt.get_trans())
self.inspector().updateSceneGraph()
def _on_scale(self, value):
self.item3d().getTransform().set_scale(self.zoom.getValue())
self.inspector().updateSceneGraph()
def _on_resettx(self):
self.item3d().getTransform().set_trans(0.0, 0.0, 0.0)
self.updateItemControls()
self.inspector().updateSceneGraph()
def _on_resetrot(self):
self.item3d().getTransform().set_rotation({"type":"eman","az":0.0,"alt":0.0,"phi":0.0})
self.updateItemControls()
self.inspector().updateSceneGraph()
def _isRotNaN(self, rot1, rot2, rot3):
""" Better check to make sure get_rotation did not return Nan, so to prevent a crash """
if rot1 != rot1: return True
if rot2 != rot2: return True
if rot3 != rot3: return True
return False
def updateItemControls(self):
""" Updates this item inspector. Function is called by the item it observes"""
# Translation update
stdtransfrom = self.item3d().getTransformStdCoord()
translation = stdtransfrom.get_trans()
self.tx.setValue(translation[0])
self.ty.setValue(translation[1])
self.tz.setValue(translation[2])
# Rotation update
rotation = stdtransfrom.get_rotation(str(self.rotcombobox.currentText()))
is_identity = stdtransfrom.is_rot_identity()
comboboxidx = self.rotcombobox.currentIndex()
if comboboxidx == 0:
if self._isRotNaN(rotation["az"],rotation["alt"],rotation["phi"]): return
self.emanazslider.setValue(rotation["az"], quiet=1)
self.emanaltslider.setValue(rotation["alt"], quiet=1)
self.emanphislider.setValue(rotation["phi"], quiet=1)
if comboboxidx == 1:
if self._isRotNaN(rotation["gamma"],rotation["beta"],rotation["alpha"]): return
self.imagicgammaslider.setValue(rotation["gamma"], quiet=1)
self.imagicbetaslider.setValue(rotation["beta"], quiet=1)
self.imagicalphaslider.setValue(rotation["alpha"], quiet=1)
if comboboxidx == 2:
if self._isRotNaN(rotation["psi"],rotation["theta"],rotation["phi"]): return
self.spiderpsislider.setValue(rotation["psi"], quiet=1)
self.spiderthetaslider.setValue(rotation["theta"], quiet=1)
self.spiderphislider.setValue(rotation["phi"], quiet=1)
if comboboxidx == 3:
if self._isRotNaN(rotation["phi"],rotation["theta"],rotation["omega"]): return
self.mrcpsislider.setValue(rotation["phi"], quiet=1)
self.mrcthetaslider.setValue(rotation["theta"], quiet=1)
self.mrcomegaslider.setValue(rotation["omega"], quiet=1)
if comboboxidx == 4:
if self._isRotNaN(rotation["ztilt"],rotation["ytilt"],rotation["xtilt"]): return
self.xyzzslider.setValue(rotation["ztilt"], quiet=1)
self.xyzyslider.setValue(rotation["ytilt"], quiet=1)
self.xyzxslider.setValue(rotation["xtilt"], quiet=1)
if comboboxidx == 5:
if self._isRotNaN(rotation["n1"],rotation["n2"],rotation["n3"]): return
if is_identity and self.spinn1slider.getValue() == 0.0 and self.spinn2slider.getValue() == 0.0 and self.spinn3slider.getValue() == 0.0:
self.spinomegaslider .setValue(0.0, quiet=1)
self.spinn1slider.setValue(0.0, quiet=1)
self.spinn2slider.setValue(0.0, quiet=1)
self.spinn3slider.setValue(1.0, quiet=1)
else:
self.spinomegaslider .setValue(rotation["Omega"], quiet=1)
# Don't change slider if result is NaN
if rotation["n1"] == rotation["n1"]: self.spinn1slider.setValue(rotation["n1"], quiet=1)
if rotation["n2"] == rotation["n2"]: self.spinn2slider.setValue(rotation["n2"], quiet=1)
if rotation["n3"] == rotation["n3"]: self.spinn3slider.setValue(rotation["n3"], quiet=1)
if comboboxidx == 6:
if self._isRotNaN(rotation["n1"],rotation["n2"],rotation["n3"]): return
if is_identity and self.spinn1slider.getValue() == 0.0 and self.spinn2slider.getValue() == 0.0 and self.spinn3slider.getValue() == 0.0:
self.spinomegaslider.setValue(0.0, quiet=1)
self.sgirotn1slider.setValue(0.0, quiet=1)
self.sgirotn2slider.setValue(0.0, quiet=1)
self.sgirotn3slider.setValue(1.0, quiet=1)
else:
self.spinomegaslider.setValue(rotation["q"], quiet=1)
# Don't change slider if result is NaN
if rotation["n1"] == rotation["n1"]: self.sgirotn1slider.setValue(rotation["n1"], quiet=1)
if rotation["n2"] == rotation["n2"]: self.sgirotn2slider.setValue(rotation["n2"], quiet=1)
if rotation["n3"] == rotation["n3"]: self.sgirotn3slider.setValue(rotation["n3"], quiet=1)
if comboboxidx == 7:
if self._isRotNaN(rotation["e1"],rotation["e2"],rotation["e3"]): return
if is_identity:
self.quaternione0slider.setValue(1.0, quiet=1)
self.quaternione1slider.setValue(0.0, quiet=1)
self.quaternione2slider.setValue(0.0, quiet=1)
self.quaternione3slider.setValue(0.0, quiet=1)
else:
self.quaternione0slider.setValue(rotation["e0"], quiet=1)
self.quaternione1slider.setValue(rotation["e1"], quiet=1)
self.quaternione2slider.setValue(rotation["e2"], quiet=1)
self.quaternione3slider.setValue(rotation["e3"], quiet=1)
# Scaling update
self.zoom.setValue(self.item3d().getTransform().get_scale())
def updateMetaData(self):
"""
I didn't want to put this in update b/c this data doesn't change very often, and I don't want to waste CPU
It's a judgement call really, less coupling vs. more efficiency
"""
if self.boundingbox: self.boundingbox.setText("Size: "+self.item3d().boundingboxsize)
def addRotationWidgets(self):
""" Add alll the widgets for the various EMAN2 rotation conventions """
EMANwidget = QtGui.QWidget()
Imagicwidget = QtGui.QWidget()
Spiderwidget = QtGui.QWidget()
MRCwidget = QtGui.QWidget()
XYZwidget = QtGui.QWidget()
spinwidget = QtGui.QWidget()
sgirotwidget = QtGui.QWidget()
quaternionwidget = QtGui.QWidget()
# EMAN
emanbox = QtGui.QVBoxLayout()
self.emanazslider = ValSlider(EMANwidget, (0.0, 360.0), " Az", rounding = 1)
self.emanaltslider = ValSlider(EMANwidget, (0.0, 180.0), "Alt", rounding = 1)
self.emanphislider = ValSlider(EMANwidget, (0.0, 360.0), "Phi", rounding = 1)
emanbox.addWidget(self.emanazslider)
emanbox.addWidget(self.emanaltslider)
emanbox.addWidget(self.emanphislider)
EMANwidget.setLayout(emanbox)
# Imagic
imagicbox = QtGui.QVBoxLayout()
self.imagicgammaslider = ValSlider(Imagicwidget, (0.0, 360.0), "Gamma", rounding = 1)
self.imagicbetaslider = ValSlider(Imagicwidget, (0.0, 180.0), " Beta", rounding = 1)
self.imagicalphaslider = ValSlider(Imagicwidget, (0.0, 360.0), " Alpha", rounding = 1)
imagicbox.addWidget(self.imagicgammaslider)
imagicbox.addWidget(self.imagicbetaslider)
imagicbox.addWidget(self.imagicalphaslider)
Imagicwidget.setLayout(imagicbox)
# Spider
spiderbox = QtGui.QVBoxLayout()
self.spiderpsislider = ValSlider(Spiderwidget, (0.0, 360.0), " Psi", rounding = 1)
self.spiderthetaslider = ValSlider(Spiderwidget, (0.0, 180.0), "Theta", rounding = 1)
self.spiderphislider = ValSlider(Spiderwidget, (0.0, 360.0), " Phi", rounding = 1)
spiderbox.addWidget(self.spiderpsislider)
spiderbox.addWidget(self.spiderthetaslider)
spiderbox.addWidget(self.spiderphislider)
Spiderwidget.setLayout(spiderbox)
# MRC
mrcbox = QtGui.QVBoxLayout()
self.mrcpsislider = ValSlider(MRCwidget, (0.0, 360.0), " Psi", rounding = 1)
self.mrcthetaslider = ValSlider(MRCwidget, (0.0, 180.0), " Theta", rounding = 1)
self.mrcomegaslider = ValSlider(MRCwidget, (0.0, 360.0), "Omega", rounding = 1)
mrcbox.addWidget(self.mrcpsislider)
mrcbox.addWidget(self.mrcthetaslider)
mrcbox.addWidget(self.mrcomegaslider)
MRCwidget.setLayout(mrcbox)
# XYZ
xyzbox = QtGui.QVBoxLayout()
self.xyzzslider = ValSlider(XYZwidget, (0.0, 360.0), "Z", rounding = 1)
self.xyzyslider = ValSlider(XYZwidget, (0.0, 180.0), "Y", rounding = 1)
self.xyzxslider = ValSlider(XYZwidget, (0.0, 360.0), "X", rounding = 1)
xyzbox.addWidget(self.xyzzslider)
xyzbox.addWidget(self.xyzyslider)
xyzbox.addWidget(self.xyzxslider)
XYZwidget.setLayout(xyzbox)
# spin
spinbox = QtGui.QVBoxLayout()
self.spinomegaslider = ValSlider(spinwidget, (0.0, 180.0), "Omega", rounding = 1)
self.spinn1slider = ValSlider(spinwidget, (0.0, 1.0), " N1", rounding = 4)
self.spinn2slider = ValSlider(spinwidget, (0.0, 1.0), " N2", rounding = 4)
self.spinn3slider = ValSlider(spinwidget, (0.0, 1.0), " N3", rounding = 4)
spinbox.addWidget(self.spinomegaslider)
spinbox.addWidget(self.spinn1slider)
spinbox.addWidget(self.spinn2slider)
spinbox.addWidget(self.spinn3slider)
spinwidget.setLayout(spinbox)
# sgirot
sgirotbox = QtGui.QVBoxLayout()
self.sgirotqslider = ValSlider(sgirotwidget, (0.0, 180.0), " Q", rounding = 1)
self.sgirotn1slider = ValSlider(sgirotwidget, (0.0, 1.0), "N1", rounding = 4)
self.sgirotn2slider = ValSlider(sgirotwidget, (0.0, 1.0), "N2", rounding = 4)
self.sgirotn3slider = ValSlider(sgirotwidget, (0.0, 1.0), "N3", rounding = 4)
sgirotbox.addWidget(self.sgirotqslider)
sgirotbox.addWidget(self.sgirotn1slider)
sgirotbox.addWidget(self.sgirotn2slider)
sgirotbox.addWidget(self.sgirotn3slider)
sgirotwidget.setLayout(sgirotbox)
# quaternion
quaternionbox = QtGui.QVBoxLayout()
self.quaternione0slider = ValSlider(quaternionwidget, (0.0, 1.0), "E0", rounding = 4)
self.quaternione1slider = ValSlider(quaternionwidget, (0.0, 1.0), "E1", rounding = 4)
self.quaternione2slider = ValSlider(quaternionwidget, (0.0, 1.0), "E2", rounding = 4)
self.quaternione3slider = ValSlider(quaternionwidget, (0.0, 1.0), "E3", rounding = 4)
quaternionbox.addWidget(self.quaternione0slider)
quaternionbox.addWidget(self.quaternione1slider)
quaternionbox.addWidget(self.quaternione2slider)
quaternionbox.addWidget(self.quaternione3slider)
quaternionwidget.setLayout(quaternionbox)
# Add widgets to the stack
self.rotstackedwidget.addWidget(EMANwidget)
self.rotstackedwidget.addWidget(Imagicwidget)
self.rotstackedwidget.addWidget(Spiderwidget)
self.rotstackedwidget.addWidget(MRCwidget)
self.rotstackedwidget.addWidget(XYZwidget)
self.rotstackedwidget.addWidget(spinwidget)
self.rotstackedwidget.addWidget(sgirotwidget)
self.rotstackedwidget.addWidget(quaternionwidget)
# add choices to combobox
self.rotcombobox.addItem("EMAN")
self.rotcombobox.addItem("Imagic")
self.rotcombobox.addItem("Spider")
self.rotcombobox.addItem("MRC")
self.rotcombobox.addItem("XYZ")
self.rotcombobox.addItem("spin")
self.rotcombobox.addItem("sgirot")
self.rotcombobox.addItem("quaternion")
# Signal for all sliders
QtCore.QObject.connect(self.rotcombobox, QtCore.SIGNAL("activated(int)"), self._rotcombobox_changed)
QtCore.QObject.connect(self.emanazslider,QtCore.SIGNAL("valueChanged"),self._on_EMAN_rotation)
QtCore.QObject.connect(self.emanaltslider,QtCore.SIGNAL("valueChanged"),self._on_EMAN_rotation)
QtCore.QObject.connect(self.emanphislider,QtCore.SIGNAL("valueChanged"),self._on_EMAN_rotation)
QtCore.QObject.connect(self.imagicgammaslider,QtCore.SIGNAL("valueChanged"),self._on_Imagic_rotation)
QtCore.QObject.connect(self.imagicbetaslider,QtCore.SIGNAL("valueChanged"),self._on_Imagic_rotation)
QtCore.QObject.connect(self.imagicalphaslider,QtCore.SIGNAL("valueChanged"),self._on_Imagic_rotation)
QtCore.QObject.connect(self.spiderpsislider,QtCore.SIGNAL("valueChanged"),self._on_Spider_rotation)
QtCore.QObject.connect(self.spiderthetaslider,QtCore.SIGNAL("valueChanged"),self._on_Spider_rotation)
QtCore.QObject.connect(self.spiderphislider,QtCore.SIGNAL("valueChanged"),self._on_Spider_rotation)
QtCore.QObject.connect(self.mrcpsislider,QtCore.SIGNAL("valueChanged"),self._on_MRC_rotation)
QtCore.QObject.connect(self.mrcthetaslider,QtCore.SIGNAL("valueChanged"),self._on_MRC_rotation)
QtCore.QObject.connect(self.mrcomegaslider,QtCore.SIGNAL("valueChanged"),self._on_MRC_rotation)
QtCore.QObject.connect(self.xyzzslider,QtCore.SIGNAL("valueChanged"),self._on_XYZ_rotation)
QtCore.QObject.connect(self.xyzyslider,QtCore.SIGNAL("valueChanged"),self._on_XYZ_rotation)
QtCore.QObject.connect(self.xyzxslider,QtCore.SIGNAL("valueChanged"),self._on_XYZ_rotation)
QtCore.QObject.connect(self.spinomegaslider,QtCore.SIGNAL("valueChanged"),self._on_spin_rotation)
QtCore.QObject.connect(self.spinn1slider,QtCore.SIGNAL("valueChanged"),self._on_spin_rotation)
QtCore.QObject.connect(self.spinn2slider,QtCore.SIGNAL("valueChanged"),self._on_spin_rotation)
QtCore.QObject.connect(self.spinn3slider,QtCore.SIGNAL("valueChanged"),self._on_spin_rotation)
QtCore.QObject.connect(self.sgirotqslider,QtCore.SIGNAL("valueChanged"),self._on_sgirot_rotation)
QtCore.QObject.connect(self.sgirotn1slider,QtCore.SIGNAL("valueChanged"),self._on_sgirot_rotation)
QtCore.QObject.connect(self.sgirotn2slider,QtCore.SIGNAL("valueChanged"),self._on_sgirot_rotation)
QtCore.QObject.connect(self.sgirotn3slider,QtCore.SIGNAL("valueChanged"),self._on_sgirot_rotation)
QtCore.QObject.connect(self.quaternione0slider,QtCore.SIGNAL("valueChanged"),self._on_quaternion_rotation)
QtCore.QObject.connect(self.quaternione1slider,QtCore.SIGNAL("valueChanged"),self._on_quaternion_rotation)
QtCore.QObject.connect(self.quaternione2slider,QtCore.SIGNAL("valueChanged"),self._on_quaternion_rotation)
QtCore.QObject.connect(self.quaternione3slider,QtCore.SIGNAL("valueChanged"),self._on_quaternion_rotation)
def _rotcombobox_changed(self, idx):
self.rotstackedwidget.setCurrentIndex(idx)
self.updateItemControls()
def _on_EMAN_rotation(self, value):
self._set_rotation_std_coords(Transform({"type":"eman","az":self.emanazslider.getValue(),"alt":self.emanaltslider.getValue(),"phi":self.emanphislider.getValue()}))
self.inspector().updateSceneGraph()
def _on_Imagic_rotation(self, value):
self._set_rotation_std_coords(Transform({"type":"imagic","gamma":self.imagicgammaslider.getValue(),"beta":self.imagicbetaslider.getValue(),"alpha":self.imagicalphaslider.getValue()}))
self.inspector().updateSceneGraph()
def _on_Spider_rotation(self, value):
self._set_rotation_std_coords(Transform({"type":"spider","psi":self.spiderpsislider.getValue(),"theta":self.spiderthetaslider.getValue(),"phi":self.spiderphislider.getValue()}))
self.inspector().updateSceneGraph()
def _on_MRC_rotation(self, value):
self._set_rotation_std_coords(Transform({"type":"mrc","phi":self.mrcpsislider.getValue(),"theta":self.mrcthetaslider.getValue(),"omega":self.mrcomegaslider.getValue()}))
self.inspector().updateSceneGraph()
def _on_XYZ_rotation(self, value):
self._set_rotation_std_coords(Transform({"type":"xyz","ztilt":self.xyzzslider.getValue(),"ytilt":self.xyzyslider.getValue(),"xtilt":self.xyzxslider.getValue()}))
self.inspector().updateSceneGraph()
def _on_spin_rotation(self, value):
v = Vec3f(self.spinn1slider.getValue(), self.spinn2slider.getValue(), self.spinn3slider.getValue())
v.normalize()
self._set_rotation_std_coords(Transform({"type":"spin","Omega":self.spinomegaslider.getValue(),"n1":v[0],"n2":v[1],"n3":v[2]}))
self.inspector().updateSceneGraph()
def _on_sgirot_rotation(self, value):
v = Vec3f(self.sgirotn1slider.getValue(), self.sgirotn2slider.getValue(), self.sgirotn3slider.getValue())
v.normalize()
self._set_rotation_std_coords(Transform({"type":"sgirot","q":self.sgirotqslider.getValue(),"n1":v[0],"n2":v[1],"n3":v[2]}))
self.inspector().updateSceneGraph()
def _on_quaternion_rotation(self, value):
v = Vec4f(self.quaternione0slider.getValue(), self.quaternione1slider.getValue(), self.quaternione2slider.getValue(), self.quaternione3slider.getValue())
v.normalize()
self._set_rotation_std_coords(Transform({"type":"quaternion","e0":v[0],"e1":v[1],"e2":v[2],"e3":v[3]}))
self.inspector().updateSceneGraph()
def _set_rotation_std_coords(self, rotation):
""" This function sets the rotation as if there were no preceeding ones, otherwise a rot around Z could be arounf y,x, etc.
Works by transforming local coords into global corrds"""
tt = rotation
tp = self.item3d().getParentMatrixProduct()
if tp: tt = tp.inverse()*rotation
self.item3d().getTransform().set_rotation(tt.get_rotation())
if __name__ == '__main__':
#Test code
root = EMItem3D(0)
a = EMItem3D(root)
b = EMItem3D(root)
c = EMItem3D(root)
# root.addChildren([a,b,c])
aa = EMItem3D(a)
ab = EMItem3D(a)
ac = EMItem3D(a)
# a.addChildren([aa,ab,ac])
ba = EMItem3D(b)
bb = EMItem3D(b)
bc = EMItem3D(b)
# b.addChildren([ba,bb,bc])
ca = EMItem3D(c)
cb = EMItem3D(c)
cc = EMItem3D(c)
# c.addChildren([ca,cb,cc])
aaa = EMItem3D(aa)
aab = EMItem3D(aa)
aac = EMItem3D(aa)
# aa.addChildren([aaa,aab,aac])
a.is_selected = True
aab.is_selected = True
ba.is_selected = True
bc.is_selected = True
c.is_selected = True
cc.is_selected = True
print "getAllSelectedNodes() test: "
print "\tpassed? ", set(root.getAllSelectedNodes()) == set([a,aab,ba,bc,c,cc])
print "getNearbySelectedNodes() test: "
print "\tpassed? ", set(root.getNearbySelectedNodes()) == set([a,ba,bc,c])
print "getDistantSelectedNodes() test: "
print "\tpassed? ", set(root.getDistantSelectedNodes()) == set([aab,ba,bc,cc])
root.displayTree()
print "\n"
a.removeParentReferences()
root.displayTree()
print "\n"
root.addParentReferences()
root.displayTree()
print "\n"
print "removing child..."
root.removeChild(a)
root.displayTree()
del a, b, c, aa, ab, ac, ba, bb, bc, ca, cb, cc, aaa, aab, aac #Ensure instances are deleted before the class object is deleted, which is important in EMItem3D.__del__(self): | [
"[email protected]"
]
| |
5b7a76806a2d1052d41fbce4748ec2feec0badda | 3873dadd6e60a426439fbfefcff084590cd39670 | /solver.py | 04f717bfbda2274a9b02cda05635305feab94052 | []
| no_license | adam-urbanczyk/Eigenvalues | 52e7a0f25501f24ee8d76f4ce2d6a49e17565813 | 5484b0be5fac95ef0a0f1f2ab9d19ddffe128e6c | refs/heads/master | 2021-01-11T21:23:49.372225 | 2016-03-22T04:46:44 | 2016-03-22T04:46:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,571 | py | """
Provides Solver class and other tools for solving eigenvalue problems.
Handles all adaptive cases.
"""
from dolfin import refine, CellFunction, Constant, Measure, FunctionSpace, \
TrialFunction, TestFunction, dot, assemble, dx, DirichletBC, \
grad, SLEPcEigenSolver, Function, FacetFunction, PETScMatrix, Mesh, \
project, interpolate, ds, \
Expression, DomainBoundary
from boundary import get_bc_parts
from transforms import transform_mesh
from domains import build_mesh
import numpy as np
import time
USE_EIGEN = False
# FIXME finish EIGEN implementation
class Solver:
""" Main eigenvalue solver class. """
def __init__(self, mesh, bcList, transformList, deg=2,
bcLast=False, method='CG',
wTop='1', wBottom='1'):
"""
Initialize basic data.
Method should be either CG or CR.
"""
self.pickleMesh = None
self.mesh = mesh
self.deg = deg
self.dim = mesh.topology().dim()
self.size = self.mesh.size(self.dim)
self.exit = False
self.bcList = bcList
self.transformList = transformList
self.bcLast = bcLast
if method in {'nonconforming', 'lower bound'}:
self.method = 'CR'
self.deg = 1
else:
self.method = 'CG'
self.CGbound = (method == 'lower bound')
self.monitor = None
self.adaptive = self.upTo = self.edge = False
self.number = 10
self.target = None
self.wTop = wTop
self.wBottom = wBottom
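# wTop/wBottom are dolfin Expression strings (C++ syntax) weighting the
# numerator and denominator of the Rayleigh quotient; the default '1'
# leaves the problem unweighted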
self.space = None
def refineTo(self, size, upTo=False, edge=False):
"""
Save arguments.
Procedure is done while solving.
"""
self.upTo = upTo
self.size = size
self.edge = edge
def refineMesh(self):
""" Perform mesh refinement. """
if self.upTo:
mesh = refine_mesh_upto(self.mesh, self.size, self.edge)
else:
mesh = refine_mesh(self.mesh, self.size, self.edge)
return mesh
def __call__(self, monitor):
""" Call solvers and return eigenvalues/eigenfunctions. """
self.monitor = monitor
self.mesh = build_mesh(*self.pickleMesh)
results = list(self.solve())
results.extend(self.getGeometry())
return results
def progress(self, s):
"""
Send progress report.
Assumes monitor is a queue (as in multiprocessing), or a function.
"""
try:
self.monitor.put(s)
except:
try:
self.monitor(s)
except:
pass
time.sleep(0.01)
def newFunction(self):
""" Create a function in the appropriate FEM space. """
if not self.mesh:
self.addMesh()
if not self.space:
# space takes a long time to construct
self.space = FunctionSpace(self.mesh, 'CG', 1)
return Function(self.space)
def addMesh(self, mesh=None):
"""
Keep fully transformed mesh.
This breaks pickling.
"""
if mesh is None:
self.mesh = build_mesh(*self.pickleMesh)
self.mesh = self.refineMesh()
self.mesh = transform_mesh(self.mesh, self.transformList)
self.finalsize = self.mesh.size(self.dim)
else:
self.mesh = mesh
self.extraRefine = self.deg > 1
if self.extraRefine:
self.mesh = refine(self.mesh)
def removeMesh(self):
""" Remove mesh to restore pickling ability. """
if self.pickleMesh is None:
self.pickleMesh = [self.mesh.cells(), self.mesh.coordinates()]
self.mesh = None
def solveFor(self, number=10, target=None, exit=False):
""" Save parameters related to number of eigenvalues. """
self.number = number
self.target = target
self.exit = exit
def solve(self):
""" Find eigenvalues for transformed mesh. """
self.progress("Building mesh.")
# build transformed mesh
mesh = self.refineMesh()
# dim = mesh.topology().dim()
if self.bcLast:
mesh = transform_mesh(mesh, self.transformList)
Robin, Steklov, shift, bcs = get_bc_parts(mesh, self.bcList)
else:
Robin, Steklov, shift, bcs = get_bc_parts(mesh, self.bcList)
mesh = transform_mesh(mesh, self.transformList)
# boundary conditions computed on non-transformed mesh
# copy the values to transformed mesh
fun = FacetFunction("size_t", mesh, shift)
fun.array()[:] = bcs.array()[:]
bcs = fun
ds = Measure('ds', domain=mesh, subdomain_data=bcs)
V = FunctionSpace(mesh, self.method, self.deg)
u = TrialFunction(V)
v = TestFunction(V)
self.progress("Assembling matrices.")
wTop = Expression(self.wTop)
wBottom = Expression(self.wBottom)
#
# build stiffness matrix form
#
s = dot(grad(u), grad(v))*wTop*dx
# add Robin parts
for bc in Robin:
s += Constant(bc.parValue)*u*v*wTop*ds(bc.value+shift)
#
# build mass matrix form
#
if len(Steklov) > 0:
m = 0
for bc in Steklov:
m += Constant(bc.parValue)*u*v*wBottom*ds(bc.value+shift)
else:
m = u*v*wBottom*dx
# assemble
# if USE_EIGEN:
# S, M = EigenMatrix(), EigenMatrix()
# tempv = EigenVector()
# else:
S, M = PETScMatrix(), PETScMatrix()
# tempv = PETScVector()
if not np.any(bcs.array() == shift+1):
# no Dirichlet parts
assemble(s, tensor=S)
assemble(m, tensor=M)
else:
#
# with EIGEN we could
# apply Dirichlet condition symmetrically
# completely remove rows and columns
#
# Dirichlet parts are marked with shift+1
#
# temp = Constant(0)*v*dx
bc = DirichletBC(V, Constant(0.0), bcs, shift+1)
# assemble_system(s, temp, bc, A_tensor=S, b_tensor=tempv)
# assemble_system(m, temp, bc, A_tensor=M, b_tensor=tempv)
assemble(s, tensor=S)
bc.apply(S)
assemble(m, tensor=M)
# bc.zero(M)
# if USE_EIGEN:
# M = M.sparray()
# M.eliminate_zeros()
# print M.shape
# indices = M.indptr[:-1] - M.indptr[1:] < 0
# M = M[indices, :].tocsc()[:, indices]
# S = S.sparray()[indices, :].tocsc()[:, indices]
# print M.shape
#
# solve the eigenvalue problem
#
self.progress("Solving eigenvalue problem.")
eigensolver = SLEPcEigenSolver(S, M)
eigensolver.parameters["problem_type"] = "gen_hermitian"
eigensolver.parameters["solver"] = "krylov-schur"
if self.target is not None:
eigensolver.parameters["spectrum"] = "target real"
eigensolver.parameters["spectral_shift"] = self.target
else:
eigensolver.parameters["spectrum"] = "smallest magnitude"
eigensolver.parameters["spectral_shift"] = -0.01
eigensolver.parameters["spectral_transform"] = "shift-and-invert"
eigensolver.solve(self.number)
self.progress("Generating eigenfunctions.")
if eigensolver.get_number_converged() == 0:
return None
eigf = []
eigv = []
if self.deg > 1:
mesh = refine(mesh)
W = FunctionSpace(mesh, 'CG', 1)
for i in range(eigensolver.get_number_converged()):
pair = eigensolver.get_eigenpair(i)[::2]
eigv.append(pair[0])
u = Function(V)
u.vector()[:] = pair[1]
eigf.append(interpolate(u, W))
return eigv, eigf
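# Typical driver, sketched under assumptions (bcList/transformList contents
# depend on the boundary/domains modules; monitor may be any callable):
# s = Solver(mesh, bcList, transformList)
# s.removeMesh() # sets pickleMesh, required before __call__
# eigv, eigf, geometry = s(monitor=lambda msg: None)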
def SolveExit(self):
""" Find expected exit time/torsion function. """
pass
def getGeometry(self):
""" Compute geometric factors. """
self.progress("Computing geometric factors.")
# build transformed mesh
mesh = self.refineMesh()
mesh = transform_mesh(mesh, self.transformList)
V = FunctionSpace(mesh, 'CG', 1)
u = Function(V)
u.vector()[:] = 1
# area/volume
# weight from denominator of Rayleigh
w = Expression(self.wBottom)
geometry = {}
A = geometry['A'] = assemble(u*w*dx)
# perimeter/surface area
geometry['P'] = assemble(u*w*ds)
# center of mass
x = Expression('x[0]')
y = Expression('x[1]')
cx = assemble(u*x*w*dx)/A
cy = assemble(u*y*w*dx)/A
c = [cx, cy]
if self.dim == 3:
z = Expression('x[2]')
cz = assemble(u*z*w*dx)/A
c.append(cz)
geometry['c'] = c
# moment of inertia
if self.dim == 2:
f = Expression(
"(x[0]-cx)*(x[0]-cx)+(x[1]-cy)*(x[1]-cy)",
cx=cx, cy=cy)
else:
f = Expression(
"(x[0]-cx)*(x[0]-cx)+(x[1]-cy)*(x[1]-cy)+(x[2]-cz)*(x[2]-cz)",
cx=cx, cy=cy, cz=cz)
geometry['I'] = assemble(u*f*w*dx)
# TODO: implement Gs
# TODO: implement diameter and inradius
geometry['D'] = None
geometry['R'] = None
return [geometry]
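# geometry keys: 'A' area/volume, 'P' perimeter/surface area, 'c' center
# of mass, 'I' moment of inertia about c, 'D'/'R' diameter and inradius
# (not yet implemented, returned as None)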
def AdaptiveSolve(self):
""" Adaptive refine and solve. """
pass
def refine_mesh(mesh, size, edge=False):
""" Refine mesh to at least given size, using one of two methods. """
dim = mesh.topology().dim()
if not edge:
# FEniCS 1.5 and 1.6 have a bug which prevents uniform refinement
while mesh.size(dim) < size:
mesh = refine(mesh)
else:
# Refine based on MeshFunction
while mesh.size(dim) < size:
print refine(mesh).size(dim)
full = CellFunction("bool", mesh, True)
print refine(mesh, full).size(dim)
mesh = refine(mesh, full)
return mesh
def refine_mesh_upto(mesh, size, edge=False):
""" Refine mesh to at most given size, using one of two methods. """
dim = mesh.topology().dim()
if mesh.size(dim) > size:
return mesh
if not edge:
while True:
# FEniCS 1.5 and 1.6 have a bug which prevents uniform refinement
mesh2 = refine(mesh)
if mesh2.size(dim) > size:
return mesh
mesh = mesh2
else:
# Refine based on MeshFunction
while True:
full = CellFunction("bool", mesh, True)
mesh2 = refine(mesh, full)
if mesh2.size(dim) > size:
return mesh
mesh = mesh2
def shiftMesh(mesh, vector):
""" Shift mesh by vector. """
mesh.coordinates()[:, :] += np.array(vector)[None, :]
def symmetrize(u, d, sym):
""" Symmetrize function u. """
if len(d) == 3:
# three dimensions -> cycle XYZ
return cyclic3D(u)
elif len(d) >= 4:
# four dimensions -> rotations in 2D
return rotational(u, d[-1])
nrm = np.linalg.norm(u.vector())
V = u.function_space()
mesh = Mesh(V.mesh())
# test if domain is symmetric using function equal 0 inside, 1 on boundary
# extrapolation will force large values if not symmetric since the flipped
# domain is different
bc = DirichletBC(V, 1, DomainBoundary())
test = Function(V)
bc.apply(test.vector())
if len(d) == 2:
# two dimensions given: swap dimensions
mesh.coordinates()[:, d] = mesh.coordinates()[:, d[::-1]]
else:
# one dimension given: reflect
mesh.coordinates()[:, d[0]] *= -1
# FIXME functionspace takes a long time to construct, maybe copy?
W = FunctionSpace(mesh, 'CG', 1)
try:
# testing
test = interpolate(Function(W, test.vector()), V)
# max-min should be around 1 if domain was symmetric
# may be slightly above due to boundary approximation
assert max(test.vector()) - min(test.vector()) < 1.1
v = interpolate(Function(W, u.vector()), V)
if sym:
# symmetric
pr = project(u+v)
else:
# antisymmetric
pr = project(u-v)
# small solution norm most likely means that symmetrization gives
# trivial function
assert np.linalg.norm(pr.vector())/nrm > 0.01
return pr
except:
# symmetrization failed for some reason
print "Symmetrization " + str(d) + " failed!"
return u
def cyclic3D(u):
""" Symmetrize with respect to (xyz) cycle. """
try:
nrm = np.linalg.norm(u.vector())
V = u.function_space()
assert V.mesh().topology().dim() == 3
mesh1 = Mesh(V.mesh())
mesh1.coordinates()[:, :] = mesh1.coordinates()[:, [1, 2, 0]]
W1 = FunctionSpace(mesh1, 'CG', 1)
# testing if symmetric
bc = DirichletBC(V, 1, DomainBoundary())
test = Function(V)
bc.apply(test.vector())
test = interpolate(Function(W1, test.vector()), V)
assert max(test.vector()) - min(test.vector()) < 1.1
v1 = interpolate(Function(W1, u.vector()), V)
mesh2 = Mesh(mesh1)
mesh2.coordinates()[:, :] = mesh2.coordinates()[:, [1, 2, 0]]
W2 = FunctionSpace(mesh2, 'CG', 1)
v2 = interpolate(Function(W2, u.vector()), V)
pr = project(u+v1+v2)
assert np.linalg.norm(pr.vector())/nrm > 0.01
return pr
except:
print "Cyclic symmetrization failed!"
return u
def rotational(u, n):
""" Symmetrize with respect to n-fold symmetry. """
# TODO: test one rotation only
V = u.function_space()
if V.mesh().topology().dim() > 2 or n < 2:
return u
mesh = V.mesh()
sum = u
nrm = np.linalg.norm(u.vector())
rotation = np.array([[np.cos(2*np.pi/n), np.sin(2*np.pi/n)],
[-np.sin(2*np.pi/n), np.cos(2*np.pi/n)]])
for i in range(1, n):
mesh = Mesh(mesh)
mesh.coordinates()[:, :] = np.dot(mesh.coordinates(), rotation)
W = FunctionSpace(mesh, 'CG', 1)
v = interpolate(Function(W, u.vector()), V)
sum += v
pr = project(sum)
if np.linalg.norm(pr.vector())/nrm > 0.01:
return pr
else:
return u
| [
"[email protected]"
]
| |
3e7d7a968820733d5a77fff903a398968800b2e8 | 4dccaf218916f20294d2fb944c78244210636955 | /wordladder/neighbors.py | 2f39fc1064fad1452249909ac8e93418e46ab8bb | []
| no_license | aaronli39/artificial_intel | 750649495fde223fa22b83f3c82092cbb89001f4 | 3c284ac836972ec9ecc67ee10b3431bdccf4f2a3 | refs/heads/master | 2021-06-22T02:22:09.531412 | 2020-12-31T01:59:48 | 2020-12-31T01:59:48 | 168,211,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,231 | py | #! /usr/bin/python3
import sys
def comp(a, b, end):
ac, bc = 0, 0
# print(a, b)
for i in range(4):
if a[1][i] == end[i]: ac += 1
for i in range(4):
if b[1][i] == end[i]: bc += 1
if (ac + a[0]) < (bc + b[0]): return 1
elif (ac + a[0]) == (bc + b[0]): return 0
return -1
# priority queue
class Pqueue:
# default comparison
# a and b are lists [steps, word]
def bad_cmp(self, a, b, end):
ac, bc = 0, 0
for i in range(self.len):
if a[1][i] == end[i]: ac += 1
for i in range(self.len):
if b[1][i] == end[i]: bc += 1
if (ac - a[0]) < (bc - b[0]): return -1
elif (ac - a[0]) == (bc - b[0]): return 0
return 1
def ord_cmp(self, a, b, end):
ac, bc = 0, 0
for i in range(self.len):
if a[1][i] == end[i]: ac += 1
for i in range(self.len):
if b[1][i] == end[i]: bc += 1
if (ac - a[0]) < (bc - b[0]): return 1
elif (ac - a[0]) == (bc - b[0]): return 0
return -1
# init initializes the array, size, and comparator obj
def __init__(self, length, start, result, comparator=ord_cmp):
self.data = []
self.size = 0
self.len = length
self.end = result
self.cmp_to = comparator
# push takes in 1 val, appends it, and bubbles the
# val to appropriate position
def push(self, val):
self.data.append(val)
self.size += 1
el = self.size - 1
par = int((self.size - 2) / 2)
while(self.cmp_to(self, self.data[el], self.data[par], self.end) == -1 and par >= 0):
self.data[el], self.data[par] = self.data[par], self.data[el]
el = par
par = int((par - 1) / 2)
    # push_all takes in a list, appends each value in the list to the
    # queue, and then bubbles them to their correct location
def push_all(self, data):
[self.push(i) for i in data]
return
# pop root, rearrange rest of array
def pop(self):
if self.size == 0: return None
ret = self.data[0]
# set new root as last element to bubble down
self.data[0] = self.data[self.size - 1]
self.data, self.size = self.data[:-1], self.size - 1
# starting indices with left and right children
cur = 0
left, right = 2 * cur + 1, 2 * cur + 2
# if it's just a one level tree, pop root and swap left and current if necessary
if self.size == 2 and self.cmp_to(self, self.data[cur], self.data[left], self.end) == 1:
self.data[cur], self.data[left] = self.data[left], self.data[cur]
while(left < self.size and right < self.size and
(self.cmp_to(self, self.data[cur], self.data[left], self.end) == 1 or self.cmp_to(self, self.data[cur], self.data[right], self.end) == 1)):
# if left smaller than right
if self.cmp_to(self, self.data[left], self.data[right], self.end) == 1:
# right smaller so swap right and current
self.data[cur], self.data[right] = self.data[right], self.data[cur]
cur = right
left, right = cur * 2 + 1, cur * 2 + 2
else:
# swap left and current
self.data[cur], self.data[left] = self.data[left], self.data[cur]
cur = left
left, right = cur * 2 + 1, cur * 2 + 2
return ret
# look at next root
def peek(self):
if self.size == 0: return None
return self.data[0][1]
# returns list representation of the queue
def toList(self):
ret = [self.pop() for i in self.data]
return ret
# returns the elements in the queue without popping
def internal_list(self):
return self.data
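# Hedged usage sketch for Pqueue (not called anywhere in this module):
# entries are [steps, word, path] lists, and the default ord_cmp pops the
# entry with the highest (letters matching `end`) - steps score. The words
# below are illustrative only.
def _pqueue_demo():
    q = Pqueue(4, "cold", "warm")
    q.push([1, "cord", []])   # 1 letter matches "warm"
    q.push([1, "word", []])   # 2 letters match "warm"
    return q.pop()            # -> [1, "word", []]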
# do the actual ladder
def ladder(words, length, start, end):
queue = Pqueue(length, start, end)
neighbors, path, explored, steps = [start], [], set(), 0
while True:
steps += 1
for i in neighbors:
queue.push([steps, i, path])
# if no more elements there is no ladder
if len(queue.data) == 0: return [start, end]
val = queue.pop()
if val[1] == end: return val[2] + [val[1]]
else:
if val[1] not in explored:
explored.add(val[1])
path = val[2] + [val[1]]
neighbors = [word for word in words[val[1]] if word not in explored]
else:
path = []
neighbors = []
# read word pairs from the input file, build the neighbor map, and write a ladder per pair
def do(inp, out, data, length):
# vars to use
alpha = "abcdefghijklmnopqrstuvwxyz"
words = {}
try:
with open(inp, "rU") as temp:
temp = temp.read().split("\n")
goal = []
for i in temp:
goal.append(i.split(","))
except:
print("error reading input file")
return
# create the dictionary of words of desired length
for i in data:
ind = 0
neighbors = set()
while ind < length:
for letter in alpha:
word = i[:ind] + letter + i[ind + 1:]
if word in data and word != i:
neighbors.add(word)
ind += 1
words[i] = list(neighbors)
# getting wordladder data
if goal[0][0] == "":
print("input file empty")
return
ret = []
print(goal)
for i in goal:
ret.append(ladder(words, length, i[0], i[1]))
# create write data
with open(out, "w") as temp:
for i in ret:
temp.write(",".join(i) + "\n")
return
# sys argv stuff
def main():
try:
with open(sys.argv[1], "rU") as temp:
length = len(temp.read().split("\n")[0].split(",")[0])
with open("dictall.txt", "rU") as temp:
data = set()
# create set of all 4 letter words to pass on
data = {word for word in temp.read().strip().split("\n") if len(word) == length}
    except:
        print("error getting input file")
        return
    do(sys.argv[1], sys.argv[2], data, length)
main() | [
"[email protected]"
]
| |
fc3b822caec378c8a3defbf7471bb237e7933eb6 | bd8eb564710998df1f2d77650d357d1b3b02fbd4 | /can/bt_plugin_can.py | ea56995dcbca5f67b055e7185b770431e790c486 | []
| no_license | gpollo/babeltrace-fun-plugins | 43afd3c64004cebc6902e2f8713b3a23a980b1ea | 7145f8647db7d9662fd37ecfe5b4265654e7b7fc | refs/heads/master | 2022-02-25T17:43:51.193150 | 2019-08-16T04:34:55 | 2019-08-16T04:34:55 | 198,295,555 | 0 | 0 | null | 2019-07-22T20:14:43 | 2019-07-22T20:14:42 | null | UTF-8 | Python | false | false | 9,344 | py | import bt2
import cantools
class CANIterator(bt2._UserMessageIterator):
def __init__(self, port):
(self._events, self._trace_class, self._messages) = port.user_data
self._trace = self._trace_class()
self._stream_class = self._trace_class[0]
self._stream = self._trace.create_stream(self._stream_class)
self._init_msgs = [self._create_stream_beginning_message(self._stream)]
self._end_msgs = [self._create_stream_end_message(self._stream)]
self._iter = iter(self._events)
self._next = self._next_init
def _create_decoded_event(self, timestamp, frame_id, bytedata):
if len(self._messages[frame_id]) == 2:
(database, event_class) = self._messages[frame_id]
decoded_data = database.decode_message(frame_id, bytedata)
elif len(self._messages[frame_id]) == 3:
(database, event_classes, key) = self._messages[frame_id]
decoded_data = database.decode_message(frame_id, bytedata)
event_class = event_classes[decoded_data[key]]
else:
raise ValueError
event_msg = self._create_event_message(
event_class, self._stream, default_clock_snapshot=timestamp
)
for key in event_msg.event.payload_field.keys():
event_msg.event.payload_field[key] = decoded_data[key]
return event_msg
def _create_unknown_event(self, timestamp, frame_id, bytedata):
event_class = self._messages[None]
event_msg = self._create_event_message(
event_class, self._stream, default_clock_snapshot=timestamp
)
event_msg.event.payload_field["id"] = frame_id
        for i in range(8):
event_msg.event.payload_field[f"byte {i}"] = bytedata[i]
return event_msg
def _next_init(self):
if len(self._init_msgs) > 0:
return self._init_msgs.pop(0)
else:
self._next = self._next_events
return self._next()
def _next_events(self):
try:
(timestamp, frame_id, bytedata) = next(self._iter)
if frame_id in self._messages:
event_msg = self._create_decoded_event(timestamp, frame_id, bytedata)
else:
event_msg = self._create_unknown_event(timestamp, frame_id, bytedata)
return event_msg
except StopIteration:
self._next = self._next_end
return self._next()
def _next_end(self):
if len(self._end_msgs) > 0:
return self._end_msgs.pop(0)
else:
raise StopIteration
def __next__(self):
return self._next()
@bt2.plugin_component_class
class CANSource(bt2._UserSourceComponent, message_iterator_class=CANIterator):
def __init__(self, params, obj):
inputs = CANSource._get_param_list(params, "inputs")
databases = CANSource._get_param_list(params, "databases")
(trace_class, messages) = self._create_trace_class_for_databases(databases)
for path in inputs:
self._create_port_for_can_trace(trace_class, messages, str(path))
def _create_trace_class_for_databases(self, databases):
messages = dict()
clock_class = self._create_clock_class(frequency=1000)
trace_class = self._create_trace_class()
stream_class = trace_class.create_stream_class(
name="can", default_clock_class=clock_class
)
print(f"created trace class {trace_class}")
event_class = CANSource._create_unknown_event_class(trace_class, stream_class)
messages[None] = event_class
print(f"created event class 'UNKNOWN' at {event_class}")
for path in databases:
CANSource._create_database_event_classes(
trace_class, stream_class, str(path), messages
)
return (trace_class, messages)
def _create_port_for_can_trace(self, trace_class, messages, path):
try:
file = open(path, "rb")
except FileNotFoundError as err:
raise ValueError(f"couldn't read {path}") from err
events = []
while True:
# Custom binary format parsing.
#
# [bytes 0 - 3] timestamp
# [bytes 4 - 7] frame ID (standard or extended)
# [bytes 8 - 15] up to 64 bits of data
msg = file.read(16)
if msg == b"":
break
timestamp = 0
timestamp += msg[0] << 0
timestamp += msg[1] << 8
timestamp += msg[2] << 16
            timestamp += msg[3] << 24
            frame_id = 0
            frame_id += msg[4] << 0
            frame_id += msg[5] << 8
            frame_id += msg[6] << 16
            frame_id += msg[7] << 24
data = msg[8:]
events.append((timestamp, frame_id, data))
self._add_output_port(path, (events, trace_class, messages))
@staticmethod
def _get_param_list(params, key):
if key not in params:
raise ValueError(f"missing `{key}` parameter")
param = params[key]
if type(param) != bt2.ArrayValue:
raise TypeError(
f"expecting `{key}` parameter to be a list, got a {type(param)}"
)
if len(param) == 0:
raise ValueError(f"expecting `{key}` to not be of length zero")
return param
@staticmethod
def _create_database_event_classes(trace_class, stream_class, path, messages):
try:
database = cantools.db.load_file(path)
except FileNotFoundError as err:
raise ValueError(f"database file `{path}` couldn't be read.") from err
for message in database.messages:
if message.frame_id in messages:
print(f"{message.name} already present in another database")
continue
multiplexed = False
for signal in message.signal_tree:
if isinstance(signal, str):
continue
multiplexed = True
break
if multiplexed:
(event_classes, key) = CANSource._create_multiplexed_message_classes(
trace_class, stream_class, message
)
messages[message.frame_id] = [database, event_classes, key]
print(f"created event class '{message.name}' at {event_class}")
else:
event_class = CANSource._create_message_class(
trace_class, stream_class, message
)
messages[message.frame_id] = [database, event_class]
print(f"created event class '{message.name}' at {event_class}")
@staticmethod
def _create_unknown_event_class(trace_class, stream_class):
field_class = trace_class.create_structure_field_class()
field_class.append_member("id", trace_class.create_real_field_class())
for i in range(8):
field_class.append_member(
f"byte {i}", trace_class.create_real_field_class()
)
event_class = stream_class.create_event_class(
name="UNKNOWN", payload_field_class=field_class
)
return event_class
@staticmethod
def _create_multiplexed_message_classes(trace_class, stream_class, message):
event_classes = dict()
multiplexer = None
signals = []
for signal in message.signal_tree:
if isinstance(signal, str):
signals.append(signal)
elif multiplexer is None:
multiplexer = signal
else:
raise ValueError(f"multiple multiplexer in message `{message.name}`")
if multiplexer is None or len(multiplexer) == 0:
raise ValueError(f"no multiplexer found in `{message.name}`")
if len(multiplexer) > 1:
raise ValueError(f"more than 1 multiplexer found in `{message.name}`")
key = list(multiplexer.keys())[0]
for value in multiplexer[key].keys():
field_class = trace_class.create_structure_field_class()
field_class.append_member(key, trace_class.create_real_field_class())
for signal in multiplexer[key][value]:
field_class.append_member(signal, trace_class.create_real_field_class())
for signal in signals:
field_class.append_member(signal, trace_class.create_real_field_class())
event_class = stream_class.create_event_class(
name=message.name, payload_field_class=field_class
)
event_classes[value] = event_class
return (event_classes, key)
@staticmethod
def _create_message_class(trace_class, stream_class, message):
field_class = trace_class.create_structure_field_class()
for signal in message.signals:
field_class.append_member(
signal.name, trace_class.create_real_field_class()
)
event_class = stream_class.create_event_class(
name=message.name, payload_field_class=field_class
)
return event_class
bt2.register_plugin(
module_name=__name__,
name="can",
description="CAN Format",
author="Gabriel-Andrew Pollo-Guilbert",
license="GPL",
version=(1, 0, 0),
)
| [
"[email protected]"
]
| |
635ba9cef3c47552319481c624406b556a3d4b17 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp-obt/sblp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=0/params.py | aff20aa8732740d3ec888ec12ac9538f67a70364 | []
| no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.570619',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 0,
'utils': 'uni-medium-3'}
| [
"[email protected]"
]
| |
32e18a4664cd9782b3a51b71a5e8d6d18c54ee03 | c8705c604e9dbc50c65d157f95c42f2529354bbb | /keras_layers/keras_layer_L2Normalization.py | 3c5977e6a76ed021285bfd28c2762ba39cdd4fff | []
| no_license | toschilt/object_detection_ic | e00b45c56865273f831b29c6017e89f92e545de6 | bd2d8aa36c6734cdb21c517ff3bb47d174993663 | refs/heads/main | 2023-05-30T18:33:23.881966 | 2021-06-24T02:32:58 | 2021-06-24T02:32:58 | 342,574,861 | 0 | 0 | null | 2021-06-24T02:32:59 | 2021-02-26T12:52:28 | null | UTF-8 | Python | false | false | 2,734 | py | '''
A custom Keras layer to perform L2-normalization.
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
import keras.backend as K
import keras
from keras.engine.topology import InputSpec, Layer
class L2Normalization(Layer):
'''
Performs L2 normalization on the input tensor with a learnable scaling parameter
as described in the paper "Parsenet: Looking Wider to See Better" (see references)
and as used in the original SSD model.
Arguments:
gamma_init (int): The initial scaling parameter. Defaults to 20 following the
SSD paper.
Input shape:
4D tensor of shape `(batch, channels, height, width)` if `dim_ordering = 'th'`
or `(batch, height, width, channels)` if `dim_ordering = 'tf'`.
Returns:
The scaled tensor. Same shape as the input tensor.
References:
http://cs.unc.edu/~wliu/papers/parsenet.pdf
'''
def __init__(self, gamma_init=20, **kwargs):
if K.image_data_format() == 'channels_last':
self.axis = 3
else:
self.axis = 1
self.gamma_init = gamma_init
super(L2Normalization, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
#gamma = self.gamma_init * np.ones((input_shape[self.axis],))
#self.gamma = K.variable(gamma, name='{}_gamma'.format(self.name))
#self.trainable_weights = [self.gamma]
self.gamma = self.add_weight(name='{}_gamma'.format(self.name),
shape=(input_shape[self.axis],),
initializer=keras.initializers.Constant(value=self.gamma_init),
trainable=True)
super(L2Normalization, self).build(input_shape)
def call(self, x, mask=None):
output = K.l2_normalize(x, self.axis)
return output * self.gamma
def get_config(self):
config = {
'gamma_init': self.gamma_init
}
base_config = super(L2Normalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
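# Hedged usage sketch (assumes the 'channels_last' data format): scale a
# 4-D feature map the way SSD scales its conv4_3 output. The shape below
# is illustrative; this helper is not used by the layer itself.
def _l2_normalization_demo():
    from keras.layers import Input
    from keras.models import Model
    x = Input(shape=(38, 38, 512))
    y = L2Normalization(gamma_init=20)(x)
    return Model(x, y)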
| [
"[email protected]"
]
| |
6133612965233013406ba7cb943881799c725dd8 | 875c602f53b5775468e5a39161a0cf2bf3a4af53 | /pipify/ShowProxy.py | 1a8189c43ef62b8a1e97050d34e8aaa60c0641ea | []
| no_license | cjsatuforc/fusion360-kaneka | 87bf141e7e2a546a1a6b0a9e7260e51cb4a83c5b | d29037d7432be0be6fea86efabdaaf1faeb864f3 | refs/heads/master | 2020-05-03T12:29:24.776853 | 2018-02-09T19:18:26 | 2018-02-09T19:18:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,504 | py | # (C) Copyright 2015 by Autodesk, Inc.
# Permission to use, copy, modify, and distribute this software in
# object code form for any purpose and without fee is hereby granted,
# provided that the above copyright notice appears in all copies and
# that both that copyright notice and the limited warranty and restricted
# rights notice below appear in all supporting documentation.
#
# AUTODESK PROVIDES THIS PROGRAM "AS IS" AND WITH ALL FAULTS.
# AUTODESK SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTY OF MERCHANTABILITY OR
# FITNESS FOR A PARTICULAR USE. AUTODESK, INC. DOES NOT WARRANT THAT THE
# OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR ERROR FREE.
import adsk.core, adsk.fusion, traceback
# Global variable used to maintain a reference to all event handlers.
handlers = []
dim=0.09375
def getInputs(inputs):
try:
selection = inputs.itemById('selectEnt').selection(0)
face = selection.entity
evalType = inputs.itemById('evalType').selectedItem.name
density = int(inputs.itemById('number').value)
return(evalType, face, density)
except:
app = adsk.core.Application.get()
ui = app.userInterface
if ui:
ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
def add_pip(lines, ent, pip, sketchPoints):
p0 = ent.geometry.getData()[1]
# recLines = lines.addTwoPointRectangle(ent.geometry.getData()[1], ent.geometry.getData()[1])
sketchPoint = sketchPoints.add(p0)
# Move sketch point
translation = adsk.core.Vector3D.create(dim*pip*2, 0, 0)
sketchPoint.move(translation)
x = sketchPoint.geometry.x
y = sketchPoint.geometry.y
z = sketchPoint.geometry.z
p1 = adsk.core.Point3D.create(x + dim, y, z)
lines.addByTwoPoints(sketchPoint.geometry, p1)
p2 = adsk.core.Point3D.create(x + dim, y-dim, z)
lines.addByTwoPoints(p1, p2)
p3 = adsk.core.Point3D.create(x + dim*2, y-dim, z)
lines.addByTwoPoints(p2, p3)
p4 = adsk.core.Point3D.create(x + dim*2, y, z)
lines.addByTwoPoints(p3, p4)
# seg2 = lines.addByTwoPoints(ent.geometry.getData()[1], ent.geometry.getData()[1] + dim)
# seg3 = lines.addByTwoPoints(ent.geometry.getData()[1], ent.geometry.getData()[1] + dim)
# seg4 = lines.addByTwoPoints(ent.geometry.getData()[1], ent.geometry.getData()[1] + dim)
def draw(ent, num_pips):
app = adsk.core.Application.get()
ui = app.userInterface
# doc = app.documents.add(adsk.core.DocumentTypes.FusionDesignDocumentType)
design = adsk.fusion.Design.cast(app.activeProduct)
# Get the root component of the active design.
rootComp = design.rootComponent
# Create a new sketch on the xy plane.
sketches = rootComp.sketches
xyPlane = rootComp.xYConstructionPlane
sketch = sketches.add(xyPlane)
# Get sketch points
sketchPoints = sketch.sketchPoints
# Draw two connected lines.
lines = sketch.sketchCurves.sketchLines;
for x in range(num_pips):
add_pip(lines, ent, x, sketchPoints)
# ExecutePreview event handler class.
class MyExecutePreviewHandler(adsk.core.CommandEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
cmdArgs = adsk.core.CommandEventArgs.cast(args)
# Get the current info from the dialog.
inputs = cmdArgs.command.commandInputs
(evalType, ent, num_pips) = getInputs(inputs)
draw(ent, num_pips)
# Set this property indicating that the preview is a good
# result and can be used as the final result when the command
# is executed.
cmdArgs.isValidResult = True
except:
app = adsk.core.Application.get()
ui = app.userInterface
if ui:
ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Called when the command is executed. However, because this command
# is using the ExecutePreview event and is setting the isValidResult property
# to true, the results created in the preview will be used as the final
# results and the Execute will not be called.
class MyExecuteHandler(adsk.core.CommandEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
# Get the current info from the dialog.
inputs = args.command.commandInputs
(evalType, face, density) = getInputs(inputs)
            draw(face, density)
except:
app = adsk.core.Application.get()
ui = app.userInterface
if ui:
ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# CommandCreated event handler class.
class MyCommandCreatedHandler(adsk.core.CommandCreatedEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
command = adsk.core.Command.cast(args.command)
inputs = command.commandInputs
# Create a selection input to get a selected entity from the user.
selectInput = inputs.addSelectionInput('selectEnt', 'Selection', 'Select an entity')
selectInput.addSelectionFilter('SketchLines')
selectInput.setSelectionLimits(1, 1)
# Create a text box that will be used to display the results.
textResult = inputs.addTextBoxCommandInput('textResult', '', '', 2, True)
# Create a text box that will be used to display the results.
varResult = inputs.addTextBoxCommandInput('varResult', '', '', 2, True)
# Add the selection input to get the points.
            typeInput = inputs.addDropDownCommandInput('evalType', 'Heuristic', adsk.core.DropDownStyles.LabeledIconDropDownStyle)
typeInput.listItems.add('Floor', True, '', -1)
typeInput.listItems.add('Ceiling', True, '', -1)
# Add the unitless value input to get the density.
densityInput = inputs.addValueInput('number', 'NumPips', '', adsk.core.ValueInput.createByString('10'))
baseDimInput = inputs.addDistanceValueCommandInput('baseDim', 'baseDimInput', adsk.core.ValueInput.createByString('3in/32'))
# Connect to the input changed event.
onInputChanged = MyInputChangedHandler()
command.inputChanged.add(onInputChanged)
handlers.append(onInputChanged)
# Connect to the execute preview and execute events.
onExecutePreview = MyExecutePreviewHandler()
command.executePreview.add(onExecutePreview)
handlers.append(onExecutePreview)
except:
app = adsk.core.Application.get()
ui = app.userInterface
if ui:
ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# InputChanged event handler class.
class MyInputChangedHandler(adsk.core.InputChangedEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
# Get the selection command input.
cmdInput = adsk.core.CommandInput.cast(args.input)
if cmdInput.id == 'selectEnt':
selInput = adsk.core.SelectionCommandInput.cast(cmdInput)
# Check that an entity is selected.
if selInput.selectionCount > 0:
ent = selInput.selection(0).entity
# Create a string showing the proxy path.
path = getPath(ent)
entType = ent.objectType
entType = entType.split(':')
entType = entType[len(entType)-1]
path += '/' + entType
# Get the text box command input and display the path string in it.
textResult = cmdInput.parentCommand.commandInputs.itemById('textResult')
textResult.text = path
# Get the text box command input and display the path string in it.
varResult = cmdInput.parentCommand.commandInputs.itemById('varResult')
# if It's a line then this is x, y of the first and second point.
varResult.text = "begin: (" + str(ent.geometry.getData()[1].x) + ", " + str(ent.geometry.getData()[1].y) + ")"
varResult.text += " end: (" + str(ent.geometry.getData()[2].x) + ", " + str(ent.geometry.getData()[2].y) + ")"
# If we were instpecting a point these are the x, y coords
# varResult.text = str(ent.geometry.getData()[1]) + ", " + str(ent.geometry.getData()[2])
except:
app = adsk.core.Application.get()
ui = app.userInterface
if ui:
ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Builds up the string showing the proxy path by stepping up the path from
# the proxy entity itself to each occurrence that defines its context.
def getPath(ent):
path = ''
if ent.assemblyContext:
occ = ent.assemblyContext
while occ:
if path == '':
path = occ.name
else:
path = occ.name + '/' + path
occ = occ.assemblyContext
path = 'Root/' + path
else:
path = 'Root'
return path
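# Hedged examples of the strings getPath produces (occurrence names are
# illustrative):
#
#   getPath(entity_in_root)        -> 'Root'
#   getPath(entity_in_subassembly) -> 'Root/SubAsm:1/Bracket:1'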
def run(context):
ui = None
try:
app = adsk.core.Application.get()
ui = app.userInterface
# Create a new command and connect to the command created event.
buttonDef = ui.commandDefinitions.addButtonDefinition('ekinsShowProxyPath', 'Show Proxy', 'Display the proxy path of the selected entity.', 'Resources/ShowProxy')
onCommandCreated = MyCommandCreatedHandler()
buttonDef.commandCreated.add(onCommandCreated)
handlers.append(onCommandCreated)
# Add a control for the command into the INSPECT panel.
inspectPanel = ui.allToolbarPanels.itemById('InspectPanel')
inspectPanel.controls.addCommand(buttonDef)
except:
if ui:
ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
def stop(context):
ui = None
try:
app = adsk.core.Application.get()
ui = app.userInterface
# Clean up all UI related to this command.
buttonDef = ui.commandDefinitions.itemById('ekinsShowProxyPath')
if buttonDef:
buttonDef.deleteMe()
inspectPanel = ui.allToolbarPanels.itemById('InspectPanel')
if inspectPanel.controls.itemById('ekinsShowProxyPath'):
inspectPanel.controls.itemById('ekinsShowProxyPath').deleteMe()
except:
if ui:
ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
| [
"[email protected]"
]
| |
8abbdd180f33166add8aa0e2afc8656a3e61eb68 | 198dd2fd5b2aa27b950bd5844c97a1ebdbd3af17 | /dephell/repositories/_local.py | 90ceb7d1f9c1045a3bc1c9a026ebe0a9eea2cb71 | [
"MIT"
]
| permissive | espdev/dephell | 68411b20c1830836dcea0eec96a8bd15e95171d5 | 17d5604e7b443b4d58bffc635a139adb49431efc | refs/heads/master | 2020-11-26T01:05:07.580285 | 2019-12-20T14:29:07 | 2019-12-20T14:29:07 | 228,915,765 | 0 | 0 | MIT | 2019-12-18T20:24:21 | 2019-12-18T20:24:20 | null | UTF-8 | Python | false | false | 4,172 | py | # built-in
from datetime import datetime
from pathlib import Path
from typing import Optional, Tuple, Union
# app
from ..cache import RequirementsCache
from ..config import Config
from ..constants import FILES
from ..models.release import Release
from ._warehouse import WarehouseLocalRepo
from .base import Interface
class LocalRepo(Interface):
def __init__(self, path: Union[Path, str]):
if type(path) is str:
path = Path(path)
self.path = path
def get_releases(self, dep) -> Tuple[Release, ...]:
releases = []
dist_path = (self.path / 'dist')
if dist_path.exists():
repo = WarehouseLocalRepo(name='tmp', path=dist_path)
releases = list(repo.get_releases(dep=dep))
root = self.get_root(name=dep.name, version='0.0.0')
self.update_dep_from_root(dep=dep, root=root)
releases.append(Release(
raw_name=root.raw_name,
version=root.version,
time=datetime.fromtimestamp(self.path.stat().st_mtime),
))
return tuple(reversed(releases))
async def get_dependencies(self, name: str, version: str, extra: Optional[str] = None) -> tuple:
cache = RequirementsCache('local', 'deps', name, str(version))
deps = cache.load()
if deps:
return deps
root = self.get_root(name=name, version=version)
deps = root.dependencies
if extra:
deps = tuple(dep for dep in deps if extra in dep.envs)
cache.dump(root=root)
return deps
def get_root(self, name: str, version: str):
from ..converters import EggInfoConverter, SDistConverter, WheelConverter, CONVERTERS
if not self.path.exists():
raise FileNotFoundError(str(self.path))
# load from file
if self.path.is_file():
for converter in CONVERTERS.values():
if converter.can_parse(path=self.path):
return converter.load(path=self.path)
raise LookupError('cannot find loader for file ' + str(self.path))
# get from wheel or sdist
patterns = (
('-*-*-*.whl', WheelConverter()),
('.tar.gz', SDistConverter()),
('.tgz', SDistConverter()),
)
for suffix, converter in patterns:
paths = tuple(self.path.glob('**/{name}-{version}{suffix}'.format(
name=name.replace('-', '_'),
version=str(version),
suffix=suffix,
)))
if paths:
path = min(paths, key=lambda path: len(path.parts))
return converter.load(path=path)
# read from egg-info
path = self.path / (name + '.egg-info')
if path.exists():
return EggInfoConverter().load(path=path)
# read from dephell config
path = self.path / 'pyproject.toml'
if path.exists():
config = Config().attach_file(path=path, env='main')
if config is not None:
section = config.get('to') or config.get('from')
if section and 'path' in section and 'format' in section:
converter = CONVERTERS[section['format']]
path = self.path.joinpath(section['path'])
return converter.load(path)
# get from dependencies file
for fname in FILES:
path = self.path / fname
if not path.exists():
continue
for converter in CONVERTERS.values():
if converter.can_parse(path=path):
return converter.load(path=path)
raise LookupError('cannot find dependencies in ' + str(self.path))
@staticmethod
def update_dep_from_root(dep, root) -> None:
if not dep.description:
dep.description = root.description
if not dep.authors:
dep.authors = root.authors
if not dep.links:
dep.links = root.links
if not dep.classifiers:
dep.classifiers = root.classifiers
if not dep.license:
dep.license = root.license
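# Hedged usage sketch (the project path and name are illustrative):
#
#   repo = LocalRepo(Path('/tmp/myproject'))
#   root = repo.get_root(name='myproject', version='0.0.0')
#   deps = root.dependencies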
| [
"[email protected]"
]
| |
033a0c1772872da6d3ec1735dadf3faddd9151e8 | 9fe8442df6f27791f8798d5905969a85ae705dd4 | /Python_Stack/Django/Django_Orm/Users_Shell/Shell_App/models.py | f638bb1644c0850de0525c46a0966d2155c451dc | []
| no_license | cpugao/Python | 225493da5d1c8f146680182cd2951d81c5a087ff | 77a8d6f884543ae6679d9635a1eb96d6dda3bc10 | refs/heads/master | 2022-12-21T11:23:22.624304 | 2020-09-08T07:34:58 | 2020-09-08T07:34:58 | 293,735,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | from django.db import models
class User(models.Model):
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
email_address = models.EmailField(max_length=255)
age = models.IntegerField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
# Create your models here.
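# Hedged shell sketch (run inside `python manage.py shell`; the data is
# illustrative):
#
#   from Shell_App.models import User
#   User.objects.create(first_name="Ada", last_name="Lovelace",
#                       email_address="[email protected]", age=36)
#   User.objects.filter(age__gte=30).count()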
| [
"[email protected]"
]
| |
4c40cb98ca63c117e54ac2a813fc76d9104e8a5b | 88ba7e002797738d2f7496a70158931f31992e57 | /src/ANN/ANNTest.py | 560f606b973aeb3685629e144e15a1a875e68462 | []
| no_license | HUbbm409/bbm406-project-what-is-this-books-genre | 6a9464dda8ae80bf23017b2b3252cea1600ac674 | 541391f1fa11ba039374dec334161042c6315eef | refs/heads/master | 2020-04-13T06:18:43.608987 | 2019-01-18T16:44:02 | 2019-01-18T16:44:02 | 163,016,881 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,822 | py | from object_management.object_manager import ObjectManager
import keras
import gensim
import numpy as np
from sklearn.metrics import hamming_loss
from sklearn.metrics import zero_one_loss
import ast
import os
from pathlib import Path
def ConvertGenreListToVector(data):
genreTypes = {' Fiction': 0, ' Speculative fiction': 1, ' Science Fiction': 2, ' Novel': 3, ' Fantasy': 4,
" Children's literature": 5, ' Mystery': 6, ' Young adult literature': 7, ' Suspense': 8,
' Crime Fiction': 9, ' Historical novel': 10, ' Thriller': 11, ' Horror': 12, ' Romance novel': 13,
' Historical fiction': 14, ' Detective fiction': 15, ' Adventure novel': 16, ' Non-fiction': 17,
' Alternate history': 18, ' Spy fiction': 19, ' Comedy': 20, ' Dystopia': 21, ' Autobiography': 22,
' Satire': 23, ' Gothic fiction': 24, ' Comic novel': 25, ' Biography': 26}
labels = [] # represent books all genres as a vector
for genreList in data["GenreList"]:
vector = [0] * 27
genres = ast.literal_eval(genreList)
for genre in genres:
vector[genreTypes.get(genre)] = 1.0
labels.append(vector)
return np.array(labels)
def Word2Vec(model, data):
temp = []
for x in data:
count = 0
vector = np.zeros((300,))
for word in x.split(' '):
try:
vector += model.get_vector(word)
count += 1
except KeyError:
continue
if count == 0:
vector = np.zeros((300,))
count = 1
print(x)
temp.append(vector / count)
return temp
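# Hedged mini-example of the averaging above, using toy 3-d vectors in
# place of the 300-d GoogleNews embeddings:
def _average_demo():
    toy = {"good": np.array([1.0, 0.0, 0.0]), "book": np.array([0.0, 1.0, 0.0])}
    return (toy["good"] + toy["book"]) / 2  # -> array([0.5, 0.5, 0.0])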
# -- Get data --
directory = Path().absolute()
modelFiles = os.listdir(str(directory) + "\\Model")
word2vecModel = gensim.models.KeyedVectors.load_word2vec_format('../2vecmodels/GoogleNews-vectors-negative300.bin', binary=True)
objManager = ObjectManager()
testData = objManager.ReadObject(str(directory)+"\\RecordedObject\\TestData")
file = open("log.txt","w")
for modelName in modelFiles:
file.write(str(modelName) + "\n")
model = keras.models.load_model(str(directory) + '\\Model\\' + str(modelName) )
# -- Test Data --
testSummary = testData["Summary"] # novel's summaries for test
testGenre = testData["Genre"] # novel's genres for test
tempTest = Word2Vec(word2vecModel, testSummary)
x_test = np.array(tempTest)
y_test = ConvertGenreListToVector(testData)
# Multilabel classifier
    thresholdList = [0.1, 0.15, 0.175, 0.2, 0.225, 0.25, 0.275, 0.30, 0.35, 0.40, 0.45, 0.50]
    # -My Prediction-
    for th in thresholdList:
        print("-- Threshold Value: ", th, " --")
        file.write("-- Threshold Value: " + str(th) + " --" + "\n")
predict = model.predict(x_test, 32)
predict[predict >= th] = 1
predict[predict < th] = 0
predictionHit = 0
for i in range(predict.shape[0]):
            pre = [idx for idx, e in enumerate(predict[i]) if e == 1]
            acc = [idx for idx, e in enumerate(y_test[i]) if e == 1]
hitNum = 0
for j in pre:
if acc.__contains__(j):
hitNum += 1
hitRate = hitNum / len(acc)
if hitRate > 0.65:
predictionHit += 1
print('Test accuracy:', (predictionHit / predict.shape[0]))
file.write('Test accuracy:'+ str(predictionHit / predict.shape[0]) + "\n")
print("Hamming Loss:", hamming_loss(np.array(y_test),predict ))
print("Zero One Loss:", zero_one_loss(np.array(y_test),predict ))
file.write("Hamming Loss:" + str(hamming_loss(np.array(y_test),predict)) + "\n")
file.write("Zero One Loss:" + str(zero_one_loss(np.array(y_test),predict)) + "\n")
file.close()
print("Done")
| [
"[email protected]"
]
| |
67c5c82a547f0328a5f2785a7f49e1a9f8b45785 | 1050b277f6cb4bf4535c913dcf699c51fc705dd1 | /pysp/projects/shield/metahumans/migrations/0004_metahuman_last_update.py | e6025ee3ee97df1d184e5429f1ccd7eb01954743 | []
| no_license | joserequenaidv/my-eoi | 7d680d6a48f688867eae37c1af103a54d8cf57b9 | 019d0fffbee95372eb63c378d0c93b66b6e6fedf | refs/heads/master | 2022-12-06T19:52:29.703696 | 2020-08-12T14:27:38 | 2020-08-12T14:27:38 | 267,091,694 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | # Generated by Django 3.0.5 on 2020-05-04 10:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('metahumans', '0003_metahuman'),
]
operations = [
migrations.AddField(
model_name='metahuman',
name='last_update',
field=models.DateTimeField(auto_now=True),
),
]
| [
"[email protected]"
]
| |
53de45efc62f9e11b91f03a858fe16a54ff85139 | bc105a7586ed67a352430563d36770078f2b3a29 | /most-recent-seasonal-spotify-playlist-generator.py | 201c6dca176ea15c06ff02e4be8530bd6769e48f | []
| no_license | neasatang/seasonal-spotify-playlist-generator | 1692957deccb56fa49c85cfe05d4416a3ce79a64 | 843a8175bf8f62294656d1da30e86f300bfd34d4 | refs/heads/master | 2023-02-08T00:55:18.594188 | 2020-12-29T16:01:11 | 2020-12-29T16:01:11 | 239,403,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,688 | py | import os
import datetime
import spotipy
from spotipy.oauth2 import SpotifyOAuth
spring_months = ("02", "03", "04")
summer_months = ("05", "06", "07")
autumn_months = ("08", "09", "10")
winter_months = ("01", "11", "12")
seasons = {
"01": winter_months,
"04": spring_months,
"07": summer_months,
"10": autumn_months
}
def get_season(month):
if month in ("01","11","12"):
return " Winter '", "❄"
elif month in ("02","03","04"):
return " Spring '", "🌸"
elif month in ("05", "06", "07"):
return " Summer '", "🌞"
else:
return " Autumn '", "🍂"
def add_to_specific_season_playlist(year, month, track, months):
season = get_season(month)
if month == "01":
change_year = int(year) - 1
year = str(change_year)
playlist_name = season[0] + year[-2:] + " " + season[1]
playlist_exists = False
playlist_id = ""
user_playlists = sp.current_user_playlists()
for item in user_playlists["items"]:
if playlist_name == item['name']:
playlist_id = item["id"]
playlist_exists = True
if playlist_exists is False:
newly_created_playlist = sp.user_playlist_create(os.environ["USER_ID"], playlist_name, description= "Automatically generated playlist for" + season[0] + year[-2:] + season[1] + " https://github.com/neasatang/monthly-spotify-playlist-generator" )
playlist_id = newly_created_playlist["id"]
sp.playlist_add_items(playlist_id, {track})
scope = "playlist-modify-public"
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
results = sp.current_user_playlists()
playlist = sp.playlist(os.environ["PLAYLIST_ID"])
total = playlist["tracks"]["total"]
offset = 0
temp_offset = 0
while offset != total:
playlist_tracks = sp.playlist_items(os.environ["PLAYLIST_ID"], offset=offset)
for item in playlist_tracks["items"]:
date = item["added_at"].split("-")
now = datetime.datetime.now()
previous_month = str(now.month-1)
# edge case
if previous_month == "0":
previous_month = "12"
if len(previous_month) < 2:
previous_month = "0" + previous_month
if previous_month in seasons and str(now.year) == date[0] and \
date[1] in seasons.get(previous_month):
if item["track"] is not None:
track_id = item["track"]["id"]
if track_id is not None:
add_to_specific_season_playlist(date[0], date[1], track_id, seasons.get(previous_month))
temp_offset += 1
offset = temp_offset
| [
"[email protected]"
]
| |
f7eacb3ff42e16ef6d307a110f5e395dc4aaec5a | 829dc287dbdbd1b73a65035d3be67a22ea37a140 | /test.py | 8411f87c1d6b22e3dd4402e562b56f07cf1916b7 | []
| no_license | JesseE/flask-prototype-sqldata | 24a3ef0f35dc1639b10dba1eb9ef20b9b3507f5a | e14874eea4773893b9a5130b4fd4b643ce3212e6 | refs/heads/master | 2021-01-09T20:32:47.744412 | 2016-05-31T14:59:36 | 2016-05-31T14:59:36 | 60,099,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,437 | py | from app import app
import unittest
class FlaskTestCase(unittest.TestCase):
    # Ensure that Flask was set up correctly
def test_index(self):
#isolated test client like selenium
tester = app.test_client(self)
response = tester.get('/login', content_type='html/text')
self.assertEqual(response.status_code, 200)
    # Ensure that the login page loads correctly
def test_login_page_loads(self):
#isolated test client like selenium
tester = app.test_client(self)
response = tester.get('/login', content_type='html/text')
self.assertTrue(b'Please login' in response.data)
    # Ensure that login behaves correctly given correct credentials
def test_correct_login(self):
tester = app.test_client()
response = tester.post(
'/login',
data=dict(username="admin", password="admin"),
follow_redirects=True
)
self.assertIn(b'you were just logged in', response.data)
    # Ensure that login fails correctly given incorrect credentials
def test_incorrect_login(self):
tester = app.test_client()
response = tester.post(
'/login',
data=dict(username="wrong", password="wrong"),
follow_redirects=True
)
self.assertIn(b'Invalid credentilals. please try again.', response.data)
    # Ensure that logging out works correctly
def test_logout(self):
tester = app.test_client()
tester.post(
'/logout',
data=dict(username="admin", password="admin"),
follow_redirects=True
)
response = tester.get('/logout', follow_redirects=True)
self.assertIn(b'You need to login first.', response.data)
# Ensure that the main page requires login
def test_main_route_requires_login(self):
tester = app.test_client(self)
response = tester.get('/', follow_redirects=True)
self.assertTrue(b'You need to login first.' in response.data)
    # Ensure that posts show up on the main page
def test_post_show_up(self):
tester = app.test_client(self)
response = tester.post(
'/login',
data=dict(username="admin",password="admin"),
follow_redirects=True
)
self.assertIn(b'name:',response.data)
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
]
| |
654213eace46387f5fd43bd5e09a377bfc08c864 | 916d152cef8955d68465e963e131e10db02e431c | /bin/count_lines.py | f3a76cbaeaa78a9165926a99eb4a4e83d20122c4 | []
| no_license | camgunz/profile | fa57f44f2f94eaee90e99249440435bbaa0f0b15 | 7ab9e02b3750096c6a46ff83a28dd9413d8a8d28 | refs/heads/master | 2021-09-26T14:12:55.968071 | 2021-09-16T15:10:44 | 2021-09-16T15:10:44 | 31,148,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | #!/usr/bin/env python
import sys
import os.path
def main():
total = 0
for file_name in sys.argv[1:]:
if not os.path.isfile(file_name):
continue
with open(file_name, 'r') as fobj:
line_count = len([ln for ln in fobj.readlines() if ln.strip()])
print('%s: %s' % (file_name, line_count))
total += line_count
print('------------------------------')
print(f'Total: {total}')
if __name__ == '__main__':
    main()
| [
"[email protected]"
]
| |
b8e64eea9a8b89c992011677975679292443dff5 | 259bf472d38d05ea86f23932789f5fb7f264df89 | /projectApps/consumers.py | ed362646e24bf9f2b8800ab761743d1a1ce6b81f | []
| no_license | andrewryan/ClassCollabs | 0fc3416e5f5fed59aae6aa2fad8a2e8373a7c74b | e73bb0164ce8e8f64f794b4fbd1509adfa09cf96 | refs/heads/master | 2021-01-20T18:04:13.536089 | 2017-07-18T22:35:27 | 2017-07-18T22:35:27 | 90,900,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | import json
from channels import Group
from channels.auth import channel_session_user, channel_session_user_from_http
# When a user logs in, add them to the 'users' group and broadcast
# their username and 'is_logged_in' status; websocket messages must
# be JSON-serializable text
@channel_session_user_from_http
def ws_connect(message):
Group('users').add(message.reply_channel)
Group('chat').add(message.reply_channel)
Group('users').send({
'text': json.dumps({
'username': message.user.username,
'is_logged_in': True
})
})
# When the user logs out remove them from the 'users' group,
# and change the is_logged_in value to false
@channel_session_user
def ws_disconnect(message):
Group('users').send({
'text': json.dumps({
'username': message.user.username,
'is_logged_in': False
})
})
Group('users').discard(message.reply_channel)
Group("chat").discard(message.reply_channel)
# Trying to send messages through websockets for chat,
# 'text' represents the data of the message being sent
def ws_message(message):
Group('chat').send({
"text": "[user] %s" % message.content['text'],
})
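# Hedged routing sketch (Channels 1.x style, matching the imports above);
# this would normally live in a separate routing.py:
#
#   from channels.routing import route
#   channel_routing = [
#       route("websocket.connect", ws_connect),
#       route("websocket.receive", ws_message),
#       route("websocket.disconnect", ws_disconnect),
#   ]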
| [
"[email protected]"
]
| |
8a5e173f87c878730e0dfa0966878e78abf3d25e | 3e5d04efc67876ddd3cc490f4254b43d81287c69 | /task_list/urls.py | adea6b8465672d6033e912421b32e4fc5eebe08d | []
| no_license | asattelmaier/django_task_list | f7208b94fda5b5fd30e84bc175f030acea43eb78 | 0bd949a12d63e191ac097c4dd0d05115ecc478c8 | refs/heads/master | 2021-09-13T11:52:14.475084 | 2018-04-29T13:56:50 | 2018-04-29T13:56:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | """task_list URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from tasks.views import get_task_list
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
path('', get_task_list, name='task_list'),
] | [
"[email protected]"
]
| |
6bf0b46a1682e0078e8be749f54915500deaa131 | 81a551f87fc7ea1ce501e0f92f0bf787562299b3 | /pip_stripper/_pip_stripper.py | 3c419f84503f987c0933f8b411541416e898701b | [
"MIT"
]
| permissive | jpeyret/pip-stripper | c35bd7c9fd544a6e24180e3e544c067c3e5f48f7 | bf50bf80915acfaaf501db914c26c4265bf4d6a4 | refs/heads/master | 2023-01-05T01:22:18.502356 | 2019-04-23T04:28:49 | 2019-04-23T04:28:49 | 180,931,747 | 4 | 1 | MIT | 2022-12-26T20:47:21 | 2019-04-12T04:43:22 | Python | UTF-8 | Python | false | false | 10,834 | py | # -*- coding: utf-8 -*-
"""Main module."""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""doc for _main.py - """
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
import sys
import argparse
import re
import os
import subprocess
import distutils.sysconfig as sysconfig
from traceback import print_exc as xp
import pdb
from pip_stripper._baseutils import (
set_cpdb,
set_rpdb,
ppp,
debugObject,
cpdb,
fill_template,
rpdb,
sub_template,
)
from pip_stripper.writers import ScanWriter
from pip_stripper.matching import Matcher
from pip_stripper.pip import ClassifierPip
from pip_stripper.trackimports import ClassifierImport
from pip_stripper.common import Command
from pip_stripper.builder import Builder
from yaml import safe_load as yload, dump
if __name__ == "__main__":
set_cpdb(cpdb, remove=True)
dn_script = os.path.dirname(__file__)
if not dn_script:
fnp_script = os.path.join(dn_cwd_start, sys.argv[0])
dn_script = os.path.dirname(fnp_script)
class Main(object):
""" manages batch"""
di_opt = {}
def __repr__(self):
return self.__class__.__name__
def _get_fnp(self, subject):
try:
if subject == "templatedir":
return os.path.join(dn_script, "templates")
else:
fn = self.config["vars"]["filenames"][subject]
return os.path.join(self.workdir, fn)
except (Exception,) as e:
if cpdb():
pdb.set_trace()
raise
def __init__(self, options):
try:
self.options = options
pwd = os.getcwd()
self.workdir = self.options.workdir or pwd
self.config = None
fnp_config = self.options.config
if self.options.init:
fnp_config = self._initialize(fnp_config)
if not fnp_config:
for dn in [self.workdir, pwd]:
fnp_config = os.path.join(dn, self.FN_CONFIG)
try:
with open(fnp_config) as fi:
self.config = yload(fi)
break
except (IOError,) as e:
pass
else:
msg = "missing configuration file. perhaps you wanted to use the --init option to create one?"
print(msg)
sys.exit(1)
else:
with open(fnp_config) as fi:
self.config = yload(fi)
self.scan = not self.options.noscan
#
self.vars = dict()
self.vars["scandir"] = self.workdir
sectionname = "filenames"
section = self.config["vars"][sectionname]
for k, v in section.items():
self.vars.update(**{"%s_%s" % (sectionname, k): v})
self.import_classifier = ClassifierImport(self)
self.scanwriter = ScanWriter(self)
self.matcher = Matcher()
self.builder = Builder(self)
except (ValueError,) as e:
raise
except (Exception,) as e:
if cpdb():
pdb.set_trace()
raise
def process(self):
try:
if self.scan:
self.scanner = Scanner(self)
self.scanner.run()
self.import_classifier.run()
for name in self.import_classifier.packagetracker.di_packagename:
self.matcher.imp.feed(name)
pips = self.pip_classifier = ClassifierPip(self)
for set_ in pips.di_bucket.values():
[self.matcher.pip.feed(name) for name in set_]
pips.run(self.import_classifier.packagetracker)
# for name in self.li_pip:
self.scanwriter.write()
if self.options.build:
self.builder.process()
except (Exception,) as e:
if cpdb():
pdb.set_trace()
raise
DN = os.path.dirname(__file__)
FN_CONFIG = "pip-stripper.yaml"
_s_stdlib = None
@property
def s_stdlib(self):
"""load the std lib import names"""
if self._s_stdlib is None:
self._s_stdlib = liststdlib()
self._s_stdlib |= set(self.config.get("extra_stdlib", []))
return self._s_stdlib
_aliases = _imp2pip = None
@property
def imp2pip(self):
"""uses the aliases to look up import name to pip name """
if self._imp2pip is None:
self._imp2pip = {v: k for k, v in self.aliases.items()}
return self._imp2pip
@property
def aliases(self):
if self._aliases is None:
self._aliases = Matcher.match_all(self)
# self._aliases = self.matcher.di_pip_imp.copy()
self._aliases.update(**self.config.get("hardcoded_aliases", {}))
return self._aliases
pip2imp = aliases
_raw_imports = None
@property
def raw_imports(self):
if self._raw_imports is None:
fnp = self._get_fnp("imports")
with open(fnp) as fi:
self._raw_imports = fi.readlines()
return self._raw_imports
_all_imports = None
@property
def all_imports(self):
"""loads the grep-ed import scans on demand"""
if self._all_imports is None:
self._all_imports = set(
self.import_classifier.packagetracker.di_packagename
)
return self._all_imports
_all_freezes = None
_all_pips = None
@property
def all_freezes(self):
if self._all_freezes is None:
# this triggers the pips which what populates
# the freezes...
self.all_pips
return self._all_freezes
@property
def all_pips(self):
"""loads the pip freeze output on demand"""
if self._all_pips is None:
self._all_pips = set()
self._all_freezes = {}
fnp = self._get_fnp("freeze")
with open(fnp) as fi:
for line in fi.readlines():
try:
packagename = self.pip_classifier.parse_requirement_line(line)
except (ValueError,) as e:
logger.warning("could not parse packagename on %s" % (line))
continue
self._all_pips.add(packagename)
self._all_freezes[packagename] = line.strip()
return self._all_pips
@classmethod
def getOptParser(cls):
parser = argparse.ArgumentParser()
dest = "config"
parser.add_argument(
"--" + dest,
action="store",
help="config file. if not provided will look for %s in --workdir, current directory "
% (cls.FN_CONFIG),
)
dest = "noscan"
default = False
parser.add_argument(
"--" + dest,
default=default,
action="store_true",
help="don't scan to classify packages. build phase will re-use existing pip-stripper.scan.yaml. [%s]. "
% (default),
)
dest = "build"
default = False
parser.add_argument(
"--" + dest,
default=default,
action="store_true",
help="read pip-stripper.scan.yaml to create requirements.prod/dev.txt [%s]"
% (default),
)
dest = "init"
parser.add_argument(
"--" + dest,
action="store_true",
help="initialize the config file (as %s) if it doesn't exist"
% (cls.FN_CONFIG),
)
dest = "workdir"
parser.add_argument(
"--" + dest,
action="store",
help="work directory [defaults to config file's value or current directory]",
)
dest = "verbose"
default = False
parser.add_argument(
"--" + dest,
default=default,
action="store_true",
help="verbose mode. adds extra zzz_debug: entry to pip-stripper.scan.yaml [%s]"
% (default),
)
return parser
def _initialize(self, fnp_config):
"""--init option handling"""
try:
fnp_config = fnp_config or os.path.join(self.workdir, self.FN_CONFIG)
if os.path.isfile(fnp_config):
print(
"pip-stripper configuration file exists already at @ %s. leaving it alone"
% (fnp_config)
)
return fnp_config
# load the template file
fnp_template = os.path.join(self.DN, "templates/pip-stripper.yaml")
with open(fnp_template) as fi:
tmpl = fi.read()
seed = fill_template(tmpl, self)
with open(fnp_config, "w") as fo:
fo.write(seed)
print("pip-stripper configuration generated @ %s" % (fnp_config))
return fnp_config
except (Exception,) as e:
if cpdb():
pdb.set_trace()
raise
def liststdlib():
"""
pretty grungy code, will need a rework
"""
listed = set()
std_lib = sysconfig.get_python_lib(standard_lib=True)
for top, dirs, files in os.walk(std_lib):
for nm in files:
if nm != "__init__.py" and nm[-3:] == ".py":
found = os.path.join(top, nm)[len(std_lib) + 1 : -3].replace("\\", ".")
found = found.split("/")[0]
listed.add(found)
return listed
class Scanner(object):
def __init__(self, mgr):
self.mgr = mgr
self.config = self.mgr.config.get(self.__class__.__name__)
self.tasknames = self.config["tasknames"]
def run(self):
try:
for taskname in self.tasknames:
config = self.mgr.config.get("Command")["tasks"][taskname]
command = Command(self.mgr, taskname, config)
command.run()
fnp_out = os.path.join(
self.mgr.workdir, self.mgr.config["vars"]["filenames"]["liststdlib"]
)
except (Exception,) as e:
if cpdb():
pdb.set_trace()
raise
def main(args=None):
"""the console_scripts entry point"""
if args is None:
args = sys.argv[1:]
parser = Main.getOptParser()
options = parser.parse_args(args)
mgr = Main(options)
mgr.process()
if __name__ == "__main__":
# conditional pdb.trace()-ing with --cpdb on command line
set_cpdb(cpdb, remove=True)
main()
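# Hedged CLI sketch using the flags defined in Main.getOptParser (the
# console-script name is an assumption based on the package name):
#
#   pip-stripper --init            # seed pip-stripper.yaml in the workdir
#   pip-stripper                   # scan and classify imports vs. pip freeze
#   pip-stripper --noscan --build  # reuse the scan to build requirements files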
| [
"[email protected]"
]
| |
0e056bd4bb9433bc9d9431ddbca1ea8006b3f319 | dcb57598b4b7f9fb7a25f847574b227824b5420c | /tests/integration_tests/test_features.py | 53172ef02b0bf7875a94cad0a5af4b9fa26394f8 | [
"MIT"
]
| permissive | netor27/features-webapp | 473a0f393bea5d30d654d1880869fe8daebe4c5b | 801d582975de7734c90bf0a4c95f404cbf305e86 | refs/heads/master | 2021-05-11T07:36:06.494569 | 2018-05-08T17:54:15 | 2018-05-08T17:54:15 | 118,023,763 | 0 | 2 | MIT | 2018-05-08T17:54:16 | 2018-01-18T18:48:42 | Python | UTF-8 | Python | false | false | 17,571 | py | import pytest
from unittest import TestCase
from flask import url_for, json
from datetime import date
from web.server import create_app
from web.db import db
from web.status import status
from web.models import Feature
from tests.integration_tests.post_helpers import PostHelper
class FeaturesTests(TestCase):
@pytest.fixture(autouse=True)
def transact(self, request, configfile, waitForDb):
self.app = create_app(configfile, waitForDb)
self.test_client = self.app.test_client()
self.app_context = self.app.app_context()
self.app_context.push()
self.test_user_name = 'testuserusers'
self.test_user_password = 'T3s!p4s5w0RDd12#'
self.ph = PostHelper(self.test_client, self.test_user_name, self.test_user_password)
db.create_all()
yield
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_create_and_retrieve_feature(self):
"""
Ensure we can create a new Feature and then retrieve it
"""
# create our user so we can authenticate
res = self.ph.create_user(self.test_user_name, self.test_user_password)
self.assertEqual(res.status_code, status.HTTP_201_CREATED,
res.get_data(as_text=True))
# create a new feature, assert we receive a 201 http code and and assert there's only one Feature in the db
title = 'New Feature Title'
description = 'Description ' * 10
target_date = date(2018, 6, 15)
priority = 1
client = 'Client 1'
area = 'Billing'
post_res = self.ph.create_feature(
title, description, target_date, priority, client, area)
self.assertEqual(
post_res.status_code, status.HTTP_201_CREATED, post_res.get_data(as_text=True))
self.assertEqual(Feature.query.count(), 1)
# check that the returned values in the post response are correct
post_res_data = json.loads(post_res.get_data(as_text=True))
self.assertEqual(post_res_data['title'], title)
self.assertEqual(post_res_data['description'], description)
self.assertEqual(post_res_data['target_date'], target_date.isoformat())
self.assertEqual(post_res_data['client_priority'], priority)
self.assertEqual(post_res_data['client']['name'], client)
self.assertEqual(post_res_data['area']['name'], area)
# get the new feature url, retrieve it and assert the correct values
feature_url = post_res_data['url']
res = self.test_client.get(
feature_url,
headers=self.ph.get_authentication_headers())
res_data = json.loads(res.get_data(as_text=True))
self.assertEqual(res.status_code, status.HTTP_200_OK,
res.get_data(as_text=True))
self.assertEqual(res_data['title'], title)
self.assertEqual(res_data['description'], description)
self.assertEqual(res_data['target_date'], target_date.isoformat())
self.assertEqual(res_data['client_priority'], priority)
self.assertEqual(res_data['client']['name'], client)
self.assertEqual(res_data['area']['name'], area)
def test_retrieve_features_list(self):
"""
Ensure we can retrieve the features list
"""
# create our user so we can authenticate
res = self.ph.create_user(self.test_user_name, self.test_user_password)
self.assertEqual(res.status_code, status.HTTP_201_CREATED,
res.get_data(as_text=True))
# create 4 features and assert the response
for i in range(1, 5):
title = 'New Feature Title {}'.format(i)
description = 'Description {}'.format(i)
target_date = date(2018, 6, i)
priority = i
client = "Client"
area = "Billing"
post_res = self.ph.create_feature(
title, description, target_date, priority, client, area)
self.assertEqual(
post_res.status_code, status.HTTP_201_CREATED, post_res.get_data(as_text=True))
# assert we only have this 4
self.assertEqual(Feature.query.count(), 4)
# retrieve the complete list of features, it should return only the 4 we created
url = url_for('api.featurelistresource', _external=True)
res = self.test_client.get(
url,
headers=self.ph.get_authentication_headers())
res_data = json.loads(res.get_data(as_text=True))
self.assertEqual(res.status_code, status.HTTP_200_OK,
res.get_data(as_text=True))
self.assertEqual(res_data['count'], 4)
def test_update_feature(self):
"""
        Ensure we can update the fields of an existing feature
"""
# create our user so we can authenticate and create the feature
res = self.ph.create_user(self.test_user_name, self.test_user_password)
self.assertEqual(res.status_code, status.HTTP_201_CREATED,
res.get_data(as_text=True))
        # create a new feature, assert we receive a 201 http code and assert there's only one Feature in the db
title = 'New Feature Title'
description = 'Description ' * 10
target_date = date(2018, 6, 15)
priority = 1
client = 'Client 1'
area = 'Billing'
post_res = self.ph.create_feature(
title, description, target_date, priority, client, area)
self.assertEqual(
post_res.status_code, status.HTTP_201_CREATED, post_res.get_data(as_text=True))
self.assertEqual(Feature.query.count(), 1)
post_res_data = json.loads(post_res.get_data(as_text=True))
# Create a new area and a new client, so we test we can update those too
area = "New Area"
res = self.ph.create_area(area)
self.assertEqual(res.status_code, status.HTTP_201_CREATED, res.get_data(as_text=True))
client = "New Client"
res = self.ph.create_client(client)
self.assertEqual(res.status_code, status.HTTP_201_CREATED, res.get_data(as_text=True))
# Create the patch request with the updated values
feature_url = post_res_data['url']
title = 'Updated Title'
description = 'Updated Description ' * 10
target_date = date(2018, 5, 19)
priority = 15
data = {'title': title, 'description': description, 'target_date': target_date.isoformat(),
'client_priority': priority, 'client': client, 'area': area}
patch_response = self.test_client.patch(
feature_url,
headers=self.ph.get_authentication_headers(),
data=json.dumps(data))
self.assertEqual(patch_response.status_code,
status.HTTP_200_OK, patch_response.get_data(as_text=True))
# retrieve the updated feature and validate the name is the same as the updated value
res = self.test_client.get(
feature_url,
headers=self.ph.get_authentication_headers())
res_data = json.loads(res.get_data(as_text=True))
self.assertEqual(res.status_code, status.HTTP_200_OK, res.get_data(as_text=True))
self.assertEqual(res_data['title'], title)
self.assertEqual(res_data['description'], description)
self.assertEqual(res_data['target_date'], target_date.isoformat())
self.assertEqual(res_data['client_priority'], priority)
self.assertEqual(res_data['area']['name'], area)
self.assertEqual(res_data['client']['name'], client)
def test_features_priority_adjustment_when_adding_a_new_feature(self):
"""
        Ensure that creating a new feature with the same priority as an existing one adjusts the priorities
"""
# create our user so we can authenticate
res = self.ph.create_user(self.test_user_name, self.test_user_password)
self.assertEqual(res.status_code, status.HTTP_201_CREATED,
res.get_data(as_text=True))
# create 4 features and assert the response
for i in range(10):
title = 'Title {}'.format(i+1)
description = 'Description {}'.format(i+1)
target_date = date(2018, 6, 1)
priority = i+1
client = "Client"
area = "Billing"
post_res = self.ph.create_feature(title, description, target_date, priority, client, area)
self.assertEqual(post_res.status_code, status.HTTP_201_CREATED, post_res.get_data(as_text=True))
# assert we only have this 10 features (with priorities from 1 to 10)
self.assertEqual(Feature.query.count(), 10)
        # create a new one with priority 5, so the service must update all the priorities that are higher than or equal to 5
        title = 'New Feature'
        description = 'Description'
        target_date = date(2018, 6, 1)
priority = 5
client = "Client"
area = "Billing"
post_res = self.ph.create_feature(title, description, target_date, priority, client, area)
self.assertEqual(post_res.status_code, status.HTTP_201_CREATED, post_res.get_data(as_text=True))
# Query all the priorities and verify they are updated correctly
url = url_for('api.featurelistresource', _external=True, page=1, size=11)
res = self.test_client.get(
url,
headers=self.ph.get_authentication_headers())
res_data = json.loads(res.get_data(as_text=True))
self.assertEqual(res.status_code, status.HTTP_200_OK,
res.get_data(as_text=True))
self.assertEqual(res_data['count'], 11)
# because it's a new db for this test, the id should be the same as the priority before we updated them
features = res_data['results']
for i in range(11):
id = features[i]['id']
priority = features[i]['client_priority']
if id <= 4:
self.assertEqual(priority, id)
elif id == 11:
self.assertEqual(priority, 5)
else:
self.assertEqual(priority, id + 1)
def test_features_priority_adjustment_when_updating_an_existing_feature(self):
"""
        Ensure that updating a feature to a priority already used by another one adjusts the priorities
"""
# create our user so we can authenticate
res = self.ph.create_user(self.test_user_name, self.test_user_password)
self.assertEqual(res.status_code, status.HTTP_201_CREATED,
res.get_data(as_text=True))
# create the first feature, that we will update later
title = 'Title 1'
description = 'Description 1'
target_date = date(2018, 6, 1)
priority = 1
client = "Client"
area = "Billing"
post_res = self.ph.create_feature(title, description, target_date, priority, client, area)
res_data = json.loads(post_res.get_data(as_text=True))
self.assertEqual(post_res.status_code, status.HTTP_201_CREATED, post_res.get_data(as_text=True))
feature_url = res_data['url']
# create other 9 features and assert the response
for i in range(1,10):
title = 'Title {}'.format(i+1)
description = 'Description {}'.format(i+1)
target_date = date(2018, 6, 1)
priority = i+1
client = "Client"
area = "Billing"
post_res = self.ph.create_feature(title, description, target_date, priority, client, area)
res_data = json.loads(post_res.get_data(as_text=True))
self.assertEqual(post_res.status_code, status.HTTP_201_CREATED, post_res.get_data(as_text=True))
# assert we only have this 10 features (with priorities from 1 to 10)
self.assertEqual(Feature.query.count(), 10)
        # update a feature with priority 1 to priority 2, so the service must update all the priorities that are higher than or equal to 2
priority = 2
data = {'client_priority': priority}
patch_response = self.test_client.patch(
feature_url,
headers=self.ph.get_authentication_headers(),
data=json.dumps(data))
self.assertEqual(patch_response.status_code,
status.HTTP_200_OK, patch_response.get_data(as_text=True))
# Query all the priorities and verify they are updated correctly
url = url_for('api.featurelistresource', _external=True, page=1, size=10)
res = self.test_client.get(
url,
headers=self.ph.get_authentication_headers())
res_data = json.loads(res.get_data(as_text=True))
self.assertEqual(res.status_code, status.HTTP_200_OK,
res.get_data(as_text=True))
self.assertEqual(res_data['count'], 10)
# because it's a new db for this test, the id should be the same as the priority before we updated them
features = res_data['results']
for i in range(10):
id = features[i]['id']
priority = features[i]['client_priority']
self.assertEqual(priority, id+1)
def test_retrieve_features_list_by_area(self):
"""
Ensure we can retrieve the features list for an area
"""
# create our user so we can authenticate
res = self.ph.create_user(self.test_user_name, self.test_user_password)
self.assertEqual(res.status_code, status.HTTP_201_CREATED,
res.get_data(as_text=True))
# create 4 features and assert the response
for i in range(1, 5):
title = 'New Feature Title {}'.format(i)
description = 'Description {}'.format(i)
target_date = date(2018, 6, i)
priority = i
client = "Client"
area = "Billing"
post_res = self.ph.create_feature(
title, description, target_date, priority, client, area)
self.assertEqual(
post_res.status_code, status.HTTP_201_CREATED, post_res.get_data(as_text=True))
# create another 4 features but for another area
for i in range(1, 5):
title = 'New Feature Title {}'.format(i)
description = 'Description {}'.format(i)
target_date = date(2018, 6, i)
priority = i
client = "Client"
area = "Claims"
post_res = self.ph.create_feature(
title, description, target_date, priority, client, area)
self.assertEqual(
post_res.status_code, status.HTTP_201_CREATED, post_res.get_data(as_text=True))
# assert we only have this 8
self.assertEqual(Feature.query.count(), 8)
        # retrieve the complete list of features for the first area, it should return only the 4 we created
url = url_for('api.featurelistbyarearesource', id = 1, _external=True)
res = self.test_client.get(
url,
headers=self.ph.get_authentication_headers())
res_data = json.loads(res.get_data(as_text=True))
self.assertEqual(res.status_code, status.HTTP_200_OK,
res.get_data(as_text=True))
self.assertEqual(res_data['count'], 4)
def test_retrieve_features_list_by_client(self):
"""
Ensure we can retrieve the features list for a client
"""
# create our user so we can authenticate
res = self.ph.create_user(self.test_user_name, self.test_user_password)
self.assertEqual(res.status_code, status.HTTP_201_CREATED,
res.get_data(as_text=True))
# create 4 features and assert the response
for i in range(1, 5):
title = 'New Feature Title {}'.format(i)
description = 'Description {}'.format(i)
target_date = date(2018, 6, i)
priority = i
client = "Client"
area = "Billing"
post_res = self.ph.create_feature(
title, description, target_date, priority, client, area)
self.assertEqual(
post_res.status_code, status.HTTP_201_CREATED, post_res.get_data(as_text=True))
        # create another 4 features but for another client
for i in range(1, 5):
title = 'New Feature Title {}'.format(i)
description = 'Description {}'.format(i)
target_date = date(2018, 6, i)
priority = i
client = "Client 2"
area = "Billing"
post_res = self.ph.create_feature(
title, description, target_date, priority, client, area)
self.assertEqual(
post_res.status_code, status.HTTP_201_CREATED, post_res.get_data(as_text=True))
# assert we only have this 8
self.assertEqual(Feature.query.count(), 8)
# retrieve the complete list of features for the first client, it should return only the 4 we created
url = url_for('api.featurelistbyclientresource', id = 1, _external=True)
res = self.test_client.get(
url,
headers=self.ph.get_authentication_headers())
res_data = json.loads(res.get_data(as_text=True))
self.assertEqual(res.status_code, status.HTTP_200_OK,
res.get_data(as_text=True))
self.assertEqual(res_data['count'], 4)
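
# A minimal sketch (not the application's actual implementation) of the
# priority adjustment the tests above assume: inserting or moving a feature
# to priority p bumps every other feature whose priority is >= p by one.
def _adjust_priorities_sketch(priorities, new_priority):
    """priorities: client_priority values of the other existing features."""
    return [p + 1 if p >= new_priority else p for p in priorities]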
| [
"[email protected]"
]
| |
b866f47bcedbd24a41b9e989c6316740d4e2d137 | 5b21e3b64083c5449f367a927a0f718af7a74491 | /venv/Scripts/pip3-script.py | c91dffd6ff58842f226b6cdff66d4e17ad56d2ea | []
| no_license | james-bogert/Lists_Project | ae257cd0005a081c0b89cad05c33d8f0f9fabbe6 | abf81699f126f33d60cd87e19fd619c71a8042e7 | refs/heads/master | 2020-04-08T00:46:15.953431 | 2018-11-23T18:27:48 | 2018-11-23T18:27:48 | 158,258,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | #!C:\Users\jjbog\Lists_Project\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
]
| |
1b6973b7da92004e47d0485666a2fa1e91eead94 | 310bfe2d33786fb4af8ef9c74149649a6c735d34 | /cnn_processor.py | 434a6cb36151f9ca71d57f3afd22659a222e3e4a | []
| no_license | hanayashiki/FindEmotionalWords | 472e62351b86db34ffcfd8c880b2703da4f96f8b | 333affee5c61fcc074cfa2727e7616974a9aac35 | refs/heads/master | 2021-08-11T19:18:47.166614 | 2017-11-14T02:55:45 | 2017-11-14T02:55:45 | 109,950,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | import tensorflow as tf
from settings_cnn import *
import test_cnn
optimizer, losses, sigmoid_output, accuracy, one_zero = test_cnn.build()
def process(x_data):
sess = tf.Session()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
saver.restore(sess, model_path)
y = sess.run(one_zero, feed_dict={test_cnn.xs: x_data})
to_ret = []
for y_line in y:
emotion_obj_list = []
for idx in range(len(y_line)):
if y_line[idx] == 1:
emotion_obj_list.append((idx // sequence_length + 1, idx % sequence_length + 1))
to_ret.append(emotion_obj_list)
print(emotion_obj_list)
#print(y)
#print(to_ret)
return to_ret
if __name__ == '__main__':
    # process() requires prepared input features; see test_cnn for how
    # x_data is built before calling process(x_data).
    pass
| [
"[email protected]"
]
| |
81ead41af15f2e458481b49604b4fc00b30f8ecc | 9cbd22ce203ab7f40d6e02a7ee2b565461369b45 | /bagbankde/items.py | 768a82202c55a20a6a785aed852583aebad99500 | []
| no_license | hristo-grudev/bagbankde | a506ed6af28db7ad4c609d7fbd922d5a699b64d6 | 1afcb0454b9e498c4b4eccb233b7d2aa15823513 | refs/heads/main | 2023-03-26T14:09:08.641400 | 2021-03-18T14:16:31 | 2021-03-18T14:16:31 | 349,100,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | import scrapy
class BagbankdeItem(scrapy.Item):
title = scrapy.Field()
description = scrapy.Field()
date = scrapy.Field()
| [
"[email protected]"
]
| |
61f49f427b666427264c788d37ae7c6625a6b4c8 | cc3aa2b5a08416b96cc0633647f9567864c0de58 | /test/test3.py | 4904b3d4897d02086ce0982afb38c6f431168874 | []
| no_license | piixeltree/testcase_generator | 2c31be02061b3d52d729c20dce13b90d071629f9 | d1694b5799a92b2a66f6ae5423d9881c3c45dfa4 | refs/heads/master | 2020-03-10T03:23:46.548505 | 2018-04-12T11:49:08 | 2018-04-12T11:49:08 | 129,163,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | def sub(a,b):
return (a-b,4)
| [
"[email protected]"
]
| |
16e773d0dc3490478889f1eef2ab3f40c45177bc | 68f119bc8d30e106df37b56993df7909ec0c95fc | /readbacks/apps/common/utils.py | 8a772b631c0e4f6fa7b0e4a540cf30b524185fed | [
"MIT"
]
| permissive | argybarg/readbacks | 7b580d8a2a733b89cd03d7eb9803fbea8ce3a585 | 6b78bc3f33e53dd0850ed40d1412e355b7cd7cd6 | refs/heads/master | 2020-01-22T15:16:28.978295 | 2014-05-22T22:01:26 | 2014-05-22T22:01:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | # https://djangosnippets.org/snippets/1259/
def truncatesmart(value, limit=80):
"""
Truncates a string after a given number of chars keeping whole words.
Usage:
{{ string|truncatesmart }}
{{ string|truncatesmart:50 }}
"""
try:
limit = int(limit)
# invalid literal for int()
except ValueError:
# Fail silently.
return value
# Make sure it's unicode
value = unicode(value)
# Return the string itself if length is smaller or equal to the limit
if len(value) <= limit:
return value
# Cut the string
value = value[:limit]
# Break into words and remove the last
words = value.split(' ')[:-1]
# Join the words and return
    return ' '.join(words) + '...'
| [
"[email protected]"
]
| |
df87989ac6e9817600210b00a6107f9f117640db | 40dc0e7a24ff1ef5fb932b44cb663acd71df31fe | /AlphaZero_Gomoku-master/human_play.py | b617f0dea3383b34bdde3cf7c28d37ce05809a08 | [
"MIT"
]
| permissive | channerduan01/gomoku | 42e558528fb93bd50b585e32538e822c4a3279a9 | 0b759ab8b6633190fc205f00e34d6947ef8871bd | refs/heads/master | 2021-01-10T13:31:55.148620 | 2019-05-05T03:08:28 | 2019-05-05T03:08:28 | 50,356,694 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,043 | py | # -*- coding: utf-8 -*-
"""
human VS AI models
Input your move in the format: 2,3
@author: Junxiao Song
"""
from __future__ import print_function
import pickle
from game import Board, Game
from mcts_pure import MCTSPlayer as MCTS_Pure
from mcts_alphaZero import MCTSPlayer
from policy_value_net_numpy import PolicyValueNetNumpy
# from policy_value_net import PolicyValueNet # Theano and Lasagne
# from policy_value_net_pytorch import PolicyValueNet # Pytorch
# from policy_value_net_tensorflow import PolicyValueNet # Tensorflow
# from policy_value_net_keras import PolicyValueNet # Keras
class Human(object):
"""
human player
"""
def __init__(self):
self.player = None
def set_player_ind(self, p):
self.player = p
def get_action(self, board):
try:
location = input("Your move: ")
if isinstance(location, str): # for python3
location = [int(n, 10) for n in location.split(",")]
move = board.location_to_move(location)
except Exception as e:
move = -1
if move == -1 or move not in board.availables:
print("invalid move")
move = self.get_action(board)
return move
def __str__(self):
return "Human {}".format(self.player)
def run():
# print("!!!!!!!!!!")
n = 5
width, height = 8, 8
model_file = 'best_policy_8_8_5.model'
try:
board = Board(width=width, height=height, n_in_row=n)
game = Game(board)
# ############### human VS AI ###################
# load the trained policy_value_net in either Theano/Lasagne, PyTorch or TensorFlow
# best_policy = PolicyValueNet(width, height, model_file = model_file)
# mcts_player = MCTSPlayer(best_policy.policy_value_fn, c_puct=5, n_playout=400)
# load the provided model (trained in Theano/Lasagne) into a MCTS player written in pure numpy
try:
policy_param = pickle.load(open(model_file, 'rb'))
except:
policy_param = pickle.load(open(model_file, 'rb'),
encoding='bytes') # To support python3
best_policy = PolicyValueNetNumpy(width, height, policy_param)
# best_policy = PolicyValueNet(width, height, model_file)
mcts_player = MCTSPlayer(best_policy.policy_value_fn,
c_puct=5,
n_playout=400) # set larger n_playout for better performance
# uncomment the following line to play with pure MCTS (it's much weaker even with a larger n_playout)
# mcts_player = MCTS_Pure(c_puct=5, n_playout=1000)
# human player, input your move in the format: 2,3
human = Human()
# set start_player=0 for human first
game.start_play(human, mcts_player, start_player=1, is_shown=1)
# game.start_self_play(mcts_player, is_shown=1)
except KeyboardInterrupt:
print('\n\rquit')
if __name__ == '__main__':
run()
| [
"[email protected]"
]
| |
526d68e7367544fddb75b1feac1d125516d58c15 | 2eca0111818adce7454e76197436fa567a0ab6bd | /storefront/themed_products/migrations/0003_auto_20210114_1449.py | c47334bfb5dc7db3481a78f5d8fa49cd1326dfff | []
| no_license | jbar173/Storefront | 336eedc9b75e31e8f2f1fd0015fdcbdc10bd7467 | 0a7a9cf3fd6267fb828b1b5d2f99b8ad0794db32 | refs/heads/main | 2023-03-28T19:08:25.633081 | 2021-03-29T16:07:07 | 2021-03-29T16:07:07 | 321,958,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | # Generated by Django 3.1.2 on 2021-01-14 14:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('orders', '0003_auto_20210113_2056'),
('themed_products', '0002_themedbouquet_theme_name'),
]
operations = [
migrations.AddField(
model_name='themedbouquet',
name='order',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='t_bouquet', to='orders.order'),
),
migrations.AlterField(
model_name='themedbouquet',
name='theme_name',
field=models.CharField(choices=[('Type', 'Type'), ('Colour', 'Colour'), ('Type and Colour', 'Type and Colour')], max_length=50, null=True),
),
]
| [
"[email protected]"
]
| |
bcc5720fc4cdcc6f82c6aabd3852ce15ab77b4e0 | d886d3a2c1c9e117a817d60d4d29122fe49afef5 | /test/lexTests.py | 4d5cde4ff3ceea8ae499d385a879634d350fa9f5 | [
"LicenseRef-scancode-scintilla"
]
| permissive | missdeer/Scintilla-iOS | e70012012af9c2a500871d59d6b52386f083983a | 956d4fb53c2ffa135648f86b0b395f51e4136448 | refs/heads/master | 2022-05-16T12:28:45.315261 | 2022-04-03T08:57:27 | 2022-04-03T08:57:27 | 178,977,537 | 0 | 0 | NOASSERTION | 2020-01-08T01:39:51 | 2019-04-02T01:47:01 | C++ | UTF-8 | Python | false | false | 3,424 | py | # -*- coding: utf-8 -*-
from __future__ import with_statement
import io, os, sys, unittest
if sys.platform == "win32":
import XiteWin as Xite
else:
import XiteQt as Xite
keywordsHTML = [
b"b body content head href html link meta "
b"name rel script strong title type xmlns",
b"function",
b"sub"
]
class TestLexers(unittest.TestCase):
def setUp(self):
self.xite = Xite.xiteFrame
self.ed = self.xite.ed
self.ed.ClearAll()
self.ed.EmptyUndoBuffer()
def AsStyled(self):
text = self.ed.Contents()
data = io.BytesIO()
prevStyle = -1
for o in range(self.ed.Length):
styleNow = self.ed.GetStyleAt(o)
if styleNow != prevStyle:
styleBuf = "{%0d}" % styleNow
data.write(styleBuf.encode('utf-8'))
prevStyle = styleNow
data.write(text[o:o+1])
return data.getvalue()
def LexExample(self, name, lexerName, keywords=None):
if keywords is None:
keywords = []
self.ed.SetCodePage(65001)
self.ed.LexerLanguage = lexerName
bits = self.ed.StyleBitsNeeded
mask = 2 << bits - 1
self.ed.StyleBits = bits
for i in range(len(keywords)):
self.ed.SetKeyWords(i, keywords[i])
nameExample = os.path.join("examples", name)
namePrevious = nameExample +".styled"
nameNew = nameExample +".new"
with open(nameExample, "rb") as f:
prog = f.read()
BOM = b"\xEF\xBB\xBF"
if prog.startswith(BOM):
prog = prog[len(BOM):]
lenDocument = len(prog)
self.ed.AddText(lenDocument, prog)
self.ed.Colourise(0, lenDocument)
self.assertEquals(self.ed.EndStyled, lenDocument)
try:
with open(namePrevious, "rb") as f:
prevStyled = f.read()
except FileNotFoundError:
prevStyled = ""
progStyled = self.AsStyled()
if progStyled != prevStyled:
with open(nameNew, "wb") as f:
f.write(progStyled)
print(progStyled)
print(prevStyled)
self.assertEquals(progStyled, prevStyled)
# The whole file doesn't parse like it did before so don't try line by line
# as that is likely to fail many times.
return
# Try partial lexes from the start of every line which should all be identical.
for line in range(self.ed.LineCount):
lineStart = self.ed.PositionFromLine(line)
self.ed.StartStyling(lineStart, mask)
self.assertEquals(self.ed.EndStyled, lineStart)
self.ed.Colourise(lineStart, lenDocument)
progStyled = self.AsStyled()
if progStyled != prevStyled:
with open(nameNew, "wb") as f:
f.write(progStyled)
self.assertEquals(progStyled, prevStyled)
# Give up after one failure
return
def testCXX(self):
self.LexExample("x.cxx", b"cpp", [b"int"])
def testPython(self):
self.LexExample("x.py", b"python",
[b"class def else for if import in print return while"])
def testHTML(self):
self.LexExample("x.html", b"hypertext", keywordsHTML)
def testASP(self):
self.LexExample("x.asp", b"hypertext", keywordsHTML)
def testPHP(self):
self.LexExample("x.php", b"hypertext", keywordsHTML)
def testVB(self):
self.LexExample("x.vb", b"vb", [b"as dim or string"])
def testLua(self):
self.LexExample("x.lua", b"lua", [b"function end"])
def testRuby(self):
self.LexExample("x.rb", b"ruby", [b"class def end"])
def testPerl(self):
self.LexExample("x.pl", b"perl", [b"printf sleep use while"])
def testD(self):
self.LexExample("x.d", b"d",
[b"keyword1", b"keyword2", b"", b"keyword4", b"keyword5",
b"keyword6", b"keyword7"])
if __name__ == '__main__':
Xite.main("lexTests")
| [
"[email protected]"
]
| |
b70b6252b5bca3bad1bd930541510f86d3f7360e | 167c6b1238c0e3f8f17e3659ae4e34a8dff3908d | /task1.py | bbb51409030000b6d4014178effcd77099474735 | []
| no_license | Pajke123/ORS-PA-18-Homework07 | 8345ae8758601e9b42e76f0ac324adb005802727 | fdd7c0c853f0fd70616295145e8ebef04677808f | refs/heads/master | 2020-05-21T01:02:13.703392 | 2019-04-30T12:53:10 | 2019-04-30T12:53:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | """
=================== TASK 1 ====================
* Name: Roll The Dice
*
* Write a script that will simulate rolling the
* dice. The script should fetch the number of times
* the dice should be "rolled" as user input.
* At the end, the script should print how many times
* each number appeared (1 - 6).
*
* Note: Please describe in details possible cases
* in which your solution might not work.
*
* Note: You can use `rand` module to simulate dice
* rolling.
===================================================
"""
# Write your script here
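# A minimal sketch of one possible solution. Assumption: the "rand" module
# mentioned above is read as Python's standard `random` module, and the
# prompt/output wording below is illustrative.
import random

try:
    rolls = int(input("How many times should the dice be rolled? "))
except ValueError:
    # One case a naive solution misses: non-numeric input.
    print("Please enter a whole number.")
else:
    counts = {face: 0 for face in range(1, 7)}
    for _ in range(rolls):
        counts[random.randint(1, 6)] += 1
    for face in range(1, 7):
        print("{} appeared {} time(s)".format(face, counts[face]))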
| [
"[email protected]"
]
| |
7d42995cc032265dc1da6c26ba81455cc32bcebd | c60c199410289c1d7ec4aea00833b461e1f08f88 | /.history/older-than/older/source-example/day2/user-list.py | a923e529b041db39bfa93f7bc43cb926236f86e4 | []
| no_license | ver007/pythonjumpstart | 66fb111e6af197fad3e853b2c2d712a1b57a7d59 | 5b1f52479abd07456e2da494149e491d398f3b7d | refs/heads/master | 2021-01-21T01:34:35.501870 | 2015-05-13T14:10:13 | 2015-05-13T14:10:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | #!/usr/bin/env python
users = [ line.split(':')[0] for line in open('/etc/passwd') if '#' not in line and '!' in line ]
users.sort()
for (i, n) in enumerate(users):
print i, ":", n
| [
"[email protected]"
]
| |
01b32f0e13acf20a01c271a146bc3a109c71506a | 0533cdcaa13edd15d60523be1c806985f2a27be3 | /life/addRestaurants.py | 523f72ba7849fd028b5157a5be5b00d20fa7c69f | []
| no_license | cs160su18/p5-junseoky21 | bfcebb141ea9501d866707939c6435506f49664c | 27a39c4cbc795e2f2e6805d9f3ae9fc78b2013c1 | refs/heads/master | 2020-03-23T11:08:27.179003 | 2018-07-22T23:19:57 | 2018-07-22T23:19:57 | 141,485,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | from life.models import *
Mc = Restaurant(name="McDonalds", latitude=37.8716, longitude=-122.2727)
Mc.save()
Mc = Restaurant(name="Bongo Burger", latitude=37.8653, longitude=-122.2582)
Mc.save()
Mc = Restaurant(name="Taco Bell", latitude=37.8678, longitude=-122.2576)
Mc.save()
| [
"[email protected]"
]
| |
0933da67fd790e5811ce8b580f16a0ab1a3f6a75 | 32bbbd6dbd100bbb9a2282f69ac3b7b34516347f | /Study/keras/keras44_cifar100_2_cnn.py | 88e3f8742ac013b4f6a6c64966e550971666ddae | []
| no_license | kimjh1753/AIA_Academy_Study | 2162d4d4f1a6b8ca1870f86d540df45a8742f359 | 6022718ae7f9e5170a19c4786d096c8042894ead | refs/heads/master | 2023-05-07T12:29:12.920693 | 2021-06-05T01:09:33 | 2021-06-05T01:09:33 | 324,136,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,971 | py | # 1. 데이터
from tensorflow.keras.datasets import cifar100
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
print(x_train.shape, y_train.shape) # (50000, 32, 32, 3) (50000, 1)
print(x_test.shape, y_test.shape) # (10000, 32, 32, 3) (10000, 1)
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], x_train.shape[3])/255.
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], x_test.shape[3])/255.
# OneHotEncoding
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print(y_train.shape, y_test.shape) # (50000, 100) (10000, 100)
# 2. Build the model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout
model = Sequential()
model.add(Conv2D(filters=10, kernel_size=(2,2), padding='same',
strides=1, input_shape=(32, 32, 3)))
model.add(Dropout(0.2))
model.add(Conv2D(9, (2,2), padding='valid'))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1000, activation='relu'))
model.add(Dense(100, activation='softmax'))
# 3. Compile, train
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
from tensorflow.keras.callbacks import EarlyStopping
es = EarlyStopping(monitor='loss', patience=30, mode='auto')
model.fit(x_train, y_train, epochs=2000, batch_size=2000, validation_split=0.2, verbose=1, callbacks=[es])
# 4. Evaluate, predict
loss, acc = model.evaluate(x_test, y_test, batch_size=1)
print("loss : ", loss)
print("loss : ", acc)
# keras cifar100 cnn
# loss : 5.992544174194336
# acc : 0.23280000686645508
| [
"[email protected]"
]
| |
973325cd65d92a99bb447908d6527aa29d2f230e | b9cb9229b042be70e5c7918b9e89006bfb01d506 | /uniforms.py | b142a544242f109a0ca9fc1dd063e9b507f3c877 | []
| no_license | adibenc/sentimen-aa | d2defbc38ab785ba4b254b8b1f3aab4af9d1f887 | eb4d145fe51ec9baa59703dd643e5b432c97c7f2 | refs/heads/main | 2023-05-13T07:49:43.295866 | 2021-05-28T10:20:40 | 2021-05-28T10:20:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,724 | py | import pandas as pd
import numpy as np
class BaseIO:
datas = {
"csv": None,
"xls": None,
"dataframe": None,
}
def __init__(self):
pass
def fromCsv(self, filename):
pass
def toCsv(self, filename):
pass
def toXls(self, filename):
pass
def inputFmt(self, name, filename):
return {
"name": name,
"filename": filename,
}
baseio = BaseIO()
"""
https://stackabuse.com/how-to-merge-dataframes-in-pandas/#mergedataframesusingappend
pd.concat(dataframes, axis=0, join='outer', ignore_index=False, keys=None,
levels=None, names=None, verify_integrity=False, sort=False, copy=True)
Here are the most commonly used parameters for the concat() function:
objs is the list of DataFrame objects ([df1, df2, ...]) to be concatenated
axis defines the direction of the concatenation, 0 for row-wise and 1 for column-wise
join can either be inner (intersection) or outer (union)
ignore_index by default set to False which allows the index values to remain as they were in the original DataFrames, can cause duplicate index values. If set to True, it will ignore the original values and re-assign index values in sequential order
keys allows us to construct a hierarchical index. Think of it as another level of the index that appended on the outer left of the DataFrame that helps us to distinguish indices when values are not unique
"""
def concatDataframeRows(dataframes):
return pd.concat(dataframes, axis=0, join='outer')
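# Tiny self-check of concatDataframeRows (synthetic frames, not the crawled
# CSVs): a row-wise outer concat keeps every row and, since ignore_index is
# left False, the original index values.
_df_a = pd.DataFrame({"text": ["a"]})
_df_b = pd.DataFrame({"text": ["b"]})
assert len(concatDataframeRows([_df_a, _df_b])) == 2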
def initialize():
crawled = [
baseio.inputFmt("ruu.minol", 'ruu.minol.csv'),
baseio.inputFmt("ruu.minol2", 'ruu.minol2.csv'),
baseio.inputFmt("ruu.minuman.beralkohol", 'ruu.minuman.beralkohol.csv'),
baseio.inputFmt("ruu.minuman.beralkohol2", 'ruu.minuman.beralkohol2.csv'),
baseio.inputFmt("ruu.miras", 'ruu.miras.csv'),
baseio.inputFmt("ruu.miras2", 'ruu.miras2.csv'),
]
useCols = [
'status_id',
'created_at',
'screen_name',
'text',
'preprocessed',
'classify_data',
'classified'
]
dataframes = []
# print(crawled)
for c in crawled:
# get classified
filen = './' + c['name'] + '.classified.csv'
print(filen)
df = pd.read_csv(filen, header=0, lineterminator='\n', usecols=useCols)
dataframes.append(df)
# preprocessor.results['sentimen.y1'][c['name']] = c
concated = concatDataframeRows(dataframes)
concated.to_csv("ruu.all.classified.csv")
initialize()
"""
'status_id', 'created_at', 'screen_name', 'text', 'preprocessed', 'classify_data', 'classified'
""" | [
"[email protected]"
]
| |
84cc0587d07c1b8dd4cdaa6a6736f3482679e698 | aec4dc0c6780e1dc63fb1a840541771b2931198f | /app/models/follow.py | 7a7d81cd0bdd0513286e51112a7feab36650310e | []
| no_license | dunnjm814/mySampler | e1864fa8e7407eaa31a32ceec5252840d7ad5dde | a272ed13e19a2d6593ef6a7ae6d20441d16e9546 | refs/heads/main | 2023-06-14T22:50:30.477166 | 2021-07-14T00:35:03 | 2021-07-14T00:35:03 | 343,591,654 | 8 | 0 | null | 2021-07-14T00:35:04 | 2021-03-01T23:59:18 | Python | UTF-8 | Python | false | false | 195 | py | from .db import db
follows = db.Table(
"follows",
db.Column("follower_id", db.Integer, db.ForeignKey("users.id")),
db.Column("followed_id", db.Integer, db.ForeignKey("users.id"))
)
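# Sketch of how this association table is typically wired into a
# self-referential many-to-many relationship. It assumes a User model with an
# integer `id` primary key; the names below are illustrative, not this app's
# actual code:
#
#   followed = db.relationship(
#       "User",
#       secondary=follows,
#       primaryjoin=(follows.c.follower_id == id),
#       secondaryjoin=(follows.c.followed_id == id),
#       backref="followers",
#   )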
| [
"[email protected]"
]
| |
6a1e6c2874181f6c5859c830e394359834617163 | 747f759311d404af31c0f80029e88098193f6269 | /extra-addons/training_doc/__init__.py | 4cb47ad014a13cc816addb240e952f246358cbea | []
| no_license | sgeerish/sirr_production | 9b0d0f7804a928c0c582ddb4ccb7fcc084469a18 | 1081f3a5ff8864a31b2dcd89406fac076a908e78 | refs/heads/master | 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jesús Martín <[email protected]>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import training_doc
import wizard
| [
"[email protected]"
]
| |
12a717c45c8c69c635adf443185ebd07138871b6 | aa39aa66215492d85c254116045e5516c40d4b80 | /Brilliant/toy_lstm.py | f39f6268252c2b6b7ecd30b862929c8bbe6ff758 | []
| no_license | JoaoAreias/Artificial-intelligence | f7a2260ceba9a6c0bbd7d9c5db4a347c2d3a965f | 37c99d7cb094f7e084dd4a52fac2e54e02bf1473 | refs/heads/master | 2021-08-02T16:16:10.380044 | 2021-07-30T14:24:06 | 2021-07-30T14:24:06 | 152,242,702 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | """A toy LSTM to solve
brilliant.org's challanges
[INCOMPLETE]
"""
from scipy.special import expit
import numpy as np
class LSTM(object):
def __init__(self):
self.C = None
self.h = None
def _forget_gate(self):
"""Models the output of a forget gate"""
ft = np.array([1, 0, 0.5])
return self.C*ft
def _input_gate(self, X):
"""Models the output of an input gate"""
Wi = None
        X = np.concatenate((self.h, X))
b = None
return expit(np.matmul(Wi, X) + b)
def _tanh_gate(self, X):
"""Creates possible new additions to the cell
state"""
Wc = None
        X = np.concatenate((self.h, X))
b = None
return np.tanh(np.matmul(Wc, X) + b)
def _update_cell_state(self, X):
        self.C = self._forget_gate() + self._input_gate(X) * self._tanh_gate(X)
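
# Minimal demo of the one fully specified piece above: with the hard-coded
# ft = [1, 0, 0.5], the forget gate keeps, drops, and halves the three
# cell-state entries respectively. (The other gates still carry None weight
# placeholders, matching the [INCOMPLETE] note in the module docstring.)
if __name__ == "__main__":
    lstm = LSTM()
    lstm.C = np.array([2.0, 3.0, 4.0])
    print(lstm._forget_gate())  # -> [2. 0. 2.]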
| [
"[email protected]"
]
| |
066606616f939cd8455b4c5eaeb8039be54a8ed4 | c4e872fc1cbfd78006cfd6826f57b7db3d94c622 | /web/pack.py | 458c0ab36b94e98bed1c68ccfe455611c09bcad6 | [
"Apache-2.0"
]
| permissive | rajeshpandit107/AccDNN | 7049eb3e14193f59f03d61fbf7d3ed7fbfb8d22b | e250a0c020d0ba84f4c4ec098d58b5d1c5fb6d10 | refs/heads/master | 2023-03-29T22:56:08.042630 | 2021-04-09T14:58:28 | 2021-04-09T14:58:28 | 255,531,572 | 0 | 1 | Apache-2.0 | 2020-04-14T06:45:03 | 2020-04-14T06:45:02 | null | UTF-8 | Python | false | false | 910 | py | #!/usr/bin/env python
from settings import *
def pack_layer_profile(layer_name, layer_type, cpf, kpf, macs, dsps, weights_num, delay, bandwidth, blks):
return {'name':layer_name, 'type':layer_type, 'cpf':cpf, 'kpf':kpf, 'macs':macs, \
'dsps':dsps, 'weights':weights_num, 'delay':delay, 'bandwidth':bandwidth, 'blks':blks}
def pack_model_profile(layer_info, batch_size, total_macs, total_dsps, \
total_weights_num, total_blks, total_ddr_bandwidth, throughput, utilization):
return {'layer_info':layer_info, 'batch_size':batch_size, 'total_macs':total_macs, \
'total_dsps': total_dsps, 'total_weights_num':total_weights_num, 'total_blks':total_blks,
'total_ddr_bandwidth':total_ddr_bandwidth, 'throughput':throughput, 'utilization':utilization}
def pack_optim_info(layer_name, cpf, kpf):
return {'name':layer_name, 'cpf':cpf, 'kpf':kpf}
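# Illustrative record shape (made-up values) produced by the helper above:
#   pack_optim_info("conv1", 4, 8) -> {'name': 'conv1', 'cpf': 4, 'kpf': 8}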
| [
"[email protected]"
]
| |
af26b2e4c69d706a2290188dd72518c77b79a727 | 8c6c5daf87e7335ea2450f117c8dbb20cd586b04 | /blog/migrations/0003_auto_20190717_1433.py | 2f5c89a127022a175d0a3dd638d17224df5fb4df | []
| no_license | lucifer0508/demoblog | 62cef204ee6278371e215952a6d7a641bda43e05 | f45b0fce32e6b8a999ab68da74fe3790eb6f8fba | refs/heads/master | 2020-06-21T13:50:48.224315 | 2019-07-17T22:22:54 | 2019-07-17T22:22:54 | 197,471,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | # Generated by Django 2.2.3 on 2019-07-17 09:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_blogdata_email'),
]
operations = [
migrations.AlterField(
model_name='blogdata',
name='email',
field=models.CharField(default='Empty...', max_length=40),
),
]
| [
"[email protected]"
]
| |
550ed41dde0be83f539c8f99ae6123610cd360c3 | c424cc8b499d61da10dba06cc582d271be481b5b | /bin/pscp | c6d17e566eba1c8dfd5752cbae05cfa0e8957c5f | []
| no_license | westfly/mssh | c908e7a369bcb4f8c48975e4ae92f9a8b341df98 | 4cece9eb08c5d03a238fd55754b12e1551514bd1 | refs/heads/master | 2021-01-10T02:16:42.130330 | 2015-12-27T10:37:32 | 2015-12-27T10:37:32 | 48,640,888 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,905 | #!/usr/bin/env python
# -*- Mode: python -*-
# Copyright (c) 2009, Andrew McNabb
# Copyright (c) 2003-2008, Brent N. Chun
"""Parallel scp to the set of nodes in hosts.txt.
For each node, we essentially do a scp [-r] local user@host:remote. This
program also uses the -q (quiet) and -C (compression) options. Note that
remote must be an absolute path.
"""
import os
import re
import sys
parent, bindir = os.path.split(os.path.dirname(os.path.abspath(sys.argv[0])))
if os.path.exists(os.path.join(parent, 'psshlib')):
sys.path.insert(0, parent)
from psshlib import psshutil
from psshlib.task import Task
from psshlib.manager import Manager
from psshlib.cli import common_parser, common_defaults
def option_parser():
parser = common_parser()
parser.usage = "%prog [OPTIONS] -h hosts.txt local remote"
parser.epilog = ("Example: pscp -h hosts.txt -l irb2 foo.txt " +
"/home/irb2/foo.txt")
parser.add_option('-r', '--recursive', dest='recursive',
action='store_true', help='recusively copy directories (OPTIONAL)')
return parser
def parse_args():
parser = option_parser()
defaults = common_defaults()
parser.set_defaults(**defaults)
opts, args = parser.parse_args()
if len(args) < 1:
parser.error('Paths not specified.')
if len(args) < 2:
parser.error('Remote path not specified.')
if not opts.host_files and not opts.host_entries:
parser.error('Hosts not specified.')
return opts, args
def do_pscp(hosts, localargs, remote, opts):
if opts.outdir and not os.path.exists(opts.outdir):
os.makedirs(opts.outdir)
if opts.errdir and not os.path.exists(opts.errdir):
os.makedirs(opts.errdir)
manager = Manager(opts)
for host, port, user in hosts:
cmd = []
if opts.common_password:
cmd += ['sshpass', '-p', opts.common_password]
cmd += ['scp', '-qC']
if opts.options:
cmd += ['-o', opts.options]
if port:
cmd += ['-P', port]
if opts.recursive:
cmd.append('-r')
if opts.extra:
cmd.extend(opts.extra)
cmd.extend(localargs)
if user:
cmd.append('%s@%s:%s' % (user, host, remote))
else:
cmd.append('%s:%s' % (host, remote))
t = Task(host, port, user, cmd, opts)
manager.add_task(t)
manager.run()
if __name__ == "__main__":
opts, args = parse_args()
localargs = args[0:-1]
remote = args[-1]
if not re.match("^/", remote):
print("Remote path %s must be an absolute path" % remote)
sys.exit(3)
hosts = psshutil.read_hosts(opts.host_files, default_user=opts.user)
if opts.host_entries:
for entry in opts.host_entries:
hosts.append(psshutil.parse_host(entry, default_user=opts.user))
do_pscp(hosts, localargs, remote, opts)
| [
"[email protected]"
]
|