blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses sequencelengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors sequencelengths 1 1 | author_id stringlengths 1 132 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7a144841bd9fe8ec5e8663356c33fac86b2dbf31 | acb082b215e6d214a5065f76c0454dcf1fb2a533 | /src/cobra/core/auth/access.py | e3881977e6cf7cd7420cdef66f2da916e6890e6d | [
"Apache-2.0"
] | permissive | lyoniionly/django-cobra | 130b25cd897cc94b6a8da722e9a83ecea3b00c49 | 2427e5cf74b7739115b1224da3306986b3ee345c | refs/heads/master | 2016-09-06T10:23:10.607025 | 2016-02-22T14:16:49 | 2016-02-22T14:16:49 | 29,646,851 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,147 | py | from __future__ import absolute_import
from cobra.core.loading import get_model
__all__ = ['from_user', 'from_member', 'DEFAULT', 'SCOPES']
OrganizationMember = get_model('organization', 'OrganizationMember')
SCOPES = set([
'org:read',
'org:write',
'org:delete',
'team:read',
'team:write',
'team:delete',
'project:read',
'project:write',
'project:delete',
'event:read',
'event:write',
'event:delete',
])
class BaseAccess(object):
is_global = False
is_active = False
sso_is_valid = False
teams = ()
scopes = frozenset()
def has_scope(self, scope):
if not self.is_active:
return False
return scope in self.scopes
def has_team(self, team):
if not self.is_active:
return False
if self.is_global:
return True
return team in self.teams
class Access(BaseAccess):
# TODO(dcramer): this is still a little gross, and ideally backend access
    # would be based on the same scopes as API access so there's clarity in
# what things mean
def __init__(self, scopes, is_global, is_active, teams, sso_is_valid):
self.teams = teams
self.scopes = scopes
self.is_global = is_global
self.is_active = is_active
self.sso_is_valid = sso_is_valid
def from_user(user, organization):
if user.is_superuser:
return Access(
scopes=SCOPES,
is_global=True,
is_active=True,
teams=(),
sso_is_valid=True,
)
if not organization:
return DEFAULT
try:
om = OrganizationMember.objects.get(
user=user,
organization=organization,
)
except OrganizationMember.DoesNotExist:
return DEFAULT
return from_member(om)
def from_member(member):
    # TODO(dcramer): we want to optimize this access pattern as it's several
# network hops and needed in a lot of places
if member.has_global_access:
teams = ()
else:
teams = member.teams.all()
# try:
# auth_provider = AuthProvider.objects.get(
# organization=member.organization_id,
# )
# except AuthProvider.DoesNotExist:
# sso_is_valid = True
# else:
# try:
# auth_identity = AuthIdentity.objects.get(
# auth_provider=auth_provider,
# )
# except AuthIdentity.DoesNotExist:
# sso_is_valid = False
# else:
# sso_is_valid = auth_identity.is_valid(member)
sso_is_valid = True
return Access(
is_global=member.has_global_access,
is_active=True,
sso_is_valid=sso_is_valid,
scopes=member.scopes,
teams=teams,
)
class NoAccess(BaseAccess):
@property
def sso_is_valid(self):
return True
@property
def is_global(self):
return False
@property
def is_active(self):
return False
@property
def teams(self):
return ()
@property
def scopes(self):
return frozenset()
DEFAULT = NoAccess()
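# Illustrative usage of this module (hypothetical caller code, not part of the original file):
#   access = from_user(request.user, organization)
#   if access.has_scope('project:write') and access.has_team(team):
#       ...  # permit the action
# Superusers get global access to every scope in SCOPES; users with no
# OrganizationMember row fall back to DEFAULT (a NoAccess instance), which denies everything.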
| [
"[email protected]"
] | |
59620cdbfa9167aeb555cca708e1f7ede5412081 | 7d31324f874130bc5059314048193f474f2a820a | /gui.py | 6a41ea6e86f10145d7f19d02e451b0a0f571e17f | [] | no_license | deshudiosh/PyTSF | 2ae13c9724b6e290016aad6329db5c175bd319fa | 6c3a8b6b35a4fc602f10aa10aff72d9bc887ccdb | refs/heads/master | 2021-01-20T01:36:11.138441 | 2017-10-27T18:57:31 | 2017-10-27T18:57:31 | 89,306,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | import remi.gui as gui
from remi import start, App
class PyTsfGui(App):
def __init__(self, *args):
super(PyTsfGui, self).__init__(*args)
def main(self):
        # margin 0px auto centers the app on the screen
container = gui.VBox(width=400, margin='0px auto')
container.style['background'] = '#808080'
logo = gui.Label('PyTSF', width='80%', height=60, margin='0px auto')
logo.style['margin'] = 'auto'
panel = gui.HBox(width=400, height=100, margin='0px auto')
dropdown = gui.DropDown()
refresh = gui.Button('R')
options = gui.Button("O")
go = gui.Button("Go!")
panel.append(dropdown)
panel.append(refresh)
panel.append(options)
panel.append(go)
container.append(logo)
container.append(panel)
# returning the root widget
return container
def startApp():
start(PyTsfGui, address="127.0.0.20", debug=True, enable_file_cache=False, multiple_instance=True) | [
"[email protected]"
] | |
2bc3743b880efe5c8a8739a55179572019820af5 | c31e46965ea47cdb0c61a6b525aecea45dbf4d0b | /gram/views.py | b442f39e96c2ad10dff14a8042bb03981ca1e2de | [] | no_license | marysinaida/-Instagram | 687563c7b9a44adcdd09217ed02ff5c2c24623d2 | cb0768ad24b7650db6b7fbfd9d445232b154cece | refs/heads/master | 2022-12-06T07:02:55.786066 | 2020-01-08T06:57:47 | 2020-01-08T06:57:47 | 228,771,439 | 0 | 0 | null | 2022-11-22T05:13:53 | 2019-12-18T06:16:03 | Python | UTF-8 | Python | false | false | 2,960 | py | from django.shortcuts import render,get_object_or_404
from django.utils import timezone
from .forms import PostForm, UpdateUserForm, UpdateUserProfileForm  # UpdateUserForm / UpdateUserProfileForm are used in profile() below and are assumed to be defined in .forms
from .models import Post,Profile
from django.contrib.auth.models import User
from django.views.generic import (ListView,CreateView,DetailView)
from django.http import HttpResponse, HttpResponseRedirect  # HttpResponseRedirect is used in profile() below
from django.contrib.auth.decorators import login_required
# Create your views here.
class PostListView(ListView):
template_name = "post_list.html"
queryset = Post.objects.all().filter(created_date__lte=timezone.now()).order_by('-created_date')
context_object_name = "posts"
success_url = '/'
class PostCreateView(CreateView):
template_name = "post_create.html"
form_class = PostForm
queryset = Post.objects.all()
success_url = '/'
def form_valid(self,form):
print(form.cleaned_data)
form.instance.author = self.request.user
return super().form_valid(form)
class PostDetailView(DetailView):
template_name ="post_details.html"
queryset = Post.objects.all().filter(created_date__lte=timezone.now())
def get_object(self):
id_ = self.kwargs.get('id')
return get_object_or_404(Post,id=id_)
def signUp(request):
return render(request,'registration/registration_form.html')
@login_required(login_url='/accounts/login/')
def login(request):
return render(request,'registration/login.html')
@login_required(login_url='/profile')
def search_results(request):
if 'username' in request.GET and request.GET["username"]:
search_term = request.GET.get("username")
searched_users = User.objects.filter(username = search_term)
message = f"{search_term}"
profile_pic = User.objects.all()
return render(request, 'search.html', {'message':message, 'results':searched_users, 'profile_pic':profile_pic})
else:
message = "You haven't searched for any term"
return render(request, 'search.html', {'message':message})
def profile(request):
# image = request.user.profile.posts.all()
if request.method == 'POST':
user_form = UpdateUserForm(request.POST, instance=request.user)
prof_form = UpdateUserProfileForm(request.POST, request.FILES, instance=request.user.profile)
if user_form.is_valid() and prof_form.is_valid():
user_form.save()
prof_form.save()
return HttpResponseRedirect(request.path_info)
return render(request, 'profile.html', {})
return render(request, 'profile.html', {})
def timeline(request):
posts = Post.objects.all().filter(created_date__lte=timezone.now()).order_by('-created_date')
if request.method == 'POST':
form = PostForm(request.POST, request.FILES)
        if form.is_valid():  # was "edit_form", which is never defined in this view
form.save()
return render(request, 'post_list.html', {'form': form, 'posts': posts})
return render(request, 'post_list.html', {'posts': posts})
| [
"[email protected]"
] | |
e0d376cdb3b6cbbbbeea5b2ea6c2a81b88fbf34c | 422c9cc1c5ef7eba24610e66d6a74ec2e16bf39e | /install_isolated/lib/python2.7/dist-packages/turtle_actionlib/msg/_ShapeFeedback.py | 332cdc621b2cf527210a07a88ad56ccb86dca474 | [] | no_license | twighk/ROS-Pi3 | 222c735d3252d6fce43b427cdea3132f93025002 | 9f2912c44ae996040f143c1e77e6c714162fc7d2 | refs/heads/master | 2021-01-01T05:16:20.278770 | 2016-05-08T19:24:15 | 2016-05-08T19:24:15 | 58,306,257 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,927 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from turtle_actionlib/ShapeFeedback.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class ShapeFeedback(genpy.Message):
_md5sum = "d41d8cd98f00b204e9800998ecf8427e"
_type = "turtle_actionlib/ShapeFeedback"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
#feedback
"""
__slots__ = []
_slot_types = []
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(ShapeFeedback, self).__init__(*args, **kwds)
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
| [
"[email protected]"
] | |
10103e3ce17a1843505618c2c3c6c0c49ea5b1c2 | 36163a05070a9cd0daab7c8b18b49053e0365bed | /src/python/WMCore/TaskQueue/TQComp/Apis/TQStateApi.py | cd257a2580c452ccf40cd98fd09fb388cf4f34cd | [] | no_license | sryufnal/WMCore | 472b465d1e9cff8af62b4f4a4587fd3927f82786 | 9575691bd7383e4de8bcdf83714ec71b3fec6aa7 | refs/heads/master | 2021-01-16T00:27:39.208561 | 2011-09-09T20:36:15 | 2011-09-09T20:36:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,529 | py | #!/usr/bin/env python
"""
API to query the TQ queue about its state.
It inherits the ability to connect to the TQ database
from TQComp.Apis.TQApi.
"""
__all__ = []
#import logging
import threading
import time
from inspect import stack
from TQComp.Apis.TQApi import TQApi
from TQComp.Apis.TQApiData import TASK_FIELDS, PILOT_FIELDS
class TQStateApi(TQApi):
"""
API to query the TQ queue about its state.
"""
def __init__(self, logger, tqRef, dbIface = None):
"""
Constructor. Refer to the constructor of parent TQApi.
"""
# Call our parent to set everything up
TQApi.__init__(self, logger, tqRef, dbIface)
def getStateOfTasks(self, taskIds = []):
"""
Returns a dict with the provided task IDs as keys and their
corresponding states as values.
"""
self.transaction.begin()
result = self.queries.getStateOfTasks(taskIds)
self.transaction.commit()
return result
def getTasks(self, filter={}, fields=[], limit=None, asDict=False):
"""
Returns the filtered contents of the tasks DB.
The filter argument can be used to select the type of tasks to
retrieve. It must be a dict containing fields as keys and the values
they should have. If any of the keys does not correspond to an
existing field, it will be ignored.
The optional argument fields may contain a list of fields to return.
Otherwise, all are returned. The optional argument limit can be used
to limit the maximum number of records returned.
If the optional argument 'asDict' is True, the result is a dict with
field names as keys; otherwise, the result is a list of field values.
"""
# self.logger.debug('%s: Starting' % ('getTasks'))
return self.__getTable__(filter, fields, limit, asDict, 'tq_tasks', \
TASK_FIELDS)
def getTasksArchive(self, filter={}, fields=[], limit=None, asDict=False):
"""
Returns the filtered contents of the tasks_archive DB.
Arguments as the same as those of the getTasks method.
"""
self.logger.debug('%s: Starting' % ('getTasksArchive'))
return self.__getTable__(filter, fields, limit, asDict, \
'tq_tasks_archive', TASK_FIELDS)
def getPilots(self, filter={}, fields=[], limit=None, asDict=False):
"""
Returns the filtered contents of the pilots DB.
The filter argument can be used to select the type of pilots to
retrieve. It must be a dict containing fields as keys and the values
they should have. If any of the keys does not correspond to an
existing field, it will be ignored.
The optional argument fields may contain a list of fields to return.
Otherwise, all are returned. The optional argument limit can be used
to limit the maximum number of records returned.
If the optional argument 'asDict' is True, the result is a dict with
field names as keys; otherwise, the result is a list of field values.
"""
# self.logger.debug('%s: Starting' % ('getPilots'))
return self.__getTable__(filter, fields, limit, asDict, 'tq_pilots', \
PILOT_FIELDS)
def getPilotsArchive(self, filter={}, fields=[], limit=None, asDict=False):
"""
Returns the filtered contents of the tasks_archive DB.
Arguments as the same as those of the getPilots method.
"""
# self.logger.debug('%s: Starting' % ('getPilotsArchive'))
return self.__getTable__(filter, fields, limit, asDict, \
'tq_pilots_archive', PILOT_FIELDS)
def __getTable__(self, filter, fields, limit, asDict, table, fList):
"""
Internal. For use of getTasks and getPilots.
"""
who = stack()[1][3]
# self.logger.debug('%s: Starting' % ('__getTable__'))
filter2 = {}
for key in filter.keys():
if key in fList:
filter2[key] = filter[key]
fields2 = []
for field in fields:
if field in fList:
fields2.append(field)
if filter and (not filter2):
self.logger.error('%s: Filter keys not valid: %s' % (who, filter))
self.logger.error('%s: Refusing to dump all entries' % (who))
return None
if fields and (not fields2):
self.logger.error('%s: No valid field requested: %s' % \
(who, fields))
self.logger.error('%s: Aborting query' % (who))
return None
if len(filter2) < len(filter):
self.logger.warning('%s: Not all filter keys valid: %s' % \
(who, filter))
self.logger.warning('%s: Using filter: %s' % (who, filter2))
else:
self.logger.debug('%s: Using filter: %s' % (who, filter2))
if len(fields2) < len(fields):
self.logger.warning('%s: Not all fields valid: %s' % (who, fields))
self.logger.warning('%s: Requesting fields: %s' % (who, fields2))
else:
self.logger.debug('%s: Requesting fields: %s' % (who, fields2))
# Perform query
self.transaction.begin()
result = self.queries.selectWithFilter(table, filter2, fields2,\
limit, asDict)
self.transaction.commit()
return result
def getDataPerHost(self, hostPattern = "%"):
"""
Returns a dict with pairs (se, host) as keys and list of
files (names) as values. Only hosts matching the provided
pattern are returned (all by default).
"""
self.transaction.begin()
res = self.queries.getDataPerHost(hostPattern)
self.transaction.commit()
# self.logger.debug("res: %s" % res)
d = {}
prev = ""
for row in res:
if (row[0], row[1]) == prev:
d[(row[0], row[1])].append(row[2])
else:
d[(row[0], row[1])] = [row[2]]
prev = (row[0], row[1])
return d
# TODO: This will go away when we move to cache per host
def getDataPerPilot(self, pilotPattern = "%"):
"""
Returns a dict with pairs pilot as key and a list of
files (names) as values. Only pilots matching the provided
pattern are returned (all by default).
"""
self.transaction.begin()
res = self.queries.getDataPerPilot(pilotPattern)
self.transaction.commit()
# self.logger.debug("res: %s" % res)
d = {}
prev = ""
for row in res:
if row[0] == prev:
d[row[0]].append(row[1])
else:
d[row[0]] = [row[1]]
prev = row[0]
return d
def getPilotsPerHost(self, hostPattern = "%"):
"""
Returns a dict with pairs (se, host) as keys and list of pilots
(ids) as values. Only hosts matching the provided pattern are
returned (all by default).
"""
self.transaction.begin()
res = self.queries.getPilotsPerHost(hostPattern)
self.transaction.commit()
# self.logger.debug("res: %s" % res)
d = {}
prev = ""
for row in res:
if (row[0], row[1]) == prev:
d[(row[0], row[1])].append(row[2])
else:
d[(row[0], row[1])] = [row[2]]
prev = (row[0], row[1])
return d
def getPilotsAtHost(self, host, se, asDict=False):
"""
Returns the pilots that are present in a given host (and se)
and the cache directory for each of them.
If the optional argument 'asDict' is True, the result is returned as
a list with field names as keys; otherwise, result is a list of field
values.
"""
self.transaction.begin()
result = self.queries.getPilotsAtHost(host, se, asDict)
self.transaction.commit()
return result
def countRunning(self):
"""
Returns the number of tasks in the Running state
"""
self.logger.debug('Getting number of running tasks')
# Perform query
self.transaction.begin()
result = self.queries.countRunning()
self.transaction.commit()
return result
def countQueued(self):
"""
Returns the number of tasks in the Queued state
"""
self.logger.debug('Getting number of queued tasks')
# Perform query
self.transaction.begin()
result = self.queries.countQueued()
self.transaction.commit()
return result
def getTaskCounts(self):
"""
Returns a dict with task states as keys and the number of tasks at that
state as values (regardless of assigned site or other considerations).
        Returned states at the moment are:
running, queued, failed, done.
"""
result = {}
self.transaction.begin()
result['running'] = self.queries.countRunning()
result['queued'] = self.queries.countQueued()
result['failed'] = self.queries.countFailed()
result['done'] = self.queries.countDone()
self.transaction.commit()
return result
def getPilotCountsBySite(self):
"""
Returns a dict with SEs as keys and dicts as values. These internal
dicts have the strings 'ActivePilots' (for pilots running a task)
and 'IdlePilots' (for pilots registered but not running a task yet)
as keys and the corresponding number of pilots in those states as
values.
Output format:
{
'se1': {'ActivePilots': 30, 'IdlePilots': 5}
'se2': {'ActivePilots': 20, 'IdlePilots': 0}
}
"""
result = {}
self.transaction.begin()
active = self.queries.getActivePilotsBySite()
idle = self.queries.getIdlePilotsBySite()
self.transaction.commit()
print active
print idle
for i in active:
result[i[0]] = {'ActivePilots': i[1]}
result[i[0]]['IdlePilots'] = 0
for i in idle:
if not result.has_key(i[0]):
result[i[0]] = {'ActivePilots': 0}
result[i[0]]['IdlePilots'] = i[1]
if ( len(active) == 0 and len(idle) == 0 ):
result = {"NoRecords":{'ActivePilots':0, 'IdlePilots':0} }
return result
def countTasksBySeReq(self):
"""
Counts the number of queued tasks grouped per the req_se field
(list of valid SEs to run on, or NULL for any). The returned
value is formatted as a list of dicts.
"""
self.transaction.begin()
counts = self.queries.countTasksBySeReq()
for count in counts:
if count['sites']:
count['sites'] = count['sites'].split(',')
self.transaction.commit()
return counts
def archiveTasksById(self, taskIds = []):
"""
Archive all tasks whose Id is included in the 'taskIds' list
(and exist in the queue): copy them from tq_tasks to
tq_tasks_archive, then remove them from tq_tasks.
"""
if taskIds:
self.transaction.begin()
self.queries.archiveTasksById(taskIds)
self.queries.removeTasksById(taskIds)
self.transaction.commit()
def getPilotLogs(self, pilotId, limit = None, fields = None, asDict = True):
"""
Get the records in tq_pilot_log that correspond to the specified
pilotId (or to all if None). If limit is not None, do not return
more than those records. If fields is not None, but a list, return
only the fields whose name is specified in that list (presence of
non existing fields will produce an error). The last argument selects
whether the result must be a dict or a list of the values.
"""
self.transaction.begin()
result = self.queries.getPilotLogs(pilotId, limit, fields, asDict)
self.transaction.commit()
return result
| [
"metson@4525493e-7705-40b1-a816-d608a930855b"
] | metson@4525493e-7705-40b1-a816-d608a930855b |
36da4a1a8ee737c91a2e4cfd313a97f8c36a836a | 56482e0b2ce6517fff41d0f78e0c0ed000d977a1 | /fmcapi/api_objects/helper_functions.py | 73723b685d3181277032d2a1b3b56c4c54e1fb90 | [
"BSD-3-Clause"
] | permissive | banzigaga/fmcapi | ab4d7aaaf4be4f2b0686d07b6272f8b9531577da | fd924de96e200ca8e0d5088b27a5abaf6f915bc6 | refs/heads/master | 2020-12-11T14:45:07.896571 | 2019-12-12T20:02:07 | 2019-12-12T20:02:07 | 233,876,405 | 1 | 0 | BSD-3-Clause | 2020-01-14T15:46:26 | 2020-01-14T15:46:26 | null | UTF-8 | Python | false | false | 4,316 | py | """Misc methods/functions that are used by the fmcapi package's modules."""
import re
import ipaddress
import json
import logging
logging.debug(f"In the {__name__} module.")
def syntax_correcter(value, permitted_syntax="""[.\w\d_\-]""", replacer="_"):
"""
Check 'value' for invalid characters (identified by 'permitted_syntax') and replace them with 'replacer'.
:param value: (str) String to be checked.
:param permitted_syntax: (str) regex of allowed characters.
:param replacer: (str) character used to replace invalid characters.
:return: (str) Modified string with "updated" characters.
"""
logging.debug("In syntax_correcter() helper_function.")
new_value = ""
for char in range(0, len(value)):
if not re.match(permitted_syntax, value[char]):
new_value += replacer
else:
new_value += value[char]
return new_value
def get_networkaddress_type(value):
"""
Check to see whether 'value' is a host, range, or network.
:param value: (str) x.x.x.x, x.x.x.x/xx, or x.x.x.x-x.x.x.x
:return: (str) 'host', 'network', or 'range'
"""
logging.debug("In get_networkaddress_type() helper_function.")
if "/" in value:
ip, bitmask = value.split("/")
if ip == "32" or bitmask == "128":
return "host"
else:
return "network"
else:
if "-" in value:
return "range"
else:
return "host"
def is_ip(ip):
"""
Check whether the provided string is an IP address.
:param ip: (str) x.x.x.x
:return: (boolean)
"""
logging.debug("In is_ip() helper_function.")
try:
ipaddress.ip_address(ip)
except ValueError as err:
logging.error(err)
return False
return True
def is_ip_network(ip):
"""
Check whether provided string is a valid network address.
See if the provided IP/SM is the "network address" of the subnet.
:param ip: (str) x.x.x.x/xx
:return: (boolean)
"""
logging.debug("In is_ip_network() helper_function.")
try:
ipaddress.ip_network(ip)
except ValueError as err:
logging.error(err)
return False
return True
def validate_ip_bitmask_range(value="", value_type=""):
"""
We need to check the provided IP address (or range of addresses) and make sure the IPs are valid.
:param value: (str) x.x.x.x, x.x.x.x/xx, or x.x.x.x-x.x.x.x
:param value_type: (str) 'host', 'network', or 'range'
    :return: (boolean) True if 'value' is valid for the given 'value_type' (only the boolean is returned, not the dict)
"""
logging.debug("In validate_ip_bitmask_range() helper_function.")
return_dict = {"value": value, "valid": False}
if value_type == "range":
for ip in value.split("-"):
if is_ip(ip):
return_dict["valid"] = True
elif value_type == "host" or value_type == "network":
if is_ip_network(value):
return_dict["valid"] = True
return return_dict["valid"]
def mocked_requests_get(**kwargs):
"""
Use to "mock up" a response from using the "requests" library to avoid actually using the "requests" library.
:param kwargs:
    :return: (MockResponse) object that mimics a requests response with .text, .status_code and .close()
"""
logging.debug("In mocked_requests_get() helper_function.")
class MockResponse:
def __init__(self, **kwargs):
logging.debug("In MockResponse __init__ method.")
self.text = json.dumps(kwargs["text"])
self.status_code = kwargs["status_code"]
def close(self):
logging.debug("In MockResponse close method.")
return True
return MockResponse(**kwargs)
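# Illustrative use of the mock above (not part of the original module):
#   fake = mocked_requests_get(text={"id": 1}, status_code=200)
#   fake.status_code  -> 200
#   fake.text         -> '{"id": 1}'
#   fake.close()      -> True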
def validate_vlans(start_vlan, end_vlan=""):
"""
Validate that the start_vlan and end_vlan numbers are in 1 - 4094 range. If not, then return 1, 4094.
:param start_vlan: (int) Lower VLAN number in range.
:param end_vlan: (int) Upper VLAN number in range.
:return: (int) start_vlan, (int) end_vlan)
"""
logging.debug("In validate_vlans() helper_function.")
if end_vlan == "":
end_vlan = start_vlan
if int(end_vlan) < int(start_vlan):
start_vlan, end_vlan = end_vlan, start_vlan
if 0 < int(start_vlan) < 4095 and 0 < int(end_vlan) < 4095:
return start_vlan, end_vlan
else:
return 1, 4094
| [
"[email protected]"
] | |
258e3bc17596939fef5f5019ea6b0ffe123d50ca | a512b8893b0d2de827d6292e810f3a98b41e132c | /Week3/Day4/Solutions/Python/prog4.py | d6cc6209f9f236ad8243f6ba1b9ee1e54bfd07d1 | [] | no_license | Audarya07/Daily-Flash-Codes | d771079fd0d470e2d3e05679f17f32fb64b4f426 | cf96ca2b1676b038e243fac67be778381492ffeb | refs/heads/master | 2022-11-06T15:37:47.180729 | 2020-06-25T16:20:55 | 2020-06-25T16:20:55 | 274,960,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | x = 7
for i in range(4):
for j in range(i+1):
print(x,end=" ")
x-=1
x+=1
print()
| [
"[email protected]"
] | |
7036e36ad6e119dac97c2175715c6b24857595cf | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AnttechOceanbaseVerificationcodeVerifyResponse.py | 2808330b22ff50c4c507563278735baa72cdba13 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 479 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AnttechOceanbaseVerificationcodeVerifyResponse(AlipayResponse):
def __init__(self):
super(AnttechOceanbaseVerificationcodeVerifyResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AnttechOceanbaseVerificationcodeVerifyResponse, self).parse_response_content(response_content)
| [
"[email protected]"
] | |
ca482b4f1f57cfad75b936c220d8d2f4d27aa7ae | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/2/f6a.py | b76a37dd4ff530a849fa7cefd2685a4fec605875 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'f6A':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
f5c468beecaa5a2685e310d47134afa02ab63714 | 386d5d4f8f102e701d02b326cd066f520e3dff9f | /ProjectApplication/project_core/migrations/0166_organisation_long_name_english.py | 3e520cccaf379fa51b06988bbdeee96f5063231b | [
"MIT"
] | permissive | Swiss-Polar-Institute/project-application | ae2561c3ae2c1d5412d165d959ce2e5886135e0a | 7dc4a9f7e0f8d28c89977b85f99bc5e35ea77d43 | refs/heads/master | 2023-08-31T04:01:23.492272 | 2023-08-25T14:33:02 | 2023-08-25T14:33:02 | 206,330,401 | 7 | 5 | MIT | 2023-09-13T08:03:53 | 2019-09-04T13:49:39 | Python | UTF-8 | Python | false | false | 496 | py | # Generated by Django 3.2.3 on 2021-05-24 14:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project_core', '0165_callcareerstage_enabled'),
]
operations = [
migrations.AddField(
model_name='organisation',
name='long_name_english',
field=models.CharField(blank=True, help_text='English name by which the organisation is known', max_length=100, null=True),
),
]
| [
"[email protected]"
] | |
09499b5a40b99525ef5398364dfabc50873929aa | d69995905dcd2522e53082c32c10f582b6779cba | /apps/testsuites/utils.py | 046c1755163e1f7524679acbd2cd94c4a979ed31 | [] | no_license | liqi629/learn_nm_drf | 010b35ab4254267a601a13f29e2ea3adb3be615e | ad0eef6fe2b338d613974850977de51b9d82ccc6 | refs/heads/master | 2023-02-10T14:33:33.521450 | 2021-01-13T09:14:23 | 2021-01-13T09:14:23 | 324,942,039 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 854 | py |
import re
from datetime import datetime
from apps.testcases.models import Testcases
def modify_output(results):
datas_list = []
for item in results:
mtch = re.search(r'(.*)T(.*)\..*?',item['create_time'])
item['create_time'] = mtch.group(1) +' '+mtch.group(2)
mtch = re.search(r'(.*)T(.*)\..*?', item['update_time'])
item['update_time'] = mtch.group(1) + ' ' + mtch.group(2)
datas_list.append(item)
return datas_list
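# Example of the timestamp rewrite performed above (illustrative values only):
#   {'create_time': '2021-01-13T09:14:23.123456', 'update_time': '2021-01-13T09:15:01.000001', ...}
# becomes
#   {'create_time': '2021-01-13 09:14:23', 'update_time': '2021-01-13 09:15:01', ...}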
def get_testcases_by_interface_ids(ids_list):
"""
通过接口ID获取用例
:param ids_list:
:return:
"""
one_list =[]
for interface_id in ids_list:
testcases_qs = Testcases.objects.values_list('id',flat=True).\
filter(interface_id=interface_id,is_delete=False)
one_list.extend(list(testcases_qs))
return one_list | [
"[email protected]"
] | |
cb5c4ccba717cd9c2920942e071419e95cf3aa0d | 2d276785c3663d4798be462115291c4706dbd255 | /Python从菜鸟到高手/chapter2/demo2.03.py | 5e0bf41280f733cc7bad2b80a3e3d347a440b9a8 | [] | no_license | bupthl/Python | 81c92433bd955663e6cda5fe7cab5ea3d067c3de | bdb33aeeb179a43100b9ef7129a925c63a133fd3 | refs/heads/master | 2022-02-21T11:02:40.195265 | 2019-08-16T05:49:18 | 2019-08-16T05:49:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | '''
-------- Source code for "Python从菜鸟到高手" (Python: From Novice to Master) ------------
Copyright (c) 欧瑞科技. All rights reserved.
Author: Li Ning (李宁)
For any technical questions, please join the QQ technical discussion group: 264268059,
or follow the "极客起源" subscription account or the "欧瑞科技" service account, or scan the QR codes in the source-code root directory to follow them.
If the QQ group is full, please visit https://geekori.com and check the latest QQ groups listed on the right-hand side; you can also scan the code there to follow the official account.
"欧瑞学院" is the online IT education academy under 欧瑞科技 and contains a large number of cutting-edge IT video courses.
Please visit http://geekori.com/edu or follow the subscription/service accounts mentioned above to enter the mobile version of the academy.
"极客题库" is the online question bank under 欧瑞科技; please scan the mini-program code in the source-code root directory to install the "极客题库" mini program.
For more information, please visit the following page:
https://geekori.com/help/videocourse/readme.html
'''
# NOTE: this script needs to be run on Windows ('cls' is a Windows shell command)
import os
import sys
f_handler=open('out.log', 'w')   # log file that will capture anything written to stdout
oldstdout = sys.stdout           # remember the original stdout
sys.stdout=f_handler             # redirect stdout to the log file
os.system('cls')                 # run the Windows "clear screen" command while stdout is redirected
sys.stdout = oldstdout           # restore the original stdout
| [
"[email protected]"
] | |
90f5d0ecdb0f132e76809d77dca276ad9a766253 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/eventhub/azure-eventhub-checkpointstoretable/azure/eventhub/extensions/checkpointstoretable/_vendor/data/tables/aio/_base_client_async.py | 06b3136394d128cf3c39652676f430c56596b3be | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 6,981 | py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import Any, List, Mapping, Optional, Union, TYPE_CHECKING
from uuid import uuid4
from azure.core.credentials import AzureSasCredential, AzureNamedKeyCredential
from azure.core.pipeline.policies import (
ContentDecodePolicy,
AsyncBearerTokenCredentialPolicy,
AsyncRedirectPolicy,
DistributedTracingPolicy,
HttpLoggingPolicy,
UserAgentPolicy,
ProxyPolicy,
AzureSasCredentialPolicy,
RequestIdPolicy,
CustomHookPolicy,
NetworkTraceLoggingPolicy,
)
from azure.core.pipeline.transport import (
AsyncHttpTransport,
HttpRequest,
)
from .._generated.aio import AzureTable
from .._base_client import AccountHostsMixin, get_api_version, extract_batch_part_metadata
from .._authentication import SharedKeyCredentialPolicy
from .._constants import STORAGE_OAUTH_SCOPE
from .._error import RequestTooLargeError, TableTransactionError, _decode_error
from .._policies import StorageHosts, StorageHeadersPolicy
from .._sdk_moniker import SDK_MONIKER
from ._policies_async import AsyncTablesRetryPolicy
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
class AsyncTablesBaseClient(AccountHostsMixin):
def __init__( # pylint: disable=missing-client-constructor-parameter-credential
self,
endpoint: str,
*,
credential: Optional[Union[AzureSasCredential, AzureNamedKeyCredential, "AsyncTokenCredential"]] = None,
**kwargs: Any
) -> None:
super(AsyncTablesBaseClient, self).__init__(endpoint, credential=credential, **kwargs) # type: ignore
self._client = AzureTable(
self.url,
policies=kwargs.pop('policies', self._policies),
**kwargs
)
self._client._config.version = get_api_version(kwargs, self._client._config.version) # pylint: disable=protected-access
async def __aenter__(self):
await self._client.__aenter__()
return self
async def __aexit__(self, *args):
await self._client.__aexit__(*args)
async def close(self) -> None:
"""This method is to close the sockets opened by the client.
It need not be used when using with a context manager.
"""
await self._client.close()
def _configure_credential(self, credential):
# type: (Any) -> None
if hasattr(credential, "get_token"):
self._credential_policy = AsyncBearerTokenCredentialPolicy( # type: ignore
credential, STORAGE_OAUTH_SCOPE
)
elif isinstance(credential, SharedKeyCredentialPolicy):
self._credential_policy = credential # type: ignore
elif isinstance(credential, AzureSasCredential):
self._credential_policy = AzureSasCredentialPolicy(credential) # type: ignore
elif isinstance(credential, AzureNamedKeyCredential):
self._credential_policy = SharedKeyCredentialPolicy(credential) # type: ignore
elif credential is not None:
raise TypeError("Unsupported credential: {}".format(credential))
def _configure_policies(self, **kwargs):
return [
RequestIdPolicy(**kwargs),
StorageHeadersPolicy(**kwargs),
UserAgentPolicy(sdk_moniker=SDK_MONIKER, **kwargs),
ProxyPolicy(**kwargs),
self._credential_policy,
ContentDecodePolicy(response_encoding="utf-8"),
AsyncRedirectPolicy(**kwargs),
StorageHosts(**kwargs),
AsyncTablesRetryPolicy(**kwargs),
CustomHookPolicy(**kwargs),
NetworkTraceLoggingPolicy(**kwargs),
DistributedTracingPolicy(**kwargs),
HttpLoggingPolicy(**kwargs),
]
async def _batch_send(self, *reqs: "HttpRequest", **kwargs) -> List[Mapping[str, Any]]:
"""Given a series of request, do a Storage batch call."""
# Pop it here, so requests doesn't feel bad about additional kwarg
policies = [StorageHeadersPolicy()]
changeset = HttpRequest("POST", None) # type: ignore
changeset.set_multipart_mixed(
*reqs, policies=policies, boundary="changeset_{}".format(uuid4())
)
request = self._client._client.post( # pylint: disable=protected-access
url="https://{}/$batch".format(self._primary_hostname),
headers={
"x-ms-version": self.api_version,
"DataServiceVersion": "3.0",
"MaxDataServiceVersion": "3.0;NetFx",
"Content-Type": "application/json",
"Accept": "application/json"
},
)
request.set_multipart_mixed(
changeset,
policies=policies,
enforce_https=False,
boundary="batch_{}".format(uuid4()),
)
pipeline_response = await self._client._client._pipeline.run(request, **kwargs) # pylint: disable=protected-access
response = pipeline_response.http_response
# TODO: Check for proper error model deserialization
if response.status_code == 413:
raise _decode_error(
response,
error_message="The transaction request was too large",
error_type=RequestTooLargeError)
if response.status_code != 202:
raise _decode_error(response)
parts_iter = response.parts()
parts = []
async for p in parts_iter:
parts.append(p)
error_parts = [p for p in parts if not 200 <= p.status_code < 300]
if any(error_parts):
if error_parts[0].status_code == 413:
raise _decode_error(
response,
error_message="The transaction request was too large",
error_type=RequestTooLargeError)
raise _decode_error(
response=error_parts[0],
error_type=TableTransactionError,
)
return [extract_batch_part_metadata(p) for p in parts]
class AsyncTransportWrapper(AsyncHttpTransport):
"""Wrapper class that ensures that an inner client created
by a `get_client` method does not close the outer transport for the parent
when used in a context manager.
"""
def __init__(self, async_transport):
self._transport = async_transport
async def send(self, request, **kwargs):
return await self._transport.send(request, **kwargs)
async def open(self):
pass
async def close(self):
pass
async def __aenter__(self):
pass
async def __aexit__(self, *args): # pylint: disable=arguments-differ
pass
| [
"[email protected]"
] | |
feb64c06c982a14449c43ee53cc8a6183106fe90 | 093b7b3ce929fa1383d5c1571271807f171aec23 | /rocket/entities/api/endpoint.py | 971b53d78b7948b93078e3f8e4b2b12806ebba05 | [] | no_license | takaaki-mizuno/smart-rocket-cli | fa728a13bc80781aea182922f7a0ee2c546518b1 | 1b4a522c54e94857803f9be246f0c480e50dae3d | refs/heads/master | 2020-03-21T02:06:54.115723 | 2018-06-25T22:43:32 | 2018-06-25T22:43:32 | 137,980,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | from .parameter import Parameter
from .response import Reference
class Endpoint:
def __init__(self, path, method, definition, spec):
self.path = path
self.method = method
self.definition = definition
self.spec = spec
self.parameters = []
self.response = None
self.parse()
def parse(self):
self.parse_parameters()
self.parse_response()
def parse_parameters(self):
if 'parameters' not in self.definition:
return
for parameter in self.definition['parameters']:
self.parameters.append(Parameter(parameter))
def parse_response(self):
if 'responses' not in self.definition:
return
for name, definition in self.definition['responses'].items():
if name[0:1] == '2' and 'schema' in definition:
self.response = Reference(definition['schema']['$ref'])
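        # Illustrative example (hypothetical OpenAPI/Swagger-style snippet, not from this repo):
        # a responses block like {'200': {'schema': {'$ref': '#/definitions/User'}}}
        # yields self.response = Reference('#/definitions/User').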
| [
"[email protected]"
] | |
8d5c12675fbee82c4d20320e48951b0b9f1842e2 | f716ec8240b775170283cb6d43da50d0ff3561b7 | /testkraken/testing_functions/check_output.py | 9eeb9c524b75d5c5bbe91ba9471dee63bc318ab0 | [] | no_license | jdkent/testkraken | 63493df89a5ae427dab9da14fc794de96cda6cef | 3b5d83f0eeaad7969902cfa867e6b4a1d4d7e691 | refs/heads/master | 2020-06-01T11:39:57.286709 | 2019-06-07T09:55:16 | 2019-06-07T09:55:16 | 190,766,540 | 0 | 0 | null | 2019-06-07T15:24:49 | 2019-06-07T15:24:49 | null | UTF-8 | Python | false | false | 3,766 | py | #/usr/bin/env python
from __future__ import division
import json
import os, inspect
from glob import glob
import pandas as pd
import numpy as np
import pdb
def creating_dataframe(files_list):
""" reads every json file from the files_list and creates one data frame """
outputmap = {0: 'voxels', 1: 'volume'}
df = pd.DataFrame()
for (i, filename) in enumerate(files_list):
with open(filename, 'rt') as fp:
in_dict = json.load(fp)
        # in cwl I'm losing the directory name
#subject = filename.split(os.path.sep)[-3]
subject = "subject_{}".format(i)
in_dict_mod = {}
for k, v in in_dict.items():
if isinstance(v, list):
for idx, value in enumerate(v):
in_dict_mod["%s_%s" % (k, outputmap[idx])] = value
else:
in_dict_mod[k] = v
df[subject] = pd.Series(in_dict_mod)
return df.T
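# Illustrative transformation performed above (values made up): a per-subject JSON entry like
#   {"white": [120000, 98500.3], "age": 31}
# contributes the row
#   {"white_voxels": 120000, "white_volume": 98500.3, "age": 31}
# indexed as "subject_<i>" in the returned (transposed) dataframe.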
def check_output(file_out, file_ref=None, name=None, **kwargs):
if type(file_ref) is list:
expected_files = file_ref
elif type(file_ref) is str:
expected_files = [file_ref]
if type(file_out) is list:
output_files = file_out
elif type(file_out) is str:
output_files = [file_out]
df_exp = creating_dataframe(expected_files)
df_out = creating_dataframe(output_files)
#df_exp.to_csv('output/ExpectedOutput.csv')
#df_out.to_csv('output/ActualOutput.csv')
    # DJ TODO: this doesn't work, check with the original repo
#df_diff = df_exp - df_out
#df_diff = df_diff.dropna()
report_filename = "report_{}.json".format(name)
out = {}
# chosing just a few columns
keys_test = ["white_voxels", "gray_voxels", "csf_voxels",
"Right-Hippocampus_voxels", "Right-Amygdala_voxels", "Right-Caudate_voxels"]
out["index_name"] = list(df_exp.index)
for key in keys_test:
out["re_{}".format(key.replace("_voxels", "").replace("Right-", "R-"))] = []
for subj in df_exp.index:
for key in keys_test:
if df_exp.loc[subj, key] != 0.:
out["re_{}".format(key.replace("_voxels", "").replace("Right-", "R-"))].append(round(
1. * abs(df_exp.loc[subj, key] - df_out.loc[subj, key]) / df_exp.loc[subj, key], 5))
elif df_out.loc[subj, key] != 0.:
out["re_{}".format(key.replace("_voxels", "").replace("Right-", "R-"))].append(1.)
else:
out["re_{}".format(key.replace("_voxels", "").replace("Right-", "R-"))].append(0.)
out["regr"] = []
for i, subj in enumerate(out["index_name"]):
list_tmp = []
for k in out.keys():
if k not in ["index_name", "regr"]:
list_tmp.append(out[k][i])
try:
assert max(list_tmp) < 0.05
out["regr"].append("PASSED")
except(AssertionError):
out["regr"].append("FAILED")
#out_max = {"max_diff": max(diff)}
with open(report_filename, "w") as f:
json.dump(out, f)
if __name__ == "__main__":
from argparse import ArgumentParser, RawTextHelpFormatter
defstr = ' (default %(default)s)'
parser = ArgumentParser(description=__doc__,
formatter_class=RawTextHelpFormatter)
parser.add_argument("-out", nargs="+", dest="file_out",
help="file with the output for testing")
parser.add_argument("-ref", nargs="+", dest="file_ref",
help="file with the reference output")
parser.add_argument("-name", dest="name",
help="name of the test provided by a user")
args = parser.parse_args()
check_output(**vars(args))
| [
"[email protected]"
] | |
533ca7f861f7a6a34ff0265d601d017f0f9835f7 | b6d82335dfe93f86977e4cbafe592eff32536712 | /src/aws_hashicorp_packer_reaper/schema.py | f028da830fb0a93c2da236b6aabc65f3a6ed1a36 | [
"Apache-2.0"
] | permissive | felixubierar/aws-hashicorp-packer-reaper | 2bb9709b3c95c53d519fa5d009cbafc48232b1b4 | 9b29f6896f43a01f47d2a894059e4afa191a8ff2 | refs/heads/master | 2023-05-19T17:01:23.872830 | 2021-06-14T08:57:50 | 2021-06-14T08:57:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,835 | py | import durations
from aws_hashicorp_packer_reaper.logger import log
from jsonschema.exceptions import ValidationError
from jsonschema import validators, Draft7Validator, FormatChecker, validators
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"mode": {
"type": "string",
"description": "of operations",
"enum": ["stop", "terminate"],
},
"older_than": {
"type": "string",
"description": "period since launched",
"format": "duration",
"default": "2h",
},
"dry_run": {
"type": "boolean",
"description": "if you only want output",
"default": False,
},
"tags": {
"type": "array",
"description": "to select EC2 instances with",
"items": {"type": "string", "minLength": 1},
},
},
"required": [
"mode",
"older_than",
],
}
@FormatChecker.cls_checks("duration")
def duration_checker(value) -> bool:
"""
checks whether the `value` is a valid duration.
>>> duration_checker({})
False
>>> duration_checker(1.0)
False
>>> duration_checker("2h")
True
>>> duration_checker("hundred days")
False
"""
try:
if isinstance(value, str):
durations.Duration(value)
return True
except durations.exceptions.InvalidTokenError as e:
pass
return False
def extend_with_default(validator_class):
validate_properties = validator_class.VALIDATORS["properties"]
def set_defaults(validator, properties, instance, schema):
for property, subschema in properties.items():
if "default" in subschema:
instance.setdefault(property, subschema["default"])
for error in validate_properties(
validator,
properties,
instance,
schema,
):
yield error
return validators.extend(
validator_class,
{"properties": set_defaults},
)
validator = extend_with_default(Draft7Validator)(schema, format_checker=FormatChecker())
def validate(request: dict) -> bool:
"""
return True and completes the missing values if the dictionary matches the schema, otherwise False.
>>> validate({"mode": "stoep"})
False
>>> validate({"mode": "stop", "older_than": "sdfsdfsf dagen"})
False
>>> x = {"mode": "stop"}
>>> validate(x)
True
>>> print(x)
{'mode': 'stop', 'older_than': '2h', 'dry_run': False}
"""
try:
validator.validate(request)
return True
except ValidationError as e:
log.error("invalid request received: %s" % str(e.message))
return False
| [
"[email protected]"
] | |
3d6fae9b8527d4f8823a630b37b8fecb7d2d8207 | b9f21bc90eed396dde950c30a1b482be0fb8ba30 | /AtCoder/ABC/114-B_1.py | 85c142e193b383c2ee95f438be3c37a3edec97b1 | [] | no_license | nanigasi-san/nanigasi | 127a21db1b31759908fd74cebabe240e5abf8267 | 5e3c3e78344dd9558cafe439beb272b9a80d0f3a | refs/heads/master | 2020-04-03T18:57:40.132489 | 2019-06-19T15:03:38 | 2019-06-19T15:03:38 | 155,504,101 | 1 | 0 | null | 2019-02-20T09:40:05 | 2018-10-31T05:33:11 | Python | UTF-8 | Python | false | false | 155 | py | S = list(input())
list = []
for _ in range(len(S)-2):
Sx = S[_]+S[_+1]+S[_+2]
sa = int(Sx)-753
list.append(abs(sa))
list.sort()
print(list[0])
| [
"[email protected]"
] | |
55cd6829443eff74f5355c68f65b9b11ca2a37db | 1ac99f8065a2646bdb8ea9003fd5930341fb0cf4 | /flow_of_controls/while_basicprogram.py | 5e2dae225febd8aa313d2dcaf587b41771c78bb9 | [] | no_license | krishnanunni-pr/Pyrhon-Django | 894547f3d4d22dce3fff14e88815122c12c145b5 | c59471f947ceb103bb27a19e8a2a160e8ada529b | refs/heads/master | 2023-07-29T19:40:38.199104 | 2021-09-09T18:12:24 | 2021-09-09T18:12:24 | 385,128,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | #print i
i=10
while i>0:
print(i)
i-=1 | [
"[email protected]"
] | |
a4f92fc07660a377a8f5ca8b05dec21c348b7a8e | 2caa47f0bdb2f03469a847c3ba39496de315d992 | /Contest/ABC086/c/main.py | 1e87c7d1f6b031f11c2f42d5104c63354529a69a | [
"CC0-1.0"
] | permissive | mpses/AtCoder | 9023e44885dc67c4131762281193c24b69d3b6da | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | refs/heads/master | 2023-03-23T17:00:11.646508 | 2021-03-20T12:21:19 | 2021-03-20T12:21:19 | 287,489,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | #!/usr/bin/env python3
n, *a = map(int, open(0).read().split())
a = [0]*3 + a
for i in range(n):
i *= 3
t, x, y = map(lambda j:abs(a[i+3+j]-a[i+j]), [0,1,2])
d = x+y
if d>t or d%2-t%2: print("No"); exit()
print("Yes") | [
"[email protected]"
] | |
dd7863b3fccfeb68a73493c51a60650a5878a877 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /DaVinciDev_v38r1p1/Phys/StrippingArchive/python/StrippingArchive/Stripping13/StrippingB0q2DplusMuX.py | 14901f74eb54b516839193dda755bc482337e0e4 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,405 | py | # $Id: StrippingB0q2DplusMuX.py,v 1.6 2010-09-03 00:05:35 rlambert Exp $
"""
Module for construction of B0q->DplusMuNuX lines
==== Description of the lines ====
B0q->DplusMuNuX0 with X0 being neutrino, gamma, pi0, whatever,
Dq->KKpi, as required for Afs analysis in semileptonics.
(Ds+->KKpi, D+->KKpi)
There are three lines:
A) Presel, sidebands in all variables, to be prescaled as rate increases
B) MC09, Full MC09-tuned offline selection
C) Tuned stripping selection (tuned on real data)
The Presel is initially scaled to 10%, which should be fine up to 100 pb-1.
The MC09 selection expects 10^7 events in 1 fb-1, and is the hardest selection at the moment.
The Tuned selection is inbetween Presel and MC09, to achieve the target retention ~0.01% and timing of <0.4 ms/event
In the first stage, this was tuned on some early preselected data, where it was found the MC09 cuts on kinematic variables could be applied.
The PID and tracking cuts were optimised on the data itself, and a slightly looser version of the optimal selection is now used here.
It was found that the 12b cuts were too tight in the Tuned selection, so now they have been reduced.
We will soon add an MC10 selection, and prescale away the MC09 selection at that time.
==== Description of the configuration ====
The selection cuts are stored in the dictionaries: confdict['Presel'] or 'MC09' or 'Tuned'.
The cuts are stored only if they are different between the lines, common cuts are hardcoded.
The configuration class makes all the selections and the lines, when passed a correct dictionary.
The lines look basically like this:
1) Choose a Muon
2) Make a D
3) Make a B
To look at all the configurable cuts, see StrippingB0q2DplusMuX.confdict
==== How to use ====
To configure all lines, just instantiate the class:
all=B0q2DplusMuXAllLinesConf('B0q2DplusMuX', confdict)
Then to print all cuts do:
all.printCuts()
You can configure one line at a time with the B0q2DplusMuXOneLineConf class:
one=B0q2DplusMuXOneLineConf('B0q2DplusMuX'+'Tuned',confdict['Tuned'])
"""
__author__ = [ 'Rob Lambert' ]
__date__ = '2010-08-11'
__version = '$Revision: 1.6 $'
#### Which VertexFitter to use? ####
#### Next is the dictionary of all tunable cuts ########
#### It is separated into the different lines ########
confdict={
'Presel' : { 'Prescale' : 0.02 ,
'Postscale' : 1.0 ,
#muon paramters
'MuPT' : 500, #MeV
'MuPidPi' : -5.,
'MuPidK' : -10.,
'MuTrChi2' : 9,
#kaon parameters
'KPT' : 300, #MeV
'KIP' : 0.03, #mm
'KPidPi' : -5,
'KPidMu' : -40,
'KPidP' : -40,
'KTrChi2' : 16,
'KIPChi2' : 4,
#pion parameters
'PiPidK' : -40,
'PiIP' : 0.01, #mm
'PiPidMu' : -5,
'PiTrChi2' : 16,
'PiIPChi2' : 4,
#D-resonance parameters
'DPT' : 1000, #MeV
'D_APT' : 1000, #MeV
'D_VPCHI2' : 0.000250,
'D_BPVVDCHI2' : 49,
#B-resonance parameters
'B_VPCHI2' : 0.000250,
'B_BPVDIRA' : 0.997
},
'Tuned' : { 'Prescale' : 1.0 ,
'Postscale' : 1.0 ,
#muon paramters
'MuPT' : 500, #MeV
'MuPidPi' : -1.,
'MuPidK' : -5,
'MuTrChi2' : 5,
#kaon parameters
'KPT' : 400, #MeV
'KIP' : 0.04, #mm
'KPidPi' : 0,
'KPidMu' : 5,
'KPidP' : -10,
'KTrChi2' : 5,
'KIPChi2' : 4,
#pion parameters
'PiPidK' : -10,
'PiIP' : 0.04, #mm
'PiPidMu' : -5,
'PiTrChi2' : 10,
'PiIPChi2' : 7,
#D-resonance parameters
'DPT' : 1500, #MeV
'D_APT' : 1200, #MeV
'D_VPCHI2' : 0.010,
'D_BPVVDCHI2' : 144,
#B-resonance parameters
'B_VPCHI2' : 0.010,
'B_BPVDIRA' : 0.9980
},
'MC09' : { 'Prescale' : 1.0 ,
'Postscale' : 1.0 ,
#muon paramters
'MuPT' : 600, #MeV
'MuPidPi' : 0.,
'MuPidK' : -10,
'MuTrChi2' : 2,
#kaon parameters
'KPT' : 400, #MeV
'KIP' : 0.03, #mm
'KPidPi' : 7,
'KPidMu' : 10,
'KPidP' : -10,
'KTrChi2' : 3,
'KIPChi2' : 4,
#pion parameters
'PiPidK' : -5,
'PiIP' : 0.01, #mm
'PiPidMu' : -2,
'PiTrChi2' : 9,
'PiIPChi2' : 7,
#D-resonance parameters
'DPT' : 1500, #MeV
'D_APT' : 1200, #MeV
'D_VPCHI2' : 0.0150,
'D_BPVVDCHI2' : 144,
#B-resonance parameters
'B_VPCHI2' : 0.0150,
'B_BPVDIRA' : 0.9980
}
}
from StrippingUtils.Utils import LineBuilder, MasterLineBuilder
name = "B0q2DplusMuX"
class B0q2DplusMuXAllLinesConf(MasterLineBuilder):
"""
Configuration object for all B0qDplusMuX lines
B0qDplusMuX attempts to strip (KKpi)Mu.
Several different lines are used, and for more information call help on the module
usage:
configdict={'LineNameSuffix' : {...},
'LineNameSuffix2': {...} }
B0q2DplusMuXAllLinesConf('B0q2DplusMuX', config, offLines=[] )
To turn off lines which otherwise would be created, add the name
of the line to offLines.
To only configure/run one line, it's better to use the B0q2DplusMuXOneLineConf class.
The created lines appear as a tuple from the lines() method
To print out all the cuts, use the printCuts method
"""
def __init__(self, name, config):
'''In the constructor we make all the lines, and configure them all
name is the initial name of the lines, i.e. B0q2DplusMuX
config is the dictionary of {LineSuffix : configuration}
'''
MasterLineBuilder.__init__(self, name, config, B0q2DplusMuXOneLineConf)
def printCuts(self):
'''Print out all the configured cuts for the lines you asked for'''
for aline in self.slaves():
print '===='
aline.printCuts()
class B0q2DplusMuXOneLineConf(LineBuilder):
"""
Configuration object for a B0qDplusMuX line
usage: config={...}
B0q2DplusMuXConf(name, config)
Will make lines called name with the config configurations
The cuts are configuration parameter only if they are different between the lines,
common cuts are hardcoded.
Use conf.printCuts to check the cuts in python
The selections are available individually as MuSel, DSel and B0Sel
The lines method returns the configured line
"""
Line=None
Selections=[]
TopSelectionSeq=None
MuCut=''
KCut=''
PiCut=''
DCombCut=''
DCut=''
BCombCut=''
BCut=''
MuSel=None
DSel=None
B0Sel=None
__configuration_keys__=[
'Prescale',
'Postscale',
#muon paramters
'MuPT',
'MuPidPi',
'MuPidK',
'MuTrChi2',
#kaon parameters
'KPT',
'KIP',
'KPidPi',
'KPidMu',
'KPidP',
'KTrChi2',
'KIPChi2',
#pion parameters
'PiPidK',
'PiIP',
'PiPidMu',
'PiTrChi2',
'PiIPChi2',
#D-resonance parameters
'DPT',
'D_APT',
'D_VPCHI2',
'D_BPVVDCHI2',
#B-resonance parameters
'B_VPCHI2',
'B_BPVDIRA'
]
def __init__(self, name, config):
'''The constructor of the configuration class.
Requires a name which is added to the end of each algorithm name
and a configuration dictionary, config, which must provide all the settings
which differ between the lines'''
LineBuilder.__init__(self, name, config)
#from StrippingSelections.Utils import checkConfig
#
#checkConfig(B0q2DplusMuXOneLineConf.__configuration_keys__,
# config)
### first we define the cuts from the configuration ###
### it's nice to see all the cuts in one place ###
self.MuCut ="((ISMUON) & (HASMUON) & (ISLONG) "\
"& (PT > %(MuPT)s *MeV) & (TRCHI2DOF< %(MuTrChi2)s ) "\
"& (PIDmu-PIDpi > %(MuPidPi)s ) & (PIDmu-PIDK > %(MuPidK)s )) "\
"& (MIPDV(PRIMARY) > 0.01) & (MIPCHI2DV(PRIMARY) > 2.0) " % config
self.KCut= "(P > 2.0*GeV)"\
" & (PT> %(KPT)s*MeV) & (TRCHI2DOF < %(KTrChi2)s) "\
" & (PIDK-PIDpi > %(KPidPi)s ) & (PIDK-PIDmu > %(KPidMu)s) & (PIDK-PIDp > %(KPidP)s )"\
" & (MIPDV(PRIMARY) > %(KIP)s) & (MIPCHI2DV(PRIMARY) > %(KIPChi2)s )" % config
self.PiCut="(P > 2.0*GeV) & (PT>300*MeV)"\
" & (TRCHI2DOF < %(PiTrChi2)s) "\
" & (PIDpi-PIDK > %(PiPidK)s ) & (PIDpi-PIDmu > %(PiPidMu)s)"\
" & (MIPDV(PRIMARY) > %(PiIP)s ) & (MIPCHI2DV(PRIMARY) > %(PiIPChi2)s )" % config
#self.DCombCut="(ADAMASS('D_s-')<210*MeV) & (APT>%(D_APT)s *MeV) & (AMAXDOCA('')<0.3*mm)" % config
self.DCombCut="(ADAMASS('D_s-')<210*MeV) & (DAMASS('D_s-')<120*MeV) & (APT> %(D_APT)s *MeV) & (ADOCACUT( 0.3*mm , '' ))" % config # & ( ACUTDOCACHI2(20,''))" % config
self.DCut="("\
"in_range(1768.*MeV, M, 2068.*MeV)"\
"& (PT > %(DPT)s *MeV) "\
"& (VFASPF(VPCHI2) > %(D_VPCHI2)s ) "\
"& (BPVDIRA > 0.99) "\
"& (BPVVDZ > 1.0*mm) "\
"& (BPVVDCHI2 > %(D_BPVVDCHI2)s ) "\
")" % config
#"& (VFASPF(VCHI2/VDOF) < 16.0) "\
#"& (M > 1768.*MeV) & (M < 2068.*MeV) "\
self.BCombCut="((AM > 2000.*MeV) & (AMAXDOCA('')<0.3*mm))"
self.BCut = "("\
"in_range(2200.*MeV,M,6300.*MeV) & "\
"(VFASPF(VPCHI2)> %(B_VPCHI2)s ) & "\
"((CHILD(1,VFASPF(VZ))-VFASPF(VZ))>0*mm) "\
"& (BPVDIRA> %(B_BPVDIRA)s ) "\
"& (BPVVDCHI2>1) "\
")" % config
#"(M < 6300.*MeV) & (M > 2200.*MeV) & "\
#" & (VFASPF(VCHI2/VDOF)<16) "\
### Now make all the selections ###
self.__MakeMuSel__()
self.__MakeDplus__()
self.__MakeB0__()
from StrippingConf.StrippingLine import StrippingLine
### Now make a stripping line ###
B0qLine=StrippingLine(self._name,
prescale = config['Prescale'],
postscale = config['Postscale'],
algos = [ self.B0Sel ]
)
self.registerLine(B0qLine)
### Collect them all together in a nice way ###
self.Line=B0qLine
#self.TopSelectionSeq=SeqB0q2DplusMuX
self.Selections=[self.MuSel, self.DSel, self.B0Sel]
def printCuts(self):
'''Print the compiled cut values'''
print 'name', self._name
print 'MuCut', self.MuCut
print 'KCut', self.KCut
print 'PiCut', self.PiCut
print 'DCombCut', self.DCombCut
print 'DCut', self.DCut
print 'BCombCut', self.BCombCut
print 'BCut', self.BCut
############ Functions to make Selections #######################
def __MakeMuSel__(self):
"""
the bachelor muon selection; takes some keyword arguments to make a muon selection
"""
from GaudiConfUtils.ConfigurableGenerators import FilterDesktop
from PhysSelPython.Wrappers import Selection
from StandardParticles import StdLooseMuons
MuForB0q = FilterDesktop(Code=self.MuCut)
SelMuForB0q = Selection("SelMuFor"+self._name,
Algorithm=MuForB0q, RequiredSelections = [StdLooseMuons])
self.MuSel=SelMuForB0q
def __MakeDplus__(self):
"""
Dplus for the selection
Don't want to use the standard, because the decay descriptor is sub-optimal
Here [D_s- -> K+ K- pi-]cc
Which can be associated in this selection to:
[(Meson & Charm & Strange) ==> K+ K- pi- {pi0} ]
or
[(Meson & Charm & Down) ==> K+ K- pi- {pi0} ]
"""
from GaudiConfUtils.ConfigurableGenerators import CombineParticles
from PhysSelPython.Wrappers import Selection
from StandardParticles import StdLooseKaons, StdLoosePions
Dplus2KKpiForB0q = CombineParticles(
DecayDescriptor = "[D_s- -> K+ K- pi-]cc",
DaughtersCuts = {
"K+" : self.KCut,
"pi+" : self.PiCut
} ,
CombinationCut = self.DCombCut,
MotherCut = self.DCut
)
SelDplus2KKpiForB0q = Selection("SelDplus2KKpiFor"+self._name,
Algorithm=Dplus2KKpiForB0q,
RequiredSelections = [StdLooseKaons,StdLoosePions])
self.DSel=SelDplus2KKpiForB0q
def __MakeB0__(self):
"""
B0q selection:
[B_s0 -> D_s- mu+]cc, [B_s0 -> D_s+ mu+]cc
But really this can be associated to anything of the form:
[ (Meson & Beauty & Strange) => l+ Nu ( (Meson & Charm & Strange) ==> K+ K- pi- {pi0} ) {pi0} ]CC
or
[ (Meson & Beauty & Down) => l+ Nu ( (Meson & Charm & Down) ==> K+ K- pi- {pi0} ) {pi0} ]CC
"""
from GaudiConfUtils.ConfigurableGenerators import CombineParticles
from PhysSelPython.Wrappers import Selection
CombB0q2DplusMuX = CombineParticles(
DecayDescriptors = ["[B_s0 -> D_s- mu+]cc", "[B_s0 -> D_s+ mu+]cc"], #includes wrong charges
CombinationCut = self.BCombCut,
MotherCut = self.BCut
)
SelB0q2DplusMuX = Selection("Sel"+self._name, Algorithm=CombB0q2DplusMuX,
RequiredSelections = [self.MuSel, self.DSel])
self.B0Sel=SelB0q2DplusMuX
| [
"[email protected]"
] | |
26523c4e362f033a64402149db1156139b7ddef3 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/carbonui/control/browser/browserutil.py | f1123476fb25f4a4385f9b0fdc156e55cad377b4 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 44,014 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\carbonui\control\browser\browserutil.py
from carbonui.primitives.frame import FrameCoreOverride as Frame
import blue
import urllib2
import urllib
import urlparse
from carbonui.primitives.container import Container
from carbonui.primitives.line import Line
from carbonui.util.bunch import Bunch
from carbonui.util.various_unsorted import GetBrowser, ParseHTMLColor
import nturl2path
import re
import carbonui.const as uiconst
import localization
class NewBrowserForm():
__guid__ = 'corebrowserutil.NewBrowserForm'
def __init__(self, attrs, browser):
if getattr(attrs, 'action', None) is None:
setattr(attrs, 'action', GetBrowser(browser).sr.currentURL.split('?')[0])
if str(getattr(attrs, 'method', 'get')).lower() not in ('get', 'post'):
setattr(attrs, 'method', 'get')
self.submitting = 0
self.attrs = attrs
self.browser = browser
self.fields = []
def fixup(self, m):
return '&#' + hex(int(m.group(1)))[1:] + ';'
def decode(self, u):
return re.sub('&#(\\d+);', self.fixup, u.encode('cp1252', 'xmlcharrefreplace'))
def OnSubmit(self, *etc):
if self.submitting:
uicore.Message('Busy')
return
self.submitting = 1
e = []
for attrs, wnd in self.fields:
if not attrs.name:
continue
if getattr(attrs, 'type', None).lower() == 'submit' and wnd not in etc:
continue
getval = getattr(self, 'GetValue_%s' % attrs.type.lower(), lambda *args: None)
val = getval(wnd, attrs)
if val is not None:
e.append((attrs.name, val))
d = []
for key, val in e:
if type(val) == list:
for v in val:
if isinstance(v, unicode):
v = self.decode(v)
d.append((key, v))
else:
if isinstance(val, unicode):
val = self.decode(val)
d.append((key, val))
s = urllib.urlencode(d)
if isinstance(s, unicode):
s = self.decode(s)
browser = GetBrowser(self.browser)
if not browser:
return
if getattr(self.attrs, 'method', 'get').lower() == 'get':
if 'localsvc:' in self.attrs.action:
from carbonui.control.baselink import BaseLinkCoreOverride as BaseLink
BaseLink().LocalSvcCall(self.attrs.action[9:] + ''.join([ name for name, value in d ]))
self.submitting = 0
else:
browser.GoTo('?'.join((self.attrs.action, s)))
elif self.attrs.method.lower() == 'post':
browser.GoTo(self.attrs.action, s)
def GetField(self, name = None):
if name is None:
return self.fields[-1][1]
for attrs, wnd in self.fields:
if attrs.name == name:
return wnd
else:
return
def GetFields(self):
d = {}
for attrs, wnd in self.fields:
if not attrs.name:
continue
if getattr(attrs, 'type', None) in ('submit', 'hidden'):
continue
getval = getattr(self, 'GetValue_%s' % attrs.type.lower(), lambda *args: None)
val = getval(wnd, attrs)
if val is not None:
d[attrs.name] = val
return d
def SetFields(self, d):
for attrs, wnd in self.fields:
if not attrs.name:
continue
setval = getattr(self, 'SetValue_%s' % attrs.type.lower(), lambda *args: None)
if not d.has_key(attrs.name):
if getattr(attrs, 'type', None) == 'checkbox':
setval(wnd, attrs, 0)
continue
setval(wnd, attrs, d[attrs.name])
def AddInput(self, attrs, add = 1):
attrs.vspace = getattr(attrs, 'vspace', 1)
if attrs.type is None:
attrs.type = 'text'
create = getattr(self, 'Create_%s' % attrs.type.lower(), None)
if create:
wnd = create(attrs)
else:
wnd = Container()
browser = GetBrowser(self.browser)
if not browser:
return
if add:
self.fields.append((attrs, wnd))
attrs.control = wnd
attrs.align = getattr(attrs, 'align', None)
obj = Bunch()
obj.font = None
obj.key = 'input_%s' % attrs.type.lower()
obj.type = '<input>'
obj.attrs = attrs
wnd.state = uiconst.UI_HIDDEN
if hasattr(self.browser, 'sr'):
wnd.SetParent(self.browser.sr.cacheContainer)
startup = getattr(self, 'Startup_%s' % attrs.type.lower(), None)
if startup:
startup(wnd, attrs)
obj.width = wnd.width + 5
obj.height = wnd.height + 5
obj.valign = 1
if add:
obj.control = wnd
wnd.loaded = 1
else:
wnd.Close()
return obj
def AddTextArea(self, attrs, add = 1):
attrs.type = 'textarea'
return self.AddInput(attrs, add)
def AddSelect(self, attrs, options, add = 1):
attrs.type = 'select'
attrs.options = options
return self.AddInput(attrs, add)
def StdGetValue(self, wnd, attrs):
return wnd.GetValue()
def StdSetValue(self, wnd, attrs, val):
wnd.SetValue(val)
def GetValue_submit(self, wnd, attrs):
return getattr(attrs, 'value', None) or 'Submit'
def GetValue_hidden(self, wnd, attrs):
return attrs.value
def Create_textarea(self, attrs):
from carbonui.control.edit import EditCoreOverride as Edit
wnd = Edit(name='textarea', align=uiconst.RELATIVE)
wnd.width = getattr(attrs, 'width', None) or 8 * int(attrs.cols) or 350
wnd.height = getattr(attrs, 'height', None) or 18 * int(attrs.rows) or 45
return wnd
def Startup_textarea(self, wnd, attrs):
if getattr(attrs, 'maxlength', None):
wnd.SetMaxLength(attrs.maxlength)
if hasattr(attrs, 'readonly'):
wnd.ReadOnly()
if attrs.value:
wnd.SetValue(attrs.value)
color = self.browser.attrStack[-1]['color']
Line(parent=wnd, align=uiconst.TOTOP, color=color, weight=1)
Line(parent=wnd, align=uiconst.TOBOTTOM, color=color, weight=1)
Line(parent=wnd, align=uiconst.TOLEFT, color=color, weight=1)
Line(parent=wnd, align=uiconst.TORIGHT, color=color, weight=1)
def GetValue_textarea(self, wnd, attrs):
return wnd.GetValue().replace('<br>', '\r\n')
SetValue_textarea = StdSetValue
def Create_text(self, attrs):
from carbonui.control.singlelineedit import SinglelineEditCoreOverride as SinglelineEdit
wnd = SinglelineEdit(name='textedit', align=uiconst.RELATIVE, pos=(0,
0,
getattr(attrs, 'width', None) or min(200, 7 * (attrs.size or 30)),
16))
return wnd
def Startup_text(self, wnd, attrs, password = 0):
wnd.OnReturn = self.OnSubmit
if password:
wnd.SetPasswordChar('*')
maxlength = getattr(attrs, 'maxlength', None)
if maxlength is not None:
wnd.SetMaxLength(int(maxlength))
if attrs.value:
wnd.SetValue(attrs.value, updateIndex=0)
if hasattr(self.browser, 'attrStack'):
color = self.browser.attrStack[-1]['color']
Line(parent=wnd, align=uiconst.TOTOP, color=color, weight=1)
Line(parent=wnd, align=uiconst.TOBOTTOM, color=color, weight=1)
Line(parent=wnd, align=uiconst.TOLEFT, color=color, weight=1)
Line(parent=wnd, align=uiconst.TORIGHT, color=color, weight=1)
GetValue_text = StdGetValue
SetValue_text = StdSetValue
Create_password = Create_text
def Startup_password(self, wnd, attrs):
self.Startup_text(wnd, attrs, password=1)
wnd.SetPasswordChar('\x95')
GetValue_password = StdGetValue
SetValue_password = StdSetValue
def Create_checkbox(self, attrs):
from carbonui.control.checkbox import CheckboxCoreOverride as Checkbox
cbox = Checkbox(pos=(0, 0, 0, 0), align=uiconst.RELATIVE)
cbox.data = {}
attrs.vspace = 3
return cbox
def Startup_checkbox(self, wnd, attrs):
checked = getattr(attrs, 'checked', '').lower() == 'checked'
if not checked and attrs.__dict__.has_key('checked') and getattr(attrs, 'checked', None) is None:
checked = 1
wnd.SetChecked(checked, 0)
def SetValue_checkbox(self, wnd, attrs, val):
if val == attrs.value:
wnd.SetValue(1)
else:
wnd.SetValue(0)
def GetValue_checkbox(self, wnd, attrs):
checked = wnd.GetValue()
if checked:
return attrs.value or 'on'
else:
return None
def Create_radio(self, attrs):
ret = self.Create_checkbox(attrs)
ret.SetGroup(attrs.name)
ret.width = ret.height = 14
return ret
Startup_radio = Startup_checkbox
GetValue_radio = GetValue_checkbox
SetValue_radio = SetValue_checkbox
def Create_select(self, attrs):
if getattr(attrs, 'size', None) is not None or getattr(attrs, 'height', 0):
from carbonui.control.combo import SelectCore as Select
return Select(name='select', align=uiconst.RELATIVE, pos=(0,
0,
getattr(attrs, 'width', None) or 128,
getattr(attrs, 'height', None) or int(attrs.size) * 18 - 1 + attrs.vspace * 2))
from carbonui.control.combo import ComboCoreOverride as Combo
c = Combo(name='selection_%s' % attrs.name, align=uiconst.RELATIVE)
if getattr(attrs, 'width', None) is not None:
c.width = getattr(attrs, 'width', None)
return c
def Startup_select(self, wnd, attrs):
if getattr(attrs, 'size', None) is not None or getattr(attrs, 'height', 0):
wnd.Startup([ (k, v, s) for k, v, s in attrs.options ])
if not hasattr(attrs, 'multiple'):
wnd.multiSelect = 0
for each in wnd.children:
if each.name in ('_underlay',):
each.Close()
if each.name == 'activeframe':
each.Flush()
Frame(parent=each, color=self.browser.attrStack[-1]['color'], padding=(-1, -1, -1, -1))
Line(parent=wnd, align=uiconst.TOTOP, color=getattr(attrs, 'fontcolor', None), weight=1)
Line(parent=wnd, align=uiconst.TOBOTTOM, color=getattr(attrs, 'fontcolor', None), weight=1)
Line(parent=wnd, align=uiconst.TOLEFT, color=getattr(attrs, 'fontcolor', None), weight=1)
Line(parent=wnd, align=uiconst.TORIGHT, color=getattr(attrs, 'fontcolor', None), weight=1)
Container(name='push', parent=wnd, align=uiconst.TOTOP, pos=(0,
0,
0,
attrs.vspace), idx=0)
Container(name='push', parent=wnd, align=uiconst.TOBOTTOM, pos=(0,
0,
0,
attrs.vspace), idx=0)
else:
default = None
for key, value, selected in attrs.options:
if selected:
default = key
break
if getattr(attrs, 'width', None) is not None:
wnd.Startup([ (k, v) for k, v, s in attrs.options ], default=default)
else:
wnd.Startup([ (k, v) for k, v, s in attrs.options ], default=default, adjustWidth=1)
clipper = wnd.GetChild('clipper')
clipper.clipChildren = 1
for each in wnd.children:
if each.name == 'selected':
wnd.sr.activeframe = Frame(parent=each, color=getattr(attrs, 'fontcolor', None), padding=(-1, -1, -1, -1))
wnd.sr.activeframe.state = uiconst.UI_HIDDEN
Frame(parent=each, color=getattr(attrs, 'fontcolor', None))
Container(name='push', parent=each, align=uiconst.TOTOP, pos=(0,
0,
0,
attrs.vspace), idx=0)
Container(name='push', parent=each, align=uiconst.TOBOTTOM, pos=(0,
0,
0,
attrs.vspace), idx=0)
for child in each.children:
if child.name in ('_underlay',):
child.Close()
break
def GetValue_select(self, wnd, attrs):
v = wnd.GetValue()
if getattr(wnd, 'multiSelect', 1) == 0 and type(v) == list:
return v[0]
return v
def SetValue_select(self, wnd, attrs, val):
wnd.SetValue(val)
def Create_submit(self, attrs):
raise NotImplementedError('This functionality has been removed')
class Css():
__guid__ = 'corebrowserutil.Css'
def __init__(self):
self.Reset()
def Reset(self):
self.classes = []
self.s = {}
def copy(self):
new = Css()
new.classes = self.classes[:]
return new
def ApplyCSS(self, tag, attrs, attrStack):
classlist = []
for c in self.classes:
if self.CompareSelector(attrStack[-1], c) == 1:
b = 0
if c['deptag']:
i = len(attrStack) - 2
if i < 0:
continue
for t in c['deptag']:
if b == 1 or i < 0:
break
if t['op'] == ' ':
while not self.CompareSelector(attrStack[i], t):
i -= 1
if i < 0:
b = 1
break
if i < 0:
b = 1
break
i -= 1
elif t['op'] == '>':
if self.CompareSelector(attrStack[i], t):
i -= 1
continue
else:
b = 1
break
elif t['op'] == '*':
i -= 1
if i < 0:
break
while not self.CompareSelector(attrStack[i], t):
i -= 1
if i < 0:
b = 1
break
if i < 0:
b = 1
break
i -= 1
elif t['op'] == '+':
if attrStack[i]['lasttag'] == t['tag']:
i -= 1
continue
else:
b = 1
break
if b == 0:
classlist.append(c)
classlist.sort(lambda x, y: cmp(x['prio'], y['prio']))
c = {}
for each in classlist:
c.update(each)
return c
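# Net effect of ApplyCSS (descriptive note): every rule whose selector chain matches
# the current attribute stack is collected, sorted by its computed 'prio'
# (specificity), and merged in order, so higher-priority declarations overwrite
# lower-priority ones in the returned dict.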
def CompareSelector(self, astack, csel):
tag = astack['tag']
tid = getattr(astack['attr'], 'id', None)
cl = getattr(astack['attr'], 'class', '').lower()
if csel['tag'] not in (tag, '*') and csel['tag']:
return
if csel['class']:
if csel['class'] not in cl.split(' '):
return
if csel['id'] and tid != csel['id']:
return
return 1
def SplitSelector(self, sel):
s = {}
s['tag'] = None
s['id'] = None
s['class'] = None
s['attr'] = None
s['pclass'] = None
sel = sel.strip().lower()
if sel.count(':'):
sel, pclass = sel.split(':', 1)
s['pclass'] = pclass
if sel == '':
sel = '*'
ccount = sel.count('.')
acount = sel.count('[')
icount = sel.count('#')
if ccount + acount + icount > 1:
return
if ccount == 1:
h = sel.split('.')
if len(h) == 1:
s['class'] = h[0]
else:
s['tag'] = h[0]
s['class'] = h[1]
return s
if acount == 1:
h = sel.split('[')
if sel[-1] == ']':
if len(h) == 1:
s['attr'] = h[0]
else:
s['tag'] = h[0]
s['attr'] = h[1]
else:
return
return s
if icount == 1:
h = sel.split('#')
if len(h) == 1:
s['id'] = h[0]
else:
s['tag'] = h[0]
s['id'] = h[1]
return s
s['tag'] = sel
return s
def GetClass(self, classID):
classID = classID.lower()
for c in self.classes:
if c['id'] == classID:
return c
if c['class'] == classID:
return c
def HandlePseudoClass(self, pclass):
links = [('link', 'link-color'),
('visited', 'vlink-color'),
('active', 'alink-color'),
('hover', 'alink-color')]
d = {}
for dclass, attr in links:
if pclass == dclass and 'color' in self.s:
d[attr] = self.s['color']
self.s = d.copy()
def ParseCSSData(self, cssdata):
lines = ''
for dataAndComment in cssdata.split('*/'):
data = dataAndComment.split('/*', 1)[0]
lines += data
for styleClass in lines.split('}'):
if not styleClass:
continue
if styleClass.count('{') != 1:
continue
attr, styles = styleClass.strip().split('{')
self.s = {}
prio = 0
if not styles:
continue
data, imp = self.ParseStyleData(styles.strip())
self.ParseStyle(data)
selector = depsel = depop = None
prio += attr.count('.') * 100 + attr.count('#') * 1000 + attr.count('[') * 10 + attr.count(' ') + 1 - (attr.count('.') + attr.count('#') + attr.count('['))
selectors = attr.replace('\t', ' ').replace(' ', ' ').strip().split(' ')
selector = selectors[-1]
selectors = selectors[:-1]
selectors.reverse()
deptags = []
op = ' '
for t in selectors:
if t in ('>', '*', '+'):
op = t
else:
d = self.SplitSelector(t)
if d:
d['op'] = op
deptags.append(d)
op = ' '
for sel in selector.split(','):
attrs = self.SplitSelector(sel)
if attrs:
if attrs['pclass']:
self.HandlePseudoClass(attrs['pclass'])
s = self.s.copy()
s['tag'] = attrs['tag']
s['class'] = attrs['class']
s['id'] = attrs['id']
s['attr'] = attrs['attr']
s['deptag'] = deptags
s['prio'] = prio
self.classes.append(s)
if len(imp):
self.s = {}
self.ParseStyle(imp)
for sel in selector.split(','):
attrs = self.SplitSelector(sel)
if attrs:
if attrs['pclass']:
self.HandlePseudoClass(attrs['pclass'])
s = self.s.copy()
s['tag'] = attrs['tag']
s['class'] = attrs['class']
s['id'] = attrs['id']
s['attr'] = attrs['attr']
s['deptag'] = deptags
s['prio'] = 10000
self.classes.append(s)
def ParseStyleData(self, cssdata):
data = {}
imp = {}
for attrAndValue in cssdata.split(';'):
if not attrAndValue:
continue
if ':' in attrAndValue:
attr, value = attrAndValue.strip().split(':', 1)
else:
continue
if attr and value:
attr = attr.strip().lower()
if '!' in value:
value, important = value.split('!', -1)
if important.strip().lower() == 'important':
imp[attr] = value.strip()
continue
data[attr] = value.strip()
return (data, imp)
def ParseCSS(self, attrs = None):
self.s = {}
sattr = getattr(attrs, 'style', None)
if sattr:
data, imp = self.ParseStyleData(sattr)
data.update(imp)
return self.ParseStyle(data)
def ParseStyle(self, style = None):
if style:
for k, v in style.iteritems():
if k in self.styleDict:
eval('self.%s' % self.styleDict[k])(k, v)
return self.s
def ParseStyleNum(self, value):
value = str(value).lower().strip()
if value.isdigit():
return int(value)
if value.endswith('%'):
return value
if value[-2:] in self.absStyleUnits and value[:-2].isdigit():
return int(self.absStyleUnits[value[-2:]] * float(value[:-2].strip()))
try:
return int(value)
except:
return None
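# Illustrative results (derived from absStyleUnits below):
#   ParseStyleNum('12pt') -> 16, ParseStyleNum('2cm') -> 78,
#   ParseStyleNum('50%') -> '50%', ParseStyleNum('auto') -> None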
absStyleUnits = {'px': 1.0,
'in': 100.0,
'cm': 100.0 / 2.54,
'mm': 100.0 / 25.4,
'pt': 100.0 / 72.0,
'pc': 100.0 / 6.0}
def ParseFontSize(self, k, v):
if v:
if not v.isdigit():
if v in self.fontSize:
v = self.fontSize[v]
v = self.ParseStyleNum(v)
if v is None or not type(v) == int and not v.isdigit():
return
if v in self.fontFamilies:
self.s['font-size'] = int(v)
self.s['font-family'] = self.fontFamilies[int(v)]
else:
self.s['font-size'] = int(v or 10)
self.s['font-family'] = 'sans'
def ParseFontStyle(self, k, v):
if v:
if v == 'normal':
self.s['font-style'] = 'n'
if v in ('italic', 'oblique'):
self.s['font-style'] = 'i'
def ParseFontWeight(self, k, v):
if v:
if v == 'normal':
self.s['font-weight'] = 'n'
elif v in ('bold', 'bolder'):
self.s['font-weight'] = 'b'
def ParseTextDecoration(self, k, v):
if v:
if v == 'none':
self.s['text-decoration'] = ''
return
s = str(v)
s = s.replace('underline', 'u').replace('line-through', 's').replace('overline', 'o')
for t in s:
if t.strip() not in ('u', 's', 'o'):
return
self.s['text-decoration'] = s
def ParseTextAlign(self, k, v):
if v in ('left', 'right', 'center'):
self.s['text-align'] = v
def ParseFont(self, k, v):
if v:
for param in v.split(' '):
self.ParseFontSize('', param)
self.ParseFontStyle('', param)
self.ParseFontWeight('', param)
self.ParseTextDecoration('', param)
def ParseMargin(self, k, v):
if v:
a = []
for p in v.replace(' ', ' ').split(' '):
p = self.ParseStyleNum(p)
if p:
a.append(p)
if len(a) > 0:
self.s['margin-left'] = a[[0,
1,
1,
3][len(a) - 1]]
self.s['margin-right'] = a[[0,
1,
1,
1][len(a) - 1]]
self.s['margin-top'] = a[[0,
0,
0,
0][len(a) - 1]]
self.s['margin-bottom'] = a[[0,
0,
2,
2][len(a) - 1]]
def ParsePadding(self, k, v):
if v:
v = self.ParseStyleNum(v)
if v:
self.s['padding-left'] = self.s['padding-right'] = v
self.s['padding-top'] = self.s['padding-bottom'] = v
def ParseBorderWidth(self, k, v):
if v:
if v in self.borderWidths:
v = self.borderWidths[v]
else:
v = self.ParseStyleNum(v)
if v:
if k == 'border-width':
for i in ('left', 'right', 'top', 'bottom'):
self.s['border-' + i + '-width'] = v
else:
self.s[k] = v
def ParseBorderColor(self, k, v):
if v:
v = ParseHTMLColor(v, 1, error=1)
if v:
self.s['border-left-color'] = self.s['border-right-color'] = v
self.s['border-top-color'] = self.s['border-bottom-color'] = v
def ParseBorderStyles(self, k, v):
if v:
if v in self.borderStyles:
self.s[k] = self.borderStyles[v]
def ParseBorderStyle(self, k, v):
if v in self.borderStyles:
self.s['border-left-style'] = self.borderStyles[v]
self.s['border-right-style'] = self.borderStyles[v]
self.s['border-top-style'] = self.borderStyles[v]
self.s['border-bottom-style'] = self.borderStyles[v]
def ParseBorder(self, k, v):
for each in v.split(' '):
if each in self.borderStyles:
self.ParseBorderStyle(k, each)
continue
self.ParseBorderColor(k, each)
self.ParseBorderWidth('border-width', each)
def ParseBorders(self, k, v):
for each in v.split(' '):
if each in self.borderStyles:
self.ParseBorderStyles(k, each)
continue
self.ParseColor(k + '-color', each)
self.ParseBorder(k + '-width', each)
def ParseBorderCollapse(self, k, v):
if v in self.borderCollapse:
self.s['border-collapse'] = self.borderCollapse[v]
else:
self.s['border-collapse'] = 0
def ParseColor(self, k, v):
if v:
v = ParseHTMLColor(v, 1, error=1)
if v:
self.s[k] = v
def ParseBackground(self, k, v):
for each in v.split(' '):
each = each.replace(' ', '').lower()
if each.find('url(') != -1:
self.ParseBackgroundImage(None, each)
elif each.find('repeat') != -1:
self.ParseBackgroundRepeat(None, each)
elif v:
color = ParseHTMLColor(each, 1, error=1)
if color:
self.s['background-color'] = color
def ParseBackgroundRepeat(self, k, v):
if v in ('repeat', 'repeat-x', 'repeat-y', 'no-repeat'):
self.s['background-repeat'] = v
def ParseBackgroundImage(self, k, v):
if v and v.startswith('url(') and v.endswith(')'):
self.s['background-image'] = v[4:-1]
def ParseBackgroundAttachment(self, k, v):
if v in ('fixed', 'scroll'):
self.s['background-attachment'] = v
def ParseBackgroundPosition(self, k, v):
if v:
v = v.split(' ')
if len(v) in (2, 4):
self.ParseAbsSize('background-image-left', v[0])
self.ParseAbsSize('background-image-top', v[1])
if len(v) == 4:
self.ParseAbsSize('background-image-width', v[2])
self.ParseAbsSize('background-image-height', v[3])
self.s['background-position'] = []
for i in v:
if i in ('top', 'center', 'middle', 'bottom', 'left', 'right'):
self.s['background-position'].append(i)
def ParseAbsSize(self, k, v):
if v:
v = self.ParseStyleNum(v)
if v:
self.s[k] = int(v)
def ParsePosAbsSize(self, k, v):
if v:
v = self.ParseStyleNum(v)
if v:
if type(v) == int:
self.s[k] = max(v, 0)
else:
self.s[k] = v
def ParseVerticalAlign(self, k, v):
if v in self.vertStyles:
self.s['vertical-align'] = self.vertStyles[v]
def ParseHorizontalAlign(self, k, v):
if v in ('left', 'right', 'center'):
self.s['horizontal-align'] = v
def ParsePosition(self, k, v):
if v in ('absolute', 'fixed', 'relative', 'static'):
self.s['position'] = v
def ParseFloat(self, k, v):
if v in ('left', 'right'):
self.s['float'] = v
elif v == 'none':
self.s['float'] = None
elif v == 'inherit' and self.s.has_key('float'):
del self.s['float']
fontSize = {'small': 8,
'x-small': 7,
'xx-small': 6,
'smaller': 8,
'medium': 10,
'large': 12,
'larger': 14,
'x-large': 20}
fontFamilies = {8: 'sans',
10: 'sans',
12: 'sans',
14: 'sans',
20: 'sans'}
vertStyles = {'top': 0,
'middle': 1,
'bottom': 2,
'baseline': 3,
'sub': 4,
'super': 5}
borderStyles = {'none': None,
'hidden': 0,
'solid': 1,
'groove': 2,
'ridge': 2,
'inset': 2,
'outset': 2}
borderWidths = {'thin': 1,
'medium': 2,
'thick': 3}
borderCollapse = {'seperate': 0,
'collapse': 1}
absStyles = ('text-indent', 'margin-left', 'margin-right', 'margin-top', 'margin-bottom', 'padding-left', 'padding-right', 'padding-top', 'padding-bottom', 'border-left-width', 'border-right-width', 'border-top-width', 'border-bottom-width', 'letter-spacing', 'word-spacing', 'line-height')
styleDict = {'text-indent': 'ParseAbsSize',
'margin-left': 'ParsePosAbsSize',
'margin-right': 'ParsePosAbsSize',
'margin-top': 'ParsePosAbsSize',
'margin-bottom': 'ParsePosAbsSize',
'padding-left': 'ParsePosAbsSize',
'padding-right': 'ParsePosAbsSize',
'padding-top': 'ParsePosAbsSize',
'padding-bottom': 'ParsePosAbsSize',
'border-left-width': 'ParseBorderWidth',
'border-right-width': 'ParseBorderWidth',
'border-top-width': 'ParseBorderWidth',
'border-bottom-width': 'ParseBorderWidth',
'border-left-style': 'ParseBorderStyles',
'border-right-style': 'ParseBorderStyles',
'border-top-style': 'ParseBorderStyles',
'border-bottom-style': 'ParseBorderStyles',
'letter-spacing': 'ParseAbsSize',
'word-spacing': 'ParseAbsSize',
'line-height': 'ParsePosAbsSize',
'font-size': 'ParseFontSize',
'font-weight': 'ParseFontWeight',
'font-style': 'ParseFontStyle',
'text-decoration': 'ParseTextDecoration',
'text-align': 'ParseTextAlign',
'font': 'ParseFont',
'margin': 'ParseMargin',
'padding': 'ParsePadding',
'border-width': 'ParseBorderWidth',
'border-color': 'ParseBorderColor',
'border-style': 'ParseBorderStyle',
'border': 'ParseBorder',
'border-left': 'ParseBorders',
'border-right': 'ParseBorders',
'border-top': 'ParseBorders',
'border-bottom': 'ParseBorders',
'color': 'ParseColor',
'link-color': 'ParseColor',
'alink-color': 'ParseColor',
'vlink-color': 'ParseColor',
'background': 'ParseBackground',
'background-color': 'ParseColor',
'border-left-color': 'ParseColor',
'border-right-color': 'ParseColor',
'border-top-color': 'ParseColor',
'border-bottom-color': 'ParseColor',
'vertical-align': 'ParseVerticalAlign',
'align': 'ParseHorizontalAlign',
'background-repeat': 'ParseBackgroundRepeat',
'background-image': 'ParseBackgroundImage',
'background-image-color': 'ParseColor',
'background-image-width': 'ParseAbsSize',
'background-image-height': 'ParseAbsSize',
'background-image-left': 'ParseAbsSize',
'background-image-top': 'ParseAbsSize',
'background-attachment': 'ParseBackgroundAttachment',
'background-position': 'ParseBackgroundPosition',
'border-collapse': 'ParseBorderCollapse',
'position': 'ParsePosition',
'float': 'ParseFloat',
'left': 'ParseAbsSize',
'right': 'ParseAbsSize',
'top': 'ParseAbsSize',
'bottom': 'ParseAbsSize',
'width': 'ParsePosAbsSize',
'min-width': 'ParsePosAbsSize',
'max-width': 'ParsePosAbsSize',
'height': 'ParsePosAbsSize',
'min-height': 'ParsePosAbsSize',
'max-height': 'ParsePosAbsSize'}
def GetStringFromURL(url, data = None, cookie = None):
if cookie:
header = {'Cookie': cookie}
else:
header = {}
header['User-Agent'] = 'CCP-minibrowser/3.0'
for k, v in header.iteritems():
if isinstance(v, unicode):
header[k] = v.encode('ascii', 'xmlcharrefreplace')
else:
header[k] = str(v)
try:
url = url.encode('ascii')
except:
if not url.startswith('file:'):
raise urllib2.URLError('URL contained non-ascii characters')
try:
r = urllib2.Request(url, data, header)
if url.lower().find('local://') == 0:
return OpenLocalURL(url, r, data, cookie)
return urllib2.urlopen(r)
except ValueError as what:
if what.args[0].startswith('invalid literal for int():'):
raise urllib2.URLError('malformed URL')
raise
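# Illustrative call (URL and cookie value are placeholders, not from the original code):
#   response = GetStringFromURL('http://example.com/page', cookie='sid=abc123')
#   html = response.read()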
def OpenLocalURL(url, r, data, cookie):
path = url[url.find(':') + 2:]
class FakeSocket:
def __init__(self, req, data):
self.request = req
self.header = ['HTTP']
self.buff = ''
self.isFake = True
method = 'GET'
contentLength = ''
contentType = ''
post = ''
if data:
method = 'POST'
contentLength = 'Content-Length: %s\r\n' % len(data)
contentType = 'Content-Type: application/x-www-form-urlencoded\r\n'
post = data + '\r\n'
self.buff = '%(method)s %(path)s HTTP/1.0\r\nHost: dummy:0\r\nUser-agent: EVE\r\nEve.trusted: no\r\n%(contenttype)s%(contentlength)s\r\n%(data)s\r\n' % {'method': method,
'path': path,
'contenttype': contentType,
'contentlength': contentLength,
'data': post}
def Read(self):
return self.buff
def Write(self, what):
self.buff = what
class FakeInfo:
def __init__(self):
self.headers = {}
class FakeResponse:
def __init__(self, buf):
self.buf = buf
self.inf = FakeInfo()
self.url = ''
self.headers = {}
def read(self):
return self.buf
def info(self):
return self.inf
fakeSocket = FakeSocket(r, data)
import gps
ep = gps.RawTransport(fakeSocket, '')
conn = sm.GetService('http')
conn.Handle(ep)
buf = ep.Read()
code = int(buf[9:12])
if code == 302:
lines = buf.split('\r\n')
loc = ''
lkey = 'location: '
for l in lines:
if l.lower().find(lkey) >= 0:
loc = l[len(lkey):]
break
loc, a = ParseURL(loc, 'local://')
return GetStringFromURL(loc, data, cookie)
content = buf[buf.find('\r\n\r\n') + 4:]
return FakeResponse(content)
def ParseURL(url, current = None):
url = url.encode('ascii')
if current:
current = current.encode('ascii')
if current.find('local://') == 0 and url.find('://') == -1:
url = 'local://' + url
else:
url = urlparse.urljoin(current, url)
elif url.find(':/') == -1:
url = 'http://' + url
repl = None
if 'res:/' in url:
repl = ('res:/', blue.paths.ResolvePath(u'res:/'))
elif 'script:/' in url:
repl = ('script:/', blue.paths.ResolvePath(u'script:/'))
elif 'cache:/' in url:
repl = ('cache:/', blue.paths.ResolvePath(u'cache:/'))
if repl:
url = url.replace(repl[0], 'file:' + nturl2path.pathname2url(repl[1]))
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
return (urlparse.urlunsplit((scheme,
netloc,
path,
query,
'')), fragment)
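# Illustrative behaviour (example URLs are assumptions):
#   ParseURL('foo.html#top', 'http://example.com/dir/page.html')
#       -> ('http://example.com/dir/foo.html', 'top')
#   ParseURL('market.html', 'local://index.html')
#       -> ('local://market.html', '')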
def DirUp(url, force = 1):
i = url[:-1].rfind('/')
if i == -1:
if force:
raise ValueError, 'Bad URL (no parent dir): %s' % url
return url
return url[:i] + '/'
def DefaultHomepage():
import eve.client.script.ui.control.browser.browserutil as evebrowserutil
if hasattr(evebrowserutil, 'DefaultHomepage'):
home = evebrowserutil.DefaultHomepage()
if home is not None:
return home
return 'http://www.google.com'
def DefaultCachePath():
return blue.paths.ResolvePathForWriting(u'cache:/Browser')
class CrashedBrowserViewHost(object):
alive = False
def __getattr__(self, attr):
return CrashedBrowserViewHostAttribute(attr)
def __setattr__(self, attr, value):
pass
class CrashedBrowserViewHostAttribute():
def __init__(self, attr):
self.attr = attr
def __call__(self, *args, **kwargs):
print 'Calling %s%s%s on a crashed BrowserView' % (self.attr, args, kwargs)
def __repr__(self):
return '<CrashedBrowserViewHostAttribute %s>' % self.attr
def NextPowerOfTwo(n):
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
return n + 1
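# e.g. NextPowerOfTwo(37) == 64, NextPowerOfTwo(64) == 64, NextPowerOfTwo(1) == 1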
class LoadErrors():
BLACKLIST = 100
WHITELIST = 101
FAILED = -2
ABORTED = -3
FILE_NOT_FOUND = -4
OPERATION_TIMED_OUT = -7
ACCESS_DENIED = -10
CONNECTION_CLOSED = -100
CONNECTION_RESET = -101
CONNECTION_REFUSED = -102
CONNECTION_ABORTED = -103
CONNECTION_FAILED = -104
NAME_NOT_RESOLVED = -105
INTERNET_DISCONNECTED = -106
SSL_PROTOCOL_ERROR = -107
ADDRESS_INVALID = -108
ADDRESS_UNREACHABLE = -109
CERT_ERRORS_BEGIN = -200
CERT_COMMON_NAME_INVALID = -200
CERT_DATE_INVALID = -201
CERT_AUTHORITY_INVALID = -202
CERT_CONTAINS_ERRORS = -203
CERT_NO_REVOCATION_MECHANISM = -204
CERT_UNABLE_TO_CHECK_REVOCATION = -205
CERT_REVOKED = -206
CERT_INVALID = -207
CERT_ERRORS_END = -208
INVALID_URL = -300
DISALLOWED_URL_SCHEME = -301
UNKNOWN_URL_SCHEME = -302
TOO_MANY_REDIRECTS = -311
UNSAFE_PORT = -312
INVALID_RESPONSE = -320
INVALID_CHUNKED_ENCODING = -321
METHOD_NOT_SUPPORTED = -322
UNEXPECTED_PROXY_AUTH = -323
EMPTY_RESPONSE = -324
RESPONSE_HEADERS_TOO_BIG = -325
def GetErrorString(errorCode):
errorString = ''
if errorCode == LoadErrors.FILE_NOT_FOUND:
errorString += localization.GetByLabel('UI/Shared/HTMLError1')
elif errorCode == LoadErrors.OPERATION_TIMED_OUT:
errorString += localization.GetByLabel('UI/Browser/LoadErrorTimedOut')
elif errorCode == LoadErrors.ACCESS_DENIED:
errorString += localization.GetByLabel('UI/Generic/AccessDenied')
elif errorCode == LoadErrors.CONNECTION_CLOSED:
errorString += localization.GetByLabel('UI/Browser/LoadErrorConnectionClosed')
elif errorCode == LoadErrors.CONNECTION_RESET:
errorString += localization.GetByLabel('UI/Browser/LoadErrorConnectionReset')
elif errorCode == LoadErrors.CONNECTION_REFUSED:
errorString += localization.GetByLabel('UI/Browser/LoadErrorConnectionRefused')
elif errorCode == LoadErrors.CONNECTION_ABORTED:
errorString += localization.GetByLabel('UI/Browser/LoadErrorConnectionAborted')
elif errorCode == LoadErrors.CONNECTION_FAILED:
errorString += localization.GetByLabel('UI/Browser/LoadErrorConnectionFailed')
elif errorCode == LoadErrors.NAME_NOT_RESOLVED:
errorString += localization.GetByLabel('UI/Browser/LoadErrorNameNotResolved')
elif errorCode == LoadErrors.INTERNET_DISCONNECTED:
errorString += localization.GetByLabel('UI/Browser/LoadErrorConnectionLost')
elif errorCode == LoadErrors.ADDRESS_INVALID:
errorString += localization.GetByLabel('UI/Browser/LoadErrorAddressInvalid')
elif errorCode == LoadErrors.ADDRESS_UNREACHABLE:
errorString += localization.GetByLabel('UI/Browser/LoadErrorAddressUnreachable')
elif errorCode <= LoadErrors.CERT_ERRORS_BEGIN and errorCode >= LoadErrors.CERT_ERRORS_END or errorCode == LoadErrors.SSL_PROTOCOL_ERROR:
if errorCode == LoadErrors.CERT_COMMON_NAME_INVALID:
errorString += localization.GetByLabel('UI/Browser/LoadErrorCertificateNameMismatch')
elif errorCode == LoadErrors.CERT_DATE_INVALID:
errorString += localization.GetByLabel('UI/Browser/LoadErrorCertificateExpired')
else:
errorString += localization.GetByLabel('UI/Browser/LoadErrorCertificateErrors')
elif errorCode == LoadErrors.INVALID_URL or errorCode == LoadErrors.DISALLOWED_URL_SCHEME or errorCode == LoadErrors.UNKNOWN_URL_SCHEME:
errorString += localization.GetByLabel('UI/Browser/LoadErrorInvalidURL')
elif errorCode == LoadErrors.INVALID_RESPONSE or errorCode == LoadErrors.EMPTY_RESPONSE or errorCode == LoadErrors.RESPONSE_HEADERS_TOO_BIG:
errorString += localization.GetByLabel('UI/Browser/LoadErrorResponseInvalid')
return errorString
exports = {'corebrowserutil.ParseURL': ParseURL,
'corebrowserutil.DirUp': DirUp,
'corebrowserutil.GetStringFromURL': GetStringFromURL,
'corebrowserutil.DefaultHomepage': DefaultHomepage,
'corebrowserutil.DefaultCachePath': DefaultCachePath,
'corebrowserutil.CrashedBrowserViewHost': CrashedBrowserViewHost,
'corebrowserutil.LoadErrors': LoadErrors,
'corebrowserutil.GetErrorString': GetErrorString,
'corebrowserutil.NextPowerOfTwo': NextPowerOfTwo}
| [
"[email protected]"
] | |
671f43d908baa3c118b7e3e44c09aee84552f4d5 | ff58ba25d940ed34d9684efab04adef85d1e1c0f | /ENV/lib/python2.6/site-packages/gunicorn/app/pasterapp.py | 388478181468378e0cb86f5300a8946ea4738159 | [] | no_license | afsmith/Kneto-Sello | e9046a81ff83652531adc55aab3f90f77af5b5be | a1b12daf8a04ef485ddcaa2944b2d87878a8cdd0 | refs/heads/master | 2021-03-27T17:31:23.830989 | 2013-06-04T07:29:58 | 2013-06-04T07:29:58 | 6,720,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,270 | py | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import logging
import os
import pkg_resources
import sys
import ConfigParser
from paste.deploy import loadapp, loadwsgi
SERVER = loadwsgi.SERVER
from gunicorn.app.base import Application
from gunicorn.config import Config
class PasterBaseApplication(Application):
def app_config(self):
cx = loadwsgi.loadcontext(SERVER, self.cfgurl, relative_to=self.relpath)
gc, lc = cx.global_conf.copy(), cx.local_conf.copy()
cfg = {}
host, port = lc.pop('host', ''), lc.pop('port', '')
if host and port:
cfg['bind'] = '%s:%s' % (host, port)
elif host:
cfg['bind'] = host
cfg['workers'] = int(lc.get('workers', 1))
cfg['umask'] = int(lc.get('umask', 0))
cfg['default_proc_name'] = gc.get('__file__')
for k, v in gc.items():
if k not in self.cfg.settings:
continue
cfg[k] = v
for k, v in lc.items():
if k not in self.cfg.settings:
continue
cfg[k] = v
return cfg
def configure_logging(self):
if hasattr(self, "cfgfname"):
self.logger = logging.getLogger('gunicorn')
# from paste.script.command
parser = ConfigParser.ConfigParser()
parser.read([self.cfgfname])
if parser.has_section('loggers'):
if sys.version_info >= (2, 6):
from logging.config import fileConfig
else:
# Use our custom fileConfig -- 2.5.1's with a custom Formatter class
# and less strict whitespace (which were incorporated into 2.6's)
from gunicorn.logging_config import fileConfig
config_file = os.path.abspath(self.cfgfname)
fileConfig(config_file, dict(__file__=config_file,
here=os.path.dirname(config_file)))
return
super(PasterBaseApplication, self).configure_logging()
class PasterApplication(PasterBaseApplication):
def init(self, parser, opts, args):
if len(args) != 1:
parser.error("No application name specified.")
cfgfname = os.path.normpath(os.path.join(os.getcwd(), args[0]))
cfgfname = os.path.abspath(cfgfname)
if not os.path.exists(cfgfname):
parser.error("Config file not found: %s" % cfgfname)
self.cfgurl = 'config:%s' % cfgfname
self.relpath = os.path.dirname(cfgfname)
self.cfgfname = cfgfname
sys.path.insert(0, self.relpath)
pkg_resources.working_set.add_entry(self.relpath)
return self.app_config()
def load(self):
return loadapp(self.cfgurl, relative_to=self.relpath)
class PasterServerApplication(PasterBaseApplication):
def __init__(self, app, gcfg=None, host="127.0.0.1", port=None, *args, **kwargs):
self.cfg = Config()
self.app = app
self.callable = None
gcfg = gcfg or {}
cfgfname = gcfg.get("__file__")
if cfgfname is not None:
self.cfgurl = 'config:%s' % cfgfname
self.relpath = os.path.dirname(cfgfname)
self.cfgfname = cfgfname
cfg = kwargs.copy()
if port and not host.startswith("unix:"):
bind = "%s:%s" % (host, port)
else:
bind = host
cfg["bind"] = bind
if gcfg:
for k, v in list(gcfg.items()):
cfg[k] = v
cfg["default_proc_name"] = cfg['__file__']
try:
for k, v in list(cfg.items()):
if k.lower() in self.cfg.settings and v is not None:
self.cfg.set(k.lower(), v)
except Exception, e:
sys.stderr.write("\nConfig error: %s\n" % str(e))
sys.stderr.flush()
sys.exit(1)
self.configure_logging()
def load_config(self):
if not hasattr(self, "cfgfname"):
return
cfg = self.app_config()
for k,v in cfg.items():
try:
self.cfg.set(k.lower(), v)
except:
sys.stderr.write("Invalid value for %s: %s\n\n" % (k, v))
raise
def load(self):
if hasattr(self, "cfgfname"):
return loadapp(self.cfgurl, relative_to=self.relpath)
return self.app
def run():
"""\
The ``gunicorn_paster`` command for launching Paster compatible
applications like Pylons or Turbogears2
"""
from gunicorn.app.pasterapp import PasterApplication
PasterApplication("%prog [OPTIONS] pasteconfig.ini").run()
def paste_server(app, gcfg=None, host="127.0.0.1", port=None, *args, **kwargs):
"""\
A paster server.
The entry point in your paster ini file should look like this:
[server:main]
use = egg:gunicorn#main
host = 127.0.0.1
port = 5000
"""
from gunicorn.app.pasterapp import PasterServerApplication
PasterServerApplication(app, gcfg=gcfg, host=host, port=port, *args, **kwargs).run()
| [
"[email protected]"
] | |
4cc88ed87b6aaa38699ceb8033c67c89bfceda02 | fdafe3308a26359559c61cb8b561363771100788 | /src/models/cifir10_classification/data/DataImage.py | 1a6b74d8b32a380b0ebf61ed349777290783247e | [] | no_license | yyHaker/tensorflow_study | e18ff3107365081643f8ea8c48a99c82a60c2000 | 7ea658444db9aa37b9c2d9b1e71dabe7f493be7c | refs/heads/master | 2021-09-13T16:52:11.142932 | 2018-05-02T10:27:42 | 2018-05-02T10:27:42 | 115,399,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,047 | py | # -*- coding:utf-8 -*-
"""
read the image data from the data file
"""
import numpy as np
from scipy import misc
import matplotlib.pyplot as plt
import os
import cv2
import logging
import csv
import random
# reproduce
np.random.seed(1)
class DataImage(object):
"""the image data object
vocabs below:
self.class_to_id:
self.id_to_class:
self.imgname_to_classid:
"""
def __init__(self, image_width=224, image_height=224, images_dir='data/image_scene_data/data',
labels_path='data/image_scene_data/list.csv',
categories_path='data/image_scene_data/categories.csv'):
# resize the image to what size
self.image_width = image_width
self.image_height = image_height
self.logger = logging.getLogger("image classification")
self.build_class_vocab(categories_path)
self.load_image_data(images_dir, labels_path)
self.analyze_image_data()
self._split_train_valid(valid_rate=0.9)
self.n_train = self.train_images.shape[0]
self.n_valid = self.valid_images.shape[0]
# self.n_test = self.test_images.shape[0]
def analyze_image_data(self):
images_labels = self.images_labels
class_count = {}
labels = []
for image_label in images_labels:
labels.append(image_label[1])
lbl_set = set(labels)
for lbl in lbl_set:
class_count[self.id_to_class[lbl]] = labels.count(lbl)
self.logger.info("The training image data infor: {}".format(class_count))
def build_class_vocab(self, categories_path):
"""
build id_to_class and class_to_id vocab..
:param categories_path:
:return:
"""
self.logger.info("build id_to_class and class_to_id vocab....")
categories_csv = csv.reader(open(categories_path, 'r', encoding='utf-8'))
id_to_class = {}
class_to_id = {}
for c in categories_csv:
if "ID" in c:
continue
id_to_class[c[0]] = c[2]
for k, v in id_to_class.items():
class_to_id[v] = k
self.class_to_id, self.id_to_class = class_to_id, id_to_class
self.logger.info("build class vocab done")
self.logger.info("id_to_class: {}".format(id_to_class))
self.logger.info("class_to_id: {}".format(class_to_id))
def load_image_data(self, images_dir, labels_path):
"""
load image data, and build vocabs.
:param images_dir: teh image data dir
:return:
"""
# read labels
self.logger.info("read labels....")
self.imgname_to_classid = {}
list_csv = csv.reader(open(labels_path, 'r'))
for img in list_csv:
if "FILE_ID" in img:
continue
self.imgname_to_classid[img[0]] = img[1]
# read and show images
self.logger.info("read images....")
images_labels = []
count = 0
for filename in os.listdir(images_dir):
img = cv2.imread(os.path.join(images_dir, filename))
real = cv2.resize(img, (self.image_width, self.image_height)) # numpy array
count += 1
if count % 1000 == 0:
self.logger.info("read images: {}".format(count))
# show image (get the image label)
image_name = filename.split('.')[0]
class_id = self.imgname_to_classid[image_name]
label_name = self.id_to_class[class_id]
images_labels.append((real, class_id))
# cv2.namedWindow(label_name)
# cv2.imshow(label_name, real)
# cv2.waitKey(0)
# if count % 5 == 0:
# cv2.destroyAllWindows()
# cv2.destroyAllWindows()
self.logger.info("total read images and labels: {}".format(count))
# conver to numpy array
self.images_labels = np.array(images_labels)
def _split_train_valid(self, valid_rate=0.9):
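# NOTE: despite its name, `valid_rate` is used as the *training* fraction below:
# the first `valid_rate` share of the shuffled data becomes the training set and
# the remainder becomes the validation set.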
# shuffle
np.random.shuffle(self.images_labels)
# split the data
thresh = int(valid_rate * self.images_labels.shape[0])
self.train_images, self.train_labels = [], []
self.valid_images, self.valid_labels = [], []
for img, lbl in self.images_labels[: thresh]:
self.train_images.append(img)
self.train_labels.append(lbl)
for img, lbl in self.images_labels[thresh:]:
self.valid_images.append(img)
self.valid_labels.append(lbl)
# free memory
del self.images_labels
self.train_images, self.train_labels = np.array(self.train_images
, dtype=np.uint8), np.array(self.train_labels, dtype=int)
self.valid_images, self.valid_labels = np.array(self.valid_images
, dtype=np.uint8), np.array(self.valid_labels, dtype=int)
self.logger.info("split data result: train images{}, valid images: {}".format(
self.train_images.shape[0], self.valid_images.shape[0]))
def data_augmentation(self, images, mode='train', flip=False,
crop=False, crop_shape=(32, 32, 3), whiten=False,
noise=False, noise_mean=0, noise_std=0.01):
"""
data augmentation.
:param images:
:param mode:
:param flip:
:param crop: if crop the image
:param crop_shape: crop a shape of the image
:param whiten:
:param noise:
:param noise_mean:
:param noise_std:
:return:
"""
if not crop_shape:
crop_shape = (self.image_width, self.image_height)
# image cropping
if crop:
if mode == 'train':
images = self._image_crop(images, shape=crop_shape)
elif mode == 'test':
images = self._image_crop_test(images, shape=crop_shape)
# image flipping
if flip:
images = self._image_flip(images)
# image whitening
if whiten:
images = self._image_whitening(images)
# image noise
if noise:
images = self._image_noise(images, mean=noise_mean, std=noise_std)
return images
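# Illustrative training-time call (argument values assumed):
#   batch = data.data_augmentation(batch, mode='train', flip=True, crop=True,
#                                  crop_shape=(200, 200, 3), whiten=True)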
def _image_crop(self, images, shape):
# image cropping (random crop, used for training)
new_images = []
for i in range(images.shape[0]):
old_image = images[i, :, :, :]
left = np.random.randint(old_image.shape[0] - shape[0] + 1)
top = np.random.randint(old_image.shape[1] - shape[1] + 1)
new_image = old_image[left: left + shape[0], top: top + shape[1], :]
new_images.append(new_image)
return np.array(new_images)
def _image_crop_test(self, images, shape):
# image cropping (center crop, used for test)
new_images = []
for i in range(images.shape[0]):
old_image = images[i, :, :, :]
left = int((old_image.shape[0] - shape[0]) / 2)
top = int((old_image.shape[1] - shape[1]) / 2)
new_image = old_image[left: left + shape[0], top: top + shape[1], :]
new_images.append(new_image)
return np.array(new_images)
def _image_flip(self, images):
# image flipping (random horizontal flip)
for i in range(images.shape[0]):
old_image = images[i, :, :, :]
if np.random.random() < 0.5:
new_image = cv2.flip(old_image, 1)
else:
new_image = old_image
images[i, :, :, :] = new_image
return images
def _image_whitening(self, images):
# image whitening (per-image standardization)
for i in range(images.shape[0]):
old_image = images[i, :, :, :]
new_image = (old_image - np.mean(old_image)) / np.std(old_image)
images[i, :, :, :] = new_image
return images
def _image_noise(self, images, mean=0, std=0.01):
# image noise: add Gaussian noise to every pixel of every image
for n in range(images.shape[0]):
    new_image = images[n, :, :, :]
    for i in range(new_image.shape[0]):
        for j in range(new_image.shape[1]):
            for k in range(new_image.shape[2]):
                new_image[i, j, k] += random.gauss(mean, std)
    images[n, :, :, :] = new_image
return images
if __name__ == "__main__":
logger = logging.getLogger("image classification")
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s-%(name)s-%(levelname)s-%(message)s')
# log to the console by default
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
image_data = DataImage(image_width=224, image_height=224,
images_dir='image_scene_data/data',
labels_path='image_scene_data/list.csv',
categories_path='image_scene_data/categories.csv')
| [
"[email protected]"
] | |
16291f7291e7ead6e270a41e3cc62a5634ed2532 | d25a8b0e9f8a4f48504a49e094085f92d8d8e08a | /tests/__init__.py | f5c12299b95821cfd20dc9d1f9152536d6790db3 | [
"MIT"
] | permissive | William-Lake/doc_db | c7ad963bc0ff3a75b9a690bf44025e2aa6d1773b | 022b3e08d10d104fd838c7a094091e78d771ebe1 | refs/heads/master | 2020-03-31T01:34:59.815672 | 2019-01-30T18:36:39 | 2019-01-30T18:36:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | # -*- coding: utf-8 -*-
"""Unit test package for doc_db."""
| [
"noreply"
] | noreply |
1382dcbea411dd5c7a5227efcd1b1545775e46c7 | 8ecf97e0f12037ccd0b63f265fddd3bf84229d7a | /keras/inception_autoencoder_predict.py | 966fa0964b70a98e8e0ee7e382925390c8b2781a | [] | no_license | ArifSohaib/video_collission_detection | 158af02ce55d8e7f39b532417010b882106bdb2f | 79ac145f4a3d0386ffac4f4fc2cc6efda056e53f | refs/heads/master | 2021-09-11T01:36:09.085256 | 2018-04-05T19:44:40 | 2018-04-05T19:44:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,739 | py | from inception_autoencoder import build_autoencoder
import numpy as np
import pickle
import matplotlib.pyplot as plt
from sklearn import preprocessing
def main():
impact_data = np.loadtxt('../data/features/impacts_period1.npy')
full_data = np.loadtxt('../data/features/period1_full.npy')
min_max_scaler = preprocessing.MinMaxScaler()
impact_data = min_max_scaler.fit_transform(impact_data)
full_data = min_max_scaler.fit_transform(full_data)
model = build_autoencoder()
model.load_weights('../data/weights/autoencoder_weights.h5')
predict_impact = model.predict(impact_data)
predict_full = model.predict(full_data)
mse_full = ((predict_full - full_data) ** 2).mean(axis=1)
mse_impact = ((predict_impact - impact_data) ** 2).mean(axis=1)
mean_mse = mse_full.mean()
print("full mse avg {}".format(mean_mse))
print("impact mse avg {}".format(mse_impact.mean()))
print("full mse min {}".format(mse_full.min()))
print("impact mse min {}".format(mse_impact.min()))
print("full mse max {}".format(mse_full.max()))
print("impact mse max {}".format(mse_impact.max()))
plt.hist(mse_full, label='full_mse stats')
plt.show()
plt.hist(mse_impact, label='impact_mse stats')
plt.show()
full_percentile = np.percentile(mse_full, 50)
impact_percentile = np.percentile(mse_impact, 50)
print("full mse percentile {}".format(full_percentile))
print("impact mse percentile {}".format(impact_percentile))
print("length of full data {}".format(len(mse_full)))
pred_impact_idx = []
#running the above statistics, we can say that if the mse is above the max of impact mse, then it is not an impact
for idx, err in enumerate(mse_full):
if err > impact_percentile:
pred_impact_idx.append(idx)
with open('../data/frames/frames_vid1_div5.pkl', 'rb') as f:
confirmed_idx = pickle.load(f)
confirmed_idx = sorted(confirmed_idx.values())
"""
for each value in confirmed_idx we take the 20 frame indices from idx-10 to idx+9 (for 29 impacts this totals 29 * 20 = 580)
"""
full_idx = []
for idx in confirmed_idx:
for i in range(-10, 10):
full_idx.append(idx+i)
true_count = 0
false_pos = 0
#to check accuracy, we can compare against idx's computed before
idx_count = 0
for idx in pred_impact_idx:
if idx in full_idx:
true_count += 1
else:
false_pos += 1
print("num predictions {}".format(len(pred_impact_idx)))
print("true count {}".format(true_count))
print("length of pred impacts {}".format(len(full_idx)))
print("false pos {}".format(false_pos))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
490e204bc88e69d4ea255b9c9b4e172ee01ae582 | eb26f2a53339cc9880c193225919bd37078113aa | /flask/mysite/app.py | 1ce6098bfcafb1745e57a8640c7e316044ddd05d | [] | no_license | nnyong/TIL-c9 | c2590ea94c13221a45f274beb22a0f03b09bf6d4 | 2e2803ee60467ffbad7d9a704b0029476ebead0b | refs/heads/master | 2020-04-17T17:17:53.798516 | 2019-05-02T03:08:56 | 2019-05-02T03:08:56 | 166,777,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py | import os, csv
from flask import Flask, render_template, request
app=Flask(__name__)
@app.route('/')
def index():
return 'Hello World!'
@app.route('/greeting/<string:name>')
def greeting(name):
return f'반갑습니다! {name}님!'
@app.route('/cube/<int:num>')
def cube(num):
result=num**3
return str(result)
@app.route('/html_file')
def html_file():
return render_template('html_file.html')
@app.route('/hi/<name>')
def hi(name):
return render_template('hi.html', name_in_html=name)
@app.route('/fruits')
def fruits():
fruits=['apple','banana','mango','melon']
return render_template('fruits.html',fruits=fruits)
@app.route('/send')
def send():
return render_template('send.html')
@app.route('/receive')
def receive():
# request.args
# {'who':'junwoo','message':'hello'}
who=request.args.get('who')
message=request.args.get('message')
with open('guestbook.csv','a',encoding='utf8',newline='') as f:
writer=csv.DictWriter(f,fieldnames=['who','message'])
writer.writerow({
'who': who,
'message': message
})
return render_template('receive.html',name=who, message=message)
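# Example request handled above (values illustrative):
#   GET /receive?who=junwoo&message=hello
#   -> appends {'who': 'junwoo', 'message': 'hello'} to guestbook.csv and renders receive.html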
@app.route('/guestbook')
def guestbook():
messages=[]
with open('guestbook.csv','r',encoding='utf8',newline='') as f:
reader=csv.DictReader(f)
for row in reader:
messages.append(row)
return render_template('guestbook.html',messages=messages)
if __name__=='__main__':
app.run(host=os.getenv('IP'), port=os.getenv('PORT'), debug=True) | [
"[email protected]"
] | |
382f5f8646b4091b64a3993e4837f70508663a68 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/gensim/2018/12/keyedvectors.py | 0b2be1d732c1683206a01a2df672c98c0f737f6a | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 85,738 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Shiva Manne <[email protected]>
# Copyright (C) 2018 RaRe Technologies s.r.o.
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module implements word vectors and their similarity look-ups.
Since trained word vectors are independent from the way they were trained (:class:`~gensim.models.word2vec.Word2Vec`,
:class:`~gensim.models.fasttext.FastText`, :class:`~gensim.models.wrappers.wordrank.WordRank`,
:class:`~gensim.models.wrappers.varembed.VarEmbed` etc), they can be represented by a standalone structure,
as implemented in this module.
The structure is called "KeyedVectors" and is essentially a mapping between *entities*
and *vectors*. Each entity is identified by its string id, so this is a mapping between {str => 1D numpy array}.
The entity typically corresponds to a word (so the mapping maps words to 1D vectors),
but for some models, the key can also correspond to a document, a graph node etc. To generalize
over different use-cases, this module calls the keys **entities**. Each entity is
always represented by its string id, no matter whether the entity is a word, a document or a graph node.
Why use KeyedVectors instead of a full model?
=============================================
+---------------------------+--------------+------------+-------------------------------------------------------------+
| capability | KeyedVectors | full model | note |
+---------------------------+--------------+------------+-------------------------------------------------------------+
| continue training vectors | ❌ | ✅ | You need the full model to train or update vectors. |
+---------------------------+--------------+------------+-------------------------------------------------------------+
| smaller objects | ✅ | ❌ | KeyedVectors are smaller and need less RAM, because they |
| | | | don't need to store the model state that enables training. |
+---------------------------+--------------+------------+-------------------------------------------------------------+
| save/load from native | | | Vectors exported by the Facebook and Google tools |
| fasttext/word2vec format | ✅ | ❌ | do not support further training, but you can still load |
| | | | them into KeyedVectors. |
+---------------------------+--------------+------------+-------------------------------------------------------------+
| append new vectors | ✅ | ✅ | Add new entity-vector entries to the mapping dynamically. |
+---------------------------+--------------+------------+-------------------------------------------------------------+
| concurrency | ✅ | ✅ | Thread-safe, allows concurrent vector queries. |
+---------------------------+--------------+------------+-------------------------------------------------------------+
| shared RAM | ✅ | ✅ | Multiple processes can re-use the same data, keeping only |
| | | | a single copy in RAM using |
| | | | `mmap <https://en.wikipedia.org/wiki/Mmap>`_. |
+---------------------------+--------------+------------+-------------------------------------------------------------+
| fast load | ✅ | ✅ | Supports `mmap <https://en.wikipedia.org/wiki/Mmap>`_ |
| | | | to load data from disk instantaneously. |
+---------------------------+--------------+------------+-------------------------------------------------------------+
TL;DR: the main difference is that KeyedVectors do not support further training.
On the other hand, by shedding the internal data structures necessary for training, KeyedVectors offer a smaller RAM
footprint and a simpler interface.
How to obtain word vectors?
===========================
Train a full model, then access its `model.wv` property, which holds the standalone keyed vectors.
For example, using the Word2Vec algorithm to train the vectors
.. sourcecode:: pycon
>>> from gensim.test.utils import common_texts
>>> from gensim.models import Word2Vec
>>>
>>> model = Word2Vec(common_texts, size=100, window=5, min_count=1, workers=4)
>>> word_vectors = model.wv
Persist the word vectors to disk with
.. sourcecode:: pycon
>>> from gensim.test.utils import get_tmpfile
>>> from gensim.models import KeyedVectors
>>>
>>> fname = get_tmpfile("vectors.kv")
>>> word_vectors.save(fname)
>>> word_vectors = KeyedVectors.load(fname, mmap='r')
The vectors can also be instantiated from an existing file on disk
in Google's original word2vec C format, as a KeyedVectors instance
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> wv_from_text = KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c'), binary=False) # C text format
>>> wv_from_bin = KeyedVectors.load_word2vec_format(datapath("euclidean_vectors.bin"), binary=True) # C bin format
What can I do with word vectors?
================================
You can perform various syntactic/semantic NLP word tasks with the trained vectors.
Some of them are already built-in
.. sourcecode:: pycon
>>> import gensim.downloader as api
>>>
>>> word_vectors = api.load("glove-wiki-gigaword-100") # load pre-trained word-vectors from gensim-data
>>>
>>> result = word_vectors.most_similar(positive=['woman', 'king'], negative=['man'])
>>> print("{}: {:.4f}".format(*result[0]))
queen: 0.7699
>>>
>>> result = word_vectors.most_similar_cosmul(positive=['woman', 'king'], negative=['man'])
>>> print("{}: {:.4f}".format(*result[0]))
queen: 0.8965
>>>
>>> print(word_vectors.doesnt_match("breakfast cereal dinner lunch".split()))
cereal
>>>
>>> similarity = word_vectors.similarity('woman', 'man')
>>> similarity > 0.8
True
>>>
>>> result = word_vectors.similar_by_word("cat")
>>> print("{}: {:.4f}".format(*result[0]))
dog: 0.8798
>>>
>>> sentence_obama = 'Obama speaks to the media in Illinois'.lower().split()
>>> sentence_president = 'The president greets the press in Chicago'.lower().split()
>>>
>>> similarity = word_vectors.wmdistance(sentence_obama, sentence_president)
>>> print("{:.4f}".format(similarity))
3.4893
>>>
>>> distance = word_vectors.distance("media", "media")
>>> print("{:.1f}".format(distance))
0.0
>>>
>>> sim = word_vectors.n_similarity(['sushi', 'shop'], ['japanese', 'restaurant'])
>>> print("{:.4f}".format(sim))
0.7067
>>>
>>> vector = word_vectors['computer'] # numpy vector of a word
>>> vector.shape
(100,)
>>>
>>> vector = word_vectors.word_vec('office', use_norm=True)
>>> vector.shape
(100,)
Correlation with human opinion on word similarity
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> similarities = model.wv.evaluate_word_pairs(datapath('wordsim353.tsv'))
And on word analogies
.. sourcecode:: pycon
>>> analogy_scores = model.wv.evaluate_word_analogies(datapath('questions-words.txt'))
and so on.
"""
from __future__ import division # py3 "true division"
from collections import deque
from itertools import chain
import logging
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty # noqa:F401
# If pyemd C extension is available, import it.
# If pyemd is attempted to be used, but isn't installed, ImportError will be raised in wmdistance
try:
from pyemd import emd
PYEMD_EXT = True
except ImportError:
PYEMD_EXT = False
from numpy import dot, float32 as REAL, empty, memmap as np_memmap, \
double, array, zeros, vstack, sqrt, newaxis, integer, \
ndarray, sum as np_sum, prod, argmax, divide as np_divide
import numpy as np
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from gensim.corpora.dictionary import Dictionary
from six import string_types, integer_types
from six.moves import zip, range
from scipy import sparse, stats
from gensim.utils import deprecated
from gensim.models.utils_any2vec import _save_word2vec_format, _load_word2vec_format, _compute_ngrams, _ft_hash
logger = logging.getLogger(__name__)
class Vocab(object):
"""A single vocabulary item, used internally for collecting per-word frequency/sampling info,
and for constructing binary trees (incl. both word leaves and inner nodes).
"""
def __init__(self, **kwargs):
self.count = 0
self.__dict__.update(kwargs)
def __lt__(self, other): # used for sorting in a priority queue
return self.count < other.count
def __str__(self):
vals = ['%s:%r' % (key, self.__dict__[key]) for key in sorted(self.__dict__) if not key.startswith('_')]
return "%s(%s)" % (self.__class__.__name__, ', '.join(vals))
class BaseKeyedVectors(utils.SaveLoad):
"""Abstract base class / interface for various types of word vectors."""
def __init__(self, vector_size):
self.vectors = zeros((0, vector_size))
self.vocab = {}
self.vector_size = vector_size
self.index2entity = []
def save(self, fname_or_handle, **kwargs):
super(BaseKeyedVectors, self).save(fname_or_handle, **kwargs)
@classmethod
def load(cls, fname_or_handle, **kwargs):
return super(BaseKeyedVectors, cls).load(fname_or_handle, **kwargs)
def similarity(self, entity1, entity2):
"""Compute cosine similarity between two entities, specified by their string id."""
raise NotImplementedError()
def most_similar(self, **kwargs):
"""Find the top-N most similar entities.
Possibly have `positive` and `negative` list of entities in `**kwargs`.
"""
raise NotImplementedError()
def distance(self, entity1, entity2):
"""Compute distance between vectors of two input entities, specified by their string id."""
raise NotImplementedError()
def distances(self, entity1, other_entities=()):
"""Compute distances from a given entity (its string id) to all entities in `other_entity`.
If `other_entities` is empty, return the distance between `entity1` and all entities in vocab.
"""
raise NotImplementedError()
def get_vector(self, entity):
"""Get the entity's representations in vector space, as a 1D numpy array.
Parameters
----------
entity : str
Identifier of the entity to return the vector for.
Returns
-------
numpy.ndarray
Vector for the specified entity.
Raises
------
KeyError
If the given entity identifier doesn't exist.
"""
if entity in self.vocab:
result = self.vectors[self.vocab[entity].index]
result.setflags(write=False)
return result
else:
raise KeyError("'%s' not in vocabulary" % entity)
def add(self, entities, weights, replace=False):
"""Append entities and theirs vectors in a manual way.
If some entity is already in the vocabulary, the old vector is kept unless `replace` flag is True.
Parameters
----------
entities : list of str
Entities specified by string ids.
weights: {list of numpy.ndarray, numpy.ndarray}
List of 1D np.array vectors or a 2D np.array of vectors.
replace: bool, optional
Flag indicating whether to replace vectors for entities which already exist in the vocabulary,
if True - replace vectors, otherwise - keep old vectors.
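Example
-------
A minimal sketch (the entity names and vector values below are purely illustrative):
.. sourcecode:: pycon
>>> import numpy as np
>>> from gensim.models import KeyedVectors
>>>
>>> kv = KeyedVectors(vector_size=2)
>>> kv.add(['node_a', 'node_b'], [np.array([1.0, 0.0]), np.array([0.0, 1.0])])
>>> kv['node_a'].shape
(2,)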
"""
if isinstance(entities, string_types):
entities = [entities]
weights = np.array(weights).reshape(1, -1)
elif isinstance(weights, list):
weights = np.array(weights)
in_vocab_mask = np.zeros(len(entities), dtype=np.bool)
for idx, entity in enumerate(entities):
if entity in self.vocab:
in_vocab_mask[idx] = True
# add new entities to the vocab
for idx in np.nonzero(~in_vocab_mask)[0]:
entity = entities[idx]
self.vocab[entity] = Vocab(index=len(self.vocab), count=1)
self.index2entity.append(entity)
# add vectors for new entities
self.vectors = vstack((self.vectors, weights[~in_vocab_mask]))
# change vectors for in_vocab entities if `replace` flag is specified
if replace:
in_vocab_idxs = [self.vocab[entities[idx]].index for idx in np.nonzero(in_vocab_mask)[0]]
self.vectors[in_vocab_idxs] = weights[in_vocab_mask]
def __setitem__(self, entities, weights):
"""Add entities and theirs vectors in a manual way.
If some entity is already in the vocabulary, old vector is replaced with the new one.
This method is alias for :meth:`~gensim.models.keyedvectors.BaseKeyedVectors.add` with `replace=True`.
Parameters
----------
entities : {str, list of str}
Entities specified by their string ids.
weights: {list of numpy.ndarray, numpy.ndarray}
List of 1D np.array vectors or 2D np.array of vectors.
"""
if not isinstance(entities, list):
entities = [entities]
weights = weights.reshape(1, -1)
self.add(entities, weights, replace=True)
def __getitem__(self, entities):
"""Get vector representation of `entities`.
Parameters
----------
entities : {str, list of str}
Input entity/entities.
Returns
-------
numpy.ndarray
Vector representation for `entities` (1D if `entities` is string, otherwise - 2D).
"""
if isinstance(entities, string_types):
# allow calls like trained_model['office'], as a shorthand for trained_model[['office']]
return self.get_vector(entities)
return vstack([self.get_vector(entity) for entity in entities])
def __contains__(self, entity):
return entity in self.vocab
def most_similar_to_given(self, entity1, entities_list):
"""Get the `entity` from `entities_list` most similar to `entity1`."""
return entities_list[argmax([self.similarity(entity1, entity) for entity in entities_list])]
def closer_than(self, entity1, entity2):
"""Get all entities that are closer to `entity1` than `entity2` is to `entity1`."""
all_distances = self.distances(entity1)
e1_index = self.vocab[entity1].index
e2_index = self.vocab[entity2].index
closer_node_indices = np.where(all_distances < all_distances[e2_index])[0]
return [self.index2entity[index] for index in closer_node_indices if index != e1_index]
def rank(self, entity1, entity2):
"""Rank of the distance of `entity2` from `entity1`, in relation to distances of all entities from `entity1`."""
return len(self.closer_than(entity1, entity2)) + 1
class WordEmbeddingsKeyedVectors(BaseKeyedVectors):
"""Class containing common methods for operations over word vectors."""
def __init__(self, vector_size):
super(WordEmbeddingsKeyedVectors, self).__init__(vector_size=vector_size)
self.vectors_norm = None
self.index2word = []
@property
@deprecated("Attribute will be removed in 4.0.0, use self instead")
def wv(self):
return self
@property
def index2entity(self):
return self.index2word
@index2entity.setter
def index2entity(self, value):
self.index2word = value
@property
@deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors instead")
def syn0(self):
return self.vectors
@syn0.setter
@deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors instead")
def syn0(self, value):
self.vectors = value
@property
@deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_norm instead")
def syn0norm(self):
return self.vectors_norm
@syn0norm.setter
@deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_norm instead")
def syn0norm(self, value):
self.vectors_norm = value
def __contains__(self, word):
return word in self.vocab
def save(self, *args, **kwargs):
"""Save KeyedVectors.
Parameters
----------
fname : str
Path to the output file.
See Also
--------
:meth:`~gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.load`
Load saved model.
"""
# don't bother storing the cached normalized vectors
kwargs['ignore'] = kwargs.get('ignore', ['vectors_norm'])
super(WordEmbeddingsKeyedVectors, self).save(*args, **kwargs)
def word_vec(self, word, use_norm=False):
"""Get `word` representations in vector space, as a 1D numpy array.
Parameters
----------
word : str
Input word
use_norm : bool, optional
If True - resulting vector will be L2-normalized (unit euclidean length).
Returns
-------
numpy.ndarray
Vector representation of `word`.
Raises
------
KeyError
If word not in vocabulary.
"""
if word in self.vocab:
if use_norm:
result = self.vectors_norm[self.vocab[word].index]
else:
result = self.vectors[self.vocab[word].index]
result.setflags(write=False)
return result
else:
raise KeyError("word '%s' not in vocabulary" % word)
def get_vector(self, word):
return self.word_vec(word)
def words_closer_than(self, w1, w2):
"""Get all words that are closer to `w1` than `w2` is to `w1`.
Parameters
----------
w1 : str
Input word.
w2 : str
Input word.
Returns
-------
list (str)
List of words that are closer to `w1` than `w2` is to `w1`.
"""
return super(WordEmbeddingsKeyedVectors, self).closer_than(w1, w2)
def most_similar(self, positive=None, negative=None, topn=10, restrict_vocab=None, indexer=None):
"""Find the top-N most similar words.
Positive words contribute positively towards the similarity, negative words negatively.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given words and the vectors for each word in the model.
The method corresponds to the `word-analogy` and `distance` scripts in the original
word2vec implementation.
Parameters
----------
positive : list of str, optional
List of words that contribute positively.
negative : list of str, optional
List of words that contribute negatively.
topn : int, optional
Number of top-N similar words to return.
restrict_vocab : int, optional
Optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Returns
-------
list of (str, float)
Sequence of (word, similarity).
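Example
-------
An illustrative sketch, assuming the same pre-trained vectors as in the module examples above
(loaded via :mod:`gensim.downloader`; the dataset name is only an example):
.. sourcecode:: pycon
>>> import gensim.downloader as api
>>>
>>> word_vectors = api.load("glove-wiki-gigaword-100")
>>> result = word_vectors.most_similar(positive=['woman', 'king'], negative=['man'], topn=1)
>>> print("{}: {:.4f}".format(*result[0]))
queen: 0.7699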
"""
if positive is None:
positive = []
if negative is None:
negative = []
self.init_sims()
if isinstance(positive, string_types) and not negative:
# allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
positive = [positive]
# add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
positive = [
(word, 1.0) if isinstance(word, string_types + (ndarray,)) else word
for word in positive
]
negative = [
(word, -1.0) if isinstance(word, string_types + (ndarray,)) else word
for word in negative
]
# compute the weighted average of all words
all_words, mean = set(), []
for word, weight in positive + negative:
if isinstance(word, ndarray):
mean.append(weight * word)
else:
mean.append(weight * self.word_vec(word, use_norm=True))
if word in self.vocab:
all_words.add(self.vocab[word].index)
if not mean:
raise ValueError("cannot compute similarity with no input")
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
if indexer is not None:
return indexer.most_similar(mean, topn)
limited = self.vectors_norm if restrict_vocab is None else self.vectors_norm[:restrict_vocab]
dists = dot(limited, mean)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
def similar_by_word(self, word, topn=10, restrict_vocab=None):
"""Find the top-N most similar words.
Parameters
----------
word : str
Word
topn : {int, False}, optional
Number of top-N similar words to return. If topn is False, similar_by_word returns
the vector of similarity scores.
restrict_vocab : int, optional
Optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Returns
-------
list of (str, float)
Sequence of (word, similarity).
"""
return self.most_similar(positive=[word], topn=topn, restrict_vocab=restrict_vocab)
def similar_by_vector(self, vector, topn=10, restrict_vocab=None):
"""Find the top-N most similar words by vector.
Parameters
----------
vector : numpy.array
Vector from which similarities are to be computed.
topn : {int, False}, optional
Number of top-N similar words to return. If topn is False, similar_by_vector returns
the vector of similarity scores.
restrict_vocab : int, optional
Optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Returns
-------
list of (str, float)
Sequence of (word, similarity).
"""
return self.most_similar(positive=[vector], topn=topn, restrict_vocab=restrict_vocab)
def similarity_matrix(self, dictionary, tfidf=None, threshold=0.0, exponent=2.0, nonzero_limit=100, dtype=REAL):
"""Construct a term similarity matrix for computing Soft Cosine Measure.
This creates a sparse term similarity matrix in the :class:`scipy.sparse.csc_matrix` format for computing
Soft Cosine Measure between documents.
Parameters
----------
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
A dictionary that specifies a mapping between words and the indices of rows and columns
of the resulting term similarity matrix.
tfidf : :class:`gensim.models.tfidfmodel.TfidfModel`, optional
A model that specifies the relative importance of the terms in the dictionary. The rows
of the term similarity matrix will be built in decreasing order of importance of terms,
or in the order of term identifiers if None.
threshold : float, optional
Only pairs of words whose embeddings are more similar than `threshold` are considered
when building the sparse term similarity matrix.
exponent : float, optional
The exponent applied to the similarity between two word embeddings when building the term similarity matrix.
nonzero_limit : int, optional
The maximum number of non-zero elements outside the diagonal in a single row or column
of the term similarity matrix. Setting `nonzero_limit` to a constant ensures that the
time complexity of computing the Soft Cosine Measure will be linear in the document
length rather than quadratic.
dtype : numpy.dtype, optional
Data-type of the term similarity matrix.
Returns
-------
:class:`scipy.sparse.csc_matrix`
Term similarity matrix.
See Also
--------
:func:`gensim.matutils.softcossim`
The Soft Cosine Measure.
:class:`~gensim.similarities.docsim.SoftCosineSimilarity`
A class for performing corpus-based similarity queries with Soft Cosine Measure.
Notes
-----
The constructed matrix corresponds to the matrix Mrel defined in section 2.1 of
`Delphine Charlet and Geraldine Damnati, "SimBow at SemEval-2017 Task 3: Soft-Cosine Semantic Similarity
between Questions for Community Question Answering", 2017
<http://www.aclweb.org/anthology/S/S17/S17-2051.pdf>`_.
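Example
-------
A minimal sketch of plugging the matrix into the Soft Cosine Measure; `word_vectors` stands for any
loaded word vectors and the toy sentences are placeholders:
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>> from gensim.matutils import softcossim
>>>
>>> sent_1 = 'the president greets the press'.split()
>>> sent_2 = 'obama speaks to the media'.split()
>>> dictionary = Dictionary([sent_1, sent_2])
>>> similarity_matrix = word_vectors.similarity_matrix(dictionary)
>>> similarity = softcossim(dictionary.doc2bow(sent_1), dictionary.doc2bow(sent_2), similarity_matrix)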
"""
logger.info("constructing a term similarity matrix")
matrix_order = len(dictionary)
matrix_nonzero = [1] * matrix_order
matrix = sparse.identity(matrix_order, dtype=dtype, format="dok")
num_skipped = 0
# Decide the order of rows.
if tfidf is None:
word_indices = deque(sorted(dictionary.keys()))
else:
assert max(tfidf.idfs) < matrix_order
word_indices = deque([
index for index, _
in sorted(tfidf.idfs.items(), key=lambda x: (x[1], -x[0]), reverse=True)
])
# Traverse rows.
for row_number, w1_index in enumerate(list(word_indices)):
word_indices.popleft()
if row_number % 1000 == 0:
logger.info(
"PROGRESS: at %.02f%% rows (%d / %d, %d skipped, %.06f%% density)",
100.0 * (row_number + 1) / matrix_order, row_number + 1, matrix_order,
num_skipped, 100.0 * matrix.getnnz() / matrix_order**2)
w1 = dictionary[w1_index]
if w1 not in self.vocab:
num_skipped += 1
continue # A word from the dictionary is not present in the word2vec model.
# Traverse upper triangle columns.
if matrix_order <= nonzero_limit + 1: # Traverse all columns.
columns = (
(w2_index, self.similarity(w1, dictionary[w2_index]))
for w2_index in word_indices
if dictionary[w2_index] in self.vocab)
else: # Traverse only columns corresponding to the embeddings closest to w1.
num_nonzero = matrix_nonzero[w1_index] - 1
columns = (
(dictionary.token2id[w2], similarity)
for _, (w2, similarity)
in zip(
range(nonzero_limit - num_nonzero),
self.most_similar(positive=[w1], topn=nonzero_limit - num_nonzero)
)
if w2 in dictionary.token2id
)
columns = sorted(columns, key=lambda x: x[0])
for w2_index, similarity in columns:
# Ensure that we don't exceed `nonzero_limit` by mirroring the upper triangle.
if similarity > threshold and matrix_nonzero[w2_index] <= nonzero_limit:
element = similarity**exponent
matrix[w1_index, w2_index] = element
matrix_nonzero[w1_index] += 1
matrix[w2_index, w1_index] = element
matrix_nonzero[w2_index] += 1
logger.info(
"constructed a term similarity matrix with %0.6f %% nonzero elements",
100.0 * matrix.getnnz() / matrix_order**2
)
return matrix.tocsc()
def wmdistance(self, document1, document2):
"""Compute the Word Mover's Distance between two documents.
When using this code, please consider citing the following papers:
* `Ofir Pele and Michael Werman "A linear time histogram metric for improved SIFT matching"
<http://www.cs.huji.ac.il/~werman/Papers/ECCV2008.pdf>`_
* `Ofir Pele and Michael Werman "Fast and robust earth mover's distances"
<https://ieeexplore.ieee.org/document/5459199/>`_
* `Matt Kusner et al. "From Word Embeddings To Document Distances"
<http://proceedings.mlr.press/v37/kusnerb15.pdf>`_.
Parameters
----------
document1 : list of str
Input document.
document2 : list of str
Input document.
Returns
-------
float
Word Mover's distance between `document1` and `document2`.
Warnings
--------
This method only works if `pyemd <https://pypi.org/project/pyemd/>`_ is installed.
If one of the documents has no words that exist in the vocab, `float('inf')` (i.e. infinity)
will be returned.
Raises
------
ImportError
If `pyemd <https://pypi.org/project/pyemd/>`_ isn't installed.
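Example
-------
An illustrative sketch, assuming `pyemd` is installed and `word_vectors` holds already loaded vectors:
.. sourcecode:: pycon
>>> sentence_obama = 'Obama speaks to the media in Illinois'.lower().split()
>>> sentence_president = 'The president greets the press in Chicago'.lower().split()
>>> distance = word_vectors.wmdistance(sentence_obama, sentence_president)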
"""
if not PYEMD_EXT:
raise ImportError("Please install pyemd Python package to compute WMD.")
# Remove out-of-vocabulary words.
len_pre_oov1 = len(document1)
len_pre_oov2 = len(document2)
document1 = [token for token in document1 if token in self]
document2 = [token for token in document2 if token in self]
diff1 = len_pre_oov1 - len(document1)
diff2 = len_pre_oov2 - len(document2)
if diff1 > 0 or diff2 > 0:
logger.info('Removed %d and %d OOV words from document 1 and 2 (respectively).', diff1, diff2)
if len(document1) == 0 or len(document2) == 0:
logger.info(
"At least one of the documents had no words that were in the vocabulary. "
"Aborting (returning inf)."
)
return float('inf')
dictionary = Dictionary(documents=[document1, document2])
vocab_len = len(dictionary)
if vocab_len == 1:
# Both documents are composed of a single unique token
return 0.0
# Sets for faster look-up.
docset1 = set(document1)
docset2 = set(document2)
# Compute distance matrix.
distance_matrix = zeros((vocab_len, vocab_len), dtype=double)
for i, t1 in dictionary.items():
for j, t2 in dictionary.items():
if t1 not in docset1 or t2 not in docset2:
continue
# Compute Euclidean distance between word vectors.
distance_matrix[i, j] = sqrt(np_sum((self[t1] - self[t2])**2))
if np_sum(distance_matrix) == 0.0:
# `emd` gets stuck if the distance matrix contains only zeros.
logger.info('The distance matrix is all zeros. Aborting (returning inf).')
return float('inf')
def nbow(document):
d = zeros(vocab_len, dtype=double)
nbow = dictionary.doc2bow(document) # Word frequencies.
doc_len = len(document)
for idx, freq in nbow:
d[idx] = freq / float(doc_len) # Normalized word frequencies.
return d
# Compute nBOW representation of documents.
d1 = nbow(document1)
d2 = nbow(document2)
# Compute WMD.
return emd(d1, d2, distance_matrix)
def most_similar_cosmul(self, positive=None, negative=None, topn=10):
"""Find the top-N most similar words, using the multiplicative combination objective,
proposed by `Omer Levy and Yoav Goldberg "Linguistic Regularities in Sparse and Explicit Word Representations"
<http://www.aclweb.org/anthology/W14-1618>`_. Positive words still contribute positively towards the similarity,
negative words negatively, but with less susceptibility to one large distance dominating the calculation.
In the common analogy-solving case, of two positive and one negative examples,
this method is equivalent to the "3CosMul" objective (equation (4)) of Levy and Goldberg.
Additional positive or negative examples contribute to the numerator or denominator,
respectively - a potentially sensible but untested extension of the method.
With a single positive example, rankings will be the same as in the default
:meth:`~gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.most_similar`.
Parameters
----------
positive : list of str, optional
List of words that contribute positively.
negative : list of str, optional
List of words that contribute negatively.
topn : int, optional
Number of top-N similar words to return.
Returns
-------
list of (str, float)
Sequence of (word, similarity).
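Example
-------
An illustrative sketch, with the same pre-trained vectors as in the module examples above
(`word_vectors` stands for that loaded instance):
.. sourcecode:: pycon
>>> result = word_vectors.most_similar_cosmul(positive=['woman', 'king'], negative=['man'], topn=1)
>>> print("{}: {:.4f}".format(*result[0]))
queen: 0.8965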
"""
if positive is None:
positive = []
if negative is None:
negative = []
self.init_sims()
if isinstance(positive, string_types) and not negative:
# allow calls like most_similar_cosmul('dog'), as a shorthand for most_similar_cosmul(['dog'])
positive = [positive]
all_words = {
self.vocab[word].index for word in positive + negative
if not isinstance(word, ndarray) and word in self.vocab
}
positive = [
self.word_vec(word, use_norm=True) if isinstance(word, string_types) else word
for word in positive
]
negative = [
self.word_vec(word, use_norm=True) if isinstance(word, string_types) else word
for word in negative
]
if not positive:
raise ValueError("cannot compute similarity with no input")
# equation (4) of Levy & Goldberg "Linguistic Regularities...",
# with distances shifted to [0,1] per footnote (7)
pos_dists = [((1 + dot(self.vectors_norm, term)) / 2) for term in positive]
neg_dists = [((1 + dot(self.vectors_norm, term)) / 2) for term in negative]
dists = prod(pos_dists, axis=0) / (prod(neg_dists, axis=0) + 0.000001)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
def doesnt_match(self, words):
"""Which word from the given list doesn't go with the others?
Parameters
----------
words : list of str
List of words.
Returns
-------
str
The word farthest away from the mean of all the words.
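Example
-------
An illustrative sketch (`word_vectors` stands for any loaded instance, as in the module examples above):
.. sourcecode:: pycon
>>> print(word_vectors.doesnt_match("breakfast cereal dinner lunch".split()))
cereal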
"""
self.init_sims()
used_words = [word for word in words if word in self]
if len(used_words) != len(words):
ignored_words = set(words) - set(used_words)
logger.warning("vectors for words %s are not present in the model, ignoring these words", ignored_words)
if not used_words:
raise ValueError("cannot select a word from an empty list")
vectors = vstack(self.word_vec(word, use_norm=True) for word in used_words).astype(REAL)
mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
dists = dot(vectors, mean)
return sorted(zip(dists, used_words))[0][1]
@staticmethod
def cosine_similarities(vector_1, vectors_all):
"""Compute cosine similarities between one vector and a set of other vectors.
Parameters
----------
vector_1 : numpy.ndarray
Vector from which similarities are to be computed, expected shape (dim,).
vectors_all : numpy.ndarray
For each row in `vectors_all`, the cosine similarity to `vector_1` is computed, expected shape (num_vectors, dim).
Returns
-------
numpy.ndarray
Contains cosine similarity between `vector_1` and each row in `vectors_all`, shape (num_vectors,).
"""
norm = np.linalg.norm(vector_1)
all_norms = np.linalg.norm(vectors_all, axis=1)
dot_products = dot(vectors_all, vector_1)
similarities = dot_products / (norm * all_norms)
return similarities
def distances(self, word_or_vector, other_words=()):
"""Compute cosine distances from given word or vector to all words in `other_words`.
If `other_words` is empty, return the distance between `word_or_vector` and all words in vocab.
Parameters
----------
word_or_vector : {str, numpy.ndarray}
Word or vector from which distances are to be computed.
other_words : iterable of str
For each word in `other_words` distance from `word_or_vector` is computed.
If None or empty, distance of `word_or_vector` from all words in vocab is computed (including itself).
Returns
-------
numpy.array
Array containing distances to all words in `other_words` from input `word_or_vector`.
Raises
------
KeyError
If either `word_or_vector` or any word in `other_words` is absent from vocab.
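Example
-------
A minimal sketch (`word_vectors` stands for any loaded instance; the words are placeholders assumed to be in vocab):
.. sourcecode:: pycon
>>> distances = word_vectors.distances('queen', ['king', 'throne'])
>>> distances.shape
(2,)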
"""
if isinstance(word_or_vector, string_types):
input_vector = self.word_vec(word_or_vector)
else:
input_vector = word_or_vector
if not other_words:
other_vectors = self.vectors
else:
other_indices = [self.vocab[word].index for word in other_words]
other_vectors = self.vectors[other_indices]
return 1 - self.cosine_similarities(input_vector, other_vectors)
def distance(self, w1, w2):
"""Compute cosine distance between two words.
Calculate 1 - :meth:`~gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.similarity`.
Parameters
----------
w1 : str
Input word.
w2 : str
Input word.
Returns
-------
float
Distance between `w1` and `w2`.
"""
return 1 - self.similarity(w1, w2)
def similarity(self, w1, w2):
"""Compute cosine similarity between two words.
Parameters
----------
w1 : str
Input word.
w2 : str
Input word.
Returns
-------
float
Cosine similarity between `w1` and `w2`.
"""
return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))
def n_similarity(self, ws1, ws2):
"""Compute cosine similarity between two sets of words.
Parameters
----------
ws1 : list of str
Sequence of words.
ws2: list of str
Sequence of words.
Returns
-------
float
Similarity between `ws1` and `ws2`.
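Example
-------
An illustrative sketch (`word_vectors` stands for any loaded instance; the word sets are placeholders):
.. sourcecode:: pycon
>>> sim = word_vectors.n_similarity(['sushi', 'shop'], ['japanese', 'restaurant'])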
"""
if not(len(ws1) and len(ws2)):
raise ZeroDivisionError('At least one of the passed lists is empty.')
v1 = [self[word] for word in ws1]
v2 = [self[word] for word in ws2]
return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))
@staticmethod
def _log_evaluate_word_analogies(section):
"""Calculate score by section, helper for
:meth:`~gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.evaluate_word_analogies`.
Parameters
----------
section : dict of (str, (str, str, str, str))
Section given from evaluation.
Returns
-------
float
Accuracy score.
"""
correct, incorrect = len(section['correct']), len(section['incorrect'])
if correct + incorrect > 0:
score = correct / (correct + incorrect)
logger.info("%s: %.1f%% (%i/%i)", section['section'], 100.0 * score, correct, correct + incorrect)
return score
def evaluate_word_analogies(self, analogies, restrict_vocab=300000, case_insensitive=True, dummy4unknown=False):
"""Compute performance of the model on an analogy test set.
This is modern variant of :meth:`~gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.accuracy`, see
`discussion on GitHub #1935 <https://github.com/RaRe-Technologies/gensim/pull/1935>`_.
The accuracy is reported (printed to log and returned as a score) for each section separately,
plus there's one aggregate summary at the end.
This method corresponds to the `compute-accuracy` script of the original C word2vec.
See also `Analogy (State of the art) <https://aclweb.org/aclwiki/Analogy_(State_of_the_art)>`_.
Parameters
----------
analogies : str
Path to file, where lines are 4-tuples of words, split into sections by ": SECTION NAME" lines.
See `gensim/test/test_data/questions-words.txt` as example.
restrict_vocab : int, optional
Ignore all 4-tuples containing a word not in the first `restrict_vocab` words.
This may be meaningful if you've sorted the model vocabulary by descending frequency (which is standard
in modern word embedding models).
case_insensitive : bool, optional
If True - convert all words to their uppercase form before evaluating the performance.
Useful to handle case-mismatch between training tokens and words in the test set.
In case of multiple case variants of a single word, the vector for the first occurrence
(also the most frequent if vocabulary is sorted) is taken.
dummy4unknown : bool, optional
If True - produce zero accuracies for 4-tuples with out-of-vocabulary words.
Otherwise, these tuples are skipped entirely and not used in the evaluation.
Returns
-------
score : float
The overall evaluation score on the entire evaluation set
sections : list of dict of {str : str or list of tuple of (str, str, str, str)}
Results broken down by each section of the evaluation set. Each dict contains the name of the section
under the key 'section', and lists of correctly and incorrectly predicted 4-tuples of words under the
keys 'correct' and 'incorrect'.
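Example
-------
An illustrative sketch, using the analogy file bundled with gensim's tests (any file in the same format works;
`word_vectors` stands for any loaded instance):
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> score, sections = word_vectors.evaluate_word_analogies(datapath('questions-words.txt'))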
"""
ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]]
ok_vocab = {w.upper(): v for w, v in reversed(ok_vocab)} if case_insensitive else dict(ok_vocab)
oov = 0
logger.info("Evaluating word analogies for top %i words in the model on %s", restrict_vocab, analogies)
sections, section = [], None
quadruplets_no = 0
for line_no, line in enumerate(utils.smart_open(analogies)):
line = utils.to_unicode(line)
if line.startswith(': '):
# a new section starts => store the old section
if section:
sections.append(section)
self._log_evaluate_word_analogies(section)
section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []}
else:
if not section:
raise ValueError("Missing section header before line #%i in %s" % (line_no, analogies))
try:
if case_insensitive:
a, b, c, expected = [word.upper() for word in line.split()]
else:
a, b, c, expected = [word for word in line.split()]
except ValueError:
logger.info("Skipping invalid line #%i in %s", line_no, analogies)
continue
quadruplets_no += 1
if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
oov += 1
if dummy4unknown:
logger.debug('Zero accuracy for line #%d with OOV words: %s', line_no, line.strip())
section['incorrect'].append((a, b, c, expected))
else:
logger.debug("Skipping line #%i with OOV words: %s", line_no, line.strip())
continue
original_vocab = self.vocab
self.vocab = ok_vocab
ignore = {a, b, c} # input words to be ignored
predicted = None
# find the most likely prediction using 3CosAdd (vector offset) method
# TODO: implement 3CosMul and set-based methods for solving analogies
sims = self.most_similar(positive=[b, c], negative=[a], topn=5, restrict_vocab=restrict_vocab)
self.vocab = original_vocab
for element in sims:
predicted = element[0].upper() if case_insensitive else element[0]
if predicted in ok_vocab and predicted not in ignore:
if predicted != expected:
logger.debug("%s: expected %s, predicted %s", line.strip(), expected, predicted)
break
if predicted == expected:
section['correct'].append((a, b, c, expected))
else:
section['incorrect'].append((a, b, c, expected))
if section:
# store the last section, too
sections.append(section)
self._log_evaluate_word_analogies(section)
total = {
'section': 'Total accuracy',
'correct': list(chain.from_iterable(s['correct'] for s in sections)),
'incorrect': list(chain.from_iterable(s['incorrect'] for s in sections)),
}
oov_ratio = float(oov) / quadruplets_no * 100
logger.info('Quadruplets with out-of-vocabulary words: %.1f%%', oov_ratio)
if not dummy4unknown:
logger.info(
'NB: analogies containing OOV words were skipped from evaluation! '
'To change this behavior, use "dummy4unknown=True"'
)
analogies_score = self._log_evaluate_word_analogies(total)
sections.append(total)
# Return the overall score and the full lists of correct and incorrect analogies
return analogies_score, sections
@staticmethod
def log_accuracy(section):
correct, incorrect = len(section['correct']), len(section['incorrect'])
if correct + incorrect > 0:
logger.info(
"%s: %.1f%% (%i/%i)",
section['section'], 100.0 * correct / (correct + incorrect), correct, correct + incorrect
)
@deprecated("Method will be removed in 4.0.0, use self.evaluate_word_analogies() instead")
def accuracy(self, questions, restrict_vocab=30000, most_similar=most_similar, case_insensitive=True):
"""Compute accuracy of the model.
The accuracy is reported (=printed to log and returned as a list) for each
section separately, plus there's one aggregate summary at the end.
Parameters
----------
questions : str
Path to file, where lines are 4-tuples of words, split into sections by ": SECTION NAME" lines.
See `gensim/test/test_data/questions-words.txt` as example.
restrict_vocab : int, optional
Ignore all 4-tuples containing a word not in the first `restrict_vocab` words.
This may be meaningful if you've sorted the model vocabulary by descending frequency (which is standard
in modern word embedding models).
most_similar : function, optional
Function used for similarity calculation.
case_insensitive : bool, optional
If True - convert all words to their uppercase form before evaluating the performance.
Useful to handle case-mismatch between training tokens and words in the test set.
In case of multiple case variants of a single word, the vector for the first occurrence
(also the most frequent if vocabulary is sorted) is taken.
Returns
-------
list of dict of (str, (str, str, str, str))
Full lists of correct and incorrect predictions divided by sections.
"""
ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]]
ok_vocab = {w.upper(): v for w, v in reversed(ok_vocab)} if case_insensitive else dict(ok_vocab)
sections, section = [], None
for line_no, line in enumerate(utils.smart_open(questions)):
# TODO: use level3 BLAS (=evaluate multiple questions at once), for speed
line = utils.to_unicode(line)
if line.startswith(': '):
# a new section starts => store the old section
if section:
sections.append(section)
self.log_accuracy(section)
section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []}
else:
if not section:
raise ValueError("Missing section header before line #%i in %s" % (line_no, questions))
try:
if case_insensitive:
a, b, c, expected = [word.upper() for word in line.split()]
else:
a, b, c, expected = [word for word in line.split()]
except ValueError:
logger.info("Skipping invalid line #%i in %s", line_no, questions)
continue
if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
logger.debug("Skipping line #%i with OOV words: %s", line_no, line.strip())
continue
original_vocab = self.vocab
self.vocab = ok_vocab
ignore = {a, b, c} # input words to be ignored
predicted = None
# find the most likely prediction, ignoring OOV words and input words
sims = most_similar(self, positive=[b, c], negative=[a], topn=False, restrict_vocab=restrict_vocab)
self.vocab = original_vocab
for index in matutils.argsort(sims, reverse=True):
predicted = self.index2word[index].upper() if case_insensitive else self.index2word[index]
if predicted in ok_vocab and predicted not in ignore:
if predicted != expected:
logger.debug("%s: expected %s, predicted %s", line.strip(), expected, predicted)
break
if predicted == expected:
section['correct'].append((a, b, c, expected))
else:
section['incorrect'].append((a, b, c, expected))
if section:
# store the last section, too
sections.append(section)
self.log_accuracy(section)
total = {
'section': 'total',
'correct': list(chain.from_iterable(s['correct'] for s in sections)),
'incorrect': list(chain.from_iterable(s['incorrect'] for s in sections)),
}
self.log_accuracy(total)
sections.append(total)
return sections
@staticmethod
def log_evaluate_word_pairs(pearson, spearman, oov, pairs):
logger.info('Pearson correlation coefficient against %s: %.4f', pairs, pearson[0])
logger.info('Spearman rank-order correlation coefficient against %s: %.4f', pairs, spearman[0])
logger.info('Pairs with unknown words ratio: %.1f%%', oov)
def evaluate_word_pairs(self, pairs, delimiter='\t', restrict_vocab=300000,
case_insensitive=True, dummy4unknown=False):
"""Compute correlation of the model with human similarity judgments.
Notes
-----
More datasets can be found at
* http://technion.ac.il/~ira.leviant/MultilingualVSMdata.html
* https://www.cl.cam.ac.uk/~fh295/simlex.html.
Parameters
----------
pairs : str
Path to file, where lines are 3-tuples, each consisting of a word pair and a similarity value.
See `test/test_data/wordsim353.tsv` as example.
delimiter : str, optional
Separator in `pairs` file.
restrict_vocab : int, optional
Ignore all word pairs containing a word not in the first `restrict_vocab` words.
This may be meaningful if you've sorted the model vocabulary by descending frequency (which is standard
in modern word embedding models).
case_insensitive : bool, optional
If True - convert all words to their uppercase form before evaluating the performance.
Useful to handle case-mismatch between training tokens and words in the test set.
In case of multiple case variants of a single word, the vector for the first occurrence
(also the most frequent if vocabulary is sorted) is taken.
dummy4unknown : bool, optional
If True - produce zero similarities for word pairs with out-of-vocabulary words.
Otherwise, these pairs are skipped entirely and not used in the evaluation.
Returns
-------
pearson : tuple of (float, float)
Pearson correlation coefficient with 2-tailed p-value.
spearman : tuple of (float, float)
Spearman rank-order correlation coefficient between the similarities from the dataset and the
similarities produced by the model itself, with 2-tailed p-value.
oov_ratio : float
The ratio of pairs with unknown words.
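Example
-------
An illustrative sketch, using the WordSim-353 file bundled with gensim's tests
(`word_vectors` stands for any loaded instance):
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> pearson, spearman, oov_ratio = word_vectors.evaluate_word_pairs(datapath('wordsim353.tsv'))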
"""
ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]]
ok_vocab = {w.upper(): v for w, v in reversed(ok_vocab)} if case_insensitive else dict(ok_vocab)
similarity_gold = []
similarity_model = []
oov = 0
original_vocab = self.vocab
self.vocab = ok_vocab
for line_no, line in enumerate(utils.smart_open(pairs)):
line = utils.to_unicode(line)
if line.startswith('#'):
# May be a comment
continue
else:
try:
if case_insensitive:
a, b, sim = [word.upper() for word in line.split(delimiter)]
else:
a, b, sim = [word for word in line.split(delimiter)]
sim = float(sim)
except (ValueError, TypeError):
logger.info('Skipping invalid line #%d in %s', line_no, pairs)
continue
if a not in ok_vocab or b not in ok_vocab:
oov += 1
if dummy4unknown:
logger.debug('Zero similarity for line #%d with OOV words: %s', line_no, line.strip())
similarity_model.append(0.0)
similarity_gold.append(sim)
continue
else:
logger.debug('Skipping line #%d with OOV words: %s', line_no, line.strip())
continue
similarity_gold.append(sim) # Similarity from the dataset
similarity_model.append(self.similarity(a, b)) # Similarity from the model
self.vocab = original_vocab
spearman = stats.spearmanr(similarity_gold, similarity_model)
pearson = stats.pearsonr(similarity_gold, similarity_model)
if dummy4unknown:
oov_ratio = float(oov) / len(similarity_gold) * 100
else:
oov_ratio = float(oov) / (len(similarity_gold) + oov) * 100
logger.debug('Pearson correlation coefficient against %s: %f with p-value %f', pairs, pearson[0], pearson[1])
logger.debug(
'Spearman rank-order correlation coefficient against %s: %f with p-value %f',
pairs, spearman[0], spearman[1]
)
logger.debug('Pairs with unknown words: %d', oov)
self.log_evaluate_word_pairs(pearson, spearman, oov_ratio, pairs)
return pearson, spearman, oov_ratio
def init_sims(self, replace=False):
"""Precompute L2-normalized vectors.
Parameters
----------
replace : bool, optional
If True - forget the original vectors and only keep the normalized ones = saves lots of memory!
Warnings
--------
You **cannot continue training** after doing a replace.
The model becomes effectively read-only: you can call
:meth:`~gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.most_similar`,
:meth:`~gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.similarity`, etc., but not train.
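Example
-------
A minimal sketch of trading trainability for memory (irreversible; `word_vectors` stands for any loaded instance):
.. sourcecode:: pycon
>>> word_vectors.init_sims(replace=True)  # normalize vectors in place; the original vectors are discarded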
"""
if getattr(self, 'vectors_norm', None) is None or replace:
logger.info("precomputing L2-norms of word weight vectors")
if replace:
for i in range(self.vectors.shape[0]):
self.vectors[i, :] /= sqrt((self.vectors[i, :] ** 2).sum(-1))
self.vectors_norm = self.vectors
else:
self.vectors_norm = (self.vectors / sqrt((self.vectors ** 2).sum(-1))[..., newaxis]).astype(REAL)
class Word2VecKeyedVectors(WordEmbeddingsKeyedVectors):
"""Mapping between words and vectors for the :class:`~gensim.models.Word2Vec` model.
Used to perform operations on the vectors such as vector lookup, distance, similarity etc.
"""
def save_word2vec_format(self, fname, fvocab=None, binary=False, total_vec=None):
"""Store the input-hidden weight matrix in the same format used by the original
C word2vec-tool, for compatibility.
Parameters
----------
fname : str
The file path used to save the vectors in
fvocab : str, optional
Optional file path used to save the vocabulary
binary : bool, optional
If True, the data will be saved in binary word2vec format, else it will be saved in plain text.
total_vec : int, optional
Optional parameter to explicitly specify total no. of vectors
(in case word vectors are appended with document vectors afterwards).
"""
# from gensim.models.word2vec import save_word2vec_format
_save_word2vec_format(
fname, self.vocab, self.vectors, fvocab=fvocab, binary=binary, total_vec=total_vec)
@classmethod
def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
limit=None, datatype=REAL):
"""Load the input-hidden weight matrix from the original C word2vec-tool format.
Warnings
--------
The information stored in the file is incomplete (the binary tree is missing),
so while you can query for word similarity etc., you cannot continue training
with a model loaded this way.
Parameters
----------
fname : str
The file path to the saved word2vec-format file.
fvocab : str, optional
File path to the vocabulary. Word counts are read from `fvocab` filename, if set
(this is the file generated by `-save-vocab` flag of the original C tool).
binary : bool, optional
If True, indicates whether the data is in binary word2vec format.
encoding : str, optional
If you trained the C model using non-utf8 encoding for words, specify that encoding in `encoding`.
unicode_errors : str, optional
default 'strict', is a string suitable to be passed as the `errors`
argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source
file may include word tokens truncated in the middle of a multibyte unicode character
(as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
limit : int, optional
Sets a maximum number of word-vectors to read from the file. The default,
None, means read all.
datatype : type, optional
(Experimental) Can coerce dimensions to a non-default float type (such as `np.float16`) to save memory.
Such types may result in much slower bulk operations or incompatibility with optimized routines.
Returns
-------
:class:`~gensim.models.keyedvectors.Word2VecKeyedVectors`
Loaded model.
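Example
-------
An illustrative round-trip through the C word2vec text format; `word_vectors` stands for any loaded
instance and the temporary file name is a placeholder:
.. sourcecode:: pycon
>>> from gensim.test.utils import get_tmpfile
>>> from gensim.models import KeyedVectors
>>>
>>> fname = get_tmpfile("vectors.txt")
>>> word_vectors.save_word2vec_format(fname, binary=False)
>>> loaded = KeyedVectors.load_word2vec_format(fname, binary=False)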
"""
# from gensim.models.word2vec import load_word2vec_format
return _load_word2vec_format(
cls, fname, fvocab=fvocab, binary=binary, encoding=encoding, unicode_errors=unicode_errors,
limit=limit, datatype=datatype)
def get_keras_embedding(self, train_embeddings=False):
"""Get a Keras 'Embedding' layer with weights set as the Word2Vec model's learned word embeddings.
Parameters
----------
train_embeddings : bool
If False, the weights are frozen and stopped from being updated.
If True, the weights can/will be further trained/updated.
Returns
-------
`keras.layers.Embedding`
Embedding layer.
Raises
------
ImportError
If `Keras <https://pypi.org/project/Keras/>`_ is not installed.
Warnings
--------
This method works only if `Keras <https://pypi.org/project/Keras/>`_ is installed.
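Example
-------
A minimal sketch (requires Keras; any downstream use of the layer is up to the caller,
and `word_vectors` stands for any loaded instance):
.. sourcecode:: pycon
>>> embedding_layer = word_vectors.get_keras_embedding(train_embeddings=False)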
"""
try:
from keras.layers import Embedding
except ImportError:
raise ImportError("Please install Keras to use this function")
weights = self.vectors
# set `trainable` as `False` to use the pretrained word embedding
# No extra mem usage here as `Embedding` layer doesn't create any new matrix for weights
layer = Embedding(
input_dim=weights.shape[0], output_dim=weights.shape[1],
weights=[weights], trainable=train_embeddings
)
return layer
KeyedVectors = Word2VecKeyedVectors # alias for backward compatibility
class Doc2VecKeyedVectors(BaseKeyedVectors):
def __init__(self, vector_size, mapfile_path):
super(Doc2VecKeyedVectors, self).__init__(vector_size=vector_size)
self.doctags = {} # string -> Doctag (only filled if necessary)
self.max_rawint = -1 # highest rawint-indexed doctag
self.offset2doctag = [] # int offset-past-(max_rawint+1) -> String (only filled if necessary)
self.count = 0
self.vectors_docs = []
self.mapfile_path = mapfile_path
self.vector_size = vector_size
self.vectors_docs_norm = None
@property
def index2entity(self):
return self.offset2doctag
@index2entity.setter
def index2entity(self, value):
self.offset2doctag = value
@property
@deprecated("Attribute will be removed in 4.0.0, use docvecs.vectors_docs instead")
def doctag_syn0(self):
return self.vectors_docs
@property
@deprecated("Attribute will be removed in 4.0.0, use docvecs.vectors_docs_norm instead")
def doctag_syn0norm(self):
return self.vectors_docs_norm
def __getitem__(self, index):
"""Get vector representation of `index`.
Parameters
----------
index : {str, list of str}
Doctag or sequence of doctags.
Returns
-------
numpy.ndarray
Vector representation for `index` (1D if `index` is string, otherwise - 2D).
"""
if index in self:
if isinstance(index, string_types + integer_types + (integer,)):
return self.vectors_docs[self._int_index(index, self.doctags, self.max_rawint)]
return vstack([self[i] for i in index])
raise KeyError("tag '%s' not seen in training corpus/invalid" % index)
def __contains__(self, index):
if isinstance(index, integer_types + (integer,)):
return index < self.count
else:
return index in self.doctags
def __len__(self):
return self.count
def save(self, *args, **kwargs):
"""Save object.
Parameters
----------
fname : str
Path to the output file.
See Also
--------
:meth:`~gensim.models.keyedvectors.Doc2VecKeyedVectors.load`
Load object.
"""
# don't bother storing the cached normalized vectors
kwargs['ignore'] = kwargs.get('ignore', ['vectors_docs_norm'])
super(Doc2VecKeyedVectors, self).save(*args, **kwargs)
def init_sims(self, replace=False):
"""Precompute L2-normalized vectors.
Parameters
----------
replace : bool, optional
If True - forget the original vectors and only keep the normalized ones = saves lots of memory!
Warnings
--------
You **cannot continue training** after doing a replace.
The model becomes effectively read-only: you can call
:meth:`~gensim.models.keyedvectors.Doc2VecKeyedVectors.most_similar`,
:meth:`~gensim.models.keyedvectors.Doc2VecKeyedVectors.similarity`, etc., but not train and infer_vector.
"""
if getattr(self, 'vectors_docs_norm', None) is None or replace:
logger.info("precomputing L2-norms of doc weight vectors")
if replace:
for i in range(self.vectors_docs.shape[0]):
self.vectors_docs[i, :] /= sqrt((self.vectors_docs[i, :] ** 2).sum(-1))
self.vectors_docs_norm = self.vectors_docs
else:
if self.mapfile_path:
self.vectors_docs_norm = np_memmap(
self.mapfile_path + '.vectors_docs_norm', dtype=REAL,
mode='w+', shape=self.vectors_docs.shape)
else:
self.vectors_docs_norm = empty(self.vectors_docs.shape, dtype=REAL)
np_divide(
self.vectors_docs, sqrt((self.vectors_docs ** 2).sum(-1))[..., newaxis], self.vectors_docs_norm)
def most_similar(self, positive=None, negative=None, topn=10, clip_start=0, clip_end=None, indexer=None):
"""Find the top-N most similar docvecs from the training set.
Positive docvecs contribute positively towards the similarity, negative docvecs negatively.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given docs. Docs may be specified as vectors, integer indexes
of trained docvecs, or if the documents were originally presented with string tags,
by the corresponding tags.
TODO: Accept vectors of out-of-training-set docs, as if from inference.
Parameters
----------
positive : list of {str, int}, optional
List of doctags/indexes that contribute positively.
negative : list of {str, int}, optional
List of doctags/indexes that contribute negatively.
topn : int, optional
Number of top-N similar docvecs to return.
clip_start : int
Start clipping index.
clip_end : int
End clipping index.
Returns
-------
list of ({str, int}, float)
Sequence of (doctag/index, similarity).
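Example
-------
An illustrative sketch, assuming a trained :class:`~gensim.models.doc2vec.Doc2Vec` model `model` whose
documents were tagged with string tags ('SENT_0' is a placeholder tag, not part of the API):
.. sourcecode:: pycon
>>> similar = model.docvecs.most_similar(positive=['SENT_0'], topn=5)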
"""
if positive is None:
positive = []
if negative is None:
negative = []
self.init_sims()
clip_end = clip_end or len(self.vectors_docs_norm)
if isinstance(positive, string_types + integer_types + (integer,)) and not negative:
# allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
positive = [positive]
# add weights for each doc, if not already present; default to 1.0 for positive and -1.0 for negative docs
positive = [
(doc, 1.0) if isinstance(doc, string_types + integer_types + (ndarray, integer))
else doc for doc in positive
]
negative = [
(doc, -1.0) if isinstance(doc, string_types + integer_types + (ndarray, integer))
else doc for doc in negative
]
# compute the weighted average of all docs
all_docs, mean = set(), []
for doc, weight in positive + negative:
if isinstance(doc, ndarray):
mean.append(weight * doc)
elif doc in self.doctags or doc < self.count:
mean.append(weight * self.vectors_docs_norm[self._int_index(doc, self.doctags, self.max_rawint)])
all_docs.add(self._int_index(doc, self.doctags, self.max_rawint))
else:
raise KeyError("doc '%s' not in trained set" % doc)
if not mean:
raise ValueError("cannot compute similarity with no input")
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
if indexer is not None:
return indexer.most_similar(mean, topn)
dists = dot(self.vectors_docs_norm[clip_start:clip_end], mean)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_docs), reverse=True)
# ignore (don't return) docs from the input
result = [
(self._index_to_doctag(sim + clip_start, self.offset2doctag, self.max_rawint), float(dists[sim]))
for sim in best
if (sim + clip_start) not in all_docs
]
return result[:topn]
def doesnt_match(self, docs):
"""Which document from the given list doesn't go with the others from the training set?
TODO: Accept vectors of out-of-training-set docs, as if from inference.
Parameters
----------
docs : list of {str, int}
Sequence of doctags/indexes.
Returns
-------
{str, int}
Doctag/index of the document farthest away from the mean of all the documents.
"""
self.init_sims()
docs = [doc for doc in docs if doc in self.doctags or 0 <= doc < self.count] # filter out unknowns
logger.debug("using docs %s", docs)
if not docs:
raise ValueError("cannot select a doc from an empty list")
vectors = vstack(
self.vectors_docs_norm[self._int_index(doc, self.doctags, self.max_rawint)] for doc in docs).astype(REAL)
mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
dists = dot(vectors, mean)
return sorted(zip(dists, docs))[0][1]
def similarity(self, d1, d2):
"""Compute cosine similarity between two docvecs from the training set.
TODO: Accept vectors of out-of-training-set docs, as if from inference.
Parameters
----------
d1 : {int, str}
Doctag/index of document.
d2 : {int, str}
Doctag/index of document.
Returns
-------
float
The cosine similarity between the vectors of the two documents.
"""
return dot(matutils.unitvec(self[d1]), matutils.unitvec(self[d2]))
def n_similarity(self, ds1, ds2):
"""Compute cosine similarity between two sets of docvecs from the trained set.
TODO: Accept vectors of out-of-training-set docs, as if from inference.
Parameters
----------
ds1 : list of {str, int}
Set of documents, specified as a sequence of doctags/indexes.
ds2 : list of {str, int}
Set of documents, specified as a sequence of doctags/indexes.
Returns
-------
float
The cosine similarity between the means of the documents in each of the two sets.
"""
v1 = [self[doc] for doc in ds1]
v2 = [self[doc] for doc in ds2]
return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))
def distance(self, d1, d2):
"""
Compute cosine distance between two documents.
"""
return 1 - self.similarity(d1, d2)
# required by base keyed vectors class
def distances(self, d1, other_docs=()):
"""Compute cosine distances from given `d1` to all documents in `other_docs`.
TODO: Accept vectors of out-of-training-set docs, as if from inference.
Parameters
----------
d1 : {str, numpy.ndarray}
Doctag/index of document.
other_docs : iterable of {str, int}
Sequence of doctags/indexes.
If None or empty, distance of `d1` from all doctags in vocab is computed (including itself).
Returns
-------
numpy.array
Array containing distances to all documents in `other_docs` from input `d1`.
"""
input_vector = self[d1]
if not other_docs:
other_vectors = self.vectors_docs
else:
other_vectors = self[other_docs]
return 1 - WordEmbeddingsKeyedVectors.cosine_similarities(input_vector, other_vectors)
def similarity_unseen_docs(self, model, doc_words1, doc_words2, alpha=0.1, min_alpha=0.0001, steps=5):
"""Compute cosine similarity between two post-bulk out of training documents.
Parameters
----------
model : :class:`~gensim.models.doc2vec.Doc2Vec`
An instance of a trained `Doc2Vec` model.
doc_words1 : list of str
Input document.
doc_words2 : list of str
Input document.
alpha : float, optional
The initial learning rate.
min_alpha : float, optional
Learning rate will linearly drop to `min_alpha` as training progresses.
steps : int, optional
Number of epoch to train the new document.
Returns
-------
float
The cosine similarity between `doc_words1` and `doc_words2`.
"""
d1 = model.infer_vector(doc_words=doc_words1, alpha=alpha, min_alpha=min_alpha, steps=steps)
d2 = model.infer_vector(doc_words=doc_words2, alpha=alpha, min_alpha=min_alpha, steps=steps)
return dot(matutils.unitvec(d1), matutils.unitvec(d2))
def save_word2vec_format(self, fname, prefix='*dt_', fvocab=None,
total_vec=None, binary=False, write_first_line=True):
"""Store the input-hidden weight matrix in the same format used by the original
C word2vec-tool, for compatibility.
Parameters
----------
fname : str
The file path used to save the vectors in.
prefix : str, optional
Uniquely identifies doctags from word vocab, and avoids collision
in case of repeated string in doctag and word vocab.
fvocab : str, optional
UNUSED.
total_vec : int, optional
Explicitly specify total no. of vectors
(in case word vectors are appended with document vectors afterwards)
binary : bool, optional
If True, the data will be saved in binary word2vec format, else it will be saved in plain text.
write_first_line : bool, optional
Whether to print the first line in the file. Useful when saving doc-vectors after word-vectors.
"""
total_vec = total_vec or len(self)
with utils.smart_open(fname, 'ab') as fout:
if write_first_line:
logger.info("storing %sx%s projection weights into %s", total_vec, self.vectors_docs.shape[1], fname)
fout.write(utils.to_utf8("%s %s\n" % (total_vec, self.vectors_docs.shape[1])))
# store as in input order
for i in range(len(self)):
doctag = u"%s%s" % (prefix, self._index_to_doctag(i, self.offset2doctag, self.max_rawint))
row = self.vectors_docs[i]
if binary:
fout.write(utils.to_utf8(doctag) + b" " + row.tostring())
else:
fout.write(utils.to_utf8("%s %s\n" % (doctag, ' '.join("%f" % val for val in row))))
@staticmethod
def _int_index(index, doctags, max_rawint):
"""Get int index for either string or int index."""
if isinstance(index, integer_types + (integer,)):
return index
else:
return max_rawint + 1 + doctags[index].offset
@staticmethod
def _index_to_doctag(i_index, offset2doctag, max_rawint):
"""Get string key for given `i_index`, if available. Otherwise return raw int doctag (same int)."""
candidate_offset = i_index - max_rawint - 1
if 0 <= candidate_offset < len(offset2doctag):
return offset2doctag[candidate_offset]
else:
return i_index
# for backward compatibility
def index_to_doctag(self, i_index):
"""Get string key for given `i_index`, if available. Otherwise return raw int doctag (same int)."""
candidate_offset = i_index - self.max_rawint - 1
if 0 <= candidate_offset < len(self.offset2doctag):
return self.offset2doctag[candidate_offset]
else:
return i_index
# for backward compatibility
def int_index(self, index, doctags, max_rawint):
"""Get int index for either string or int index"""
if isinstance(index, integer_types + (integer,)):
return index
else:
return max_rawint + 1 + doctags[index].offset
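# Illustrative usage of the Doc2Vec document-vector API defined above -- a sketch only,
# assuming `model` is an already-trained gensim Doc2Vec instance and 'doc_42'/'doc_7'
# are hypothetical doctags used during training (neither is defined in this module):
#
#     dv = model.docvecs                      # a Doc2VecKeyedVectors instance
#     dv.most_similar('doc_42', topn=5)       # nearest doctags by cosine similarity
#     dv.similarity('doc_42', 'doc_7')        # cosine similarity of two training docs
#     dv.similarity_unseen_docs(model, ['first', 'text'], ['second', 'text'])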
class FastTextKeyedVectors(WordEmbeddingsKeyedVectors):
"""Vectors and vocab for :class:`~gensim.models.fasttext.FastText`."""
def __init__(self, vector_size, min_n, max_n):
super(FastTextKeyedVectors, self).__init__(vector_size=vector_size)
self.vectors_vocab = None
self.vectors_vocab_norm = None
self.vectors_ngrams = None
self.vectors_ngrams_norm = None
self.buckets_word = None
self.hash2index = {}
self.min_n = min_n
self.max_n = max_n
self.num_ngram_vectors = 0
@property
@deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_vocab instead")
def syn0_vocab(self):
return self.vectors_vocab
@property
@deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_vocab_norm instead")
def syn0_vocab_norm(self):
return self.vectors_vocab_norm
@property
@deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_ngrams instead")
def syn0_ngrams(self):
return self.vectors_ngrams
@property
@deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_ngrams_norm instead")
def syn0_ngrams_norm(self):
return self.vectors_ngrams_norm
def __contains__(self, word):
"""Check if `word` or any character ngrams in `word` are present in the vocabulary.
A vector for the word is guaranteed to exist if current method returns True.
Parameters
----------
word : str
Input word.
Returns
-------
bool
True if `word` or any character ngrams in `word` are present in the vocabulary, False otherwise.
"""
if word in self.vocab:
return True
else:
char_ngrams = _compute_ngrams(word, self.min_n, self.max_n)
return any(_ft_hash(ng) % self.bucket in self.hash2index for ng in char_ngrams)
def save(self, *args, **kwargs):
"""Save object.
Parameters
----------
fname : str
Path to the output file.
See Also
--------
:meth:`~gensim.models.keyedvectors.FastTextKeyedVectors.load`
Load object.
"""
# don't bother storing the cached normalized vectors
kwargs['ignore'] = kwargs.get(
'ignore', ['vectors_norm', 'vectors_vocab_norm', 'vectors_ngrams_norm', 'buckets_word'])
super(FastTextKeyedVectors, self).save(*args, **kwargs)
def word_vec(self, word, use_norm=False):
"""Get `word` representations in vector space, as a 1D numpy array.
Parameters
----------
word : str
Input word
use_norm : bool, optional
If True - resulting vector will be L2-normalized (unit euclidean length).
Returns
-------
numpy.ndarray
Vector representation of `word`.
Raises
------
KeyError
If word and all ngrams not in vocabulary.
"""
if word in self.vocab:
return super(FastTextKeyedVectors, self).word_vec(word, use_norm)
else:
# from gensim.models.fasttext import compute_ngrams
word_vec = np.zeros(self.vectors_ngrams.shape[1], dtype=np.float32)
ngrams = _compute_ngrams(word, self.min_n, self.max_n)
if use_norm:
ngram_weights = self.vectors_ngrams_norm
else:
ngram_weights = self.vectors_ngrams
ngrams_found = 0
for ngram in ngrams:
ngram_hash = _ft_hash(ngram) % self.bucket
if ngram_hash in self.hash2index:
word_vec += ngram_weights[self.hash2index[ngram_hash]]
ngrams_found += 1
if word_vec.any():
return word_vec / max(1, ngrams_found)
else: # No ngrams of the word are present in self.ngrams
raise KeyError('all ngrams for word %s absent from model' % word)
def init_sims(self, replace=False):
"""Precompute L2-normalized vectors.
Parameters
----------
replace : bool, optional
If True - forget the original vectors and only keep the normalized ones = saves lots of memory!
Warnings
--------
You **cannot continue training** after doing a replace.
The model becomes effectively read-only: you can call
:meth:`~gensim.models.keyedvectors.FastTextKeyedVectors.most_similar`,
:meth:`~gensim.models.keyedvectors.FastTextKeyedVectors.similarity`, etc., but not train.
"""
super(FastTextKeyedVectors, self).init_sims(replace)
if getattr(self, 'vectors_ngrams_norm', None) is None or replace:
logger.info("precomputing L2-norms of ngram weight vectors")
if replace:
for i in range(self.vectors_ngrams.shape[0]):
self.vectors_ngrams[i, :] /= sqrt((self.vectors_ngrams[i, :] ** 2).sum(-1))
self.vectors_ngrams_norm = self.vectors_ngrams
else:
self.vectors_ngrams_norm = \
(self.vectors_ngrams / sqrt((self.vectors_ngrams ** 2).sum(-1))[..., newaxis]).astype(REAL)
def save_word2vec_format(self, fname, fvocab=None, binary=False, total_vec=None):
"""Store the input-hidden weight matrix in the same format used by the original
C word2vec-tool, for compatibility.
Parameters
----------
fname : str
The file path used to save the vectors in
fvocab : str, optional
Optional file path used to save the vocabulary
binary : bool, optional
If True, the data wil be saved in binary word2vec format, else it will be saved in plain text.
total_vec : int, optional
Optional parameter to explicitly specify total no. of vectors
(in case word vectors are appended with document vectors afterwards).
"""
# from gensim.models.word2vec import save_word2vec_format
_save_word2vec_format(
fname, self.vocab, self.vectors, fvocab=fvocab, binary=binary, total_vec=total_vec)
| [
"[email protected]"
] | |
e9ec52b3a17bacb2dda1181371225ad96a07f778 | 90e4fe85b70221ae82f99ca6930da980ef8f674a | /Anul_1/Semestru_1/FundamentalsOfProg/Seminar/s08p1_I6/src/store/ui/console.py | e9bdf474e451dc086c98f9ea7956991ddff0ca45 | [] | no_license | stefangeorgescu970/university-assignments | 108235e047b963efb6cd1b952f6b96849e1dc3d3 | 9253cc084b74a62035c96a4a2accfbba43812c16 | refs/heads/master | 2022-12-10T14:49:20.299356 | 2020-04-28T14:05:41 | 2020-04-28T14:05:41 | 259,648,446 | 0 | 0 | null | 2022-12-07T20:33:05 | 2020-04-28T13:43:02 | C++ | UTF-8 | Python | false | false | 2,116 | py | """
@author: radu
"""
import traceback
from store.domain.dto import OrderDTO
from store.domain.validators import StoreException
from util.common import MyUtil
class Console(object):
def __init__(self, product_controller, order_controller, statistics_controller):
self.__product_controller = product_controller
self.__order_controller = order_controller
self.__statistics_controller = statistics_controller
def run_console(self):
# TODO implement an menu or cmd based console
self.__init_data()
print("all products:")
self.__print_all_products()
print("all orders:")
self.__print_all_orders()
print("products filtered by name (name containing the string 'p'):")
MyUtil.print_list(self.__product_controller.filter_products_by_name("p"))
print("the cost of all orders is: ", self.__statistics_controller.compute_all_orders_cost())
print("the orders with the cost greater than 2 is:")
MyUtil.print_list(self.__statistics_controller.filter_orders(2))
self.__print_sorted_orders()
def __print_all_products(self):
MyUtil.print_list(self.__product_controller.get_all())
def __print_all_orders(self):
MyUtil.print_list(self.__order_controller.get_all())
def __init_data(self):
try:
self.__product_controller.add_product(1, "p1", 100)
self.__product_controller.add_product(2, "p2", 200)
self.__product_controller.add_product(3, "bla", 300)
self.__order_controller.add_order(1, 1, 2)
self.__order_controller.add_order(2, 1, 3)
self.__order_controller.add_order(3, 2, 4)
except StoreException as se:
print("exception when initializing data: ", se)
traceback.print_exc()
def __print_sorted_orders(self):
print("the orders sorted descending by cost and ascending by name:")
sorted_orders = self.__statistics_controller.sort_orders()
for i in range(0, len(sorted_orders)):
print(i + 1, sorted_orders[i])
| [
"[email protected]"
] | |
b65f85e32d7186281a2604cc160bd674c86d13a5 | 7aecab27c231c5207f26a1682543b0d6c5093c06 | /server/dancedeets/util/korean_dates.py | c6c4fe2fe48b14f79ae353c0f2e1153c24e04d9e | [] | no_license | mikelambert/dancedeets-monorepo | 685ed9a0258ea2f9439ae4ed47ebf68bb5f89256 | 4eff1034b9afd3417d168750ea3acfaecd20adc6 | refs/heads/master | 2022-08-10T07:16:32.427913 | 2018-04-15T22:05:58 | 2018-04-15T22:05:58 | 75,126,334 | 24 | 2 | null | 2022-07-29T22:28:45 | 2016-11-29T22:04:44 | Python | UTF-8 | Python | false | false | 2,382 | py | # -*-*- encoding: utf-8 -*-*-
import datetime
import re
DAWN = 6 # sun rises at 6am-ish
_DATETIME_SPAN_SEPARATOR = ur'(?:~|/|부터)' # '부터' = 'from'
_D_DATE = ur'(?P<day>\d+)[일\(]'
_MD_DATE = ur'(?:(?P<month>\d+)월\s*)?' + _D_DATE
_YMD_DATE = ur'(?:(?P<year>\d+)[년녀]\s*)?' + _MD_DATE
# WEEKDAY = r'(?:\(.\)| ..일)?'
_AM_PM_TIME = ur'(?:(?P<ampm>오후|오전) )?(?P<hour>\d+)시 ?(?:(?P<minute>\d+)분|(?P<half>반))?'
_TIME = ur'(?:(?P<dawn>새벽)|%s)' % _AM_PM_TIME
def _extract_date(m, date_default=None):
return datetime.date(
int(m.group('year') or date_default.year), int(m.group('month') or date_default.month), int(m.group('day') or date_default.day)
)
def _extract_time(m, time_default=None):
if m.group('dawn'):
return datetime.time(DAWN)
if m.group('half'):
minute = 30
else:
minute = int(m.group('minute') or 0)
if unicode(m.group('ampm')) == u'오후':
ampm_offset = 12
elif m.group('ampm') == u'오전':
ampm_offset = 0
else:
ampm_offset = 12
hour = int(m.group('hour'))
if hour == 12:
hour = 0
return datetime.time(hour + ampm_offset, minute)
def parse_times(s):
elems = re.split(_DATETIME_SPAN_SEPARATOR, s, 2)
if len(elems) == 1:
start_str, end_str = elems[0], None
else:
start_str, end_str = elems[0], elems[1]
start_date_match = re.search(_YMD_DATE, start_str)
start_time_match = re.search(_TIME, start_str)
start_datetime = _extract_date(start_date_match)
if start_time_match:
start_datetime = datetime.datetime.combine(start_datetime, _extract_time(start_time_match))
end_datetime = None
if end_str:
end_date_match = re.search(_YMD_DATE, end_str)
end_time_match = re.search(_TIME, end_str)
if end_date_match or end_time_match:
if end_date_match:
end_datetime = _extract_date(end_date_match, date_default=start_datetime)
else:
if isinstance(start_datetime, datetime.datetime):
end_datetime = start_datetime.date()
else:
end_datetime = start_datetime
if end_time_match:
end_datetime = datetime.datetime.combine(end_datetime, _extract_time(end_time_match))
return (start_datetime, end_datetime)
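# Illustrative example (an addition for clarity, not part of the original module;
# the value shown is what the patterns above should produce for this input, approximately):
#
#     parse_times(u'2016년 8월 5일 오후 8시 ~ 11시')
#     # -> (datetime.datetime(2016, 8, 5, 20, 0), datetime.datetime(2016, 8, 5, 23, 0))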
| [
"[email protected]"
] | |
220ac4e18bfaea6b2a5fc122ae8bcc38836508db | fafb89a3552e4dbb47d134966462ef5f3f37f576 | /KEMP/v0.6_ovelap_ok/fdtd3d/cpu/test/pbc_multi_plot.py | 218f06c00053697b3fa19d4b987ec597ccaa7b61 | [] | no_license | EMinsight/fdtd_accelerate | 78fa1546df5264550d12fba3cf964838b560711d | a566c60753932eeb646c4a3dea7ed25c7b059256 | refs/heads/master | 2021-12-14T03:26:52.070069 | 2012-07-25T08:25:21 | 2012-07-25T08:25:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,528 | py | import numpy as np
import sys, os
sys.path.append( os.path.expanduser('~') )
from kemp.fdtd3d.cpu import QueueTask, Fields, Core, Pbc, IncidentDirect
tmax = 150
tfunc = lambda tstep: np.sin(0.05 * tstep)
# plot
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rc('image', interpolation='nearest', origin='lower')
fig = plt.figure(figsize=(14,8))
# z-axis
nx, ny, nz = 180, 160, 2
fields = Fields(QueueTask(), nx, ny, nz)
Core(fields)
Pbc(fields, 'xyz')
IncidentDirect(fields, 'ey', (20, 0, 0), (20, -1, -1), tfunc)
IncidentDirect(fields, 'ex', (0, 20, 0), (-1, 20, -1), tfunc)
for tstep in xrange(1, tmax+1):
fields.update_e()
fields.update_h()
fields.enqueue_barrier()
ax1 = fig.add_subplot(2, 3, 1)
ax1.imshow(fields.get('ey')[:,:,nz/2].T, vmin=-1.1, vmax=1.1)
ax1.set_title('%s, ey[20,:,:]' % repr(fields.ns))
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax2 = fig.add_subplot(2, 3, 4)
ax2.imshow(fields.get('ex')[:,:,nz/2].T, vmin=-1.1, vmax=1.1)
ax2.set_title('%s, ex[:,20,:]' % repr(fields.ns))
ax2.set_xlabel('x')
ax2.set_ylabel('y')
# y-axis
nx, ny, nz = 180, 2, 160
fields = Fields(QueueTask(), nx, ny, nz)
Core(fields)
Pbc(fields, 'xyz')
IncidentDirect(fields, 'ez', (20, 0, 0), (20, -1, -1), tfunc)
IncidentDirect(fields, 'ex', (0, 0, 20), (-1, -1, 20), tfunc)
for tstep in xrange(1, tmax+1):
fields.update_e()
fields.update_h()
fields.enqueue_barrier()
ax1 = fig.add_subplot(2, 3, 2)
ax1.imshow(fields.get('ez')[:,ny/2,:].T, vmin=-1.1, vmax=1.1)
ax1.set_title('%s, ez[20,:,:]' % repr(fields.ns))
ax1.set_xlabel('x')
ax1.set_ylabel('z')
ax2 = fig.add_subplot(2, 3, 5)
ax2.imshow(fields.get('ex')[:,ny/2,:].T, vmin=-1.1, vmax=1.1)
ax2.set_title('%s, ex[:,:,20]' % repr(fields.ns))
ax2.set_xlabel('x')
ax2.set_ylabel('z')
# x-axis
nx, ny, nz = 2, 180, 160
fields = Fields(QueueTask(), nx, ny, nz)
Core(fields)
Pbc(fields, 'xyz')
IncidentDirect(fields, 'ez', (0, 20, 0), (-1, 20, -1), tfunc)
IncidentDirect(fields, 'ey', (0, 0, 20), (-1, -1, 20), tfunc)
for tstep in xrange(1, tmax+1):
fields.update_e()
fields.update_h()
fields.enqueue_barrier()
ax1 = fig.add_subplot(2, 3, 3)
ax1.imshow(fields.get('ez')[nx/2,:,:].T, vmin=-1.1, vmax=1.1)
ax1.set_title('%s, ez[:,20,:]' % repr(fields.ns))
ax1.set_xlabel('y')
ax1.set_ylabel('z')
ax2 = fig.add_subplot(2, 3, 6)
ax2.imshow(fields.get('ey')[nx/2,:,:].T, vmin=-1.1, vmax=1.1)
ax2.set_title('%s, ey[:,:,20]' % repr(fields.ns))
ax2.set_xlabel('y')
ax2.set_ylabel('z')
#plt.savefig('./png/%.6d.png' % tstep)
plt.show()
| [
"[email protected]"
] | |
b04305f109a7f6222d39d666b9d65f277cae3196 | ce972e94fcdf19d6809d94c2a73595233d1f741d | /catkin_ws/build/tf/cmake/tf-genmsg-context.py | 552ade3ed0568d977725f0742c6cd2ff5e746bcd | [] | no_license | WilliamZipanHe/reward_shaping_ttr | cfa0e26579f31837c61af3e09621b4dad7eaaba2 | df56cc0153147bb067bc3a0eee0e1e4e1044407f | refs/heads/master | 2022-02-23T05:02:00.120626 | 2019-08-07T21:52:50 | 2019-08-07T21:52:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/local-scratch/xlv/catkin_ws/src/geometry/tf/msg/tfMessage.msg"
services_str = "/local-scratch/xlv/catkin_ws/src/geometry/tf/srv/FrameGraph.srv"
pkg_name = "tf"
dependencies_str = "geometry_msgs;sensor_msgs;std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "tf;/local-scratch/xlv/catkin_ws/src/geometry/tf/msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg;sensor_msgs;/opt/ros/kinetic/share/sensor_msgs/cmake/../msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/local-scratch/xlv/miniconda3/envs/py35_no_specific/bin/python3.5"
package_has_static_sources = 'TRUE' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"[email protected]"
] | |
93ec3d2ea48c1385a38a8b7ceeee9acbb8f10be1 | fa0f12a6d63be22b588133bfb9c130f1eeecab3d | /myvenv/lib/python3.7/site-packages/pip/_internal/operations/freeze.py | 6d6ca7f62ca26deb7b07837a5ea2382969478da6 | [] | no_license | 8th-caulion/high-hat | 6b2c455be14b5e617bf993cfb67c68975df3aa65 | fc1f9793747892b7b58f066c45ab95d3f0269db9 | refs/heads/master | 2023-08-02T12:07:36.540488 | 2020-06-03T17:36:32 | 2020-06-03T17:36:32 | 267,542,957 | 0 | 6 | null | 2021-09-22T19:09:26 | 2020-05-28T09:04:29 | Python | UTF-8 | Python | false | false | 14,660 | py | <<<<<<< HEAD
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
# mypy: disallow-untyped-defs=False
=======
>>>>>>> 71358189c5e72ee2ac9883b408a2f540a7f5745e
from __future__ import absolute_import
import collections
import logging
import os
from pip._vendor import six
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.pkg_resources import RequirementParseError
from pip._internal.exceptions import BadCommand, InstallationError
from pip._internal.req.constructors import (
    install_req_from_editable,
    install_req_from_line,
)
from pip._internal.req.req_file import COMMENT_RE
from pip._internal.utils.direct_url_helpers import (
    direct_url_as_pep440_direct_reference,
    dist_get_direct_url,
)
from pip._internal.utils.misc import (
    dist_is_editable,
    get_installed_distributions,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
    from typing import (
        Iterator, Optional, List, Container, Set, Dict, Tuple, Iterable, Union
    )
    from pip._internal.cache import WheelCache
    from pip._vendor.pkg_resources import (
Distribution, Requirement
)
RequirementInfo = Tuple[Optional[Union[str, Requirement]], bool, List[str]]
logger = logging.getLogger(__name__)
def freeze(
requirement=None, # type: Optional[List[str]]
find_links=None, # type: Optional[List[str]]
local_only=None, # type: Optional[bool]
user_only=None, # type: Optional[bool]
    paths=None,  # type: Optional[List[str]]
isolated=False, # type: bool
wheel_cache=None, # type: Optional[WheelCache]
exclude_editable=False, # type: bool
skip=() # type: Container[str]
):
# type: (...) -> Iterator[str]
find_links = find_links or []
    for link in find_links:
        yield '-f {}'.format(link)
    installations = {}  # type: Dict[str, FrozenRequirement]
    for dist in get_installed_distributions(local_only=local_only,
                                            skip=(),
                                            user_only=user_only,
                                            paths=paths):
        try:
            req = FrozenRequirement.from_dist(dist)
        except RequirementParseError as exc:
            # We include dist rather than dist.project_name because the
            # dist string includes more information, like the version and
            # location. We also include the exception message to aid
            # troubleshooting.
            logger.warning(
                'Could not generate requirement for distribution %r: %s',
                dist, exc
)
continue
if exclude_editable and req.editable:
continue
        installations[req.canonical_name] = req
if requirement:
# the options that don't get turned into an InstallRequirement
# should only be emitted once, even if the same option is in multiple
# requirements files, so we need to keep track of what has been emitted
# so that we don't emit it again if it's seen again
emitted_options = set() # type: Set[str]
# keep track of which files a requirement is in so that we can
# give an accurate warning if a requirement appears multiple times.
req_files = collections.defaultdict(list) # type: Dict[str, List[str]]
for req_file_path in requirement:
with open(req_file_path) as req_file:
for line in req_file:
if (not line.strip() or
line.strip().startswith('#') or
line.startswith((
'-r', '--requirement',
'-Z', '--always-unzip',
'-f', '--find-links',
'-i', '--index-url',
'--pre',
'--trusted-host',
'--process-dependency-links',
'--extra-index-url'))):
line = line.rstrip()
if line not in emitted_options:
emitted_options.add(line)
yield line
continue
if line.startswith('-e') or line.startswith('--editable'):
if line.startswith('-e'):
line = line[2:].strip()
else:
line = line[len('--editable'):].strip().lstrip('=')
line_req = install_req_from_editable(
line,
isolated=isolated,
)
else:
line_req = install_req_from_line(
COMMENT_RE.sub('', line).strip(),
isolated=isolated,
)
if not line_req.name:
logger.info(
"Skipping line in requirement file [%s] because "
"it's not clear what it would install: %s",
req_file_path, line.strip(),
)
logger.info(
" (add #egg=PackageName to the URL to avoid"
" this warning)"
)
                    else:
                        line_req_canonical_name = canonicalize_name(
                            line_req.name)
                        if line_req_canonical_name not in installations:
                            # either it's not installed, or it is installed
                            # but has been processed already
                            if not req_files[line_req.name]:
                                logger.warning(
                                    "Requirement file [%s] contains %s, but "
                                    "package %r is not installed",
                                    req_file_path,
                                    COMMENT_RE.sub('', line).strip(),
                                    line_req.name
                                )
                            else:
                                req_files[line_req.name].append(req_file_path)
                        else:
                            yield str(installations[
                                line_req_canonical_name]).rstrip()
                            del installations[line_req_canonical_name]
                            req_files[line_req.name].append(req_file_path)
# Warn about requirements that were included multiple times (in a
# single requirements file or in different requirements files).
for name, files in six.iteritems(req_files):
if len(files) > 1:
logger.warning("Requirement %s included multiple times [%s]",
name, ', '.join(sorted(set(files))))
yield(
'## The following requirements were added by '
'pip freeze:'
)
for installation in sorted(
installations.values(), key=lambda x: x.name.lower()):
        if installation.canonical_name not in skip:
yield str(installation).rstrip()
def get_requirement_info(dist):
# type: (Distribution) -> RequirementInfo
"""
Compute and return values (req, editable, comments) for use in
FrozenRequirement.from_dist().
"""
if not dist_is_editable(dist):
return (None, False, [])
location = os.path.normcase(os.path.abspath(dist.location))
from pip._internal.vcs import vcs, RemoteNotFoundError
<<<<<<< HEAD
vcs_backend = vcs.get_backend_for_dir(location)
if vcs_backend is None:
req = dist.as_requirement()
logger.debug(
'No VCS found for editable requirement "%s" in: %r', req,
=======
vc_type = vcs.get_backend_type(location)
if not vc_type:
req = dist.as_requirement()
logger.debug(
'No VCS found for editable requirement {!r} in: {!r}', req,
>>>>>>> 71358189c5e72ee2ac9883b408a2f540a7f5745e
location,
)
comments = [
'# Editable install with no version control ({})'.format(req)
]
return (location, True, comments)
try:
        req = vcs_backend.get_src_requirement(location, dist.project_name)
except RemoteNotFoundError:
req = dist.as_requirement()
comments = [
'# Editable {} install with no remote ({})'.format(
                type(vcs_backend).__name__, req,
)
]
return (location, True, comments)
except BadCommand:
logger.warning(
'cannot determine version of editable source in %s '
'(%s command not found in path)',
location,
            vcs_backend.name,
)
return (None, True, [])
except InstallationError as exc:
logger.warning(
"Error when trying to get requirement for VCS system %s, "
"falling back to uneditable format", exc
)
else:
if req is not None:
return (req, True, [])
logger.warning(
'Could not determine repository location of %s', location
)
comments = ['## !! Could not determine repository location']
return (None, False, comments)
class FrozenRequirement(object):
def __init__(self, name, req, editable, comments=()):
# type: (str, Union[str, Requirement], bool, Iterable[str]) -> None
self.name = name
        self.canonical_name = canonicalize_name(name)
self.req = req
self.editable = editable
self.comments = comments
@classmethod
def from_dist(cls, dist):
# type: (Distribution) -> FrozenRequirement
        # TODO `get_requirement_info` is taking care of editable requirements.
        # TODO This should be refactored when we will add detection of
        # editable that provide .dist-info metadata.
        req, editable, comments = get_requirement_info(dist)
        if req is None and not editable:
            # if PEP 610 metadata is present, attempt to use it
            direct_url = dist_get_direct_url(dist)
            if direct_url:
                req = direct_url_as_pep440_direct_reference(
                    direct_url, dist.project_name
                )
                comments = []
        if req is None:
            # name==version requirement
req = dist.as_requirement()
return cls(dist.project_name, req, editable, comments=comments)
def __str__(self):
req = self.req
if self.editable:
            req = '-e {}'.format(req)
return '\n'.join(list(self.comments) + [str(req)]) + '\n'
| [
"[email protected]"
] | |
c1f2cdad0c15be0461cfad4fc0ce8cad5a672d6f | eca90951b53822740812e40572e209728f71c261 | /models.py | a109ee157f955568d9590b918a0010e3fee55a02 | [] | no_license | tungvx/reporting | b7117879be773fccd90dbdb36a9e1220edc1d202 | 98f54c821aad761c0ab0ab83a8faad232ece1b41 | refs/heads/master | 2021-01-16T18:20:46.613907 | 2012-03-25T13:42:06 | 2012-03-25T13:42:06 | 3,491,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,661 | py | # -*- coding: utf-8 -*-
import datetime
from django.db import models
from django import forms
class Upload(models.Model): #Upload files table in databases
filename = models.CharField(max_length=255)
upload_time = models.DateTimeField('time uploaded')
description = models.CharField(max_length=255)
filestore = models.CharField(max_length=255)
def __unicode__(self):
return self.description
class Spreadsheet_report(models.Model): # model to store the information about the spreadsheet used by user
created_time = models.DateTimeField('time created')
description = models.CharField(max_length=255)
spreadsheet_link = models.CharField(max_length=255)
output_link = models.CharField(max_length=255)
title = models.CharField(max_length=255)
def __unicode__(self):
return self.description
class upload_file_form(forms.Form): # Define a simple form for uploading Excel files
description = forms.CharField(max_length=255,required=True)
file = forms.FileField(required=True,)
def handle_uploaded_file(f,location,filename):
#Save file upload content to uploaded folder
fd = open('%s/%s' % (location, str(filename)), 'wb') #Create new file for write
for chunk in f.chunks():
fd.write(chunk) #Write file data
fd.close() #Close the file
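# Hypothetical sketch of wiring the form and handler above into a view; the view name
# and the 'uploaded' target directory are assumptions, not part of this project:
#
#     def upload_view(request):
#         if request.method == 'POST':
#             form = upload_file_form(request.POST, request.FILES)
#             if form.is_valid():
#                 f = request.FILES['file']
#                 handle_uploaded_file(f, 'uploaded', f.name)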
class spreadsheet_report_form(forms.Form):
description = forms.CharField(max_length=255,required=True)
spreadsheet_link = forms.CharField(max_length=255,required=False) | [
"[email protected]"
] | |
ba265bb9d96f3ceeec3b311c1c36ce36f9c18206 | f50114692187a054bf2627695c6380d5ac79a20c | /q0028.py | 26642f410f3a67d77ed2d7e0af65fad57a49ecc3 | [] | no_license | petitepirate/interviewQuestions | c0cb8775932f90ff5c8e4ef80be468ef4155052f | 209322c1f1ddbe8111dc2c5e9c35aaf787e0196a | refs/heads/master | 2023-07-17T12:15:22.847440 | 2021-06-14T02:32:39 | 2021-06-14T02:32:39 | 286,884,022 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,685 | py | # This problem was asked by Palantir.
# Write an algorithm to justify text. Given a sequence of words and an integer line length
# k, return a list of strings which represents each line, fully justified.
# More specifically, you should have as many words as possible in each line. There should
# be at least one space between each word. Pad extra spaces when necessary so that each line
# has exactly length k. Spaces should be distributed as equally as possible, with the extra
# spaces, if any, distributed starting from the left.
# If you can only fit one word on a line, then you should pad the right-hand side with spaces.
# Each word is guaranteed not to be longer than k.
# For example, given the list of words ["the", "quick", "brown", "fox", "jumps", "over",
# "the", "lazy", "dog"] and k = 16, you should return the following:
# ["the quick brown", # 1 extra space on the left
# "fox jumps over", # 2 extra spaces distributed evenly
# "the lazy dog"] # 4 extra spaces distributed evenly
# ________________________________________________________________________________________
# Solution
# It seems like the justification algorithm is independent from the groupings, so immediately
# we should figure out two things:
# How to group lines together so that it is as close to k as possible (without going over)
# Given a grouping of lines, justifying the text by appropriately distributing spaces
# To solve the first part, let's write a function group_lines that takes in all the words in
# our input sequence as well as out target line length k, and return a list of list of words
# that represents the lines that we will eventually justify. Our main strategy will be to
# iterate over all the words, keep a list of words for the current line, and because we want
# to fit as many words as possible per line, estimate the current line length, assuming only
# one space between each word. Once we go over k, then save the word and start a new line with
# it. So our function will look something like this:
import math
def min_line(words):
return ' '.join(words)
def group_lines(words, k):
'''
Returns groupings of |words| whose total length, including 1 space in between,
is less than |k|.
'''
groups = []
current_sum = 0
current_line = []
for _, word in enumerate(words):
# Check if adding the next word would push it over
# the limit. If it does, then add |current_line| to
# group. Also reset |current_line| properly.
if len(min_line(current_line + [word])) > k:
groups.append(current_line)
current_line = []
current_line.append(word)
# Add the last line to groups.
groups.append(current_line)
return groups
# Then, we'll want to actually justify each line. We know for sure each line we feed
# from group_lines is the maximum number of words we can pack into a line and no more.
# What we can do is first figure out how many spaces we have available to distribute
# between each word. Then from that, we can calculate how much base space we should
# have between each word by dividing it by the number of words minus one. If there are
# any leftover spaces to distribute, then we can keep track of that in a counter, and
# as we rope in each new word we'll add the appropriate number of spaces. We can't add
# more than one leftover space per word.
def justify(words, length):
'''
Precondition: |words| can fit in |length|.
Justifies the words using the following algorithm:
- Find the smallest spacing between each word (available_spaces / spaces)
- Add a leftover space one-by-one until we run out
'''
if len(words) == 1:
word = words[0]
num_spaces = length - len(word)
spaces = ' ' * num_spaces
return word + spaces
spaces_to_distribute = length - sum(len(word) for word in words)
number_of_spaces = len(words) - 1
smallest_space = math.floor(spaces_to_distribute / number_of_spaces)
leftover_spaces = spaces_to_distribute - \
(number_of_spaces * smallest_space)
justified_words = []
for word in words:
justified_words.append(word)
current_space = ' ' * smallest_space
if leftover_spaces > 0:
current_space += ' '
leftover_spaces -= 1
justified_words.append(current_space)
return ''.join(justified_words).rstrip()
# The final solution should just combine our two functions:
def justify_text(words, k):
return [justify(group, k) for group in group_lines(words, k)]
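# Quick illustrative check on the example from the problem statement (this __main__
# block is an added demonstration; the sample words and k=16 come from above):
if __name__ == '__main__':
    sample_words = ["the", "quick", "brown", "fox", "jumps", "over", "the", "lazy", "dog"]
    for line in justify_text(sample_words, 16):
        print(repr(line))
    # Expected lines: 'the  quick brown', 'fox  jumps  over', 'the   lazy   dog'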
| [
"[email protected]"
] | |
ede19815e880082d79936d9cd70e6f8e3e80d2d2 | 0ceb9ffeb2c087b8ae75c1a1179387fe36379f17 | /test_users.py | 31f33f5ba427527d4314a2b07fc4c576bc50f3d0 | [] | no_license | chetat/eveno | 47a3a99ebe4c5ea10252e1a21c45129e61b3e2ba | dbc138cde6e48039614cea52d3dc7bcad869a1dd | refs/heads/master | 2022-12-09T13:51:46.923622 | 2021-03-09T10:21:25 | 2021-03-09T10:21:25 | 224,212,711 | 0 | 0 | null | 2022-09-16T18:20:10 | 2019-11-26T14:34:12 | Python | UTF-8 | Python | false | false | 1,623 | py | import os
import json
from app import create_app, sqlalchemy as db
from sqlalchemy import create_engine, text
from flask_sqlalchemy import SQLAlchemy
from app.config import TestingConfig
from models import initialize_db
import unittest
class UsersTestCase(unittest.TestCase):
"""This class represents the Event App test case"""
def setUp(self):
"""Executed before each test.
Define test variables and initialize app."""
self.app = create_app(TestingConfig)
self.client = self.app.test_client
self.user = {
"email": "[email protected]",
"firstname": "Yeku Wilfred",
"lastname": "chetat",
"phone": "671357962",
"password": "weezybaby"
}
with self.app.app_context():
# create all tables
db.create_all()
initialize_db()
def tearDown(self):
with self.app.app_context():
db.session.remove()
db.drop_all()
def test_create_user(self):
res = self.client().post("api/v1/users", json=self.user)
        self.assertEqual(res.status_code, 200)
def test_get_users(self):
res = self.client().get("api/v1/users")
        self.assertEqual(res.status_code, 200)
def test_invalid_credentials(self):
res = self.client().post("api/v1/auth", json={"email": "yekuwilfred@gmailcom",
"password": "wybaby"
})
self.assertEqual(res.status_code, 404)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
0cc046943de4f6e8e84509edc6055886e1602b1e | 024a515e8741ecc88b4cc20c067a5ef7785375f2 | /preproc_dataset.py | 0241efc2f5bb8f421152bf5b2a64df8c226e9aa6 | [
"MIT"
] | permissive | PCJohn/maml | e3752a842557210e955f68479e0a056766647d54 | 17362ed5f9d85863ead5774c3e08e163c260bd16 | refs/heads/master | 2020-03-28T11:37:42.014350 | 2018-10-01T17:08:12 | 2018-10-01T17:08:12 | 148,232,484 | 0 | 0 | null | 2018-09-10T23:35:04 | 2018-09-10T23:35:03 | null | UTF-8 | Python | false | false | 1,756 | py | import os
import numpy as np
from PIL import Image
dataset = 'miniimagenet'
train_count = 10 # train samples per class
val_count = 590 # val samples per class
mnist_dir = '/home/prithvi/dsets/MNIST/trainingSet/'
omniglot_dir = '/home/prithvi/dsets/Omniglot/train'
miniimagenet_dir = '/home/prithvi/dsets/miniimagenet/train'
save_dir = './data'
if dataset == 'mnist':
data_dir = mnist_dir
size = (28,28)
elif dataset == 'omniglot':
data_dir = omniglot_dir
size = (28,28)
elif dataset == 'miniimagenet':
data_dir = miniimagenet_dir
size = (84,84)
if __name__ == '__main__':
train_dir = os.path.join(save_dir,'metatrain')
val_dir = os.path.join(save_dir,'metaval')
if not (os.path.exists(train_dir)):
os.system('mkdir '+train_dir)
if not (os.path.exists(val_dir)):
os.system('mkdir '+val_dir)
for cls in os.listdir(data_dir):
cls_dir = os.path.join(data_dir,cls)
cls_train_dir = os.path.join(train_dir,cls)
if not os.path.exists(cls_train_dir):
os.system('mkdir '+cls_train_dir)
cls_val_dir = os.path.join(val_dir,cls)
if not os.path.exists(cls_val_dir):
os.system('mkdir '+cls_val_dir)
samples = map(lambda x:(Image.open(x).resize(size,resample=Image.LANCZOS),
os.path.split(x)[-1]),
[os.path.join(cls_dir,s) for s in os.listdir(cls_dir)])
np.random.shuffle(samples)
train,val = samples[:train_count],samples[train_count:train_count+val_count]
for s,fname in train:
s.save(os.path.join(cls_train_dir,fname))
for s,fname in val:
s.save(os.path.join(cls_val_dir,fname))
| [
"[email protected]"
] | |
976fe04075eb0ae35fbceb4cf4f95288fd19f182 | 9a0ada115978e9600ad7f1eab65fcc8825f637cf | /work_in_progress/kezhi_paper/classification/intermedia_classification_WT_new.py | 23736c730b2fe6d41790fd9dfaa294059b6a9fd6 | [] | no_license | ver228/work-in-progress | c1971f8d72b9685f688a10e4c5a1b150fa0812da | ef5baecc324da4550f81edb0513d38f039ee3429 | refs/heads/master | 2018-12-16T22:18:55.457290 | 2018-09-14T09:27:49 | 2018-09-14T09:27:49 | 56,165,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,223 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 06 16:16:01 2017
@author: kezhili
"""
import numpy as np
import tables
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
no_neuron = 1040 # step size in the mesh
no_fea = 30
fea_no = 30
names = ["Nearest Neighbors", "RBF SVM", "Gaussian Process",
"Decision Tree", "Random Forest"]
if __name__ == '__main__':
fdata = 'interm_LR_WTmemory_interm_result_30features_WT.hdf5'
with tables.File(fdata, 'r') as fid:
X = fid.get_node('/X3')[:]
Y = fid.get_node('/Y3')[:]
#because of the fix of a previous error in the dataset,
#where 'LSJ1' and 'L5J1' are actually the same class
Y[97:107] = Y[107]
with open('result_30_features.txt') as f:
lines = f.read().split('\n')
X_ind = [x == 'True' for x in lines if x]
Xp = X[:,X_ind]
cross_validation_fold = 5
for n_estimators in [10, 100, 1000]:
clf2 = RandomForestClassifier(n_estimators=n_estimators)
c_val = cross_val_score(clf2, Xp, Y, cv = cross_validation_fold)
print(np.mean(c_val), np.std(c_val)) | [
"[email protected]"
] | |
694c9e252cceb994dcb65e0a0c42be5b6500d395 | 10e89eb922a5c122079a55234169e5b0e7af0819 | /histore/cli/base.py | 06f81c1b370a21633cbbc13230aeb3a0d81faa09 | [
"BSD-3-Clause"
] | permissive | Sandy4321/histore | 5753dd0008d1ae3400506181f22789aa9fdb43ba | d600052514a1c5f672137f76a6e1388184b17cd4 | refs/heads/master | 2023-04-15T07:57:49.762415 | 2021-04-28T11:12:31 | 2021-04-28T11:12:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,101 | py | # This file is part of the History Store (histore).
#
# Copyright (C) 2018-2021 New York University.
#
# The History Store (histore) is released under the Revised BSD License. See
# file LICENSE for full license details.
"""Command line interface to interact with a manager for archives on the local
file system.
"""
import click
import os
import sys
from histore.archive.manager.persist import PersistentArchiveManager
from histore.cli.archive import create_archive, delete_archive, list_archives, rename_archive
from histore.cli.snapshot import checkout_snapshot, commit_snapshot, list_snapshots
import histore.config as config
# -- Init the archive manager -------------------------------------------------
@click.command(name='init')
@click.option(
'-b', '--basedir',
required=False,
type=click.Path(file_okay=False, dir_okay=True),
help='Base directory for archive files'
)
@click.option(
'-c', '--dbconnect',
required=False,
help='Connect URL for the database'
)
def init_manager(basedir, dbconnect):
"""Initialize the archive manager directory."""
# Test if the base directory exists and is empty.
basedir = basedir if basedir is not None else config.BASEDIR()
if os.path.isdir(basedir):
if os.listdir(basedir):
click.echo('Not an empty directory {}.'.format(basedir))
sys.exit(-1)
# Create instance of persistent archive manager to setup directories and
# files.
PersistentArchiveManager(
basedir=basedir,
dbconnect=dbconnect,
create=True
)
click.echo("Initialized in {}.".format(os.path.abspath(basedir)))
# -- Create command group -----------------------------------------------------
@click.group()
def cli(): # pragma: no cover
"""Command line interface for HISTORE archive manager."""
pass
cli.add_command(init_manager)
cli.add_command(checkout_snapshot)
cli.add_command(commit_snapshot)
cli.add_command(create_archive)
cli.add_command(delete_archive)
cli.add_command(list_archives)
cli.add_command(list_snapshots)
cli.add_command(rename_archive)
| [
"[email protected]"
] | |
d605f5ceb43331ec9fadb7f52b44bb25329cddd3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03860/s390012203.py | a8bd0d3f0f60bc1222c64af89e721898ed697180 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | a = input()
initial_middle = a[8]
print('A'+ initial_middle +'C')
| [
"[email protected]"
] | |
8c8c0f407803a6d1d73f88221236034dfd1a132e | 7325479ff4074cb71096a1b192cb8bfce9729189 | /19 Python Built-in Functions/Python bytearray.py | 6fecd53cd7405429ce7c0fbcf99041653ae82c6a | [] | no_license | riyadhswe/Python_Javatpoint | 2220ab3818d7eb9b25f07e3b70e5c9f46f3cccbc | e42953c3fe0567040de850dddd0656b3ed1a17c2 | refs/heads/master | 2023-02-02T02:56:07.974814 | 2020-12-15T11:45:38 | 2020-12-15T11:45:38 | 293,787,032 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | string = "Python is programming language."
# string with encoding 'utf-8'
arr = bytearray(string, 'utf-8')
print(arr) | [
"[email protected]"
] | |
7b4029c8b2039d20fcfa67a38e0af84120540d04 | 21e35e3889cd0064474343a3b84aa289a01f8fac | /third_example_flask_app/classes.py | 89d5863ab289c20518eb2bebec2bf9439ba3ca5f | [] | no_license | EricSchles/learn_python | b8fbc2b38647efb8e7176ac0d20708ffe1691007 | 8a5eb76aa333253a6c01f76d36dacad6bcf931ea | refs/heads/master | 2021-01-13T16:45:53.606240 | 2017-01-31T21:10:53 | 2017-01-31T21:10:53 | 77,006,920 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | class Integer:
def __init__(self,number):
self.number = number
def add(self,other_integer):
self.number += other_integer
def subtract(self,other_integer):
self.number -= other_integer
def __str__(self):
return repr(self.number)
if __name__ == '__main__':
number = Integer(5)
number.add(7)
print(number)
| [
"[email protected]"
] | |
9629e60fd5a07f4f2ca46385317661d71bf72660 | 26d5c795d8aa83bf5cb3f228675ff51e2f704f57 | /scripts/s3stream-dl | 7676dcc4c1287bba92fbf83a97df5657a705d475 | [] | no_license | binarymachines/mercury | 8e13bb10c67a056fe88e02f558d73f1f1b95d028 | db3e2425f4e77a44a97c740f7fff90312a1bd33f | refs/heads/master | 2023-07-08T11:35:26.867494 | 2023-06-25T00:46:23 | 2023-06-25T00:46:23 | 94,708,610 | 2 | 6 | null | 2023-02-15T21:50:06 | 2017-06-18T19:31:50 | Python | UTF-8 | Python | false | false | 8,504 | #!/usr/bin/env python
'''
Usage:
s3stream-dl --manifest_uri <s3_uri> --format=<fmt> --list
s3stream-dl --manifest_uri <s3_uri> --format=<fmt> [--delimiter=<delimiter>]
s3stream-dl [-p] --path <s3_uri> --pfx <prefix> --format <fmt> [--d=<delimiter>] --xcfg=<xfile_cfg> --xmap=<xfile_map> --ncfg=<ngst_cfg> --ntarget=<ngst_target> [--nchannel=<channel>]
s3stream-dl [-p] --path <s3_uri> --pfx <prefix> --format=<fmt> --ncfg=<ngst_cfg> --ntarget=<ngst_target> [--nchannel=<channel>]
Options:
-p,--parallel : stream bucket contents in parallel
'''
import os, sys
import json
import multiprocessing as mp
from snap import common
import docopt
import sh
from sh import aws # AWS CLI must already be installed
from sh import gsutil
from sh import xfile # Mercury ETL toolkit must already be installed
from sh import ngst
class DATA_FORMAT(object):
    CSV = 'csv'
    JSON = 'json'
    PARQUET = 'parquet'
class Mode():
SERIAL = 'serial'
PARALLEL = 'parallel'
def list_bucket_files_for_table(tablename, bucket_uri, directory, data_format):
    extension = data_format
    target_uri = os.path.join(bucket_uri, directory, '%s_*.%s' % (tablename, extension))
    filenames = [name.strip() for name in gsutil.ls(target_uri)]
    return filenames
def get_bucket_filecount_for_table(tablename, bucket_uri, directory, data_format):
return len(list_bucket_files_for_table(tablename, bucket_uri, directory, data_format))
def stream_file_contents_direct_to_ngst(file_uri,
ngst_configfile,
ngst_target,
mode,
channel=None):
module = __name__
parent = os.getppid()
pid = os.getpid()
if channel:
ngst_cmd = ngst.bake('--config', ngst_configfile, '--target', ngst_target, '--channel=%s' % channel)
else:
ngst_cmd = ngst.bake('--config', ngst_configfile, '--target', ngst_target)
for line in ngst_cmd(gsutil('cp', file_uri, '-', _piped=True), _iter=True):
if mode == Mode.SERIAL:
print(line, file=sys.stderr)
else:
print('[%s:%s (child_proc_%s)]: %s' % (module, parent, pid, line), file=sys.stderr)
def relay_file_contents_to_ngst(file_uri,
data_format,
delimiter,
xfile_configfile,
xfile_map,
ngst_configfile,
ngst_target,
mode,
channel=None):
module = __name__
parent = os.getppid()
pid = os.getpid()
if delimiter:
xfile_cmd = xfile.bake('--config', xfile_configfile, '--delimiter', delimiter, '--map', xfile_map, '-s')
elif data_format == DATA_FORMAT.JSON:
xfile_cmd = xfile.bake('--config', xfile_configfile, '--json', '--map', xfile_map, '-s')
else:
raise Exception('only csv and json formats are currently supported.')
if channel:
ngst_cmd = ngst.bake('--config', ngst_configfile, '--target', ngst_target, '--channel=%s' % channel)
else:
ngst_cmd = ngst.bake('--config', ngst_configfile, '--target', ngst_target)
for line in ngst_cmd(xfile_cmd(gsutil('cp', file_uri, '-', _piped=True), _piped=True), _iter=True):
if mode == Mode.SERIAL:
print(line, file=sys.stderr)
else:
print('[%s:%s (child_proc_%s)]: %s' % (module, parent, pid, line), file=sys.stderr)
def stream_file_contents(file_uri, xfile_configfile, delimiter, xfile_map, mode):
module = __name__
parent = os.getppid()
pid = os.getpid()
xfile_cmd = xfile.bake('--config', xfile_configfile, '--delimiter', delimiter, '--map', xfile_map, '-s')
for line in xfile_cmd(gsutil('cp', file_uri, '-', _piped=True), _iter=True):
if mode == Mode.SERIAL:
print(line, file=sys.stderr)
else:
print('[%s:%s (child_proc_%s)]: %s' % (module, parent, pid, line), file=sys.stderr)
def main(args):
data_format = args['--format']
if data_format == DATA_FORMAT.CSV:
if args.get('--xcfg') is not None and args.get('--d') is None:
print('### csv chosen as the data format, but no delimiter specified.', file=sys.stderr)
elif data_format != DATA_FORMAT.JSON:
print('!!! supported data formats are "csv" and "json".', file=sys.stderr)
return
tablename = args['<table>']
bucket = args['<bucket>']
directory = ''
if args.get('--dir') is not None:
directory = args['--dir']
if args.get('--list'):
files = list_bucket_files_for_table(tablename, bucket, directory, data_format)
print('\n'.join(files))
return
parallel_mode = False
if args['--parallel']:
parallel_mode = True
xfile_bypass_mode = False
xfile_config = args.get('--xcfg')
xfile_map = args.get('--xmap')
if xfile_config is None and xfile_map is None:
xfile_bypass_mode = True
print('### operating in xfile_bypass mode.', file=sys.stderr)
ngst_config = args.get('--ncfg')
ngst_target = args.get('--ntarget')
delimiter = args.get('--d') # if no delimiter is supplied, we will assume JSON data
if parallel_mode:
for file_uri in list_bucket_files_for_table(tablename, bucket, directory, data_format):
channel_id = args.get('--nchannel') # can be null
if xfile_bypass_mode:
try:
stream_args = (file_uri,
ngst_config,
ngst_target,
Mode.PARALLEL,
channel_id)
p = mp.Process(target=stream_file_contents_direct_to_ngst,
args=stream_args)
p.start()
p.join()
except sh.ErrorReturnCode as e:
print(e.stderr)
except Exception as e:
print(e)
else:
try:
stream_args = (file_uri,
data_format,
delimiter,
xfile_config,
xfile_map,
ngst_config,
ngst_target,
Mode.PARALLEL,
channel_id)
p = mp.Process(target=relay_file_contents_to_ngst,
args=stream_args)
p.start()
p.join()
except sh.ErrorReturnCode as e:
print(e.stderr)
except Exception as e:
print(e)
else:
for file_uri in list_bucket_files_for_table(tablename, bucket, directory, data_format):
channel_id = args.get('--nchannel')
try:
if xfile_bypass_mode:
stream_file_contents_direct_to_ngst(file_uri,
ngst_config,
ngst_target,
Mode.SERIAL,
channel_id)
else:
relay_file_contents_to_ngst(file_uri,
data_format,
delimiter,
xfile_config,
xfile_map,
ngst_config,
ngst_target,
Mode.SERIAL,
channel_id)
except sh.ErrorReturnCode as e:
print(e.stderr)
except Exception as e:
print(e)
if __name__ == '__main__':
args = docopt.docopt(__doc__)
main(args) | [
"[email protected]"
] | ||
6c81ceed055c2d375b64725926e5208cb180263e | 4e96f383d4703ad8ee58869ed91a0c8432c8a051 | /Cura/Cura/plugins/VersionUpgrade/VersionUpgrade25to26/__init__.py | c74b3218b6ee2cb2b18f90149702826398f3ef7d | [
"GPL-3.0-only",
"LGPL-3.0-only"
] | permissive | flight7788/3d-printing-with-moveo-1 | b2dba26010c4fa31815bc1d2d0966161a8600081 | 7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0 | refs/heads/Feature_Marlin_with_AlanBoy | 2022-08-30T18:36:44.785058 | 2020-05-30T07:52:58 | 2020-05-30T07:52:58 | 212,583,912 | 0 | 0 | MIT | 2020-05-16T07:39:47 | 2019-10-03T13:13:01 | C | UTF-8 | Python | false | false | 2,027 | py | # Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Any, Dict, TYPE_CHECKING
from . import VersionUpgrade25to26
if TYPE_CHECKING:
from UM.Application import Application
upgrade = VersionUpgrade25to26.VersionUpgrade25to26()
def getMetaData() -> Dict[str, Any]:
return {
"version_upgrade": {
# From To Upgrade function
("preferences", 4000000): ("preferences", 4000001, upgrade.upgradePreferences),
# NOTE: All the instance containers share the same general/version, so we have to update all of them
# if any is updated.
("quality_changes", 2000000): ("quality_changes", 2000001, upgrade.upgradeInstanceContainer),
("user", 2000000): ("user", 2000001, upgrade.upgradeInstanceContainer),
("definition_changes", 2000000): ("definition_changes", 2000001, upgrade.upgradeInstanceContainer),
("machine_stack", 3000000): ("machine_stack", 3000001, upgrade.upgradeMachineStack),
},
"sources": {
"quality_changes": {
"get_version": upgrade.getCfgVersion,
"location": {"./quality"}
},
"preferences": {
"get_version": upgrade.getCfgVersion,
"location": {"."}
},
"user": {
"get_version": upgrade.getCfgVersion,
"location": {"./user"}
},
"definition_changes": {
"get_version": upgrade.getCfgVersion,
"location": {"./machine_instances"}
},
"machine_stack": {
"get_version": upgrade.getCfgVersion,
"location": {"./machine_instances"}
}
}
}
def register(app: "Application") -> Dict[str, Any]:
return { "version_upgrade": upgrade }
| [
"[email protected]"
] | |
6d9ced454145fc2b7691e9934b6e754a3ceb726d | 18536f8145457a193b976eec44ee92535f588e54 | /tests/functional/s3api/test_select_object_content.py | 87750a30e5124bb75e76a8d9613cd5c8052055b8 | [
"Apache-2.0"
] | permissive | jamsheedsaeed/awsapp | 07e4ec6b9e07f679106db8e61f104ee9065e6af0 | 5498a3652b1d471a8695e14ca9739140e88a4b29 | refs/heads/master | 2023-01-01T23:44:16.181967 | 2020-06-19T06:56:48 | 2020-06-19T06:56:48 | 273,418,893 | 0 | 0 | NOASSERTION | 2022-12-26T20:16:39 | 2020-06-19T06:16:31 | Python | UTF-8 | Python | false | false | 4,758 | py | #!/usr/bin/env python
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import tempfile
import shutil
from awscli.testutils import BaseAWSCommandParamsTest
from awscli.testutils import BaseAWSHelpOutputTest
class TestGetObject(BaseAWSCommandParamsTest):
prefix = ['s3api', 'select-object-content']
def setUp(self):
super(TestGetObject, self).setUp()
self.parsed_response = {'Payload': self.create_fake_payload()}
self._tempdir = tempfile.mkdtemp()
def tearDown(self):
super(TestGetObject, self).tearDown()
shutil.rmtree(self._tempdir)
def create_fake_payload(self):
yield {'Records': {'Payload': b'a,b,c,d\n'}}
# These next two events are ignored because they aren't
# "Records".
yield {'Progress': {'Details': {'BytesScanned': 1048576,
'BytesProcessed': 37748736}}}
yield {'Records': {'Payload': b'e,f,g,h\n'}}
yield {'Stats': {'Details': {'BytesProcessed': 62605400,
'BytesScanned': 1662276}}}
yield {'End': {}}
def test_can_stream_to_file(self):
filename = os.path.join(self._tempdir, 'outfile')
cmdline = self.prefix[::]
cmdline.extend(['--bucket', 'mybucket'])
cmdline.extend(['--key', 'mykey'])
cmdline.extend(['--expression', 'SELECT * FROM S3Object'])
cmdline.extend(['--expression-type', 'SQL'])
cmdline.extend(['--request-progress', 'Enabled=True'])
cmdline.extend(['--input-serialization',
'{"CSV": {}, "CompressionType": "GZIP"}'])
cmdline.extend(['--output-serialization', '{"CSV": {}}'])
cmdline.extend([filename])
expected_params = {
'Bucket': 'mybucket',
'Key': u'mykey',
'Expression': 'SELECT * FROM S3Object',
'ExpressionType': 'SQL',
'InputSerialization': {'CSV': {}, 'CompressionType': 'GZIP'},
'OutputSerialization': {'CSV': {}},
'RequestProgress': {'Enabled': True},
}
stdout = self.assert_params_for_cmd(cmdline, expected_params)[0]
self.assertEqual(stdout, '')
with open(filename, 'r') as f:
contents = f.read()
self.assertEqual(contents, (
'a,b,c,d\n'
'e,f,g,h\n'
))
def test_errors_are_propagated(self):
self.http_response.status_code = 400
self.parsed_response = {
'Error': {
'Code': 'CastFailed',
'Message': 'Attempt to convert from one data type to another',
}
}
cmdline = self.prefix + [
'--bucket', 'mybucket',
'--key', 'mykey',
'--expression', 'SELECT * FROM S3Object',
'--expression-type', 'SQL',
'--request-progress', 'Enabled=True',
'--input-serialization', '{"CSV": {}, "CompressionType": "GZIP"}',
'--output-serialization', '{"CSV": {}}',
os.path.join(self._tempdir, 'outfile'),
]
expected_params = {
'Bucket': 'mybucket',
'Key': u'mykey',
'Expression': 'SELECT * FROM S3Object',
'ExpressionType': 'SQL',
'InputSerialization': {'CSV': {}, 'CompressionType': 'GZIP'},
'OutputSerialization': {'CSV': {}},
'RequestProgress': {'Enabled': True},
}
self.assert_params_for_cmd(
cmd=cmdline, params=expected_params,
expected_rc=254,
stderr_contains=(
'An error occurred (CastFailed) when '
'calling the SelectObjectContent operation'),
)
class TestHelpOutput(BaseAWSHelpOutputTest):
def test_output(self):
self.driver.main(['s3api', 'select-object-content', 'help'])
# We don't want to be super picky because the wording may change
# We just want to verify the Output section was customized.
self.assert_contains(
'Output\n======\n'
'This command generates no output'
)
self.assert_not_contains('[outfile')
self.assert_contains('outfile')
| [
"[email protected]"
] | |
d9fe6f00109d7ea449564f348bea0fbcf2feca43 | 001184c168b93118f0429b11bab55fe108928b5d | /Week 3- Programming Assignments/Orientation2.py | 533af81246e273eec244ce942ae5e3e9300f2085 | [] | no_license | harrypotter0/ml-robot | caee1f9695427b8a83f64a41420f89b948fdb801 | 06e6672ba47fd73d4077ff5a6f48bdfafaea6597 | refs/heads/master | 2021-09-14T04:28:31.987037 | 2018-05-08T14:30:04 | 2018-05-08T14:30:04 | 115,803,231 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,177 | py | # In this exercise, write a program that will
# run your previous code twice.
# Please only modify the indicated area below!
from math import *
import random
landmarks = [[20.0, 20.0], [80.0, 80.0], [20.0, 80.0], [80.0, 20.0]]
world_size = 100.0
class robot:
def __init__(self):
self.x = random.random() * world_size
self.y = random.random() * world_size
self.orientation = random.random() * 2.0 * pi
self.forward_noise = 0.0;
self.turn_noise = 0.0;
self.sense_noise = 0.0;
def set(self, new_x, new_y, new_orientation):
if new_x < 0 or new_x >= world_size:
raise ValueError, 'X coordinate out of bound'
if new_y < 0 or new_y >= world_size:
raise ValueError, 'Y coordinate out of bound'
if new_orientation < 0 or new_orientation >= 2 * pi:
raise ValueError, 'Orientation must be in [0..2pi]'
self.x = float(new_x)
self.y = float(new_y)
self.orientation = float(new_orientation)
def set_noise(self, new_f_noise, new_t_noise, new_s_noise):
# makes it possible to change the noise parameters
# this is often useful in particle filters
self.forward_noise = float(new_f_noise);
self.turn_noise = float(new_t_noise);
self.sense_noise = float(new_s_noise);
def sense(self):
Z = []
for i in range(len(landmarks)):
dist = sqrt((self.x - landmarks[i][0]) ** 2 + (self.y - landmarks[i][1]) ** 2)
dist += random.gauss(0.0, self.sense_noise)
Z.append(dist)
return Z
def move(self, turn, forward):
if forward < 0:
raise ValueError, 'Robot cant move backwards'
# turn, and add randomness to the turning command
orientation = self.orientation + float(turn) + random.gauss(0.0, self.turn_noise)
orientation %= 2 * pi
# move, and add randomness to the motion command
dist = float(forward) + random.gauss(0.0, self.forward_noise)
x = self.x + (cos(orientation) * dist)
y = self.y + (sin(orientation) * dist)
x %= world_size # cyclic truncate
y %= world_size
# set particle
res = robot()
res.set(x, y, orientation)
res.set_noise(self.forward_noise, self.turn_noise, self.sense_noise)
return res
def Gaussian(self, mu, sigma, x):
# calculates the probability of x for 1-dim Gaussian with mean mu and var. sigma
return exp(- ((mu - x) ** 2) / (sigma ** 2) / 2.0) / sqrt(2.0 * pi * (sigma ** 2))
def measurement_prob(self, measurement):
# calculates how likely a measurement should be
prob = 1.0;
for i in range(len(landmarks)):
dist = sqrt((self.x - landmarks[i][0]) ** 2 + (self.y - landmarks[i][1]) ** 2)
prob *= self.Gaussian(dist, self.sense_noise, measurement[i])
return prob
def __repr__(self):
return '[x=%.6s y=%.6s orient=%.6s]' % (str(self.x), str(self.y), str(self.orientation))
#myrobot = robot()
#myrobot.set_noise(5.0, 0.1, 5.0)
#myrobot.set(30.0, 50.0, pi/2)
#myrobot = myrobot.move(-pi/2, 15.0)
#print myrobot.sense()
#myrobot = myrobot.move(-pi/2, 10.0)
#print myrobot.sense()
#### DON'T MODIFY ANYTHING ABOVE HERE! ENTER/MODIFY CODE BELOW ####
myrobot = robot()
myrobot = myrobot.move(0.1, 5.0)
Z = myrobot.sense()
N = 1000
p = []
for i in range(N):
x = robot()
x.set_noise(0.05, 0.05, 5.0)
p.append(x)
ntimes = 28
for i in range(ntimes):
p2 = []
for i in range(N):
p2.append(p[i].move(0.1, 5.0))
p = p2
w = []
for i in range(N):
w.append(p[i].measurement_prob(Z))
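    # Resampling wheel (explanatory comments added; logic unchanged): starting
    # from a random index, step around the circle of importance weights by a
    # random amount up to 2*max(w). Particles with large weights survive the
    # inner while-loop more often, so the N picks below are drawn (with
    # replacement) in proportion to each particle's measurement probability.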
p3 = []
index = int(random.random() * N)
beta = 0.0
mw = max(w)
for i in range(N):
beta += random.random() * 2.0 * mw
while beta > w[index]:
beta -= w[index]
index = (index + 1) % N
p3.append(p[index])
p = p3
print p #Leave this print statement for grading purposes!
| [
"[email protected]"
] | |
d91241e28a7781f4e1b8d3e0aaae4e0162f0622f | a3eccc652f83815318bdb033a33573c5b1e073e9 | /nac/crm/views/add_lead.py | 7d6cf4cdf3c36cb198817a0d24a038fb42afd38d | [] | no_license | jsvelu/coms-dev--old | 8139fa511e2985b4d71550f1c59402069d09edf3 | de300ad6ef947d29380972a6efe809f4ef05d7e1 | refs/heads/main | 2023-07-17T20:44:36.101738 | 2021-09-04T21:56:38 | 2021-09-04T21:56:38 | 403,158,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | from authtools.views import resolve_url_lazy
from django.urls import reverse_lazy
from django.views.generic.edit import FormView
from rules.contrib.views import PermissionRequiredMixin
from crm.forms.lead import LeadForm
from customers.models import Customer
from dealerships.models import Dealership
class AddLeadView(PermissionRequiredMixin, FormView):
template_name = 'crm/add_lead.html'
form_class = LeadForm
success_url = reverse_lazy('crm:lead_listing')
raise_exception = True
permission_required = 'customers.add_customer'
def get_allowed_dealerships(self):
dealer_choices = None
if self.request.user.has_perm('customers.manage_self_and_dealership_leads_only'):
dealer_choices = Dealership.objects.filter(dealershipuser=self.request.user)
if self.request.user.has_perm('crm.manage_all_leads'):
dealer_choices = Dealership.objects.all()
return dealer_choices
def get_form_kwargs(self):
kwargs = super(AddLeadView, self).get_form_kwargs()
dealer_choices = [(dealership.id, dealership.name) for dealership in self.get_allowed_dealerships()]
kwargs.update({'dealership_choices': dealer_choices})
return kwargs
def get_context_data(self, **kwargs):
context = super(AddLeadView, self).get_context_data(**kwargs)
context['sub_heading'] = 'Add Lead'
return context
def form_valid(self, form):
lead = form.save(commit=False)
lead.lead_type = Customer.LEAD_TYPE_LEAD
lead.save()
return super(AddLeadView, self).form_valid(form)
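# Added illustrative note (not part of the original view): a minimal sketch of
# how this view might be wired into the project's URLconf. The path, module
# layout and app_name below are assumptions for illustration only; the real
# project already defines a 'crm' namespace (see success_url above).
#
#     # crm/urls.py (hypothetical)
#     from django.urls import path
#     from crm.views.add_lead import AddLeadView
#
#     app_name = 'crm'
#     urlpatterns = [
#         path('leads/add/', AddLeadView.as_view(), name='add_lead'),
#     ]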
| [
"[email protected]"
] | |
987eefa3b4567bac22ef4aa1bb712480e2cf395b | e7669caf1a4ce9053bb720bcfa2d85a4ee35ea65 | /pyrosetta_toolkit/window_modules/protocol_builder/ProtocolBuilder.py | 9317ac4215847ebde667e595866fa8e5255693b9 | [
"BSD-3-Clause"
] | permissive | SchiefLab/Jade | f81190321df061f8317db869a64a7b87a13d664a | 791d26892aacf21bca733aa9c1c6ea3f2be51958 | refs/heads/master | 2023-01-10T15:14:08.431470 | 2018-11-07T17:11:02 | 2018-11-07T17:11:02 | 41,381,258 | 7 | 2 | NOASSERTION | 2022-12-26T20:35:53 | 2015-08-25T18:39:20 | Python | UTF-8 | Python | false | false | 25,209 | py | #!/usr/bin/env python
#
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington CoMotion, email: [email protected].
## @file /GUIs/pyrosetta_toolkit/window_modules/protocol_builder/ProtocolBuilder.py
## @brief Original protocol builder. DEPRECATED. being replaced by rosetta script creator.
## @author Jared Adolf-Bryfogle ([email protected])
from rosetta import *
from Tkinter import *
import glob
import tkFileDialog
import tkMessageBox
import tkSimpleDialog
#Protocol Variables (Main)
#Definitions for PROTOCOLS - SHOULD CHANGE!!!
#THESE SHOULD BE IN A SEPARATE FILE!!
ProtTypesDic = dict()
ProtTypes = ("Low-Res(Frag)", "High-Res", "Loop Minimization", "Full Minimization", "Vaccinity", "Interface", "ShearMoves", "Design(ResFile)", "CDR-Specific", "Tools")
ProtTypesDic["ShearMoves"]=("Shear Custom", "Shear CustomFile", "Shear 1", "Shear 2", "Shear 3", "Shear 4", "Shear 5", "Shear 10", "Shear 15", "Shear 20", "Shear 30", "Shear 50", "Shear 80", "Shear 100", "Shear 120", "Shear 150", "Shear 180")
ProtTypesDic["Low-Res(Frag)"] = ("Low: Pure Frag Anneal", "Low-Perturb CCD: Default", "Low-Perturb KIC")
ProtTypesDic["High-Res"] = ("Hi-Refine CCD: Default", "Hi-Refine KIC")
ProtTypesDic["Vaccinity"] = ("Pack Vaccinity", "Pack Vaccinity (SCWRL)", "Relax Vaccinity", "LowPureFragAnneal Vaccinity", "Minimize Vaccinity")
ProtTypesDic["Interface"] = ("Pack Interface", "Pack Interface (SCWRL)", "Relax Interface", "Minimize Interface")
ProtTypesDic["Tools"] = ("CCD Loop Closure", "Linearize Loops", "Randomize Loops")
ProtTypesDic["Full Minimization"]=("Fast Relax", "Fast Relax BB", "Classic Relax", "Backrub", "Minimize", "Optimize Rot", "SCWRL")
ProtTypesDic["Loop Minimization"]=("Classic Loop Relax", "Fast Loop Relax", "Fast Loop Relax BB", "Loop Backrub", "Loop Minimize", "Loop Optimize Rot", "SCWRL(Loops)", "SCWRL(Seq file)")
ProtTypesDic["CDR-Specific"]=("EMPTY", " ")
ProtTypesDic["Design(ResFile)"] = ("PackDesign", "Pack Vaccinity Design", "Pack Interface Design")
#Need to Set Prot and Set Frag upon startup.
#Adds data into the protocol list.
def getProtocolLists(LisBox):
VarItem = LisBox.get(LisBox.curselection())
x = len(DicProtocol)
x+=1
#Creates Dictionaries to tie them all together
p = "p" + repr(x)
print p
#OLD WAY: DicProtocol[p] = [VarItem, int(VarRounds.get()), int(Varcheck_button_Cen.get()), int(VarPym.get()), int(VarFragLen.get())]
DicProtocol[p]=dict()
#NEW WAY: LOAD INTO DICTofDICT
#NEWNEW way is to have a class hold all of these in an object!!!
#[name] = VarItem(name)
#[rounds] = Rounds (integer)
#[centroid]= Centroid (integer) 0 or 1
#[obs] = PyMOL Observer (integer) 0 or 1
#[shear] = Shear Settings
#[lisloops] = Loop Settings
#[score] = Score Function!!! (Literally)
#Accessory Files
    #[frag] = Fragment Length (int) 9 or 3. Should be context dependent.
#[fragfile] = Accessory File - Will be actual frag file soon.
#Vaccinity:
    #[neighborcutoff] = Neighbor Distance for vaccinity measure
#[onlyaround] = 0 or 1. 1 means only around vaccinity, not including residues
#[fixloopbb] = 0 or 1. 1 indicates to keep bb of loop fixed during Relax Vaccinity
#[fixvacbb] = 0 or 1. 1 indicates to keep bb of vaccinity fixed during Relax Vaccinity.
#Interface
    #[interfacecutoff] = Interface cutoff distance
#[interfacefixed] = Chains to keep fixed in Interface Protocols: (A-B-C)
#[interfacepack] = Chains to only allow repacking for Relax Protocols
#Only Around Combinations: (onlyaround, fixloopbb, fixvacbb)
#0, 0, 0: Relax All
#0, 1, 0: Relax Vaccinity, Keep Loop chi open
#0, 0, 1: Relax Loop, Keep vaccinity chi open
#1, 0, 0: Relax Vaccinity, Keep Loop chi closed
#Other : All default to Pack Vaccinity - So we change the name to Pack Vaccinity.
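    #Illustrative example (hypothetical values, added for clarity) of one
    #fully populated entry after this function has run:
    #  DicProtocol["p1"] = {"name": "Relax Vaccinity", "rounds": 3,
    #                       "centroid": 0, "obs": 1, "score": score,
    #                       "lisloops": [...], "neighborcutoff": 5.0,
    #                       "target": [0, 0], "vaccinity": [1, 1]}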
DicProtocol[p] = dict()
#Asks User to input the number of Rounds Desired.
rounds = tkSimpleDialog.askinteger(title = 'rounds', prompt = 'Please enter the number of rounds desired: ', initialvalue = 1)
VarRounds.set(repr(rounds))
DicProtocol[p]['name']=VarItem
#OLD: DicProtocol[p]['rounds'] = int(VarRounds.get())
DicProtocol[p]['rounds'] = rounds
DicProtocol[p]['centroid'] = int(Varcheck_button_Cen.get())
DicProtocol[p]['obs']= int(VarPym.get())
#DicProtocol[p]['frag'] = int(VarFraglen.get())
#DicProtocol[p]['score] = Get Currently Set Score
DicProtocol[p]['score'] = score
#Adds the LisLoop - Python has references too, so be careful!
DicProtocol[p]['lisloops'] = LisLoop[:]
#This handles protocol specific data
#Shear Data
VarItemSplit = VarItem.split()
    #Pseudo-fix for the rest of the code where it tests itemsplit[1] etc.
if len(VarItemSplit) ==1:
VarItemSplit[1:4] = "0"
#if "Frag":
#Prompt for FragLength and Choose FragFile
#This Needs to be protocol specific prompting.
#Sets ShearSettings from WINDOW Prompt
if (VarItemSplit[0] == "Shear" and VarItemSplit[1] =="Custom"):
print "Appending Custom Shear Protocol"
ShearSettings=[int(Shear1.get()), int(Shear2.get()), int(Shear3.get()), int(Shear4.get()), ShearKT.get(), shearAnneal.get()]
DicProtocol[p]['shearsettings']=ShearSettings[:]
elif (VarItemSplit[0] == "Shear" and VarItemSplit[1]=="CustomFile"):
print "Appending Accessory Shear File"
elif (VarItemSplit[0] == "Shear"):
print "Appending default shear settings..."
ShearSettings = []
for i in range(0, 4):
ShearSettings.append(VarItemSplit[1])
ShearSettings.append(ShearKT.get()); ShearSettings.append(shearAnneal.get())
DicProtocol[p]['shearsettings'] = ShearSettings[:]
#OLD: DicProtocol[p].append(ShearSettings)
#Asks user for Cutoff Length and Vaccinity Settings for each protocol.
if VarItemSplit[1] == "Vaccinity":
length = tkSimpleDialog.askfloat(title = "vaccinity", prompt = "entry_er Vaccinity Cuttoff (A)", initialvalue = 5.0)
DicProtocol[p]['neighborcutoff'] = length
if VarItemSplit[0]=="Pack" and VarItemSplit[1] == "Vaccinity":
onlyaround = tkMessageBox.askquestion(title = "onlyaround", message = "Pack/Relax Given residues in addition to vaccinity?", default = tkMessageBox.YES)
if onlyaround == "yes":
print "Packing Vaccinity and residues chosen..."
DicProtocol[p]['onlyaround']=0
else:
print "Only packing Vaccinity...."
DicProtocol[p]['onlyaround']=1
#Relax and Minimize Vaccinity Prompts
if VarItem == "Relax Vaccinity" or VarItem == "Minimize Vaccinity":
#OLD: fixBB = tkMessageBox.askquestion(title = "fix surrounding", message = "Fix Backbone of Vaccinity?", default = tkMessageBox.YES)
#OLD: if fixBB == 'yes':
#OLD: DicProtocol[p]['fixvacbb'] = 1
#OLD: else:
#OLD: DicProtocol[p]['fixvacbb'] = 0
#OLD: if onlyaround == "yes":
#OLD: fixBBLoop = tkMessageBox.askquestion(title = 'Loop', message = "Fix Backbone of Loop Residues?", default = tkMessageBox.NO)
#OLD: if fixBBLoop == 'yes':
#OLD: DicProtocol[p]['fixloopbb'] = 1
#OLD: else:
#OLD: DicProtocol[p]['fixloopbb'] = 0
#OLD: if (onlyaround == "no" and fixBB == 'yes'):
#OLD: tkMessageBox.showinfo(message = "BB vaccinity fixed, no packing of loop residues - Runnng Rosetta Pack Residues instead of relax.")
#OLD: DicProtocol[p]['name']="Pack Vaccinity"
#OLD: if onlyaround == 'yes' and fixBB =='yes' and fixBBLoop == 'yes':
#OLD: tkMessageBox.showinfo(message = "BB vaccinity fixed for loop and surrounding residues - Running Rosetta Pack Residues instead of relax.")
#OLD: DicProtocol[p]['name']="Pack Vaccinity"
vacwindow= vaccinityrelaxwindow(c)
#c.wait_window(vacwindow)
if vacwindow.result == None:
print "Canceled. Using Default settings for protocol."
FixTarget = "Open"; FixVaccinity = "Open"; FixBoth = "UnSet"
else:
(FixTarget, FixVaccinity, FixBoth) = vacwindow.result
#Here, we translate what each of these mean (Hopefully):
target = [0, 0]; #(BB, Chi) 0 means to keep; 1 means to fix
vaccinity = [0, 0]
#BOTH
if FixBoth =="Vaccinity":
vaccinity =[1, 1]
print "Only Relaxing Target. You Should run Loop Relax instead."
elif FixBoth == "Target":
target = [1, 1]
print "Only Relaxing Vaccinity"
#TARGET
if FixTarget == "Open":
print "Opening Target"
target=[0,0]
elif FixTarget == "Fix":
print "Fixing all atoms of Target"
target = [1, 1]
elif FixTarget =="Fix BB":
print "Fixing backbone of Target"
target[0]=1
elif FixTarget =="Fix Chi":
print "Fixing rotamers of Target"
target[1]=1
#VACCINITY
if FixVaccinity =="Open":
print "Opening Vaccinity"
vaccinity = [0, 0]
elif FixVaccinity =="Fix":
print "Fixing all atoms of Vaccinity"
vaccinity = [1, 1]
elif FixVaccinity =="Fix BB":
print "Fixing backbone of Target"
vaccinity[0]=1
elif FixVaccinity =="Fix Chi":
vaccinity[1]=1
print vaccinity; print target
print "Final Settings for Relax Protocol:\n"
#TARGET
if target[0] ==1 and target[1]==1:
print "Fixing Target Completely"
elif target[0]==0 and target[1]==0:
print "Relaxing Target Completely"
elif target[0]==0:
print "Relaxing BackBone of Target"
elif target[1]==0:
print "Repacking Rotamers of Target"
#VACCINITY
if vaccinity[0] ==1 and vaccinity[1]==1:
print "Fixing Vaccinity Completely"
elif vaccinity[0]==0 and target[1]==0:
print "Relaxing Vaccinity Completely"
elif vaccinity[0]==0:
print "Relaxing BackBone of Vaccinity"
elif vaccinity[1]==0:
print "Repacking Rotamers of Vaccinity"
print "\n\n"
print "Switching Protocols as Needed:"
if vaccinity == [1, 1] and target == [1, 1]:
tkMessageBox.showerror(message = "Everything is fixed! Remove Protocol and Try Again!")
return
if vaccinity[0]==1 and target[0]==1:
print "Only running Side Chain Packing Protocol..."
DicProtocol[p]['vaccinity']=vaccinity
DicProtocol[p]['target'] = target
#print FixBB
#print FixChi
#print FixBoth
#return
#Interface Prompts
print VarItem; print VarItemSplit[1]
if VarItemSplit[1] == "Interface":
#Chain Cutoff Distance
length = tkSimpleDialog.askfloat(title = "interface", prompt = "entry_er Interface Cuttoff (A)", initialvalue = 6.0)
interface = tkSimpleDialog.askstring(title = "interface", prompt = "entry_er Interface (A:CD / AB:CD:F)")
#Options if you are specifically looking for interfaces between multiple chains.
        #Handles fully restricting some chains
intopt = tkMessageBox.askquestion(title = "interface", message = "Fix any Interface Chains?", default = tkMessageBox.NO)
if intopt =="yes":
            interfaceKeep = tkSimpleDialog.askstring(title = "interface", prompt = "Enter chains to keep fixed (A-B-C) :", initialvalue = "0")
DicProtocol[p]['interfacefixed'] = interfaceKeep
else:
DicProtocol[p]['interfacefixed']=0
#Handles Restricting some chains to repacking
if VarItem == "Relax Interface":
intopt2 = tkMessageBox.askquestion(title = 'interface', message = "Restrict any Interface Chains to rePacking?", default = tkMessageBox.NO)
if intopt2 =="yes":
interfacePack = tkSimpleDialog.askstring(title="interface", prompt = "Enter chains to only allow repacking (A:B:C)")
DicProtocol[p]['interfacepack']=interfacePack
else:
DicProtocol[p]['interfacepack']=0
DicProtocol[p]['interfacecutoff'] = length
DicProtocol[p]['interface'] = interface
    #Asks for Tolerance and Type for minimization
if VarItemSplit[1] =="Minimize" or VarItemSplit[0] == "Minimize":
tol = tkSimpleDialog.askfloat(title = "tolerance", prompt = "Please Enter Tolerance", initialvalue = .1)
DicProtocol[p]['tolerance'] = tol
#OLD: print DicProtocol
def getFragLists(LisBox):
VarItem = LisBox.get(LisBox.curselection())
VarItem = pwd + "/FragSets_Designs/" +VarItem
x = len(DicProtocol)
#Creates Dictionaries to tie them all together
p = "p" + repr(x)
print p
#OLD: DicProtocol[p]['frag']=int(VarFragLen.get())
    #Will check if this is a frag or design file.
length = tkSimpleDialog.askinteger(title="frag", prompt="entry_er Fragment Length", initialvalue=3)
DicProtocol[p]['frag'] = length
DicProtocol[p]['fragfile'] = VarItem
print DicProtocol
def clearLists(List):
DicProtocol.clear()
List.delete(0, END)
print "Protocol Directions Reset"
#REMOVE PROTOCOL from DicProtocol and ListBox
def RemoveListItem(LisIn):
#print DicProtocol
tup = LisIn.curselection()
num = tup[0]
#print "Num" + num
LisIn.delete(LisIn.curselection())
num = int(num)+1
key = "p"+repr(num)
#print "Key" +key
tot = len(DicProtocol)
#print "total Protocols"
end = tot
start = num-1
#Start ReArranging at Num. Num represents deleted number +1.
if num == tot:#Takes Care of End
DicProtocol.pop(("p"+repr(tot)));
print "Protocol Removed from Queue"
return
else:
DicProtocol.pop(key)
if num >= 1:
for i in range(num, end):
#print i
#print num
#print "\n"
DicProtocol[("p"+repr(num))]=DicProtocol[("p"+repr(num+1))]
num+=1
DicProtocol.pop(("p"+repr(tot))); #Removes the last item, as this will be copied into the -1.
#print DicProtocol
print "Protocol Removed from Queue"
class MainProtocols():
def __init__(self, main):
self.main = main
self.repeats = StringVar()
self.repeats.set(0)
self.DicProtocol = dict(); #Main Protocol Dictionary
        self.Centroid = StringVar(); #Centroid checkbox
self.Centroid.set(0)
self.FragLen = StringVar(); #Fragment Size
self.RoundVar = StringVar(); #Number of Protocol Rounds
#ADD Protocol: Places data into Protocol List using getProtocolLists
def SetandUpdateListsP(self, LisIn, LisOut):
LisItem = LisIn.get(LisIn.curselection())
getProtocolLists(self.LisProt1)
LisItem = self.RoundVar.get()+ "-"+LisItem
LisOut.insert(END, LisItem)
#ADD Accessory: Places data into Protocol by concatinating pwd, folder, and item.
def SetandUpdateListsF(self, LisIn, LisOut):
LisItem = LisIn.get(LisIn.curselection())
LisItem = pwd + "/FragSets_Designs/" +LisItem
LisItem = self.FragLen.get() + "-"+ LisItem
getFragLists(self.LisProt1Frag)
#UPDATES Listboxes of protocols after choosing type:
def updateLisProt(self, ListTypes, ListTypesFull):
ListTypesFull.delete(0, END)
type = ListTypes.get(ListTypes.curselection())
#print type
for res in ProtTypesDic[type]:
ListTypesFull.insert(END, res)
#Shows ShearControl Window. Should be chosen if/as Shear Control is being added.
def shoShearControl(self):
WinShear = Toplevel(self.main)
shearCon = ShearControl(WinShear)
shearCon.setTk()
shearCon.shoTk()
def kickProtocol(self):
"""
Handles kicking off the protocol with number of repeats.
"""
repeat = int(self.repeats.get())
count=1
if p.total_residue() == 0:
tkMessageBox.showerror(message = "No Pose Loaded! ")
return
decoys.set(tkSimpleDialog.askstring(title = "decoys", prompt = "Please entry_er the Number of Decoys Desired:", initialvalue = decoys.get()))
if int(decoys.get()) > 1:
print dirnameout.get()
if dirnameout.get() == "0":
dirnameout.set(tkFileDialog.askdirectory(initialdir = pwd, title = "Pick directory for decoys: "))
Outname.set(tkSimpleDialog.askstring(title = "decoy name", prompt = "Please enter a base name for the decoys: ", initialvalue = output.entry_Outname.get()))
if (dirnameout.get()==None)| (Outname.get()==None):
return
else:
print "Saving decoys to: " + dirnameout.get()
print "Base name for decoys: " + output.entry_Outname.get()
if repeat >=1:
print "Repeating protocol..."
print repeat
start = "1"+"-"+repr(len(self.DicProtocol))
numbers = tkSimpleDialog.askstring(title = "Simple Repeats", prompt = "Please Specify which protocols to repeat (1-4)", initialvalue = start)
numbers = numbers.split("-")
start = int(numbers[0]); end = int(numbers[1])
length = len(self.DicProtocol)
for i in range(1, repeat):
print i
for x in range(start, end+1):
print x
newkey = count+length
print "newkey:"+repr(newkey)
newkey = "p"+repr(newkey)
oldkey = "p"+repr(x)
print "oldkey:"+repr(oldkey)
self.DicProtocol[newkey] = self.DicProtocol[oldkey]
count+=1
print self.DicProtocol
p.assign(general_tools.protocols().initLoopProtocols(p, self.DicProtocol, decoys.get(), dirnameout.get() + "/" + output.entry_Outname.get(), LisLoop))
else:
p.assign(general_tools.protocols().initLoopProtocols(p, self.DicProtocol, decoys.get(), dirnameout.get() + "/" + output.entry_Outname.get(), LisLoop))
#This is the default kick protocol.
#DELETES Accessory File
def delAccessory(self):
acc = self.LisProt1Frag.get(self.LisProt1Frag.curselection())
file = pwd +"/FragSets_Designs/"+acc
os.remove(file)
self.LisProt1Frag.delete(self.LisProt1Frag.curselection())
#Prints Protocol Information:
def shoProt(self):
tup = self.LisSeeProt.curselection()
num = tup[0]
num = int(num)+1
key = "p"+repr(num)
print key
for settings in self.DicProtocol[key]:
print settings + " : "+repr(self.DicProtocol[key][settings])
Lmode.LisLoops.delete(0, END)
#print "Start: "+repr(LisLoop)
for i in range(0, len(LisLoop)):
LisLoop.pop(i)
#print "Deleted "+ repr(LisLoop)
for loops in self.DicProtocol[key]['lisloops']:
Lmode.LisLoops.insert(END, loops)
LisLoop.append(loops)
#print "Added " + repr(LisLoop)
return LisLoop
def editProt(self):
"""
Does Not WORK!!!?
"""
index = self.LisSeeProt.curselection()
LisItem = self.LisSeeProt.get(index)
tup = self.LisSeeProt.curselection()
num = tup[0]
num = int(num)+1
key = "p"+repr(num)
print key
getProtocolLists(self.LisSeeProt)
LisItem = self.RoundVar.get()+ "-"+LisItem
self.LisSeeProt.insert(index, LisItem)
def makeWindow(self, main):
self.check_button_Cen = Checkbutton(self.main, text="Centroid Mode", variable=self.Centroid)
self.label_Ed1 = Label(self.main, text="Protocol Builder", font="Arial")
self.label_Dire= Label(self.main, text="--Add protocol then add Accessory File--")
self.button_ProtAdd = Label(self.main, text="Rounds")
#OLD: self.entry_Rounds= Entry(self.main, textvariable=self.RoundVar, justify=CENTER)
self.RoundVar.set("0")
self.entry_FragLen= Entry(self.main, textvariable=self.FragLen, justify=CENTER)
self.FragLen.set("3")
self.button_Sho = Button(self.main, text = "Show Protocol Settings", command = lambda: self.shoProt())
self.button_Edi = Button(self.main, text = "Edit Protocol Settings", command = lambda: self.editProt())
#OLD: self.button_FragAdd = Label(self.main, text="Frag Length")
self.button_ProtRes = Button(self.main, text="Reset Protocol", command=lambda: clearLists(self.LisSeeProt))
self.LisProt1 = Listbox(self.main); self.LisProt1Frag = Listbox(self.main)
self.LisSeeProt = Listbox(self.main); self.LisProtTypes = Listbox(self.main)
#Repetition of Protocol
self.entry_Repeat = Entry(self.main, justify=CENTER, textvariable = self.repeats)
self.label_Repeat = Label(self.main, text = "Repeats")
#Sets Bindings
#Choose Type:
self.LisProtTypes.bind("<ButtonRelease-1>", lambda event: self.updateLisProt(self.LisProtTypes, self.LisProt1))
#Choose Protocol + Frag
self.LisProt1.bind("<Double-Button-1>", lambda event:self.SetandUpdateListsP(self.LisProt1, self.LisSeeProt))
self.LisProt1Frag.bind("<Double-Button-1>", lambda event:self.SetandUpdateListsF(self.LisProt1Frag, self.LisSeeProt))
#Final Listbox
self.LisSeeProt.bind("<Double-Button-1>", lambda event:RemoveListItem(self.LisSeeProt))
#OLD: self.LisSeeProt.bind('<ButtonRelease-1>', lambda event:self.shoProt())
#Shear Moves:
self.label_Shear = Label(self.main, text="Shear Moves")
self.check_button_Shear = Checkbutton(self.main, text = "Anneal?", variable = shearAnneal)
ShearKT.set(1.0)
self.button_Shear = Button(self.main, text = "Custom", command=lambda: self.shoShearControl())
#(self.LisProt1, self.LisSeeProt)
self.button_DelAcc = Button(self.main, text = "Delete Accessory File", command = lambda: self.delAccessory())
#Repeat of protocols
self.label_Rep = Label(self.main, text = "Repeats")
self.entry_Rep = Entry(self.main, justify = CENTER, textvariable = self.repeats)
#Kicks Protocol
self.kickprotocol=Button(self.main, text="Start Protocol", command=lambda: self.kickProtocol())
self.setProt()
self.setFrag()
self.shoProt()
def setProt(self):
"""
Sets up protocol List
"""
for type in ProtTypes:
self.LisProtTypes.insert(END, type)
def setFrag(self):
"""
Sets Fragment Listbox up. Should be able to customize this in the future.
        Should be a customization file that the Tk main window looks for. If found, it loads it.
Can save configuration on the fly.
"""
files = os.listdir(pwd+"/FragSets_Designs")
for file in files:
file = file.split("/")
self.LisProt1Frag.insert(END, file[len(file)-1])
#self.LisProt1Frag.insert(END, "CDR_Specific")
#self.LisProt1Frag.insert(END, "CDR_All")
#self.LisProt1Frag.insert(END, "Loops_All")
#self.LisProt1Frag.insert(END, "Sequence_Specific")
#self.LisProt1Frag.insert(END, "Neighbor_Dependant")
def shoTk(self):
self.label_Ed1.grid(row=11, column=3, columnspan=2, pady=15)
self.LisProtTypes.grid(row=13, column=3, rowspan=6); self.LisProt1.grid(row=13, column=4, rowspan=6); self.LisProt1Frag.grid(row=15, column=5, rowspan=6)
#self.button_ProtAdd.grid(row=14, column=2);
#OLD: self.entry_Rounds.grid(row=13, column=2)
#self.entry_FragLen.grid(row=15, column=2)
#OLD: self.button_FragAdd.grid(row=16, column=2)
self.button_ProtRes.grid(row=17, column=2)
self.check_button_Cen.grid(row=18, column=2)
self.label_Dire.grid(row=19, column=3, columnspan=2, sticky=W+E)
self.LisSeeProt.grid(row=20, column=3, rowspan=6, columnspan=2);
self.button_DelAcc.grid(row=21, column=5)
self.button_Sho.grid(row=22, column=5)
self.button_Edi.grid(row=23, column=5)
self.label_Shear.grid(row=12, column=5)
self.check_button_Shear.grid(row=13, column=5)
self.button_Shear.grid(row=14, column=5)
self.label_Rep.grid(row=27, column = 3, columnspan = 2)
self.entry_Rep.grid(row=26, column = 3, columnspan = 2)
self.kickprotocol.grid(row=24, column=2, sticky=W+E)
| [
"[email protected]"
] | |
f2df0a5b18ace630467007d86e19afacb3556e85 | 673f9b85708affe260b892a4eb3b1f6a0bd39d44 | /Botnets/App/App Web/PDG-env/lib/python3.6/site-packages/scapy/utils.py | 5b1a8f7bb5902f7ffccbf08702bef25a0a6f3395 | [
"MIT"
] | permissive | i2tResearch/Ciberseguridad_web | feee3fe299029bef96b158d173ce2d28ef1418e4 | e6cccba69335816442c515d65d9aedea9e7dc58b | refs/heads/master | 2023-07-06T00:43:51.126684 | 2023-06-26T00:53:53 | 2023-06-26T00:53:53 | 94,152,032 | 14 | 0 | MIT | 2023-09-04T02:53:29 | 2017-06-13T00:21:00 | Jupyter Notebook | UTF-8 | Python | false | false | 61,921 | py | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <[email protected]>
# This program is published under a GPLv2 license
"""
General utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from decimal import Decimal
import os
import sys
import socket
import collections
import random
import time
import gzip
import re
import struct
import array
import subprocess
import tempfile
import threading
import scapy.modules.six as six
from scapy.modules.six.moves import range
from scapy.config import conf
from scapy.consts import DARWIN, WINDOWS, WINDOWS_XP, OPENBSD
from scapy.data import MTU, DLT_EN10MB
from scapy.compat import orb, raw, plain_str, chb, bytes_base64,\
base64_bytes, hex_bytes, lambda_tuple_converter, bytes_encode
from scapy.error import log_runtime, Scapy_Exception, warning
from scapy.pton_ntop import inet_pton
###########
# Tools #
###########
def issubtype(x, t):
"""issubtype(C, B) -> bool
Return whether C is a class and if it is a subclass of class B.
When using a tuple as the second argument issubtype(X, (A, B, ...)),
is a shortcut for issubtype(X, A) or issubtype(X, B) or ... (etc.).
"""
return isinstance(x, type) and issubclass(x, t)
def get_temp_file(keep=False, autoext="", fd=False):
"""Creates a temporary file.
:param keep: If False, automatically delete the file when Scapy exits.
:param autoext: Suffix to add to the generated file name.
:param fd: If True, this returns a file-like object with the temporary
file opened. If False (default), this returns a file path.
"""
f = tempfile.NamedTemporaryFile(prefix="scapy", suffix=autoext,
delete=False)
if not keep:
conf.temp_files.append(f.name)
if fd:
return f
else:
# Close the file so something else can take it.
f.close()
return f.name
def get_temp_dir(keep=False):
"""Creates a temporary file, and returns its name.
:param keep: If False (default), the directory will be recursively
deleted when Scapy exits.
:return: A full path to a temporary directory.
"""
dname = tempfile.mkdtemp(prefix="scapy")
if not keep:
conf.temp_files.append(dname)
return dname
def sane_color(x):
r = ""
for i in x:
j = orb(i)
if (j < 32) or (j >= 127):
r += conf.color_theme.not_printable(".")
else:
r += chr(j)
return r
def sane(x):
r = ""
for i in x:
j = orb(i)
if (j < 32) or (j >= 127):
r += "."
else:
r += chr(j)
return r
@conf.commands.register
def restart():
"""Restarts scapy"""
if not conf.interactive or not os.path.isfile(sys.argv[0]):
raise OSError("Scapy was not started from console")
if WINDOWS:
try:
res_code = subprocess.call([sys.executable] + sys.argv)
except KeyboardInterrupt:
res_code = 1
finally:
os._exit(res_code)
os.execv(sys.executable, [sys.executable] + sys.argv)
def lhex(x):
if type(x) in six.integer_types:
return hex(x)
elif isinstance(x, tuple):
return "(%s)" % ", ".join(map(lhex, x))
elif isinstance(x, list):
return "[%s]" % ", ".join(map(lhex, x))
else:
return x
@conf.commands.register
def hexdump(x, dump=False):
"""Build a tcpdump like hexadecimal view
:param x: a Packet
:param dump: define if the result must be printed or returned in a variable
:returns: a String only when dump=True
"""
s = ""
x = bytes_encode(x)
x_len = len(x)
i = 0
while i < x_len:
s += "%04x " % i
for j in range(16):
if i + j < x_len:
s += "%02X " % orb(x[i + j])
else:
s += " "
s += " %s\n" % sane_color(x[i:i + 16])
i += 16
# remove trailing \n
s = s[:-1] if s.endswith("\n") else s
if dump:
return s
else:
print(s)
@conf.commands.register
def linehexdump(x, onlyasc=0, onlyhex=0, dump=False):
"""Build an equivalent view of hexdump() on a single line
    Note that setting both onlyasc and onlyhex to 1 results in an empty output
:param x: a Packet
:param onlyasc: 1 to display only the ascii view
:param onlyhex: 1 to display only the hexadecimal view
:param dump: print the view if False
:returns: a String only when dump=True
"""
s = ""
s = hexstr(x, onlyasc=onlyasc, onlyhex=onlyhex, color=not dump)
if dump:
return s
else:
print(s)
@conf.commands.register
def chexdump(x, dump=False):
"""Build a per byte hexadecimal representation
Example:
>>> chexdump(IP())
0x45, 0x00, 0x00, 0x14, 0x00, 0x01, 0x00, 0x00, 0x40, 0x00, 0x7c, 0xe7, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01 # noqa: E501
:param x: a Packet
:param dump: print the view if False
:returns: a String only if dump=True
"""
x = bytes_encode(x)
s = ", ".join("%#04x" % orb(x) for x in x)
if dump:
return s
else:
print(s)
@conf.commands.register
def hexstr(x, onlyasc=0, onlyhex=0, color=False):
"""Build a fancy tcpdump like hex from bytes."""
x = bytes_encode(x)
_sane_func = sane_color if color else sane
s = []
if not onlyasc:
s.append(" ".join("%02X" % orb(b) for b in x))
if not onlyhex:
s.append(_sane_func(x))
return " ".join(s)
def repr_hex(s):
""" Convert provided bitstring to a simple string of hex digits """
return "".join("%02x" % orb(x) for x in s)
@conf.commands.register
def hexdiff(x, y):
"""Show differences between 2 binary strings"""
x = bytes_encode(x)[::-1]
y = bytes_encode(y)[::-1]
SUBST = 1
INSERT = 1
d = {(-1, -1): (0, (-1, -1))}
for j in range(len(y)):
d[-1, j] = d[-1, j - 1][0] + INSERT, (-1, j - 1)
for i in range(len(x)):
d[i, -1] = d[i - 1, -1][0] + INSERT, (i - 1, -1)
for j in range(len(y)):
for i in range(len(x)):
d[i, j] = min((d[i - 1, j - 1][0] + SUBST * (x[i] != y[j]), (i - 1, j - 1)), # noqa: E501
(d[i - 1, j][0] + INSERT, (i - 1, j)),
(d[i, j - 1][0] + INSERT, (i, j - 1)))
backtrackx = []
backtracky = []
i = len(x) - 1
j = len(y) - 1
while not (i == j == -1):
i2, j2 = d[i, j][1]
backtrackx.append(x[i2 + 1:i + 1])
backtracky.append(y[j2 + 1:j + 1])
i, j = i2, j2
x = y = i = 0
colorize = {0: lambda x: x,
-1: conf.color_theme.left,
1: conf.color_theme.right}
dox = 1
doy = 0
btx_len = len(backtrackx)
while i < btx_len:
linex = backtrackx[i:i + 16]
liney = backtracky[i:i + 16]
xx = sum(len(k) for k in linex)
yy = sum(len(k) for k in liney)
if dox and not xx:
dox = 0
doy = 1
if dox and linex == liney:
doy = 1
if dox:
xd = y
j = 0
while not linex[j]:
j += 1
xd -= 1
print(colorize[doy - dox]("%04x" % xd), end=' ')
x += xx
line = linex
else:
print(" ", end=' ')
if doy:
yd = y
j = 0
while not liney[j]:
j += 1
yd -= 1
print(colorize[doy - dox]("%04x" % yd), end=' ')
y += yy
line = liney
else:
print(" ", end=' ')
print(" ", end=' ')
cl = ""
for j in range(16):
if i + j < btx_len:
if line[j]:
col = colorize[(linex[j] != liney[j]) * (doy - dox)]
print(col("%02X" % orb(line[j])), end=' ')
if linex[j] == liney[j]:
cl += sane_color(line[j])
else:
cl += col(sane(line[j]))
else:
print(" ", end=' ')
cl += " "
else:
print(" ", end=' ')
if j == 7:
print("", end=' ')
print(" ", cl)
if doy or not yy:
doy = 0
dox = 1
i += 16
else:
if yy:
dox = 0
doy = 1
else:
i += 16
if struct.pack("H", 1) == b"\x00\x01": # big endian
checksum_endian_transform = lambda chk: chk
else:
checksum_endian_transform = lambda chk: ((chk >> 8) & 0xff) | chk << 8
def checksum(pkt):
if len(pkt) % 2 == 1:
pkt += b"\0"
s = sum(array.array("H", pkt))
s = (s >> 16) + (s & 0xffff)
s += s >> 16
s = ~s
return checksum_endian_transform(s) & 0xffff
def _fletcher16(charbuf):
# This is based on the GPLed C implementation in Zebra <http://www.zebra.org/> # noqa: E501
c0 = c1 = 0
for char in charbuf:
c0 += orb(char)
c1 += c0
c0 %= 255
c1 %= 255
return (c0, c1)
@conf.commands.register
def fletcher16_checksum(binbuf):
"""Calculates Fletcher-16 checksum of the given buffer.
Note:
If the buffer contains the two checkbytes derived from the Fletcher-16 checksum # noqa: E501
the result of this function has to be 0. Otherwise the buffer has been corrupted. # noqa: E501
"""
(c0, c1) = _fletcher16(binbuf)
return (c1 << 8) | c0
@conf.commands.register
def fletcher16_checkbytes(binbuf, offset):
"""Calculates the Fletcher-16 checkbytes returned as 2 byte binary-string.
Including the bytes into the buffer (at the position marked by offset) the # noqa: E501
global Fletcher-16 checksum of the buffer will be 0. Thus it is easy to verify # noqa: E501
the integrity of the buffer on the receiver side.
For details on the algorithm, see RFC 2328 chapter 12.1.7 and RFC 905 Annex B. # noqa: E501
"""
# This is based on the GPLed C implementation in Zebra <http://www.zebra.org/> # noqa: E501
if len(binbuf) < offset:
raise Exception("Packet too short for checkbytes %d" % len(binbuf))
binbuf = binbuf[:offset] + b"\x00\x00" + binbuf[offset + 2:]
(c0, c1) = _fletcher16(binbuf)
x = ((len(binbuf) - offset - 1) * c0 - c1) % 255
if (x <= 0):
x += 255
y = 510 - c0 - x
if (y > 255):
y -= 255
return chb(x) + chb(y)
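# Added sketch: splicing the two checkbytes back into the buffer at the chosen
# offset makes the overall Fletcher-16 checksum verify to 0, as the docstrings
# above describe. Offset 2 and the sample bytes are arbitrary choices.
def _fletcher16_example():
    buf = b"\x01\x02\x00\x00\x03\x04"      # bytes 2-3 reserved for checkbytes
    cb = fletcher16_checkbytes(buf, 2)     # checkbytes for offset 2
    assert fletcher16_checksum(buf[:2] + cb + buf[4:]) == 0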
def mac2str(mac):
return b"".join(chb(int(x, 16)) for x in plain_str(mac).split(':'))
def valid_mac(mac):
try:
return len(mac2str(mac)) == 6
except ValueError:
pass
return False
def str2mac(s):
if isinstance(s, str):
return ("%02x:" * 6)[:-1] % tuple(map(ord, s))
return ("%02x:" * 6)[:-1] % tuple(s)
def randstring(l):
"""
Returns a random string of length l (l >= 0)
"""
return b"".join(struct.pack('B', random.randint(0, 255)) for _ in range(l))
def zerofree_randstring(l):
"""
Returns a random string of length l (l >= 0) without zero in it.
"""
return b"".join(struct.pack('B', random.randint(1, 255)) for _ in range(l))
def strxor(s1, s2):
"""
Returns the binary XOR of the 2 provided strings s1 and s2. s1 and s2
must be of same length.
"""
return b"".join(map(lambda x, y: chb(orb(x) ^ orb(y)), s1, s2))
def strand(s1, s2):
"""
Returns the binary AND of the 2 provided strings s1 and s2. s1 and s2
must be of same length.
"""
return b"".join(map(lambda x, y: chb(orb(x) & orb(y)), s1, s2))
# Workaround bug 643005 : https://sourceforge.net/tracker/?func=detail&atid=105470&aid=643005&group_id=5470 # noqa: E501
try:
socket.inet_aton("255.255.255.255")
except socket.error:
def inet_aton(x):
if x == "255.255.255.255":
return b"\xff" * 4
else:
return socket.inet_aton(x)
else:
inet_aton = socket.inet_aton
inet_ntoa = socket.inet_ntoa
def atol(x):
try:
ip = inet_aton(x)
except socket.error:
ip = inet_aton(socket.gethostbyname(x))
return struct.unpack("!I", ip)[0]
def valid_ip(addr):
try:
addr = plain_str(addr)
except UnicodeDecodeError:
return False
try:
atol(addr)
except (OSError, ValueError, socket.error):
return False
return True
def valid_net(addr):
try:
addr = plain_str(addr)
except UnicodeDecodeError:
return False
if '/' in addr:
ip, mask = addr.split('/', 1)
return valid_ip(ip) and mask.isdigit() and 0 <= int(mask) <= 32
return valid_ip(addr)
def valid_ip6(addr):
try:
addr = plain_str(addr)
except UnicodeDecodeError:
return False
try:
inet_pton(socket.AF_INET6, addr)
except socket.error:
try:
socket.getaddrinfo(addr, None, socket.AF_INET6)[0][4][0]
except socket.error:
return False
return True
def valid_net6(addr):
try:
addr = plain_str(addr)
except UnicodeDecodeError:
return False
if '/' in addr:
ip, mask = addr.split('/', 1)
return valid_ip6(ip) and mask.isdigit() and 0 <= int(mask) <= 128
return valid_ip6(addr)
if WINDOWS_XP:
# That is a hell of compatibility :(
def ltoa(x):
return inet_ntoa(struct.pack("<I", x & 0xffffffff))
else:
def ltoa(x):
return inet_ntoa(struct.pack("!I", x & 0xffffffff))
def itom(x):
return (0xffffffff00000000 >> x) & 0xffffffff
class ContextManagerSubprocess(object):
"""
    Context manager that eases checking for an unknown command.
Example:
>>> with ContextManagerSubprocess("my custom message", "unknown_command"):
>>> subprocess.Popen(["unknown_command"])
"""
def __init__(self, name, prog):
self.name = name
self.prog = prog
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if isinstance(exc_value, (OSError, TypeError)):
msg = "%s: executing %r failed" % (self.name, self.prog) if self.prog else "Could not execute %s, is it installed ?" % self.name # noqa: E501
if not conf.interactive:
raise OSError(msg)
else:
log_runtime.error(msg, exc_info=True)
return True # Suppress the exception
class ContextManagerCaptureOutput(object):
"""
    Context manager that intercepts the console's output.
Example:
>>> with ContextManagerCaptureOutput() as cmco:
... print("hey")
... assert cmco.get_output() == "hey"
"""
def __init__(self):
self.result_export_object = ""
try:
import mock # noqa: F401
except Exception:
raise ImportError("The mock module needs to be installed !")
def __enter__(self):
import mock
def write(s, decorator=self):
decorator.result_export_object += s
mock_stdout = mock.Mock()
mock_stdout.write = write
self.bck_stdout = sys.stdout
sys.stdout = mock_stdout
return self
def __exit__(self, *exc):
sys.stdout = self.bck_stdout
return False
def get_output(self, eval_bytes=False):
if self.result_export_object.startswith("b'") and eval_bytes:
return plain_str(eval(self.result_export_object))
return self.result_export_object
def do_graph(graph, prog=None, format=None, target=None, type=None, string=None, options=None): # noqa: E501
"""do_graph(graph, prog=conf.prog.dot, format="svg",
target="| conf.prog.display", options=None, [string=1]):
string: if not None, simply return the graph string
graph: GraphViz graph description
format: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option
target: filename or redirect. Defaults pipe to Imagemagick's display program # noqa: E501
prog: which graphviz program to use
options: options to be passed to prog"""
if format is None:
if WINDOWS:
format = "png" # use common format to make sure a viewer is installed # noqa: E501
else:
format = "svg"
if string:
return graph
if type is not None:
format = type
if prog is None:
prog = conf.prog.dot
start_viewer = False
if target is None:
if WINDOWS:
target = get_temp_file(autoext="." + format)
start_viewer = True
else:
with ContextManagerSubprocess("do_graph()", conf.prog.display):
target = subprocess.Popen([conf.prog.display],
stdin=subprocess.PIPE).stdin
if format is not None:
format = "-T%s" % format
if isinstance(target, str):
if target.startswith('|'):
target = subprocess.Popen(target[1:].lstrip(), shell=True,
stdin=subprocess.PIPE).stdin
elif target.startswith('>'):
target = open(target[1:].lstrip(), "wb")
else:
target = open(os.path.abspath(target), "wb")
proc = subprocess.Popen("\"%s\" %s %s" % (prog, options or "", format or ""), # noqa: E501
shell=True, stdin=subprocess.PIPE, stdout=target)
proc.stdin.write(bytes_encode(graph))
proc.stdin.close()
proc.wait()
try:
target.close()
except Exception:
pass
if start_viewer:
# Workaround for file not found error: We wait until tempfile is written. # noqa: E501
waiting_start = time.time()
while not os.path.exists(target.name):
time.sleep(0.1)
if time.time() - waiting_start > 3:
warning("Temporary file '%s' could not be written. Graphic will not be displayed.", tempfile) # noqa: E501
break
else:
if conf.prog.display == conf.prog._default:
os.startfile(target.name)
else:
with ContextManagerSubprocess("do_graph()", conf.prog.display):
subprocess.Popen([conf.prog.display, target.name])
_TEX_TR = {
"{": "{\\tt\\char123}",
"}": "{\\tt\\char125}",
"\\": "{\\tt\\char92}",
"^": "\\^{}",
"$": "\\$",
"#": "\\#",
"_": "\\_",
"&": "\\&",
"%": "\\%",
"|": "{\\tt\\char124}",
"~": "{\\tt\\char126}",
"<": "{\\tt\\char60}",
">": "{\\tt\\char62}",
}
def tex_escape(x):
s = ""
for c in x:
s += _TEX_TR.get(c, c)
return s
def colgen(*lstcol, **kargs):
"""Returns a generator that mixes provided quantities forever
trans: a function to convert the three arguments into a color. lambda x,y,z:(x,y,z) by default""" # noqa: E501
if len(lstcol) < 2:
lstcol *= 2
trans = kargs.get("trans", lambda x, y, z: (x, y, z))
while True:
for i in range(len(lstcol)):
for j in range(len(lstcol)):
for k in range(len(lstcol)):
if i != j or j != k or k != i:
yield trans(lstcol[(i + j) % len(lstcol)], lstcol[(j + k) % len(lstcol)], lstcol[(k + i) % len(lstcol)]) # noqa: E501
def incremental_label(label="tag%05i", start=0):
while True:
yield label % start
start += 1
def binrepr(val):
return bin(val)[2:]
def long_converter(s):
return int(s.replace('\n', '').replace(' ', ''), 16)
#########################
# Enum management #
#########################
class EnumElement:
_value = None
def __init__(self, key, value):
self._key = key
self._value = value
def __repr__(self):
return "<%s %s[%r]>" % (self.__dict__.get("_name", self.__class__.__name__), self._key, self._value) # noqa: E501
def __getattr__(self, attr):
return getattr(self._value, attr)
def __str__(self):
return self._key
def __bytes__(self):
return bytes_encode(self.__str__())
def __hash__(self):
return self._value
def __int__(self):
return int(self._value)
def __eq__(self, other):
return self._value == int(other)
def __neq__(self, other):
return not self.__eq__(other)
class Enum_metaclass(type):
element_class = EnumElement
def __new__(cls, name, bases, dct):
rdict = {}
for k, v in six.iteritems(dct):
if isinstance(v, int):
v = cls.element_class(k, v)
dct[k] = v
rdict[v] = k
dct["__rdict__"] = rdict
return super(Enum_metaclass, cls).__new__(cls, name, bases, dct)
def __getitem__(self, attr):
return self.__rdict__[attr]
def __contains__(self, val):
return val in self.__rdict__
def get(self, attr, val=None):
return self.__rdict__.get(attr, val)
def __repr__(self):
return "<%s>" % self.__dict__.get("name", self.__name__)
###################
# Object saving #
###################
def export_object(obj):
print(bytes_base64(gzip.zlib.compress(six.moves.cPickle.dumps(obj, 2), 9)))
def import_object(obj=None):
if obj is None:
obj = sys.stdin.read()
return six.moves.cPickle.loads(gzip.zlib.decompress(base64_bytes(obj.strip()))) # noqa: E501
def save_object(fname, obj):
"""Pickle a Python object"""
fd = gzip.open(fname, "wb")
six.moves.cPickle.dump(obj, fd)
fd.close()
def load_object(fname):
"""unpickle a Python object"""
return six.moves.cPickle.load(gzip.open(fname, "rb"))
@conf.commands.register
def corrupt_bytes(s, p=0.01, n=None):
"""Corrupt a given percentage or number of bytes from a string"""
s = array.array("B", bytes_encode(s))
s_len = len(s)
if n is None:
n = max(1, int(s_len * p))
for i in random.sample(range(s_len), n):
s[i] = (s[i] + random.randint(1, 255)) % 256
return s.tostring() if six.PY2 else s.tobytes()
@conf.commands.register
def corrupt_bits(s, p=0.01, n=None):
"""Flip a given percentage or number of bits from a string"""
s = array.array("B", bytes_encode(s))
s_len = len(s) * 8
if n is None:
n = max(1, int(s_len * p))
for i in random.sample(range(s_len), n):
s[i // 8] ^= 1 << (i % 8)
return s.tostring() if six.PY2 else s.tobytes()
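# Added sketch: when n is given, corrupt_bits() flips exactly n distinct bit
# positions, so the mutated buffer differs from the original in exactly n bits.
def _corrupt_bits_example():
    original = b"\x00" * 8
    mutated = corrupt_bits(original, n=3)
    flipped = sum(bin(orb(a) ^ orb(b)).count("1")
                  for a, b in zip(original, mutated))
    assert flipped == 3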
#############################
# pcap capture file stuff #
#############################
@conf.commands.register
def wrpcap(filename, pkt, *args, **kargs):
"""Write a list of packets to a pcap file
filename: the name of the file to write packets to, or an open,
writable file-like object. The file descriptor will be
closed at the end of the call, so do not use an object you
do not want to close (e.g., running wrpcap(sys.stdout, [])
in interactive mode will crash Scapy).
gz: set to 1 to save a gzipped capture
linktype: force linktype value
endianness: "<" or ">", force endianness
sync: do not bufferize writes to the capture file
"""
with PcapWriter(filename, *args, **kargs) as fdesc:
fdesc.write(pkt)
@conf.commands.register
def rdpcap(filename, count=-1):
"""Read a pcap or pcapng file and return a packet list
count: read only <count> packets
"""
with PcapReader(filename) as fdesc:
return fdesc.read_all(count=count)
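# Added usage sketch (not part of the original module): a write/read round
# trip through a temporary pcap file. Ether/IP are imported lazily so the
# example stays self-contained; the address is a documentation-range IP.
def _pcap_roundtrip_example():
    from scapy.layers.inet import IP
    from scapy.layers.l2 import Ether
    fname = get_temp_file(autoext=".pcap")
    wrpcap(fname, [Ether() / IP(dst="192.0.2.1")])
    pkts = rdpcap(fname)
    assert len(pkts) == 1 and pkts[0][IP].dst == "192.0.2.1"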
class PcapReader_metaclass(type):
"""Metaclass for (Raw)Pcap(Ng)Readers"""
def __new__(cls, name, bases, dct):
"""The `alternative` class attribute is declared in the PcapNg
variant, and set here to the Pcap variant.
"""
newcls = super(PcapReader_metaclass, cls).__new__(cls, name, bases, dct) # noqa: E501
if 'alternative' in dct:
dct['alternative'].alternative = newcls
return newcls
def __call__(cls, filename):
"""Creates a cls instance, use the `alternative` if that
fails.
"""
i = cls.__new__(cls, cls.__name__, cls.__bases__, cls.__dict__)
filename, fdesc, magic = cls.open(filename)
try:
i.__init__(filename, fdesc, magic)
except Scapy_Exception:
if "alternative" in cls.__dict__:
cls = cls.__dict__["alternative"]
i = cls.__new__(cls, cls.__name__, cls.__bases__, cls.__dict__)
try:
i.__init__(filename, fdesc, magic)
except Scapy_Exception:
try:
i.f.seek(-4, 1)
except Exception:
pass
raise Scapy_Exception("Not a supported capture file")
return i
@staticmethod
def open(filename):
"""Open (if necessary) filename, and read the magic."""
if isinstance(filename, six.string_types):
try:
fdesc = gzip.open(filename, "rb")
magic = fdesc.read(4)
except IOError:
fdesc = open(filename, "rb")
magic = fdesc.read(4)
else:
fdesc = filename
filename = getattr(fdesc, "name", "No name")
magic = fdesc.read(4)
return filename, fdesc, magic
class RawPcapReader(six.with_metaclass(PcapReader_metaclass)):
"""A stateful pcap reader. Each packet is returned as a string"""
read_allowed_exceptions = () # emulate SuperSocket
nonblocking_socket = True
PacketMetadata = collections.namedtuple("PacketMetadata",
["sec", "usec", "wirelen", "caplen"]) # noqa: E501
def __init__(self, filename, fdesc, magic):
self.filename = filename
self.f = fdesc
if magic == b"\xa1\xb2\xc3\xd4": # big endian
self.endian = ">"
self.nano = False
elif magic == b"\xd4\xc3\xb2\xa1": # little endian
self.endian = "<"
self.nano = False
elif magic == b"\xa1\xb2\x3c\x4d": # big endian, nanosecond-precision
self.endian = ">"
self.nano = True
elif magic == b"\x4d\x3c\xb2\xa1": # little endian, nanosecond-precision # noqa: E501
self.endian = "<"
self.nano = True
else:
raise Scapy_Exception(
"Not a pcap capture file (bad magic: %r)" % magic
)
hdr = self.f.read(20)
if len(hdr) < 20:
raise Scapy_Exception("Invalid pcap file (too short)")
vermaj, vermin, tz, sig, snaplen, linktype = struct.unpack(
self.endian + "HHIIII", hdr
)
self.linktype = linktype
def __iter__(self):
return self
def next(self):
"""implement the iterator protocol on a set of packets in a pcap file
pkt is a tuple (pkt_data, pkt_metadata) as defined in
RawPcapReader.read_packet()
"""
try:
return self.read_packet()
except EOFError:
raise StopIteration
__next__ = next
def read_packet(self, size=MTU):
"""return a single packet read from the file as a tuple containing
(pkt_data, pkt_metadata)
raise EOFError when no more packets are available
"""
hdr = self.f.read(16)
if len(hdr) < 16:
raise EOFError
sec, usec, caplen, wirelen = struct.unpack(self.endian + "IIII", hdr)
return (self.f.read(caplen)[:size],
RawPcapReader.PacketMetadata(sec=sec, usec=usec,
wirelen=wirelen, caplen=caplen))
def dispatch(self, callback):
"""call the specified callback routine for each packet read
This is just a convenience function for the main loop
that allows for easy launching of packet processing in a
thread.
"""
for p in self:
callback(p)
def read_all(self, count=-1):
"""return a list of all packets in the pcap file
"""
res = []
while count != 0:
count -= 1
try:
p = self.read_packet()
except EOFError:
break
res.append(p)
return res
def recv(self, size=MTU):
""" Emulate a socket
"""
return self.read_packet(size=size)[0]
def fileno(self):
return self.f.fileno()
def close(self):
return self.f.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tracback):
self.close()
# emulate SuperSocket
@staticmethod
def select(sockets, remain=None):
return sockets, None
class PcapReader(RawPcapReader):
def __init__(self, filename, fdesc, magic):
RawPcapReader.__init__(self, filename, fdesc, magic)
try:
self.LLcls = conf.l2types[self.linktype]
except KeyError:
warning("PcapReader: unknown LL type [%i]/[%#x]. Using Raw packets" % (self.linktype, self.linktype)) # noqa: E501
self.LLcls = conf.raw_layer
def read_packet(self, size=MTU):
rp = super(PcapReader, self).read_packet(size=size)
if rp is None:
raise EOFError
s, pkt_info = rp
try:
p = self.LLcls(s)
except KeyboardInterrupt:
raise
except Exception:
if conf.debug_dissector:
from scapy.sendrecv import debug
debug.crashed_on = (self.LLcls, s)
raise
p = conf.raw_layer(s)
power = Decimal(10) ** Decimal(-9 if self.nano else -6)
p.time = Decimal(pkt_info.sec + power * pkt_info.usec)
p.wirelen = pkt_info.wirelen
return p
def read_all(self, count=-1):
res = RawPcapReader.read_all(self, count)
from scapy import plist
return plist.PacketList(res, name=os.path.basename(self.filename))
def recv(self, size=MTU):
return self.read_packet(size=size)
class RawPcapNgReader(RawPcapReader):
"""A stateful pcapng reader. Each packet is returned as a
string.
"""
alternative = RawPcapReader
PacketMetadata = collections.namedtuple("PacketMetadata",
["linktype", "tsresol",
"tshigh", "tslow", "wirelen"])
def __init__(self, filename, fdesc, magic):
self.filename = filename
self.f = fdesc
# A list of (linktype, snaplen, tsresol); will be populated by IDBs.
self.interfaces = []
self.blocktypes = {
1: self.read_block_idb,
2: self.read_block_pkt,
3: self.read_block_spb,
6: self.read_block_epb,
}
if magic != b"\x0a\x0d\x0d\x0a": # PcapNg:
raise Scapy_Exception(
"Not a pcapng capture file (bad magic: %r)" % magic
)
# see https://github.com/pcapng/pcapng
blocklen, magic = self.f.read(4), self.f.read(4) # noqa: F841
if magic == b"\x1a\x2b\x3c\x4d":
self.endian = ">"
elif magic == b"\x4d\x3c\x2b\x1a":
self.endian = "<"
else:
raise Scapy_Exception("Not a pcapng capture file (bad magic)")
try:
self.f.seek(0)
except Exception:
pass
def read_packet(self, size=MTU):
"""Read blocks until it reaches either EOF or a packet, and
returns None or (packet, (linktype, sec, usec, wirelen)),
where packet is a string.
"""
while True:
try:
blocktype, blocklen = struct.unpack(self.endian + "2I",
self.f.read(8))
except struct.error:
raise EOFError
block = self.f.read(blocklen - 12)
if blocklen % 4:
pad = self.f.read(4 - (blocklen % 4))
warning("PcapNg: bad blocklen %d (MUST be a multiple of 4. "
"Ignored padding %r" % (blocklen, pad))
try:
if (blocklen,) != struct.unpack(self.endian + 'I',
self.f.read(4)):
warning("PcapNg: Invalid pcapng block (bad blocklen)")
except struct.error:
raise EOFError
res = self.blocktypes.get(blocktype,
lambda block, size: None)(block, size)
if res is not None:
return res
def read_block_idb(self, block, _):
"""Interface Description Block"""
options = block[16:]
tsresol = 1000000
while len(options) >= 4:
code, length = struct.unpack(self.endian + "HH", options[:4])
# PCAP Next Generation (pcapng) Capture File Format
# 4.2. - Interface Description Block
# http://xml2rfc.tools.ietf.org/cgi-bin/xml2rfc.cgi?url=https://raw.githubusercontent.com/pcapng/pcapng/master/draft-tuexen-opsawg-pcapng.xml&modeAsFormat=html/ascii&type=ascii#rfc.section.4.2
if code == 9 and length == 1 and len(options) >= 5:
tsresol = orb(options[4])
tsresol = (2 if tsresol & 128 else 10) ** (tsresol & 127)
if code == 0:
if length != 0:
warning("PcapNg: invalid option length %d for end-of-option" % length) # noqa: E501
break
if length % 4:
length += (4 - (length % 4))
options = options[4 + length:]
self.interfaces.append(struct.unpack(self.endian + "HxxI", block[:8]) +
(tsresol,))
def read_block_epb(self, block, size):
"""Enhanced Packet Block"""
intid, tshigh, tslow, caplen, wirelen = struct.unpack(
self.endian + "5I",
block[:20],
)
return (block[20:20 + caplen][:size],
RawPcapNgReader.PacketMetadata(linktype=self.interfaces[intid][0], # noqa: E501
tsresol=self.interfaces[intid][2], # noqa: E501
tshigh=tshigh,
tslow=tslow,
wirelen=wirelen))
def read_block_spb(self, block, size):
"""Simple Packet Block"""
# "it MUST be assumed that all the Simple Packet Blocks have
# been captured on the interface previously specified in the
# first Interface Description Block."
intid = 0
wirelen, = struct.unpack(self.endian + "I", block[:4])
caplen = min(wirelen, self.interfaces[intid][1])
return (block[4:4 + caplen][:size],
RawPcapNgReader.PacketMetadata(linktype=self.interfaces[intid][0], # noqa: E501
tsresol=self.interfaces[intid][2], # noqa: E501
tshigh=None,
tslow=None,
wirelen=wirelen))
def read_block_pkt(self, block, size):
"""(Obsolete) Packet Block"""
intid, drops, tshigh, tslow, caplen, wirelen = struct.unpack(
self.endian + "HH4I",
block[:20],
)
return (block[20:20 + caplen][:size],
RawPcapNgReader.PacketMetadata(linktype=self.interfaces[intid][0], # noqa: E501
tsresol=self.interfaces[intid][2], # noqa: E501
tshigh=tshigh,
tslow=tslow,
wirelen=wirelen))
class PcapNgReader(RawPcapNgReader):
alternative = PcapReader
def __init__(self, filename, fdesc, magic):
RawPcapNgReader.__init__(self, filename, fdesc, magic)
def read_packet(self, size=MTU):
rp = super(PcapNgReader, self).read_packet(size=size)
if rp is None:
raise EOFError
s, (linktype, tsresol, tshigh, tslow, wirelen) = rp
try:
p = conf.l2types[linktype](s)
except KeyboardInterrupt:
raise
except Exception:
if conf.debug_dissector:
raise
p = conf.raw_layer(s)
if tshigh is not None:
p.time = float((tshigh << 32) + tslow) / tsresol
p.wirelen = wirelen
return p
def read_all(self, count=-1):
res = RawPcapNgReader.read_all(self, count)
from scapy import plist
return plist.PacketList(res, name=os.path.basename(self.filename))
def recv(self, size=MTU):
return self.read_packet()
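# --- Editorial illustration (not part of the original module) ---------------
# A minimal sketch of how the reader classes above are normally consumed.  The
# public helper rdpcap() (defined elsewhere in this module) inspects the file
# magic and should dispatch to the pcap or pcapng reader as appropriate, so the
# same call covers both formats.  The file name is only an assumption.
def _example_read_capture(path="capture.pcapng"):
    """Illustrative only: load a capture and print a one-line summary per packet."""
    packets = rdpcap(path)          # returns a plist.PacketList
    for pkt in packets:
        print(pkt.summary())
    return packets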
class RawPcapWriter:
"""A stream PCAP writer with more control than wrpcap()"""
def __init__(self, filename, linktype=None, gz=False, endianness="",
append=False, sync=False, nano=False):
"""
filename: the name of the file to write packets to, or an open,
writable file-like object.
linktype: force linktype to a given value. If None, linktype is taken
from the first writer packet
gz: compress the capture on the fly
endianness: force an endianness (little:"<", big:">"). Default is native
append: append packets to the capture file instead of truncating it
        sync: do not buffer writes to the capture file
nano: use nanosecond-precision (requires libpcap >= 1.5.0)
"""
self.linktype = linktype
self.header_present = 0
self.append = append
self.gz = gz
self.endian = endianness
self.sync = sync
self.nano = nano
bufsz = 4096
if sync:
bufsz = 0
if isinstance(filename, six.string_types):
self.filename = filename
self.f = [open, gzip.open][gz](filename, append and "ab" or "wb", gz and 9 or bufsz) # noqa: E501
else:
self.f = filename
self.filename = getattr(filename, "name", "No name")
def fileno(self):
return self.f.fileno()
def _write_header(self, pkt):
self.header_present = 1
if self.append:
# Even if prone to race conditions, this seems to be
            # the safest way to tell whether the header is already present
# because we have to handle compressed streams that
# are not as flexible as basic files
g = [open, gzip.open][self.gz](self.filename, "rb")
if g.read(16):
return
self.f.write(struct.pack(self.endian + "IHHIIII", 0xa1b23c4d if self.nano else 0xa1b2c3d4, # noqa: E501
2, 4, 0, 0, MTU, self.linktype))
self.f.flush()
def write(self, pkt):
"""
Writes a Packet, a SndRcvList object, or bytes to a pcap file.
:param pkt: Packet(s) to write (one record for each Packet), or raw
bytes to write (as one record).
:type pkt: iterable[Packet], Packet or bytes
"""
if isinstance(pkt, bytes):
if not self.header_present:
self._write_header(pkt)
self._write_packet(pkt)
else:
# Import here to avoid a circular dependency
from scapy.plist import SndRcvList
if isinstance(pkt, SndRcvList):
pkt = (p for t in pkt for p in t)
else:
pkt = pkt.__iter__()
for p in pkt:
if not self.header_present:
self._write_header(p)
self._write_packet(p)
def _write_packet(self, packet, sec=None, usec=None, caplen=None,
wirelen=None):
"""
Writes a single packet to the pcap file.
:param packet: bytes for a single packet
:type packet: bytes
:param sec: time the packet was captured, in seconds since epoch. If
not supplied, defaults to now.
:type sec: int or long
:param usec: If ``nano=True``, then number of nanoseconds after the
second that the packet was captured. If ``nano=False``,
then the number of microseconds after the second the
packet was captured
:type usec: int or long
:param caplen: The length of the packet in the capture file. If not
specified, uses ``len(packet)``.
:type caplen: int
:param wirelen: The length of the packet on the wire. If not
specified, uses ``caplen``.
:type wirelen: int
:returns: None
:rtype: None
"""
if caplen is None:
caplen = len(packet)
if wirelen is None:
wirelen = caplen
if sec is None or usec is None:
t = time.time()
it = int(t)
if sec is None:
sec = it
usec = int(round((t - it) *
(1000000000 if self.nano else 1000000)))
elif usec is None:
usec = 0
self.f.write(struct.pack(self.endian + "IIII",
sec, usec, caplen, wirelen))
self.f.write(packet)
if self.sync:
self.f.flush()
def flush(self):
return self.f.flush()
def close(self):
if not self.header_present:
self._write_header(None)
return self.f.close()
def __enter__(self):
return self
    def __exit__(self, exc_type, exc_value, traceback):
self.flush()
self.close()
class PcapWriter(RawPcapWriter):
"""A stream PCAP writer with more control than wrpcap()"""
def _write_header(self, pkt):
if self.linktype is None:
try:
self.linktype = conf.l2types[pkt.__class__]
# Import here to prevent import loops
from scapy.layers.inet import IP
from scapy.layers.inet6 import IPv6
if OPENBSD and isinstance(pkt, (IP, IPv6)):
self.linktype = 14 # DLT_RAW
except KeyError:
warning("PcapWriter: unknown LL type for %s. Using type 1 (Ethernet)", pkt.__class__.__name__) # noqa: E501
self.linktype = DLT_EN10MB
RawPcapWriter._write_header(self, pkt)
def _write_packet(self, packet, sec=None, usec=None, caplen=None,
wirelen=None):
"""
Writes a single packet to the pcap file.
:param packet: Packet, or bytes for a single packet
:type packet: Packet or bytes
:param sec: time the packet was captured, in seconds since epoch. If
not supplied, defaults to now.
:type sec: int or long
:param usec: If ``nano=True``, then number of nanoseconds after the
second that the packet was captured. If ``nano=False``,
then the number of microseconds after the second the
packet was captured. If ``sec`` is not specified,
this value is ignored.
:type usec: int or long
:param caplen: The length of the packet in the capture file. If not
specified, uses ``len(raw(packet))``.
:type caplen: int
:param wirelen: The length of the packet on the wire. If not
specified, tries ``packet.wirelen``, otherwise uses
``caplen``.
:type wirelen: int
:returns: None
:rtype: None
"""
if hasattr(packet, "time"):
if sec is None:
sec = int(packet.time)
usec = int(round((packet.time - sec) *
(1000000000 if self.nano else 1000000)))
if usec is None:
usec = 0
rawpkt = raw(packet)
caplen = len(rawpkt) if caplen is None else caplen
if wirelen is None:
if hasattr(packet, "wirelen"):
wirelen = packet.wirelen
if wirelen is None:
wirelen = caplen
RawPcapWriter._write_packet(
self, rawpkt, sec=sec, usec=usec, caplen=caplen, wirelen=wirelen)
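# --- Editorial illustration (not part of the original module) ---------------
# A minimal sketch of PcapWriter usage, following the parameters documented in
# RawPcapWriter.__init__ above.  Packet classes are imported lazily to avoid an
# import cycle with scapy.layers; the output file name is only an assumption.
def _example_write_capture(path="example_out.pcap"):
    """Illustrative only: write two packets, then append a third one."""
    from scapy.layers.inet import IP, TCP, UDP
    with PcapWriter(path, sync=True) as w:
        w.write(IP() / TCP())
        w.write(IP() / UDP())
    # append=True keeps the existing global header and only adds new records
    w2 = PcapWriter(path, append=True)
    w2.write(IP() / TCP(dport=8080))
    w2.close()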
@conf.commands.register
def import_hexcap():
"""Imports a tcpdump like hexadecimal view
e.g: exported via hexdump() or tcpdump or wireshark's "export as hex"
"""
re_extract_hexcap = re.compile(r"^((0x)?[0-9a-fA-F]{2,}[ :\t]{,3}|) *(([0-9a-fA-F]{2} {,2}){,16})") # noqa: E501
p = ""
try:
while True:
line = input().strip()
if not line:
break
try:
p += re_extract_hexcap.match(line).groups()[2]
except Exception:
warning("Parsing error during hexcap")
continue
except EOFError:
pass
p = p.replace(" ", "")
return hex_bytes(p)
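# --- Editorial illustration (not part of the original module) ---------------
# import_hexcap() reads a hex dump interactively from stdin until a blank line.
# This sketch (written for Python 3) feeds it a canned two-line dump of invented
# bytes by temporarily swapping sys.stdin, purely to show the expected format.
def _example_import_hexcap():
    """Illustrative only: turn a pasted hex dump back into raw packet bytes."""
    import io
    import sys
    dump = (
        "0000  45 00 00 28 00 01 00 00 40 06 7c cd 7f 00 00 01\n"
        "0010  7f 00 00 01 00 14 00 50 00 00 00 00 00 00 00 00\n"
        "\n"
    )
    old_stdin = sys.stdin
    sys.stdin = io.StringIO(dump)
    try:
        return import_hexcap()  # raw bytes, ready for e.g. IP(...)
    finally:
        sys.stdin = old_stdin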
@conf.commands.register
def wireshark(pktlist, wait=False, **kwargs):
"""
Runs Wireshark on a list of packets.
See :func:`tcpdump` for more parameter description.
Note: this defaults to wait=False, to run Wireshark in the background.
"""
return tcpdump(pktlist, prog=conf.prog.wireshark, wait=wait, **kwargs)
@conf.commands.register
def tdecode(pktlist, args=None, **kwargs):
"""
Run tshark on a list of packets.
:param args: If not specified, defaults to ``tshark -V``.
See :func:`tcpdump` for more parameters.
"""
if args is None:
args = ["-V"]
return tcpdump(pktlist, prog=conf.prog.tshark, args=args, **kwargs)
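# --- Editorial illustration (not part of the original module) ---------------
# Both wrappers above delegate to tcpdump().  A minimal sketch of calling them
# on in-memory packets; it assumes Wireshark and tshark are installed and
# reachable through conf.prog.
def _example_wireshark_and_tshark():
    """Illustrative only: open packets in Wireshark, then dump them with tshark."""
    from scapy.layers.inet import IP, ICMP
    pkts = [IP(dst="127.0.0.1") / ICMP()] * 3
    wireshark(pkts)                  # returns immediately (wait=False by default)
    return tdecode(pkts, dump=True)  # tshark -V output, returned as bytes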
def _guess_linktype_name(value):
"""Guess the DLT name from its value."""
import scapy.data
return next(
k[4:] for k, v in six.iteritems(scapy.data.__dict__)
if k.startswith("DLT") and v == value
)
def _guess_linktype_value(name):
"""Guess the value of a DLT name."""
import scapy.data
if not name.startswith("DLT_"):
name = "DLT_" + name
return scapy.data.__dict__[name]
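# --- Editorial illustration (not part of the original module) ---------------
# The two helpers above translate between DLT names and numeric values through
# the DLT_* constants in scapy.data (for instance DLT_EN10MB == 1).
def _example_linktype_roundtrip():
    """Illustrative only: name -> value -> name round trip."""
    value = _guess_linktype_value("EN10MB")  # the "DLT_" prefix is optional
    name = _guess_linktype_name(value)       # typically gives back "EN10MB"
    return value, name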
@conf.commands.register
def tcpdump(pktlist, dump=False, getfd=False, args=None,
prog=None, getproc=False, quiet=False, use_tempfile=None,
read_stdin_opts=None, linktype=None, wait=True):
"""Run tcpdump or tshark on a list of packets.
When using ``tcpdump`` on OSX (``prog == conf.prog.tcpdump``), this uses a
temporary file to store the packets. This works around a bug in Apple's
version of ``tcpdump``: http://apple.stackexchange.com/questions/152682/
Otherwise, the packets are passed in stdin.
    This behaviour can be explicitly enabled or disabled with the
``use_tempfile`` parameter.
When using ``wireshark``, it will be called with ``-ki -`` to start
immediately capturing packets from stdin.
Otherwise, the command will be run with ``-r -`` (which is correct for
``tcpdump`` and ``tshark``).
This can be overridden with ``read_stdin_opts``. This has no effect when
``use_tempfile=True``, or otherwise reading packets from a regular file.
pktlist: a Packet instance, a PacketList instance or a list of Packet
instances. Can also be a filename (as a string), an open
file-like object that must be a file format readable by
tshark (Pcap, PcapNg, etc.) or None (to sniff)
dump: when set to True, returns a string instead of displaying it.
getfd: when set to True, returns a file-like object to read data
from tcpdump or tshark from.
getproc: when set to True, the subprocess.Popen object is returned
args: arguments (as a list) to pass to tshark (example for tshark:
args=["-T", "json"]).
prog: program to use (defaults to tcpdump, will work with tshark)
quiet: when set to True, the process stderr is discarded
use_tempfile: When set to True, always use a temporary file to store packets.
When set to False, pipe packets through stdin.
When set to None (default), only use a temporary file with
``tcpdump`` on OSX.
read_stdin_opts: When set, a list of arguments needed to capture from stdin.
Otherwise, attempts to guess.
linktype: A custom DLT value or name, to overwrite the default values.
wait: If True (default), waits for the process to terminate before returning
to Scapy. If False, the process will be detached to the background. If
dump, getproc or getfd is True, these have the same effect as
``wait=False``.
Examples:
>>> tcpdump([IP()/TCP(), IP()/UDP()])
reading from file -, link-type RAW (Raw IP)
16:46:00.474515 IP 127.0.0.1.20 > 127.0.0.1.80: Flags [S], seq 0, win 8192, length 0 # noqa: E501
16:46:00.475019 IP 127.0.0.1.53 > 127.0.0.1.53: [|domain]
>>> tcpdump([IP()/TCP(), IP()/UDP()], prog=conf.prog.tshark)
1 0.000000 127.0.0.1 -> 127.0.0.1 TCP 40 20->80 [SYN] Seq=0 Win=8192 Len=0 # noqa: E501
2 0.000459 127.0.0.1 -> 127.0.0.1 UDP 28 53->53 Len=0
To get a JSON representation of a tshark-parsed PacketList(), one can:
>>> import json, pprint
>>> json_data = json.load(tcpdump(IP(src="217.25.178.5", dst="45.33.32.156"),
... prog=conf.prog.tshark, args=["-T", "json"],
... getfd=True))
>>> pprint.pprint(json_data)
[{u'_index': u'packets-2016-12-23',
u'_score': None,
u'_source': {u'layers': {u'frame': {u'frame.cap_len': u'20',
u'frame.encap_type': u'7',
[...]
u'frame.time_relative': u'0.000000000'},
u'ip': {u'ip.addr': u'45.33.32.156',
u'ip.checksum': u'0x0000a20d',
[...]
u'ip.ttl': u'64',
u'ip.version': u'4'},
u'raw': u'Raw packet data'}},
u'_type': u'pcap_file'}]
>>> json_data[0]['_source']['layers']['ip']['ip.ttl']
u'64'
"""
getfd = getfd or getproc
if prog is None:
prog = [conf.prog.tcpdump]
_prog_name = "windump()" if WINDOWS else "tcpdump()"
elif isinstance(prog, six.string_types):
_prog_name = "{}()".format(prog)
prog = [prog]
else:
raise ValueError("prog must be a string")
from scapy.arch.common import TCPDUMP
if prog[0] == conf.prog.tcpdump and not TCPDUMP:
message = "tcpdump is not available. Cannot use tcpdump() !"
raise Scapy_Exception(message)
if linktype is not None:
# Tcpdump does not support integers in -y (yet)
# https://github.com/the-tcpdump-group/tcpdump/issues/758
if isinstance(linktype, int):
# Guess name from value
try:
linktype_name = _guess_linktype_name(linktype)
except StopIteration:
linktype = -1
else:
# Guess value from name
if linktype.startswith("DLT_"):
linktype = linktype[4:]
linktype_name = linktype
try:
linktype = _guess_linktype_value(linktype)
except KeyError:
linktype = -1
if linktype == -1:
raise ValueError(
"Unknown linktype. Try passing its datalink name instead"
)
prog += ["-y", linktype_name]
# Build Popen arguments
if args is None:
args = []
else:
# Make a copy of args
args = list(args)
stdout = subprocess.PIPE if dump or getfd else None
    stderr = open(os.devnull, "w") if quiet else None
if use_tempfile is None:
# Apple's tcpdump cannot read from stdin, see:
# http://apple.stackexchange.com/questions/152682/
use_tempfile = DARWIN and prog[0] == conf.prog.tcpdump
if read_stdin_opts is None:
if prog[0] == conf.prog.wireshark:
# Start capturing immediately (-k) from stdin (-i -)
read_stdin_opts = ["-ki", "-"]
else:
read_stdin_opts = ["-r", "-"]
else:
# Make a copy of read_stdin_opts
read_stdin_opts = list(read_stdin_opts)
if pktlist is None:
# sniff
with ContextManagerSubprocess(_prog_name, prog[0]):
proc = subprocess.Popen(
prog + args,
stdout=stdout,
stderr=stderr,
)
elif isinstance(pktlist, six.string_types):
# file
with ContextManagerSubprocess(_prog_name, prog[0]):
proc = subprocess.Popen(
prog + ["-r", pktlist] + args,
stdout=stdout,
stderr=stderr,
)
elif use_tempfile:
tmpfile = get_temp_file(autoext=".pcap", fd=True)
try:
tmpfile.writelines(iter(lambda: pktlist.read(1048576), b""))
except AttributeError:
wrpcap(tmpfile, pktlist, linktype=linktype)
else:
tmpfile.close()
with ContextManagerSubprocess(_prog_name, prog[0]):
proc = subprocess.Popen(
prog + ["-r", tmpfile.name] + args,
stdout=stdout,
stderr=stderr,
)
else:
# pass the packet stream
with ContextManagerSubprocess(_prog_name, prog[0]):
proc = subprocess.Popen(
prog + read_stdin_opts + args,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr,
)
try:
proc.stdin.writelines(iter(lambda: pktlist.read(1048576), b""))
except AttributeError:
wrpcap(proc.stdin, pktlist, linktype=linktype)
except UnboundLocalError:
raise IOError("%s died unexpectedly !" % prog)
else:
proc.stdin.close()
if dump:
return b"".join(iter(lambda: proc.stdout.read(1048576), b""))
if getproc:
return proc
if getfd:
return proc.stdout
if wait:
proc.wait()
@conf.commands.register
def hexedit(pktlist):
"""Run hexedit on a list of packets, then return the edited packets."""
f = get_temp_file()
wrpcap(f, pktlist)
with ContextManagerSubprocess("hexedit()", conf.prog.hexedit):
subprocess.call([conf.prog.hexedit, f])
pktlist = rdpcap(f)
os.unlink(f)
return pktlist
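# --- Editorial illustration (not part of the original module) ---------------
# hexedit() is interactive: it needs conf.prog.hexedit to point to an installed
# hexedit binary.  A minimal sketch of round-tripping packets through it.
def _example_hexedit():
    """Illustrative only: craft two packets, edit them by hand, get them back."""
    from scapy.layers.inet import IP, TCP
    return hexedit([IP() / TCP(), IP() / TCP(dport=443)])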
def get_terminal_width():
"""Get terminal width (number of characters) if in a window.
Notice: this will try several methods in order to
support as many terminals and OS as possible.
"""
# Let's first try using the official API
# (Python 3.3+)
if not six.PY2:
import shutil
sizex = shutil.get_terminal_size(fallback=(0, 0))[0]
if sizex != 0:
return sizex
# Backups / Python 2.7
if WINDOWS:
from ctypes import windll, create_string_buffer
# http://code.activestate.com/recipes/440694-determine-size-of-console-window-on-windows/
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw) # noqa: E501
sizex = right - left + 1
# sizey = bottom - top + 1
return sizex
return None
else:
# We have various methods
sizex = None
# COLUMNS is set on some terminals
try:
sizex = int(os.environ['COLUMNS'])
except Exception:
pass
if sizex:
return sizex
# We can query TIOCGWINSZ
try:
import fcntl
import termios
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(1, termios.TIOCGWINSZ, s)
sizex = struct.unpack('HHHH', x)[1]
except IOError:
pass
return sizex
def pretty_list(rtlst, header, sortBy=0, borders=False):
"""Pretty list to fit the terminal, and add header"""
if borders:
_space = "|"
else:
_space = " "
# Windows has a fat terminal border
_spacelen = len(_space) * (len(header) - 1) + (10 if WINDOWS else 0)
_croped = False
# Sort correctly
rtlst.sort(key=lambda x: x[sortBy])
# Append tag
rtlst = header + rtlst
# Detect column's width
colwidth = [max([len(y) for y in x]) for x in zip(*rtlst)]
# Make text fit in box (if required)
width = get_terminal_width()
if conf.auto_crop_tables and width:
width = width - _spacelen
while sum(colwidth) > width:
_croped = True
# Needs to be cropped
# Get the longest row
i = colwidth.index(max(colwidth))
# Get all elements of this row
row = [len(x[i]) for x in rtlst]
# Get biggest element of this row: biggest of the array
j = row.index(max(row))
# Re-build column tuple with the edited element
t = list(rtlst[j])
t[i] = t[i][:-2] + "_"
rtlst[j] = tuple(t)
# Update max size
row[j] = len(t[i])
colwidth[i] = max(row)
if _croped:
log_runtime.info("Table cropped to fit the terminal (conf.auto_crop_tables==True)") # noqa: E501
# Generate padding scheme
fmt = _space.join(["%%-%ds" % x for x in colwidth])
# Append separation line if needed
if borders:
rtlst.insert(1, tuple("-" * x for x in colwidth))
# Compile
rt = "\n".join(((fmt % x).strip() for x in rtlst))
return rt
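# --- Editorial illustration (not part of the original module) ---------------
# pretty_list() expects the header as a one-element list of tuples and each row
# as a tuple of strings of the same length.  The values below are invented.
def _example_pretty_list():
    """Illustrative only: format a small routing-style table."""
    header = [("Network", "Gateway", "Iface")]
    rows = [
        ("192.168.0.0/24", "0.0.0.0", "eth0"),
        ("0.0.0.0/0", "192.168.0.1", "eth0"),
    ]
    return pretty_list(rows, header, sortBy=0)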
def __make_table(yfmtfunc, fmtfunc, endline, data, fxyz, sortx=None, sorty=None, seplinefunc=None): # noqa: E501
"""Core function of the make_table suite, which generates the table"""
vx = {}
vy = {}
vz = {}
vxf = {}
# Python 2 backward compatibility
fxyz = lambda_tuple_converter(fxyz)
tmp_len = 0
for e in data:
xx, yy, zz = [str(s) for s in fxyz(*e)]
tmp_len = max(len(yy), tmp_len)
vx[xx] = max(vx.get(xx, 0), len(xx), len(zz))
vy[yy] = None
vz[(xx, yy)] = zz
vxk = list(vx)
vyk = list(vy)
if sortx:
vxk.sort(key=sortx)
else:
try:
vxk.sort(key=int)
except Exception:
try:
vxk.sort(key=atol)
except Exception:
vxk.sort()
if sorty:
vyk.sort(key=sorty)
else:
try:
vyk.sort(key=int)
except Exception:
try:
vyk.sort(key=atol)
except Exception:
vyk.sort()
if seplinefunc:
sepline = seplinefunc(tmp_len, [vx[x] for x in vxk])
print(sepline)
fmt = yfmtfunc(tmp_len)
print(fmt % "", end=' ')
for x in vxk:
vxf[x] = fmtfunc(vx[x])
print(vxf[x] % x, end=' ')
print(endline)
if seplinefunc:
print(sepline)
for y in vyk:
print(fmt % y, end=' ')
for x in vxk:
print(vxf[x] % vz.get((x, y), "-"), end=' ')
print(endline)
if seplinefunc:
print(sepline)
def make_table(*args, **kargs):
__make_table(lambda l: "%%-%is" % l, lambda l: "%%-%is" % l, "", *args, **kargs) # noqa: E501
def make_lined_table(*args, **kargs):
__make_table(lambda l: "%%-%is |" % l, lambda l: "%%-%is |" % l, "",
seplinefunc=lambda a, x: "+".join('-' * (y + 2) for y in [a - 1] + x + [-2]), # noqa: E501
*args, **kargs)
def make_tex_table(*args, **kargs):
__make_table(lambda l: "%s", lambda l: "& %s", "\\\\", seplinefunc=lambda a, x: "\\hline", *args, **kargs) # noqa: E501
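# --- Editorial illustration (not part of the original module) ---------------
# The make_table helpers print a 2D pivot of an iterable: fxyz maps each element
# to an (x, y, z) triple where x becomes the column, y the row and z the cell
# value.  The data below is invented for the example.
def _example_make_table():
    """Illustrative only: pivot (host, port, state) triples into a table."""
    data = [
        ("10.0.0.1", 22, "open"),
        ("10.0.0.1", 80, "closed"),
        ("10.0.0.2", 22, "filtered"),
    ]
    make_table(data, lambda host, port, state: (host, port, state))
    make_lined_table(data, lambda host, port, state: (host, port, state))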
####################
# WHOIS CLIENT #
####################
def whois(ip_address):
"""Whois client for Python"""
whois_ip = str(ip_address)
try:
query = socket.gethostbyname(whois_ip)
except Exception:
query = whois_ip
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("whois.ripe.net", 43))
s.send(query.encode("utf8") + b"\r\n")
answer = b""
while True:
d = s.recv(4096)
answer += d
if not d:
break
s.close()
ignore_tag = b"remarks:"
# ignore all lines starting with the ignore_tag
lines = [line for line in answer.split(b"\n") if not line or (line and not line.startswith(ignore_tag))] # noqa: E501
# remove empty lines at the bottom
for i in range(1, len(lines)):
if not lines[-i].strip():
del lines[-i]
else:
break
return b"\n".join(lines[3:])
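# --- Editorial illustration (not part of the original module) ---------------
# whois() opens a TCP connection to whois.ripe.net, so it needs network access;
# it returns the raw answer as bytes.  The address below is only an example.
def _example_whois():
    """Illustrative only: query RIPE for an address and decode the answer."""
    answer = whois("193.0.6.139")
    print(answer.decode("utf8", "replace"))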
#######################
# PERIODIC SENDER #
#######################
class PeriodicSenderThread(threading.Thread):
def __init__(self, sock, pkt, interval=0.5):
""" Thread to send packets periodically
Args:
sock: socket where packet is sent periodically
pkt: packet to send
interval: interval between two packets
"""
self._pkt = pkt
self._socket = sock
self._stopped = threading.Event()
self._interval = interval
threading.Thread.__init__(self)
def run(self):
while not self._stopped.is_set():
self._socket.send(self._pkt)
time.sleep(self._interval)
def stop(self):
self._stopped.set()
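# --- Editorial illustration (not part of the original module) ---------------
# A minimal sketch of driving PeriodicSenderThread with a conf.L3socket.  Root
# privileges are usually required to open the socket; destination and interval
# are assumptions for the example.
def _example_periodic_sender():
    """Illustrative only: send an ICMP echo every second for about five seconds."""
    from scapy.layers.inet import IP, ICMP
    sock = conf.L3socket()
    sender = PeriodicSenderThread(sock, IP(dst="127.0.0.1") / ICMP(), interval=1.0)
    sender.start()
    time.sleep(5)
    sender.stop()
    sender.join()
    sock.close()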
| [
"[email protected]"
] | |
77388ef09898130d90f9cc56214733cdd9160d06 | 4b60c34ba37e7c0611257e7934791fb43d01e254 | /src/Lib/pysparseSuperLU.py | 2524ee2635966f53d120c079893e98ac0bbfb303 | [] | no_license | regmi/pysparse | 0913ff69b5d07b58c20deb5b6f44caeaa8498d64 | ebc2ad045382c69e6bb41217c9431e51736ac4a0 | refs/heads/master | 2021-01-01T16:20:10.075871 | 2010-04-09T22:56:51 | 2010-04-09T22:56:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,502 | py | """
A framework for solving sparse linear systems of equations using an LU
factorization, by means of the supernodal sparse LU factorization package
SuperLU ([DEGLL99]_, [DGL99]_, [LD03]_).
This package is appropriate for factorizing sparse square unsymmetric or
rectangular matrices.
See [SLU]_ for more information.
**References:**
.. [DEGLL99] J. W. Demmel, S. C. Eisenstat, J. R. Gilbert, X. S. Li and
J. W. H. Liu, *A supernodal approach to sparse partial pivoting*,
SIAM Journal on Matrix Analysis and Applications **20**\ (3),
pp. 720-755, 1999.
.. [DGL99] J. W. Demmel, J. R. Gilbert and X. S. Li,
*An Asynchronous Parallel Supernodal Algorithm for Sparse Gaussian
Elimination*, SIAM Journal on Matrix Analysis and Applications
**20**\ (4), pp. 915-952, 1999.
.. [LD03] X. S. Li and J. W. Demmel, *SuperLU_DIST: A Scalable
Distributed-Memory Sparse Direct Solver for Unsymmetric Linear
Systems*, ACM Transactions on Mathematical Software **29**\ (2),
pp. 110-140, 2003.
.. [SLU] http://crd.lbl.gov/~xiaoye/SuperLU
"""
# To look into:
# - allow other data types
__docformat__ = 'restructuredtext'
import pysparseMatrix as psm
import numpy
import resource
from directSolver import PysparseDirectSolver
from pysparse import superlu
def cputime():
return resource.getrusage(resource.RUSAGE_SELF)[0]
class PysparseSuperLUSolver( PysparseDirectSolver ):
"""
    `PysparseSuperLUSolver` is a wrapper class around the SuperLU library for
the factorization of full-rank n-by-m matrices. Only matrices with real
coefficients are currently supported.
:parameters:
:A: The matrix to be factorized, supplied as a PysparseMatrix instance.
:keywords:
:symmetric: a boolean indicating that the user wishes to use symmetric
mode. In symmetric mode, ``permc_spec=2`` must be chosen and
``diag_pivot_thresh`` must be small, e.g., 0.0 or 0.1. Since
the value of ``diag_pivot_thresh`` is up to the user, setting
``symmetric`` to ``True`` does *not* automatically set
``permc_spec`` and ``diag_pivot_thresh`` to appropriate
values.
:diag_pivot_thresh: a float value between 0 and 1 representing the
threshold for partial pivoting (0 = no pivoting,
1 = always perform partial pivoting). Default: 1.0.
:drop_tol: the value of a drop tolerance, between 0 and 1, if an
incomplete factorization is desired (0 = exact factorization).
This keyword does not exist if using SuperLU version 2.0 and
below. In more recent version of SuperLU, the keyword is
accepted but has no effect. Default: 0.0
      :relax: an integer controlling the degree of relaxing supernodes.
Default: 1.
:panel_size: an integer specifying the maximum number of columns to form
a panel. Default: 10.
:permc_spec: an integer specifying the ordering strategy used during the
factorization.
0. natural ordering,
1. MMD applied to the structure of
:math:`\mathbf{A}^T \mathbf{A}`
2. MMD applied to the structure of
:math:`\mathbf{A}^T + \mathbf{A}`
3. COLAMD.
Default: 2.
.. attribute:: LU
A :class:`superlu_context` object encapsulating the factorization.
.. attribute:: sol
The solution of the linear system after a call to :meth:`solve`.
.. attribute:: factorizationTime
The CPU time to perform the factorization.
.. attribute:: solutionTime
The CPU time to perform the forward and backward sweeps.
.. attribute:: lunz
The number of nonzero elements in the factors L and U together after a
call to :meth:`fetch_lunz`.
"""
def __init__(self, A, **kwargs):
PysparseDirectSolver.__init__(self, A, **kwargs)
self.type = numpy.float
self.nrow, self.ncol = A.getShape()
t = cputime()
self.LU = superlu.factorize(A.matrix.to_csr(), **kwargs)
self.factorizationTime = cputime() - t
self.solutionTime = 0.0
self.sol = None
self.L = self.U = None
return
def solve(self, rhs, transpose = False):
"""
Solve the linear system ``A x = rhs``, where ``A`` is the input matrix
and ``rhs`` is a Numpy vector of appropriate dimension. The result is
placed in the :attr:`sol` member of the class instance.
If the optional argument ``transpose`` is ``True``, the transpose system
``A^T x = rhs`` is solved.
"""
if self.sol is None: self.sol = numpy.empty(self.ncol, self.type)
transp = 'N'
if transpose: transp = 'T'
t = cputime()
self.LU.solve(rhs, self.sol, transp)
self.solutionTime = cputime() - t
return
def fetch_lunz(self):
"""
Retrieve the number of nonzeros in the factors L and U together. The
result is stored in the member :attr:`lunz` of the class instance.
"""
self.lunz = self.LU.nnz
def fetch_factors(self):
"""
Not yet available.
"""
raise NotImplementedError
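# --- Editorial illustration (not part of the original file) -----------------
# A minimal sketch of the intended usage, assuming a PysparseMatrix `A` and a
# right-hand side `b` of matching dimension are already available.  The keyword
# values mirror the defaults documented in the class docstring.
def _example_solve(A, b):
    """Illustrative only: factorize A, solve A x = b and collect statistics."""
    solver = PysparseSuperLUSolver(A, diag_pivot_thresh=1.0, permc_spec=2)
    solver.solve(b)
    solver.fetch_lunz()
    stats = {'factorization time': solver.factorizationTime,
             'solution time': solver.solutionTime,
             'nonzeros in L and U': solver.lunz}
    return solver.sol, stats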
| [
"[email protected]"
] | |
9824125bb2e5cc4986006dddbe094514e3617d37 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2975/60678/236000.py | f7743095883cfe2fd3eb8728f8d5bdd8c431f04d | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | a=input()
# sort the characters of the input string and join them back together
a = ''.join(sorted(a))
print(a) | [
"[email protected]"
] | |
5e3f127541774244db776a2ad4ca2f70cd1d14b9 | dfdbc9118742bc09d7c7fe6fe42f53b7d1d7977a | /spacegame_ii/parralax.py | 3ef67de9c8ce5c05576be44f185131d62e7cf4e6 | [] | no_license | 602p/spacegame | d1c3a34233ed7c7128d5cbe4c470c168a84700ac | 1350beeb6df2b65a0c041f512fa944cbae4dba2b | refs/heads/master | 2021-01-10T19:28:19.220218 | 2015-04-27T20:37:14 | 2015-04-27T20:37:14 | 27,309,904 | 0 | 0 | null | 2015-04-20T15:12:11 | 2014-11-29T17:39:59 | Python | UTF-8 | Python | false | false | 1,454 | py | from __future__ import division
import pygame, random
import logging
module_logger=logging.getLogger("sg.parralax")
debug, info, warning, error, critical = module_logger.debug, module_logger.info, module_logger.warning, module_logger.error, module_logger.critical
class StarfieldLayer:
def __init__(self, density, color, size, speed):
self.density=density
self.speed=speed
self.particle_surface=pygame.Surface((size, size))
self.particle_surface.fill(color)
def bind(self, (xsize, ysize)):
self.particles=[]
self.size=(xsize, ysize)
def render(self, surface, (xpos, ypos)):
i=0
state=random.getstate()
random.seed(413)
while i!=self.density:
#
pos=(random.uniform(0, self.size[0]), random.uniform(0, self.size[1]))
surface.blit(self.particle_surface,
(
int((((xpos+pos[0])/self.speed)%self.size[0])),
int((((ypos+pos[1])/self.speed)%self.size[1]))
)
)
i+=1
random.setstate(state)
class ParralaxStarfieldScroller:
def __init__(self, size, layers):
self.layers=layers
self.pos=[0,0]
self.bindall(size)
		debug("Initialized ParralaxStarfieldScroller with "+str(len(layers))+" layers at []:"+str(size))
def bindall(self, size):
for layer in self.layers:
layer.bind(size)
def render(self, surface):
for layer in self.layers:
layer.render(surface, self.pos)
def move(self, x, y):
self.pos[0]+=x
self.pos[1]+=y
def move_to(self, x, y):
self.pos[0]=x
self.pos[1]=y | [
"[email protected]"
] | |
fe3657f499c9f10b71a26ef58326f2f95d5634cb | 6c4486ab599fd5dea9006e41cdb89db54b47b77c | /tests/products/NGP_OBLIC_Create_Ballpark.py | 92f3d21ced7693e315f188073ea8a95c59c80077 | [] | no_license | kenito2050/Python-Page-Object-Framework-Example | 28ba61cdc1498374be4fc088a1348e0acb754dc2 | 2a3a3e6c74dc7ec7c9acce41030e9487925b9b0c | refs/heads/master | 2020-04-02T15:52:24.286208 | 2018-10-25T01:46:24 | 2018-10-25T01:46:24 | 154,587,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,830 | py | from xml.etree import ElementTree as ET
import xlrd
import time
from pages.producer_center.ballpark.ballpark_Indication import BallPark_Indication
from pages.producer_center.ballpark.ballpark_PAF import BallPark_PAF
from pages.producer_center.ballpark.ballpark_download_send import BallPark_Download_Send
from pages.producer_center.products_programs_page import ProductsAndPrograms
from pages.service_center.agents_page import AgentsPage
from pages.service_center.login_page import LoginPage
from pages.service_center.navigation_bar import NavigationBar
from utilities.Environments.Environments import Environments
from utilities.state_capitals.state_capitals import StateCapitals
from utilities.zip_codes_state_capitals.zip_codes import ZipCodes
from utilities.Faker.Data_Generator import Data_Generator
from utilities.Date_Time_Generator.Date_Time_Generator import Date_Time_Generator
from config_globals import *
class TestCreateQuote():
def test_login_search_for_agent_create_quote(self, browser, env):
Product = "NGP_OBLIC"
driver = browser
## Directory Locations
tests_directory = ROOT_DIR / 'tests'
framework_directory = ROOT_DIR
config_file_directory = CONFIG_PATH
test_case_directory = framework_directory / 'utilities' / 'Excel_Sheets' / 'Products'
test_results_directory = framework_directory / 'utilities' / 'Excel_Sheets' / 'Test_Results'
global test_summary
global test_scenario
global effective_date
global contract_class
global agent
global state
global revenue
global total_num_records
global _OLD_scenario
global limit
global deductible
# Open Test Scenario Workbook; Instantiate worksheet object
# 0 - First Worksheet
# 1 - Second Worksheet...etc
wb = xlrd.open_workbook(str(test_case_directory / Product) + '.xlsx')
sh = wb.sheet_by_index(3)
## Begin For Loop to iterate through Test Scenarios
i = 1
rows = sh.nrows
empty_cell = False
for i in range(1, sh.nrows):
cell_val = sh.cell(i, 0).value
if cell_val == '':
# If Cell Value is empty, set empty_cell to True
empty_cell = True
else:
# If Cell Value is NOT empty, set empty_cell to False
empty_cell = False
# Check to see if cell is NOT empty
# If cell is not empty, read in the values
if empty_cell == False:
test_summary = sh.cell_value(i, 0)
test_scenario = str(round(sh.cell_value(i, 1)))
effective_date = sh.cell_value(i, 2)
contract_class = sh.cell_value(i, 3)
agent = sh.cell_value(i, 4)
state = sh.cell_value(i, 5)
revenue = str(round(sh.cell_value(i, 6)))
total_num_records = (sh.cell_value(i, 7))
_OLD_scenario = sh.cell_value(i, 8)
limit = sh.cell_value(i, 9)
deductible = sh.cell_value(i, 10)
# Else, the cell is empty
# End the Loop
else:
break
# Create Instance of Data Generator
dg = Data_Generator()
# Create Company Name Value
company_name_string = dg.create_full_company_name()
# Create Street Address Value
address_value = dg.create_street_address()
city = StateCapitals.return_state_capital(state)
postal_code = ZipCodes.return_zip_codes(state)
# Create Instance of Date Time Generator
dtg = Date_Time_Generator()
# Create Today's Date
date_today = dtg.return_date_today()
# Access XML to retrieve login credentials
tree = ET.parse(str(config_file_directory / 'resources.xml'))
login_credentials = tree.getroot()
username = (login_credentials[1][0].text)
password = (login_credentials[1][1].text)
## Test Environment
## Select Appropriate URL based on the Environment Value (env)
baseURL = Environments.return_environments(env)
# Maximize Window; Launch URL
driver.get(baseURL)
driver.implicitly_wait(3)
# Call Login methods from Pages.home.login_page.py
lp = LoginPage(driver)
lp.login(username, password)
lp.click_login_button()
nb = NavigationBar(driver)
nb.click_agents()
ap = AgentsPage(driver)
ap.search_for_agent(agent)
ap.click_submit_new_application_as_agent()
pp = ProductsAndPrograms(driver)
pp.click_ballpark()
bp_PAF = BallPark_PAF(driver)
bp_PAF.switch_windows()
bp_PAF.start_ballpark_enter_faker_company_name_valid_zip(company_name_string, postal_code)
bp_PAF.select_contract_class(contract_class)
bp_PAF.click_ballpark_button()
bp_PAF.select_NGP_OBLIC()
time.sleep(3)
# Enter Ad Hoc Effective Date
# bp_PAF.enter_effective_date(ad_hoc_effectiveDate)
# Enter Today's Date as Effective Date
bp_PAF.enter_current_date(date_today)
time.sleep(3)
bp_PAF.enter_revenue(revenue)
bp_PAF.click_ballpark_button()
bp_Indication = BallPark_Indication(driver)
bp_Indication.click_Download_Send_Indication()
bp_Download_Send = BallPark_Download_Send(driver)
bp_Download_Send.input_email()
bp_Download_Send.click_send_email()
# Close Ballpark Window
driver.close()
# Switch to First Window (Service Center)
driver.switch_to.window(driver.window_handles[0])
# Wait
driver.implicitly_wait(3)
# Close Browser
driver.quit() | [
"[email protected]"
] | |
0d6f16c184e2ce478047763e87b14aba93f6cafa | 2b86301d5ad3fecaa5a300cabfe6b4dfc82b78ed | /venv/Lib/site-packages/tornado/web.py | bb2684eb81a0c172288230f3ed046e2ddf4ec3c2 | [
"MIT"
] | permissive | sserrot/champion_relationships | 72823bbe73e15973007e032470d7efdf72af3be0 | 91315d6b7f6e7e678d9f8083b4b3e63574e97d2b | refs/heads/master | 2022-12-21T05:15:36.780768 | 2021-12-05T15:19:09 | 2021-12-05T15:19:09 | 71,414,425 | 1 | 2 | MIT | 2022-12-18T07:42:59 | 2016-10-20T01:35:56 | Python | UTF-8 | Python | false | false | 138,290 | py | #
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""``tornado.web`` provides a simple web framework with asynchronous
features that allow it to scale to large numbers of open connections,
making it ideal for `long polling
<http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_.
Here is a simple "Hello, world" example app:
.. testcode::
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
application = tornado.web.Application([
(r"/", MainHandler),
])
application.listen(8888)
tornado.ioloop.IOLoop.current().start()
.. testoutput::
:hide:
See the :doc:`guide` for additional information.
Thread-safety notes
-------------------
In general, methods on `RequestHandler` and elsewhere in Tornado are
not thread-safe. In particular, methods such as
`~RequestHandler.write()`, `~RequestHandler.finish()`, and
`~RequestHandler.flush()` must only be called from the main thread. If
you use multiple threads it is important to use `.IOLoop.add_callback`
to transfer control back to the main thread before finishing the
request, or to limit your use of other threads to
`.IOLoop.run_in_executor` and ensure that your callbacks running in
the executor do not refer to Tornado objects.
"""
import base64
import binascii
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import http.cookies
from inspect import isclass
from io import BytesIO
import mimetypes
import numbers
import os.path
import re
import sys
import threading
import time
import tornado
import traceback
import types
import urllib.parse
from urllib.parse import urlencode
from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado import escape
from tornado import gen
from tornado.httpserver import HTTPServer
from tornado import httputil
from tornado import iostream
import tornado.locale
from tornado import locale
from tornado.log import access_log, app_log, gen_log
from tornado import template
from tornado.escape import utf8, _unicode
from tornado.routing import (
AnyMatches,
DefaultHostMatches,
HostMatches,
ReversibleRouter,
Rule,
ReversibleRuleRouter,
URLSpec,
_RuleList,
)
from tornado.util import ObjectDict, unicode_type, _websocket_mask
url = URLSpec
from typing import (
Dict,
Any,
Union,
Optional,
Awaitable,
Tuple,
List,
Callable,
Iterable,
Generator,
Type,
cast,
overload,
)
from types import TracebackType
import typing
if typing.TYPE_CHECKING:
from typing import Set # noqa: F401
# The following types are accepted by RequestHandler.set_header
# and related methods.
_HeaderTypes = Union[bytes, unicode_type, int, numbers.Integral, datetime.datetime]
_CookieSecretTypes = Union[str, bytes, Dict[int, str], Dict[int, bytes]]
MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1
"""The oldest signed value version supported by this version of Tornado.
Signed values older than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2
"""The newest signed value version supported by this version of Tornado.
Signed values newer than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_VERSION = 2
"""The signed value version produced by `.RequestHandler.create_signed_value`.
May be overridden by passing a ``version`` keyword argument.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
"""The oldest signed value accepted by `.RequestHandler.get_secure_cookie`.
May be overridden by passing a ``min_version`` keyword argument.
.. versionadded:: 3.2.1
"""
class _ArgDefaultMarker:
pass
_ARG_DEFAULT = _ArgDefaultMarker()
class RequestHandler(object):
"""Base class for HTTP request handlers.
Subclasses must define at least one of the methods defined in the
"Entry points" section below.
Applications should not construct `RequestHandler` objects
directly and subclasses should not override ``__init__`` (override
`~RequestHandler.initialize` instead).
"""
SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT", "OPTIONS")
_template_loaders = {} # type: Dict[str, template.BaseLoader]
_template_loader_lock = threading.Lock()
_remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
_stream_request_body = False
# Will be set in _execute.
_transforms = None # type: List[OutputTransform]
path_args = None # type: List[str]
path_kwargs = None # type: Dict[str, str]
def __init__(
self,
application: "Application",
request: httputil.HTTPServerRequest,
**kwargs: Any
) -> None:
super(RequestHandler, self).__init__()
self.application = application
self.request = request
self._headers_written = False
self._finished = False
self._auto_finish = True
self._prepared_future = None
self.ui = ObjectDict(
(n, self._ui_method(m)) for n, m in application.ui_methods.items()
)
# UIModules are available as both `modules` and `_tt_modules` in the
# template namespace. Historically only `modules` was available
# but could be clobbered by user additions to the namespace.
# The template {% module %} directive looks in `_tt_modules` to avoid
# possible conflicts.
self.ui["_tt_modules"] = _UIModuleNamespace(self, application.ui_modules)
self.ui["modules"] = self.ui["_tt_modules"]
self.clear()
assert self.request.connection is not None
# TODO: need to add set_close_callback to HTTPConnection interface
self.request.connection.set_close_callback( # type: ignore
self.on_connection_close
)
self.initialize(**kwargs) # type: ignore
def _initialize(self) -> None:
pass
initialize = _initialize # type: Callable[..., None]
"""Hook for subclass initialization. Called for each request.
A dictionary passed as the third argument of a ``URLSpec`` will be
supplied as keyword arguments to ``initialize()``.
Example::
class ProfileHandler(RequestHandler):
def initialize(self, database):
self.database = database
def get(self, username):
...
app = Application([
(r'/user/(.*)', ProfileHandler, dict(database=database)),
])
"""
@property
def settings(self) -> Dict[str, Any]:
"""An alias for `self.application.settings <Application.settings>`."""
return self.application.settings
def _unimplemented_method(self, *args: str, **kwargs: str) -> None:
raise HTTPError(405)
head = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
get = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
post = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
delete = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
patch = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
put = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
options = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
def prepare(self) -> Optional[Awaitable[None]]:
"""Called at the beginning of a request before `get`/`post`/etc.
Override this method to perform common initialization regardless
of the request method.
Asynchronous support: Use ``async def`` or decorate this method with
`.gen.coroutine` to make it asynchronous.
If this method returns an ``Awaitable`` execution will not proceed
until the ``Awaitable`` is done.
.. versionadded:: 3.1
Asynchronous support.
"""
pass
def on_finish(self) -> None:
"""Called after the end of a request.
Override this method to perform cleanup, logging, etc.
This method is a counterpart to `prepare`. ``on_finish`` may
not produce any output, as it is called after the response
has been sent to the client.
"""
pass
def on_connection_close(self) -> None:
"""Called in async handlers if the client closed the connection.
Override this to clean up resources associated with
long-lived connections. Note that this method is called only if
the connection was closed during asynchronous processing; if you
need to do cleanup after every request override `on_finish`
instead.
Proxies may keep a connection open for a time (perhaps
indefinitely) after the client has gone away, so this method
may not be called promptly after the end user closes their
connection.
"""
if _has_stream_request_body(self.__class__):
if not self.request._body_future.done():
self.request._body_future.set_exception(iostream.StreamClosedError())
self.request._body_future.exception()
def clear(self) -> None:
"""Resets all headers and content for this response."""
self._headers = httputil.HTTPHeaders(
{
"Server": "TornadoServer/%s" % tornado.version,
"Content-Type": "text/html; charset=UTF-8",
"Date": httputil.format_timestamp(time.time()),
}
)
self.set_default_headers()
self._write_buffer = [] # type: List[bytes]
self._status_code = 200
self._reason = httputil.responses[200]
def set_default_headers(self) -> None:
"""Override this to set HTTP headers at the beginning of the request.
For example, this is the place to set a custom ``Server`` header.
Note that setting such headers in the normal flow of request
processing may not do what you want, since headers may be reset
during error handling.
"""
pass
def set_status(self, status_code: int, reason: str = None) -> None:
"""Sets the status code for our response.
:arg int status_code: Response status code.
:arg str reason: Human-readable reason phrase describing the status
code. If ``None``, it will be filled in from
`http.client.responses` or "Unknown".
.. versionchanged:: 5.0
No longer validates that the response code is in
`http.client.responses`.
"""
self._status_code = status_code
if reason is not None:
self._reason = escape.native_str(reason)
else:
self._reason = httputil.responses.get(status_code, "Unknown")
def get_status(self) -> int:
"""Returns the status code for our response."""
return self._status_code
def set_header(self, name: str, value: _HeaderTypes) -> None:
"""Sets the given response header name and value.
All header values are converted to strings (`datetime` objects
are formatted according to the HTTP specification for the
``Date`` header).
"""
self._headers[name] = self._convert_header_value(value)
def add_header(self, name: str, value: _HeaderTypes) -> None:
"""Adds the given response header and value.
Unlike `set_header`, `add_header` may be called multiple times
to return multiple values for the same header.
"""
self._headers.add(name, self._convert_header_value(value))
def clear_header(self, name: str) -> None:
"""Clears an outgoing header, undoing a previous `set_header` call.
Note that this method does not apply to multi-valued headers
set by `add_header`.
"""
if name in self._headers:
del self._headers[name]
_INVALID_HEADER_CHAR_RE = re.compile(r"[\x00-\x1f]")
def _convert_header_value(self, value: _HeaderTypes) -> str:
# Convert the input value to a str. This type check is a bit
# subtle: The bytes case only executes on python 3, and the
# unicode case only executes on python 2, because the other
# cases are covered by the first match for str.
if isinstance(value, str):
retval = value
elif isinstance(value, bytes): # py3
# Non-ascii characters in headers are not well supported,
# but if you pass bytes, use latin1 so they pass through as-is.
retval = value.decode("latin1")
elif isinstance(value, unicode_type): # py2
# TODO: This is inconsistent with the use of latin1 above,
# but it's been that way for a long time. Should it change?
retval = escape.utf8(value)
elif isinstance(value, numbers.Integral):
# return immediately since we know the converted value will be safe
return str(value)
elif isinstance(value, datetime.datetime):
return httputil.format_timestamp(value)
else:
raise TypeError("Unsupported header value %r" % value)
# If \n is allowed into the header, it is possible to inject
# additional headers or split the request.
if RequestHandler._INVALID_HEADER_CHAR_RE.search(retval):
            raise ValueError("Unsafe header value %r" % retval)
return retval
@overload
def get_argument(self, name: str, default: str, strip: bool = True) -> str:
pass
@overload # noqa: F811
def get_argument(
self, name: str, default: _ArgDefaultMarker = _ARG_DEFAULT, strip: bool = True
) -> str:
pass
@overload # noqa: F811
def get_argument(
self, name: str, default: None, strip: bool = True
) -> Optional[str]:
pass
def get_argument( # noqa: F811
self,
name: str,
default: Union[None, str, _ArgDefaultMarker] = _ARG_DEFAULT,
strip: bool = True,
) -> Optional[str]:
"""Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the request more than once, we return the
last value.
This method searches both the query and body arguments.
"""
return self._get_argument(name, default, self.request.arguments, strip)
def get_arguments(self, name: str, strip: bool = True) -> List[str]:
"""Returns a list of the arguments with the given name.
If the argument is not present, returns an empty list.
This method searches both the query and body arguments.
"""
# Make sure `get_arguments` isn't accidentally being called with a
# positional argument that's assumed to be a default (like in
# `get_argument`.)
assert isinstance(strip, bool)
return self._get_arguments(name, self.request.arguments, strip)
def get_body_argument(
self,
name: str,
default: Union[None, str, _ArgDefaultMarker] = _ARG_DEFAULT,
strip: bool = True,
) -> Optional[str]:
"""Returns the value of the argument with the given name
from the request body.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
.. versionadded:: 3.2
"""
return self._get_argument(name, default, self.request.body_arguments, strip)
def get_body_arguments(self, name: str, strip: bool = True) -> List[str]:
"""Returns a list of the body arguments with the given name.
If the argument is not present, returns an empty list.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.body_arguments, strip)
def get_query_argument(
self,
name: str,
default: Union[None, str, _ArgDefaultMarker] = _ARG_DEFAULT,
strip: bool = True,
) -> Optional[str]:
"""Returns the value of the argument with the given name
from the request query string.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
.. versionadded:: 3.2
"""
return self._get_argument(name, default, self.request.query_arguments, strip)
def get_query_arguments(self, name: str, strip: bool = True) -> List[str]:
"""Returns a list of the query arguments with the given name.
If the argument is not present, returns an empty list.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.query_arguments, strip)
def _get_argument(
self,
name: str,
default: Union[None, str, _ArgDefaultMarker],
source: Dict[str, List[bytes]],
strip: bool = True,
) -> Optional[str]:
args = self._get_arguments(name, source, strip=strip)
if not args:
if isinstance(default, _ArgDefaultMarker):
raise MissingArgumentError(name)
return default
return args[-1]
def _get_arguments(
self, name: str, source: Dict[str, List[bytes]], strip: bool = True
) -> List[str]:
values = []
for v in source.get(name, []):
s = self.decode_argument(v, name=name)
if isinstance(s, unicode_type):
# Get rid of any weird control chars (unless decoding gave
# us bytes, in which case leave it alone)
s = RequestHandler._remove_control_chars_regex.sub(" ", s)
if strip:
s = s.strip()
values.append(s)
return values
def decode_argument(self, value: bytes, name: str = None) -> str:
"""Decodes an argument from the request.
The argument has been percent-decoded and is now a byte string.
By default, this method decodes the argument as utf-8 and returns
a unicode string, but this may be overridden in subclasses.
This method is used as a filter for both `get_argument()` and for
values extracted from the url and passed to `get()`/`post()`/etc.
The name of the argument is provided if known, but may be None
(e.g. for unnamed groups in the url regex).
"""
try:
return _unicode(value)
except UnicodeDecodeError:
raise HTTPError(
400, "Invalid unicode in %s: %r" % (name or "url", value[:40])
)
@property
def cookies(self) -> Dict[str, http.cookies.Morsel]:
"""An alias for
`self.request.cookies <.httputil.HTTPServerRequest.cookies>`."""
return self.request.cookies
def get_cookie(self, name: str, default: str = None) -> Optional[str]:
"""Returns the value of the request cookie with the given name.
If the named cookie is not present, returns ``default``.
This method only returns cookies that were present in the request.
It does not see the outgoing cookies set by `set_cookie` in this
handler.
"""
if self.request.cookies is not None and name in self.request.cookies:
return self.request.cookies[name].value
return default
def set_cookie(
self,
name: str,
value: Union[str, bytes],
domain: str = None,
expires: Union[float, Tuple, datetime.datetime] = None,
path: str = "/",
expires_days: int = None,
**kwargs: Any
) -> None:
"""Sets an outgoing cookie name/value with the given options.
Newly-set cookies are not immediately visible via `get_cookie`;
they are not present until the next request.
expires may be a numeric timestamp as returned by `time.time`,
a time tuple as returned by `time.gmtime`, or a
`datetime.datetime` object.
Additional keyword arguments are set on the cookies.Morsel
directly.
See https://docs.python.org/3/library/http.cookies.html#http.cookies.Morsel
for available attributes.
"""
# The cookie library only accepts type str, in both python 2 and 3
name = escape.native_str(name)
value = escape.native_str(value)
if re.search(r"[\x00-\x20]", name + value):
# Don't let us accidentally inject bad stuff
raise ValueError("Invalid cookie %r: %r" % (name, value))
if not hasattr(self, "_new_cookie"):
self._new_cookie = http.cookies.SimpleCookie()
if name in self._new_cookie:
del self._new_cookie[name]
self._new_cookie[name] = value
morsel = self._new_cookie[name]
if domain:
morsel["domain"] = domain
if expires_days is not None and not expires:
expires = datetime.datetime.utcnow() + datetime.timedelta(days=expires_days)
if expires:
morsel["expires"] = httputil.format_timestamp(expires)
if path:
morsel["path"] = path
for k, v in kwargs.items():
if k == "max_age":
k = "max-age"
# skip falsy values for httponly and secure flags because
# SimpleCookie sets them regardless
if k in ["httponly", "secure"] and not v:
continue
morsel[k] = v
def clear_cookie(self, name: str, path: str = "/", domain: str = None) -> None:
"""Deletes the cookie with the given name.
Due to limitations of the cookie protocol, you must pass the same
path and domain to clear a cookie as were used when that cookie
was set (but there is no way to find out on the server side
which values were used for a given cookie).
Similar to `set_cookie`, the effect of this method will not be
seen until the following request.
"""
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
self.set_cookie(name, value="", path=path, expires=expires, domain=domain)
def clear_all_cookies(self, path: str = "/", domain: str = None) -> None:
"""Deletes all the cookies the user sent with this request.
See `clear_cookie` for more information on the path and domain
parameters.
Similar to `set_cookie`, the effect of this method will not be
seen until the following request.
.. versionchanged:: 3.2
Added the ``path`` and ``domain`` parameters.
"""
for name in self.request.cookies:
self.clear_cookie(name, path=path, domain=domain)
def set_secure_cookie(
self,
name: str,
value: Union[str, bytes],
expires_days: int = 30,
version: int = None,
**kwargs: Any
) -> None:
"""Signs and timestamps a cookie so it cannot be forged.
You must specify the ``cookie_secret`` setting in your Application
to use this method. It should be a long, random sequence of bytes
to be used as the HMAC secret for the signature.
To read a cookie set with this method, use `get_secure_cookie()`.
Note that the ``expires_days`` parameter sets the lifetime of the
cookie in the browser, but is independent of the ``max_age_days``
parameter to `get_secure_cookie`.
Secure cookies may contain arbitrary byte values, not just unicode
strings (unlike regular cookies)
Similar to `set_cookie`, the effect of this method will not be
seen until the following request.
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.set_cookie(
name,
self.create_signed_value(name, value, version=version),
expires_days=expires_days,
**kwargs
)
def create_signed_value(
self, name: str, value: Union[str, bytes], version: int = None
) -> bytes:
"""Signs and timestamps a string so it cannot be forged.
Normally used via set_secure_cookie, but provided as a separate
method for non-cookie uses. To decode a value not stored
as a cookie use the optional value argument to get_secure_cookie.
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.require_setting("cookie_secret", "secure cookies")
secret = self.application.settings["cookie_secret"]
key_version = None
if isinstance(secret, dict):
if self.application.settings.get("key_version") is None:
raise Exception("key_version setting must be used for secret_key dicts")
key_version = self.application.settings["key_version"]
return create_signed_value(
secret, name, value, version=version, key_version=key_version
)
def get_secure_cookie(
self,
name: str,
value: str = None,
max_age_days: int = 31,
min_version: int = None,
) -> Optional[bytes]:
"""Returns the given signed cookie if it validates, or None.
The decoded cookie value is returned as a byte string (unlike
`get_cookie`).
Similar to `get_cookie`, this method only returns cookies that
were present in the request. It does not see outgoing cookies set by
`set_secure_cookie` in this handler.
.. versionchanged:: 3.2.1
Added the ``min_version`` argument. Introduced cookie version 2;
both versions 1 and 2 are accepted by default.
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return decode_signed_value(
self.application.settings["cookie_secret"],
name,
value,
max_age_days=max_age_days,
min_version=min_version,
)
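# Illustrative sketch (not part of this module): how set_secure_cookie and
# get_secure_cookie are typically paired inside a handler. The handler and
# cookie names below are hypothetical.
#
#     class LoginHandler(RequestHandler):
#         def post(self):
#             # Signs and timestamps the value; readable via get_secure_cookie("user").
#             self.set_secure_cookie("user", self.get_argument("name"))
#
#         def get(self):
#             user = self.get_secure_cookie("user")  # bytes or None
#             self.write({"user": user.decode("utf-8") if user else None})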
def get_secure_cookie_key_version(
self, name: str, value: str = None
) -> Optional[int]:
"""Returns the signing key version of the secure cookie.
The version is returned as int.
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
if value is None:
return None
return get_signature_key_version(value)
def redirect(self, url: str, permanent: bool = False, status: int = None) -> None:
"""Sends a redirect to the given (optionally relative) URL.
If the ``status`` argument is specified, that value is used as the
HTTP status code; otherwise either 301 (permanent) or 302
(temporary) is chosen based on the ``permanent`` argument.
The default is 302 (temporary).
"""
if self._headers_written:
raise Exception("Cannot redirect after headers have been written")
if status is None:
status = 301 if permanent else 302
else:
assert isinstance(status, int) and 300 <= status <= 399
self.set_status(status)
self.set_header("Location", utf8(url))
self.finish()
def write(self, chunk: Union[str, bytes, dict]) -> None:
"""Writes the given chunk to the output buffer.
To write the output to the network, use the `flush()` method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be ``application/json``.
(if you want to send JSON as a different ``Content-Type``, call
``set_header`` *after* calling ``write()``).
Note that lists are not converted to JSON because of a potential
cross-site security vulnerability. All JSON output should be
wrapped in a dictionary. More details at
http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
https://github.com/facebook/tornado/issues/1009
"""
if self._finished:
raise RuntimeError("Cannot write() after finish()")
if not isinstance(chunk, (bytes, unicode_type, dict)):
message = "write() only accepts bytes, unicode, and dict objects"
if isinstance(chunk, list):
message += (
". Lists not accepted for security reasons; see "
+ "http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write" # noqa: E501
)
raise TypeError(message)
if isinstance(chunk, dict):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
chunk = utf8(chunk)
self._write_buffer.append(chunk)
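# Illustrative sketch: writing a dict produces a JSON response with the
# application/json Content-Type, while strings/bytes are appended verbatim.
# The handler name is hypothetical.
#
#     class ApiHandler(RequestHandler):
#         def get(self):
#             # Sent as application/json; charset=UTF-8
#             self.write({"status": "ok", "items": [1, 2, 3]})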
def render(self, template_name: str, **kwargs: Any) -> "Future[None]":
"""Renders the template with the given arguments as the response.
``render()`` calls ``finish()``, so no other output methods can be called
after it.
Returns a `.Future` with the same semantics as the one returned by `finish`.
Awaiting this `.Future` is optional.
.. versionchanged:: 5.1
Now returns a `.Future` instead of ``None``.
"""
if self._finished:
raise RuntimeError("Cannot render() after finish()")
html = self.render_string(template_name, **kwargs)
# Insert the additional JS and CSS added by the modules on the page
js_embed = []
js_files = []
css_embed = []
css_files = []
html_heads = []
html_bodies = []
for module in getattr(self, "_active_modules", {}).values():
embed_part = module.embedded_javascript()
if embed_part:
js_embed.append(utf8(embed_part))
file_part = module.javascript_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
js_files.append(_unicode(file_part))
else:
js_files.extend(file_part)
embed_part = module.embedded_css()
if embed_part:
css_embed.append(utf8(embed_part))
file_part = module.css_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
css_files.append(_unicode(file_part))
else:
css_files.extend(file_part)
head_part = module.html_head()
if head_part:
html_heads.append(utf8(head_part))
body_part = module.html_body()
if body_part:
html_bodies.append(utf8(body_part))
if js_files:
# Maintain order of JavaScript files given by modules
js = self.render_linked_js(js_files)
sloc = html.rindex(b"</body>")
html = html[:sloc] + utf8(js) + b"\n" + html[sloc:]
if js_embed:
js_bytes = self.render_embed_js(js_embed)
sloc = html.rindex(b"</body>")
html = html[:sloc] + js_bytes + b"\n" + html[sloc:]
if css_files:
css = self.render_linked_css(css_files)
hloc = html.index(b"</head>")
html = html[:hloc] + utf8(css) + b"\n" + html[hloc:]
if css_embed:
css_bytes = self.render_embed_css(css_embed)
hloc = html.index(b"</head>")
html = html[:hloc] + css_bytes + b"\n" + html[hloc:]
if html_heads:
hloc = html.index(b"</head>")
html = html[:hloc] + b"".join(html_heads) + b"\n" + html[hloc:]
if html_bodies:
hloc = html.index(b"</body>")
html = html[:hloc] + b"".join(html_bodies) + b"\n" + html[hloc:]
return self.finish(html)
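# Illustrative sketch: render() loads a template (relative to template_path,
# or to the calling file) and finishes the response. Template and handler
# names are hypothetical.
#
#     class HomeHandler(RequestHandler):
#         def get(self):
#             self.render("home.html", title="Home", items=["a", "b"])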
def render_linked_js(self, js_files: Iterable[str]) -> str:
"""Default method used to render the final js links for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
paths = []
unique_paths = set() # type: Set[str]
for path in js_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
return "".join(
'<script src="'
+ escape.xhtml_escape(p)
+ '" type="text/javascript"></script>'
for p in paths
)
def render_embed_js(self, js_embed: Iterable[bytes]) -> bytes:
"""Default method used to render the final embedded js for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
return (
b'<script type="text/javascript">\n//<![CDATA[\n'
+ b"\n".join(js_embed)
+ b"\n//]]>\n</script>"
)
def render_linked_css(self, css_files: Iterable[str]) -> str:
"""Default method used to render the final css links for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
paths = []
unique_paths = set() # type: Set[str]
for path in css_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
return "".join(
'<link href="' + escape.xhtml_escape(p) + '" '
'type="text/css" rel="stylesheet"/>'
for p in paths
)
def render_embed_css(self, css_embed: Iterable[bytes]) -> bytes:
"""Default method used to render the final embedded css for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
return b'<style type="text/css">\n' + b"\n".join(css_embed) + b"\n</style>"
def render_string(self, template_name: str, **kwargs: Any) -> bytes:
"""Generate the given template with the given arguments.
We return the generated byte string (in utf8). To generate and
write a template as a response, use render() above.
"""
# If no template_path is specified, use the path of the calling file
template_path = self.get_template_path()
if not template_path:
frame = sys._getframe(0)
web_file = frame.f_code.co_filename
while frame.f_code.co_filename == web_file:
frame = frame.f_back
assert frame.f_code.co_filename is not None
template_path = os.path.dirname(frame.f_code.co_filename)
with RequestHandler._template_loader_lock:
if template_path not in RequestHandler._template_loaders:
loader = self.create_template_loader(template_path)
RequestHandler._template_loaders[template_path] = loader
else:
loader = RequestHandler._template_loaders[template_path]
t = loader.load(template_name)
namespace = self.get_template_namespace()
namespace.update(kwargs)
return t.generate(**namespace)
def get_template_namespace(self) -> Dict[str, Any]:
"""Returns a dictionary to be used as the default template namespace.
May be overridden by subclasses to add or modify values.
The results of this method will be combined with additional
defaults in the `tornado.template` module and keyword arguments
to `render` or `render_string`.
"""
namespace = dict(
handler=self,
request=self.request,
current_user=self.current_user,
locale=self.locale,
_=self.locale.translate,
pgettext=self.locale.pgettext,
static_url=self.static_url,
xsrf_form_html=self.xsrf_form_html,
reverse_url=self.reverse_url,
)
namespace.update(self.ui)
return namespace
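# Illustrative sketch: subclasses may extend the default namespace so every
# template sees extra helpers. The helper below is hypothetical.
#
#     class BaseHandler(RequestHandler):
#         def get_template_namespace(self):
#             namespace = super().get_template_namespace()
#             namespace["format_price"] = lambda cents: "$%.2f" % (cents / 100)
#             return namespace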
def create_template_loader(self, template_path: str) -> template.BaseLoader:
"""Returns a new template loader for the given path.
May be overridden by subclasses. By default returns a
directory-based loader on the given path, using the
``autoescape`` and ``template_whitespace`` application
settings. If a ``template_loader`` application setting is
supplied, uses that instead.
"""
settings = self.application.settings
if "template_loader" in settings:
return settings["template_loader"]
kwargs = {}
if "autoescape" in settings:
# autoescape=None means "no escaping", so we have to be sure
# to only pass this kwarg if the user asked for it.
kwargs["autoescape"] = settings["autoescape"]
if "template_whitespace" in settings:
kwargs["whitespace"] = settings["template_whitespace"]
return template.Loader(template_path, **kwargs)
def flush(self, include_footers: bool = False) -> "Future[None]":
"""Flushes the current output buffer to the network.
The returned `.Future` can be used for flow control: it resolves when
all flushed data has been written to the socket. Awaiting it before
issuing further writes avoids unbounded buffering of output.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
.. versionchanged:: 6.0
The ``callback`` argument was removed.
"""
assert self.request.connection is not None
chunk = b"".join(self._write_buffer)
self._write_buffer = []
if not self._headers_written:
self._headers_written = True
for transform in self._transforms:
assert chunk is not None
(
self._status_code,
self._headers,
chunk,
) = transform.transform_first_chunk(
self._status_code, self._headers, chunk, include_footers
)
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method == "HEAD":
chunk = b""
# Finalize the cookie headers (which have been stored in a side
# object so an outgoing cookie could be overwritten before it
# is sent).
if hasattr(self, "_new_cookie"):
for cookie in self._new_cookie.values():
self.add_header("Set-Cookie", cookie.OutputString(None))
start_line = httputil.ResponseStartLine("", self._status_code, self._reason)
return self.request.connection.write_headers(
start_line, self._headers, chunk
)
else:
for transform in self._transforms:
chunk = transform.transform_chunk(chunk, include_footers)
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method != "HEAD":
return self.request.connection.write(chunk)
else:
future = Future() # type: Future[None]
future.set_result(None)
return future
def finish(self, chunk: Union[str, bytes, dict] = None) -> "Future[None]":
"""Finishes this response, ending the HTTP request.
Passing a ``chunk`` to ``finish()`` is equivalent to passing that
chunk to ``write()`` and then calling ``finish()`` with no arguments.
Returns a `.Future` which may optionally be awaited to track the sending
of the response to the client. This `.Future` resolves when all the response
data has been sent, and raises an error if the connection is closed before all
data can be sent.
.. versionchanged:: 5.1
Now returns a `.Future` instead of ``None``.
"""
if self._finished:
raise RuntimeError("finish() called twice")
if chunk is not None:
self.write(chunk)
# Automatically support ETags and add the Content-Length header if
# we have not flushed any content yet.
if not self._headers_written:
if (
self._status_code == 200
and self.request.method in ("GET", "HEAD")
and "Etag" not in self._headers
):
self.set_etag_header()
if self.check_etag_header():
self._write_buffer = []
self.set_status(304)
if self._status_code in (204, 304) or (
self._status_code >= 100 and self._status_code < 200
):
assert not self._write_buffer, (
"Cannot send body with %s" % self._status_code
)
self._clear_headers_for_304()
elif "Content-Length" not in self._headers:
content_length = sum(len(part) for part in self._write_buffer)
self.set_header("Content-Length", content_length)
assert self.request.connection is not None
# Now that the request is finished, clear the callback we
# set on the HTTPConnection (which would otherwise prevent the
# garbage collection of the RequestHandler when there
# are keepalive connections)
self.request.connection.set_close_callback(None) # type: ignore
future = self.flush(include_footers=True)
self.request.connection.finish()
self._log()
self._finished = True
self.on_finish()
self._break_cycles()
return future
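# Illustrative sketch: awaiting the Future returned by finish() lets a
# coroutine handler observe when the response has been fully sent (it raises
# if the connection closed early). Handler name is hypothetical.
#
#     class PingHandler(RequestHandler):
#         async def get(self):
#             await self.finish("pong")
#             # At this point the response has been delivered (or an error raised).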
def detach(self) -> iostream.IOStream:
"""Take control of the underlying stream.
Returns the underlying `.IOStream` object and stops all
further HTTP processing. Intended for implementing protocols
like websockets that tunnel over an HTTP handshake.
This method is only supported when HTTP/1.1 is used.
.. versionadded:: 5.1
"""
self._finished = True
# TODO: add detach to HTTPConnection?
return self.request.connection.detach() # type: ignore
def _break_cycles(self) -> None:
# Break up a reference cycle between this handler and the
# _ui_module closures to allow for faster GC on CPython.
self.ui = None # type: ignore
def send_error(self, status_code: int = 500, **kwargs: Any) -> None:
"""Sends the given HTTP error code to the browser.
If `flush()` has already been called, it is not possible to send
an error, so this method will simply terminate the response.
If output has been written but not yet flushed, it will be discarded
and replaced with the error page.
Override `write_error()` to customize the error page that is returned.
Additional keyword arguments are passed through to `write_error`.
"""
if self._headers_written:
gen_log.error("Cannot send error response after headers written")
if not self._finished:
# If we get an error between writing headers and finishing,
# we are unlikely to be able to finish due to a
# Content-Length mismatch. Try anyway to release the
# socket.
try:
self.finish()
except Exception:
gen_log.error("Failed to flush partial response", exc_info=True)
return
self.clear()
reason = kwargs.get("reason")
if "exc_info" in kwargs:
exception = kwargs["exc_info"][1]
if isinstance(exception, HTTPError) and exception.reason:
reason = exception.reason
self.set_status(status_code, reason=reason)
try:
self.write_error(status_code, **kwargs)
except Exception:
app_log.error("Uncaught exception in write_error", exc_info=True)
if not self._finished:
self.finish()
def write_error(self, status_code: int, **kwargs: Any) -> None:
"""Override to implement custom error pages.
``write_error`` may call `write`, `render`, `set_header`, etc
to produce output as usual.
If this error was caused by an uncaught exception (including
HTTPError), an ``exc_info`` triple will be available as
``kwargs["exc_info"]``. Note that this exception may not be
the "current" exception for purposes of methods like
``sys.exc_info()`` or ``traceback.format_exc``.
"""
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
self.set_header("Content-Type", "text/plain")
for line in traceback.format_exception(*kwargs["exc_info"]):
self.write(line)
self.finish()
else:
self.finish(
"<html><title>%(code)d: %(message)s</title>"
"<body>%(code)d: %(message)s</body></html>"
% {"code": status_code, "message": self._reason}
)
@property
def locale(self) -> tornado.locale.Locale:
"""The locale for the current session.
Determined by either `get_user_locale`, which you can override to
set the locale based on, e.g., a user preference stored in a
database, or `get_browser_locale`, which uses the ``Accept-Language``
header.
.. versionchanged:: 4.1
Added a property setter.
"""
if not hasattr(self, "_locale"):
loc = self.get_user_locale()
if loc is not None:
self._locale = loc
else:
self._locale = self.get_browser_locale()
assert self._locale
return self._locale
@locale.setter
def locale(self, value: tornado.locale.Locale) -> None:
self._locale = value
def get_user_locale(self) -> Optional[tornado.locale.Locale]:
"""Override to determine the locale from the authenticated user.
If None is returned, we fall back to `get_browser_locale()`.
This method should return a `tornado.locale.Locale` object,
most likely obtained via a call like ``tornado.locale.get("en")``
"""
return None
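# Illustrative sketch: overriding get_user_locale() to honor a stored user
# preference, falling back to the Accept-Language header otherwise. The
# shape of the user object below is hypothetical.
#
#     class BaseHandler(RequestHandler):
#         def get_user_locale(self):
#             if self.current_user and self.current_user.get("locale"):
#                 return tornado.locale.get(self.current_user["locale"])
#             return None  # falls back to get_browser_locale()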
def get_browser_locale(self, default: str = "en_US") -> tornado.locale.Locale:
"""Determines the user's locale from ``Accept-Language`` header.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
"""
if "Accept-Language" in self.request.headers:
languages = self.request.headers["Accept-Language"].split(",")
locales = []
for language in languages:
parts = language.strip().split(";")
if len(parts) > 1 and parts[1].startswith("q="):
try:
score = float(parts[1][2:])
except (ValueError, TypeError):
score = 0.0
else:
score = 1.0
locales.append((parts[0], score))
if locales:
locales.sort(key=lambda pair: pair[1], reverse=True)
codes = [l[0] for l in locales]
return locale.get(*codes)
return locale.get(default)
@property
def current_user(self) -> Any:
"""The authenticated user for this request.
This is set in one of two ways:
* A subclass may override `get_current_user()`, which will be called
automatically the first time ``self.current_user`` is accessed.
`get_current_user()` will only be called once per request,
and is cached for future access::
def get_current_user(self):
user_cookie = self.get_secure_cookie("user")
if user_cookie:
return json.loads(user_cookie)
return None
* It may be set as a normal variable, typically from an overridden
`prepare()`::
@gen.coroutine
def prepare(self):
user_id_cookie = self.get_secure_cookie("user_id")
if user_id_cookie:
self.current_user = yield load_user(user_id_cookie)
Note that `prepare()` may be a coroutine while `get_current_user()`
may not, so the latter form is necessary if loading the user requires
asynchronous operations.
The user object may be any type of the application's choosing.
"""
if not hasattr(self, "_current_user"):
self._current_user = self.get_current_user()
return self._current_user
@current_user.setter
def current_user(self, value: Any) -> None:
self._current_user = value
def get_current_user(self) -> Any:
"""Override to determine the current user from, e.g., a cookie.
This method may not be a coroutine.
"""
return None
def get_login_url(self) -> str:
"""Override to customize the login URL based on the request.
By default, we use the ``login_url`` application setting.
"""
self.require_setting("login_url", "@tornado.web.authenticated")
return self.application.settings["login_url"]
def get_template_path(self) -> Optional[str]:
"""Override to customize template path for each handler.
By default, we use the ``template_path`` application setting.
Return None to load templates relative to the calling file.
"""
return self.application.settings.get("template_path")
@property
def xsrf_token(self) -> bytes:
"""The XSRF-prevention token for the current user/session.
To prevent cross-site request forgery, we set an '_xsrf' cookie
and include the same '_xsrf' value as an argument with all POST
requests. If the two do not match, we reject the form submission
as a potential forgery.
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
This property is of type `bytes`, but it contains only ASCII
characters. If a character string is required, there is no
need to base64-encode it; just decode the byte string as
UTF-8.
.. versionchanged:: 3.2.2
The xsrf token will now have a random mask applied in every
request, which makes it safe to include the token in pages
that are compressed. See http://breachattack.com for more
information on the issue fixed by this change. Old (version 1)
cookies will be converted to version 2 when this method is called
unless the ``xsrf_cookie_version`` `Application` setting is
set to 1.
.. versionchanged:: 4.3
The ``xsrf_cookie_kwargs`` `Application` setting may be
used to supply additional cookie options (which will be
passed directly to `set_cookie`). For example,
``xsrf_cookie_kwargs=dict(httponly=True, secure=True)``
will set the ``secure`` and ``httponly`` flags on the
``_xsrf`` cookie.
"""
if not hasattr(self, "_xsrf_token"):
version, token, timestamp = self._get_raw_xsrf_token()
output_version = self.settings.get("xsrf_cookie_version", 2)
cookie_kwargs = self.settings.get("xsrf_cookie_kwargs", {})
if output_version == 1:
self._xsrf_token = binascii.b2a_hex(token)
elif output_version == 2:
mask = os.urandom(4)
self._xsrf_token = b"|".join(
[
b"2",
binascii.b2a_hex(mask),
binascii.b2a_hex(_websocket_mask(mask, token)),
utf8(str(int(timestamp))),
]
)
else:
raise ValueError("unknown xsrf cookie version %d" % output_version)
if version is None:
if self.current_user and "expires_days" not in cookie_kwargs:
cookie_kwargs["expires_days"] = 30
self.set_cookie("_xsrf", self._xsrf_token, **cookie_kwargs)
return self._xsrf_token
def _get_raw_xsrf_token(self) -> Tuple[Optional[int], bytes, float]:
"""Read or generate the xsrf token in its raw form.
The raw_xsrf_token is a tuple containing:
* version: the version of the cookie from which this token was read,
or None if we generated a new token in this request.
* token: the raw token data; random (non-ascii) bytes.
* timestamp: the time this token was generated (will not be accurate
for version 1 cookies)
"""
if not hasattr(self, "_raw_xsrf_token"):
cookie = self.get_cookie("_xsrf")
if cookie:
version, token, timestamp = self._decode_xsrf_token(cookie)
else:
version, token, timestamp = None, None, None
if token is None:
version = None
token = os.urandom(16)
timestamp = time.time()
assert token is not None
assert timestamp is not None
self._raw_xsrf_token = (version, token, timestamp)
return self._raw_xsrf_token
def _decode_xsrf_token(
self, cookie: str
) -> Tuple[Optional[int], Optional[bytes], Optional[float]]:
"""Convert a cookie string into a the tuple form returned by
_get_raw_xsrf_token.
"""
try:
m = _signed_value_version_re.match(utf8(cookie))
if m:
version = int(m.group(1))
if version == 2:
_, mask_str, masked_token, timestamp_str = cookie.split("|")
mask = binascii.a2b_hex(utf8(mask_str))
token = _websocket_mask(mask, binascii.a2b_hex(utf8(masked_token)))
timestamp = int(timestamp_str)
return version, token, timestamp
else:
# Unknown versions raise here; the blanket except below turns
# this into "no token present" instead of failing the request.
raise Exception("Unknown xsrf cookie version")
else:
version = 1
try:
token = binascii.a2b_hex(utf8(cookie))
except (binascii.Error, TypeError):
token = utf8(cookie)
# We don't have a usable timestamp in older versions.
timestamp = int(time.time())
return (version, token, timestamp)
except Exception:
# Catch exceptions and return nothing instead of failing.
gen_log.debug("Uncaught exception in _decode_xsrf_token", exc_info=True)
return None, None, None
def check_xsrf_cookie(self) -> None:
"""Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.
To prevent cross-site request forgery, we set an ``_xsrf``
cookie and include the same value as a non-cookie
field with all ``POST`` requests. If the two do not match, we
reject the form submission as a potential forgery.
The ``_xsrf`` value may be set as either a form field named ``_xsrf``
or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
(the latter is accepted for compatibility with Django).
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
.. versionchanged:: 3.2.2
Added support for cookie version 2. Both versions 1 and 2 are
supported.
"""
# Prior to release 1.1.1, this check was ignored if the HTTP header
# ``X-Requested-With: XMLHTTPRequest`` was present. This exception
# has been shown to be insecure and has been removed. For more
# information please see
# http://www.djangoproject.com/weblog/2011/feb/08/security/
# http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
token = (
self.get_argument("_xsrf", None)
or self.request.headers.get("X-Xsrftoken")
or self.request.headers.get("X-Csrftoken")
)
if not token:
raise HTTPError(403, "'_xsrf' argument missing from POST")
_, token, _ = self._decode_xsrf_token(token)
_, expected_token, _ = self._get_raw_xsrf_token()
if not token:
raise HTTPError(403, "'_xsrf' argument has invalid format")
if not hmac.compare_digest(utf8(token), utf8(expected_token)):
raise HTTPError(403, "XSRF cookie does not match POST argument")
def xsrf_form_html(self) -> str:
"""An HTML ``<input/>`` element to be included with all POST forms.
It defines the ``_xsrf`` input value, which we check on all POST
requests to prevent cross-site request forgery. If you have set
the ``xsrf_cookies`` application setting, you must include this
HTML within all of your HTML forms.
In a template, this method should be called with ``{% module
xsrf_form_html() %}``
See `check_xsrf_cookie()` above for more information.
"""
return (
'<input type="hidden" name="_xsrf" value="'
+ escape.xhtml_escape(self.xsrf_token)
+ '"/>'
)
def static_url(self, path: str, include_host: bool = None, **kwargs: Any) -> str:
"""Returns a static URL for the given relative static file path.
This method requires you set the ``static_path`` setting in your
application (which specifies the root directory of your static
files).
This method returns a versioned url (by default appending
``?v=<signature>``), which allows the static files to be
cached indefinitely. This can be disabled by passing
``include_version=False`` (in the default implementation;
other static file implementations are not required to support
this, but they may support other options).
By default this method returns URLs relative to the current
host, but if ``include_host`` is true the URL returned will be
absolute. If this handler has an ``include_host`` attribute,
that value will be used as the default for all `static_url`
calls that do not pass ``include_host`` as a keyword argument.
"""
self.require_setting("static_path", "static_url")
get_url = self.settings.get(
"static_handler_class", StaticFileHandler
).make_static_url
if include_host is None:
include_host = getattr(self, "include_host", False)
if include_host:
base = self.request.protocol + "://" + self.request.host
else:
base = ""
return base + get_url(self.settings, path, **kwargs)
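# Illustrative sketch: static_url() returns a versioned URL for a file under
# static_path; it is usually called from templates. The path below is
# hypothetical.
#
#     class PageHandler(RequestHandler):
#         def get(self):
#             css = self.static_url("css/site.css")  # e.g. /static/css/site.css?v=<sig>
#             self.write({"stylesheet": css})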
def require_setting(self, name: str, feature: str = "this feature") -> None:
"""Raises an exception if the given app setting is not defined."""
if not self.application.settings.get(name):
raise Exception(
"You must define the '%s' setting in your "
"application to use %s" % (name, feature)
)
def reverse_url(self, name: str, *args: Any) -> str:
"""Alias for `Application.reverse_url`."""
return self.application.reverse_url(name, *args)
def compute_etag(self) -> Optional[str]:
"""Computes the etag header to be used for this request.
By default uses a hash of the content written so far.
May be overridden to provide custom etag implementations,
or may return None to disable tornado's default etag support.
"""
hasher = hashlib.sha1()
for part in self._write_buffer:
hasher.update(part)
return '"%s"' % hasher.hexdigest()
def set_etag_header(self) -> None:
"""Sets the response's Etag header using ``self.compute_etag()``.
Note: no header will be set if ``compute_etag()`` returns ``None``.
This method is called automatically when the request is finished.
"""
etag = self.compute_etag()
if etag is not None:
self.set_header("Etag", etag)
def check_etag_header(self) -> bool:
"""Checks the ``Etag`` header against requests's ``If-None-Match``.
Returns ``True`` if the request's Etag matches and a 304 should be
returned. For example::
self.set_etag_header()
if self.check_etag_header():
self.set_status(304)
return
This method is called automatically when the request is finished,
but may be called earlier for applications that override
`compute_etag` and want to do an early check for ``If-None-Match``
before completing the request. The ``Etag`` header should be set
(perhaps with `set_etag_header`) before calling this method.
"""
computed_etag = utf8(self._headers.get("Etag", ""))
# Find all weak and strong etag values from If-None-Match header
# because RFC 7232 allows multiple etag values in a single header.
etags = re.findall(
br'\*|(?:W/)?"[^"]*"', utf8(self.request.headers.get("If-None-Match", ""))
)
if not computed_etag or not etags:
return False
match = False
if etags[0] == b"*":
match = True
else:
# Use a weak comparison when comparing entity-tags.
def val(x: bytes) -> bytes:
return x[2:] if x.startswith(b"W/") else x
for etag in etags:
if val(etag) == val(computed_etag):
match = True
break
return match
async def _execute(
self, transforms: List["OutputTransform"], *args: bytes, **kwargs: bytes
) -> None:
"""Executes this request with the given output transforms."""
self._transforms = transforms
try:
if self.request.method not in self.SUPPORTED_METHODS:
raise HTTPError(405)
self.path_args = [self.decode_argument(arg) for arg in args]
self.path_kwargs = dict(
(k, self.decode_argument(v, name=k)) for (k, v) in kwargs.items()
)
# If XSRF cookies are turned on, reject form submissions without
# the proper cookie
if self.request.method not in (
"GET",
"HEAD",
"OPTIONS",
) and self.application.settings.get("xsrf_cookies"):
self.check_xsrf_cookie()
result = self.prepare()
if result is not None:
result = await result
if self._prepared_future is not None:
# Tell the Application we've finished with prepare()
# and are ready for the body to arrive.
future_set_result_unless_cancelled(self._prepared_future, None)
if self._finished:
return
if _has_stream_request_body(self.__class__):
# In streaming mode request.body is a Future that signals
# the body has been completely received. The Future has no
# result; the data has been passed to self.data_received
# instead.
try:
await self.request._body_future
except iostream.StreamClosedError:
return
method = getattr(self, self.request.method.lower())
result = method(*self.path_args, **self.path_kwargs)
if result is not None:
result = await result
if self._auto_finish and not self._finished:
self.finish()
except Exception as e:
try:
self._handle_request_exception(e)
except Exception:
app_log.error("Exception in exception handler", exc_info=True)
finally:
# Unset result to avoid circular references
result = None
if self._prepared_future is not None and not self._prepared_future.done():
# In case we failed before setting _prepared_future, do it
# now (to unblock the HTTP server). Note that this is not
# in a finally block to avoid GC issues prior to Python 3.4.
self._prepared_future.set_result(None)
def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]:
"""Implement this method to handle streamed request data.
Requires the `.stream_request_body` decorator.
May be a coroutine for flow control.
"""
raise NotImplementedError()
def _log(self) -> None:
"""Logs the current request.
Sort of deprecated since this functionality was moved to the
Application, but left in place for the benefit of existing apps
that have overridden this method.
"""
self.application.log_request(self)
def _request_summary(self) -> str:
return "%s %s (%s)" % (
self.request.method,
self.request.uri,
self.request.remote_ip,
)
def _handle_request_exception(self, e: BaseException) -> None:
if isinstance(e, Finish):
# Not an error; just finish the request without logging.
if not self._finished:
self.finish(*e.args)
return
try:
self.log_exception(*sys.exc_info())
except Exception:
# An error here should still get a best-effort send_error()
# to avoid leaking the connection.
app_log.error("Error in exception logger", exc_info=True)
if self._finished:
# Extra errors after the request has been finished should
# be logged, but there is no reason to continue to try and
# send a response.
return
if isinstance(e, HTTPError):
self.send_error(e.status_code, exc_info=sys.exc_info())
else:
self.send_error(500, exc_info=sys.exc_info())
def log_exception(
self,
typ: "Optional[Type[BaseException]]",
value: Optional[BaseException],
tb: Optional[TracebackType],
) -> None:
"""Override to customize logging of uncaught exceptions.
By default logs instances of `HTTPError` as warnings without
stack traces (on the ``tornado.general`` logger), and all
other exceptions as errors with stack traces (on the
``tornado.application`` logger).
.. versionadded:: 3.1
"""
if isinstance(value, HTTPError):
if value.log_message:
format = "%d %s: " + value.log_message
args = [value.status_code, self._request_summary()] + list(value.args)
gen_log.warning(format, *args)
else:
app_log.error( # type: ignore
"Uncaught exception %s\n%r",
self._request_summary(),
self.request,
exc_info=(typ, value, tb),
)
def _ui_module(self, name: str, module: Type["UIModule"]) -> Callable[..., str]:
def render(*args, **kwargs) -> str: # type: ignore
if not hasattr(self, "_active_modules"):
self._active_modules = {} # type: Dict[str, UIModule]
if name not in self._active_modules:
self._active_modules[name] = module(self)
rendered = self._active_modules[name].render(*args, **kwargs)
return rendered
return render
def _ui_method(self, method: Callable[..., str]) -> Callable[..., str]:
return lambda *args, **kwargs: method(self, *args, **kwargs)
def _clear_headers_for_304(self) -> None:
# 304 responses should not contain entity headers (defined in
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
# not explicitly allowed by
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
headers = [
"Allow",
"Content-Encoding",
"Content-Language",
"Content-Length",
"Content-MD5",
"Content-Range",
"Content-Type",
"Last-Modified",
]
for h in headers:
self.clear_header(h)
def stream_request_body(cls: Type[RequestHandler]) -> Type[RequestHandler]:
"""Apply to `RequestHandler` subclasses to enable streaming body support.
This decorator implies the following changes:
* `.HTTPServerRequest.body` is undefined, and body arguments will not
be included in `RequestHandler.get_argument`.
* `RequestHandler.prepare` is called when the request headers have been
read instead of after the entire body has been read.
* The subclass must define a method ``data_received(self, data):``, which
will be called zero or more times as data is available. Note that
if the request has an empty body, ``data_received`` may not be called.
* ``prepare`` and ``data_received`` may return Futures (such as via
``@gen.coroutine``), in which case the next method will not be called
until those futures have completed.
* The regular HTTP method (``post``, ``put``, etc) will be called after
the entire body has been read.
See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_
for example usage.
""" # noqa: E501
if not issubclass(cls, RequestHandler):
raise TypeError("expected subclass of RequestHandler, got %r", cls)
cls._stream_request_body = True
return cls
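# Illustrative sketch: a streaming upload handler using the decorator above.
# Names and behavior are hypothetical.
#
#     @stream_request_body
#     class UploadHandler(RequestHandler):
#         def prepare(self):
#             self._received = 0
#
#         def data_received(self, chunk):
#             self._received += len(chunk)  # or append the chunk to a file
#
#         def put(self, *args):
#             self.write({"bytes_received": self._received})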
def _has_stream_request_body(cls: Type[RequestHandler]) -> bool:
if not issubclass(cls, RequestHandler):
raise TypeError("expected subclass of RequestHandler, got %r", cls)
return cls._stream_request_body
def removeslash(
method: Callable[..., Optional[Awaitable[None]]]
) -> Callable[..., Optional[Awaitable[None]]]:
"""Use this decorator to remove trailing slashes from the request path.
For example, a request to ``/foo/`` would redirect to ``/foo`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/*'`` in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper( # type: ignore
self: RequestHandler, *args, **kwargs
) -> Optional[Awaitable[None]]:
if self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path.rstrip("/")
if uri: # don't try to redirect '/' to ''
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return None
else:
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
def addslash(
method: Callable[..., Optional[Awaitable[None]]]
) -> Callable[..., Optional[Awaitable[None]]]:
"""Use this decorator to add a missing trailing slash to the request path.
For example, a request to ``/foo`` would redirect to ``/foo/`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/?'`` in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper( # type: ignore
self: RequestHandler, *args, **kwargs
) -> Optional[Awaitable[None]]:
if not self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path + "/"
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return None
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
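# Illustrative sketch: pairing the decorators above with a permissive route
# pattern so both "/things" and "/things/" resolve. Handler and route names
# are hypothetical.
#
#     class ThingsHandler(RequestHandler):
#         @removeslash
#         def get(self):
#             self.write("canonical path is /things")
#
#     app = Application([(r"/things/*", ThingsHandler)])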
class _ApplicationRouter(ReversibleRuleRouter):
"""Routing implementation used internally by `Application`.
Provides a binding between `Application` and `RequestHandler`.
This implementation extends `~.routing.ReversibleRuleRouter` in a couple of ways:
* it allows `RequestHandler` subclasses to be used as `~.routing.Rule` targets and
* it allows a list/tuple of rules to be used as a `~.routing.Rule` target.
``process_rule`` implementation will substitute this list with an appropriate
`_ApplicationRouter` instance.
"""
def __init__(self, application: "Application", rules: _RuleList = None) -> None:
assert isinstance(application, Application)
self.application = application
super(_ApplicationRouter, self).__init__(rules)
def process_rule(self, rule: Rule) -> Rule:
rule = super(_ApplicationRouter, self).process_rule(rule)
if isinstance(rule.target, (list, tuple)):
rule.target = _ApplicationRouter( # type: ignore
self.application, rule.target
)
return rule
def get_target_delegate(
self, target: Any, request: httputil.HTTPServerRequest, **target_params: Any
) -> Optional[httputil.HTTPMessageDelegate]:
if isclass(target) and issubclass(target, RequestHandler):
return self.application.get_handler_delegate(
request, target, **target_params
)
return super(_ApplicationRouter, self).get_target_delegate(
target, request, **target_params
)
class Application(ReversibleRouter):
r"""A collection of request handlers that make up a web application.
Instances of this class are callable and can be passed directly to
HTTPServer to serve the application::
application = web.Application([
(r"/", MainPageHandler),
])
http_server = httpserver.HTTPServer(application)
http_server.listen(8080)
ioloop.IOLoop.current().start()
The constructor for this class takes in a list of `~.routing.Rule`
objects or tuples of values corresponding to the arguments of
`~.routing.Rule` constructor: ``(matcher, target, [target_kwargs], [name])``,
the values in square brackets being optional. The default matcher is
`~.routing.PathMatches`, so ``(regexp, target)`` tuples can also be used
instead of ``(PathMatches(regexp), target)``.
A common routing target is a `RequestHandler` subclass, but you can also
use lists of rules as a target, which create a nested routing configuration::
application = web.Application([
(HostMatches("example.com"), [
(r"/", MainPageHandler),
(r"/feed", FeedHandler),
]),
])
In addition to this you can use nested `~.routing.Router` instances,
`~.httputil.HTTPMessageDelegate` subclasses and callables as routing targets
(see `~.routing` module docs for more information).
When we receive requests, we iterate over the list in order and
instantiate an instance of the first request class whose regexp
matches the request path. The request class can be specified as
either a class object or a (fully-qualified) name.
A dictionary may be passed as the third element (``target_kwargs``)
of the tuple, which will be used as keyword arguments to the handler's
constructor and `~RequestHandler.initialize` method. This pattern
is used for the `StaticFileHandler` in this example (note that a
`StaticFileHandler` can be installed automatically with the
static_path setting described below)::
application = web.Application([
(r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
We support virtual hosts with the `add_handlers` method, which takes in
a host regular expression as the first argument::
application.add_handlers(r"www\.myhost\.com", [
(r"/article/([0-9]+)", ArticleHandler),
])
If there's no match for the current request's host, then ``default_host``
parameter value is matched against host regular expressions.
.. warning::
Applications that do not use TLS may be vulnerable to :ref:`DNS
rebinding <dnsrebinding>` attacks. This attack is especially
relevant to applications that only listen on ``127.0.0.1`` or
other private networks. Appropriate host patterns must be used
(instead of the default of ``r'.*'``) to prevent this risk. The
``default_host`` argument must not be used in applications that
may be vulnerable to DNS rebinding.
You can serve static files by sending the ``static_path`` setting
as a keyword argument. We will serve those files from the
``/static/`` URI (this is configurable with the
``static_url_prefix`` setting), and we will serve ``/favicon.ico``
and ``/robots.txt`` from the same directory. A custom subclass of
`StaticFileHandler` can be specified with the
``static_handler_class`` setting.
.. versionchanged:: 4.5
Integration with the new `tornado.routing` module.
"""
def __init__(
self,
handlers: _RuleList = None,
default_host: str = None,
transforms: List[Type["OutputTransform"]] = None,
**settings: Any
) -> None:
if transforms is None:
self.transforms = [] # type: List[Type[OutputTransform]]
if settings.get("compress_response") or settings.get("gzip"):
self.transforms.append(GZipContentEncoding)
else:
self.transforms = transforms
self.default_host = default_host
self.settings = settings
self.ui_modules = {
"linkify": _linkify,
"xsrf_form_html": _xsrf_form_html,
"Template": TemplateModule,
}
self.ui_methods = {} # type: Dict[str, Callable[..., str]]
self._load_ui_modules(settings.get("ui_modules", {}))
self._load_ui_methods(settings.get("ui_methods", {}))
if self.settings.get("static_path"):
path = self.settings["static_path"]
handlers = list(handlers or [])
static_url_prefix = settings.get("static_url_prefix", "/static/")
static_handler_class = settings.get(
"static_handler_class", StaticFileHandler
)
static_handler_args = settings.get("static_handler_args", {})
static_handler_args["path"] = path
for pattern in [
re.escape(static_url_prefix) + r"(.*)",
r"/(favicon\.ico)",
r"/(robots\.txt)",
]:
handlers.insert(0, (pattern, static_handler_class, static_handler_args))
if self.settings.get("debug"):
self.settings.setdefault("autoreload", True)
self.settings.setdefault("compiled_template_cache", False)
self.settings.setdefault("static_hash_cache", False)
self.settings.setdefault("serve_traceback", True)
self.wildcard_router = _ApplicationRouter(self, handlers)
self.default_router = _ApplicationRouter(
self, [Rule(AnyMatches(), self.wildcard_router)]
)
# Automatically reload modified modules
if self.settings.get("autoreload"):
from tornado import autoreload
autoreload.start()
def listen(self, port: int, address: str = "", **kwargs: Any) -> HTTPServer:
"""Starts an HTTP server for this application on the given port.
This is a convenience alias for creating an `.HTTPServer`
object and calling its listen method. Keyword arguments not
supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
`.HTTPServer` constructor. For advanced uses
(e.g. multi-process mode), do not use this method; create an
`.HTTPServer` and call its
`.TCPServer.bind`/`.TCPServer.start` methods directly.
Note that after calling this method you still need to call
``IOLoop.current().start()`` to start the server.
Returns the `.HTTPServer` object.
.. versionchanged:: 4.3
Now returns the `.HTTPServer` object.
"""
server = HTTPServer(self, **kwargs)
server.listen(port, address)
return server
def add_handlers(self, host_pattern: str, host_handlers: _RuleList) -> None:
"""Appends the given handlers to our handler list.
Host patterns are processed sequentially in the order they were
added. All matching patterns will be considered.
"""
host_matcher = HostMatches(host_pattern)
rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers))
self.default_router.rules.insert(-1, rule)
if self.default_host is not None:
self.wildcard_router.add_rules(
[(DefaultHostMatches(self, host_matcher.host_pattern), host_handlers)]
)
def add_transform(self, transform_class: Type["OutputTransform"]) -> None:
self.transforms.append(transform_class)
def _load_ui_methods(self, methods: Any) -> None:
if isinstance(methods, types.ModuleType):
self._load_ui_methods(dict((n, getattr(methods, n)) for n in dir(methods)))
elif isinstance(methods, list):
for m in methods:
self._load_ui_methods(m)
else:
for name, fn in methods.items():
if (
not name.startswith("_")
and hasattr(fn, "__call__")
and name[0].lower() == name[0]
):
self.ui_methods[name] = fn
def _load_ui_modules(self, modules: Any) -> None:
if isinstance(modules, types.ModuleType):
self._load_ui_modules(dict((n, getattr(modules, n)) for n in dir(modules)))
elif isinstance(modules, list):
for m in modules:
self._load_ui_modules(m)
else:
assert isinstance(modules, dict)
for name, cls in modules.items():
try:
if issubclass(cls, UIModule):
self.ui_modules[name] = cls
except TypeError:
pass
def __call__(
self, request: httputil.HTTPServerRequest
) -> Optional[Awaitable[None]]:
# Legacy HTTPServer interface
dispatcher = self.find_handler(request)
return dispatcher.execute()
def find_handler(
self, request: httputil.HTTPServerRequest, **kwargs: Any
) -> "_HandlerDelegate":
route = self.default_router.find_handler(request)
if route is not None:
return cast("_HandlerDelegate", route)
if self.settings.get("default_handler_class"):
return self.get_handler_delegate(
request,
self.settings["default_handler_class"],
self.settings.get("default_handler_args", {}),
)
return self.get_handler_delegate(request, ErrorHandler, {"status_code": 404})
def get_handler_delegate(
self,
request: httputil.HTTPServerRequest,
target_class: Type[RequestHandler],
target_kwargs: Dict[str, Any] = None,
path_args: List[bytes] = None,
path_kwargs: Dict[str, bytes] = None,
) -> "_HandlerDelegate":
"""Returns `~.httputil.HTTPMessageDelegate` that can serve a request
for application and `RequestHandler` subclass.
:arg httputil.HTTPServerRequest request: current HTTP request.
:arg RequestHandler target_class: a `RequestHandler` class.
:arg dict target_kwargs: keyword arguments for ``target_class`` constructor.
:arg list path_args: positional arguments for ``target_class`` HTTP method that
will be executed while handling a request (``get``, ``post`` or any other).
:arg dict path_kwargs: keyword arguments for ``target_class`` HTTP method.
"""
return _HandlerDelegate(
self, request, target_class, target_kwargs, path_args, path_kwargs
)
def reverse_url(self, name: str, *args: Any) -> str:
"""Returns a URL path for handler named ``name``
The handler must be added to the application as a named `URLSpec`.
Args will be substituted for capturing groups in the `URLSpec` regex.
They will be converted to strings if necessary, encoded as utf8,
and url-escaped.
"""
reversed_url = self.default_router.reverse_url(name, *args)
if reversed_url is not None:
return reversed_url
raise KeyError("%s not found in named urls" % name)
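# Illustrative sketch: reverse_url() resolves handlers registered with a
# named url spec; the name, pattern, and handler below are hypothetical.
#
#     app = Application([
#         url(r"/article/([0-9]+)", ArticleHandler, name="article"),
#     ])
#     app.reverse_url("article", 42)  # -> "/article/42"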
def log_request(self, handler: RequestHandler) -> None:
"""Writes a completed HTTP request to the logs.
By default writes to the python root logger. To change
this behavior either subclass Application and override this method,
or pass a function in the application settings dictionary as
``log_function``.
"""
if "log_function" in self.settings:
self.settings["log_function"](handler)
return
if handler.get_status() < 400:
log_method = access_log.info
elif handler.get_status() < 500:
log_method = access_log.warning
else:
log_method = access_log.error
request_time = 1000.0 * handler.request.request_time()
log_method(
"%d %s %.2fms",
handler.get_status(),
handler._request_summary(),
request_time,
)
class _HandlerDelegate(httputil.HTTPMessageDelegate):
def __init__(
self,
application: Application,
request: httputil.HTTPServerRequest,
handler_class: Type[RequestHandler],
handler_kwargs: Optional[Dict[str, Any]],
path_args: Optional[List[bytes]],
path_kwargs: Optional[Dict[str, bytes]],
) -> None:
self.application = application
self.connection = request.connection
self.request = request
self.handler_class = handler_class
self.handler_kwargs = handler_kwargs or {}
self.path_args = path_args or []
self.path_kwargs = path_kwargs or {}
self.chunks = [] # type: List[bytes]
self.stream_request_body = _has_stream_request_body(self.handler_class)
def headers_received(
self,
start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
headers: httputil.HTTPHeaders,
) -> Optional[Awaitable[None]]:
if self.stream_request_body:
self.request._body_future = Future()
return self.execute()
return None
def data_received(self, data: bytes) -> Optional[Awaitable[None]]:
if self.stream_request_body:
return self.handler.data_received(data)
else:
self.chunks.append(data)
return None
def finish(self) -> None:
if self.stream_request_body:
future_set_result_unless_cancelled(self.request._body_future, None)
else:
self.request.body = b"".join(self.chunks)
self.request._parse_body()
self.execute()
def on_connection_close(self) -> None:
if self.stream_request_body:
self.handler.on_connection_close()
else:
self.chunks = None # type: ignore
def execute(self) -> Optional[Awaitable[None]]:
# If template cache is disabled (usually in the debug mode),
# re-compile templates and reload static files on every
# request so you don't need to restart to see changes
if not self.application.settings.get("compiled_template_cache", True):
with RequestHandler._template_loader_lock:
for loader in RequestHandler._template_loaders.values():
loader.reset()
if not self.application.settings.get("static_hash_cache", True):
StaticFileHandler.reset()
self.handler = self.handler_class(
self.application, self.request, **self.handler_kwargs
)
transforms = [t(self.request) for t in self.application.transforms]
if self.stream_request_body:
self.handler._prepared_future = Future()
# Note that if an exception escapes handler._execute it will be
# trapped in the Future it returns (which we are ignoring here,
# leaving it to be logged when the Future is GC'd).
# However, that shouldn't happen because _execute has a blanket
# except handler, and we cannot easily access the IOLoop here to
# call add_future (because of the requirement to remain compatible
# with WSGI)
fut = gen.convert_yielded(
self.handler._execute(transforms, *self.path_args, **self.path_kwargs)
)
fut.add_done_callback(lambda f: f.result())
# If we are streaming the request body, then execute() is finished
# when the handler has prepared to receive the body. If not,
# it doesn't matter when execute() finishes (so we return None)
return self.handler._prepared_future
class HTTPError(Exception):
"""An exception that will turn into an HTTP error response.
Raising an `HTTPError` is a convenient alternative to calling
`RequestHandler.send_error` since it automatically ends the
current function.
To customize the response sent with an `HTTPError`, override
`RequestHandler.write_error`.
:arg int status_code: HTTP status code. Must be listed in
`httplib.responses <http.client.responses>` unless the ``reason``
keyword argument is given.
:arg str log_message: Message to be written to the log for this error
(will not be shown to the user unless the `Application` is in debug
mode). May contain ``%s``-style placeholders, which will be filled
in with remaining positional parameters.
:arg str reason: Keyword-only argument. The HTTP "reason" phrase
to pass in the status line along with ``status_code``. Normally
determined automatically from ``status_code``, but can be used
to use a non-standard numeric code.
"""
def __init__(
self, status_code: int = 500, log_message: str = None, *args: Any, **kwargs: Any
) -> None:
self.status_code = status_code
self.log_message = log_message
self.args = args
self.reason = kwargs.get("reason", None)
if log_message and not args:
self.log_message = log_message.replace("%", "%%")
def __str__(self) -> str:
message = "HTTP %d: %s" % (
self.status_code,
self.reason or httputil.responses.get(self.status_code, "Unknown"),
)
if self.log_message:
return message + " (" + (self.log_message % self.args) + ")"
else:
return message
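# Illustrative sketch: raising HTTPError with a log_message containing
# %s-style placeholders; the message is logged but not shown to the user
# unless the application is in debug mode. The lookup below is hypothetical.
#
#     def get(self, item_id):
#         item = self.db.get(item_id)
#         if item is None:
#             raise HTTPError(404, "item %s not found", item_id)
#         self.write(item)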
class Finish(Exception):
"""An exception that ends the request without producing an error response.
When `Finish` is raised in a `RequestHandler`, the request will
end (calling `RequestHandler.finish` if it hasn't already been
called), but the error-handling methods (including
`RequestHandler.write_error`) will not be called.
If `Finish()` was created with no arguments, the pending response
will be sent as-is. If `Finish()` was given an argument, that
argument will be passed to `RequestHandler.finish()`.
This can be a more convenient way to implement custom error pages
than overriding ``write_error`` (especially in library code)::
if self.current_user is None:
self.set_status(401)
self.set_header('WWW-Authenticate', 'Basic realm="something"')
raise Finish()
.. versionchanged:: 4.3
Arguments passed to ``Finish()`` will be passed on to
`RequestHandler.finish`.
"""
pass
class MissingArgumentError(HTTPError):
"""Exception raised by `RequestHandler.get_argument`.
This is a subclass of `HTTPError`, so if it is uncaught a 400 response
code will be used instead of 500 (and a stack trace will not be logged).
.. versionadded:: 3.1
"""
def __init__(self, arg_name: str) -> None:
super(MissingArgumentError, self).__init__(
400, "Missing argument %s" % arg_name
)
self.arg_name = arg_name
class ErrorHandler(RequestHandler):
"""Generates an error response with ``status_code`` for all requests."""
def initialize(self, status_code: int) -> None:
self.set_status(status_code)
def prepare(self) -> None:
raise HTTPError(self._status_code)
def check_xsrf_cookie(self) -> None:
# POSTs to an ErrorHandler don't actually have side effects,
# so we don't need to check the xsrf token. This allows POSTs
# to the wrong url to return a 404 instead of 403.
pass
class RedirectHandler(RequestHandler):
"""Redirects the client to the given URL for all GET requests.
You should provide the keyword argument ``url`` to the handler, e.g.::
application = web.Application([
(r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
])
`RedirectHandler` supports regular expression substitutions. E.g., to
swap the first and second parts of a path while preserving the remainder::
application = web.Application([
(r"/(.*?)/(.*?)/(.*)", web.RedirectHandler, {"url": "/{1}/{0}/{2}"}),
])
The final URL is formatted with `str.format` and the substrings that match
the capturing groups. In the above example, a request to "/a/b/c" would be
formatted like::
str.format("/{1}/{0}/{2}", "a", "b", "c") # -> "/b/a/c"
Use Python's :ref:`format string syntax <formatstrings>` to customize how
values are substituted.
.. versionchanged:: 4.5
Added support for substitutions into the destination URL.
.. versionchanged:: 5.0
If any query arguments are present, they will be copied to the
destination URL.
"""
def initialize(self, url: str, permanent: bool = True) -> None:
self._url = url
self._permanent = permanent
def get(self, *args: Any) -> None:
to_url = self._url.format(*args)
if self.request.query_arguments:
# TODO: figure out typing for the next line.
to_url = httputil.url_concat(
to_url,
list(httputil.qs_to_qsl(self.request.query_arguments)), # type: ignore
)
self.redirect(to_url, permanent=self._permanent)
class StaticFileHandler(RequestHandler):
"""A simple handler that can serve static content from a directory.
A `StaticFileHandler` is configured automatically if you pass the
``static_path`` keyword argument to `Application`. This handler
can be customized with the ``static_url_prefix``, ``static_handler_class``,
and ``static_handler_args`` settings.
To map an additional path to this handler for a static data directory
you would add a line to your application like::
application = web.Application([
(r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
The handler constructor requires a ``path`` argument, which specifies the
local root directory of the content to be served.
Note that a capture group in the regex is required to parse the value for
the ``path`` argument to the get() method (different than the constructor
argument above); see `URLSpec` for details.
To serve a file like ``index.html`` automatically when a directory is
requested, set ``static_handler_args=dict(default_filename="index.html")``
in your application settings, or add ``default_filename`` as an initializer
argument for your ``StaticFileHandler``.
To maximize the effectiveness of browser caching, this class supports
versioned urls (by default using the argument ``?v=``). If a version
is given, we instruct the browser to cache this file indefinitely.
`make_static_url` (also available as `RequestHandler.static_url`) can
be used to construct a versioned url.
This handler is intended primarily for use in development and light-duty
file serving; for heavy traffic it will be more efficient to use
a dedicated static file server (such as nginx or Apache). We support
the HTTP ``Accept-Ranges`` mechanism to return partial content (because
some browsers require this functionality to be present to seek in
HTML5 audio or video).
**Subclassing notes**
This class is designed to be extensible by subclassing, but because
of the way static urls are generated with class methods rather than
instance methods, the inheritance patterns are somewhat unusual.
Be sure to use the ``@classmethod`` decorator when overriding a
class method. Instance methods may use the attributes ``self.path``
``self.absolute_path``, and ``self.modified``.
Subclasses should only override methods discussed in this section;
overriding other methods is error-prone. Overriding
``StaticFileHandler.get`` is particularly problematic due to the
tight coupling with ``compute_etag`` and other methods.
To change the way static urls are generated (e.g. to match the behavior
of another server or CDN), override `make_static_url`, `parse_url_path`,
`get_cache_time`, and/or `get_version`.
To replace all interaction with the filesystem (e.g. to serve
static content from a database), override `get_content`,
`get_content_size`, `get_modified_time`, `get_absolute_path`, and
`validate_absolute_path`.
.. versionchanged:: 3.1
Many of the methods for subclasses were added in Tornado 3.1.
"""
CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years
_static_hashes = {} # type: Dict[str, Optional[str]]
_lock = threading.Lock() # protects _static_hashes
def initialize(self, path: str, default_filename: str = None) -> None:
self.root = path
self.default_filename = default_filename
@classmethod
def reset(cls) -> None:
with cls._lock:
cls._static_hashes = {}
def head(self, path: str) -> Awaitable[None]:
return self.get(path, include_body=False)
async def get(self, path: str, include_body: bool = True) -> None:
# Set up our path instance variables.
self.path = self.parse_url_path(path)
del path # make sure we don't refer to path instead of self.path again
absolute_path = self.get_absolute_path(self.root, self.path)
self.absolute_path = self.validate_absolute_path(self.root, absolute_path)
if self.absolute_path is None:
return
self.modified = self.get_modified_time()
self.set_headers()
if self.should_return_304():
self.set_status(304)
return
request_range = None
range_header = self.request.headers.get("Range")
if range_header:
# As per RFC 2616 14.16, if an invalid Range header is specified,
# the request will be treated as if the header didn't exist.
request_range = httputil._parse_request_range(range_header)
size = self.get_content_size()
if request_range:
start, end = request_range
if start is not None and start < 0:
start += size
if start < 0:
start = 0
if (
start is not None
and (start >= size or (end is not None and start >= end))
) or end == 0:
# As per RFC 2616 14.35.1, a range is not satisfiable only: if
# the first requested byte is equal to or greater than the
# content, or when a suffix with length 0 is specified.
# https://tools.ietf.org/html/rfc7233#section-2.1
# A byte-range-spec is invalid if the last-byte-pos value is present
# and less than the first-byte-pos.
self.set_status(416) # Range Not Satisfiable
self.set_header("Content-Type", "text/plain")
self.set_header("Content-Range", "bytes */%s" % (size,))
return
if end is not None and end > size:
# Clients sometimes blindly use a large range to limit their
# download size; cap the endpoint at the actual file size.
end = size
# Note: only return HTTP 206 if less than the entire range has been
# requested. Not only is this semantically correct, but Chrome
# refuses to play audio if it gets an HTTP 206 in response to
# ``Range: bytes=0-``.
if size != (end or size) - (start or 0):
self.set_status(206) # Partial Content
self.set_header(
"Content-Range", httputil._get_content_range(start, end, size)
)
else:
start = end = None
if start is not None and end is not None:
content_length = end - start
elif end is not None:
content_length = end
elif start is not None:
content_length = size - start
else:
content_length = size
self.set_header("Content-Length", content_length)
if include_body:
content = self.get_content(self.absolute_path, start, end)
if isinstance(content, bytes):
content = [content]
for chunk in content:
try:
self.write(chunk)
await self.flush()
except iostream.StreamClosedError:
return
else:
assert self.request.method == "HEAD"
def compute_etag(self) -> Optional[str]:
"""Sets the ``Etag`` header based on static url version.
This allows efficient ``If-None-Match`` checks against cached
versions, and sends the correct ``Etag`` for a partial response
(i.e. the same ``Etag`` as the full file).
.. versionadded:: 3.1
"""
assert self.absolute_path is not None
version_hash = self._get_cached_version(self.absolute_path)
if not version_hash:
return None
return '"%s"' % (version_hash,)
def set_headers(self) -> None:
"""Sets the content and caching headers on the response.
.. versionadded:: 3.1
"""
self.set_header("Accept-Ranges", "bytes")
self.set_etag_header()
if self.modified is not None:
self.set_header("Last-Modified", self.modified)
content_type = self.get_content_type()
if content_type:
self.set_header("Content-Type", content_type)
cache_time = self.get_cache_time(self.path, self.modified, content_type)
if cache_time > 0:
self.set_header(
"Expires",
datetime.datetime.utcnow() + datetime.timedelta(seconds=cache_time),
)
self.set_header("Cache-Control", "max-age=" + str(cache_time))
self.set_extra_headers(self.path)
def should_return_304(self) -> bool:
"""Returns True if the headers indicate that we should return 304.
.. versionadded:: 3.1
"""
# If client sent If-None-Match, use it, ignore If-Modified-Since
if self.request.headers.get("If-None-Match"):
return self.check_etag_header()
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.get("If-Modified-Since")
if ims_value is not None:
date_tuple = email.utils.parsedate(ims_value)
if date_tuple is not None:
if_since = datetime.datetime(*date_tuple[:6])
assert self.modified is not None
if if_since >= self.modified:
return True
return False
@classmethod
def get_absolute_path(cls, root: str, path: str) -> str:
"""Returns the absolute location of ``path`` relative to ``root``.
``root`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting).
This class method may be overridden in subclasses. By default
it returns a filesystem path, but other strings may be used
as long as they are unique and understood by the subclass's
overridden `get_content`.
.. versionadded:: 3.1
"""
abspath = os.path.abspath(os.path.join(root, path))
return abspath
def validate_absolute_path(self, root: str, absolute_path: str) -> Optional[str]:
"""Validate and return the absolute path.
``root`` is the configured path for the `StaticFileHandler`,
and ``path`` is the result of `get_absolute_path`
This is an instance method called during request processing,
so it may raise `HTTPError` or use methods like
`RequestHandler.redirect` (return None after redirecting to
halt further processing). This is where 404 errors for missing files
are generated.
This method may modify the path before returning it, but note that
any such modifications will not be understood by `make_static_url`.
In instance methods, this method's result is available as
``self.absolute_path``.
.. versionadded:: 3.1
"""
# os.path.abspath strips a trailing /.
# We must add it back to `root` so that we only match files
# in a directory named `root` instead of files starting with
# that prefix.
root = os.path.abspath(root)
if not root.endswith(os.path.sep):
# abspath always removes a trailing slash, except when
# root is '/'. This is an unusual case, but several projects
# have independently discovered this technique to disable
# Tornado's path validation and (hopefully) do their own,
# so we need to support it.
root += os.path.sep
# The trailing slash also needs to be temporarily added back
# the requested path so a request to root/ will match.
if not (absolute_path + os.path.sep).startswith(root):
raise HTTPError(403, "%s is not in root static directory", self.path)
if os.path.isdir(absolute_path) and self.default_filename is not None:
# need to look at the request.path here for when path is empty
# but there is some prefix to the path that was already
# trimmed by the routing
if not self.request.path.endswith("/"):
self.redirect(self.request.path + "/", permanent=True)
return None
absolute_path = os.path.join(absolute_path, self.default_filename)
if not os.path.exists(absolute_path):
raise HTTPError(404)
if not os.path.isfile(absolute_path):
raise HTTPError(403, "%s is not a file", self.path)
return absolute_path
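    # Illustrative note (not in the original source): with root="/var/www", os.path.abspath
    # collapses a request that resolves to "/var/www/../etc/passwd" into "/var/etc/passwd",
    # which does not start with "/var/www/" once the trailing separator is re-added above,
    # so the request raises 403 instead of escaping the static root. Paths are hypothetical.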
@classmethod
def get_content(
cls, abspath: str, start: int = None, end: int = None
) -> Generator[bytes, None, None]:
"""Retrieve the content of the requested resource which is located
at the given absolute path.
This class method may be overridden by subclasses. Note that its
signature is different from other overridable class methods
(no ``settings`` argument); this is deliberate to ensure that
``abspath`` is able to stand on its own as a cache key.
This method should either return a byte string or an iterator
of byte strings. The latter is preferred for large files
as it helps reduce memory fragmentation.
.. versionadded:: 3.1
"""
with open(abspath, "rb") as file:
if start is not None:
file.seek(start)
if end is not None:
remaining = end - (start or 0) # type: Optional[int]
else:
remaining = None
while True:
chunk_size = 64 * 1024
if remaining is not None and remaining < chunk_size:
chunk_size = remaining
chunk = file.read(chunk_size)
if chunk:
if remaining is not None:
remaining -= len(chunk)
yield chunk
else:
if remaining is not None:
assert remaining == 0
return
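    # Illustrative sketch (not part of Tornado): a subclass that serves content from memory
    # rather than the filesystem, following the "Subclassing notes" in the class docstring.
    # The InMemoryFileHandler name and CONTENT mapping are hypothetical.
    #
    #     class InMemoryFileHandler(StaticFileHandler):
    #         CONTENT = {"hello.txt": b"hello world"}
    #
    #         @classmethod
    #         def get_absolute_path(cls, root, path):
    #             return path
    #
    #         def validate_absolute_path(self, root, absolute_path):
    #             if absolute_path not in self.CONTENT:
    #                 raise HTTPError(404)
    #             return absolute_path
    #
    #         @classmethod
    #         def get_content(cls, abspath, start=None, end=None):
    #             return cls.CONTENT[abspath][start:end]
    #
    #         def get_content_size(self):
    #             return len(self.CONTENT[self.absolute_path])
    #
    #         def get_modified_time(self):
    #             return None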
@classmethod
def get_content_version(cls, abspath: str) -> str:
"""Returns a version string for the resource at the given path.
This class method may be overridden by subclasses. The
default implementation is a hash of the file's contents.
.. versionadded:: 3.1
"""
data = cls.get_content(abspath)
hasher = hashlib.md5()
if isinstance(data, bytes):
hasher.update(data)
else:
for chunk in data:
hasher.update(chunk)
return hasher.hexdigest()
def _stat(self) -> os.stat_result:
assert self.absolute_path is not None
if not hasattr(self, "_stat_result"):
self._stat_result = os.stat(self.absolute_path)
return self._stat_result
def get_content_size(self) -> int:
"""Retrieve the total size of the resource at the given path.
This method may be overridden by subclasses.
.. versionadded:: 3.1
.. versionchanged:: 4.0
This method is now always called, instead of only when
partial results are requested.
"""
stat_result = self._stat()
return stat_result.st_size
def get_modified_time(self) -> Optional[datetime.datetime]:
"""Returns the time that ``self.absolute_path`` was last modified.
May be overridden in subclasses. Should return a `~datetime.datetime`
object or None.
.. versionadded:: 3.1
"""
stat_result = self._stat()
# NOTE: Historically, this used stat_result[stat.ST_MTIME],
# which truncates the fractional portion of the timestamp. It
# was changed from that form to stat_result.st_mtime to
# satisfy mypy (which disallows the bracket operator), but the
# latter form returns a float instead of an int. For
# consistency with the past (and because we have a unit test
# that relies on this), we truncate the float here, although
# I'm not sure that's the right thing to do.
modified = datetime.datetime.utcfromtimestamp(int(stat_result.st_mtime))
return modified
def get_content_type(self) -> str:
"""Returns the ``Content-Type`` header to be used for this request.
.. versionadded:: 3.1
"""
assert self.absolute_path is not None
mime_type, encoding = mimetypes.guess_type(self.absolute_path)
# per RFC 6713, use the appropriate type for a gzip compressed file
if encoding == "gzip":
return "application/gzip"
# As of 2015-07-21 there is no bzip2 encoding defined at
# http://www.iana.org/assignments/media-types/media-types.xhtml
# So for that (and any other encoding), use octet-stream.
elif encoding is not None:
return "application/octet-stream"
elif mime_type is not None:
return mime_type
# if mime_type not detected, use application/octet-stream
else:
return "application/octet-stream"
def set_extra_headers(self, path: str) -> None:
"""For subclass to add extra headers to the response"""
pass
def get_cache_time(
self, path: str, modified: Optional[datetime.datetime], mime_type: str
) -> int:
"""Override to customize cache control behavior.
Return a positive number of seconds to make the result
cacheable for that amount of time or 0 to mark resource as
cacheable for an unspecified amount of time (subject to
browser heuristics).
By default returns cache expiry of 10 years for resources requested
with ``v`` argument.
"""
return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
@classmethod
def make_static_url(
cls, settings: Dict[str, Any], path: str, include_version: bool = True
) -> str:
"""Constructs a versioned url for the given path.
This method may be overridden in subclasses (but note that it
is a class method rather than an instance method). Subclasses
are only required to implement the signature
``make_static_url(cls, settings, path)``; other keyword
arguments may be passed through `~RequestHandler.static_url`
but are not standard.
``settings`` is the `Application.settings` dictionary. ``path``
is the static path being requested. The url returned should be
relative to the current host.
``include_version`` determines whether the generated URL should
include the query string containing the version hash of the
file corresponding to the given ``path``.
"""
url = settings.get("static_url_prefix", "/static/") + path
if not include_version:
return url
version_hash = cls.get_version(settings, path)
if not version_hash:
return url
return "%s?v=%s" % (url, version_hash)
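    # For example (illustrative values): with the default static_url_prefix, a call to
    # RequestHandler.static_url("css/site.css") ends up here and returns something like
    # "/static/css/site.css?v=1a2b3c...", where the v value comes from
    # get_version()/get_content_version() hashing the file contents.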
def parse_url_path(self, url_path: str) -> str:
"""Converts a static URL path into a filesystem path.
``url_path`` is the path component of the URL with
``static_url_prefix`` removed. The return value should be
filesystem path relative to ``static_path``.
This is the inverse of `make_static_url`.
"""
if os.path.sep != "/":
url_path = url_path.replace("/", os.path.sep)
return url_path
@classmethod
def get_version(cls, settings: Dict[str, Any], path: str) -> Optional[str]:
"""Generate the version string to be used in static URLs.
``settings`` is the `Application.settings` dictionary and ``path``
is the relative location of the requested asset on the filesystem.
The returned value should be a string, or ``None`` if no version
could be determined.
.. versionchanged:: 3.1
This method was previously recommended for subclasses to override;
`get_content_version` is now preferred as it allows the base
class to handle caching of the result.
"""
abs_path = cls.get_absolute_path(settings["static_path"], path)
return cls._get_cached_version(abs_path)
@classmethod
def _get_cached_version(cls, abs_path: str) -> Optional[str]:
with cls._lock:
hashes = cls._static_hashes
if abs_path not in hashes:
try:
hashes[abs_path] = cls.get_content_version(abs_path)
except Exception:
gen_log.error("Could not open static file %r", abs_path)
hashes[abs_path] = None
hsh = hashes.get(abs_path)
if hsh:
return hsh
return None
class FallbackHandler(RequestHandler):
"""A `RequestHandler` that wraps another HTTP server callback.
The fallback is a callable object that accepts an
`~.httputil.HTTPServerRequest`, such as an `Application` or
`tornado.wsgi.WSGIContainer`. This is most useful to use both
Tornado ``RequestHandlers`` and WSGI in the same server. Typical
usage::
wsgi_app = tornado.wsgi.WSGIContainer(
django.core.handlers.wsgi.WSGIHandler())
application = tornado.web.Application([
(r"/foo", FooHandler),
(r".*", FallbackHandler, dict(fallback=wsgi_app)),
])
"""
def initialize(
self, fallback: Callable[[httputil.HTTPServerRequest], None]
) -> None:
self.fallback = fallback
def prepare(self) -> None:
self.fallback(self.request)
self._finished = True
self.on_finish()
class OutputTransform(object):
"""A transform modifies the result of an HTTP request (e.g., GZip encoding)
Applications are not expected to create their own OutputTransforms
or interact with them directly; the framework chooses which transforms
(if any) to apply.
"""
def __init__(self, request: httputil.HTTPServerRequest) -> None:
pass
def transform_first_chunk(
self,
status_code: int,
headers: httputil.HTTPHeaders,
chunk: bytes,
finishing: bool,
) -> Tuple[int, httputil.HTTPHeaders, bytes]:
return status_code, headers, chunk
def transform_chunk(self, chunk: bytes, finishing: bool) -> bytes:
return chunk
class GZipContentEncoding(OutputTransform):
"""Applies the gzip content encoding to the response.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
.. versionchanged:: 4.0
Now compresses all mime types beginning with ``text/``, instead
of just a whitelist. (the whitelist is still used for certain
non-text mime types).
"""
# Whitelist of compressible mime types (in addition to any types
# beginning with "text/").
CONTENT_TYPES = set(
[
"application/javascript",
"application/x-javascript",
"application/xml",
"application/atom+xml",
"application/json",
"application/xhtml+xml",
"image/svg+xml",
]
)
# Python's GzipFile defaults to level 9, while most other gzip
# tools (including gzip itself) default to 6, which is probably a
# better CPU/size tradeoff.
GZIP_LEVEL = 6
# Responses that are too short are unlikely to benefit from gzipping
# after considering the "Content-Encoding: gzip" header and the header
# inside the gzip encoding.
# Note that responses written in multiple chunks will be compressed
# regardless of size.
MIN_LENGTH = 1024
def __init__(self, request: httputil.HTTPServerRequest) -> None:
self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
def _compressible_type(self, ctype: str) -> bool:
return ctype.startswith("text/") or ctype in self.CONTENT_TYPES
def transform_first_chunk(
self,
status_code: int,
headers: httputil.HTTPHeaders,
chunk: bytes,
finishing: bool,
) -> Tuple[int, httputil.HTTPHeaders, bytes]:
# TODO: can/should this type be inherited from the superclass?
if "Vary" in headers:
headers["Vary"] += ", Accept-Encoding"
else:
headers["Vary"] = "Accept-Encoding"
if self._gzipping:
ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
self._gzipping = (
self._compressible_type(ctype)
and (not finishing or len(chunk) >= self.MIN_LENGTH)
and ("Content-Encoding" not in headers)
)
if self._gzipping:
headers["Content-Encoding"] = "gzip"
self._gzip_value = BytesIO()
self._gzip_file = gzip.GzipFile(
mode="w", fileobj=self._gzip_value, compresslevel=self.GZIP_LEVEL
)
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
# The original content length is no longer correct.
# If this is the last (and only) chunk, we can set the new
# content-length; otherwise we remove it and fall back to
# chunked encoding.
if finishing:
headers["Content-Length"] = str(len(chunk))
else:
del headers["Content-Length"]
return status_code, headers, chunk
def transform_chunk(self, chunk: bytes, finishing: bool) -> bytes:
if self._gzipping:
self._gzip_file.write(chunk)
if finishing:
self._gzip_file.close()
else:
self._gzip_file.flush()
chunk = self._gzip_value.getvalue()
self._gzip_value.truncate(0)
self._gzip_value.seek(0)
return chunk
def authenticated(
method: Callable[..., Optional[Awaitable[None]]]
) -> Callable[..., Optional[Awaitable[None]]]:
"""Decorate methods with this to require that the user be logged in.
If the user is not logged in, they will be redirected to the configured
`login url <RequestHandler.get_login_url>`.
If you configure a login url with a query parameter, Tornado will
assume you know what you're doing and use it as-is. If not, it
will add a `next` parameter so the login page knows where to send
you once you're logged in.
"""
@functools.wraps(method)
def wrapper( # type: ignore
self: RequestHandler, *args, **kwargs
) -> Optional[Awaitable[None]]:
if not self.current_user:
if self.request.method in ("GET", "HEAD"):
url = self.get_login_url()
if "?" not in url:
if urllib.parse.urlsplit(url).scheme:
# if login url is absolute, make next absolute too
next_url = self.request.full_url()
else:
assert self.request.uri is not None
next_url = self.request.uri
url += "?" + urlencode(dict(next=next_url))
self.redirect(url)
return None
raise HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
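# Usage sketch (not part of Tornado itself): protecting a handler with the decorator
# above. The ProfileHandler name and the login_url value are hypothetical.
#
#     class ProfileHandler(RequestHandler):
#         @authenticated
#         def get(self):
#             self.write("hello " + str(self.current_user))
#
#     app = Application([(r"/profile", ProfileHandler)],
#                       login_url="/login", cookie_secret="...")
#
# An anonymous GET /profile is redirected to /login?next=%2Fprofile; an anonymous
# POST receives 403.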
class UIModule(object):
"""A re-usable, modular UI unit on a page.
UI modules often execute additional queries, and they can include
additional CSS and JavaScript that will be included in the output
page, which is automatically inserted on page render.
Subclasses of UIModule must override the `render` method.
"""
def __init__(self, handler: RequestHandler) -> None:
self.handler = handler
self.request = handler.request
self.ui = handler.ui
self.locale = handler.locale
@property
def current_user(self) -> Any:
return self.handler.current_user
def render(self, *args: Any, **kwargs: Any) -> str:
"""Override in subclasses to return this module's output."""
raise NotImplementedError()
def embedded_javascript(self) -> Optional[str]:
"""Override to return a JavaScript string
to be embedded in the page."""
return None
def javascript_files(self) -> Optional[Iterable[str]]:
"""Override to return a list of JavaScript files needed by this module.
If the return values are relative paths, they will be passed to
`RequestHandler.static_url`; otherwise they will be used as-is.
"""
return None
def embedded_css(self) -> Optional[str]:
"""Override to return a CSS string
that will be embedded in the page."""
return None
    def css_files(self) -> Optional[Iterable[str]]:
        """Override to return a list of CSS files required by this module.
If the return values are relative paths, they will be passed to
`RequestHandler.static_url`; otherwise they will be used as-is.
"""
return None
def html_head(self) -> Optional[str]:
"""Override to return an HTML string that will be put in the <head/>
element.
"""
return None
def html_body(self) -> Optional[str]:
"""Override to return an HTML string that will be put at the end of
the <body/> element.
"""
return None
def render_string(self, path: str, **kwargs: Any) -> bytes:
"""Renders a template and returns it as a string."""
return self.handler.render_string(path, **kwargs)
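# Example sketch (not from the original source): a minimal UIModule subclass. The Entry
# name and template path are hypothetical.
#
#     class Entry(UIModule):
#         def render(self, entry, show_comments=False):
#             return self.render_string("module-entry.html",
#                                       entry=entry, show_comments=show_comments)
#
# It would be registered via Application(..., ui_modules={"Entry": Entry}) and invoked
# from a template as {% module Entry(entry) %}.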
class _linkify(UIModule):
def render(self, text: str, **kwargs: Any) -> str: # type: ignore
return escape.linkify(text, **kwargs)
class _xsrf_form_html(UIModule):
def render(self) -> str: # type: ignore
return self.handler.xsrf_form_html()
class TemplateModule(UIModule):
"""UIModule that simply renders the given template.
{% module Template("foo.html") %} is similar to {% include "foo.html" %},
but the module version gets its own namespace (with kwargs passed to
Template()) instead of inheriting the outer template's namespace.
Templates rendered through this module also get access to UIModule's
automatic javascript/css features. Simply call set_resources
inside the template and give it keyword arguments corresponding to
the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
Note that these resources are output once per template file, not once
per instantiation of the template, so they must not depend on
any arguments to the template.
"""
def __init__(self, handler: RequestHandler) -> None:
super(TemplateModule, self).__init__(handler)
# keep resources in both a list and a dict to preserve order
self._resource_list = [] # type: List[Dict[str, Any]]
self._resource_dict = {} # type: Dict[str, Dict[str, Any]]
def render(self, path: str, **kwargs: Any) -> bytes: # type: ignore
def set_resources(**kwargs) -> str: # type: ignore
if path not in self._resource_dict:
self._resource_list.append(kwargs)
self._resource_dict[path] = kwargs
else:
if self._resource_dict[path] != kwargs:
raise ValueError(
"set_resources called with different "
"resources for the same template"
)
return ""
return self.render_string(path, set_resources=set_resources, **kwargs)
def _get_resources(self, key: str) -> Iterable[str]:
return (r[key] for r in self._resource_list if key in r)
def embedded_javascript(self) -> str:
return "\n".join(self._get_resources("embedded_javascript"))
def javascript_files(self) -> Iterable[str]:
result = []
for f in self._get_resources("javascript_files"):
if isinstance(f, (unicode_type, bytes)):
result.append(f)
else:
result.extend(f)
return result
def embedded_css(self) -> str:
return "\n".join(self._get_resources("embedded_css"))
def css_files(self) -> Iterable[str]:
result = []
for f in self._get_resources("css_files"):
if isinstance(f, (unicode_type, bytes)):
result.append(f)
else:
result.extend(f)
return result
def html_head(self) -> str:
return "".join(self._get_resources("html_head"))
def html_body(self) -> str:
return "".join(self._get_resources("html_body"))
class _UIModuleNamespace(object):
"""Lazy namespace which creates UIModule proxies bound to a handler."""
def __init__(
self, handler: RequestHandler, ui_modules: Dict[str, Type[UIModule]]
) -> None:
self.handler = handler
self.ui_modules = ui_modules
def __getitem__(self, key: str) -> Callable[..., str]:
return self.handler._ui_module(key, self.ui_modules[key])
def __getattr__(self, key: str) -> Callable[..., str]:
try:
return self[key]
except KeyError as e:
raise AttributeError(str(e))
def create_signed_value(
secret: _CookieSecretTypes,
name: str,
value: Union[str, bytes],
version: int = None,
clock: Callable[[], float] = None,
key_version: int = None,
) -> bytes:
if version is None:
version = DEFAULT_SIGNED_VALUE_VERSION
if clock is None:
clock = time.time
timestamp = utf8(str(int(clock())))
value = base64.b64encode(utf8(value))
if version == 1:
assert not isinstance(secret, dict)
signature = _create_signature_v1(secret, name, value, timestamp)
value = b"|".join([value, timestamp, signature])
return value
elif version == 2:
# The v2 format consists of a version number and a series of
# length-prefixed fields "%d:%s", the last of which is a
# signature, all separated by pipes. All numbers are in
# decimal format with no leading zeros. The signature is an
# HMAC-SHA256 of the whole string up to that point, including
# the final pipe.
#
# The fields are:
# - format version (i.e. 2; no length prefix)
# - key version (integer, default is 0)
# - timestamp (integer seconds since epoch)
# - name (not encoded; assumed to be ~alphanumeric)
# - value (base64-encoded)
# - signature (hex-encoded; no length prefix)
def format_field(s: Union[str, bytes]) -> bytes:
return utf8("%d:" % len(s)) + utf8(s)
to_sign = b"|".join(
[
b"2",
format_field(str(key_version or 0)),
format_field(timestamp),
format_field(name),
format_field(value),
b"",
]
)
if isinstance(secret, dict):
assert (
key_version is not None
), "Key version must be set when sign key dict is used"
assert version >= 2, "Version must be at least 2 for key version support"
secret = secret[key_version]
signature = _create_signature_v2(secret, to_sign)
return to_sign + signature
else:
raise ValueError("Unsupported version %d" % version)
# A leading version number in decimal
# with no leading zeros, followed by a pipe.
_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
def _get_version(value: bytes) -> int:
# Figures out what version value is. Version 1 did not include an
# explicit version field and started with arbitrary base64 data,
# which makes this tricky.
m = _signed_value_version_re.match(value)
if m is None:
version = 1
else:
try:
version = int(m.group(1))
if version > 999:
# Certain payloads from the version-less v1 format may
# be parsed as valid integers. Due to base64 padding
# restrictions, this can only happen for numbers whose
# length is a multiple of 4, so we can treat all
# numbers up to 999 as versions, and for the rest we
# fall back to v1 format.
version = 1
except ValueError:
version = 1
return version
def decode_signed_value(
secret: _CookieSecretTypes,
name: str,
value: Union[None, str, bytes],
max_age_days: int = 31,
clock: Callable[[], float] = None,
min_version: int = None,
) -> Optional[bytes]:
if clock is None:
clock = time.time
if min_version is None:
min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
if min_version > 2:
raise ValueError("Unsupported min_version %d" % min_version)
if not value:
return None
value = utf8(value)
version = _get_version(value)
if version < min_version:
return None
if version == 1:
assert not isinstance(secret, dict)
return _decode_signed_value_v1(secret, name, value, max_age_days, clock)
elif version == 2:
return _decode_signed_value_v2(secret, name, value, max_age_days, clock)
else:
return None
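# Round-trip sketch (illustrative, not part of the module): signing and then verifying a
# value with the helpers above.
#
#     signed = create_signed_value("my-secret", "user_id", "42")
#     assert decode_signed_value("my-secret", "user_id", signed) == b"42"
#     assert decode_signed_value("other-secret", "user_id", signed) is None
#
# RequestHandler's secure-cookie methods wrap these functions using the application's
# cookie_secret setting.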
def _decode_signed_value_v1(
secret: Union[str, bytes],
name: str,
value: bytes,
max_age_days: int,
clock: Callable[[], float],
) -> Optional[bytes]:
parts = utf8(value).split(b"|")
if len(parts) != 3:
return None
signature = _create_signature_v1(secret, name, parts[0], parts[1])
if not hmac.compare_digest(parts[2], signature):
gen_log.warning("Invalid cookie signature %r", value)
return None
timestamp = int(parts[1])
if timestamp < clock() - max_age_days * 86400:
gen_log.warning("Expired cookie %r", value)
return None
if timestamp > clock() + 31 * 86400:
# _cookie_signature does not hash a delimiter between the
# parts of the cookie, so an attacker could transfer trailing
# digits from the payload to the timestamp without altering the
# signature. For backwards compatibility, sanity-check timestamp
# here instead of modifying _cookie_signature.
gen_log.warning("Cookie timestamp in future; possible tampering %r", value)
return None
if parts[1].startswith(b"0"):
gen_log.warning("Tampered cookie %r", value)
return None
try:
return base64.b64decode(parts[0])
except Exception:
return None
def _decode_fields_v2(value: bytes) -> Tuple[int, bytes, bytes, bytes, bytes]:
def _consume_field(s: bytes) -> Tuple[bytes, bytes]:
length, _, rest = s.partition(b":")
n = int(length)
field_value = rest[:n]
# In python 3, indexing bytes returns small integers; we must
# use a slice to get a byte string as in python 2.
if rest[n : n + 1] != b"|":
raise ValueError("malformed v2 signed value field")
rest = rest[n + 1 :]
return field_value, rest
rest = value[2:] # remove version number
key_version, rest = _consume_field(rest)
timestamp, rest = _consume_field(rest)
name_field, rest = _consume_field(rest)
value_field, passed_sig = _consume_field(rest)
return int(key_version), timestamp, name_field, value_field, passed_sig
def _decode_signed_value_v2(
secret: _CookieSecretTypes,
name: str,
value: bytes,
max_age_days: int,
clock: Callable[[], float],
) -> Optional[bytes]:
try:
(
key_version,
timestamp_bytes,
name_field,
value_field,
passed_sig,
) = _decode_fields_v2(value)
except ValueError:
return None
signed_string = value[: -len(passed_sig)]
if isinstance(secret, dict):
try:
secret = secret[key_version]
except KeyError:
return None
expected_sig = _create_signature_v2(secret, signed_string)
if not hmac.compare_digest(passed_sig, expected_sig):
return None
if name_field != utf8(name):
return None
timestamp = int(timestamp_bytes)
if timestamp < clock() - max_age_days * 86400:
# The signature has expired.
return None
try:
return base64.b64decode(value_field)
except Exception:
return None
def get_signature_key_version(value: Union[str, bytes]) -> Optional[int]:
value = utf8(value)
version = _get_version(value)
if version < 2:
return None
try:
key_version, _, _, _, _ = _decode_fields_v2(value)
except ValueError:
return None
return key_version
def _create_signature_v1(secret: Union[str, bytes], *parts: Union[str, bytes]) -> bytes:
hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
for part in parts:
hash.update(utf8(part))
return utf8(hash.hexdigest())
def _create_signature_v2(secret: Union[str, bytes], s: bytes) -> bytes:
hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
hash.update(utf8(s))
return utf8(hash.hexdigest())
def is_absolute(path: str) -> bool:
return any(path.startswith(x) for x in ["/", "http:", "https:"])
# ==== /python/example/run_all_tests.py | repo: PeterZhouSZ/diff_pd | license: none ====
import sys
sys.path.append('../')
from importlib import import_module
from py_diff_pd.common.common import print_ok, print_error
if __name__ == '__main__':
# If you want to add a new test, simply add its name here --- you can find their names from README.md.
tests = [
# Utilities.
'render_quad_mesh',
# Numerical check.
'actuation_2d',
'actuation_3d',
'collision_2d',
'deformable_backward_2d',
'deformable_backward_3d',
'deformable_quasi_static_3d',
'pd_energy_2d',
'pd_energy_3d',
'pd_forward',
'state_force_2d',
'state_force_3d'
]
failure_cnt = 0
for name in tests:
test_func_name = 'test_{}'.format(name)
module_name = name
test_func = getattr(import_module(module_name), test_func_name)
if test_func(verbose=False):
print_ok('[{}] PASSED.'.format(name))
else:
print_error('[{}] FAILED.'.format(name))
failure_cnt += 1
print('{}/{} tests failed.'.format(failure_cnt, len(tests)))
if failure_cnt > 0:
sys.exit(-1)
# ==== /foundation/migrations/0009_title_img2.py | repo: kmvit/personalsite | license: none ====
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('foundation', '0008_auto_20141201_2156'),
]
operations = [
migrations.AddField(
model_name='title',
name='img2',
field=models.ImageField(null=True, upload_to=b'media', blank=True),
preserve_default=True,
),
]
# ==== /src/repairs/orig_10.py | repo: gsakkas/seq2parse | license: none ====
a = 5
if 0 <= a < 10:
    print("test passed")
# ==== /ML_Applications/CNNs/Mutants/Mutants_generated_by_MutPy_(code)/mutants_resnet/mutants_resnet_model_VERML_basedOn_nas_fix4_deterministic/227.py | repo: PinjiaHe/VerifyML | license: none ====
"""Contains definitions for the preactivation form of Residual Networks.
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer
rather than after.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
_BATCH_NORM_DECAY = 0.997
_BATCH_NORM_EPSILON = 1e-05
_SEED = 7
tf.set_random_seed(_SEED)
def batch_norm_relu(inputs, is_training, data_format):
"""Performs a batch normalization followed by a ReLU."""
inputs = tf.layers.batch_normalization(inputs=
inputs, axis=1 if data_format == 'channels_first' else 3, momentum=
_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True, scale=True, training=
is_training, fused=True)
inputs = tf.nn.relu(inputs)
return inputs
def fixed_padding(inputs, kernel_size, data_format):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
A tensor with the same format as the input with the data either intact
(if kernel_size == 1) or padded (if kernel_size > 1).
"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], [
pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [
pad_beg, pad_end], [0, 0]])
return padded_inputs
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):
"""Strided 2-D convolution with explicit padding."""
if strides >= 1:
inputs = fixed_padding(inputs, kernel_size, data_format)
return tf.layers.conv2d(inputs=
inputs, filters=filters, kernel_size=kernel_size, strides=strides, padding='SAME' if
strides == 1 else 'VALID', use_bias=False, kernel_initializer=
tf.variance_scaling_initializer(), data_format=
data_format)
def building_block(inputs, filters, is_training, projection_shortcut, strides, data_format):
"""Standard building block for residual networks with BN before convolutions.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the convolutions.
is_training: A Boolean for whether the model is in training or inference
mode. Needed for batch normalization.
projection_shortcut: The function to use for projection shortcuts (typically
a 1x1 convolution when downsampling the input).
strides: The block's stride. If greater than 1, this block will ultimately
downsample the input.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block.
"""
shortcut = inputs
inputs = batch_norm_relu(inputs, is_training, data_format)
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs)
inputs = conv2d_fixed_padding(inputs=
inputs, filters=filters, kernel_size=3, strides=strides, data_format=
data_format)
inputs = batch_norm_relu(inputs, is_training, data_format)
inputs = conv2d_fixed_padding(inputs=
inputs, filters=filters, kernel_size=3, strides=1, data_format=
data_format)
return inputs + shortcut
def bottleneck_block(inputs, filters, is_training, projection_shortcut, strides, data_format):
"""Bottleneck block variant for residual networks with BN before convolutions.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the first two convolutions. Note that the
third and final convolution will use 4 times as many filters.
is_training: A Boolean for whether the model is in training or inference
mode. Needed for batch normalization.
projection_shortcut: The function to use for projection shortcuts (typically
a 1x1 convolution when downsampling the input).
strides: The block's stride. If greater than 1, this block will ultimately
downsample the input.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block.
"""
shortcut = inputs
inputs = batch_norm_relu(inputs, is_training, data_format)
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs)
inputs = conv2d_fixed_padding(inputs=
inputs, filters=filters, kernel_size=1, strides=1, data_format=
data_format)
inputs = batch_norm_relu(inputs, is_training, data_format)
inputs = conv2d_fixed_padding(inputs=
inputs, filters=filters, kernel_size=3, strides=strides, data_format=
data_format)
inputs = batch_norm_relu(inputs, is_training, data_format)
inputs = conv2d_fixed_padding(inputs=
inputs, filters=4 * filters, kernel_size=1, strides=1, data_format=
data_format)
return inputs + shortcut
def block_layer(inputs, filters, block_fn, blocks, strides, is_training, name, data_format):
"""Creates one layer of blocks for the ResNet model.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the first convolution of the layer.
block_fn: The block to use within the model, either `building_block` or
`bottleneck_block`.
blocks: The number of blocks contained in the layer.
strides: The stride to use for the first convolution of the layer. If
greater than 1, this layer will ultimately downsample the input.
is_training: Either True or False, whether we are currently training the
model. Needed for batch norm.
name: A string name for the tensor output of the block layer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block layer.
"""
filters_out = 4 * filters if block_fn is bottleneck_block else filters
def projection_shortcut(inputs):
return conv2d_fixed_padding(inputs=
inputs, filters=filters_out, kernel_size=1, strides=strides, data_format=
data_format)
inputs = block_fn(inputs, filters, is_training, projection_shortcut, strides,
data_format)
for _ in range(1, blocks):
inputs = block_fn(inputs, filters, is_training, None, 1, data_format)
return tf.identity(inputs, name)
def cifar10_resnet_v2_generator(resnet_size, num_classes, data_format=None):
"""Generator for CIFAR-10 ResNet v2 models.
Args:
resnet_size: A single integer for the size of the ResNet model.
num_classes: The number of possible classes for image classification.
data_format: The input format ('channels_last', 'channels_first', or None).
If set to None, the format is dependent on whether a GPU is available.
Returns:
The model function that takes in `inputs` and `is_training` and
returns the output tensor of the ResNet model.
Raises:
ValueError: If `resnet_size` is invalid.
"""
if resnet_size % 6 != 2:
raise ValueError('resnet_size must be 6n + 2:', resnet_size)
num_blocks = resnet_size - 2 // 6
if data_format is None:
        data_format = 'channels_first' if tf.test.is_built_with_cuda() else 'channels_last'
def model(inputs, is_training):
"""Constructs the ResNet model given the inputs."""
if data_format == 'channels_first':
inputs = tf.transpose(inputs, [0, 3, 1, 2])
inputs = conv2d_fixed_padding(inputs=
inputs, filters=16, kernel_size=3, strides=1, data_format=
data_format)
inputs = tf.identity(inputs, 'initial_conv')
inputs = block_layer(inputs=
inputs, filters=16, block_fn=building_block, blocks=num_blocks, strides=1, is_training=
is_training, name='block_layer1', data_format=
data_format)
inputs = block_layer(inputs=
inputs, filters=32, block_fn=building_block, blocks=num_blocks, strides=2, is_training=
is_training, name='block_layer2', data_format=
data_format)
inputs = block_layer(inputs=
inputs, filters=64, block_fn=building_block, blocks=num_blocks, strides=2, is_training=
is_training, name='block_layer3', data_format=
data_format)
inputs = batch_norm_relu(inputs, is_training, data_format)
inputs = tf.layers.average_pooling2d(inputs=
inputs, pool_size=8, strides=1, padding='VALID', data_format=
data_format)
inputs = tf.identity(inputs, 'final_avg_pool')
inputs = tf.reshape(inputs, [(-1), 64])
inputs = tf.layers.dense(inputs=inputs, units=num_classes)
inputs = tf.identity(inputs, 'final_dense')
return inputs
return model
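# Usage sketch (illustrative, not part of the original file): obtaining the model function
# returned above and applying it to a batch of CIFAR-10 images.
#
#     network = cifar10_resnet_v2_generator(resnet_size=32, num_classes=10)
#     images = tf.placeholder(tf.float32, [None, 32, 32, 3])
#     logits = network(images, is_training=True)   # -> shape [batch, 10]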
def imagenet_resnet_v2_generator(block_fn, layers, num_classes, data_format=None):
"""Generator for ImageNet ResNet v2 models.
Args:
block_fn: The block to use within the model, either `building_block` or
`bottleneck_block`.
layers: A length-4 array denoting the number of blocks to include in each
layer. Each layer consists of blocks that take inputs of the same size.
num_classes: The number of possible classes for image classification.
data_format: The input format ('channels_last', 'channels_first', or None).
If set to None, the format is dependent on whether a GPU is available.
Returns:
The model function that takes in `inputs` and `is_training` and
returns the output tensor of the ResNet model.
"""
if data_format is None:
        data_format = 'channels_first' if tf.test.is_built_with_cuda() else 'channels_last'
def model(inputs, is_training):
"""Constructs the ResNet model given the inputs."""
if data_format == 'channels_first':
inputs = tf.transpose(inputs, [0, 3, 1, 2])
inputs = conv2d_fixed_padding(inputs=
inputs, filters=64, kernel_size=7, strides=2, data_format=
data_format)
inputs = tf.identity(inputs, 'initial_conv')
inputs = tf.layers.max_pooling2d(inputs=
inputs, pool_size=3, strides=2, padding='SAME', data_format=
data_format)
inputs = tf.identity(inputs, 'initial_max_pool')
inputs = block_layer(inputs=
inputs, filters=64, block_fn=block_fn, blocks=layers[0], strides=1, is_training=
is_training, name='block_layer1', data_format=
data_format)
inputs = block_layer(inputs=
inputs, filters=128, block_fn=block_fn, blocks=layers[1], strides=2, is_training=
is_training, name='block_layer2', data_format=
data_format)
inputs = block_layer(inputs=
inputs, filters=256, block_fn=block_fn, blocks=layers[2], strides=2, is_training=
is_training, name='block_layer3', data_format=
data_format)
inputs = block_layer(inputs=
inputs, filters=512, block_fn=block_fn, blocks=layers[3], strides=2, is_training=
is_training, name='block_layer4', data_format=
data_format)
inputs = batch_norm_relu(inputs, is_training, data_format)
inputs = tf.layers.average_pooling2d(inputs=
inputs, pool_size=7, strides=1, padding='VALID', data_format=
data_format)
inputs = tf.identity(inputs, 'final_avg_pool')
inputs = tf.reshape(inputs, [(-1), 512 if
block_fn is building_block else 2048])
inputs = tf.layers.dense(inputs=inputs, units=num_classes)
inputs = tf.identity(inputs, 'final_dense')
return inputs
return model
def imagenet_resnet_v2(resnet_size, num_classes, data_format=None):
"""Returns the ResNet model for a given size and number of output classes."""
model_params = {18: {'block':
building_block, 'layers': [2, 2, 2, 2]}, 34: {'block':
building_block, 'layers': [3, 4, 6, 3]}, 50: {'block':
bottleneck_block, 'layers': [3, 4, 6, 3]}, 101: {'block':
bottleneck_block, 'layers': [3, 4, 23, 3]}, 152: {'block':
bottleneck_block, 'layers': [3, 8, 36, 3]}, 200: {'block':
bottleneck_block, 'layers': [3, 24, 36, 3]}}
if resnet_size not in model_params:
raise ValueError('Not a valid resnet_size:', resnet_size)
params = model_params[resnet_size]
return imagenet_resnet_v2_generator(
        params['block'], params['layers'], num_classes, data_format)
# ==== /Section3/14.py | repo: JAntonioMarin/PythonBootcamp | license: none ====
print('hello')
print("world")
print('this is also a string')
print(" I'm going on a run ")
print('hello \nworld')
print('hello \tworld')
print(len('hello'))
print(len('I am'))
# Coding Exercise 2: Quick Print Check
print("Hello World")
# ==== /tests/test_stat.py | repo: jon-rd/jc | license: MIT ====
import os
import json
import unittest
import jc.parsers.stat
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class MyTests(unittest.TestCase):
def setUp(self):
# input
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/stat.out'), 'r', encoding='utf-8') as f:
self.centos_7_7_stat = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/stat.out'), 'r', encoding='utf-8') as f:
self.ubuntu_18_4_stat = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/stat.out'), 'r', encoding='utf-8') as f:
self.osx_10_14_6_stat = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/freebsd12/stat.out'), 'r', encoding='utf-8') as f:
self.freebsd12_stat = f.read()
# output
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/stat.json'), 'r', encoding='utf-8') as f:
self.centos_7_7_stat_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/stat.json'), 'r', encoding='utf-8') as f:
self.ubuntu_18_4_stat_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/stat.json'), 'r', encoding='utf-8') as f:
self.osx_10_14_6_stat_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/freebsd12/stat.json'), 'r', encoding='utf-8') as f:
self.freebsd12_stat_json = json.loads(f.read())
def test_stat_nodata(self):
"""
Test 'stat' with no data
"""
self.assertEqual(jc.parsers.stat.parse('', quiet=True), [])
def test_stat_centos_7_7(self):
"""
Test 'stat /bin/*' on Centos 7.7
"""
self.assertEqual(jc.parsers.stat.parse(self.centos_7_7_stat, quiet=True), self.centos_7_7_stat_json)
def test_stat_ubuntu_18_4(self):
"""
Test 'stat /bin/*' on Ubuntu 18.4
"""
self.assertEqual(jc.parsers.stat.parse(self.ubuntu_18_4_stat, quiet=True), self.ubuntu_18_4_stat_json)
def test_stat_osx_10_14_6(self):
"""
Test 'stat /foo/*' on OSX 10.14.6
"""
self.assertEqual(jc.parsers.stat.parse(self.osx_10_14_6_stat, quiet=True), self.osx_10_14_6_stat_json)
def test_stat_freebsd12(self):
"""
Test 'stat /foo/*' on FreeBSD12
"""
self.assertEqual(jc.parsers.stat.parse(self.freebsd12_stat, quiet=True), self.freebsd12_stat_json)
if __name__ == '__main__':
unittest.main()
# ==== /Pro2/abcpp/abcpp/treeplot.py | repo: xl0418/Code | license: none ====
import sys, os
import platform
if platform.system() == 'Windows':
sys.path.append('C:/Liang/abcpp_master8/abcpp')
elif platform.system() == 'Darwin':
sys.path.append('/Users/dudupig/Documents/GitHub/Code/Pro2/Python_p2')
from dvtraitsim_py import DVSim
from dvtraitsim_shared import DVTreeData, DVParam
import numpy as np
import matplotlib.pyplot as plt
theta = 0 # optimum of natural selection
r = 1 # growth rate
Vmax = 1
scalar = 10000
K = 10e8
nu = 1 / (100 * K)
timegap = 100
# let's try to find a true simulation:
# trait evolution plot
for no_tree in range(4, 23):
gamma_vec = np.array([0, 0.001, 0.01, 0.1, 0.5, 1])
a_vec = gamma_vec
row_gamma = len(gamma_vec)
count = 0
tree = 'tree' + '%d' % no_tree
example = 'example' + '%d' % no_tree
if platform.system() == 'Windows':
dir_path = 'c:/Liang/Googlebox/Research/Project2'
files = dir_path + '/treesim_newexp/' + example + '/'
td = DVTreeData(path=files, scalar=scalar)
elif platform.system() == 'Darwin':
file = '/Users/dudupig/Documents/GitHub/Code/Pro2/abcpp/tree_data/' + example + '/'
f1, axes1 = plt.subplots(row_gamma, row_gamma, figsize=(9, 9), sharey=True, sharex=True) #
f2, axes2 = plt.subplots(row_gamma, row_gamma, figsize=(9, 9), sharey=True, sharex=True) #
f3, axes3 = plt.subplots(row_gamma, row_gamma, figsize=(9, 9), sharey=True, sharex=True) #
label_a = (['$\\alpha$=0', '$\\alpha$=.001', '$\\alpha$=.01', '$\\alpha$=.1', '$\\alpha$=.5', '$\\alpha$=1'])
label_gamma = (['$\gamma$=0', '$\gamma$=.001', '$\gamma$=.01', '$\gamma$=.1', '$\gamma$=.5', '$\gamma$=1'])
xticks = (0, td.evo_time * scalar / timegap)
xlabels = ['0', '150K']
for index_g in range(len(gamma_vec)):
gamma1 = gamma_vec[index_g]
for index_a in range(len(a_vec)):
a = a_vec[index_a]
print(count)
if index_a >= index_g:
for replicate in range(100):
obs_param = DVParam(gamma=gamma1, a=a, K=K, nu=nu, r=r, theta=theta, Vmax=1, inittrait=0, initpop=500,
initpop_sigma=10.0, break_on_mu=False)
simresult = DVSim(td, obs_param)
if simresult['sim_time'] == td.sim_evo_time:
pic = 0
break
else:
pic = 1
# if pic==0:
evo_time, total_species = simresult['N'].shape
evo_time = evo_time - 1
trait_RI_dr = simresult['Z']
population_RI_dr = simresult['N']
population_RI_dr = population_RI_dr.astype(float)
population_RI_dr[population_RI_dr == 0] = np.nan
V_dr = simresult['V']
num_lines = total_species
x = np.arange(evo_time / timegap + 1)
labels = []
for i in range(1, num_lines + 1):
axes1[index_g, index_a].plot(x, trait_RI_dr[::timegap, i - 1])
axes2[index_g, index_a].plot(x, population_RI_dr[::timegap, i - 1])
axes3[index_g, index_a].plot(x, V_dr[::timegap, i - 1])
axes1[index_g, index_a].set_xticks(xticks)
axes1[index_g, index_a].set_xticklabels(xlabels, minor=False)
axes2[index_g, index_a].set_xticks(xticks)
axes2[index_g, index_a].set_xticklabels(xlabels, minor=False)
axes3[index_g, index_a].set_xticks(xticks)
axes3[index_g, index_a].set_xticklabels(xlabels, minor=False)
if count in range(0, row_gamma):
axes1[index_g, index_a].title.set_text(label_a[count])
axes2[index_g, index_a].title.set_text(label_a[count])
axes3[index_g, index_a].title.set_text(label_a[count])
if count in ([5, 11, 17, 23, 29, 35]):
axes1[index_g, index_a].set_ylabel(label_gamma[int(count / row_gamma)])
axes1[index_g, index_a].yaxis.set_label_position("right")
axes2[index_g, index_a].set_ylabel(label_gamma[int(count / row_gamma)])
axes2[index_g, index_a].yaxis.set_label_position("right")
axes3[index_g, index_a].set_ylabel(label_gamma[int(count / row_gamma)])
axes3[index_g, index_a].yaxis.set_label_position("right")
else:
axes1[index_g, index_a].plot([])
axes2[index_g, index_a].plot([])
axes3[index_g, index_a].plot([])
axes1[index_g, index_a].axis('off')
axes2[index_g, index_a].axis('off')
axes3[index_g, index_a].axis('off')
count += 1
dir_fig = 'C:/Liang/Googlebox/Research/Project2/smc_treeuppertri/' + tree
f1.text(0.84, 0.04, 'Generation', ha='center', fontsize=15)
f1.text(0.04, 0.84, 'Trait mean', va='center', rotation='vertical', fontsize=15)
f2.text(0.84, 0.04, 'Generation', ha='center', fontsize=15)
f2.text(0.04, 0.84, 'Population size', va='center', rotation='vertical', fontsize=15)
f3.text(0.84, 0.04, 'Generation', ha='center', fontsize=15)
f3.text(0.04, 0.84, 'Trait variance', va='center', rotation='vertical', fontsize=15)
f1.savefig(dir_fig + 'TP.png')
plt.close(f1)
f2.savefig(dir_fig + 'NP.png')
plt.close(f2)
f3.savefig(dir_fig + 'VP.png')
plt.close(f3)
plt.close('all')
# ==== /saleor/core/permissions.py | repo: arslanahmd/Ghar-Tameer | license: BSD-2-Clause ====
from django.contrib.auth.models import Permission
MODELS_PERMISSIONS = [
'order.view_order',
'order.edit_order',
'product.view_category',
'product.edit_category',
'product.view_product',
'product.edit_product',
'product.view_properties',
'product.edit_properties',
'product.view_stock_location',
'product.edit_stock_location',
'sale.view_sale',
'sale.edit_sale',
'shipping.view_shipping',
'shipping.edit_shipping',
'site.edit_settings',
'site.view_settings',
'user.view_user',
'user.edit_user',
'user.view_group',
'user.edit_group',
'user.view_staff',
'user.edit_staff',
'user.impersonate_user',
'voucher.view_voucher',
'voucher.edit_voucher',
]
def get_permissions():
codenames = [permission.split('.')[1] for permission in MODELS_PERMISSIONS]
return Permission.objects.filter(codename__in=codenames)\
.prefetch_related('content_type')
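# Illustrative usage (added note, not part of the original module): the returned
# queryset can be attached to a Django auth group, e.g.
#
#     from django.contrib.auth.models import Group
#     staff_group, _ = Group.objects.get_or_create(name='Staff')
#     staff_group.permissions.set(get_permissions())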
| [
"[email protected]"
] | |
9a9f2f15e46689e698a6cf1af6e148036e507196 | 0444e53f4908454e2e8ab9f70877ec76be9f872c | /reportng/migrations/0025_auto_20161204_2336.py | 76552396407602970196277f9a8d5e618d06029d | [
"MIT"
] | permissive | dedayoa/keepintouch | c946612d6b69a20a92617354fef2ce6407382be5 | 2551fc21bb1f6055ab0fc2ec040c6ba874c2838f | refs/heads/master | 2020-03-22T12:53:44.488570 | 2018-08-17T13:00:51 | 2018-08-17T13:00:51 | 140,069,262 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-12-04 22:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reportng', '0024_auto_20161204_2123'),
]
operations = [
migrations.AlterField(
model_name='emaildeliveryreport',
name='sent_at',
field=models.DateTimeField(null=True, verbose_name='Sent'),
),
]
| [
"[email protected]"
] | |
f6200c41fbfc26030afb9f8196b7c6948fbc18be | ecf0d106831b9e08578845674a457a166b6e0a14 | /OOP/inheritance_EXERCISE/restaurant/project/food/dessert.py | 6f120937221db42a44dd44819bf082a0f5df016c | [] | no_license | ivo-bass/SoftUni-Solutions | 015dad72cff917bb74caeeed5e23b4c5fdeeca75 | 75612d4bdb6f41b749e88f8d9c512d0e00712011 | refs/heads/master | 2023-05-09T23:21:40.922503 | 2021-05-27T19:42:03 | 2021-05-27T19:42:03 | 311,329,921 | 8 | 5 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | from project.food.food import Food
class Dessert(Food):
def __init__(self, name: str, price: float, grams: float, calories: float):
super().__init__(name, price, grams)
self.__calories = calories
@property
def calories(self):
return self.__calories
| [
"[email protected]"
] | |
b0dd547c60357b7f4540d9f1064f30da3da49bfb | 3b89c0a97ac6b58b6923a213bc8471e11ad4fe69 | /python/CodingExercises/Median.py | 9fca643773716391f6ec77c8b200529cd534e8b8 | [] | no_license | ksayee/programming_assignments | b187adca502ecf7ff7b51dc849d5d79ceb90d4a6 | 13bc1c44e1eef17fc36724f20b060c3339c280ea | refs/heads/master | 2021-06-30T07:19:34.192277 | 2021-06-23T05:11:32 | 2021-06-23T05:11:32 | 50,700,556 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | # Find median of an Array of numbers
def Median(ary):
ary.sort()
print(ary)
if len(ary)%2!=0:
mid=int(len(ary)/2)
return ary[mid]
else:
idx1=int(len(ary)/2)-1
idx2=int(len(ary)/2)
return (ary[idx1]+ary[idx2])/2
def main():
ary=[5, 89, 20, 64, 20, 45]
print(Median(ary))
ary = [5, 89, 20, 64, 20, 45, 45, 23, 67, 32, 30]
print(Median(ary))
if __name__=='__main__':
main() | [
"[email protected]"
] | |
66598ec0257be4760b1dc38c8228fa5050235c13 | 6929a33a7259dad9b45192ca088a492085ed2953 | /solutions/0062-unique-paths/unique-paths.py | 1ac2c0fb3f2cb34e18806ccd977c68cdb4bb37c1 | [] | no_license | moqi112358/leetcode | 70366d29c474d19c43180fd4c282cc02c890af03 | fab9433ff7f66d00023e3af271cf309b2d481722 | refs/heads/master | 2022-12-10T01:46:14.799231 | 2021-01-14T05:00:09 | 2021-01-14T05:00:09 | 218,163,960 | 3 | 0 | null | 2022-07-06T20:26:38 | 2019-10-28T23:26:47 | Python | UTF-8 | Python | false | false | 1,747 | py | # A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
#
# The robot can only move either down or right at any point in time. The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
#
# How many possible unique paths are there?
#
#
# Example 1:
#
#
# Input: m = 3, n = 7
# Output: 28
#
#
# Example 2:
#
#
# Input: m = 3, n = 2
# Output: 3
# Explanation:
# From the top-left corner, there are a total of 3 ways to reach the bottom-right corner:
# 1. Right -> Down -> Down
# 2. Down -> Down -> Right
# 3. Down -> Right -> Down
#
#
# Example 3:
#
#
# Input: m = 7, n = 3
# Output: 28
#
#
# Example 4:
#
#
# Input: m = 3, n = 3
# Output: 6
#
#
#
# Constraints:
#
#
# 1 <= m, n <= 100
# It's guaranteed that the answer will be less than or equal to 2 * 10^9.
#
#
class Solution:
# def uniquePaths(self, m: int, n: int) -> int:
# self.count = 0
# self.searchPath(m, n, 0, 0)
# return self.count
# def searchPath(self, m, n, r, c):
# if r == m - 1 and c == n - 1:
# self.count += 1
# dx = [0, 1]
# dy = [1, 0]
# for i in range(2):
# x = r + dx[i]
# y = c + dy[i]
# if 0 <= x <= m - 1 and 0 <= y <= n - 1:
# self.searchPath(m, n, x, y)
# return
def uniquePaths(self, m: int, n: int) -> int:
res = [[0] * m for i in range(n)]
for i in range(m):
res[0][i] = 1
for i in range(n):
res[i][0] = 1
for i in range(1, n):
for j in range(1, m):
res[i][j] = res[i-1][j] + res[i][j-1]
return res[n-1][m-1]
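# Quick self-check (added; not part of the original solution), using the
# examples from the problem statement above:
if __name__ == "__main__":
    s = Solution()
    assert s.uniquePaths(3, 7) == 28
    assert s.uniquePaths(3, 2) == 3
    assert s.uniquePaths(7, 3) == 28
    assert s.uniquePaths(3, 3) == 6
    print("All examples pass.")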
| [
"[email protected]"
] | |
8c987f3e7fcca91265fa510ed17ea01cb6f62bab | b2487a96bb865cfa0d1906c4e66a4aea9b613ce0 | /pynfb/setup.py | fa813e333cb6ac1dadf94028ece66e8e86523568 | [] | no_license | gurasog/nfb | 1ccbbc7d507525cff65f5d5c756afd98ad86a7c6 | 9ff2c736c5d764f48d921bad3942c4db93390a5d | refs/heads/master | 2022-12-14T23:06:56.523586 | 2020-09-07T07:36:01 | 2020-09-07T07:36:01 | 273,970,746 | 0 | 0 | null | 2020-06-21T19:30:38 | 2020-06-21T19:30:37 | null | UTF-8 | Python | false | false | 164 | py | from cx_Freeze import setup, Executable
setup(
name = "pynfb",
version = "0.1",
description = "Python NFB",
executables = [Executable("main.py")]
) | [
"[email protected]"
] | |
65ba12265ec9faffe3d18ec718e81ba5893e0a1e | d659810b24ebc6ae29a4d7fbb3b82294c860633a | /aliyun-python-sdk-unimkt/aliyunsdkunimkt/request/v20181212/ScanCodeNotificationRequest.py | 5835e1a20aefefe670ace4df8bd671a5268d659a | [
"Apache-2.0"
] | permissive | leafcoder/aliyun-openapi-python-sdk | 3dd874e620715173b6ccf7c34646d5cb8268da45 | 26b441ab37a5cda804de475fd5284bab699443f1 | refs/heads/master | 2023-07-31T23:22:35.642837 | 2021-09-17T07:49:51 | 2021-09-17T07:49:51 | 407,727,896 | 0 | 0 | NOASSERTION | 2021-09-18T01:56:10 | 2021-09-18T01:56:09 | null | UTF-8 | Python | false | false | 5,343 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkunimkt.endpoint import endpoint_data
class ScanCodeNotificationRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'UniMkt', '2018-12-12', 'ScanCodeNotification')
self.set_protocol_type('https')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_RealCostAmount(self):
return self.get_query_params().get('RealCostAmount')
def set_RealCostAmount(self,RealCostAmount):
self.add_query_param('RealCostAmount',RealCostAmount)
def get_SalePrice(self):
return self.get_query_params().get('SalePrice')
def set_SalePrice(self,SalePrice):
self.add_query_param('SalePrice',SalePrice)
def get_CommodityId(self):
return self.get_query_params().get('CommodityId')
def set_CommodityId(self,CommodityId):
self.add_query_param('CommodityId',CommodityId)
def get_HolderId(self):
return self.get_query_params().get('HolderId')
def set_HolderId(self,HolderId):
self.add_query_param('HolderId',HolderId)
def get_DeviceType(self):
return self.get_query_params().get('DeviceType')
def set_DeviceType(self,DeviceType):
self.add_query_param('DeviceType',DeviceType)
def get_DeviceCode(self):
return self.get_query_params().get('DeviceCode')
def set_DeviceCode(self,DeviceCode):
self.add_query_param('DeviceCode',DeviceCode)
def get_ApplyPrice(self):
return self.get_query_params().get('ApplyPrice')
def set_ApplyPrice(self,ApplyPrice):
self.add_query_param('ApplyPrice',ApplyPrice)
def get_TaskId(self):
return self.get_query_params().get('TaskId')
def set_TaskId(self,TaskId):
self.add_query_param('TaskId',TaskId)
def get_OuterCode(self):
return self.get_query_params().get('OuterCode')
def set_OuterCode(self,OuterCode):
self.add_query_param('OuterCode',OuterCode)
def get_QueryStr(self):
return self.get_query_params().get('QueryStr')
def set_QueryStr(self,QueryStr):
self.add_query_param('QueryStr',QueryStr)
def get_Phase(self):
return self.get_query_params().get('Phase')
def set_Phase(self,Phase):
self.add_query_param('Phase',Phase)
def get_BizResult(self):
return self.get_query_params().get('BizResult')
def set_BizResult(self,BizResult):
self.add_query_param('BizResult',BizResult)
def get_TaskType(self):
return self.get_query_params().get('TaskType')
def set_TaskType(self,TaskType):
self.add_query_param('TaskType',TaskType)
def get_BrandUserId(self):
return self.get_query_params().get('BrandUserId')
def set_BrandUserId(self,BrandUserId):
self.add_query_param('BrandUserId',BrandUserId)
def get_Sex(self):
return self.get_query_params().get('Sex')
def set_Sex(self,Sex):
self.add_query_param('Sex',Sex)
def get_CostDetail(self):
return self.get_query_params().get('CostDetail')
def set_CostDetail(self,CostDetail):
self.add_query_param('CostDetail',CostDetail)
def get_ProxyUserId(self):
return self.get_query_params().get('ProxyUserId')
def set_ProxyUserId(self,ProxyUserId):
self.add_query_param('ProxyUserId',ProxyUserId)
def get_AlipayOpenId(self):
return self.get_query_params().get('AlipayOpenId')
def set_AlipayOpenId(self,AlipayOpenId):
self.add_query_param('AlipayOpenId',AlipayOpenId)
def get_BizType(self):
return self.get_query_params().get('BizType')
def set_BizType(self,BizType):
self.add_query_param('BizType',BizType)
def get_BrandNick(self):
return self.get_query_params().get('BrandNick')
def set_BrandNick(self,BrandNick):
self.add_query_param('BrandNick',BrandNick)
def get_V(self):
return self.get_query_params().get('V')
def set_V(self,V):
self.add_query_param('V',V)
def get_ChargeTag(self):
return self.get_query_params().get('ChargeTag')
def set_ChargeTag(self,ChargeTag):
self.add_query_param('ChargeTag',ChargeTag)
def get_Age(self):
return self.get_query_params().get('Age')
def set_Age(self,Age):
self.add_query_param('Age',Age)
def get_ChannelId(self):
return self.get_query_params().get('ChannelId')
def set_ChannelId(self,ChannelId):
self.add_query_param('ChannelId',ChannelId)
def get_Cid(self):
return self.get_query_params().get('Cid')
def set_Cid(self,Cid):
self.add_query_param('Cid',Cid) | [
"[email protected]"
] | |
3f017622fbfb44222dc155a544b055feccde4336 | 61aa319732d3fa7912e28f5ff7768498f8dda005 | /src/arch/x86/isa/insts/x87/control/__init__.py | 01a1e68baf2dbd685876e00b5d56637618e0e383 | [
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] | permissive | TeCSAR-UNCC/gem5-SALAM | 37f2f7198c93b4c18452550df48c1a2ab14b14fb | c14c39235f4e376e64dc68b81bd2447e8a47ff65 | refs/heads/main | 2023-06-08T22:16:25.260792 | 2023-05-31T16:43:46 | 2023-05-31T16:43:46 | 154,335,724 | 62 | 22 | BSD-3-Clause | 2023-05-31T16:43:48 | 2018-10-23T13:45:44 | C++ | UTF-8 | Python | false | false | 2,479 | py | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
categories = ["initialize",
"wait_for_exceptions",
"clear_exceptions",
"save_and_restore_x87_control_word",
"save_x87_status_word",
"save_and_restore_x87_environment"]
microcode = '''
# X86 microcode
'''
for category in categories:
exec("from . import %s as cat" % category)
microcode += cat.microcode
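# Added note: the exec() above performs a dynamic relative import of each
# category module; it is roughly equivalent to
#     importlib.import_module('.' + category, package=__package__)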
| [
"[email protected]"
] | |
2bebb31050c1eea8eec0d868ef0ad1ce49749f7d | 38bee274b237e508175be4c37bf357621ed50195 | /pde/pdes/allen_cahn.py | f1a80a46cb539fbec959f00de2c5a1ab654a3641 | [
"MIT"
] | permissive | anna-11/py-pde | 26f0110266fdb21803e665447b1204bedb401d78 | 5b596af5f224e3ec2a7fbea8f87fab3896c19642 | refs/heads/master | 2023-06-17T09:13:34.026794 | 2021-07-09T12:10:36 | 2021-07-09T12:10:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,296 | py | """
An Allen-Cahn equation
.. codeauthor:: David Zwicker <[email protected]>
"""
from typing import Callable # @UnusedImport
import numpy as np
from ..fields import ScalarField
from ..grids.boundaries.axes import BoundariesData
from ..tools.docstrings import fill_in_docstring
from ..tools.numba import jit, nb
from .base import PDEBase, expr_prod
class AllenCahnPDE(PDEBase):
r"""A simple Allen-Cahn equation
The mathematical definition is
.. math::
\partial_t c = \gamma \nabla^2 c - c^3 + c
where :math:`c` is a scalar field and :math:`\gamma` sets the interfacial
width.
"""
explicit_time_dependence = False
interface_width: float
@fill_in_docstring
def __init__(self, interface_width: float = 1, bc: BoundariesData = "natural"):
"""
Args:
interface_width (float):
The diffusivity of the described species
bc:
The boundary conditions applied to the field.
{ARG_BOUNDARIES}
"""
super().__init__()
self.interface_width = interface_width
self.bc = bc
@property
def expression(self) -> str:
"""str: the expression of the right hand side of this PDE"""
return f"{expr_prod(self.interface_width, 'laplace(c)')} - c**3 + c"
def evolution_rate( # type: ignore
self,
state: ScalarField,
t: float = 0,
) -> ScalarField:
"""evaluate the right hand side of the PDE
Args:
state (:class:`~pde.fields.ScalarField`):
The scalar field describing the concentration distribution
t (float): The current time point
Returns:
:class:`~pde.fields.ScalarField`:
Scalar field describing the evolution rate of the PDE
"""
assert isinstance(state, ScalarField), "`state` must be ScalarField"
laplace = state.laplace(bc=self.bc, label="evolution rate")
return self.interface_width * laplace - state ** 3 + state # type: ignore
def _make_pde_rhs_numba( # type: ignore
self, state: ScalarField
) -> Callable[[np.ndarray, float], np.ndarray]:
"""create a compiled function evaluating the right hand side of the PDE
Args:
state (:class:`~pde.fields.ScalarField`):
An example for the state defining the grid and data types
Returns:
A function with signature `(state_data, t)`, which can be called
with an instance of :class:`~numpy.ndarray` of the state data and
the time to obtained an instance of :class:`~numpy.ndarray` giving
the evolution rate.
"""
shape = state.grid.shape
arr_type = nb.typeof(np.empty(shape, dtype=state.data.dtype))
signature = arr_type(arr_type, nb.double)
interface_width = self.interface_width
laplace = state.grid.get_operator("laplace", bc=self.bc)
@jit(signature)
def pde_rhs(state_data: np.ndarray, t: float) -> np.ndarray:
"""compiled helper function evaluating right hand side"""
return interface_width * laplace(state_data) - state_data ** 3 + state_data # type: ignore
return pde_rhs # type: ignore
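# Minimal usage sketch (added note; grid size, initial condition and time range
# are illustrative assumptions, not part of the original module):
#
#     from pde import ScalarField, UnitGrid
#     eq = AllenCahnPDE(interface_width=1)
#     state = ScalarField.random_uniform(UnitGrid([64, 64]), -0.1, 0.1)
#     result = eq.solve(state, t_range=10, dt=0.01)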
| [
"[email protected]"
] | |
c2568db3ed9af7c59ba95dc30e283b4abd83d2a1 | e9adf4bb294b22add02c997f750e36b6fea23cdc | /nw/nw_logic/nw_rules_bank.py | 432c3a0479a0241af1dc0f9ec9c891ae0851037a | [
"BSD-3-Clause"
] | permissive | bairoliyaprem/python-rules | 5aa70dbf6d9efde0d95dda4b6397e36788c22cfa | ca7ab0addf60179ea61fddad6cd65c77a6792de1 | refs/heads/master | 2022-12-17T22:15:47.745660 | 2020-09-29T00:39:03 | 2020-09-29T00:39:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,863 | py | import sqlalchemy
from sqlalchemy_utils import get_mapper
from python_rules.exec_row_logic.logic_row import LogicRow
from python_rules.rule import Rule
from python_rules.rule_bank.rule_bank import RuleBank
from nw.nw_logic import models
from nw.nw_logic.models import Customer, OrderDetail, Product, Order, Employee
def activate_basic_check_credit_rules():
"""
Issues function calls to activate check credit rules, below.
These rules are executed not now, but on commits
Order is irrelevant - determined by system based on dependency analysis
Their inclusion in classes is for doc / convenience, no semantics
These rules apply to all transactions (automatic re-use), eg.
* place order
* change Order Detail product, quantity
* add/delete Order Detail
* ship / unship order
* delete order
* move order to new customer, etc
"""
def units_in_stock(row: Product, old_row: Product, logic_row: LogicRow):
result = row.UnitsInStock - (row.UnitsShipped - old_row.UnitsShipped)
return result
def congratulate_sales_rep(row: Order, old_row: Order, logic_row: LogicRow):
if logic_row.ins_upd_dlt == "ins" or True: # logic engine fills parents for insert
sales_rep = row.SalesRep # type : Employee
if sales_rep is None:
logic_row.log("no salesrep for this order")
else:
logic_row.log(f'Hi, {sales_rep.Manager.FirstName}, congratulate {sales_rep.FirstName} on their new order')
Rule.constraint(validate=Customer,
as_condition=lambda row: row.Balance <= row.CreditLimit,
error_msg="balance ({row.Balance}) exceeds credit ({row.CreditLimit})")
Rule.sum(derive=Customer.Balance, as_sum_of=Order.AmountTotal,
where=lambda row: row.ShippedDate is None) # *not* a sql select sum...
Rule.sum(derive=Order.AmountTotal, as_sum_of=OrderDetail.Amount)
Rule.formula(derive=OrderDetail.Amount, as_expression=lambda row: row.UnitPrice * row.Quantity)
Rule.copy(derive=OrderDetail.UnitPrice, from_parent=Product.UnitPrice)
Rule.formula(derive=OrderDetail.ShippedDate, as_exp="row.OrderHeader.ShippedDate")
Rule.sum(derive=Product.UnitsShipped, as_sum_of=OrderDetail.Quantity,
where="row.ShippedDate is not None")
Rule.formula(derive=Product.UnitsInStock, calling=units_in_stock)
Rule.commit_row_event(on_class=Order, calling=congratulate_sales_rep)
Rule.count(derive=Customer.UnpaidOrderCount, as_count_of=Order,
where=lambda row: row.ShippedDate is None) # *not* a sql select sum...
Rule.count(derive=Customer.OrderCount, as_count_of=Order)
class InvokePythonFunctions: # use functions for more complex rules, type checking, etc (not used)
@staticmethod
def load_rules(self):
def my_early_event(row, old_row, logic_row):
logic_row.log("early event for *all* tables - good breakpoint, time/date stamping, etc")
def check_balance(row: Customer, old_row, logic_row) -> bool:
"""
Not used... illustrate function alternative (e.g., more complex if/else logic)
specify rule with `calling=check_balance` (instead of as_condition)
"""
return row.Balance <= row.CreditLimit
def compute_amount(row: OrderDetail, old_row, logic_row):
return row.UnitPrice * row.Quantity
Rule.formula(derive="OrderDetail.Amount", calling=compute_amount)
Rule.formula(derive="OrderDetail.Amount", calling=lambda Customer: Customer.Quantity * Customer.UnitPrice)
Rule.early_row_event(on_class="*", calling=my_early_event) # just for debug
Rule.constraint(validate="Customer", calling=check_balance,
error_msg="balance ({row.Balance}) exceeds credit ({row.CreditLimit})")
class DependencyGraphTests:
"""Not loaded"""
def not_loaded(self):
Rule.formula(derive="Tbl.ColA", # or, calling=compute_amount)
as_exp="row.ColB + row.ColC")
Rule.formula(derive="Tbl.ColB", # or, calling=compute_amount)
as_exp="row.ColC")
Rule.formula(derive="Tbl.ColC", # or, calling=compute_amount)
as_exp="row.ColD")
Rule.formula(derive="Tbl.ColD", # or, calling=compute_amount)
as_exp="row.ColE")
Rule.formula(derive="Tbl.ColE", # or, calling=compute_amount)
as_exp="xxx")
class UnusedTests:
"""Not loaded"""
def not_loaded(self):
Rule.constraint(validate="AbUser", # table is ab_user
calling=lambda row: row.username != "no_name")
Rule.count(derive=Customer.OrderCount, as_count_of=Order,
where="ShippedDate not None")
| [
"[email protected]"
] | |
d9c24e51040b6a9a2ca86262d7b8e28bd164dc56 | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/learnbgame_jewelcraft/ops_asset/asset_ops.py | 18a52b2efc2714e9fb91c211f43e87e2ceb22cbf | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,608 | py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# JewelCraft jewelry design toolkit for Blender.
# Copyright (C) 2015-2019 Mikhail Rachinskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
import os
import bpy
from bpy.types import Operator
from bpy.props import StringProperty, BoolProperty
import bpy.utils.previews
from ..lib import asset, dynamic_list
class Setup:
def __init__(self):
self.props = bpy.context.window_manager.jewelcraft
self.folder_name = self.props.asset_folder
self.folder = os.path.join(asset.user_asset_library_folder_object(), self.folder_name)
self.asset_name = self.props.asset_list
self.filepath = os.path.join(self.folder, self.asset_name)
class WM_OT_jewelcraft_asset_add_to_library(Operator, Setup):
bl_label = "Add To Library"
bl_description = "Add selected objects to asset library"
bl_idname = "wm.jewelcraft_asset_add_to_library"
bl_options = {"INTERNAL"}
asset_name: StringProperty(name="Asset Name", description="Asset name", options={"SKIP_SAVE"})
@classmethod
def poll(cls, context):
return bool(context.window_manager.jewelcraft.asset_folder)
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
layout.separator()
layout.prop(self, "asset_name")
layout.separator()
def execute(self, context):
if not self.asset_name:
self.report({"ERROR"}, "Name must be specified")
return {"CANCELLED"}
filepath = os.path.join(self.folder, self.asset_name)
asset.asset_export(folder=self.folder, filename=self.asset_name + ".blend")
asset.render_preview(256, 256, filepath=filepath + ".png")
dynamic_list.asset_list_refresh()
self.props.asset_list = self.asset_name
context.area.tag_redraw()
return {"FINISHED"}
def invoke(self, context, event):
if not context.selected_objects:
return {"CANCELLED"}
self.asset_name = context.object.name
wm = context.window_manager
return wm.invoke_props_dialog(self)
class WM_OT_jewelcraft_asset_remove_from_library(Operator, Setup):
bl_label = "Remove Asset"
bl_description = "Remove asset from library"
bl_idname = "wm.jewelcraft_asset_remove_from_library"
bl_options = {"INTERNAL"}
@classmethod
def poll(cls, context):
return bool(context.window_manager.jewelcraft.asset_list)
def execute(self, context):
asset_list = dynamic_list.assets(self, context)
last = self.asset_name == asset_list[-1][0]
iterable = len(asset_list) > 1
if os.path.exists(self.filepath + ".blend"):
os.remove(self.filepath + ".blend")
if os.path.exists(self.filepath + ".png"):
os.remove(self.filepath + ".png")
dynamic_list.asset_list_refresh(preview_id=self.folder_name + self.asset_name)
if last and iterable:
self.props.asset_list = asset_list[-2][0]
context.area.tag_redraw()
return {"FINISHED"}
def invoke(self, context, event):
wm = context.window_manager
return wm.invoke_confirm(self, event)
class WM_OT_jewelcraft_asset_rename(Operator, Setup):
bl_label = "Rename Asset"
bl_description = "Rename asset"
bl_idname = "wm.jewelcraft_asset_rename"
bl_options = {"INTERNAL"}
asset_name: StringProperty(name="Asset Name", description="Asset name", options={"SKIP_SAVE"})
@classmethod
def poll(cls, context):
return bool(context.window_manager.jewelcraft.asset_list)
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
layout.separator()
layout.prop(self, "asset_name")
layout.separator()
def execute(self, context):
if not self.asset_name:
self.report({"ERROR"}, "Name must be specified")
return {"CANCELLED"}
name_current = self.props.asset_list
file_current = os.path.join(self.folder, name_current + ".blend")
file_preview_current = os.path.join(self.folder, name_current + ".png")
file_new = os.path.join(self.folder, self.asset_name + ".blend")
file_preview_new = os.path.join(self.folder, self.asset_name + ".png")
if not os.path.exists(file_current):
self.report({"ERROR"}, "File not found")
return {"CANCELLED"}
os.rename(file_current, file_new)
if os.path.exists(file_preview_current):
os.rename(file_preview_current, file_preview_new)
dynamic_list.asset_list_refresh()
self.props.asset_list = self.asset_name
context.area.tag_redraw()
return {"FINISHED"}
def invoke(self, context, event):
wm = context.window_manager
return wm.invoke_props_dialog(self)
class WM_OT_jewelcraft_asset_replace(Operator, Setup):
bl_label = "Replace Asset"
bl_description = "Replace current asset with selected objects"
bl_idname = "wm.jewelcraft_asset_replace"
bl_options = {"INTERNAL"}
@classmethod
def poll(cls, context):
return bool(context.window_manager.jewelcraft.asset_list)
def execute(self, context):
asset.asset_export(folder=self.folder, filename=self.asset_name + ".blend")
return {"FINISHED"}
def invoke(self, context, event):
wm = context.window_manager
return wm.invoke_confirm(self, event)
class WM_OT_jewelcraft_asset_preview_replace(Operator, Setup):
bl_label = "Replace Asset Preview"
bl_description = "Replace asset preview image"
bl_idname = "wm.jewelcraft_asset_preview_replace"
bl_options = {"INTERNAL"}
@classmethod
def poll(cls, context):
return bool(context.window_manager.jewelcraft.asset_list)
def execute(self, context):
asset.render_preview(256, 256, filepath=self.filepath + ".png")
dynamic_list.asset_list_refresh(preview_id=self.folder_name + self.asset_name)
context.area.tag_redraw()
return {"FINISHED"}
def invoke(self, context, event):
wm = context.window_manager
return wm.invoke_confirm(self, event)
class WM_OT_jewelcraft_asset_import(Operator, Setup):
bl_label = "JewelCraft Import Asset"
bl_description = "Import selected asset"
bl_idname = "wm.jewelcraft_asset_import"
bl_options = {"REGISTER", "UNDO"}
use_parent: BoolProperty(
name="Parent to selected",
description="Parent imported asset to selected objects (Shortcut: hold Alt when using the tool)",
)
@classmethod
def poll(cls, context):
return bool(context.window_manager.jewelcraft.asset_list)
def execute(self, context):
space_data = context.space_data
use_local_view = bool(space_data.local_view)
collection = context.collection
selected = list(context.selected_objects)
for ob in selected:
ob.select_set(False)
imported = asset.asset_import_batch(filepath=self.filepath + ".blend")
obs = imported.objects
for ob in obs:
collection.objects.link(ob)
ob.select_set(True)
if use_local_view:
ob.local_view_set(space_data, True)
if len(obs) == 1:
ob.location = context.scene.cursor.location
if self.use_parent and selected:
collection.objects.unlink(ob)
asset.ob_copy_and_parent(ob, selected)
elif context.mode == "EDIT_MESH":
asset.ob_copy_to_faces(ob)
bpy.ops.object.mode_set(mode="OBJECT")
context.view_layer.objects.active = ob
return {"FINISHED"}
def invoke(self, context, event):
self.use_parent = event.alt
return self.execute(context)
| [
"[email protected]"
] | |
7ca0ee05969af630f9b2b5a8871790f4991b5c08 | d55f3f715c00bcbd60badb3a31696a1a629600e2 | /students/maks/9/site2/page/management/commands/hello.py | 014dfc85a65d4e56ef7b7b974c84bd8f9de0f150 | [] | no_license | zdimon/wezom-python-course | ea0adaa54444f6deaca81ce54ee8334297f2cd1a | 5b87892102e4eb77a4c12924d2d71716b9cce721 | refs/heads/master | 2023-01-29T02:22:54.220880 | 2020-12-05T11:27:48 | 2020-12-05T11:27:48 | 302,864,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | from django.core.management.base import BaseCommand, CommandError
from page.models import Page
class Command(BaseCommand):
def handle(self, *args, **options):
print('Hello command!!!')
Page.objects.all().delete()
page1 = Page()
page1.title = 'Index page'
page1.content = 'content content'
page1.save()
page2 = Page()
page2.title = 'Index page 2'
page2.content = 'content content 2'
page2.save() | [
"[email protected]"
] | |
4df98b423cb6c082c08b54fdbf134f6143af4b37 | cb6f314d0c8f1a943718caa46b47bbe5ef9908f5 | /test/drawings/draw_hmab.py | 21c985c2897367265b4cd012a1bfb34b55a9bbce | [
"MIT"
] | permissive | yunx-z/soln-ml | d702d5afef7d9204c89bdcba44e26f89a68b8923 | f97c991c2ad287e8d295d3058b4a9b1fd50d847b | refs/heads/master | 2022-12-26T18:00:51.457311 | 2020-09-13T08:31:58 | 2020-09-13T08:31:58 | 296,199,472 | 0 | 0 | MIT | 2020-09-17T02:37:20 | 2020-09-17T02:37:20 | null | UTF-8 | Python | false | false | 1,574 | py | import os
import sys
import time
import pickle
import argparse
import numpy as np
import autosklearn.classification
sys.path.append(os.getcwd())
parser = argparse.ArgumentParser()
dataset_set = 'diabetes,spectf,credit,ionosphere,lymphography,pc4,' \
'messidor_features,winequality_red,winequality_white,splice,spambase,amazon_employee'
parser.add_argument('--datasets', type=str, default=dataset_set)
parser.add_argument('--mth', choices=['ours', 'ausk'], default='ours')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--algo_num', type=int, default=8)
parser.add_argument('--trial_num', type=int, default=100)
project_dir = './'
def plot(mth, dataset, algo_num, trial_num, seed):
if mth == 'ours':
save_path = project_dir + 'data/hmab_%s_%d_%d_%d.pkl' % \
(dataset, trial_num, algo_num, seed)
else:
save_path = project_dir + 'data/ausk_%s_%d.pkl' % (dataset, algo_num)
with open(save_path, 'rb') as f:
result = pickle.load(f)
print('Best validation accuracy: %.4f' % np.max(result[0]))
print('Final Rewards', result[0])
print('Time records', result[1])
print('Action Sequence', result[2])
print('-' * 30)
if __name__ == "__main__":
args = parser.parse_args()
dataset_str = args.datasets
dataset_list = list()
if dataset_str == 'all':
dataset_list = dataset_set
else:
dataset_list = dataset_str.split(',')
for dataset in dataset_list:
plot(args.mth, dataset, args.algo_num, args.trial_num, args.seed)
| [
"[email protected]"
] | |
dce5217f537b1368245960c5d19597735d145b4a | b6fc54cff7037f5e4ef26cb4a645d5ea5a6fecdf | /001146StepikPyBegin/Stepik001146PyBeginсh07p01st01T01_for_20200420.py | e9b62e163ccebe48ce59797d086fef7a96ccb3fa | [
"Apache-2.0"
] | permissive | SafonovMikhail/python_000577 | 5483eaf2f7c73bc619ce1f5de67d8d689d2e7dd4 | f2dccac82a37df430c4eb7425b5d084d83520409 | refs/heads/master | 2022-12-08T10:53:57.202746 | 2022-12-07T09:09:51 | 2022-12-07T09:09:51 | 204,713,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | # реализация множественного ввода
for i in range(5):
    num1 = int(input())
    print("The square of the number is:", num1 ** 2)
print("Loop finished")
| [
"[email protected]"
] | |
884a0b600ab0d1c224f04dbc9a3f59c3b47ca4f1 | fcd965c9333ee328ec51bc41f5bc0300cc06dc33 | /Coding Patterns/Fast & Slow Pointers/Palindrome LinkedList.py | 8e1dc912eb07d240edfe2d100acd686917699403 | [] | no_license | henrylin2008/Coding_Problems | 699bb345481c14dc3faa8bab439776c7070a1cb0 | 281067e872f73a27f76ae10ab0f1564916bddd28 | refs/heads/master | 2023-01-11T11:55:47.936163 | 2022-12-24T07:50:17 | 2022-12-24T07:50:17 | 170,151,972 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,389 | py | # Problem Challenge 1: Palindrome LinkedList (medium)
# https://designgurus.org/path-player?courseid=grokking-the-coding-interview&unit=grokking-the-coding-interview_1628743582805_17Unit
# Problem Statement
#
# Given the head of a Singly LinkedList, write a method to check if the LinkedList is a palindrome or not.
#
# Your algorithm should use constant space and the input LinkedList should be in the original form once the algorithm
# is finished. The algorithm should have O(N) time complexity where ‘N’ is the number of nodes in the LinkedList.
#
# Example 1:
# Input: 2 -> 4 -> 6 -> 4 -> 2 -> null
# Output: true
#
# Example 2:
# Input: 2 -> 4 -> 6 -> 4 -> 2 -> 2 -> null
# Output: false
# Solution
#
# As we know, a palindrome LinkedList will have nodes values that read the same backward or forward. This means that
# if we divide the LinkedList into two halves, the node values of the first half in the forward direction should be
# similar to the node values of the second half in the backward direction. As we have been given a Singly LinkedList,
# we can’t move in the backward direction. To handle this, we will perform the following steps:
# 1. We can use the Fast & Slow pointers method similar to Middle of the LinkedList to find the middle node of the
# LinkedList.
# 2. Once we have the middle of the LinkedList, we will reverse the second half.
# 3. Then, we will compare the first half with the reversed second half to see if the LinkedList represents a
# palindrome.
# 4. Finally, we will reverse the second half of the LinkedList again to revert and bring the LinkedList back to its
# original form.
class Node:
def __init__(self, value, next=None):
self.value = value
self.next = next
def is_palindromic_linked_list(head):
if head is None or head.next is None:
return True
# find middle of the LinkedList
slow, fast = head, head
while fast is not None and fast.next is not None:
slow = slow.next
fast = fast.next.next
head_second_half = reverse(slow) # reverse the second half
# store the head of reversed part to revert back later
copy_head_second_half = head_second_half
# compare the first and the second half
while head is not None and head_second_half is not None:
if head.value != head_second_half.value:
break # not a palindrome
head = head.next
head_second_half = head_second_half.next
reverse(copy_head_second_half) # revert the reverse of the second half
if head is None or head_second_half is None: # if both halves match
return True
return False
def reverse(head):
prev = None
while head is not None:
next = head.next
head.next = prev
prev = head
head = next
return prev
def main():
head = Node(2)
head.next = Node(4)
head.next.next = Node(6)
head.next.next.next = Node(4)
head.next.next.next.next = Node(2)
print("Is palindrome: " + str(is_palindromic_linked_list(head)))
head.next.next.next.next.next = Node(2)
print("Is palindrome: " + str(is_palindromic_linked_list(head)))
main()
# Time Complexity
# The above algorithm will have a time complexity of O(N) where ‘N’ is the number of nodes in the LinkedList.
#
# Space Complexity
# The algorithm runs in constant space O(1).
| [
"[email protected]"
] | |
dcb3980fb3389f1967bc679b30b0ca7aa6b476c9 | 08d316151302f7ba4ae841c15b7adfe4e348ddf1 | /reviewboard/hostingsvcs/tests/test_fogbugz.py | 97cccab17520fd5b84e7a49e97c38afd16cd410c | [
"MIT"
] | permissive | LloydFinch/reviewboard | aa8cd21fac359d49b3dfc5a68c42b857c0c04bd8 | 563c1e8d4dfd860f372281dc0f380a0809f6ae15 | refs/heads/master | 2020-08-10T20:02:32.204351 | 2019-10-02T20:46:08 | 2019-10-02T20:46:08 | 214,411,166 | 2 | 0 | MIT | 2019-10-11T10:44:55 | 2019-10-11T10:44:54 | null | UTF-8 | Python | false | false | 952 | py | """Unit tests for the FogBugz hosting service."""
from __future__ import unicode_literals
from reviewboard.hostingsvcs.testing import HostingServiceTestCase
class FogBugzTests(HostingServiceTestCase):
"""Unit tests for the FogBugz hosting service."""
service_name = 'fogbugz'
fixtures = ['test_scmtools']
def test_service_support(self):
"""Testing FogBugz service support capabilities"""
self.assertTrue(self.service_class.supports_bug_trackers)
self.assertFalse(self.service_class.supports_repositories)
def test_get_bug_tracker_field(self):
"""Testing FogBugz.get_bug_tracker_field"""
self.assertFalse(
self.service_class.get_bug_tracker_requires_username())
self.assertEqual(
self.service_class.get_bug_tracker_field(None, {
'fogbugz_account_domain': 'mydomain',
}),
'https://mydomain.fogbugz.com/f/cases/%s')
| [
"[email protected]"
] | |
a3ed4c22c27ef24b07b4ce76b60d632ea251f1f0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/212/usersdata/265/87081/submittedfiles/av1_3.py | 1d4a47463f8274d69023d764799f71ec143c55a0 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | # -*- coding: utf-8 -*-
import math
a = int(input('enter the value of a: '))
b = int(input('enter the value of b: '))
c = int(input('enter the value of c: '))
i=2
j=2
k=2
while i<=a:
if a%i==0:
divisor_a=i
i=1+i
print(i)
while j<=b:
if b%j==0:
divisor_b=j
j=1+j
print(j)
while k<=c:
if c%k==0:
divisor_c=k
k=1+k
print(k) | [
"[email protected]"
] | |
1849bac316628694ea87ff00774d2816a84a04ce | 6e4e395988c641856aa13aa3b68db838f0d47cc0 | /Trying out DB-API.py | 7a7c01c86e3f781089783e9aaf81459aec431027 | [
"MIT"
] | permissive | fatih-iver/Intro-to-Relational-Databases | 2669d060fd2f9c8884e936e541373eecfbe8634e | 28528132378436d6dd1f1bdec96d1e7e285b4e4d | refs/heads/master | 2020-03-16T11:26:54.090473 | 2018-05-09T21:32:38 | 2018-05-09T21:32:38 | 132,648,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | # To see how the various functions in the DB-API work, take a look at this code,
# then the results that it prints when you press "Test Run".
#
# Then modify this code so that the student records are fetched in sorted order
# by student's name.
#
import sqlite3
# Fetch some student records from the database.
db = sqlite3.connect("students")
c = db.cursor()
query = "select name, id from students order by name;"
c.execute(query)
rows = c.fetchall()
# First, what data structure did we get?
print("Row data:")
print(rows)
# And let's loop over it too:
print()
print("Student names:")
for row in rows:
print(" ", row[0])
db.close()
| [
"[email protected]"
] | |
fa1d83149490bbaaf6e2373771de847c30a4191b | 10326daa458342fd992f4bd2f9c63c9469ae5a11 | /.graal-git-repo | 419b5f2ded2ecc3adce67832c5ddb0e5577ca1db | [
"MIT",
"CC-BY-SA-4.0"
] | permissive | charig/black-diamonds | b08c1d9dab528a9c9c935414e6af4faf2d5773fb | 0fd46caeec41f57b621143be1e4bd22d5a9a7348 | refs/heads/master | 2020-04-12T20:04:36.490970 | 2018-09-22T13:39:03 | 2018-09-22T13:39:03 | 162,725,323 | 0 | 0 | MIT | 2018-12-21T14:47:34 | 2018-12-21T14:47:34 | null | UTF-8 | Python | false | false | 2,522 | #!/usr/bin/python
# This is a self-updating script encoding the Graal+Truffle and mx repos and
# revision used for testing on CI. We don't use submodules to avoid a hard
# dependency that might bloat the repository of users of BlackDiamonds
#
# To checkout the repos, at the specified version for this version of the code,
# run `./graal-git-repo checkout`
# To update this script, so its revisions point to the latest versions of the
# configured repos, run `./graal-git-repo update-script-revs`
import sys
import os
# We use the following repositories
GRAAL_REPO_URL = "https://github.com/smarr/truffle.git"
MX_REPO_URL = "https://github.com/graalvm/mx.git"
# And these are the repo revisions we test against
GRAAL_REPO_REV = "a9fba6a775ffc60a90959d2eff4e66d15e9867a9"
MX_REPO_REV = "5bc7f83b9d66a31259b90933fcd0aa64d38b8d1e"
def update(lines, var, val):
for idx, line in enumerate(lines):
if line.startswith(var):
print("Updating " + var + " to " + val)
lines[idx] = var.ljust(15) + '= "' + val + '"\n'
break
def run(cmd):
print(cmd)
return os.popen(cmd).read()
if len(sys.argv) == 1:
print("To checkout the Graal+Truffle and MX dependencies use:")
print(" " + __file__ + " checkout")
print("To update the dependencies in this script use:")
print(" " + __file__ + " update-script-revs")
quit()
if sys.argv[1] == "update-script-revs":
graal_head_data = run("git ls-remote " + GRAAL_REPO_URL + " HEAD")
graal_head_rev = graal_head_data.split("\t")[0]
mx_head_data = run("git ls-remote " + MX_REPO_URL + " HEAD")
mx_head_rev = mx_head_data.split("\t")[0]
with open(__file__, 'r') as script_file:
content = script_file.readlines()
update(content, 'GRAAL_REPO_REV', graal_head_rev)
update(content, 'MX_REPO_REV', mx_head_rev)
with open(__file__, 'w') as script_file:
script_file.writelines(content)
def update_repo(folder, repo, rev):
folder = os.path.realpath(folder)
if not os.path.isdir(folder):
print("cloning " + repo)
print(run("git clone --depth 5000 " + repo + " " + folder))
run("git --git-dir=" + folder + "/.git --work-tree=" + folder +
" fetch --depth 5000")
print(run("git --git-dir=" + folder + "/.git --work-tree=" + folder +
" reset --hard " + rev))
if sys.argv[1] == "checkout":
update_repo("graal", GRAAL_REPO_URL, GRAAL_REPO_REV)
update_repo("mx", MX_REPO_URL, MX_REPO_REV)
| [
"[email protected]"
] | ||
d0fc14fbec55dd1b1228445716ac3f914c23cd1f | 8d375652e44b67d73102fee7abc1abaab4cb4329 | /m-old-versions-2/m6/compiler/m6c.py | ab63561b6f0615eda6cd3412281e8f697c1403cd | [
"MIT"
] | permissive | paulscottrobson/old-m-versions | 6d2061e36f2a5aaef388a4786406f876f0a06e0b | c2edb4200d32e066223ace4fd05837a485302645 | refs/heads/master | 2020-04-04T03:09:25.399283 | 2018-11-01T12:14:57 | 2018-11-01T12:14:57 | 155,709,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,854 | py | # ****************************************************************************************
# ****************************************************************************************
#
# Name: m6c.py
# Purpose: Simple M6 Compiler.
# Date: 15th August 2018
# Author: Paul Robson ([email protected])
#
# ****************************************************************************************
# ****************************************************************************************
import os,sys,re
from msystem import *
# ****************************************************************************************
# Exception Class
# ****************************************************************************************
class CompilerException(Exception):
def __init__(self,msg):
self.message = msg
# ****************************************************************************************
# Binary Object Class
# ****************************************************************************************
class Binary(object):
def __init__(self):
self.binary = [0x00] * 0x10000 # empty memory space
mlib = MSystemLibrary() # get library object
core = mlib.getBinary()
for i in range(0,len(core)): # copy binary in
self.binary[0x5B00 + i] = core[i]
self.lastWritten = 0x5B00 + len(core) # last byte written
dic = [x for x in mlib.getDictionary() if x.find("system.info")>=0] # extract sysinfo entry
self.systemInfo = int(dic[0].split("__")[2]) # rip from dictionary
self.codePtr = self.read(self.systemInfo+2)+self.read(self.systemInfo+3) * 256
self.echo = False
#
# Accessors and Mutators
#
def read(self,addr): # access/mutate
return self.binary[addr]
def write(self,addr,data):
self.lastWritten = max(self.lastWritten,addr)
data = data & 0xFF
self.binary[addr] = data
if self.echo:
print("\t{0:04x} : {1:02x} {2:c}".format(addr,data,((data & 0x3F) ^ 0x20) + 0x20))
def cByte(self,data): # compile a byte
self.write(self.codePtr,data)
self.codePtr += 1
def cWord(self,data):
self.cByte(data & 0xFF)
self.cByte(data >> 8)
#
# Set the start address
#
def setMain(self,mainAddress):
self.write(self.systemInfo+6,mainAddress & 0xFF) # word main updates start
self.write(self.systemInfo+7,mainAddress >> 8) # address.
#
# Write a memory block.
#
def writeMemory(self,handle,fromAddr,toAddr):
self.write(self.systemInfo+2,self.codePtr & 0xFF) # update code pointer
self.write(self.systemInfo+3,self.codePtr >> 8)
handle.write(bytes(self.binary[fromAddr:toAddr])) # and write out
#
# Generate a snap or some other loading file.
#
def generateSnapFile(self,stub):
oldPointer = self.codePtr # save code pointer
self.codePtr = 0x4000-27 # 27 byte SNA header
self.cByte(0x3F)
for i in range(0,9):
self.cWord(0)
for i in range(0,4):
self.cByte(0)
self.cWord(0x5AFE) # SP
self.cByte(1)
self.cByte(7)
assert self.codePtr == 0x4000
self.codePtr = oldPointer
self.write(0x5AFE,0x00) # put start $5B00 at
self.write(0x5AFF,0x5B) # TOS.
h = open(stub+".sna","wb") # write as a snap file.
self.writeMemory(h,0x4000-27,0x10000)
h.close()
# ****************************************************************************************
# Dictionary Class
# ****************************************************************************************
class Dictionary(object):
def __init__(self):
core = MSystemLibrary().getDictionary() # get dictionary list
self.words = {} # create the initial library.
for c in core:
c = c.split("__")
self.add(c[0],c[1],int(c[2]))
#
# Add in a new entry
#
def add(self,name,type,address):
name = name.lower().strip() # tidy up
type = type.lower().strip()
if name in self.words: # check duplicates
raise CompilerException("Duplicate name "+name)
self.words[name] = { "name":name,"type":type,"address":address,"private":type == "variable" }
self.words[name]["immediate"] = type == "macro" # macros are immediates
self.last = self.words[name] # remember last definition
#
# Apply "Private" and "Immediate" to last word
#
def makeLastPrivate(self): # make last defined private
self.last["private"] = True
def makeLastImmediate(self): # make last defined immediate
self.last["immediate"] = True
#
# Find a dictionary word
#
def find(self,name): # find by name
name = name.lower().strip()
return self.words[name] if name in self.words else None # search for given key
#
# Purge dictionary of all private words
#
def removeAllPrivate(self):
words = self.words # keep list
self.words = {} # clear it
for w in words.keys(): # and only copy non private
if not words[w]["private"]: # words in.
self.words[w] = words[w]
#
# Output the whole dictionary into memory
#
def createInMemory(self,binary):
#binary.echo = True
# start of dictionary
dp = binary.read(binary.systemInfo+10)+binary.read(binary.systemInfo+11) * 256
words = [x for x in self.words.keys()] # words to do in memory
words.sort(key = lambda x:self.words[x]["address"]) # order
for w in words:
binary.write(dp+0,len(w)+5) # +0 : offset to next
binary.write(dp+1,self.words[w]["address"] & 0xFF) # +1 : address low
binary.write(dp+2,self.words[w]["address"] >> 8) # +2 : adddress high
binary.write(dp+3,0) # +3 : page number
cByte = len(w) + (0x80 if self.words[w]["immediate"] else 0x00)
binary.write(dp+4,cByte) # +4 : length (0..4)
# : private (6)
# : immediate (7)
for i in range(0,len(w)): # +5 : name in 6 bit ASCII
binary.write(dp+5+i,ord(w[i].upper()) & 0x3F)
dp = dp + len(w) + 5 # next memory area
binary.write(dp,0) # offset byte 0, end.
binary.write(binary.systemInfo+0,dp & 0xFF) # update dict next free
binary.write(binary.systemInfo+1,dp >> 8)
#binary.echo = False
# ****************************************************************************************
# Line Compiler
# ****************************************************************************************
class LineCompiler(object):
def __init__(self,binaryObject,dictionary):
self.binary = binaryObject # working objects
self.dictionary = dictionary
self.dictionary.removeAllPrivate()
#
# Compile one line.
#
def compile(self,line):
line = line.replace("\t"," ").strip() # remove tabs amd stro[ ot/]
self.words = [x.strip() for x in line.split(" ") if x.strip() != ""]# get all the words
self.wordIndex = 0
self.inComment = False # Comments end at EOL
w = self.getNextWord() # process all words
while w != "":
self.compileWord(w.lower())
w = self.getNextWord()
#
# Get next word on this line
#
def getNextWord(self):
if self.wordIndex >= len(self.words):
return ""
w = self.words[self.wordIndex]
self.wordIndex += 1
return w
#
# Compile a single word
#
def compileWord(self,word):
#
# Handle comments.
#
if word == "(*" or word == "*)":
self.inComment = (word == "(*")
return
if self.inComment:
return
if self.binary.echo:
print("{0}".format(word))
#
# Integer constant
#
intNum = self.wordToNumber(word)
if intNum is not None:
self.compileConstant(intNum)
return
#
# String constant
#
if word[0] == '"' and word[-1] == '"' and len(word) >= 2: # string constant
self.binary.cByte(0xEB) # ex de,hl
self.binary.cByte(0x21) # LD HL, <string start>
self.binary.cWord(self.binary.codePtr+4)
self.binary.cByte(0x18) # JR <over string>
self.binary.cByte(len(word)-1)
self.binary.cByte(len(word)-2) # length of string
for c in word[1:-1].replace("_"," ").upper():
self.binary.cByte(((ord(c) & 0x3F) ^ 0x20) + 0x20)
return
#
# Colon Definition
#
if word == ':' : # colon definition
newWord = self.getNextWord()
if newWord == "":
raise CompilerException(": without word name")
if self.binary.echo:
print(" **** "+newWord+" ****")
self.dictionary.add(newWord,"word",self.binary.codePtr)
if newWord == "main":
self.binary.setMain(self.binary.codePtr)
return
#
# Words (Macro/Immediate AND direct)
#
wRec = self.dictionary.find(word) # words
if wRec is not None:
if wRec["type"] == "word":
if wRec["immediate"]:
raise CompilerException(word+" is immediate, call cannot be compiled.")
self.binary.cByte(0xCD) # call <addr>
self.binary.cWord(wRec["address"])
if wRec["type"] == "macro":
addr = wRec["address"]+3 # skip over the call MacroExpand
size = self.binary.read(addr) # to size, followed by data
assert size >= 0 and size <= 6,"Macro size ? "+word
for i in range(0,size):
self.binary.cByte(self.binary.read(addr+i+1))
return
#
# Modified words (! @ & # added)
#
wRec = self.dictionary.find(word[:-1]) # modified words
if "!@&#".find(word[-1]) >= 0 and wRec is not None:
if word[-1] == "!": # store
self.binary.cByte(0x22) # ld (address),hl
self.binary.cWord(wRec["address"])
if word[-1] == "@": # load
self.binary.cByte(0xEB) # ex de,hl
self.binary.cByte(0x2A) # ld hl,(address)
self.binary.cWord(wRec["address"])
if word[-1] == "&": # address
self.compileConstant(wRec["address"])
if word[-1] == '#': # array (address + A * 2)
self.binary.cByte(0x29) # add hl,hl (double index)
self.binary.cByte(0x01) # ld bc,address
self.binary.cWord(wRec["address"])
self.binary.cByte(0x09) # add hl,bc
return
#
# Control structures
#
if word == "begin" or word == "until" or word == "-until": # structures
self.compileBeginLoop(word)
return
if word == "if" or word == "-if" or word == "then":
self.compileIfTest(word)
return
if word == "for" or word == "next":
self.compileForLoop(word)
return
#
# Miscellany
#
if word == "private": # make last definition private
self.dictionary.makeLastPrivate()
return
if word == "immediate": # make last definition immediate
self.dictionary.makeLastImmediate()
return
if word == "list.on" or word == "list.off": # control code listing
self.binary.echo = (word == "list.on")
return
if word == "variable": # 2 byte variable
self.dictionary.makeLastPrivate()
self.binary.cWord(0)
return
if word == "array": # array of byte size
self.dictionary.makeLastPrivate()
size = self.wordToNumber(self.getNextWord()) # get size of array
if size is None:
raise CompilerException("Array without valid size")
for i in range(0,size):
self.binary.cByte(0)
return
#
# Finally give up
#
raise CompilerException("Don't understand '"+word+"'")
#
# Convert word to integer if possible.
#
def wordToNumber(self,word):
if re.match("^\-?[0-9]+$",word): # decimal constant
return int(word,10) & 0xFFFF
if re.match("^\$[0-9a-f]+$",word): # hex constant
return int(word[1:],16) & 0xFFFF
return None
#
# Compile code to swap A/B and load a constant
#
def compileConstant(self,constant):
self.binary.cByte(0xEB) # ex de,hl
self.binary.cByte(0x21) # ld hl,<const>
self.binary.cWord(constant & 0xFFFF)
#
# Begin and Until/-Until code
#
def compileBeginLoop(self,word):
if word == "begin":
self.beginAddress = self.binary.codePtr
else:
self.binary.cWord(0xB57C if word[0] != "-" else 0x7CCB) # test for -ve / non-zero
self.binary.cByte(0x28)
self.binary.cByte((self.beginAddress - (self.binary.codePtr + 1)) & 0xFF)
#
# If/-If and Then code
#
def compileIfTest(self,word):
if word == "if" or word == "-if":
self.binary.cWord(0xB57C if word[0] != "-" else 0x7CCB) # test for -ve / non-zero
self.binary.cByte(0x28)
self.ifAddress = self.binary.codePtr
self.binary.cByte(0x00)
else:
self.binary.write(self.ifAddress,self.binary.codePtr - (self.ifAddress+1))
#
# For/Next code
#
def compileForLoop(self,word):
if word == "for":
self.forAddress = self.binary.codePtr
self.binary.cByte(0x2B) # dec HL
self.binary.cByte(0xE5) # push HL
else:
self.binary.cByte(0xE1) # pop HL
self.binary.cWord(0xB57C) # test if zero
self.binary.cByte(0x20) # if nz
self.binary.cByte(self.forAddress-(self.binary.codePtr+1))
# ****************************************************************************************
# Project Compiler
# ****************************************************************************************
class ProjectCompiler(object):
#
def __init__(self,sourceFile):
self.binary = Binary() # create helper objects
self.dictionary = Dictionary()
self.lineCompiler = LineCompiler(self.binary,self.dictionary)
self.imports = {} # list of imports
try:
self.compileFile(sourceFile) # compile source catching errors
except CompilerException as err:
print("*** M6 Error *** {0}:{1} .... {2}".format(ProjectCompiler.FILENAME,ProjectCompiler.LINENUMBER,err.message))
sys.exit(1)
self.dictionary.removeAllPrivate() # remove all private words
self.dictionary.createInMemory(self.binary) # generate directory in memory
self.binary.generateSnapFile(sourceFile[:-3]) # generate SNA file.
#
# Compile a single file
#
def compileFile(self,sourceFile):
if sourceFile[-3:] != ".m6": # .m6 only !
raise CompilerException("Source must be a .m6 file")
if not os.path.isfile(sourceFile): # must exist
raise CompilerException("Cannot find file "+sourceFile)
src = open(sourceFile).readlines() # work through the file
for i in range(0,len(src)):
ProjectCompiler.FILENAME = sourceFile # set error info
ProjectCompiler.LINENUMBER = i + 1
if src[i][:6] == "import": # check for import <file>
impFile = src[i][6:].strip().split(" ")[0].lower() # get the file.
if impFile not in self.imports: # import it if we haven't already
self.compileFile(impFile)
self.imports[impFile] = True
self.dictionary.removeAllPrivate()
else:
self.lineCompiler.compile(src[i])
self.LINENUMBER = 0
if __name__ == "__main__":
for src in sys.argv[1:]:
print("M6C:Building "+src)
ProjectCompiler(src)
| [
"[email protected]"
] | |
967c4163dd3b2b4b3fccef187338d4d020e0e693 | 7c63a96fad4257f4959ffeba0868059fc96566fb | /py/m_lutz-programming_python-4_ed/code/ch_01/step_06/01-cgi_basics/cgi-bin/cgi101.py | 73cd3dab161b31965720666610c168715c174345 | [
"MIT"
] | permissive | ordinary-developer/education | b426148f5690f48e0ed4853adfc3740bd038b72c | 526e5cf86f90eab68063bb7c75744226f2c54b8d | refs/heads/master | 2023-08-31T14:42:37.237690 | 2023-08-30T18:15:18 | 2023-08-30T18:15:18 | 91,232,306 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | #!/usr/local/bin/python3
import cgi
form = cgi.FieldStorage()
print('Content-type: text/html\n')
print('<title>Reply Page</title>')
if not 'user' in form:
print('<h1>Who are you?</h1>')
else:
print('<h1>Hello <i>%s</i>!</h1>' % cgi.escape(form['user'].value))
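# Companion-form sketch (added for illustration; the URL and page are assumptions, not part
# of this file): an HTML page along the lines of
#   <form method="POST" action="http://localhost/cgi-bin/cgi101.py">
#       <input type="text" name="user">
#       <input type="submit" value="Send">
#   </form>
# would supply the 'user' field that cgi.FieldStorage() parses above; without it, the
# script falls back to the "Who are you?" branch.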
| [
"[email protected]"
] | |
5f3d1d59dddfd4fbccd0763620228091d57a6323 | eb2668b93899637f04e4c93e01063d0c8175ccde | /Irises_classification/iris_KNN_GridSearch_2_parameters.py | a8ae8beebc2f04cb715db123dca90df089536532 | [] | no_license | D-Katt/AI-Machine-Learning | aad1fe1c8f3f901cb7829919d1b69a106f0ddfab | 1868c92366dccabf8c86c559eee640645b51bb51 | refs/heads/master | 2021-12-19T21:59:04.403188 | 2021-12-07T13:07:46 | 2021-12-07T13:07:46 | 235,104,866 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,161 | py | # Пример классификации объектов 'iris dataset' с использованием модели KNN.
# The GridSearchCV tool is used to tune 2 hyperparameters -
# the number of "neighbors" and the weighting scheme.
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
data = load_iris()  # Load the dataset
X = data.data  # Extract the input data (feature measurements)
y = data.target  # Extract the target values (species labels)
# Split the data into training and test sets:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Create the KNN model:
knn = KNeighborsClassifier()
# Define the lists of values for parameter tuning:
k_range = range(1, 31)
weight_options = ['uniform', 'distance']
param_grid = {'n_neighbors': k_range, 'weights': weight_options}
# Pass GridSearchCV the model to evaluate, the grid of parameter values
# to choose the best combination from and the scoring criterion,
# and split the data into 10 folds during cross-validation:
grid = GridSearchCV(knn, param_grid, cv=10, scoring='accuracy')
# Fit on the training data:
grid.fit(X_train, y_train)
print('Best cross-validation accuracy:', grid.best_score_)
print('\nParameters of the best model:', grid.best_estimator_)
# Inspect the mean accuracy and standard deviation for every parameter combination:
results = pd.DataFrame(grid.cv_results_)
print(results[['param_n_neighbors', 'param_weights', 'mean_test_score', 'std_test_score']])
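# Follow-up sketch (added example, reusing the variables defined above): the fitted
# GridSearchCV object acts as the tuned classifier, so the held-out split created by
# train_test_split() can be scored and predicted directly.
print('Accuracy on the held-out test set:', grid.score(X_test, y_test))
print('Predicted classes for the test set:', grid.predict(X_test))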
| [
"[email protected]"
] | |
1773433480c17f06c09757f6251e684035134844 | 75dff087b6bec301193b2c145579d38b28249d22 | /Leetcode_Algorithm/Python3/280_Wiggle_Sort.py | 2735d66046a732cbb77e5f70e0db75a365eae4b8 | [] | no_license | ChihYunPai/Data-Structure-and-Algorithms | 2d3d930d8374b62287f7cc8c3741a1d7b3d227b6 | 27a85e20605393a5eca3f8bd7d42c389612493d5 | refs/heads/master | 2023-06-07T04:04:57.186920 | 2021-07-03T03:28:29 | 2021-07-03T03:28:29 | 119,465,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | """
Given an unsorted array nums, reorder it in-place such that nums[0] <= nums[1] >= nums[2] <= nums[3]....
For example, given nums = [3, 5, 2, 1, 6, 4], one possible answer is [1, 6, 2, 5, 3, 4].
"""
class Solution(object):
def wiggleSort(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
sortedNums = sorted(nums)
i, j = 0, 0
while i < len(nums):
nums[i] = sortedNums[j]
i += 2
j += 1
i = len(nums) - 1 if len(nums) % 2 == 0 else len(nums) - 2
while i >= 1:
nums[i] = sortedNums[j]
i -= 2
j += 1
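# Quick check (added sketch, not part of the original submission):
#   nums = [3, 5, 2, 1, 6, 4]
#   Solution().wiggleSort(nums)
#   print(nums)   # -> [1, 6, 2, 5, 3, 4], matching the example in the docstring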
| [
"[email protected]"
] | |
cf198967e5721ff051b551643c1902e36c065adf | d66818f4b951943553826a5f64413e90120e1fae | /hackerrank/Python/Validating Roman Numerals/solution.py | 4d3307da4a8b34976111672247c145435592a77a | [
"MIT"
] | permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 169 | py | import re
regex_pattern = r'(?<=^)M{0,3}(C[MD]|D?C{0,3})(X[CL]|L?X{0,3})(I[VX]|V?I{0,3})(?=$)' # Do not delete 'r'.
print(str(bool(re.match(regex_pattern, input()))))
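# Sanity checks (added sketch) against the same pattern:
#   bool(re.match(regex_pattern, 'MCMXCIV'))  # True  - valid Roman numeral for 1994
#   bool(re.match(regex_pattern, 'MMMM'))     # False - more than three M's
#   bool(re.match(regex_pattern, 'IC'))       # False - invalid subtractive pair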
| [
"[email protected]"
] | |
45db51e7b815934ce09e02643904239397422470 | ee974d693ca4c4156121f8cb385328b52eaac07c | /env/lib/python3.6/site-packages/sklearn/externals/joblib/disk.py | 56b6fcc6e11c358e17de25c0a80e8b29c5e5b103 | [] | no_license | ngonhi/Attendance_Check_System_with_Face_Recognition | f4531cc4dee565d0e45c02217f73f3eda412b414 | 92ff88cbc0c740ad48e149033efd38137c9be88d | refs/heads/main | 2023-03-12T07:03:25.302649 | 2021-02-26T15:37:33 | 2021-02-26T15:37:33 | 341,493,686 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:3d0a5346f8b6ea081d787ef43f9520d3be4604ea7341ac86e5950a80c51c19d4
size 3938
| [
"Nqk180998!"
] | Nqk180998! |
d5778be7d85fc4bd898845b67be6e04539e54fd0 | a5af73a980c08eef311cda1a27645f57dc07e0eb | /formApp/templatetags/my_extras.py | 01796028288dc2c86b336e3dfbfc5c09aab23b48 | [] | no_license | pritamSarkar123/django2020-pracTwo | c38ef08537cd48d884a6d75eef67277c38ad49d0 | 84e32249bc5c7d0ba89cd0e1070fee05e2523183 | refs/heads/master | 2022-07-08T21:02:21.124866 | 2020-05-15T04:39:23 | 2020-05-15T04:39:23 | 264,003,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | from django import template
register = template.Library()
@register.filter(name='cut')
def cut(value, arg):
"""
    Remove every occurrence of arg from the given value.
"""
return value.replace(arg, '')
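# Usage sketch (added example): after loading this library in a template with
# {% load my_extras %}, the filter strips every occurrence of its argument,
# e.g. {{ "Hello world"|cut:" " }} renders as "Helloworld". Calling the plain
# function gives the same result: cut("Hello world", " ") == "Helloworld".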
| [
"[email protected]"
] | |
47057d9275b82dbfea7d0349e2bd1350c848700a | 25404f4cfb9be3e6f1b3fe31a1554459eb200813 | /my_redis/why_use_redis/count_realtime_users.py | fa7fa21aaed3c6586f1802c58a13f2b7ef57700f | [] | no_license | nightimero/annal_report_test | 1c6eb4b71482f870c753f5084212afd071929f57 | 7bbc76ba703527ba8f4b84fbdb94fd57b37b9887 | refs/heads/master | 2021-09-06T21:18:59.534963 | 2018-02-11T15:31:21 | 2018-02-11T15:31:21 | 103,259,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,747 | py | # -*- coding:utf-8 -*-
# todo: http://www.cnblogs.com/clover-siyecao/p/5600078.html
# todo: http://debugo.com/python-redis/
# todo: http://flask.pocoo.org/snippets/71/
# todo: https://www.google.com/search?q=python+redis+应用 - going forward, add the word 应用 ("application") to searches like this to find more practical, example-driven results.
import time
from redis import Redis
from datetime import datetime
ONLINE_LAST_MINUTES = 5
redis = Redis()
def mark_online(user_id):  # Mark a user as online
    now = int(time.time())  # current UNIX timestamp
    expires = now + (ONLINE_LAST_MINUTES * 60) + 10  # UNIX timestamp at which the keys expire
    all_users_key = 'online-users/%d' % (now // 60)  # set name, keyed by the current minute
user_key = 'user-activity/%s' % user_id
p = redis.pipeline()
    p.sadd(all_users_key, user_id)  # add the user id to the set for the current minute
    p.set(user_key, now)  # record when the user was last marked online
    p.expireat(all_users_key, expires)  # expire the per-minute set at the UNIX timestamp above
p.expireat(user_key, expires)
p.execute()
def get_user_last_activity(user_id):  # Get the time of the user's last activity
    last_active = redis.get('user-activity/%s' % user_id)  # returns None if the key is missing
if last_active is None:
return None
return datetime.utcfromtimestamp(int(last_active))
def get_online_users():  # Get the list of users currently online
current = int(time.time()) // 60
    minutes = xrange(ONLINE_LAST_MINUTES)
    return redis.sunion(['online-users/%d' % (current - x)  # union of the per-minute sets for the last ONLINE_LAST_MINUTES minutes
for x in minutes])
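# Usage sketch (added example; assumes a Redis server is reachable with the default
# Redis() connection settings used above):
#   mark_online('42')                  # call on every authenticated request
#   get_online_users()                 # -> ids seen within the last ONLINE_LAST_MINUTES
#   get_user_last_activity('42')       # -> datetime of the last mark, or None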
| [
"[email protected]"
] | |
45a3de2385c5bb0e77f6e3f913a5e766c392cf7a | 7b15c40c00ba2008024979d0e520a922bc2f8229 | /1st_try/144_Binary_Tree_Preorder_Traversal_1st_try.py | ac7a9b6c27275d70ef87a3427e2ff0de3860a3c6 | [] | no_license | axd8911/Leetcode | aa9875a5b55c7d5e961d9a3ea55823d06eb08a88 | 1c6cab14f4dac4f3f29f1b5ce13bb5289724fdb4 | refs/heads/master | 2022-07-07T12:59:38.251218 | 2021-06-22T06:27:05 | 2021-06-22T06:27:05 | 173,857,144 | 0 | 1 | null | 2022-06-22T01:22:30 | 2019-03-05T02:23:42 | Python | UTF-8 | Python | false | false | 1,143 | py | '''
98.6%
'''
#Iteration
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def preorderTraversal(self, root: TreeNode) -> List[int]:
output = []
node = [root]
while node:
curr = node.pop()
if curr:
output.append(curr.val)
node.append(curr.right)
node.append(curr.left)
return output
#recursion
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def __init__(self):
self.output = []
def preorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
if root:
self.output.append(root.val)
self.preorderTraversal(root.left)
self.preorderTraversal(root.right)
return self.output
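# Usage sketch (added example; assumes one of the commented-out TreeNode definitions
# above is restored):
#   root = TreeNode(1)
#   root.right = TreeNode(2)
#   root.right.left = TreeNode(3)
#   Solution().preorderTraversal(root)   # -> [1, 2, 3]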
| [
"[email protected]"
] | |
e97b22133de8154a10a062cd7a9804c4b2e0ce3c | d9257c051591b63533fe53d5e6996689d3a93c50 | /ocean_lib/ocean/ocean_auth.py | d6dfa779bcfb2334876b5d6a83cd12e1f3183c70 | [
"Apache-2.0"
] | permissive | PosthumanMarket/Posthuman.py | 87e79fa826d50584b9cb2d722afa359aa6026e46 | 66538c29bfeb1c31199629b68bc10cc36caa3376 | refs/heads/master | 2023-08-26T06:35:47.866562 | 2021-06-06T18:51:34 | 2021-06-06T18:51:34 | 346,468,458 | 6 | 1 | Apache-2.0 | 2021-06-05T02:03:27 | 2021-03-10T19:27:37 | Python | UTF-8 | Python | false | false | 4,131 | py | """Ocean module."""
# Copyright 2018 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
import logging
from datetime import datetime
from ocean_lib.config_provider import ConfigProvider
from ocean_lib.web3_internal.utils import add_ethereum_prefix_and_hash_msg
from ocean_lib.web3_internal.web3_provider import Web3Provider
from ocean_lib.data_store.auth_tokens import AuthTokensStorage
from ocean_lib.web3_internal.web3helper import Web3Helper
class OceanAuth:
"""Ocean auth class.
Provide basic management of a user auth token. This token can be used to emulate
sign-in behaviour. The token can be stored and associated with an expiry time.
    This is useful in front-end applications that interact with 3rd-party wallet
apps. The advantage of using the auth token is to reduce the number of confirmation
prompts requiring user action.
The auth token works with a provider service such as Ocean provider-py which also uses this
ocean module to handle auth tokens.
Token format is "signature-timestamp".
"""
DEFAULT_EXPIRATION_TIME = 30 * 24 * 60 * 60 # in seconds
DEFAULT_MESSAGE = "Ocean Protocol Authentication"
def __init__(self, storage_path):
self._tokens_storage = AuthTokensStorage(storage_path)
@staticmethod
def _get_timestamp():
return int(datetime.now().timestamp())
def _get_expiration(self):
return int(ConfigProvider.get_config().auth_token_expiration
or self.DEFAULT_EXPIRATION_TIME)
def _get_raw_message(self):
return ConfigProvider.get_config().auth_token_message or self.DEFAULT_MESSAGE
def _get_message(self, timestamp):
return f'{self._get_raw_message()}\n{timestamp}'
def _get_message_and_time(self):
timestamp = self._get_timestamp()
return self._get_message(timestamp), timestamp
@staticmethod
def is_token_valid(token):
return isinstance(token, str) and token.startswith('0x') and len(token.split('-')) == 2
def get(self, wallet):
"""
:param wallet: Wallet instance signing the token
:return: hex str the token generated/signed by the users wallet
"""
_message, _time = self._get_message_and_time()
try:
prefixed_msg_hash = Web3Helper.sign_hash(
add_ethereum_prefix_and_hash_msg(_message), wallet)
return f'{prefixed_msg_hash}-{_time}'
except Exception as e:
logging.error(f'Error signing token: {str(e)}')
def check(self, token):
"""
:param token: hex str consist of signature and timestamp
:return: hex str ethereum address
"""
parts = token.split('-')
if len(parts) < 2:
return '0x0'
sig, timestamp = parts
if self._get_timestamp() > (int(timestamp) + self._get_expiration()):
return '0x0'
message = self._get_message(timestamp)
address = Web3Helper.personal_ec_recover(message, sig)
return Web3Provider.get_web3().toChecksumAddress(address)
def store(self, wallet):
"""
:param wallet: Wallet instance signing the token
:return:
token that was generated and stored for this users wallet
"""
token = self.get(wallet)
timestamp = token.split('-')[1]
self._tokens_storage.write_token(wallet.address, token, timestamp)
return token
def restore(self, wallet):
"""
:param wallet: Wallet instance to fetch the saved token
:return:
            hex str the token retrieved from storage
None if no token found for this users wallet
"""
token = self._tokens_storage.read_token(wallet.address)[0]
if not token:
return None
address = self.check(token)
return token if address == wallet.address else None
def is_stored(self, wallet):
"""
:param wallet: Wallet instance
:return: bool whether this wallet has a stored token
"""
return self.restore(wallet) is not None
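# Usage sketch (added example; assumes ConfigProvider/Web3Provider have been initialised
# and `wallet` is an ocean_lib Wallet; the storage path is illustrative):
#   auth = OceanAuth('./storage/auth_tokens.db')
#   token = auth.store(wallet)     # sign once, persist the "signature-timestamp" token
#   auth.check(token)              # -> signer address, or '0x0' if expired/invalid
#   auth.restore(wallet)           # -> the stored token if still valid, else None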
| [
"[email protected]"
] | |
48168d4ee1ba9c5390d4058ed3fbe9b827386801 | c6431cdf572dd10f0f4d45839e6081124b246f90 | /code/lc297.py | 1bf7215362b793eb944544a277e85f8c3b4e766e | [] | no_license | bendanwwww/myleetcode | 1ec0285ea19a213bc629e0e12fb8748146e26d3d | 427846d2ad1578135ef92fd6549235f104f68998 | refs/heads/master | 2021-09-27T19:36:40.111456 | 2021-09-24T03:11:32 | 2021-09-24T03:11:32 | 232,493,899 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,916 | py | """
Serialization is the process of converting a data structure or object into a sequence of bits so that the converted data can be stored in a file or in memory, or transmitted over a network to another computer environment, and then reconstructed into the original data by the reverse process.
Design an algorithm to serialize and deserialize a binary tree. There is no restriction on how your serialization/deserialization algorithm should work; you only need to guarantee that a binary tree can be serialized to a string and that this string can be deserialized back into the original tree structure.
Example:
You may serialize the following binary tree:
1
/ \
2 3
/ \
4 5
as the string "[1,2,3,null,null,4,5]"
Hint: this is identical to the format LeetCode currently uses to serialize binary trees (see LeetCode's binary tree serialization format for details). You are not required to take this approach; you may solve the problem in other ways.
Note: do not use class member / global / static variables to store state. Your serialize and deserialize algorithms should be stateless.
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Codec:
def serialize(self, root):
resArray = []
queue = []
queue.append(root)
while len(queue) > 0:
node = queue[0]
del queue[0]
if node is None:
resArray.append('null')
else:
resArray.append(node.val)
queue.append(node.left)
queue.append(node.right)
while len(resArray) > 0:
n = len(resArray) - 1
            if resArray[n] != 'null':
break
else:
del resArray[n]
return '[' + ','.join(map(lambda x: str(x), resArray)) + ']'
def deserialize(self, data):
if data is None or data == '[]':
return None
index = 0
nodeArray = data.replace('[', '').replace(']', '').split(',')
root = TreeNode(nodeArray[index])
queue = []
queue.append(root)
while len(queue) > 0:
node = queue[0]
del queue[0]
if index + 1 >= len(nodeArray) or nodeArray[index + 1] == 'null':
node.left = None
else:
node.left = TreeNode(nodeArray[index + 1])
if index + 2 >= len(nodeArray) or nodeArray[index + 2] == 'null':
node.right = None
else:
node.right = TreeNode(nodeArray[index + 2])
index+= 2
if node.left is not None:
queue.append(node.left)
if node.right is not None:
queue.append(node.right)
return root
# def deserialize(self, data):
# if data is None or data == '[]':
# return None
# nodeArray = data.replace('[', '').replace(']', '').split(',')
# root = TreeNode(nodeArray[0])
# self.deserializeRoot(root, nodeArray, 0)
# return root
#
# def deserializeRoot(self, node, array, n):
# nodeLeft = 2 * n + 1
# nodeRight = 2 * n + 2
# if nodeLeft < len(array) and array[nodeLeft] != 'null':
# node.left = TreeNode(array[nodeLeft])
# self.deserializeRoot(node.left, array, nodeLeft)
# else:
# node.left = None
#
# if nodeRight < len(array) and array[nodeRight] != 'null':
# node.right = TreeNode(array[nodeRight])
# self.deserializeRoot(node.right, array, nodeRight)
# else:
# node.right = None
s = Codec()
root = TreeNode(5)
root.left = TreeNode(2)
root.right = TreeNode(3)
root.right.left = TreeNode(2)
root.right.right = TreeNode(4)
root.right.left.left = TreeNode(3)
root.right.left.right = TreeNode(1)
res1 = s.serialize(root)
res2 = s.deserialize(res1)
print(res1)
print(res2) | [
"[email protected]"
] | |
f3767eaf83ee7afc2f14b3755b237dcea3de3fcb | bf9d6b1ce9b034df2a034ff93f526638720d359f | /accounts/migrations/0004_auto_20170808_0547.py | c1f288ce918585ca8463578033cdb7e088121853 | [] | no_license | toluwanicareer/lms | fc2b1c2c8b728826180f27f461cec5ea95adbd59 | 41de904043d951843ed748d6bf2cffc98462f99d | refs/heads/master | 2021-07-09T13:58:46.986986 | 2017-09-28T05:59:50 | 2017-09-28T05:59:50 | 105,106,347 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-08 04:47
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accounts', '0003_auto_20170808_0545'),
]
operations = [
migrations.RemoveField(
model_name='person',
name='user',
),
migrations.AddField(
model_name='client',
name='user',
field=models.OneToOneField(default='1', on_delete=django.db.models.deletion.CASCADE, related_name='person', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='employee',
name='user',
field=models.OneToOneField(default='1', on_delete=django.db.models.deletion.CASCADE, related_name='person_employee', to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
] | |
79ba2577fcaceca179c1a325377f6431a58f4b35 | c21faf85627b1cfd96494aac73cc40e5f11ebb46 | /results/test_493.py | 99c59e55336937519e7b0be46fe1bf943072d26a | [] | no_license | ekkya/Cyclomatic-Complexity | d02c61e009087e7d51738e60605875741532b878 | 172db2efdd974f5abad964e335552aec974b47cb | refs/heads/master | 2021-08-28T17:13:14.718314 | 2017-12-12T22:04:13 | 2017-12-12T22:04:13 | 112,042,202 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 26,546 | py | # Script Name : osinfo.py
# Author : Craig Richards
# Created : 5th April 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Displays some information about the OS you are running this script on
import platform
profile = [
platform.architecture(),
platform.dist(),
platform.libc_ver(),
platform.mac_ver(),
platform.machine(),
platform.node(),
platform.platform(),
platform.processor(),
platform.python_build(),
platform.python_compiler(),
platform.python_version(),
platform.system(),
platform.uname(),
platform.version(),
]
for item in profile:
    print item
# Script Name : logs.py
# Author : Craig Richards
# Created : 13th October 2011
# Last Modified :
# Version : 1.1
# Modifications : 1.1 - Added the variable zip_program so you can set it for the zip program on whichever OS, so to run on a different OS just change the locations of these two variables.
# Description : This script will search for all *.log files in the given directory, zip them using the program you specify and then date stamp them
import os # Load the Library Module
from time import strftime # Load just the strftime Module from Time
logsdir="c:\puttylogs" # Set the Variable logsdir
zip_program="zip.exe" # Set the Variable zip_program - 1.1
for files in os.listdir(logsdir): # Find all the files in the directory
if files.endswith(".log"): # Check to ensure the files in the directory end in .log
files1=files+"."+strftime("%Y-%m-%d")+".zip" # Create the Variable files1, this is the files in the directory, then we add a suffix with the date and the zip extension
os.chdir(logsdir) # Change directory to the logsdir
os.system(zip_program + " " + files1 +" "+ files) # Zip the logs into dated zip files for each server. - 1.1
        os.remove(files) # Remove the original log files
# Script Name : create_dir_if_not_there.py
# Author : Craig Richards
# Created : 09th January 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Checks to see if a directory exists in the users home directory, if not then create it
import os # Import the OS module
home=os.path.expanduser("~") # Set the variable home by expanding the users set home directory
print home # Print the location
if not os.path.exists(home+'/testdir'): # Check to see if the directory exists
    os.makedirs(home+'/testdir') # If not create the directory, inside their home directory
# Script Name : move_files_over_x_days.py
# Author : Craig Richards
# Created : 8th December 2011
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This will move all the files from the src directory that are over 240 days old to the destination directory.
import shutil, sys, time, os # Import the header files
src = 'u:\\test' # Set the source directory
dst = 'c:\\test' # Set the destination directory
now = time.time() # Get the current time
for f in os.listdir(src): # Loop through all the files in the source directory
if os.stat(f).st_mtime < now - 240 * 86400: # Work out how old they are, if they are older than 240 days old
if os.path.isfile(f): # Check it's a file
shutil.move(f, dst) # Move the files
# Script Name : puttylogs.py
# Author : Craig Richards
# Created : 13th October 2011
# Last Modified : 29th February 2012
# Version : 1.2
# Modifications : 1.1 - Added the variable zip_program so you can set it for the zip program on whichever OS, so to run on a different OS just change the locations of these two variables.
# : 1.2 - 29-02-12 - CR - Added shutil module and added one line to move the zipped up logs to the zipped_logs directory
# Description : Zip up all the logs in the given directory
import os # Load the Library Module
import shutil # Load the Library Module - 1.2
from time import strftime # Load just the strftime Module from Time
logsdir="c:\logs\puttylogs" # Set the Variable logsdir
zipdir="c:\logs\puttylogs\zipped_logs" # Set the Variable zipdir - 1.2
zip_program="zip.exe" # Set the Variable zip_program - 1.1
for files in os.listdir(logsdir): # Find all the files in the directory
if files.endswith(".log"): # Check to ensure the files in the directory end in .log
files1=files+"."+strftime("%Y-%m-%d")+".zip" # Create the Variable files1, this is the files in the directory, then we add a suffix with the date and the zip extension
os.chdir(logsdir) # Change directory to the logsdir
os.system(zip_program + " " + files1 +" "+ files) # Zip the logs into dated zip files for each server. - 1.1
shutil.move(files1, zipdir) # Move the zipped log files to the zipped_logs directory - 1.2
os.remove(files) # Remove the original log files
# Script Name : nslookup_check.py
# Author : Craig Richards
# Created : 5th January 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This very simple script opens the file server_list.txt and the does an nslookup for each one to check the DNS entry
import subprocess # Import the subprocess module
for server in open('server_list.txt'): # Open the file and read each line
    subprocess.Popen(('nslookup '+server)) # Run the nslookup command for each server in the list
# Script Name : testlines.py
# Author : Craig Richards
# Created : 08th December 2011
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This very simple script open a file and prints out 100 lines of whatever is set for the line variable
line="Test you want to print\n" # This sets the variable for the text that you want to print
f=open('mylines.txt','w') # Create the file to store the output
for i in range(1,101): # Loop 100 times
f.write(line) # Write the text to the file
f.close() # Close the file
# Script Name : ping_subnet.py
# Author : Craig Richards
# Created : 12th January 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : After supplying the first 3 octets it will scan the final range for available addresses
import os # Load the Library Module
import subprocess # Load the Library Module
import sys # Load the Library Module
filename = sys.argv[0] # Sets a variable for the script name
if '-h' in sys.argv or '--h' in sys.argv or '-help' in sys.argv or '--help' in sys.argv: # Help Menu if called
print '''
You need to supply the first octets of the address Usage : ''' + filename + ''' 111.111.111 '''
sys.exit(0)
else:
if (len(sys.argv) < 2): # If no arguments are passed then display the help and instructions on how to run the script
sys.exit (' You need to supply the first octets of the address Usage : ' + filename + ' 111.111.111')
subnet = sys.argv[1] # Set the variable subnet as the three octets you pass it
if os.name == "posix": # Check the os, if it's linux then
myping = "ping -c 2 " # This is the ping command
elif os.name in ("nt", "dos", "ce"): # Check the os, if it's windows then
myping = "ping -n 2 " # This is the ping command
f = open('ping_'+subnet+'.log', 'w') # Open a logfile
for ip in range(2,255): # Set the ip variable for the range of numbers
ret = subprocess.call(myping + str(subnet)+"."+str(ip) , shell=True,stdout=f,stderr=subprocess.STDOUT) # Run the command pinging the servers
if ret == 0: # Depending on the response
f.write (subnet+"."+str(ip) + " is alive" + "\n") # Write out that you can receive a reponse
else:
f.write (subnet+"."+str(ip) + " did not respond" + "\n") # Write out you can't reach the box# Script Name : ping_servers.py
# Author : Craig Richards
# Created : 9th May 2012
# Last Modified : 14th May 2012
# Version : 1.1
# Modifications : 1.1 - 14th May 2012 - CR Changed it to use the config directory to store the server files
# Description : This script will, depending on the arguments supplied will ping the servers associated with that application group.
import os # Load the Library Module
import subprocess # Load the Library Module
import sys # Load the Library Module
if '-h' in sys.argv or '--h' in sys.argv or '-help' in sys.argv or '--help' in sys.argv: # Help Menu if called
print '''
You need to supply the application group for the servers you want to ping, i.e.
dms
swaps
Followed by the site i.e.
155
bromley'''
sys.exit(0)
else:
if (len(sys.argv) < 3): # If no arguments are passed,display the help/instructions on how to run the script
sys.exit ('\nYou need to supply the app group. Usage : ' + filename + ' followed by the application group i.e. \n \t dms or \n \t swaps \n then the site i.e. \n \t 155 or \n \t bromley')
appgroup = sys.argv[1] # Set the variable appgroup as the first argument you supply
site = sys.argv[2] # Set the variable site as the second argument you supply
if os.name == "posix": # Check the os, if it's linux then
myping = "ping -c 2 " # This is the ping command
elif os.name in ("nt", "dos", "ce"): # Check the os, if it's windows then
myping = "ping -n 2 " # This is the ping command
if 'dms' in sys.argv: # If the argument passed is dms then
appgroup = 'dms' # Set the variable appgroup to dms
elif 'swaps' in sys.argv: # Else if the argment passed is swaps then
appgroup = 'swaps' # Set the variable appgroup to swaps
if '155' in sys.argv: # If the argument passed is 155 then
site = '155' # Set the variable site to 155
elif 'bromley' in sys.argv: # Else if the argument passed is bromley
site = 'bromley' # Set the variable site to bromley
filename = sys.argv[0] # Sets a variable for the script name
logdir = os.getenv("logs") # Set the variable logdir by getting the OS environment logs
logfile = 'ping_'+appgroup+'_'+site+'.log' # Set the variable logfile, using the arguments passed to create the logfile
logfilename=os.path.join(logdir, logfile) # Set the variable logfilename by joining logdir and logfile together
confdir = os.getenv("my_config") # Set the variable confdir from the OS environment variable - 1.2
conffile = (appgroup+'_servers_'+site+'.txt') # Set the variable conffile - 1.2
conffilename=os.path.join(confdir, conffile) # Set the variable conffilename by joining confdir and conffile together - 1.2
f = open(logfilename, "w") # Open a logfile to write out the output
for server in open(conffilename): # Open the config file and read each line - 1.2
ret = subprocess.call(myping + server, shell=True,stdout=f,stderr=subprocess.STDOUT) # Run the ping command for each server in the list.
if ret == 0: # Depending on the response
f.write (server.strip() + " is alive" + "\n") # Write out that you can receive a reponse
else:
f.write (server.strip() + " did not respond" + "\n") # Write out you can't reach the box
print ("\n\tYou can see the results in the logfile : "+ logfilename); # Show the location of the logfile# Script Name : check_file.py
# Author : Craig Richards
# Created : 20 May 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Check a file exists and that we can read the file
import sys # Import the Modules
import os # Import the Modules
# Readfile Functions which open the file that is passed to the script
def readfile(filename):
f = open(filename, 'r')
line = f.read()
print line
def main():
if len(sys.argv) == 2: # Check the arguments passed to the script
filename = sys.argv[1] # The filename is the first argument
if not os.path.isfile(filename): # Check the File exists
print '[-] ' + filename + ' does not exist.'
exit(0)
if not os.access(filename, os.R_OK): # Check you can read the file
print '[-] ' + filename + ' access denied'
exit(0)
else:
print '[-] Usage: ' + str(sys.argv[0]) + ' <filename>' # Print usage if not all parameters passed/Checked
exit(0)
print '[+] Reading from : ' + filename # Display Message and read the file contents
readfile(filename)
if __name__ == '__main__':
    main()
# Script Name : fileinfo.py
# Author : Not sure where I got this from
# Created : 28th November 2011
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Show file information for a given file
# get file information using os.stat()
# tested with Python24 vegsaeat 25sep2006
import os
import stat # index constants for os.stat()
import time
# pick a file you have ...
file_name = raw_input("Enter a file name: ")
file_stats = os.stat(file_name)
# create a dictionary to hold file info
file_info = {
'fname': file_name,
'fsize': file_stats [stat.ST_SIZE],
'f_lm': time.strftime("%d/%m/%Y %I:%M:%S %p",time.localtime(file_stats[stat.ST_MTIME])),
'f_la': time.strftime("%d/%m/%Y %I:%M:%S %p",time.localtime(file_stats[stat.ST_ATIME])),
'f_ct': time.strftime("%d/%m/%Y %I:%M:%S %p",time.localtime(file_stats[stat.ST_CTIME]))
}
print
print "file name = %(fname)s" % file_info
print "file size = %(fsize)s bytes" % file_info
print "last modified = %(f_lm)s" % file_info
print "last accessed = %(f_la)s" % file_info
print "creation time = %(f_ct)s" % file_info
print
if stat.S_ISDIR(file_stats[stat.ST_MODE]):
print "This a directory"
else:
print "This is not a directory"
print
print "A closer look at the os.stat(%s) tuple:" % file_name
print file_stats
print
print "The above tuple has the following sequence:"
print """st_mode (protection bits), st_ino (inode number),
st_dev (device), st_nlink (number of hard links),
st_uid (user ID of owner), st_gid (group ID of owner),
st_size (file size, bytes), st_atime (last access time, seconds since epoch),
st_mtime (last modification time), st_ctime (time of creation, Windows)"""
# Script Name : dir_test.py
# Author : Craig Richards
# Created : 29th November 2011
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Tests to see if the directory testdir exists, if not it will create the directory for you
import os # Import the OS module
if not os.path.exists('testdir'): # Check to see if it exists
    os.makedirs('testdir') # Create the directory
# Script Name : batch_file_rename.py
# Author : Craig Richards
# Created : 6th August 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This will batch rename a group of files in a given directory, once you pass the current and new extensions
import os # Load the library module
import sys # Load the library module
work_dir=sys.argv[1] # Set the variable work_dir with the first argument passed
old_ext=sys.argv[2] # Set the variable work_dir with the first argument passed
new_ext=sys.argv[3] # Set the variable work_dir with the first argument passed
files = os.listdir(work_dir) # Set the variable files, by listing everything in the directory
for filename in files: # Loop through the files
file_ext = os.path.splitext(filename)[1] # Get the file extension
if old_ext == file_ext: # Start of the logic to check the file extensions, if old_ext = file_ext
newfile = filename.replace(old_ext, new_ext) # Set newfile to be the filename, replaced with the new extension
os.rename( # Write the files
os.path.join(work_dir, filename),
        os.path.join(work_dir, newfile))
# Script Name : powerdown_startup.py
# Author : Craig Richards
# Created : 05th January 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This goes through the server list and pings the machine, if it's up it will load the putty session, if its not it will notify you.
import os # Load the Library Module
import subprocess # Load the Library Module
from time import strftime # Load just the strftime Module from Time
def windows(): # This is the function to run if it detects the OS is windows.
f = open('server_startup_'+strftime("%Y-%m-%d")+'.log', 'a') # Open the logfile
for server in open('startup_list.txt','r'): # Read the list of servers from the list
ret = subprocess.call("ping -n 3 %s" % server, shell=True,stdout=open('NUL', 'w'),stderr=subprocess.STDOUT) # Ping the servers in turn
if ret == 0: # If you get a response.
f.write ("%s: is alive, loading PuTTY session" % server.strip() + "\n") # Write out to the logfile
subprocess.Popen(('putty -load '+server)) # Load the putty session
else:
f.write ("%s : did not respond" % server.strip() + "\n") # Write to the logfile if the server is down
def linux():
f = open('server_startup_'+strftime("%Y-%m-%d")+'.log', 'a') # Open the logfile
for server in open('startup_list.txt'): # Read the list of servers from the list
ret = subprocess.call("ping -c 3 %s" % server, shell=True,stdout=open('/dev/null', 'w'),stderr=subprocess.STDOUT) # Ping the servers in turn
if ret == 0: # If you get a response.
f.write ("%s: is alive" % server.strip() + "\n") # Print a message
subprocess.Popen(['ssh', server.strip()])
else:
f.write ("%s: did not respond" % server.strip() + "\n")
# End of the functions
# Start of the Main Program
if os.name == "posix": # If the OS is linux...
linux() # Call the linux function
elif os.name in ("nt", "dos", "ce"): # If the OS is Windows...
    windows() # Call the windows function
# Script Name : folder_size.py
# Author : Craig Richards
# Created : 19th July 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This will scan the current directory and all subdirectories and display the size.
import os # Load the library module
directory = '.' # Set the variable directory to be the current directory
dir_size = 0 # Set the size to 0
for (path, dirs, files) in os.walk(directory): # Walk through all the directories
for file in files: # Get all the files
filename = os.path.join(path, file)
dir_size += os.path.getsize(filename) # Get the sizes, the following lines print the sizes in bytes, Kb, Mb and Gb
print "Folder Size in Bytes = %0.2f Bytes" % (dir_size)
print "Folder Size in Kilobytes = %0.2f KB" % (dir_size/1024.0)
print "Folder Size in Megabytes = %0.2f MB" % (dir_size/1024/1024.0)
print "Folder Size in Gigabytes = %0.2f GB" % (dir_size/1024/1024/1024.0)# Script Name : env_check.py
# Author : Craig Richards
# Created : 14th May 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This script will check to see if all of the environment variables I require are set
import os
confdir = os.getenv("my_config") # Set the variable confdir from the OS environment variable
conffile = 'env_check.conf' # Set the variable conffile
conffilename=os.path.join(confdir, conffile) # Set the variable conffilename by joining confdir and conffile together
for env_check in open(conffilename): # Open the config file and read all the settings
env_check = env_check.strip() # Set the variable as itsself, but strip the extra text out
print '[{}]'.format(env_check) # Format the Output to be in Square Brackets
newenv = os.getenv(env_check) # Set the variable newenv to get the settings from the OS what is currently set for the settings out the configfile
if newenv is None: # If it doesn't exist
print env_check, 'is not set' # Print it is not set
else: # Else if it does exist
        print 'Current Setting for {}={}\n'.format(env_check, newenv) # Print out the details
# Script Name : script_count.py
# Author : Craig Richards
# Created : 27th February 2012
# Last Modified : 20th July 2012
# Version : 1.3
# Modifications : 1.1 - 28-02-2012 - CR - Changed inside github and development functions, so instead of if os.name = "posix" do this else do this etc
# : I used os.path.join, so it condensed 4 lines down to 1
# : 1.2 - 10-05-2012 - CR - Added a line to include PHP scripts.
# : 1.3 - 20-07-2012 - CR - Added the line to include Batch scripts
# Description : This scans my scripts directory and gives a count of the different types of scripts
import os # Load the library module
path = os.getenv("scripts") # Set the variable path by getting the value from the OS environment variable scripts
dropbox = os.getenv("dropbox") # Set the variable dropbox by getting the value from the OS environment variable dropbox
def clear_screen(): # Function to clear the screen
if os.name == "posix": # Unix/Linux/MacOS/BSD/etc
os.system('clear') # Clear the Screen
elif os.name in ("nt", "dos", "ce"): # DOS/Windows
os.system('CLS') # Clear the Screen
def count_files(path, extensions): # Start of the function to count the files in the scripts directory, it counts the extension when passed below
counter = 0 # Set the counter to 0
for root, dirs, files in os.walk(path): # Loop through all the directories in the given path
for file in files: # For all the files
counter += file.endswith(extensions) # Count the files
return counter # Return the count
def github(): # Start of the function just to count the files in the github directory
github_dir = os.path.join(dropbox, 'github') # Joins the paths to get the github directory - 1.1
github_count = sum((len(f) for _, _, f in os.walk(github_dir))) # Get a count for all the files in the directory
if github_count > 5: # If the number of files is greater then 5, then print the following messages
print '\nYou have too many in here, start uploading !!!!!'
print 'You have: ' + str(github_count) + ' waiting to be uploaded to github!!'
elif github_count == 0: # Unless the count is 0, then print the following messages
print '\nGithub directory is all Clear'
else: # If it is any other number then print the following message, showing the number outstanding.
print '\nYou have: ' + str(github_count) + ' waiting to be uploaded to github!!'
def development(): # Start of the function just to count the files in the development directory
dev_dir = os.path.join(path, 'development') # Joins the paths to get the development directory - 1.1
dev_count = sum((len(f) for _, _, f in os.walk(dev_dir))) # Get a count for all the files in the directory
if dev_count > 10: # If the number of files is greater then 10, then print the following messages
print '\nYou have too many in here, finish them or delete them !!!!!'
print 'You have: ' + str(dev_count) + ' waiting to be finished!!'
elif dev_count ==0: # Unless the count is 0, then print the following messages
print '\nDevelopment directory is all clear'
else:
print '\nYou have: ' + str(dev_count) + ' waiting to be finished!!' # If it is any other number then print the following message, showing the number outstanding.
clear_screen() # Call the function to clear the screen
print '\nYou have the following :\n'
print 'AutoIT:\t' + str(count_files(path, '.au3')) # Run the count_files function to count the files with the extension we pass
print 'Batch:\t' + str(count_files(path, ('.bat', ',cmd'))) # 1.3
print 'Perl:\t' + str(count_files(path, '.pl'))
print 'PHP:\t' + str(count_files(path, '.php')) # 1.2
print 'Python:\t' + str(count_files(path, '.py'))
print 'Shell:\t' + str(count_files(path, ('.ksh', '.sh', '.bash')))
print 'SQL:\t' + str(count_files(path, '.sql'))
github() # Call the github function
development() # Call the development function
# Script Name : script_listing.py
# Author : Craig Richards
# Created : 15th February 2012
# Last Modified : 29th May 2012
# Version : 1.2
# Modifications : 1.1 - 28-02-2012 - CR - Added the variable to get the logs directory, I then joined the output so the file goes to the logs directory
# : 1.2 - 29-05/2012 - CR - Changed the line so it doesn't ask for a directory, it now uses the environment varaible scripts
# Description : This will list all the files in the given directory, it will also go through all the subdirectories as well
import os # Load the library module
logdir = os.getenv("logs") # Set the variable logdir by getting the value from the OS environment variable logs
logfile = 'script_list.log' # Set the variable logfile
path = os.getenv("scripts") # Set the varable path by getting the value from the OS environment variable scripts - 1.2
#path = (raw_input("Enter dir: ")) # Ask the user for the directory to scan
logfilename=os.path.join(logdir, logfile) # Set the variable logfilename by joining logdir and logfile together
log = open(logfilename, 'w') # Set the variable log and open the logfile for writing
for dirpath, dirname, filenames in os.walk(path): # Go through the directories and the subdirectories
for filename in filenames: # Get all the filenames
log.write(os.path.join(dirpath, filename)+'\n') # Write the full path out to the logfile
print "\nYour logfile " , logfilename, "has been created" # Small message informing the user the file has been created | [
"[email protected]"
] | |
1c5a6b732f8b3b71d5f22a5564bcde3bd8ae4ce6 | 348bd616afd274425ad9737964f37d0b13583310 | /docs/source/conf.py | 0f22d1d4e0dc4ed7d10c0bfe6756e0cbb8a1d009 | [] | no_license | whitews/ReFlow | d2062ab03b62c82e250599557a29e86f61e51957 | 27bd33ac3824de6234952d56cbb66b0e77f076a1 | refs/heads/master | 2020-12-25T16:58:41.540154 | 2019-05-01T16:40:53 | 2019-05-01T16:40:53 | 7,260,054 | 4 | 2 | null | 2015-05-15T13:50:24 | 2012-12-20T15:44:52 | JavaScript | UTF-8 | Python | false | false | 7,898 | py | # -*- coding: utf-8 -*-
#
# ReFlow documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 23 19:50:53 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ReFlow'
copyright = u'2013, Scott White'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ReFlowdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ReFlow.tex', u'ReFlow Documentation',
u'Scott White', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'reflow', u'ReFlow Documentation',
[u'Scott White'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ReFlow', u'ReFlow Documentation',
u'Scott White', 'ReFlow', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| [
"[email protected]"
] | |
dcd336548a0441e2da97d87c3d7fe4acf89333ba | b5e4c4e3abb7f87bfd70ecd912810e2562cecdc5 | /section6/venv/Lib/encodings/cp856.py | 82a2816233075e676cbdfa68ce0b10b47978a778 | [] | no_license | chandshilpa/flaskapi | a89822707dc02f9c588af04f1f33f82a55b627b3 | 5f229d59d155e68e026566919d292c831ea00ed4 | refs/heads/master | 2022-12-09T10:59:14.563256 | 2019-01-08T17:33:46 | 2019-01-08T17:33:46 | 164,698,842 | 0 | 1 | null | 2022-12-07T16:24:53 | 2019-01-08T17:21:32 | Python | UTF-8 | Python | false | false | 11,586 | py | """ Python Character Mapping Codec cp856 generated from 'MAPPINGS/VENDORS/MISC/CP856.TXT' with gencodec.py.
""" # "
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self, input, errors="strict"):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors="strict"):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name="cp856",
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
"\x00" # 0x00 -> NULL
"\x01" # 0x01 -> START OF HEADING
"\x02" # 0x02 -> START OF TEXT
"\x03" # 0x03 -> END OF TEXT
"\x04" # 0x04 -> END OF TRANSMISSION
"\x05" # 0x05 -> ENQUIRY
"\x06" # 0x06 -> ACKNOWLEDGE
"\x07" # 0x07 -> BELL
"\x08" # 0x08 -> BACKSPACE
"\t" # 0x09 -> HORIZONTAL TABULATION
"\n" # 0x0A -> LINE FEED
"\x0b" # 0x0B -> VERTICAL TABULATION
"\x0c" # 0x0C -> FORM FEED
"\r" # 0x0D -> CARRIAGE RETURN
"\x0e" # 0x0E -> SHIFT OUT
"\x0f" # 0x0F -> SHIFT IN
"\x10" # 0x10 -> DATA LINK ESCAPE
"\x11" # 0x11 -> DEVICE CONTROL ONE
"\x12" # 0x12 -> DEVICE CONTROL TWO
"\x13" # 0x13 -> DEVICE CONTROL THREE
"\x14" # 0x14 -> DEVICE CONTROL FOUR
"\x15" # 0x15 -> NEGATIVE ACKNOWLEDGE
"\x16" # 0x16 -> SYNCHRONOUS IDLE
"\x17" # 0x17 -> END OF TRANSMISSION BLOCK
"\x18" # 0x18 -> CANCEL
"\x19" # 0x19 -> END OF MEDIUM
"\x1a" # 0x1A -> SUBSTITUTE
"\x1b" # 0x1B -> ESCAPE
"\x1c" # 0x1C -> FILE SEPARATOR
"\x1d" # 0x1D -> GROUP SEPARATOR
"\x1e" # 0x1E -> RECORD SEPARATOR
"\x1f" # 0x1F -> UNIT SEPARATOR
" " # 0x20 -> SPACE
"!" # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
"#" # 0x23 -> NUMBER SIGN
"$" # 0x24 -> DOLLAR SIGN
"%" # 0x25 -> PERCENT SIGN
"&" # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
"(" # 0x28 -> LEFT PARENTHESIS
")" # 0x29 -> RIGHT PARENTHESIS
"*" # 0x2A -> ASTERISK
"+" # 0x2B -> PLUS SIGN
"," # 0x2C -> COMMA
"-" # 0x2D -> HYPHEN-MINUS
"." # 0x2E -> FULL STOP
"/" # 0x2F -> SOLIDUS
"0" # 0x30 -> DIGIT ZERO
"1" # 0x31 -> DIGIT ONE
"2" # 0x32 -> DIGIT TWO
"3" # 0x33 -> DIGIT THREE
"4" # 0x34 -> DIGIT FOUR
"5" # 0x35 -> DIGIT FIVE
"6" # 0x36 -> DIGIT SIX
"7" # 0x37 -> DIGIT SEVEN
"8" # 0x38 -> DIGIT EIGHT
"9" # 0x39 -> DIGIT NINE
":" # 0x3A -> COLON
";" # 0x3B -> SEMICOLON
"<" # 0x3C -> LESS-THAN SIGN
"=" # 0x3D -> EQUALS SIGN
">" # 0x3E -> GREATER-THAN SIGN
"?" # 0x3F -> QUESTION MARK
"@" # 0x40 -> COMMERCIAL AT
"A" # 0x41 -> LATIN CAPITAL LETTER A
"B" # 0x42 -> LATIN CAPITAL LETTER B
"C" # 0x43 -> LATIN CAPITAL LETTER C
"D" # 0x44 -> LATIN CAPITAL LETTER D
"E" # 0x45 -> LATIN CAPITAL LETTER E
"F" # 0x46 -> LATIN CAPITAL LETTER F
"G" # 0x47 -> LATIN CAPITAL LETTER G
"H" # 0x48 -> LATIN CAPITAL LETTER H
"I" # 0x49 -> LATIN CAPITAL LETTER I
"J" # 0x4A -> LATIN CAPITAL LETTER J
"K" # 0x4B -> LATIN CAPITAL LETTER K
"L" # 0x4C -> LATIN CAPITAL LETTER L
"M" # 0x4D -> LATIN CAPITAL LETTER M
"N" # 0x4E -> LATIN CAPITAL LETTER N
"O" # 0x4F -> LATIN CAPITAL LETTER O
"P" # 0x50 -> LATIN CAPITAL LETTER P
"Q" # 0x51 -> LATIN CAPITAL LETTER Q
"R" # 0x52 -> LATIN CAPITAL LETTER R
"S" # 0x53 -> LATIN CAPITAL LETTER S
"T" # 0x54 -> LATIN CAPITAL LETTER T
"U" # 0x55 -> LATIN CAPITAL LETTER U
"V" # 0x56 -> LATIN CAPITAL LETTER V
"W" # 0x57 -> LATIN CAPITAL LETTER W
"X" # 0x58 -> LATIN CAPITAL LETTER X
"Y" # 0x59 -> LATIN CAPITAL LETTER Y
"Z" # 0x5A -> LATIN CAPITAL LETTER Z
"[" # 0x5B -> LEFT SQUARE BRACKET
"\\" # 0x5C -> REVERSE SOLIDUS
"]" # 0x5D -> RIGHT SQUARE BRACKET
"^" # 0x5E -> CIRCUMFLEX ACCENT
"_" # 0x5F -> LOW LINE
"`" # 0x60 -> GRAVE ACCENT
"a" # 0x61 -> LATIN SMALL LETTER A
"b" # 0x62 -> LATIN SMALL LETTER B
"c" # 0x63 -> LATIN SMALL LETTER C
"d" # 0x64 -> LATIN SMALL LETTER D
"e" # 0x65 -> LATIN SMALL LETTER E
"f" # 0x66 -> LATIN SMALL LETTER F
"g" # 0x67 -> LATIN SMALL LETTER G
"h" # 0x68 -> LATIN SMALL LETTER H
"i" # 0x69 -> LATIN SMALL LETTER I
"j" # 0x6A -> LATIN SMALL LETTER J
"k" # 0x6B -> LATIN SMALL LETTER K
"l" # 0x6C -> LATIN SMALL LETTER L
"m" # 0x6D -> LATIN SMALL LETTER M
"n" # 0x6E -> LATIN SMALL LETTER N
"o" # 0x6F -> LATIN SMALL LETTER O
"p" # 0x70 -> LATIN SMALL LETTER P
"q" # 0x71 -> LATIN SMALL LETTER Q
"r" # 0x72 -> LATIN SMALL LETTER R
"s" # 0x73 -> LATIN SMALL LETTER S
"t" # 0x74 -> LATIN SMALL LETTER T
"u" # 0x75 -> LATIN SMALL LETTER U
"v" # 0x76 -> LATIN SMALL LETTER V
"w" # 0x77 -> LATIN SMALL LETTER W
"x" # 0x78 -> LATIN SMALL LETTER X
"y" # 0x79 -> LATIN SMALL LETTER Y
"z" # 0x7A -> LATIN SMALL LETTER Z
"{" # 0x7B -> LEFT CURLY BRACKET
"|" # 0x7C -> VERTICAL LINE
"}" # 0x7D -> RIGHT CURLY BRACKET
"~" # 0x7E -> TILDE
"\x7f" # 0x7F -> DELETE
"\u05d0" # 0x80 -> HEBREW LETTER ALEF
"\u05d1" # 0x81 -> HEBREW LETTER BET
"\u05d2" # 0x82 -> HEBREW LETTER GIMEL
"\u05d3" # 0x83 -> HEBREW LETTER DALET
"\u05d4" # 0x84 -> HEBREW LETTER HE
"\u05d5" # 0x85 -> HEBREW LETTER VAV
"\u05d6" # 0x86 -> HEBREW LETTER ZAYIN
"\u05d7" # 0x87 -> HEBREW LETTER HET
"\u05d8" # 0x88 -> HEBREW LETTER TET
"\u05d9" # 0x89 -> HEBREW LETTER YOD
"\u05da" # 0x8A -> HEBREW LETTER FINAL KAF
"\u05db" # 0x8B -> HEBREW LETTER KAF
"\u05dc" # 0x8C -> HEBREW LETTER LAMED
"\u05dd" # 0x8D -> HEBREW LETTER FINAL MEM
"\u05de" # 0x8E -> HEBREW LETTER MEM
"\u05df" # 0x8F -> HEBREW LETTER FINAL NUN
"\u05e0" # 0x90 -> HEBREW LETTER NUN
"\u05e1" # 0x91 -> HEBREW LETTER SAMEKH
"\u05e2" # 0x92 -> HEBREW LETTER AYIN
"\u05e3" # 0x93 -> HEBREW LETTER FINAL PE
"\u05e4" # 0x94 -> HEBREW LETTER PE
"\u05e5" # 0x95 -> HEBREW LETTER FINAL TSADI
"\u05e6" # 0x96 -> HEBREW LETTER TSADI
"\u05e7" # 0x97 -> HEBREW LETTER QOF
"\u05e8" # 0x98 -> HEBREW LETTER RESH
"\u05e9" # 0x99 -> HEBREW LETTER SHIN
"\u05ea" # 0x9A -> HEBREW LETTER TAV
"\ufffe" # 0x9B -> UNDEFINED
"\xa3" # 0x9C -> POUND SIGN
"\ufffe" # 0x9D -> UNDEFINED
"\xd7" # 0x9E -> MULTIPLICATION SIGN
"\ufffe" # 0x9F -> UNDEFINED
"\ufffe" # 0xA0 -> UNDEFINED
"\ufffe" # 0xA1 -> UNDEFINED
"\ufffe" # 0xA2 -> UNDEFINED
"\ufffe" # 0xA3 -> UNDEFINED
"\ufffe" # 0xA4 -> UNDEFINED
"\ufffe" # 0xA5 -> UNDEFINED
"\ufffe" # 0xA6 -> UNDEFINED
"\ufffe" # 0xA7 -> UNDEFINED
"\ufffe" # 0xA8 -> UNDEFINED
"\xae" # 0xA9 -> REGISTERED SIGN
"\xac" # 0xAA -> NOT SIGN
"\xbd" # 0xAB -> VULGAR FRACTION ONE HALF
"\xbc" # 0xAC -> VULGAR FRACTION ONE QUARTER
"\ufffe" # 0xAD -> UNDEFINED
"\xab" # 0xAE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
"\xbb" # 0xAF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
"\u2591" # 0xB0 -> LIGHT SHADE
"\u2592" # 0xB1 -> MEDIUM SHADE
"\u2593" # 0xB2 -> DARK SHADE
"\u2502" # 0xB3 -> BOX DRAWINGS LIGHT VERTICAL
"\u2524" # 0xB4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
"\ufffe" # 0xB5 -> UNDEFINED
"\ufffe" # 0xB6 -> UNDEFINED
"\ufffe" # 0xB7 -> UNDEFINED
"\xa9" # 0xB8 -> COPYRIGHT SIGN
"\u2563" # 0xB9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
"\u2551" # 0xBA -> BOX DRAWINGS DOUBLE VERTICAL
"\u2557" # 0xBB -> BOX DRAWINGS DOUBLE DOWN AND LEFT
"\u255d" # 0xBC -> BOX DRAWINGS DOUBLE UP AND LEFT
"\xa2" # 0xBD -> CENT SIGN
"\xa5" # 0xBE -> YEN SIGN
"\u2510" # 0xBF -> BOX DRAWINGS LIGHT DOWN AND LEFT
"\u2514" # 0xC0 -> BOX DRAWINGS LIGHT UP AND RIGHT
"\u2534" # 0xC1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
"\u252c" # 0xC2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
"\u251c" # 0xC3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
"\u2500" # 0xC4 -> BOX DRAWINGS LIGHT HORIZONTAL
"\u253c" # 0xC5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
"\ufffe" # 0xC6 -> UNDEFINED
"\ufffe" # 0xC7 -> UNDEFINED
"\u255a" # 0xC8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
"\u2554" # 0xC9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
"\u2569" # 0xCA -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
"\u2566" # 0xCB -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
"\u2560" # 0xCC -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
"\u2550" # 0xCD -> BOX DRAWINGS DOUBLE HORIZONTAL
"\u256c" # 0xCE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
"\xa4" # 0xCF -> CURRENCY SIGN
"\ufffe" # 0xD0 -> UNDEFINED
"\ufffe" # 0xD1 -> UNDEFINED
"\ufffe" # 0xD2 -> UNDEFINED
"\ufffe" # 0xD3 -> UNDEFINEDS
"\ufffe" # 0xD4 -> UNDEFINED
"\ufffe" # 0xD5 -> UNDEFINED
"\ufffe" # 0xD6 -> UNDEFINEDE
"\ufffe" # 0xD7 -> UNDEFINED
"\ufffe" # 0xD8 -> UNDEFINED
"\u2518" # 0xD9 -> BOX DRAWINGS LIGHT UP AND LEFT
"\u250c" # 0xDA -> BOX DRAWINGS LIGHT DOWN AND RIGHT
"\u2588" # 0xDB -> FULL BLOCK
"\u2584" # 0xDC -> LOWER HALF BLOCK
"\xa6" # 0xDD -> BROKEN BAR
"\ufffe" # 0xDE -> UNDEFINED
"\u2580" # 0xDF -> UPPER HALF BLOCK
"\ufffe" # 0xE0 -> UNDEFINED
"\ufffe" # 0xE1 -> UNDEFINED
"\ufffe" # 0xE2 -> UNDEFINED
"\ufffe" # 0xE3 -> UNDEFINED
"\ufffe" # 0xE4 -> UNDEFINED
"\ufffe" # 0xE5 -> UNDEFINED
"\xb5" # 0xE6 -> MICRO SIGN
"\ufffe" # 0xE7 -> UNDEFINED
"\ufffe" # 0xE8 -> UNDEFINED
"\ufffe" # 0xE9 -> UNDEFINED
"\ufffe" # 0xEA -> UNDEFINED
"\ufffe" # 0xEB -> UNDEFINED
"\ufffe" # 0xEC -> UNDEFINED
"\ufffe" # 0xED -> UNDEFINED
"\xaf" # 0xEE -> MACRON
"\xb4" # 0xEF -> ACUTE ACCENT
"\xad" # 0xF0 -> SOFT HYPHEN
"\xb1" # 0xF1 -> PLUS-MINUS SIGN
"\u2017" # 0xF2 -> DOUBLE LOW LINE
"\xbe" # 0xF3 -> VULGAR FRACTION THREE QUARTERS
"\xb6" # 0xF4 -> PILCROW SIGN
"\xa7" # 0xF5 -> SECTION SIGN
"\xf7" # 0xF6 -> DIVISION SIGN
"\xb8" # 0xF7 -> CEDILLA
"\xb0" # 0xF8 -> DEGREE SIGN
"\xa8" # 0xF9 -> DIAERESIS
"\xb7" # 0xFA -> MIDDLE DOT
"\xb9" # 0xFB -> SUPERSCRIPT ONE
"\xb3" # 0xFC -> SUPERSCRIPT THREE
"\xb2" # 0xFD -> SUPERSCRIPT TWO
"\u25a0" # 0xFE -> BLACK SQUARE
"\xa0" # 0xFF -> NO-BREAK SPACE
)
### Encoding table
encoding_table = codecs.charmap_build(decoding_table)
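### Usage sketch (illustrative, not part of the generated codec module)
if __name__ == "__main__":
    # Decode two bytes through the table above: 0x80 -> HEBREW LETTER ALEF, 0x9C -> POUND SIGN
    text, length = codecs.charmap_decode(b"\x80\x9c", "strict", decoding_table)
    assert text == "\u05d0\xa3" and length == 2
    # Encode them back through the companion encoding table
    raw, length = codecs.charmap_encode(text, "strict", encoding_table)
    assert raw == b"\x80\x9c" and length == 2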
| [
"[email protected]"
] | |
493325b68e5cee35062e2a15547d5355bb645658 | 3365e4d4fc67bbefe4e8c755af289c535437c6f4 | /.history/src/core/dialogs/swimmer_dialog_20170810151903.py | 4281b0f5b05eac3a5ee208c260614a79152a8016 | [] | no_license | kiranhegde/OncoPlotter | f3ab9cdf193e87c7be78b16501ad295ac8f7d2f1 | b79ac6aa9c6c2ca8173bc8992ba3230aa3880636 | refs/heads/master | 2021-05-21T16:23:45.087035 | 2017-09-07T01:13:16 | 2017-09-07T01:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,751 | py | from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import (QApplication, QDialog, QWidget, QPushButton, QVBoxLayout, QTreeWidget, QTreeWidgetItem)
from PyQt5 import QtCore, QtGui
import numpy as np
import core.gui.swimmer as swimmer
class Swimmer(QWidget, swimmer.Ui_Swimmer):
def __init__(self, parent):
super(Swimmer,self).__init__(parent)
self.setupUi(self)
def on_swimmer_data_signal(self,signal):
self.swimmer_data = signal['swimmer_data'] #pandas dataframe
def closeEvent(self,event):
#Override closeEvent so that we hide the window rather than exit so we don't lose data
event.ignore()
self.hide()
class SwimmerPlotter(QWidget):
def __init__(self,parent):
super(SwimmerPlotter,self).__init__(parent)
self.figure = plt.figure()
self.canvas = FigureCanvas(self.figure)
self.toolbar = NavigationToolbar(self.canvas,self)
        self.btn_plot = QPushButton('Default Plot')
        self.btn_plot.setEnabled(False)  # enabled once swimmer data arrives via on_swimmer_data_signal
        self.btn_plot.clicked.connect(self.default_plot)
self.layout = QVBoxLayout()
self.layout.addWidget(self.toolbar)
self.layout.addWidget(self.canvas)
self.layout.addWidget(self.btn_plot)
self.setLayout(self.layout)
        self.markersize = 5  # needs to be a user variable so that as more/fewer bars are added, it looks ok
        self.bar_width = 0.75
def on_swimmer_data_signal(self,signal):
self.swimmer_data = signal['swimmer_data'] #pandas dataframe
self.btn_plot.setEnabled(True)
def on_general_settings_signal(self,signal):
try:
hasattr(self,'ax')
self.ax.set_title(signal[0])
self.ax.set_xlabel(signal[1])
self.ax.set_ylabel(signal[2])
self.canvas.draw()
except Exception as e:
print(e)
    def default_plot(self):
        '''
        Plot swimmer data as stacked horizontal bars
        '''
        self.figure.clear()
        self.ax = self.figure.add_subplot(111)
        self.ax.grid(color='k', axis='y', alpha=0.25)
        self.bar_locations = np.arange(len(self.swimmer_data.ix[:, 0]))
        # Each stacked segment comes from one column of the dataframe (columns 1-5)
        self.stack_lists = [self.swimmer_data.ix[:, i] for i in range(1, 6)]
        self.offset_list = [0] * len(self.stack_lists[0])
        for stack in self.stack_lists:
            self.ax.barh(self.bar_locations, stack, self.bar_width,
                         color='b', left=self.offset_list, edgecolor='k')
            self.offset_list = [sum(x) for x in zip(self.offset_list, stack)]
        self.canvas.draw()
        self.ax.hold(False)  # rewrite the plot when plot() is called
"[email protected]"
] | |
4f56e6665110bb50afcfad478ddd605eaf21e59c | 976efd2cf265ff8e5549edbaeaef4c12233117c5 | /reprounzip-vagrant/reprounzip/unpackers/vagrant/__init__.py | 604927aeea5fae4552017827640ca323e40a05db | [
"BSD-3-Clause"
] | permissive | koconne8/reprozip | 30c0e522012ebfe5d476d8ba32ad90253f99ebb1 | c655a7a52670085329dc4fc0f3a8818aaab60196 | refs/heads/master | 2021-01-09T06:52:19.125710 | 2016-05-11T03:53:48 | 2016-05-11T03:53:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,359 | py | # Copyright (C) 2014-2016 New York University
# This file is part of ReproZip which is released under the Revised BSD License
# See file LICENSE for full license details.
"""Vagrant plugin for reprounzip.
This files contains the 'vagrant' unpacker, which builds a Vagrant template
from a reprozip pack. That template can then be run as a virtual machine via
Vagrant (``vagrant up``).
See http://www.vagrantup.com/
"""
from __future__ import division, print_function, unicode_literals
import argparse
from distutils.version import LooseVersion
import logging
import os
import paramiko
import re
from rpaths import PosixPath, Path
import subprocess
import sys
from reprounzip.common import load_config, record_usage, RPZPack
from reprounzip import signals
from reprounzip.parameters import get_parameter
from reprounzip.unpackers.common import COMPAT_OK, COMPAT_MAYBE, COMPAT_NO, \
CantFindInstaller, composite_action, target_must_exist, \
make_unique_name, shell_escape, select_installer, busybox_url, join_root, \
FileUploader, FileDownloader, get_runs, add_environment_options, \
fixup_environment, metadata_read, metadata_write, \
metadata_initial_iofiles, metadata_update_run
from reprounzip.unpackers.common.x11 import X11Handler
from reprounzip.unpackers.vagrant.run_command import IgnoreMissingKey, \
run_interactive
from reprounzip.utils import unicode_, iteritems, stderr, download_file
def select_box(runs):
"""Selects a box for the experiment, with the correct distribution.
"""
distribution, version = runs[0]['distribution']
distribution = distribution.lower()
architecture = runs[0]['architecture']
record_usage(vagrant_select_box='%s;%s;%s' % (distribution, version,
architecture))
if architecture not in ('i686', 'x86_64'):
logging.critical("Error: unsupported architecture %s", architecture)
sys.exit(1)
def find_distribution(parameter, distribution, version, architecture):
boxes = parameter['boxes']
for distrib in boxes:
if re.match(distrib['name'], distribution) is not None:
result = find_version(distrib, version, architecture)
if result is not None:
return result
default = parameter['default']
logging.warning("Unsupported distribution '%s', using %s",
distribution, default['name'])
result = default['architectures'].get(architecture)
if result:
return default['distribution'], result
def find_version(distrib, version, architecture):
if version is not None:
for box in distrib['versions']:
if re.match(box['version'], version) is not None:
result = box['architectures'].get(architecture)
if result is not None:
return box['distribution'], result
box = distrib['default']
if version is not None:
logging.warning("Using %s instead of '%s'",
box['name'], version)
result = box['architectures'].get(architecture)
if result is not None:
return box['distribution'], result
result = find_distribution(get_parameter('vagrant_boxes'),
distribution, version, architecture)
if result is None:
logging.critical("Error: couldn't find a base box for required "
"architecture")
sys.exit(1)
return result
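# Shape of the 'vagrant_boxes' parameter consumed above (a sketch inferred from
# the lookups in select_box; the box names are illustrative, not authoritative):
#
#   {
#       "default": {"name": "ubuntu", "distribution": "debian",
#                   "architectures": {"i686": "ubuntu/trusty32",
#                                     "x86_64": "ubuntu/trusty64"}},
#       "boxes": [
#           {"name": "^ubuntu$",
#            "default": {"name": "ubuntu 14.04", "distribution": "debian",
#                        "architectures": {"x86_64": "ubuntu/trusty64"}},
#            "versions": [
#                {"version": "^14\\.04", "distribution": "debian",
#                 "architectures": {"i686": "ubuntu/trusty32",
#                                   "x86_64": "ubuntu/trusty64"}}]}]}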
def write_dict(path, dct):
metadata_write(path, dct, 'vagrant')
def read_dict(path):
return metadata_read(path, 'vagrant')
def machine_setup(target, use_chroot):
"""Prepare the machine and get SSH parameters from ``vagrant ssh``.
"""
try:
out = subprocess.check_output(['vagrant', 'ssh-config'],
cwd=target.path,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError:
# Makes sure the VM is running
logging.info("Calling 'vagrant up'...")
try:
retcode = subprocess.check_call(['vagrant', 'up'], cwd=target.path)
except OSError:
logging.critical("vagrant executable not found")
sys.exit(1)
else:
if retcode != 0:
logging.critical("vagrant up failed with code %d", retcode)
sys.exit(1)
# Try again
out = subprocess.check_output(['vagrant', 'ssh-config'],
cwd=target.path)
vagrant_info = {}
for line in out.split(b'\n'):
line = line.strip().split(b' ', 1)
if len(line) != 2:
continue
value = line[1].decode('utf-8')
if len(value) >= 2 and value[0] == '"' and value[-1] == '"':
# Vagrant should really be escaping special characters here, but
# it's not -- https://github.com/mitchellh/vagrant/issues/6428
value = value[1:-1]
vagrant_info[line[0].decode('utf-8').lower()] = value
if 'identityfile' in vagrant_info:
key_file = vagrant_info['identityfile']
else:
key_file = Path('~/.vagrant.d/insecure_private_key').expand_user()
info = dict(hostname=vagrant_info.get('hostname', '127.0.0.1'),
port=int(vagrant_info.get('port', 2222)),
username=vagrant_info.get('user', 'vagrant'),
key_filename=key_file)
logging.debug("SSH parameters from Vagrant: %s@%s:%s, key=%s",
info['username'], info['hostname'], info['port'],
info['key_filename'])
if use_chroot:
# Mount directories
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(IgnoreMissingKey())
ssh.connect(**info)
chan = ssh.get_transport().open_session()
chan.exec_command(
'/usr/bin/sudo /bin/sh -c %s' % shell_escape(
'for i in dev proc; do '
'if ! grep "^/experimentroot/$i$" /proc/mounts; then '
'mount -o rbind /$i /experimentroot/$i; '
'fi; '
'done'))
if chan.recv_exit_status() != 0:
logging.critical("Couldn't mount directories in chroot")
sys.exit(1)
ssh.close()
return info
def vagrant_setup_create(args):
"""Sets up the experiment to be run in a Vagrant-built virtual machine.
This can either build a chroot or not.
If building a chroot, we do just like without Vagrant: we copy all the
files and only get what's missing from the host. But we do install
automatically the packages whose files are required.
If not building a chroot, we install all the packages, and only unpack
files that don't come from packages.
In short: files from packages with packfiles=True will only be used if
building a chroot.
"""
if not args.pack:
logging.critical("setup/create needs the pack filename")
sys.exit(1)
pack = Path(args.pack[0])
target = Path(args.target[0])
if target.exists():
logging.critical("Target directory exists")
sys.exit(1)
use_chroot = args.use_chroot
mount_bind = args.bind_magic_dirs
record_usage(use_chroot=use_chroot,
mount_bind=mount_bind)
signals.pre_setup(target=target, pack=pack)
# Unpacks configuration file
rpz_pack = RPZPack(pack)
rpz_pack.extract_config(target / 'config.yml')
# Loads config
runs, packages, other_files = config = load_config(target / 'config.yml',
True)
if not args.memory:
memory = None
else:
try:
memory = int(args.memory[-1])
except ValueError:
logging.critical("Invalid value for memory size: %r", args.memory)
sys.exit(1)
if args.base_image and args.base_image[0]:
record_usage(vagrant_explicit_image=True)
box = args.base_image[0]
if args.distribution:
target_distribution = args.distribution[0]
else:
target_distribution = None
else:
target_distribution, box = select_box(runs)
logging.info("Using box %s", box)
logging.debug("Distribution: %s", target_distribution or "unknown")
# If using chroot, we might still need to install packages to get missing
# (not packed) files
if use_chroot:
packages = [pkg for pkg in packages if not pkg.packfiles]
if packages:
record_usage(vagrant_install_pkgs=True)
logging.info("Some packages were not packed, so we'll install and "
"copy their files\n"
"Packages that are missing:\n%s",
' '.join(pkg.name for pkg in packages))
if packages:
try:
installer = select_installer(pack, runs, target_distribution)
except CantFindInstaller as e:
logging.error("Need to install %d packages but couldn't select a "
"package installer: %s",
len(packages), e)
target.mkdir(parents=True)
try:
# Writes setup script
logging.info("Writing setup script %s...", target / 'setup.sh')
with (target / 'setup.sh').open('w', encoding='utf-8',
newline='\n') as fp:
fp.write('#!/bin/sh\n\nset -e\n\n')
if packages:
# Updates package sources
fp.write(installer.update_script())
fp.write('\n')
# Installs necessary packages
fp.write(installer.install_script(packages))
fp.write('\n')
# TODO : Compare package versions (painful because of sh)
# Untar
if use_chroot:
fp.write('\n'
'mkdir /experimentroot; cd /experimentroot\n')
fp.write('tar zpxf /vagrant/data.tgz --numeric-owner '
'--strip=1 %s\n' % rpz_pack.data_prefix)
if mount_bind:
fp.write('\n'
'mkdir -p /experimentroot/dev\n'
'mkdir -p /experimentroot/proc\n')
for pkg in packages:
fp.write('\n# Copies files from package %s\n' % pkg.name)
for f in pkg.files:
f = f.path
dest = join_root(PosixPath('/experimentroot'), f)
fp.write('mkdir -p %s\n' %
shell_escape(unicode_(f.parent)))
fp.write('cp -L %s %s\n' % (
shell_escape(unicode_(f)),
shell_escape(unicode_(dest))))
else:
fp.write('\ncd /\n')
paths = set()
pathlist = []
# Adds intermediate directories, and checks for existence in
# the tar
for f in other_files:
path = PosixPath('/')
for c in rpz_pack.remove_data_prefix(f.path).components:
path = path / c
if path in paths:
continue
paths.add(path)
try:
rpz_pack.get_data(path)
except KeyError:
logging.info("Missing file %s", path)
else:
pathlist.append(path)
# FIXME : for some reason we need reversed() here, I'm not sure
# why. Need to read more of tar's docs.
# TAR bug: --no-overwrite-dir removes --keep-old-files
# TAR bug: there is no way to make --keep-old-files not report
# an error if an existing file is encountered. --skip-old-files
# was introduced too recently. Instead, we just ignore the exit
# status
with (target / 'rpz-files.list').open('wb') as lfp:
for p in reversed(pathlist):
lfp.write(join_root(rpz_pack.data_prefix, p).path)
lfp.write(b'\0')
fp.write('tar zpxf /vagrant/data.tgz --keep-old-files '
'--numeric-owner --strip=1 '
'--null -T /vagrant/rpz-files.list || /bin/true\n')
# Copies busybox
if use_chroot:
arch = runs[0]['architecture']
download_file(busybox_url(arch),
target / 'busybox',
'busybox-%s' % arch)
fp.write(r'''
cp /vagrant/busybox /experimentroot/busybox
chmod +x /experimentroot/busybox
mkdir -p /experimentroot/bin
[ -e /experimentroot/bin/sh ] || \
ln -s /busybox /experimentroot/bin/sh
''')
# Copies pack
logging.info("Copying pack file...")
rpz_pack.copy_data_tar(target / 'data.tgz')
rpz_pack.close()
# Writes Vagrant file
logging.info("Writing %s...", target / 'Vagrantfile')
with (target / 'Vagrantfile').open('w', encoding='utf-8',
newline='\n') as fp:
# Vagrant header and version
fp.write(
'# -*- mode: ruby -*-\n'
'# vi: set ft=ruby\n\n'
'VAGRANTFILE_API_VERSION = "2"\n\n'
'Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|\n')
# Selects which box to install
fp.write(' config.vm.box = "%s"\n' % box)
# Run the setup script on the virtual machine
fp.write(' config.vm.provision "shell", path: "setup.sh"\n')
# Memory size
if memory is not None:
fp.write(' config.vm.provider "virtualbox" do |v|\n'
' v.memory = %d\n'
' end\n' % memory)
fp.write('end\n')
# Meta-data for reprounzip
write_dict(target,
metadata_initial_iofiles(config,
{'use_chroot': use_chroot}))
signals.post_setup(target=target, pack=pack)
except Exception:
target.rmtree(ignore_errors=True)
raise
@target_must_exist
def vagrant_setup_start(args):
"""Starts the vagrant-built virtual machine.
"""
target = Path(args.target[0])
use_chroot = read_dict(target)['use_chroot']
check_vagrant_version()
machine_setup(target, use_chroot)
@target_must_exist
def vagrant_run(args):
"""Runs the experiment in the virtual machine.
"""
target = Path(args.target[0])
unpacked_info = read_dict(target)
use_chroot = unpacked_info['use_chroot']
cmdline = args.cmdline
check_vagrant_version()
# Loads config
config = load_config(target / 'config.yml', True)
runs = config.runs
selected_runs = get_runs(runs, args.run, cmdline)
hostname = runs[selected_runs[0]].get('hostname', 'reprounzip')
# X11 handler
x11 = X11Handler(args.x11, ('local', hostname), args.x11_display)
cmds = []
for run_number in selected_runs:
run = runs[run_number]
cmd = 'cd %s && ' % shell_escape(run['workingdir'])
if use_chroot:
cmd += '/busybox env -i '
else:
cmd += '/usr/bin/env -i '
environ = x11.fix_env(run['environ'])
environ = fixup_environment(environ, args)
cmd += ' '.join('%s=%s' % (shell_escape(k), shell_escape(v))
for k, v in iteritems(environ))
cmd += ' '
# FIXME : Use exec -a or something if binary != argv[0]
if cmdline is None:
argv = [run['binary']] + run['argv'][1:]
else:
argv = cmdline
cmd += ' '.join(shell_escape(a) for a in argv)
uid = run.get('uid', 1000)
gid = run.get('gid', 1000)
if use_chroot:
userspec = '%s:%s' % (uid, gid)
cmd = ('chroot --userspec=%s /experimentroot '
'/bin/sh -c %s' % (
userspec,
shell_escape(cmd)))
else:
cmd = 'sudo -u \'#%d\' sh -c %s' % (uid, shell_escape(cmd))
cmds.append(cmd)
if use_chroot:
cmds = ['chroot /experimentroot /bin/sh -c %s' % shell_escape(c)
for c in x11.init_cmds] + cmds
else:
cmds = x11.init_cmds + cmds
cmds = ' && '.join(cmds)
# Sets the hostname to the original experiment's machine's
# FIXME: not reentrant: this restores the Vagrant machine's hostname after
# the run, which might cause issues if several "reprounzip vagrant run" are
# running at once
cmds = ('OLD_HOSTNAME=$(/bin/hostname); /bin/hostname %s; ' % hostname +
cmds +
'; RES=$?; /bin/hostname "$OLD_HOSTNAME"; exit $RES')
cmds = '/usr/bin/sudo /bin/sh -c %s' % shell_escape(cmds)
# Gets vagrant SSH parameters
info = machine_setup(target, unpacked_info['use_chroot'])
signals.pre_run(target=target)
interactive = not (args.no_stdin or
os.environ.get('REPROUNZIP_NON_INTERACTIVE'))
retcode = run_interactive(info, interactive,
cmds,
not args.no_pty,
x11.port_forward)
stderr.write("\r\n*** Command finished, status: %d\r\n" % retcode)
# Update input file status
metadata_update_run(config, unpacked_info, selected_runs)
write_dict(target, unpacked_info)
signals.post_run(target=target, retcode=retcode)
class SSHUploader(FileUploader):
def __init__(self, target, input_files, files, use_chroot):
self.use_chroot = use_chroot
FileUploader.__init__(self, target, input_files, files)
def prepare_upload(self, files):
# Checks whether the VM is running
try:
ssh_info = machine_setup(self.target, self.use_chroot)
except subprocess.CalledProcessError:
logging.critical("Failed to get the status of the machine -- is "
"it running?")
sys.exit(1)
# Connect with SSH
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(IgnoreMissingKey())
self.ssh.connect(**ssh_info)
def upload_file(self, local_path, input_path):
if self.use_chroot:
remote_path = join_root(PosixPath('/experimentroot'),
input_path)
else:
remote_path = input_path
temp = make_unique_name(b'reprozip_input_')
ltemp = self.target / temp
rtemp = PosixPath('/vagrant') / temp
# Copy file to shared folder
logging.info("Copying file to shared folder...")
local_path.copyfile(ltemp)
# Move it
logging.info("Moving file into place...")
chan = self.ssh.get_transport().open_session()
chown_cmd = '/bin/chown --reference=%s %s' % (
shell_escape(remote_path.path),
shell_escape(rtemp.path))
chmod_cmd = '/bin/chmod --reference=%s %s' % (
shell_escape(remote_path.path),
shell_escape(rtemp.path))
mv_cmd = '/bin/mv %s %s' % (
shell_escape(rtemp.path),
shell_escape(remote_path.path))
chan.exec_command('/usr/bin/sudo /bin/sh -c %s' % shell_escape(
' && '.join((chown_cmd, chmod_cmd, mv_cmd))))
if chan.recv_exit_status() != 0:
logging.critical("Couldn't move file in virtual machine")
try:
ltemp.remove()
except OSError:
pass
sys.exit(1)
chan.close()
def finalize(self):
self.ssh.close()
@target_must_exist
def vagrant_upload(args):
"""Replaces an input file in the VM.
"""
target = Path(args.target[0])
files = args.file
unpacked_info = read_dict(target)
input_files = unpacked_info.setdefault('input_files', {})
use_chroot = unpacked_info['use_chroot']
try:
SSHUploader(target, input_files, files, use_chroot)
finally:
write_dict(target, unpacked_info)
class SSHDownloader(FileDownloader):
def __init__(self, target, files, use_chroot, all_=False):
self.use_chroot = use_chroot
FileDownloader.__init__(self, target, files, all_=all_)
def prepare_download(self, files):
# Checks whether the VM is running
try:
info = machine_setup(self.target, self.use_chroot)
except subprocess.CalledProcessError:
logging.critical("Failed to get the status of the machine -- is "
"it running?")
sys.exit(1)
# Connect with SSH
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(IgnoreMissingKey())
self.ssh.connect(**info)
def download(self, remote_path, local_path):
if self.use_chroot:
remote_path = join_root(PosixPath('/experimentroot'), remote_path)
temp = make_unique_name(b'reprozip_output_')
rtemp = PosixPath('/vagrant') / temp
ltemp = self.target / temp
# Copy file to shared folder
logging.info("Copying file to shared folder...")
chan = self.ssh.get_transport().open_session()
cp_cmd = '/bin/cp %s %s' % (
shell_escape(remote_path.path),
shell_escape(rtemp.path))
chown_cmd = '/bin/chown vagrant %s' % shell_escape(rtemp.path)
chmod_cmd = '/bin/chmod 644 %s' % shell_escape(rtemp.path)
chan.exec_command('/usr/bin/sudo /bin/sh -c %s' % shell_escape(
' && '.join((cp_cmd, chown_cmd, chmod_cmd))))
if chan.recv_exit_status() != 0:
logging.critical("Couldn't copy file in virtual machine")
try:
ltemp.remove()
except OSError:
pass
return False
# Move file to final destination
try:
ltemp.rename(local_path)
except OSError as e:
logging.critical("Couldn't download output file: %s\n%s",
remote_path, str(e))
ltemp.remove()
return False
return True
def finalize(self):
self.ssh.close()
@target_must_exist
def vagrant_download(args):
"""Gets an output file out of the VM.
"""
target = Path(args.target[0])
files = args.file
use_chroot = read_dict(target)['use_chroot']
SSHDownloader(target, files, use_chroot, all_=args.all)
@target_must_exist
def vagrant_suspend(args):
"""Suspends the VM through Vagrant, without destroying it.
"""
target = Path(args.target[0])
retcode = subprocess.call(['vagrant', 'suspend'], cwd=target.path)
if retcode != 0:
logging.critical("vagrant suspend failed with code %d, ignoring...",
retcode)
@target_must_exist
def vagrant_destroy_vm(args):
"""Destroys the VM through Vagrant.
"""
target = Path(args.target[0])
read_dict(target)
retcode = subprocess.call(['vagrant', 'destroy', '-f'], cwd=target.path)
if retcode != 0:
logging.critical("vagrant destroy failed with code %d, ignoring...",
retcode)
@target_must_exist
def vagrant_destroy_dir(args):
"""Destroys the directory.
"""
target = Path(args.target[0])
read_dict(target)
signals.pre_destroy(target=target)
target.rmtree()
signals.post_destroy(target=target)
def _executable_in_path(executable):
pathlist = os.environ['PATH'].split(os.pathsep) + ['.']
pathexts = os.environ.get('PATHEXT', '').split(os.pathsep)
for path in pathlist:
for ext in pathexts:
fullpath = os.path.join(path, executable) + ext
if os.path.isfile(fullpath):
return True
return False
def check_vagrant_version():
try:
out = subprocess.check_output(['vagrant', '--version'])
except (subprocess.CalledProcessError, OSError):
logging.error("Couldn't run vagrant")
sys.exit(1)
out = out.decode('ascii').strip().lower().split()
if out[0] == 'vagrant':
if LooseVersion(out[1]) < LooseVersion('1.1'):
logging.error("Vagrant >=1.1 is required; detected version: %s",
out[1])
sys.exit(1)
else:
logging.error("Vagrant >=1.1 is required")
sys.exit(1)
def test_has_vagrant(pack, **kwargs):
"""Compatibility test: has vagrant (ok) or not (maybe).
"""
if not _executable_in_path('vagrant'):
return COMPAT_MAYBE, "vagrant not found in PATH"
try:
out = subprocess.check_output(['vagrant', '--version'])
except subprocess.CalledProcessError:
return COMPAT_NO, ("vagrant was found in PATH but doesn't seem to "
"work properly")
out = out.decode('ascii').strip().lower().split()
if out[0] == 'vagrant':
if LooseVersion(out[1]) >= LooseVersion('1.1'):
return COMPAT_OK
else:
return COMPAT_NO, ("Vagrant >=1.1 is required; detected version: "
"%s" % out[1])
else:
return COMPAT_NO, "Vagrant >=1.1 is required"
def setup(parser, **kwargs):
"""Runs the experiment in a virtual machine created through Vagrant
You will need Vagrant to be installed on your machine if you want to run
the experiment.
setup setup/create creates Vagrantfile (needs the pack filename)
setup/start starts or resume the virtual machine
upload replaces input files in the machine
(without arguments, lists input files)
run runs the experiment in the virtual machine
suspend suspend the virtual machine without destroying it
download gets output files from the machine
(without arguments, lists output files)
destroy destroy/vm destroys the virtual machine
destroy/dir removes the unpacked directory
For example:
$ reprounzip vagrant setup mypack.rpz experiment; cd experiment
$ reprounzip vagrant run .
$ reprounzip vagrant download . results:/home/user/theresults.txt
$ cd ..; reprounzip vagrant destroy experiment
Upload specifications are either:
:input_id restores the original input file from the pack
filename:input_id replaces the input file with the specified local
file
Download specifications are either:
output_id: print the output file to stdout
output_id:filename extracts the output file to the corresponding local
path
"""
subparsers = parser.add_subparsers(title="actions",
metavar='', help=argparse.SUPPRESS)
def add_opt_general(opts):
opts.add_argument('target', nargs=1, help="Experiment directory")
# setup/create
def add_opt_setup(opts):
opts.add_argument('pack', nargs=1, help="Pack to extract")
opts.add_argument(
'--use-chroot', action='store_true',
default=True,
help=argparse.SUPPRESS)
opts.add_argument(
'--dont-use-chroot', action='store_false', dest='use_chroot',
default=True,
help="Don't prefer original files nor use chroot in the virtual "
"machine")
opts.add_argument(
'--no-use-chroot', action='store_false', dest='use_chroot',
default=True, help=argparse.SUPPRESS)
opts.add_argument(
'--dont-bind-magic-dirs', action='store_false', default=True,
dest='bind_magic_dirs',
help="Don't mount /dev and /proc inside the chroot (no effect if "
"--dont-use-chroot is set)")
opts.add_argument('--base-image', nargs=1, help="Vagrant box to use")
opts.add_argument('--distribution', nargs=1,
help="Distribution used in the Vagrant box (for "
"package installer selection)")
opts.add_argument('--memory', nargs=1,
help="Amount of RAM to allocate to VM (megabytes, "
"default: box default)")
parser_setup_create = subparsers.add_parser('setup/create')
add_opt_setup(parser_setup_create)
add_opt_general(parser_setup_create)
parser_setup_create.set_defaults(func=vagrant_setup_create)
# setup/start
parser_setup_start = subparsers.add_parser('setup/start')
add_opt_general(parser_setup_start)
parser_setup_start.set_defaults(func=vagrant_setup_start)
# setup
parser_setup = subparsers.add_parser('setup')
add_opt_setup(parser_setup)
add_opt_general(parser_setup)
parser_setup.set_defaults(func=composite_action(vagrant_setup_create,
vagrant_setup_start))
# upload
parser_upload = subparsers.add_parser('upload')
add_opt_general(parser_upload)
parser_upload.add_argument('file', nargs=argparse.ZERO_OR_MORE,
help="<path>:<input_file_name")
parser_upload.set_defaults(func=vagrant_upload)
# run
parser_run = subparsers.add_parser('run')
add_opt_general(parser_run)
parser_run.add_argument('run', default=None, nargs='?')
parser_run.add_argument('--no-stdin', action='store_true', default=False,
help="Don't connect program's input stream to "
"this terminal")
parser_run.add_argument('--no-pty', action='store_true', default=False,
help="Don't request a PTY from the SSH server")
parser_run.add_argument('--cmdline', nargs=argparse.REMAINDER,
help="Command line to run")
parser_run.add_argument('--enable-x11', action='store_true', default=False,
dest='x11',
help="Enable X11 support (needs an X server on "
"the host)")
parser_run.add_argument('--x11-display', dest='x11_display',
help="Display number to use on the experiment "
"side (change the host display with the "
"DISPLAY environment variable)")
add_environment_options(parser_run)
parser_run.set_defaults(func=vagrant_run)
# download
parser_download = subparsers.add_parser('download')
add_opt_general(parser_download)
parser_download.add_argument('file', nargs=argparse.ZERO_OR_MORE,
help="<output_file_name>[:<path>]")
parser_download.add_argument('--all', action='store_true',
help="Download all output files to the "
"current directory")
parser_download.set_defaults(func=vagrant_download)
parser_suspend = subparsers.add_parser('suspend')
add_opt_general(parser_suspend)
parser_suspend.set_defaults(func=vagrant_suspend)
# destroy/vm
parser_destroy_vm = subparsers.add_parser('destroy/vm')
add_opt_general(parser_destroy_vm)
parser_destroy_vm.set_defaults(func=vagrant_destroy_vm)
# destroy/dir
parser_destroy_dir = subparsers.add_parser('destroy/dir')
add_opt_general(parser_destroy_dir)
parser_destroy_dir.set_defaults(func=vagrant_destroy_dir)
# destroy
parser_destroy = subparsers.add_parser('destroy')
add_opt_general(parser_destroy)
parser_destroy.set_defaults(func=composite_action(vagrant_destroy_vm,
vagrant_destroy_dir))
return {'test_compatibility': test_has_vagrant}
| [
"[email protected]"
] | |
b173f68c5c45adaaeffd0c75d7eebf3022350c31 | d177addc1830153404c71fa115a5584f94a392c3 | /N1539_KthMissingPositiveNumber.py | ec2a85adf987c4c2c541611169e916af013faa56 | [] | no_license | zerghua/leetcode-python | 38a84452f60a360e991edf90c8156de03a949000 | 02726da394971ef02616a038dadc126c6ff260de | refs/heads/master | 2022-10-25T11:36:22.712564 | 2022-10-02T19:56:52 | 2022-10-02T19:56:52 | 61,502,010 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | #
# Create by Hua on 4/3/22.
#
"""
Given an array arr of positive integers sorted in a strictly increasing order, and an integer k.
Find the kth positive integer that is missing from this array.
Example 1:
Input: arr = [2,3,4,7,11], k = 5
Output: 9
Explanation: The missing positive integers are [1,5,6,8,9,10,12,13,...]. The 5th missing positive integer is 9.
Example 2:
Input: arr = [1,2,3,4], k = 2
Output: 6
Explanation: The missing positive integers are [5,6,7,...]. The 2nd missing positive integer is 6.
Constraints:
1 <= arr.length <= 1000
1 <= arr[i] <= 1000
1 <= k <= 1000
arr[i] < arr[j] for 1 <= i < j <= arr.length
"""
class Solution(object):
def findKthPositive(self, arr, k):
"""
:type arr: List[int]
:type k: int
:rtype: int
thought: iterate 1-3000, build missing array, and return the kth
04/03/2022 14:04 Accepted 112 ms 13.6 MB python
easy 5 min.
can do binary search.
https://leetcode.com/problems/kth-missing-positive-number/discuss/779999/JavaC%2B%2BPython-O(logN)
"""
rt = list()
for i in range(1, 3001):
if i not in arr:
rt.append(i)
if len(rt) == k:
return rt[-1]
return -1
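# Alternative O(log n) approach hinted at in the docstring above (a sketch, not the
# accepted submission): arr[i] - (i + 1) counts the positive integers missing at or
# before index i, so binary-search for the first index where that count reaches k.
def find_kth_positive_binary_search(arr, k):
    lo, hi = 0, len(arr)
    while lo < hi:
        mid = (lo + hi) // 2
        if arr[mid] - (mid + 1) < k:  # fewer than k numbers missing up to arr[mid]
            lo = mid + 1
        else:
            hi = mid
    # exactly 'lo' array elements are smaller than the answer, so shift k by lo
    return lo + k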
| [
"[email protected]"
] | |
6d856f492bd4e381bfaf4ac7158cb061c4a9f63b | 6b9084d234c87d7597f97ec95808e13f599bf9a1 | /Dataset/MOT/Seed/PathTrack.py | 593c78a035dd677424dfafa90338eb09be91d69b | [] | no_license | LitingLin/ubiquitous-happiness | 4b46234ce0cb29c4d27b00ec5a60d3eeb52c26fc | aae2d764e136ca4a36c054212b361dd7e8b22cba | refs/heads/main | 2023-07-13T19:51:32.227633 | 2021-08-03T16:02:03 | 2021-08-03T16:02:03 | 316,664,903 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | # https://www.trace.ethz.ch/publications/2017/pathtrack/index.html
from Dataset.Base.factory_seed import BaseSeed
from Dataset.Type.data_split import DataSplit
class PathTrack_Seed(BaseSeed):
def __init__(self, root_path: str=None, data_split: DataSplit=DataSplit.Training | DataSplit.Validation):
name = 'PathTrack'
if root_path is None:
root_path = self.get_path_from_config('PathTrack_PATH')
super(PathTrack_Seed, self).__init__(name, root_path, data_split, 1)
def construct(self, constructor):
from .Impl.PathTrack import construct_PathTrack
construct_PathTrack(constructor, self)
| [
"[email protected]"
] | |
75565b4e4f3520375386cf6faaf8f0755753e4f6 | 362765585815165ca3625895d4a675600efdb518 | /orchestrator.py | 67e314397be339554e940e42475881ae9f5046a7 | [] | no_license | obulpathi/notifications | 00e7decd479685d10e815b9ada7b1f774862f0f9 | 131b784d5e40310cdfc98587e0ceaff4cca4cf03 | refs/heads/master | 2021-03-12T20:23:05.143662 | 2014-07-08T18:58:44 | 2014-07-08T18:58:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,665 | py | import os
import sys
from jinja2 import Environment, FileSystemLoader, meta
def render(sense, action):
env = Environment(loader=FileSystemLoader('/src/templates/sense'))
template_source = env.loader.get_source(env, sense+'.py')[0]
parsed_content = env.parse(template_source)
vars = meta.find_undeclared_variables(parsed_content)
args = {}
for var in vars:
if "NOTIFICATIONS_" + var.upper() in os.environ:
args[var] = os.environ["NOTIFICATIONS_" + var.upper()]
else:
print "Could not find NOTIFICATIONS_"+var.upper()+" in environment variables"
exit()
template = env.get_template(sense+'.py')
render = template.render(**args)
with open('/src/binpy/sense.py', 'w+') as fh:
fh.write(render)
env = Environment(loader=FileSystemLoader('/src/templates/action'))
template_source = env.loader.get_source(env, action+'.py')[0]
parsed_content = env.parse(template_source)
vars = meta.find_undeclared_variables(parsed_content)
args = {}
for var in vars:
if "NOTIFICATIONS_" + var.upper() in os.environ:
args[var] = os.environ["NOTIFICATIONS_" + var.upper()]
else:
print "Could not find NOTIFICATIONS_"+var.upper()+" in environment variables"
exit()
template = env.get_template(action+'.py')
render = template.render(**args)
with open('/src/binpy/action.py', 'w+') as fh:
fh.write(render)
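# Example invocation (hypothetical template and variable names): a sense template
# /src/templates/sense/email.py containing "{{ server }}" and an action template
# /src/templates/action/sms.py containing "{{ number }}" would be rendered with
#   NOTIFICATIONS_SERVER=imap.example.com NOTIFICATIONS_NUMBER=5551234 \
#       python orchestrator.py email sms
# which writes the filled-in modules to /src/binpy/sense.py and /src/binpy/action.py.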
if __name__ == "__main__":
render(sys.argv[1], sys.argv[2])
from binpy import action, sense
result = sense.sense()
if result[0]:
action.action(result[1])
| [
"[email protected]"
] | |
3095707d0fd46be66dd4cba60697331cad3d4d77 | 5b57fa09b08e72ccb10de1bf341b556c00b4be42 | /Server/src/pyticas_tetres/da/incident_iris.py | 46a5025a883e4a2eedf26a6be2d1dd3019d017ac | [] | no_license | mnit-rtmc/tetres | 6f331e463f90e1608c6e47d72c232e3f6b8d5d33 | 4935d82ffe5f51284f08749b27f48491a62d9968 | refs/heads/master | 2022-10-02T02:45:13.860004 | 2021-12-08T23:14:51 | 2021-12-08T23:14:51 | 200,887,049 | 3 | 6 | null | 2022-09-16T18:07:46 | 2019-08-06T16:25:02 | Python | UTF-8 | Python | false | false | 3,784 | py | # -*- coding: utf-8 -*-
__author__ = 'Chongmyung Park ([email protected])'
import datetime
from sqlalchemy import and_
from pyticas_tetres.da.base import DataAccessBase
from pyticas_tetres.db.iris import model, conn
from pyticas_tetres.ttypes import IrisIncidentInfo
class IrisIncidentDataAccess(object):
def __init__(self, **kwargs):
kwargs['session'] = conn.get_session()
kwargs['primary_key'] = 'event_id'
self.da_base = DataAccessBase(model.IrisIncident, IrisIncidentInfo, **kwargs)
def list_as_generator(self, sdate, edate, corridor, direction, **kwargs):
"""
:param sdate: e.g. 2013-12-04 12:00:00
:type sdate: str or datetime.datetime
:param edate: e.g. 2013-12-04 13:00:00
:type edate: str or datetime.datetime
:param corridor: only number part of corridor name e.g. 35W, 94, 494, 100, 169
:type corridor: str
:param direction: e.g. NB, SB, EB, WB
:type direction: str
:rtype: Generator : IncidentInfo
"""
if isinstance(sdate, str):
sdate = datetime.datetime.strptime(sdate, '%Y-%m-%d %H:%M:%S')
if isinstance(edate, str):
edate = datetime.datetime.strptime(edate, '%Y-%m-%d %H:%M:%S')
as_model = kwargs.get('as_model', False)
limit = kwargs.get('limit', None)
order_by = kwargs.get('order_by', None)
window_size = kwargs.get('window_size', 1000)
db_model = model.IrisIncident
session = self.da_base.session
if corridor and direction:
qry = (session.query(db_model).filter(and_(
db_model.event_date >= sdate,
db_model.event_date <= edate
)).filter(and_(
db_model.road == corridor,
db_model.direction == direction
))
)
else:
qry = (session.query(db_model).filter(and_(
db_model.event_date >= sdate,
db_model.event_date <= edate
))
)
# apply 'order by'
if order_by and isinstance(order_by, tuple):
# e.g. order_by = ('id', 'desc')
# e.g. order_by = ('name', 'asc')
qry = qry.order_by(getattr(getattr(db_model, order_by[0]), order_by[1])())
else:
qry = qry.order_by(db_model.event_date.asc())
# apply 'limit'
if limit:
qry = qry.limit(limit)
for m in self.da_base.query_generator(qry, window_size):
if as_model:
yield m
else:
yield self.da_base.to_info(m)
def list(self, sdate, edate, corridor=None, direction=None, **kwargs):
"""
:param sdate: e.g. 2013-12-04 12:00:00
:type sdate: str or datetime.datetime
:param edate: e.g. 2013-12-04 13:00:00
:type edate: str or datetime.datetime
:param corridor: only number part of corridor name e.g. 35W, 94, 494, 100, 169
:type corridor: str
:param direction: e.g. NB, SB, EB, WB
:type direction: str
:rtype: list[IrisIncidentInfo]
"""
return [m for m in self.list_as_generator(sdate, edate, corridor, direction, **kwargs)]
def get_by_id(self, pkey):
"""
:type pkey: int
:rtype: IrisIncidentInfo
"""
return self.da_base.get_data_by_id(pkey)
def get_by_event_id(self, event_id):
"""
:type event_id: int
:rtype: IrisIncidentInfo
"""
res = self.da_base.search([('event_id', event_id)])
if res:
return res[0]
else:
return None
def close_session(self):
self.da_base.close()
| [
"[email protected]"
] |