id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses, 1 value)
---|---|---
4971035
|
import json
import unittest
from datetime import datetime as dt
import os.path as op
from stac_sentinel import sentinel_s2_l1c, sentinel_s2_l2a
testpath = op.dirname(__file__)
class Test(unittest.TestCase):
""" Test main module """
@classmethod
    def get_metadata(cls, collection_id):
with open(op.join(testpath, 'metadata', collection_id + '.json')) as f:
dat = json.loads(f.read())
return dat
def test_sentinel_s2_l1c(self):
collection_id = 'sentinel-s2-l1c'
metadata = self.get_metadata(collection_id)
item = sentinel_s2_l1c(metadata)
fname = op.join(testpath, collection_id + '.json')
with open(fname, 'w') as f:
f.write(json.dumps(item))
def test_sentinel_s2_l2a(self):
collection_id = 'sentinel-s2-l2a'
metadata = self.get_metadata(collection_id)
item = sentinel_s2_l2a(metadata)
fname = op.join(testpath, collection_id + '.json')
with open(fname, 'w') as f:
f.write(json.dumps(item))
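# Not part of the original test module: a conventional entry point so the tests
# can also be run directly with "python <this file>" in addition to a test runner.
if __name__ == '__main__':
    unittest.main()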
|
StarcoderdataPython
|
4868467
|
<reponame>pld/bamboo
#!/usr/bin/env python
import os
import sys
sys.path.append(os.getcwd())
from pymongo import ASCENDING
from bamboo.config.db import Database
from bamboo.core.frame import DATASET_ID
from bamboo.models.observation import Observation
# The encoded dataset_id will be set to '0'.
ENCODED_DATASET_ID = '0'
def bamboo_index(collection, key):
ensure_index = collection.__getattribute__('ensure_index')
ensure_index([(key, ASCENDING)])
ensure_index([(key, ASCENDING), (Observation.DELETED_AT, ASCENDING)])
def ensure_indexing():
"""Ensure that bamboo models are indexed."""
db = Database.db()
# collections
calculations = db.calculations
datasets = db.datasets
observations = db.observations
# indices
bamboo_index(datasets, DATASET_ID)
bamboo_index(observations, ENCODED_DATASET_ID)
bamboo_index(observations, Observation.ENCODING_DATASET_ID)
bamboo_index(calculations, DATASET_ID)
if __name__ == '__main__':
ensure_indexing()
|
StarcoderdataPython
|
3241368
|
<reponame>AOF-BudakHasan/number_to_text<gh_stars>0
import unittest
from number_to_text import NTT
from adapters import AdapterLangTr
class MyTestCase1(unittest.TestCase):
# Only use setUp() and tearDown() if necessary
def setUp(self):
self.test_number = 2345.5265
self.expected_text = "İKİBİNÜÇYÜZKIRKBEŞLİRAELLİÜÇKURUŞ"
def tearDown(self):
pass
def test_lang_tr(self):
new_ntt = NTT(adapter=AdapterLangTr, fraction_size=2)
result_text = new_ntt.number_to_text(self.test_number)
self.assertEqual(result_text, self.expected_text, 'Result should be as self.expected_text')
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1651386
|
<filename>penetration/python/BasicSniffer.py
import socket
# USAGE: terminal_1: python BasicSniffer.py
# terminal_2: ping 192.168.0.103 -c 3
# create the sniffer as a raw socket object
# listening for ICMP packets
# (it could also capture TCP or UDP packets with a different protocol)
sniffer = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
# bind to all interfaces; port 0 is officially a reserved port in TCP networking
# passing port 0 is a programming technique for delegating port allocation:
# the operating system will automatically pick the next available port
# in the dynamic port range
sniffer.bind(('0.0.0.0',0))
# make sure that the IP header is included
sniffer.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL,1)
print('sniffer is listening for incoming packets')
# get a single packet
print(sniffer.recvfrom(65535))
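# A minimal follow-up sketch (not in the original script): capture one more packet
# and unpack its 20-byte IPv4 header with the standard struct module.
import struct
raw_packet, addr = sniffer.recvfrom(65535)
ip_header = struct.unpack('!BBHHHBBH4s4s', raw_packet[:20])
ttl, protocol = ip_header[5], ip_header[6]
src_ip = socket.inet_ntoa(ip_header[8])
dst_ip = socket.inet_ntoa(ip_header[9])
print('TTL=%d proto=%d %s -> %s' % (ttl, protocol, src_ip, dst_ip))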
|
StarcoderdataPython
|
3311640
|
from django.apps import AppConfig
class LaunchPageConfig(AppConfig):
name = 'launch_page'
|
StarcoderdataPython
|
3446387
|
<gh_stars>0
import torch.nn as nn
def l2_loss(input, target, batch_size, mask=None):
if mask is not None:
loss = (input - target) * mask
else:
loss = input - target
loss = (loss * loss) / 2 / batch_size
return loss.sum()
def mse_loss(output, target, mask):
mse = nn.MSELoss()
batch_size = output.size(0)
num_keypoints = output.size(1)
heatmaps_target = target.reshape((batch_size, num_keypoints, -1)).split(1, 1)
heatmaps_pred = output.reshape((batch_size, num_keypoints, -1)).split(1, 1)
loss = 0
for idx in range(num_keypoints):
heatmap_pred = heatmaps_pred[idx].squeeze()
heatmap_target = heatmaps_target[idx].squeeze()
loss += 0.5 * mse(
heatmap_pred.mul(mask.cuda()[:, idx]),
heatmap_target.mul(mask[:, idx]).cuda(),
)
return loss / num_keypoints
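# A small usage sketch (not in the original file; shapes are made up). Only the
# plain l2_loss is exercised here because mse_loss calls .cuda() and needs a GPU.
if __name__ == '__main__':
    import torch
    pred = torch.randn(4, 17)            # batch of 4 samples, 17 values each
    target = torch.randn(4, 17)
    mask = (torch.rand(4, 17) > 0.5).float()
    print(l2_loss(pred, target, batch_size=4, mask=mask))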
|
StarcoderdataPython
|
6419337
|
<filename>demo_bidirection_streaming/server/server.py
import os
from concurrent.futures import ThreadPoolExecutor
from threading import Thread
from signal import signal, SIGTERM
import grpc
from grpc_interceptor import ExceptionToStatusInterceptor
from grpc_interceptor.exceptions import NotFound
import logging
import math
from demo_pb2 import BrokerResponse
import demo_pb2_grpc
import config
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
class BrokerServiceServicer(demo_pb2_grpc.BrokerServiceServicer):
def BidirectionalStreaming(self, request_iterator, context):
"""Server request callback function"""
if not request_iterator:
raise NotFound("Client request is incorrect!")
for request in request_iterator:
logging.info("Client request: %d %g %g %g %g",
request.id,
request.sensor1,
request.sensor2,
request.sensor3,
request.sensor4)
            if self.isPrimeNumber(request.id):
yield BrokerResponse(id=request.id, prediction=True)
else:
yield BrokerResponse(id=request.id, prediction=False)
def isFibonacci(self, n):
return self.isPerfectSquare(5*n*n + 4) or self.isPerfectSquare(5*n*n - 4)
def isPerfectSquare(self, x):
s = int(math.sqrt(x))
return s*s == x
    def isPrimeNumber(self, num):
        # numbers below 2 are not prime
        if num < 2:
            return False
        for i in range(2, num):
            if (num % i) == 0:
                return False
        return True
def main():
interceptors = [ExceptionToStatusInterceptor()]
server = grpc.server(
ThreadPoolExecutor(max_workers=10),
interceptors=interceptors)
demo_pb2_grpc.add_BrokerServiceServicer_to_server(
BrokerServiceServicer(),
server)
server.add_insecure_port(config.port)
logging.info("Starting server. Listening on port : %s", str(config.port))
server.start()
server.wait_for_termination()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print("Interrupted!")
exit(0)
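# A hypothetical client-side sketch, not part of the original server file. It
# assumes: the generated stub follows the usual "<Service>Stub" naming
# (demo_pb2_grpc.BrokerServiceStub), the request message is something like
# demo_pb2.BrokerRequest carrying the fields the server reads (id, sensor1..4),
# and the server listens on localhost:50051.
#
#   import grpc
#   import demo_pb2
#   import demo_pb2_grpc
#
#   def generate_requests():
#       for i in range(1, 4):
#           yield demo_pb2.BrokerRequest(
#               id=i, sensor1=0.1, sensor2=0.2, sensor3=0.3, sensor4=0.4)
#
#   with grpc.insecure_channel('localhost:50051') as channel:
#       stub = demo_pb2_grpc.BrokerServiceStub(channel)
#       for response in stub.BidirectionalStreaming(generate_requests()):
#           print(response.id, response.prediction)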
|
StarcoderdataPython
|
1702678
|
<reponame>JohannesBuchner/pystrict3<gh_stars>1-10
# A* Shortest Path Algorithm
# http://en.wikipedia.org/wiki/A*
# FB - 201012256
from heapq import heappush, heappop # for priority queue
import math
import time
import random
class node:
xPos = 0 # x position
yPos = 0 # y position
distance = 0 # total distance already travelled to reach the node
priority = 0 # priority = distance + remaining distance estimate
def __init__(self, xPos, yPos, distance, priority):
self.xPos = xPos
self.yPos = yPos
self.distance = distance
self.priority = priority
def __lt__(self, other): # comparison method for priority queue
return self.priority < other.priority
def updatePriority(self, xDest, yDest):
self.priority = self.distance + self.estimate(xDest, yDest) * 10 # A*
# give higher priority to going straight instead of diagonally
def nextMove(self, dirs, d): # d: direction to move
if dirs == 8 and d % 2 != 0:
self.distance += 14
else:
self.distance += 10
# Estimation function for the remaining distance to the goal.
def estimate(self, xDest, yDest):
xd = xDest - self.xPos
yd = yDest - self.yPos
# Euclidian Distance
d = math.sqrt(xd * xd + yd * yd)
# Manhattan distance
# d = abs(xd) + abs(yd)
# Chebyshev distance
# d = max(abs(xd), abs(yd))
return(d)
# A-star algorithm.
# The path returned will be a string of digits of directions.
def pathFind(the_map, n, m, dirs, dx, dy, xA, yA, xB, yB):
closed_nodes_map = [] # map of closed (tried-out) nodes
open_nodes_map = [] # map of open (not-yet-tried) nodes
dir_map = [] # map of dirs
row = [0] * n
for i in range(m): # create 2d arrays
closed_nodes_map.append(list(row))
open_nodes_map.append(list(row))
dir_map.append(list(row))
pq = [[], []] # priority queues of open (not-yet-tried) nodes
pqi = 0 # priority queue index
# create the start node and push into list of open nodes
n0 = node(xA, yA, 0, 0)
n0.updatePriority(xB, yB)
heappush(pq[pqi], n0)
open_nodes_map[yA][xA] = n0.priority # mark it on the open nodes map
# A* search
while len(pq[pqi]) > 0:
# get the current node w/ the highest priority
# from the list of open nodes
n1 = pq[pqi][0] # top node
n0 = node(n1.xPos, n1.yPos, n1.distance, n1.priority)
x = n0.xPos
y = n0.yPos
heappop(pq[pqi]) # remove the node from the open list
open_nodes_map[y][x] = 0
closed_nodes_map[y][x] = 1 # mark it on the closed nodes map
# quit searching when the goal is reached
# if n0.estimate(xB, yB) == 0:
if x == xB and y == yB:
# generate the path from finish to start
# by following the dirs
path = ''
while not (x == xA and y == yA):
j = dir_map[y][x]
                c = str((j + dirs // 2) % dirs)
path = c + path
x += dx[j]
y += dy[j]
return path
# generate moves (child nodes) in all possible dirs
for i in range(dirs):
xdx = x + dx[i]
ydy = y + dy[i]
if not (xdx < 0 or xdx > n-1 or ydy < 0 or ydy > m - 1
or the_map[ydy][xdx] == 1 or closed_nodes_map[ydy][xdx] == 1):
# generate a child node
m0 = node(xdx, ydy, n0.distance, n0.priority)
m0.nextMove(dirs, i)
m0.updatePriority(xB, yB)
# if it is not in the open list then add into that
if open_nodes_map[ydy][xdx] == 0:
open_nodes_map[ydy][xdx] = m0.priority
heappush(pq[pqi], m0)
# mark its parent node direction
                    dir_map[ydy][xdx] = (i + dirs // 2) % dirs
elif open_nodes_map[ydy][xdx] > m0.priority:
# update the priority
open_nodes_map[ydy][xdx] = m0.priority
# update the parent direction
                    dir_map[ydy][xdx] = (i + dirs // 2) % dirs
# replace the node
# by emptying one pq to the other one
# except the node to be replaced will be ignored
# and the new node will be pushed in instead
while not (pq[pqi][0].xPos == xdx and pq[pqi][0].yPos == ydy):
heappush(pq[1 - pqi], pq[pqi][0])
heappop(pq[pqi])
heappop(pq[pqi]) # remove the target node
# empty the larger size priority queue to the smaller one
if len(pq[pqi]) > len(pq[1 - pqi]):
pqi = 1 - pqi
while len(pq[pqi]) > 0:
heappush(pq[1-pqi], pq[pqi][0])
heappop(pq[pqi])
pqi = 1 - pqi
heappush(pq[pqi], m0) # add the better node instead
return '' # if no route found
# MAIN
dirs = 8 # number of possible directions to move on the map
if dirs == 4:
dx = [1, 0, -1, 0]
dy = [0, 1, 0, -1]
elif dirs == 8:
dx = [1, 1, 0, -1, -1, -1, 0, 1]
dy = [0, 1, 1, 1, 0, -1, -1, -1]
n = 30 # horizontal size of the map
m = 30 # vertical size of the map
the_map = []
row = [0] * n
for i in range(m): # create empty map
the_map.append(list(row))
# fillout the map with a '+' pattern
for x in range(n // 8, n * 7 // 8):
    the_map[m // 2][x] = 1
for y in range(m // 8, m * 7 // 8):
    the_map[y][n // 2] = 1
# randomly select start and finish locations from a list
sf = []
sf.append((0, 0, n - 1, m - 1))
sf.append((0, m - 1, n - 1, 0))
sf.append((n // 2 - 1, m // 2 - 1, n // 2 + 1, m // 2 + 1))
sf.append((n // 2 - 1, m // 2 + 1, n // 2 + 1, m // 2 - 1))
sf.append((n // 2 - 1, 0, n // 2 + 1, m - 1))
sf.append((n // 2 + 1, m - 1, n // 2 - 1, 0))
sf.append((0, m // 2 - 1, n - 1, m // 2 + 1))
sf.append((n - 1, m // 2 + 1, 0, m // 2 - 1))
(xA, yA, xB, yB) = random.choice(sf)
print('Map size (X,Y): ', n, m)
print('Start: ', xA, yA)
print('Finish: ', xB, yB)
t = time.time()
route = pathFind(the_map, n, m, dirs, dx, dy, xA, yA, xB, yB)
print('Time to generate the route (seconds): ', time.time() - t)
print('Route:')
print(route)
# mark the route on the map
if len(route) > 0:
x = xA
y = yA
the_map[y][x] = 2
for i in range(len(route)):
j = int(route[i])
x += dx[j]
y += dy[j]
the_map[y][x] = 3
the_map[y][x] = 4
# display the map with the route added
print('Map:')
for y in range(m):
for x in range(n):
xy = the_map[y][x]
if xy == 0:
print('.', end=' ') # space
elif xy == 1:
print('O', end=' ') # obstacle
elif xy == 2:
print('S', end=' ') # start
elif xy == 3:
print('R', end=' ') # route
elif xy == 4:
print('F', end=' ') # finish
print()
input('Press Enter...')
|
StarcoderdataPython
|
85501
|
# Copyright 2016, 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Custom potion db manager
"""
#
# IMPORTS
#
from flask_potion import exceptions as potion_exceptions
from flask_potion.contrib.alchemy.manager import SQLAlchemyManager
from sqlalchemy import exc as sa_exceptions
from sqlalchemy.orm import aliased
from werkzeug.exceptions import BadRequest
import logging
import re
#
# CONSTANTS AND DEFINITIONS
#
#
# CODE
#
class ApiManager(SQLAlchemyManager):
"""
Extend potion's manager with some features needed by the api
"""
class DataError(potion_exceptions.PotionException):
"""
        Define an exception in potion's hierarchy to deal with data type
        errors in the database.
"""
werkzeug_exception = BadRequest
def __init__(self, sa_exc):
"""
            Constructor; extracts the message from sa's exception object
Args:
sa_exc (DataError): sa's exception
"""
super().__init__()
self._logger = logging.getLogger(__name__)
            # original exception did not come from postgres: we don't know how
            # to handle it, therefore return a generic message
if (not hasattr(sa_exc, 'orig') or
not hasattr(sa_exc.orig, 'pgcode')):
self.msg = 'A value entered is in wrong format.'
return
# this re extracts the value which caused the error, the message
# looks like:
# ungültige Eingabesyntax für Typ macaddr: »ff:dd:cc:bb:aa«
msg_match = re.match(r'^.*»(.*)«.*$',
sa_exc.orig.diag.message_primary)
try:
value = msg_match.group(1)
except (AttributeError, IndexError):
self._logger.debug(
'Returning generic msg: failed to match re')
self.msg = 'A value entered is in wrong format.'
return
self.msg = "The value '{}' is in wrong format.".format(value)
# __init__()
def as_dict(self):
"""
Wraps original as_dict to return customized message
"""
ret_dict = super().as_dict()
ret_dict['message'] = self.msg
return ret_dict
# as_dict()
# DataError
def instances(self, where=None, sort=None):
"""
Add the functionality to join tables when queries use hybrid
attributes that point to another table via a foreign key.
Args:
where (list): list of SQLAlchemyBaseFilter instances
sort (list): list containing sorting conditions
Returns:
sqlalchemy.orm.query: the sa's query object
Raises:
None
"""
# get the model's query object
query = self._query()
# sanity check
if query is None:
return []
# filtering condition was specified: build sqlalchemy query expression
if where:
expressions = []
# each condition is an instance of SQLAlchemyBaseFilter
for condition in where:
# retrieve the sa object corresponding to the target column
col = condition.filter.column
# column points to another table: add the join condition to the
# query.
if (hasattr(col, 'property') and
col.property.expression.table.name !=
self.model.__tablename__):
# name of the attribute in the resource, corresponds to the
# name of the column
attr_name = condition.filter.attribute
# specify the attribute in the model containing the
# fk relationship definition. Here we rely on our naming
# convention where a hybrid column named 'foo' always has
# its fk relationship defined under the attribute name
# 'foo_rel'. This is better than trying to extract the
# relationship information from the column object as it
# would require a lot of digging because one has to provide
# both target and dependent columns to the join() method,
# i.e. join(DepTable.type_id == Target.id)
query = query.join(attr_name + '_rel')
# special case parent/child for systems
elif (self.model.__tablename__ == 'systems' and
condition.filter.attribute == 'hypervisor'):
parent = aliased(self.model)
query = query.join(
parent, 'hypervisor_rel')
query = query.filter(parent.name == condition.value)
continue
# special case parent/child for system profiles
elif (self.model.__tablename__ == 'system_profiles' and
condition.filter.attribute == 'hypervisor_profile'):
# TODO: handle the case when user specifies
# hypervisor-name/profile-name
parent = aliased(self.model)
query = query.join(
parent, 'hypervisor_profile_rel')
query = query.filter(parent.name == condition.value)
continue
# add the comparison expression (the filter itself)
expressions.append(self._expression_for_condition(condition))
# more than one expression specified: build the final statement
if expressions:
query = self._query_filter(
query, self._and_expression(expressions))
# sort field(s) specified: add join to the fk fields as needed
if sort:
for _, attribute, _ in sort:
column = getattr(self.model, attribute)
if (hasattr(column, 'property') and
column.property.expression.table.name !=
self.model.__tablename__):
query = query.join(attribute + '_rel')
# special case parent/child for systems
elif (self.model.__tablename__ == 'systems' and
attribute == 'hypervisor'):
parent = aliased(self.model)
query = query.join(parent, 'hypervisor_rel')
query = query.filter(parent.id == self.model.hypervisor_id)
# special case parent/child for system profiles
elif (self.model.__tablename__ == 'system_profiles' and
attribute == 'hypervisor_profile'):
parent = aliased(self.model)
query = query.join(parent, 'hypervisor_profile_rel')
query = query.filter(
parent.id == self.model.hypervisor_profile_id)
query = self._query_order_by(query, sort)
return query
# instances()
def create(self, properties, commit=True):
"""
        Fix the create method, which does not catch sa's exception for errors
        in data format.
Args:
properties (dict): properties for the created item
commit (bool): commit session
Returns:
any: whatever parent's create returns
Raises:
ApiManager.DataError: in case a data error occurs (i.e. mac address
in wrong format)
BackendConflict: database could not perform an operation due to
data conflicts
"""
try:
return super().create(properties, commit=commit)
except sa_exceptions.DataError as sa_exc:
session = self._get_session()
session.rollback()
raise ApiManager.DataError(sa_exc)
except sa_exceptions.IntegrityError:
session = self._get_session()
session.rollback()
raise potion_exceptions.BackendConflict()
# create()
def delete(self, item, commit=True):
"""
        Fix the delete method, which does not clean up the session after a
        failed operation.
Args:
item (dict): item to delete
commit (bool): commit session
Returns:
any: whatever parent's delete returns (as of today, None)
Raises:
            BackendConflict: in case an integrity error occurs (i.e. FK
depending on item to delete)
"""
try:
return super().delete(item, commit=commit)
except sa_exceptions.IntegrityError:
session = self._get_session()
session.rollback()
raise potion_exceptions.BackendConflict()
# delete()
def update(self, item, changes, commit=True):
"""
        Fix the update method, which does not catch sa's exception for errors
        in data format.
Args:
item (dict): item to update
changes (dict): changes to apply
commit (bool): commit session
Returns:
any: whatever parent's update returns
Raises:
ApiManager.DataError: in case a data error occurs (i.e. mac address
in wrong format)
BackendConflict: database could not perform an operation due to
data conflicts
"""
try:
return super().update(item, changes, commit=commit)
except sa_exceptions.DataError as sa_exc:
session = self._get_session()
session.rollback()
raise ApiManager.DataError(sa_exc)
except sa_exceptions.IntegrityError:
session = self._get_session()
session.rollback()
raise potion_exceptions.BackendConflict()
# update()
# ApiManager
|
StarcoderdataPython
|
3523822
|
from datetime import datetime
import requests
# Check a HuBMAP service: if it returns a 200 from the standard /status call
# within 2 seconds, return the number of milliseconds the call took.
#
# input   - service_url: the url of the service (e.g. https://uuid.api.hubmapconsortium.org)
# outputs - on success: an integer representing the number of milliseconds the status call took
#           on failure: the boolean False
def check_hm_ws(service_url):
start_time = datetime.now()
try:
failed = False
url = service_url.strip()
if url.endswith('/'): url = url[:len(url) - 1]
if not url.endswith('/status'):
url = url + "/status"
resp = requests.get(url, timeout=2)
if resp.ok == False: failed = True
except Exception:
failed = True
finally:
end_time = datetime.now()
time_diff = end_time - start_time
elapsed_time_millis = int((time_diff.seconds * 1000) + (time_diff.microseconds / 1000))
if failed:
return False
else:
return elapsed_time_millis
#print(str(check_hm_ws('https://uuid-api.refactor.hubmapconsortium.org/status/')))
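# Interpretation sketch (not in the original file): a False return means the
# check failed, while any integer, including 0, is the elapsed time, so test
# with "is False" rather than plain truthiness.
#   elapsed = check_hm_ws('https://uuid.api.hubmapconsortium.org')
#   print('service down' if elapsed is False else '{} ms'.format(elapsed))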
|
StarcoderdataPython
|
6706382
|
import logging
import os
import sys
try:
from coloredlogs import ColoredFormatter as Formatter
except ImportError:
from logging import Formatter
__version__ = '0.14.2'
PY37 = sys.version_info.minor == 7
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
logger.addHandler(handler)
handler.setFormatter(Formatter('%(name)s %(levelname)s %(message)s'))
try:
level = os.environ.get('PANTABLELOGLEVEL', logging.WARNING)
logger.setLevel(level=level)
except ValueError:
logger.setLevel(level=logging.WARNING)
logger.error(f'Unknown PANTABLELOGLEVEL {level}, set to default WARNING.')
|
StarcoderdataPython
|
1853639
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates a loading report.
When executed as a script, takes a trace filename and print the report.
"""
from content_classification_lens import ContentClassificationLens
from loading_graph_view import LoadingGraphView
import loading_trace
from network_activity_lens import NetworkActivityLens
from user_satisfied_lens import (
FirstTextPaintLens, FirstContentfulPaintLens, FirstSignificantPaintLens)
class LoadingReport(object):
"""Generates a loading report from a loading trace."""
def __init__(self, trace, ad_rules=None, tracking_rules=None):
"""Constructor.
Args:
trace: (LoadingTrace) a loading trace.
ad_rules: ([str]) List of ad filtering rules.
tracking_rules: ([str]) List of tracking filtering rules.
"""
self.trace = trace
self._text_msec = FirstTextPaintLens(self.trace).SatisfiedMs()
self._contentful_paint_msec = (
FirstContentfulPaintLens(self.trace).SatisfiedMs())
self._significant_paint_msec = (
FirstSignificantPaintLens(self.trace).SatisfiedMs())
navigation_start_events = trace.tracing_track.GetMatchingEvents(
'blink.user_timing', 'navigationStart')
self._navigation_start_msec = min(
e.start_msec for e in navigation_start_events)
self._load_end_msec = self._ComputePlt(trace)
network_lens = NetworkActivityLens(self.trace)
if network_lens.total_download_bytes > 0:
self._contentful_byte_frac = (
network_lens.DownloadedBytesAt(self._contentful_paint_msec)
/ float(network_lens.total_download_bytes))
self._significant_byte_frac = (
network_lens.DownloadedBytesAt(self._significant_paint_msec)
/ float(network_lens.total_download_bytes))
else:
self._contentful_byte_frac = float('Nan')
self._significant_byte_frac = float('Nan')
self._ad_report = self._AdRequestsReport(
trace, ad_rules or [], tracking_rules or [])
graph = LoadingGraphView.FromTrace(trace)
self._contentful_inversion = graph.GetInversionsAtTime(
self._contentful_paint_msec)
self._significant_inversion = graph.GetInversionsAtTime(
self._significant_paint_msec)
def GenerateReport(self):
"""Returns a report as a dict."""
report = {
'url': self.trace.url,
'first_text_ms': self._text_msec - self._navigation_start_msec,
'contentful_paint_ms': (self._contentful_paint_msec
- self._navigation_start_msec),
'significant_paint_ms': (self._significant_paint_msec
- self._navigation_start_msec),
'plt_ms': self._load_end_msec - self._navigation_start_msec,
'contentful_byte_frac': self._contentful_byte_frac,
'significant_byte_frac': self._significant_byte_frac,
# Take the first (earliest) inversions.
'contentful_inversion': (self._contentful_inversion[0].url
if self._contentful_inversion
else None),
'significant_inversion': (self._significant_inversion[0].url
if self._significant_inversion
else None)}
report.update(self._ad_report)
return report
@classmethod
def FromTraceFilename(cls, filename, ad_rules_filename,
tracking_rules_filename):
"""Returns a LoadingReport from a trace filename."""
trace = loading_trace.LoadingTrace.FromJsonFile(filename)
return LoadingReport(trace, ad_rules_filename, tracking_rules_filename)
@classmethod
def _AdRequestsReport(cls, trace, ad_rules, tracking_rules):
has_rules = bool(ad_rules) or bool(tracking_rules)
requests = trace.request_track.GetEvents()
result = {
'request_count': len(requests),
'ad_requests': 0 if ad_rules else None,
'tracking_requests': 0 if tracking_rules else None,
'ad_or_tracking_requests': 0 if has_rules else None,
'ad_or_tracking_initiated_requests': 0 if has_rules else None}
content_classification_lens = ContentClassificationLens(
trace, ad_rules, tracking_rules)
if not has_rules:
return result
for request in trace.request_track.GetEvents():
is_ad = content_classification_lens.IsAdRequest(request)
is_tracking = content_classification_lens.IsTrackingRequest(request)
if ad_rules:
result['ad_requests'] += int(is_ad)
if tracking_rules:
result['tracking_requests'] += int(is_tracking)
result['ad_or_tracking_requests'] += int(is_ad or is_tracking)
result['ad_or_tracking_initiated_requests'] = len(
content_classification_lens.AdAndTrackingRequests())
return result
@classmethod
def _ComputePlt(cls, trace):
mark_load_events = trace.tracing_track.GetMatchingEvents(
'devtools.timeline', 'MarkLoad')
# Some traces contain several load events for the main frame.
main_frame_load_events = filter(
lambda e: e.args['data']['isMainFrame'], mark_load_events)
if main_frame_load_events:
return max(e.start_msec for e in main_frame_load_events)
# Main frame onLoad() didn't finish. Take the end of the last completed
# request.
return max(r.end_msec or -1 for r in trace.request_track.GetEvents())
def _Main(args):
assert len(args) == 4, 'Usage: report.py trace.json ad_rules tracking_rules'
trace_filename = args[1]
ad_rules = open(args[2]).readlines()
tracking_rules = open(args[3]).readlines()
report = LoadingReport.FromTraceFilename(
trace_filename, ad_rules, tracking_rules)
print json.dumps(report.GenerateReport(), indent=2)
if __name__ == '__main__':
import sys
import json
_Main(sys.argv)
|
StarcoderdataPython
|
3322807
|
<filename>isenw_app/migrations/0003_content_new.py
# Generated by Django 2.2.24 on 2021-06-25 21:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('isenw_app', '0002_auto_20210625_2110'),
]
operations = [
migrations.AddField(
model_name='content',
name='new',
field=models.FileField(default='', upload_to=''),
),
]
|
StarcoderdataPython
|
1741687
|
#!/usr/bin/env python
# Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a list of all the functions that are not auto-generated.
# It contains all the meta data that describes the function. The format is:
# <function name>, <return_type>, [<args>], <backend function name>, [<sql aliases>]
#
# 'function name' is the base of what the opcode enum will be generated from. It does not
# have to be unique, the script will mangle the name with the signature if necessary.
#
# 'sql aliases' are the function names that can be used from sql. They are
# optional and there can be multiple aliases for a function.
#
# This is combined with the list in generated_functions to code-gen the opcode
# registry in the FE and BE.
functions = [
['Compound_And', 'BOOLEAN', ['BOOLEAN', 'BOOLEAN'], \
'CompoundPredicate::AndComputeFn', []],
['Compound_Or', 'BOOLEAN', ['BOOLEAN', 'BOOLEAN'], \
'CompoundPredicate::OrComputeFn', []],
['Compound_Not', 'BOOLEAN', ['BOOLEAN', 'BOOLEAN'], \
'CompoundPredicate::NotComputeFn', []],
['Constant_Regex', 'BOOLEAN', ['BOOLEAN', 'BOOLEAN'], \
'LikePredicate::ConstantRegexFn', []],
['Constant_Substring', 'BOOLEAN', ['BOOLEAN', 'BOOLEAN'], \
'LikePredicate::ConstantSubstringFn', []],
['Like', 'BOOLEAN', ['STRING', 'STRING'], 'LikePredicate::LikeFn', []],
['Regex', 'BOOLEAN', ['STRING', 'STRING'], 'LikePredicate::RegexFn', []],
['Math_Pi', 'DOUBLE', [], 'MathFunctions::Pi', ['pi']],
['Math_E', 'DOUBLE', [], 'MathFunctions::E', ['e']],
['Math_Abs', 'DOUBLE', ['DOUBLE'], 'MathFunctions::Abs', ['abs']],
['Math_Sign', 'FLOAT', ['DOUBLE'], 'MathFunctions::Sign', ['sign']],
['Math_Sin', 'DOUBLE', ['DOUBLE'], 'MathFunctions::Sin', ['sin']],
['Math_Asin', 'DOUBLE', ['DOUBLE'], 'MathFunctions::Asin', ['asin']],
['Math_Cos', 'DOUBLE', ['DOUBLE'], 'MathFunctions::Cos', ['cos']],
['Math_Acos', 'DOUBLE', ['DOUBLE'], 'MathFunctions::Acos', ['acos']],
['Math_Tan', 'DOUBLE', ['DOUBLE'], 'MathFunctions::Tan', ['tan']],
['Math_Atan', 'DOUBLE', ['DOUBLE'], 'MathFunctions::Atan', ['atan']],
['Math_Radians', 'DOUBLE', ['DOUBLE'], 'MathFunctions::Radians', ['radians']],
['Math_Degrees', 'DOUBLE', ['DOUBLE'], 'MathFunctions::Degrees', ['degrees']],
['Math_Ceil', 'BIGINT', ['DOUBLE'], 'MathFunctions::Ceil', ['ceil', 'ceiling']],
['Math_Floor', 'BIGINT', ['DOUBLE'], 'MathFunctions::Floor', ['floor']],
['Math_Round', 'BIGINT', ['DOUBLE'], 'MathFunctions::Round', ['round']],
['Math_Round', 'DOUBLE', ['DOUBLE', 'INT'], 'MathFunctions::RoundUpTo', ['round']],
['Math_Exp', 'DOUBLE', ['DOUBLE'], 'MathFunctions::Exp', ['exp']],
['Math_Ln', 'DOUBLE', ['DOUBLE'], 'MathFunctions::Ln', ['ln']],
['Math_Log10', 'DOUBLE', ['DOUBLE'], 'MathFunctions::Log10', ['log10']],
['Math_Log2', 'DOUBLE', ['DOUBLE'], 'MathFunctions::Log2', ['log2']],
['Math_Log', 'DOUBLE', ['DOUBLE', 'DOUBLE'], 'MathFunctions::Log', ['log']],
['Math_Pow', 'DOUBLE', ['DOUBLE', 'DOUBLE'], 'MathFunctions::Pow', ['pow', 'power']],
['Math_Sqrt', 'DOUBLE', ['DOUBLE'], 'MathFunctions::Sqrt', ['sqrt']],
['Math_Rand', 'DOUBLE', [], 'MathFunctions::Rand', ['rand']],
['Math_Rand', 'DOUBLE', ['INT'], 'MathFunctions::RandSeed', ['rand']],
['Math_Bin', 'STRING', ['BIGINT'], 'MathFunctions::Bin', ['bin']],
['Math_Hex', 'STRING', ['BIGINT'], 'MathFunctions::HexInt', ['hex']],
['Math_Hex', 'STRING', ['STRING'], 'MathFunctions::HexString', ['hex']],
['Math_Unhex', 'STRING', ['STRING'], 'MathFunctions::Unhex', ['unhex']],
['Math_Conv', 'STRING', ['BIGINT', 'TINYINT', 'TINYINT'], \
'MathFunctions::ConvInt', ['conv']],
['Math_Conv', 'STRING', ['STRING', 'TINYINT', 'TINYINT'], \
'MathFunctions::ConvString', ['conv']],
['Math_Pmod', 'BIGINT', ['BIGINT', 'BIGINT'], 'MathFunctions::PmodBigInt', ['pmod']],
['Math_Pmod', 'DOUBLE', ['DOUBLE', 'DOUBLE'], 'MathFunctions::PmodDouble', ['pmod']],
['Math_Positive', 'BIGINT', ['BIGINT'], 'MathFunctions::PositiveBigInt', ['positive']],
['Math_Positive', 'DOUBLE', ['DOUBLE'], 'MathFunctions::PositiveDouble', ['positive']],
['Math_Negative', 'BIGINT', ['BIGINT'], 'MathFunctions::NegativeBigInt', ['negative']],
['Math_Negative', 'DOUBLE', ['DOUBLE'], 'MathFunctions::NegativeDouble', ['negative']],
['String_Substring', 'STRING', ['STRING', 'INT'], \
'StringFunctions::Substring', ['substr', 'substring']],
['String_Substring', 'STRING', ['STRING', 'INT', 'INT'], \
'StringFunctions::Substring', ['substr', 'substring']],
# left and right are key words, leave them out for now.
['String_Left', 'STRING', ['STRING', 'INT'], 'StringFunctions::Left', ['strleft']],
['String_Right', 'STRING', ['STRING', 'INT'], 'StringFunctions::Right', ['strright']],
['String_Length', 'INT', ['STRING'], 'StringFunctions::Length', ['length']],
['String_Lower', 'STRING', ['STRING'], 'StringFunctions::Lower', ['lower', 'lcase']],
['String_Upper', 'STRING', ['STRING'], 'StringFunctions::Upper', ['upper', 'ucase']],
['String_Reverse', 'STRING', ['STRING'], 'StringFunctions::Reverse', ['reverse']],
['String_Trim', 'STRING', ['STRING'], 'StringFunctions::Trim', ['trim']],
['String_Ltrim', 'STRING', ['STRING'], 'StringFunctions::Ltrim', ['ltrim']],
['String_Rtrim', 'STRING', ['STRING'], 'StringFunctions::Rtrim', ['rtrim']],
['String_Space', 'STRING', ['INT'], 'StringFunctions::Space', ['space']],
['String_Repeat', 'STRING', ['STRING', 'INT'], 'StringFunctions::Repeat', ['repeat']],
['String_Ascii', 'INT', ['STRING'], 'StringFunctions::Ascii', ['ascii']],
['String_Lpad', 'STRING', ['STRING', 'INT', 'STRING'], \
'StringFunctions::Lpad', ['lpad']],
['String_Rpad', 'STRING', ['STRING', 'INT', 'STRING'], \
'StringFunctions::Rpad', ['rpad']],
['String_Instr', 'INT', ['STRING', 'STRING'], 'StringFunctions::Instr', ['instr']],
['String_Locate', 'INT', ['STRING', 'STRING'], 'StringFunctions::Locate', ['locate']],
['String_Locate', 'INT', ['STRING', 'STRING', 'INT'], \
'StringFunctions::LocatePos', ['locate']],
['String_Regexp_Extract', 'STRING', ['STRING', 'STRING', 'INT'], \
'StringFunctions::RegexpExtract', ['regexp_extract']],
['String_Regexp_Replace', 'STRING', ['STRING', 'STRING', 'STRING'], \
'StringFunctions::RegexpReplace', ['regexp_replace']],
['String_Concat', 'STRING', ['STRING', '...'], 'StringFunctions::Concat', ['concat']],
['String_Concat_Ws', 'STRING', ['STRING', 'STRING', '...'], \
'StringFunctions::ConcatWs', ['concat_ws']],
['String_Find_In_Set', 'INT', ['STRING', 'STRING'], \
'StringFunctions::FindInSet', ['find_in_set']],
['String_Parse_Url', 'STRING', ['STRING', 'STRING'], \
'StringFunctions::ParseUrl', ['parse_url']],
['String_Parse_Url', 'STRING', ['STRING', 'STRING', 'STRING'], \
'StringFunctions::ParseUrlKey', ['parse_url']],
['Utility_Version', 'STRING', [], 'UtilityFunctions::Version', ['version']],
# Timestamp Functions
['Unix_Timestamp', 'INT', [], \
'TimestampFunctions::Unix', ['unix_timestamp']],
['Unix_Timestamp', 'INT', ['TIMESTAMP'], \
'TimestampFunctions::Unix', ['unix_timestamp']],
['Unix_Timestamp', 'INT', ['STRING', 'STRING'], \
'TimestampFunctions::Unix', ['unix_timestamp']],
['From_UnixTime', 'STRING', ['INT'], \
'TimestampFunctions::FromUnix', ['from_unixtime']],
['From_UnixTime', 'STRING', ['INT', 'STRING'], \
'TimestampFunctions::FromUnix', ['from_unixtime']],
['Timestamp_year', 'INT', ['TIMESTAMP'], 'TimestampFunctions::Year', ['year']],
['Timestamp_month', 'INT', ['TIMESTAMP'], 'TimestampFunctions::Month', ['month']],
['Timestamp_day', 'INT', ['TIMESTAMP'], 'TimestampFunctions::Day', ['day']],
['Timestamp_dayofmonth', 'INT', ['TIMESTAMP'], \
'TimestampFunctions::DayOfMonth', ['dayofmonth']],
['Timestamp_weekofyear', 'INT', ['TIMESTAMP'], \
'TimestampFunctions::WeekOfYear', ['weekofyear']],
['Timestamp_hour', 'INT', ['TIMESTAMP'], 'TimestampFunctions::Hour', ['hour']],
['Timestamp_minute', 'INT', ['TIMESTAMP'], 'TimestampFunctions::Minute', ['minute']],
['Timestamp_second', 'INT', ['TIMESTAMP'], 'TimestampFunctions::Second', ['second']],
['Timestamp_now', 'TIMESTAMP', [], \
'TimestampFunctions::Now', ['now', 'current_timestamp']],
['Timestamp_to_date', 'STRING', ['TIMESTAMP'], \
'TimestampFunctions::ToDate', ['to_date']],
['Timestamp_years_add', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::YearsAdd', ['years_add']],
['Timestamp_years_sub', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::YearsSub', ['years_sub']],
['Timestamp_months_add', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::MonthsAdd', ['months_add']],
['Timestamp_months_sub', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::MonthsSub', ['months_sub']],
['Timestamp_weeks_add', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::WeeksAdd', ['weeks_add']],
['Timestamp_weeks_sub', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::WeeksSub', ['weeks_sub']],
['Timestamp_days_add', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::DaysAdd', ['days_add', 'date_add', 'adddate']],
['Timestamp_days_sub', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::DaysSub', ['days_sub', 'date_sub', 'subdate']],
['Timestamp_hours_add', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::HoursAdd', ['hours_add']],
['Timestamp_hours_sub', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::HoursSub', ['hours_sub']],
['Timestamp_minutes_add', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::MinutesAdd', ['minutes_add']],
['Timestamp_minutes_sub', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::MinutesSub', ['minutes_sub']],
['Timestamp_seconds_add', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::SecondsAdd', ['seconds_add']],
['Timestamp_seconds_sub', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::SecondsSub', ['seconds_sub']],
['Timestamp_milliseconds_add', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::MillisAdd', ['milliseconds_add']],
['Timestamp_milliseconds_sub', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::MillisSub', ['milliseconds_sub']],
['Timestamp_microseconds_add', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::MicrosAdd', ['microseconds_add']],
['Timestamp_microseconds_sub', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::MicrosSub', ['microseconds_sub']],
['Timestamp_nanoseconds_add', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::NanosAdd', ['nanoseconds_add']],
['Timestamp_nanoseconds_sub', 'TIMESTAMP', ['TIMESTAMP', 'INT'], \
'TimestampFunctions::NanosSub', ['nanoseconds_sub']],
['Timestamp_diff', 'INT', ['TIMESTAMP', 'TIMESTAMP'], \
'TimestampFunctions::DateDiff', ['datediff']],
['From_utc_timestamp', 'TIMESTAMP', ['TIMESTAMP', 'STRING'], \
'TimestampFunctions::FromUtc', ['from_utc_timestamp']],
['To_utc_timestamp', 'TIMESTAMP', ['TIMESTAMP', 'STRING'], \
'TimestampFunctions::ToUtc', ['to_utc_timestamp']],
# Conditional Functions
['Conditional_If', 'BOOLEAN', ['BOOLEAN', 'BOOLEAN', 'BOOLEAN'], \
'ConditionalFunctions::IfBool', ['if']],
['Conditional_If', 'BIGINT', ['BOOLEAN', 'BIGINT', 'BIGINT'], \
'ConditionalFunctions::IfInt', ['if']],
['Conditional_If', 'DOUBLE', ['BOOLEAN', 'DOUBLE', 'DOUBLE'], \
'ConditionalFunctions::IfFloat', ['if']],
['Conditional_If', 'STRING', ['BOOLEAN', 'STRING', 'STRING'], \
'ConditionalFunctions::IfString', ['if']],
['Conditional_If', 'TIMESTAMP', ['BOOLEAN', 'TIMESTAMP', 'TIMESTAMP'], \
'ConditionalFunctions::IfTimestamp', ['if']],
['Conditional_Coalesce', 'BOOLEAN', ['BOOLEAN', '...'], \
'ConditionalFunctions::CoalesceBool', ['coalesce']],
['Conditional_Coalesce', 'BIGINT', ['BIGINT', '...'], \
'ConditionalFunctions::CoalesceInt', ['coalesce']],
['Conditional_Coalesce', 'DOUBLE', ['DOUBLE', '...'], \
'ConditionalFunctions::CoalesceFloat', ['coalesce']],
['Conditional_Coalesce', 'STRING', ['STRING', '...'], \
'ConditionalFunctions::CoalesceString', ['coalesce']],
['Conditional_Coalesce', 'TIMESTAMP', ['TIMESTAMP', '...'], \
'ConditionalFunctions::CoalesceTimestamp', ['coalesce']],
]
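# A minimal sketch, not taken from the real code-gen scripts, showing how one
# entry of the list above can be unpacked according to the documented format
# (<function name>, <return_type>, [<args>], <backend function name>, [<sql aliases>]):
if __name__ == '__main__':
    for name, return_type, arg_types, backend_fn, sql_aliases in functions:
        print('%s(%s) -> %s  backend=%s aliases=%s'
              % (name, ', '.join(arg_types), return_type, backend_fn,
                 ', '.join(sql_aliases) or '-'))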
|
StarcoderdataPython
|
3200698
|
<gh_stars>1-10
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import os, glob, shutil
def crop_center(im, new_height, new_width):
height = im.shape[0] # Get dimensions
width = im.shape[1]
left = (width - new_width) // 2
top = (height - new_height) // 2
right = (width + new_width) // 2
bottom = (height + new_height) // 2
return im[top:bottom, left:right]
def process_folder():
img_path1 = '/home/zmy/Downloads/betaDisentangle/374e_beta5_dim55_30'
aim_path = '/home/zmy/Downloads/betaDisentangle/374e'
img_files = [img for img in os.listdir(img_path1) if img.endswith('.png')]
print(img_files)
for img_file in img_files:
old_file = os.path.join(img_path1, img_file)
prefix, rest = img_file.split('_')[0], img_file.split('_')[1]
new_name = '1' + prefix.zfill(2) + '_' + rest
new_file = os.path.join(aim_path, new_name)
shutil.copy(old_file, new_file)
print(prefix, rest, new_name, new_file)
if __name__ == '__main__':
fig = plt.figure(figsize=(55, 16), dpi=100) # width, height
rows = 2
columns = 6
img_path = '/home/zmy/Downloads/betaDisentangle/374e'
# Method1: plot all cols
# row_start = 0
# col_start = 0
# Method2: plot selected cols
aim_col_list = [2,4,6,8,9,10]
row_start=0
start_inx = 0
col_start = aim_col_list[start_inx]
for i in range(1, columns * rows + 1):
fig.add_subplot(rows, columns, i)
plt.axis('off')
name = str(row_start) + str(col_start).zfill(2) + '_gen.png'
print("Ploting Image", name)
I = mpimg.imread(os.path.join(img_path, name))
I = crop_center(I, 260, 260)
plt.imshow(I)
# Method1:
# col_start += 1
# Method2:
start_inx+=1
if start_inx < len(aim_col_list):
col_start = aim_col_list[start_inx]
# Method1:
# if col_start > 10:
# col_start = 0
# row_start += 1
# Method2:
if start_inx >= len(aim_col_list):
start_inx = 0
col_start = aim_col_list[start_inx]
row_start += 1
fig.tight_layout()
# plt.subplots_adjust(left=0.05, right=0.95,bottom=0.05,top=0.95,wspace=0.2, hspace=0.2)
# plt.subplots_adjust(wspace=0.2, hspace=0.2)
plt.savefig('/home/zmy/Downloads/betaDisentangle/374e_interpolation_fig_w55_h16_sub_w260_h260.png')
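# A quick, hypothetical sanity check for crop_center (array shape chosen
# arbitrarily); kept as comments so it does not interfere with the plot above.
#   dummy = np.zeros((400, 500, 3))
#   assert crop_center(dummy, 260, 260).shape == (260, 260, 3)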
|
StarcoderdataPython
|
4946506
|
import hyperchamber as hc
import numpy as np
import torch
from .base_distribution import BaseDistribution
from ..gan_component import ValidationException
from torch.distributions import uniform
from hypergan.gan_component import ValidationException, GANComponent
TINY=1e-12
class FitnessDistribution(BaseDistribution):
def __init__(self, gan, config):
BaseDistribution.__init__(self, gan, config)
klass = GANComponent.lookup_function(None, self.config['source'])
self.source = klass(gan, config)
self.current_channels = config["z"]
self.current_width = 1
self.current_height = 1
self.current_input_size = config["z"]
self.z = self.source.z
def create(self):
pass
def next(self):
if not hasattr(self, 'best_sample'):
self.best_sample = self.source.next()
self.best_sample = self.next()
if self.config.discard_prior:
all_samples = []
all_scores = []
else:
all_samples = torch.split(self.best_sample, 1, dim=0)
prior_best_scores = self.gan.discriminator(self.gan.generator(self.best_sample))
all_scores = torch.split(prior_best_scores, 1, dim=0)
all_scores = [d.mean() for d in all_scores]
for i in range(self.config.steps or 1):
sample = self.source.next()
d_scores = self.gan.discriminator(self.gan.generator(sample))
d_scores = torch.split(d_scores, 1, dim=0)
d_scores = [d.mean() for d in d_scores]
samples = torch.split(sample, 1, dim=0)
all_scores += d_scores
all_samples += samples
all_scores = [s.item() for s in all_scores]
sorted_idx = np.argsort(all_scores)
if self.config.reverse:
sorted_idx = sorted_idx[::-1]
sorted_idx = sorted_idx[:self.gan.batch_size()]
sorted_samples = [all_samples[idx] for idx in sorted_idx]
self.best_sample = torch.cat(sorted_samples, dim=0)
self.z = self.best_sample
return self.best_sample
|
StarcoderdataPython
|
289789
|
"""
Copyright (C) 2019 <NAME> <<EMAIL>>
MIT License
"""
import multiprocessing
import threading
import time
from collections import defaultdict
from queue import Queue
from uuid import uuid4
import zmq
from dotenv import load_dotenv
load_dotenv()
from _14_worker import Worker
from message_handler import MessageHandler
from utils import setup_logging
def _zmq_puller(message_queue, zmq_pull_url):
ctx = zmq.Context()
with ctx.socket(zmq.PULL) as zmq_socket:
zmq_socket.setsockopt(zmq.SNDHWM, 10000)
zmq_socket.setsockopt(zmq.RCVHWM, 10000)
zmq_socket.connect(zmq_pull_url)
while True:
try:
packed = zmq_socket.recv()
message_queue.put(packed)
            except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
break
def _launch_worker(message_queue, kill_switch):
"""
Starts worker that connects to the Pusher
"""
# create dedicated queue for worker process
status_queue = multiprocessing.Queue()
queue = multiprocessing.JoinableQueue()
# create Worker, add to list, and start
worker = Worker(queue, status_queue, kill_switch)
# recv data from socket and add to queue
for slot in range(2500):
packed = message_queue.get()
print(packed)
queue.put(packed)
queue.put("END")
worker.start()
class ZMQPuller(multiprocessing.Process):
name = "ZMQPuller"
def __init__(self, kill_switch, pull_host=r'127.0.0.1', pull_port="5559", interval_time=10):
multiprocessing.Process.__init__(self)
self.zmq_pull_url = f'tcp://{pull_host}:{pull_port}'
self.kill_switch = kill_switch
self.worker_processes = 12
self.work_size = 2500
self.message_queue_size = 2500
self.show_first_message = True
self.interval_time = interval_time
self.initialize_counters()
def run(self):
self.logger = multiprocessing.get_logger()
self.logger.handlers[0] = setup_logging()
self.message_queue = Queue()
self._receive_thread = threading.Thread(target=_zmq_puller, args=(self.message_queue, self.zmq_pull_url,), name="ZMQPullerThread")
self._receive_thread.start()
self.logger.info("\n\n")
self.logger.info(f'\tSpawning ZMQPuller')
self.logger.info("\n\n")
self.running_workers = defaultdict(list)
self.finished_workers_and_queues = defaultdict(list)
self.initialize_counters()
while not self.kill_switch.is_set():
self.check_for_completed_workers()
# if queue greater x AND worker processes currently launched less than threshhold
if self.message_queue.qsize() > self.message_queue_size and len(self.running_workers) <= self.worker_processes:
# _thread = threading.Thread(target=_launch_worker, args=(self.message_queue, self.kill_switch,), name="Message Distributor Thread")
# _thread.start()
self.launch_worker()
else:
time.sleep(0.1)
self.check_status()
def launch_worker(self):
"""
Starts worker that connects to the Pusher
"""
# create dedicated queue for worker process
status_queue = multiprocessing.Queue()
queue = multiprocessing.JoinableQueue()
# create Worker, add to list, and start
worker = Worker(queue, status_queue, self.kill_switch)
# recv data from socket and add to queue
for slot in range(self.work_size):
packed = self.message_queue.get()
queue.put(packed)
self.counter_messages_period += 1
queue.put("--END--")
worker.start()
self.running_workers[str(uuid4())] = worker, queue
def check_for_completed_workers(self):
if len(self.running_workers) > 0:
workers_to_delete = []
# check if alive and queue empty
for idx, (uuid, (_worker, _queue)) in enumerate(self.running_workers.items()):
if not _worker.is_alive() and _queue.empty():
self.counter_messages_total += self.work_size
self.logger.debug(f'Worker[{idx}]:\tis not alive')
self.logger.debug(f'Worker[{idx}]:\tqueue is empty')
self.logger.debug(f'Moving Worker[{idx}] to Finished Worker List')
self.finished_workers_and_queues[uuid] = _worker, _queue
self.logger.debug(f'Adding Worker[{idx}] to Finished Worker List')
workers_to_delete.append(uuid)
if len(workers_to_delete) > 0:
for uuid in workers_to_delete:
self.logger.debug(f'Removing Worker[{uuid}] from Running Workers List')
del self.running_workers[uuid]
self.logger.debug(f'\n\n')
def check_status(self):
self.counter_total_consumer += 1
self.counter_total_manager += 1
self.counter_messages_period_sleep += 1
self.current_loop_iteration += 1
self.counter += 1
if time.time() - self.time_period_start > self.interval_time:
time_now = time.time()
time_period = time_now - self.time_period_start
self.time_total = self.time_total + time_period
messages_per_second = round(self.counter_messages_period / time_period, 2)
self.logger.info(f'')
self.logger.info(f'Time Elapsed:\t{round(self.interval_time, 2)} seconds')
self.logger.info(f'Messages During Period:\t{self.counter_messages_period}')
self.logger.info(f'Messages Per Second:\t{messages_per_second}')
self.logger.info(f'')
self.logger.info(f'Total Time Elapsed:\t{round(self.time_total, 2)} seconds')
self.logger.info(f'Total Messages Distributed to finished Workers:\t{self.counter_messages_total}')
self.logger.info(f'Total Messages Per Second:\t{round((self.counter_messages_total / self.time_total), 2)}')
self.logger.info(f'')
self.logger.info(f'')
self.logger.info(f"\tTotal Messages in _Queue:\t {self.message_queue.qsize()}")
self.logger.info(f"\tCurrently Running Workers:\t {len(self.running_workers)}")
self.logger.info("\n\n")
self.counter_messages_period = 0
self.counter_messages_period_sleep = 0
self.time_period_start = time.time()
def initialize_counters(self):
self.counter = 0
self.counter_total_manager = 0
self.counter_total_consumer = 0
self.current_loop_iteration = 0
self.counter_messages_total = 0
self.counter_messages_period = 0
self.counter_messages_period_sleep = 0
self.counter_messages_current = 0
self.time_total = 0
self.time_period_start = time.time()
def __repr__(self):
return self.name
def __str__(self):
return self.name
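# A complementary, hypothetical producer sketch (not part of this module): the
# puller above connects a PULL socket to tcp://127.0.0.1:5559, so a matching
# producer would bind a PUSH socket to that endpoint and send packed messages:
#
#   import zmq
#   ctx = zmq.Context()
#   push = ctx.socket(zmq.PUSH)
#   push.bind('tcp://127.0.0.1:5559')
#   for payload in (b'msg-1', b'msg-2'):
#       push.send(payload)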
|
StarcoderdataPython
|
11274143
|
# Program to digest make depend files
import os
import re
import sys
from typing import Dict, List, Set
from collections import defaultdict
DirectoryName = str
FileName = str
def usage():
print(""" Usage:
python factoroptions.py input-options-filename
""")
sys.exit(1)
def get_includes(line: str) -> Set[str]:
""" Return the set of Include options from the -options-for-sources line. """
parts = line.split(";")
# parts[0] has the -options-for-sources and the source file.
includes = {part for part in parts
if (part.startswith("-I ") and not part.startswith("-I /usr"))}
return includes
def get_usr_includes(line: str) -> Set[str]:
""" Return the set of Include options from the -options-for-sources line. """
parts = line.split(";")
# parts[0] has the -options-for-sources and the source file.
includes = {part for part in parts if part.startswith("-I /usr")}
return includes
def get_non_includes(line: str) -> Set[str]:
""" Return the set of non-Include options from the -options-for-sources line. """
parts = line.split(";")
# parts[0] has the -options-for-sources and the source file.
others = {part for part in parts[1:] if not part.startswith("-I ")}
return others
class FactorOptions:
""" Digest .depend files"""
def __init__(self, input_filename):
self.input_filename = input_filename
self.output_filename = input_filename + ".factored"
self.includes_filename = input_filename + ".includes"
self.defines_filename = input_filename + ".defines"
self.lines = self.read_input
# Collect the -options-for-sources lines.
self.ofs_lines = [line for line in self.lines if line.startswith("-options-for-sources ")]
common_includes = get_includes(self.ofs_lines[0])
common_usr_includes = get_usr_includes(self.ofs_lines[0])
common_other = get_non_includes(self.ofs_lines[0])
print(f"len(common_includes) = {len(common_includes)}")
print(f"len(common_usr_includes) = {len(common_usr_includes)}")
print(f"len(common_other) = {len(common_other)}")
for i in range(1, len(self.ofs_lines)):
this_includes = get_includes(self.ofs_lines[i])
common_includes = common_includes.union(this_includes)
print(f"len(common_includes) = {len(common_includes)}")
this_usr_includes = get_usr_includes(self.ofs_lines[i])
common_usr_includes = common_usr_includes.union(this_usr_includes)
print(f"len(common_usr_includes) = {len(common_usr_includes)}")
this_other = get_non_includes((self.ofs_lines[i]))
common_other = common_other.union(this_other)
print(f"len(common_other) = {len(common_other)}")
self.common_includes = common_includes
self.common_usr_includes = common_usr_includes
self.common_other = common_other
self.factored = self.compute_output()
pass
@property
def read_input(self) -> List[str]:
with open(self.input_filename, 'r') as f:
lines = [line.strip("\\\n") for line in f]
return lines
def is_common(self, arg):
if arg in self.common_includes:
return True
if arg in self.common_usr_includes:
return True
return arg in self.common_other
def compute_new_ofs(self, ofs_index) -> str:
line = self.ofs_lines[ofs_index]
parts = line.split(";")
args = []
for i in range(1, len(parts)):
arg = parts[i]
if not self.is_common(arg):
args.append(arg)
if args:
argstring = ';'.join(args)
result = ':'.join([parts[0], argstring])
return result
return ''
def get_common_usr_include_list(self) -> List[str]:
# Preserve the order from the first.
line = self.ofs_lines[0]
parts = line.split(";")
result = []
for i in range(1, len(parts)):
part = parts[i]
if part in self.common_usr_includes:
if os.path.isdir(part[3:]):
result.append(part)
else:
print(part + " is not a directory")
return result
def compute_output(self) -> List[str]:
factored_lines = []
ofs_index = 0
for line in self.lines:
if line.startswith("-options-for-sources "):
factored = self.compute_new_ofs(ofs_index)
if factored:
factored_lines.append(factored)
ofs_index += 1
pass
else:
if ofs_index == len(self.ofs_lines):
# We have processed the last ofs line. Output common.
factored_lines += ["# Start of factored options"]
# First the non-/usr includes.
sorted_includes = [include for include in self.common_includes
if os.path.isdir(include[3:])]
sorted_includes.sort()
factored_lines += sorted_includes
# Next the /usr includes.
factored_lines += self.get_common_usr_include_list()
# Now the others. Let's sort them too.
sorted_others = [other for other in self.common_other]
sorted_others.sort()
factored_lines += sorted_others
# increment ofs_index so we don't do this again.
ofs_index += 1
factored_lines += ["# End of factored options"]
# One of the original lines.
factored_lines.append(line)
pass
return factored_lines
def output_factored(self):
with open(self.output_filename, "w") as w:
for line in self.factored:
w.write(line + '\n')
pass
def output_defines(self):
with open(self.defines_filename, "w") as w:
for line in self.common_other:
w.write(line + '\n')
pass
def output_includes(self):
with open(self.includes_filename, "w") as w:
for line in self.common_usr_includes:
w.write(line + '\n')
for line in self.common_includes:
w.write(line + '\n')
pass
if __name__ == u'__main__':
print('Factoring PolySpace Code Prover options\n')
options = sys.argv
if len(sys.argv) != 2:
usage()
factor = FactorOptions(options[1])
factor.output_factored()
factor.output_defines()
factor.output_includes()
print('\ndone.\n')
|
StarcoderdataPython
|
1957945
|
# ActivitySim
# See full license in LICENSE.txt.
import logging
import numpy as np
import pandas as pd
from activitysim.core.interaction_sample_simulate import interaction_sample_simulate
from activitysim.core import config
from activitysim.core import tracing
from activitysim.core import inject
from activitysim.core import mem
from activitysim.core import chunk
from activitysim.core import simulate
from activitysim.core import logit
from activitysim.core import los
from activitysim.core import timetable as tt
from activitysim.core.util import reindex
from activitysim.core import expressions
from activitysim.core.pathbuilder import TransitVirtualPathBuilder
logger = logging.getLogger(__name__)
TDD_CHOICE_COLUMN = 'tdd'
USE_BRUTE_FORCE_TO_COMPUTE_LOGSUMS = False
RUN_ALTS_PREPROCESSOR_BEFORE_MERGE = True # see FIXME below before changing this
def skims_for_logsums(tour_purpose, model_settings, trace_label):
assert 'LOGSUM_SETTINGS' in model_settings
network_los = inject.get_injectable('network_los')
skim_dict = network_los.get_default_skim_dict()
orig_col_name = 'home_zone_id'
destination_for_tour_purpose = model_settings.get('DESTINATION_FOR_TOUR_PURPOSE')
if isinstance(destination_for_tour_purpose, str):
dest_col_name = destination_for_tour_purpose
elif isinstance(destination_for_tour_purpose, dict):
dest_col_name = destination_for_tour_purpose.get(tour_purpose)
else:
raise RuntimeError(f"expected string or dict DESTINATION_FOR_TOUR_PURPOSE model_setting for {tour_purpose}")
odt_skim_stack_wrapper = skim_dict.wrap_3d(orig_key=orig_col_name, dest_key=dest_col_name,
dim3_key='out_period')
dot_skim_stack_wrapper = skim_dict.wrap_3d(orig_key=dest_col_name, dest_key=orig_col_name,
dim3_key='in_period')
odr_skim_stack_wrapper = skim_dict.wrap_3d(orig_key=orig_col_name, dest_key=dest_col_name,
dim3_key='in_period')
dor_skim_stack_wrapper = skim_dict.wrap_3d(orig_key=dest_col_name, dest_key=orig_col_name,
dim3_key='out_period')
od_skim_stack_wrapper = skim_dict.wrap(orig_col_name, dest_col_name)
skims = {
"odt_skims": odt_skim_stack_wrapper,
"dot_skims": dot_skim_stack_wrapper,
"odr_skims": odr_skim_stack_wrapper,
"dor_skims": dor_skim_stack_wrapper,
"od_skims": od_skim_stack_wrapper,
'orig_col_name': orig_col_name,
'dest_col_name': dest_col_name,
}
if network_los.zone_system == los.THREE_ZONE:
# fixme - is this a lightweight object?
tvpb = network_los.tvpb
tvpb_logsum_odt = tvpb.wrap_logsum(orig_key=orig_col_name, dest_key=dest_col_name,
tod_key='out_period', segment_key='demographic_segment',
trace_label=trace_label, tag='tvpb_logsum_odt')
tvpb_logsum_dot = tvpb.wrap_logsum(orig_key=dest_col_name, dest_key=orig_col_name,
tod_key='in_period', segment_key='demographic_segment',
trace_label=trace_label, tag='tvpb_logsum_dot')
skims.update({
'tvpb_logsum_odt': tvpb_logsum_odt,
'tvpb_logsum_dot': tvpb_logsum_dot
})
return skims
def _compute_logsums(alt_tdd, tours_merged, tour_purpose, model_settings, network_los, skims, trace_label):
"""
compute logsums for tours using skims for alt_tdd out_period and in_period
"""
trace_label = tracing.extend_trace_label(trace_label, 'logsums')
with chunk.chunk_log(trace_label):
logsum_settings = config.read_model_settings(model_settings['LOGSUM_SETTINGS'])
choosers = alt_tdd.join(tours_merged, how='left', rsuffix='_chooser')
logger.info(f"{trace_label} compute_logsums for {choosers.shape[0]} choosers {alt_tdd.shape[0]} alts")
# - locals_dict
constants = config.get_model_constants(logsum_settings)
locals_dict = {}
locals_dict.update(constants)
if network_los.zone_system == los.THREE_ZONE:
# TVPB constants can appear in expressions
locals_dict.update(network_los.setting('TVPB_SETTINGS.tour_mode_choice.CONSTANTS'))
locals_dict.update(skims)
# constrained coefficients can appear in expressions
coefficients = simulate.get_segment_coefficients(logsum_settings, tour_purpose)
locals_dict.update(coefficients)
# - run preprocessor to annotate choosers
# allow specification of alternate preprocessor for nontour choosers
preprocessor = model_settings.get('LOGSUM_PREPROCESSOR', 'preprocessor')
preprocessor_settings = logsum_settings[preprocessor]
if preprocessor_settings:
simulate.set_skim_wrapper_targets(choosers, skims)
expressions.assign_columns(
df=choosers,
model_settings=preprocessor_settings,
locals_dict=locals_dict,
trace_label=trace_label)
# - compute logsums
logsum_spec = simulate.read_model_spec(file_name=logsum_settings['SPEC'])
logsum_spec = simulate.eval_coefficients(logsum_spec, coefficients, estimator=None)
nest_spec = config.get_logit_model_settings(logsum_settings)
nest_spec = simulate.eval_nest_coefficients(nest_spec, coefficients, trace_label)
logsums = simulate.simple_simulate_logsums(
choosers,
logsum_spec,
nest_spec,
skims=skims,
locals_d=locals_dict,
chunk_size=0,
trace_label=trace_label)
return logsums
def dedupe_alt_tdd(alt_tdd, tour_purpose, trace_label):
tdd_segments = inject.get_injectable('tdd_alt_segments', None)
alt_tdd_periods = None
    with chunk.chunk_log(tracing.extend_trace_label(trace_label, 'dedupe_alt_tdd')):
        if tdd_segments is not None:
            logger.info("tdd_alt_segments specified for representative logsums")
            dedupe_columns = ['out_period', 'in_period']
# tdd_alt_segments is optionally segmented by tour purpose
if 'tour_purpose' in tdd_segments:
is_tdd_for_tour_purpose = (tdd_segments.tour_purpose == tour_purpose)
if not is_tdd_for_tour_purpose.any():
is_tdd_for_tour_purpose = tdd_segments.tour_purpose.isnull()
assert is_tdd_for_tour_purpose.any(), \
f"no segments found for tour purpose {tour_purpose} in tour_departure_and_duration_segments"
tdd_segments = tdd_segments[is_tdd_for_tour_purpose].drop(columns=['tour_purpose'])
assert len(tdd_segments) > 0, f"tour_purpose '{tour_purpose}' not in tdd_alt_segments"
# left join representative start on out_period
alt_tdd_periods = \
pd.merge(alt_tdd[['out_period', 'in_period']].reset_index(),
tdd_segments[['time_period', 'start']].rename(columns={'time_period': 'out_period'}),
how='left', on='out_period')
chunk.log_df(trace_label, "alt_tdd_periods", alt_tdd_periods)
# left join representative end on in_period
alt_tdd_periods = \
pd.merge(alt_tdd_periods,
tdd_segments[['time_period', 'end']].rename(columns={'time_period': 'in_period'}),
how='left', on=['in_period'])
chunk.log_df(trace_label, "alt_tdd_periods", alt_tdd_periods)
if tdd_segments.start.isnull().any():
missing_periods = tdd_segments.out_period[tdd_segments.start.isnull()].unique()
logger.warning(f"missing out_periods in tdd_alt_segments: {missing_periods}")
if tdd_segments.end.isnull().any():
missing_periods = tdd_segments.in_period[tdd_segments.end.isnull()].unique()
logger.warning(f"missing in_periods in tdd_alt_segments: {missing_periods}")
assert not tdd_segments.start.isnull().any()
assert not tdd_segments.end.isnull().any()
# drop duplicates
alt_tdd_periods = alt_tdd_periods.drop_duplicates().set_index(alt_tdd.index.name)
chunk.log_df(trace_label, "alt_tdd_periods", alt_tdd_periods)
# representative duration
alt_tdd_periods['duration'] = alt_tdd_periods['end'] - alt_tdd_periods['start']
chunk.log_df(trace_label, "alt_tdd_periods", alt_tdd_periods)
logger.debug(f"{trace_label} "
f"dedupe_alt_tdd.tdd_alt_segments reduced number of rows by "
f"{round(100 * (len(alt_tdd) - len(alt_tdd_periods)) / len(alt_tdd), 2)}% "
f"from {len(alt_tdd)} to {len(alt_tdd_periods)}")
# if there is no tdd_alt_segments file, we can at least dedupe on 'out_period', 'in_period', 'duration'
if alt_tdd_periods is None:
# FIXME This won't work if they reference start or end in logsum calculations
# for MTC only duration is used (to calculate all_day parking cost)
dedupe_columns = ['out_period', 'in_period', 'duration']
logger.warning(f"No tdd_alt_segments for representative logsums so fallback to "
f"deduping tdd_alts by time_period and duration")
# - get list of unique (tour_id, out_period, in_period, duration) in alt_tdd_periods
# we can cut the number of alts roughly in half (for mtctm1) by conflating duplicates
alt_tdd_periods = alt_tdd[dedupe_columns].reset_index().drop_duplicates().set_index(alt_tdd.index.name)
chunk.log_df(trace_label, "alt_tdd_periods", alt_tdd_periods)
logger.debug(f"{trace_label} "
f"dedupe_alt_tdd.drop_duplicates reduced number of rows by "
f"{round(100 * (len(alt_tdd) - len(alt_tdd_periods)) / len(alt_tdd), 2)}% "
f"from {len(alt_tdd)} to {len(alt_tdd_periods)}")
return alt_tdd_periods, dedupe_columns
def compute_logsums(alt_tdd, tours_merged, tour_purpose, model_settings, skims, trace_label):
"""
Compute logsums for the tour alt_tdds, which will differ based on their different start, stop
times of day, which translate to different odt_skim out_period and in_periods.
In mtctm1, tdds are hourly, but there are only 5 skim time periods, so some of the tdd_alts
will be the same, once converted to skim time periods. With 5 skim time periods there are
    15 unique out-in period pairs but 190 tdd alternatives.
    For efficiency, rather than compute a lot of redundant logsums, we compute logsums for the unique
    (out-period, in-period) pairs and then join them back to the alt_tdds.
"""
trace_label = tracing.extend_trace_label(trace_label, 'compute_logsums')
network_los = inject.get_injectable('network_los')
# - in_period and out_period
assert 'out_period' not in alt_tdd
assert 'in_period' not in alt_tdd
alt_tdd['out_period'] = network_los.skim_time_period_label(alt_tdd['start'])
alt_tdd['in_period'] = network_los.skim_time_period_label(alt_tdd['end'])
alt_tdd['duration'] = alt_tdd['end'] - alt_tdd['start']
# outside chunk_log context because we extend log_df call for alt_tdd made by our only caller _schedule_tours
chunk.log_df(trace_label, "alt_tdd", alt_tdd)
with chunk.chunk_log(trace_label):
if USE_BRUTE_FORCE_TO_COMPUTE_LOGSUMS:
# compute logsums for all the tour alt_tdds (inefficient)
logsums = _compute_logsums(alt_tdd, tours_merged, tour_purpose,
model_settings, network_los, skims, trace_label)
return logsums
index_name = alt_tdd.index.name
deduped_alt_tdds, redupe_columns = dedupe_alt_tdd(alt_tdd, tour_purpose, trace_label)
chunk.log_df(trace_label, "deduped_alt_tdds", deduped_alt_tdds)
logger.info(f"{trace_label} compute_logsums "
f"deduped_alt_tdds reduced number of rows by "
f"{round(100 * (len(alt_tdd) - len(deduped_alt_tdds)) / len(alt_tdd), 2)}% "
f"from {len(alt_tdd)} to {len(deduped_alt_tdds)} compared to USE_BRUTE_FORCE_TO_COMPUTE_LOGSUMS")
t0 = tracing.print_elapsed_time()
# - compute logsums for the alt_tdd_periods
deduped_alt_tdds['logsums'] = \
_compute_logsums(deduped_alt_tdds, tours_merged, tour_purpose,
model_settings, network_los, skims, trace_label)
# tracing.log_runtime(model_name=trace_label, start_time=t0)
# redupe - join the alt_tdd_period logsums to alt_tdd to get logsums for alt_tdd
logsums = pd.merge(
alt_tdd.reset_index(),
deduped_alt_tdds.reset_index(),
on=[index_name] + redupe_columns,
how='left'
).set_index(index_name).logsums
chunk.log_df(trace_label, "logsums", logsums)
del deduped_alt_tdds
chunk.log_df(trace_label, "deduped_alt_tdds", None)
# this is really expensive
TRACE = False
if TRACE:
trace_logsums_df = logsums.to_frame('representative_logsum')
trace_logsums_df['brute_force_logsum'] = \
_compute_logsums(alt_tdd, tours_merged, tour_purpose, model_settings, network_los, skims, trace_label)
tracing.trace_df(trace_logsums_df,
label=tracing.extend_trace_label(trace_label, 'representative_logsums'),
slicer='NONE', transpose=False)
# leave it to our caller to pick up logsums with call to chunk.log_df
return logsums
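# Illustrative note (not part of the original module): the savings from representative
# logsums come from simple counting. With 5 skim time periods, the feasible
# (out_period, in_period) pairs with in_period >= out_period number 5 + 4 + 3 + 2 + 1 = 15,
# while the hourly tdd alternatives number 190, so roughly 15 logsum evaluations can stand
# in for 190 per chooser whenever start/end only matter through their skim time periods.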
def get_previous_tour_by_tourid(current_tour_window_ids,
previous_tour_by_window_id,
alts):
"""
Matches current tours with attributes of previous tours for the same
person. See the return value below for more information.
Parameters
----------
current_tour_window_ids : Series
        A Series of parent ids for the tours we're about to make the choice for
- index should match the tours DataFrame.
previous_tour_by_window_id : Series
A Series where the index is the parent (window) id and the value is the index
of the alternatives of the scheduling.
alts : DataFrame
The alternatives of the scheduling.
Returns
-------
prev_alts : DataFrame
A DataFrame with an index matching the CURRENT tours we're making a
decision for, but with columns from the PREVIOUS tour of the person
associated with each of the CURRENT tours. Columns listed in PREV_TOUR_COLUMNS
from the alternatives will have "_previous" added as a suffix to keep
differentiated from the current alternatives that will be part of the
interaction.
"""
PREV_TOUR_COLUMNS = ['start', 'end']
previous_tour_by_tourid = \
previous_tour_by_window_id.loc[current_tour_window_ids]
previous_tour_by_tourid = alts.loc[previous_tour_by_tourid, PREV_TOUR_COLUMNS]
previous_tour_by_tourid.index = current_tour_window_ids.index
previous_tour_by_tourid.columns = [x+'_previous' for x in PREV_TOUR_COLUMNS]
return previous_tour_by_tourid
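# Minimal sketch (hypothetical values, not from the original source) of the renaming above:
# if alts has columns 'start' and 'end', e.g. alts.loc[5] == (start=8, end=17), and the
# previous tour chosen for a person was alt 5, then the row returned for that person's
# current tour is (start_previous=8, end_previous=17), indexed like the current tours so
# it can be joined straight onto them (as done in _schedule_tours below).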
def tdd_interaction_dataset(tours, alts, timetable, choice_column, window_id_col, trace_label):
"""
interaction_sample_simulate expects
alts index same as choosers (e.g. tour_id)
name of choice column in alts
Parameters
----------
tours : pandas DataFrame
must have person_id column and index on tour_id
alts : pandas DataFrame
alts index must be timetable tdd id
timetable : TimeTable object
choice_column : str
name of column to store alt index in alt_tdd DataFrame
(since alt_tdd is duplicate index on person_id but unique on person_id,alt_id)
Returns
-------
alt_tdd : pandas DataFrame
        columns: start, end, duration, <choice_column>
index: tour_id
"""
trace_label = tracing.extend_trace_label(trace_label, 'tdd_interaction_dataset')
with chunk.chunk_log(trace_label):
alts_ids = np.tile(alts.index, len(tours.index))
chunk.log_df(trace_label, 'alts_ids', alts_ids)
tour_ids = np.repeat(tours.index, len(alts.index))
window_row_ids = np.repeat(tours[window_id_col], len(alts.index))
alt_tdd = alts.take(alts_ids)
alt_tdd.index = tour_ids
alt_tdd[window_id_col] = window_row_ids
# add tdd alternative id
# by convention, the choice column is the first column in the interaction dataset
alt_tdd.insert(loc=0, column=choice_column, value=alts_ids)
# slice out all non-available tours
available = timetable.tour_available(alt_tdd[window_id_col], alt_tdd[choice_column])
logger.debug(f"tdd_interaction_dataset keeping {available.sum()} of ({len(available)}) available alt_tdds")
assert available.any()
chunk.log_df(trace_label, 'alt_tdd', alt_tdd) # catch this before we slice on available
alt_tdd = alt_tdd[available]
chunk.log_df(trace_label, 'alt_tdd', alt_tdd)
# FIXME - don't need this any more after slicing
del alt_tdd[window_id_col]
return alt_tdd
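# Minimal sketch (hypothetical sizes): for 2 tours and 190 tdd alts, the np.tile/np.repeat
# cross join above yields 380 candidate rows indexed (non-uniquely) by tour_id, with the
# 'tdd' choice column inserted first; timetable.tour_available() then drops the rows whose
# time window conflicts with already-scheduled tours before the interaction simulation runs.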
def run_alts_preprocessor(model_settings, alts, segment, locals_dict, trace_label):
"""
run preprocessor on alts, as specified by ALTS_PREPROCESSOR in model_settings
we are agnostic on whether alts are merged or not
Parameters
----------
model_settings: dict
yaml model settings file as dict
alts: pandas.DataFrame
        tdd_alts or tdd_alts merged with choosers (we are agnostic)
segment: string
segment selector as understood by caller (e.g. logsum_tour_purpose)
locals_dict: dict
        we let the caller worry about what needs to be in it, though it actually depends on the modeler's needs
trace_label: string
Returns
-------
alts: pandas.DataFrame
annotated copy of alts
"""
preprocessor_settings = model_settings.get('ALTS_PREPROCESSOR', {})
if segment in preprocessor_settings:
# segmented by logsum_tour_purpose
preprocessor_settings = preprocessor_settings.get(segment)
logger.debug(f"running ALTS_PREPROCESSOR with spec for {segment}: {preprocessor_settings.get('SPEC')}")
elif 'SPEC' in preprocessor_settings:
# unsegmented (either because no segmentation, or fallback if settings has generic preprocessor)
logger.debug(f"running ALTS_PREPROCESSOR with unsegmented spec {preprocessor_settings.get('SPEC')}")
else:
logger.debug(f"skipping alts preprocesser because no ALTS_PREPROCESSOR segment for {segment}")
preprocessor_settings = None
if preprocessor_settings:
logger.debug(f"run_alts_preprocessor calling assign_columns for {segment} preprocessor_settings")
alts = alts.copy()
expressions.assign_columns(
df=alts,
model_settings=preprocessor_settings,
locals_dict=locals_dict,
trace_label=trace_label)
return alts
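# Hedged example of the two settings shapes this function accepts (keys below are
# assumptions based on typical activitysim preprocessor settings, not taken from a
# specific model):
#
# ALTS_PREPROCESSOR:          # segmented by logsum_tour_purpose
#   work:
#     SPEC: tour_scheduling_annotate_alts_work
#     DF: alt_tdd
#
# ALTS_PREPROCESSOR:          # unsegmented fallback
#   SPEC: tour_scheduling_annotate_alts
#   DF: alt_tdd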
def _schedule_tours(
tours, persons_merged, alts,
spec, logsum_tour_purpose,
model_settings, skims,
timetable, window_id_col,
previous_tour, tour_owner_id_col,
estimator,
tour_trace_label):
"""
previous_tour stores values used to add columns that can be used in the spec
which have to do with the previous tours per person. Every column in the
alternatives table is appended with the suffix "_previous" and made
available. So if your alternatives table has columns for start and end,
then start_previous and end_previous will be set to the start and end of
the most recent tour for a person. The first time through,
start_previous and end_previous are undefined, so make sure to protect
with a tour_num >= 2 in the variable computation.
Parameters
----------
tours : DataFrame
chunk of tours to schedule with unique timetable window_id_col
persons_merged : DataFrame
DataFrame of persons to be merged with tours containing attributes referenced
by expressions in spec
alts : DataFrame
DataFrame of alternatives which represent all possible time slots.
tdd_interaction_dataset function will use timetable to filter them to omit
unavailable alternatives
spec : DataFrame
The spec which will be passed to interaction_simulate.
model_settings : dict
timetable : TimeTable
        timetable of time windows for person (or subtour) with rows for tours[window_id_col]
window_id_col : str
column name from tours that identifies timetable owner (or None if tours index)
- person_id for non/mandatory tours
- parent_tour_id for subtours,
        - None (tours index) for joint_tours (since every tour may have different participants)
previous_tour: Series
        series with value of tdd_alt choice for the last previous tour scheduled for each tour owner
tour_owner_id_col : str
column name from tours that identifies 'owner' of this tour
(person_id for non/mandatory tours, parent_tour_id for subtours,
household_id for joint_tours)
tour_trace_label
Returns
-------
"""
logger.info("%s schedule_tours running %d tour choices" % (tour_trace_label, len(tours)))
# merge persons into tours
    # avoid dual suffix for redundant column names (e.g. household_id) that appear in both
tours = pd.merge(tours, persons_merged, left_on='person_id', right_index=True,
suffixes=('', '_y'))
chunk.log_df(tour_trace_label, "tours", tours)
# - add explicit window_id_col for timetable owner if it is index
# if no timetable window_id_col specified, then add index as an explicit column
# (this is not strictly necessary but its presence makes code simpler in several places)
if window_id_col is None:
window_id_col = tours.index.name
tours[window_id_col] = tours.index
# timetable can't handle multiple tours per window_id
assert not tours[window_id_col].duplicated().any()
# - build interaction dataset filtered to include only available tdd alts
# dataframe columns start, end , duration, person_id, tdd
# indexed (not unique) on tour_id
choice_column = TDD_CHOICE_COLUMN
alt_tdd = tdd_interaction_dataset(tours, alts, timetable, choice_column, window_id_col, tour_trace_label)
# print(f"tours {tours.shape} alts {alts.shape}")
chunk.log_df(tour_trace_label, "alt_tdd", alt_tdd)
# - add logsums
if logsum_tour_purpose:
logsums = compute_logsums(alt_tdd, tours, logsum_tour_purpose, model_settings, skims, tour_trace_label)
else:
logsums = 0
alt_tdd['mode_choice_logsum'] = logsums
del logsums
chunk.log_df(tour_trace_label, "alt_tdd", alt_tdd)
# - merge in previous tour columns
# adds start_previous and end_previous, joins on index
tours = tours.join(get_previous_tour_by_tourid(tours[tour_owner_id_col], previous_tour, alts))
chunk.log_df(tour_trace_label, "tours", tours)
# - make choices
locals_d = {
'tt': timetable
}
constants = config.get_model_constants(model_settings)
if constants is not None:
locals_d.update(constants)
if not RUN_ALTS_PREPROCESSOR_BEFORE_MERGE:
# Note: Clint was running alts_preprocessor here on tdd_interaction_dataset instead of on raw (unmerged) alts
# and he was using logsum_tour_purpose as selector, although logically it should be the spec_segment
# It just happened to work for example_arc.mandatory_tour_scheduling because, in that model, (unlike semcog)
# logsum_tour_purpose and spec_segments are aligned (both logsums and spec are segmented on work, school, univ)
# In any case, I don't see any benefit to doing this here - at least not for any existing implementations
# but if we do, it will require passing spec_segment to schedule_tours and _schedule_tours
        # or redundantly segmenting alts (yuck!) to conform to more granular tour_segmentation (e.g. univ do school)
spec_segment = logsum_tour_purpose # FIXME this is not always right - see note above
alt_tdd = run_alts_preprocessor(model_settings, alt_tdd, spec_segment, locals_d, tour_trace_label)
chunk.log_df(tour_trace_label, "alt_tdd", alt_tdd)
if estimator:
# write choosers after annotation
estimator.write_choosers(tours)
estimator.set_alt_id(choice_column)
estimator.write_interaction_sample_alternatives(alt_tdd)
log_alt_losers = config.setting('log_alt_losers', False)
choices = interaction_sample_simulate(
tours,
alt_tdd,
spec,
choice_column=choice_column,
log_alt_losers=log_alt_losers,
locals_d=locals_d,
chunk_size=0,
trace_label=tour_trace_label,
estimator=estimator
)
chunk.log_df(tour_trace_label, 'choices', choices)
# - update previous_tour and timetable parameters
# update previous_tour (series with most recent previous tdd choices) with latest values
previous_tour.loc[tours[tour_owner_id_col]] = choices.values
# update timetable with chosen tdd footprints
timetable.assign(tours[window_id_col], choices)
return choices
def schedule_tours(
tours, persons_merged, alts,
spec, logsum_tour_purpose,
model_settings,
timetable, timetable_window_id_col,
previous_tour, tour_owner_id_col,
estimator,
chunk_size, tour_trace_label, tour_chunk_tag):
"""
chunking wrapper for _schedule_tours
While interaction_sample_simulate provides chunking support, the merged tours, persons
dataframe and the tdd_interaction_dataset are very big, so we want to create them inside
the chunking loop to minimize memory footprint. So we implement the chunking loop here,
and pass a chunk_size of 0 to interaction_sample_simulate to disable its chunking support.
"""
if not tours.index.is_monotonic_increasing:
logger.info("schedule_tours %s tours not monotonic_increasing - sorting df")
tours = tours.sort_index()
logger.info("%s schedule_tours running %d tour choices" % (tour_trace_label, len(tours)))
# no more than one tour per timetable_window per call
if timetable_window_id_col is None:
assert not tours.index.duplicated().any()
else:
assert not tours[timetable_window_id_col].duplicated().any()
if 'LOGSUM_SETTINGS' in model_settings:
# we need skims to calculate tvpb skim overhead in 3_ZONE systems for use by calc_rows_per_chunk
skims = skims_for_logsums(logsum_tour_purpose, model_settings, tour_trace_label)
else:
skims = None
result_list = []
for i, chooser_chunk, chunk_trace_label \
in chunk.adaptive_chunked_choosers(tours, chunk_size, tour_trace_label, tour_chunk_tag):
choices = _schedule_tours(chooser_chunk, persons_merged,
alts, spec, logsum_tour_purpose,
model_settings, skims,
timetable, timetable_window_id_col,
previous_tour, tour_owner_id_col,
estimator,
tour_trace_label=chunk_trace_label)
result_list.append(choices)
chunk.log_df(tour_trace_label, f'result_list', result_list)
# FIXME: this will require 2X RAM
# if necessary, could append to hdf5 store on disk:
# http://pandas.pydata.org/pandas-docs/stable/io.html#id2
if len(result_list) > 1:
choices = pd.concat(result_list)
    assert len(choices.index) == len(tours.index)
return choices
def vectorize_tour_scheduling(tours, persons_merged, alts, timetable,
tour_segments, tour_segment_col,
model_settings,
chunk_size=0, trace_label=None):
"""
The purpose of this method is fairly straightforward - it takes tours
and schedules them into time slots. Alternatives should be specified so
as to define those time slots (usually with start and end times).
schedule_tours adds variables that can be used in the spec which have
to do with the previous tours per person. Every column in the
alternatives table is appended with the suffix "_previous" and made
available. So if your alternatives table has columns for start and end,
then start_previous and end_previous will be set to the start and end of
the most recent tour for a person. The first time through,
start_previous and end_previous are undefined, so make sure to protect
with a tour_num >= 2 in the variable computation.
FIXME - fix docstring: tour_segments, tour_segment_col
Parameters
----------
tours : DataFrame
DataFrame of tours containing tour attributes, as well as a person_id
column to define the nth tour for each person.
persons_merged : DataFrame
DataFrame of persons containing attributes referenced by expressions in spec
alts : DataFrame
DataFrame of alternatives which represent time slots. Will be passed to
interaction_simulate in batches for each nth tour.
spec : DataFrame
The spec which will be passed to interaction_simulate.
(or dict of specs keyed on tour_type if tour_types is not None)
model_settings : dict
Returns
-------
choices : Series
A Series of choices where the index is the index of the tours
DataFrame and the values are the index of the alts DataFrame.
timetable : TimeTable
persons timetable updated with tours (caller should replace_table for it to persist)
"""
trace_label = tracing.extend_trace_label(trace_label, 'vectorize_tour_scheduling')
assert len(tours.index) > 0
assert 'tour_num' in tours.columns
assert 'tour_type' in tours.columns
# tours must be scheduled in increasing trip_num order
# second trip of type must be in group immediately following first
# this ought to have been ensured when tours are created (tour_frequency.process_tours)
choice_list = []
    # keep a series of the most recent tours for each person
# initialize with first trip from alts
previous_tour_by_personid = pd.Series(alts.index[0], index=tours.person_id.unique())
timetable_window_id_col = 'person_id'
tour_owner_id_col = 'person_id'
compute_logsums = ('LOGSUM_SETTINGS' in model_settings)
assert isinstance(tour_segments, dict)
# no more than one tour per person per call to schedule_tours
# tours must be scheduled in increasing trip_num order
# second trip of type must be in group immediately following first
# segregate scheduling by tour_type if multiple specs passed in dict keyed by tour_type
for tour_num, nth_tours in tours.groupby('tour_num', sort=True):
tour_trace_label = tracing.extend_trace_label(trace_label, f'tour_{tour_num}')
tour_chunk_tag = tracing.extend_trace_label(trace_label, f"tour_{1 if tour_num == 1 else 'n'}")
if tour_segment_col is not None:
for tour_segment_name, tour_segment_info in tour_segments.items():
segment_trace_label = tracing.extend_trace_label(tour_trace_label, tour_segment_name)
segment_chunk_tag = tracing.extend_trace_label(tour_chunk_tag, tour_segment_name)
# assume segmentation of spec and coefficients are aligned
spec_segment_name = tour_segment_info.get('spec_segment_name')
# assume logsum segmentation is same as tours
logsum_tour_purpose = tour_segment_name if compute_logsums else None
nth_tours_in_segment = nth_tours[nth_tours[tour_segment_col] == tour_segment_name]
if nth_tours_in_segment.empty:
logger.info("skipping empty segment %s" % tour_segment_name)
continue
if RUN_ALTS_PREPROCESSOR_BEFORE_MERGE:
locals_dict = {}
alts = run_alts_preprocessor(model_settings, alts, spec_segment_name, locals_dict, tour_trace_label)
choices = \
schedule_tours(nth_tours_in_segment, persons_merged, alts,
spec=tour_segment_info['spec'],
logsum_tour_purpose=logsum_tour_purpose,
model_settings=model_settings,
timetable=timetable,
timetable_window_id_col=timetable_window_id_col,
previous_tour=previous_tour_by_personid,
tour_owner_id_col=tour_owner_id_col,
estimator=tour_segment_info.get('estimator'),
chunk_size=chunk_size,
tour_trace_label=segment_trace_label, tour_chunk_tag=segment_chunk_tag)
choice_list.append(choices)
else:
# MTC non_mandatory_tours are not segmented by tour_purpose and do not require logsums
# FIXME should support logsums?
assert not compute_logsums, "logsums for unsegmented spec not implemented because not currently needed"
assert tour_segments.get('spec_segment_name') is None
choices = \
schedule_tours(nth_tours, persons_merged, alts,
spec=tour_segments['spec'],
logsum_tour_purpose=None,
model_settings=model_settings,
timetable=timetable,
timetable_window_id_col=timetable_window_id_col,
previous_tour=previous_tour_by_personid,
tour_owner_id_col=tour_owner_id_col,
estimator=tour_segments.get('estimator'),
chunk_size=chunk_size,
tour_trace_label=tour_trace_label, tour_chunk_tag=tour_chunk_tag)
choice_list.append(choices)
choices = pd.concat(choice_list)
return choices
def vectorize_subtour_scheduling(parent_tours, subtours, persons_merged, alts, spec,
model_settings,
estimator,
chunk_size=0, trace_label=None):
"""
Like vectorize_tour_scheduling but specifically for atwork subtours
subtours have a few peculiarities necessitating separate treatment:
Timetable has to be initialized to set all timeperiods outside parent tour footprint as
unavailable. So atwork subtour timewindows are limited to the footprint of the parent work
tour. And parent_tour_id' column of tours is used instead of parent_id as timetable row_id.
Parameters
----------
parent_tours : DataFrame
        parent tours of the subtours (we need to know the tdd of the parent tour to
        assign_subtour_mask of the timetable, which is indexed by parent_tour_id)
subtours : DataFrame
atwork subtours to schedule
persons_merged : DataFrame
DataFrame of persons containing attributes referenced by expressions in spec
alts : DataFrame
DataFrame of alternatives which represent time slots. Will be passed to
interaction_simulate in batches for each nth tour.
spec : DataFrame
The spec which will be passed to interaction_simulate.
(all subtours share same spec regardless of subtour type)
model_settings : dict
chunk_size
trace_label
Returns
-------
choices : Series
A Series of choices where the index is the index of the subtours
DataFrame and the values are the index of the alts DataFrame.
"""
if not trace_label:
trace_label = 'vectorize_non_mandatory_tour_scheduling'
assert len(subtours.index) > 0
assert 'tour_num' in subtours.columns
assert 'tour_type' in subtours.columns
timetable_window_id_col = 'parent_tour_id'
tour_owner_id_col = 'parent_tour_id'
logsum_tour_purpose = None # FIXME logsums not currently supported
# timetable with a window for each parent tour
parent_tour_windows = tt.create_timetable_windows(parent_tours, alts)
timetable = tt.TimeTable(parent_tour_windows, alts)
# mask the periods outside parent tour footprint
timetable.assign_subtour_mask(parent_tours.tour_id, parent_tours.tdd)
# print timetable.windows
"""
[[7 7 7 0 0 0 0 0 0 0 0 7 7 7 7 7 7 7 7 7 7]
[7 0 0 0 0 0 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7]
[7 7 7 7 7 0 0 0 0 0 0 0 0 0 0 7 7 7 7 7 7]
[7 7 0 0 0 0 0 0 0 7 7 7 7 7 7 7 7 7 7 7 7]]
"""
choice_list = []
    # keep a series of the most recent tours for each person
# initialize with first trip from alts
previous_tour_by_parent_tour_id = \
pd.Series(alts.index[0], index=subtours['parent_tour_id'].unique())
# tours must be scheduled in increasing trip_num order
# second trip of type must be in group immediately following first
# this ought to have been ensured when tours are created (tour_frequency.process_tours)
for tour_num, nth_tours in subtours.groupby('tour_num', sort=True):
tour_trace_label = tracing.extend_trace_label(trace_label, f'tour_{tour_num}')
tour_chunk_tag = tracing.extend_trace_label(trace_label, f"tour_{1 if tour_num == 1 else 'n'}")
# no more than one tour per timetable window per call to schedule_tours
assert not nth_tours.parent_tour_id.duplicated().any()
choices = \
schedule_tours(nth_tours,
persons_merged, alts,
spec, logsum_tour_purpose,
model_settings,
timetable, timetable_window_id_col,
previous_tour_by_parent_tour_id, tour_owner_id_col,
estimator,
chunk_size, tour_trace_label, tour_chunk_tag)
choice_list.append(choices)
choices = pd.concat(choice_list)
# print "\nfinal timetable.windows\n%s" % timetable.windows
"""
[[7 7 7 0 0 0 0 2 7 7 4 7 7 7 7 7 7 7 7 7 7]
[7 0 2 7 4 0 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7]
[7 7 7 7 7 2 4 0 0 0 0 0 0 0 0 7 7 7 7 7 7]
[7 7 0 2 7 7 4 0 0 7 7 7 7 7 7 7 7 7 7 7 7]]
"""
    # we don't need to call replace_table() for this nonce timetable
    # because subtours occur during the person's already-scheduled timetable time
return choices
def build_joint_tour_timetables(joint_tours, joint_tour_participants, persons_timetable, alts):
# timetable with a window for each joint tour
joint_tour_windows_df = tt.create_timetable_windows(joint_tours, alts)
joint_tour_timetable = tt.TimeTable(joint_tour_windows_df, alts)
for participant_num, nth_participants in \
joint_tour_participants.groupby('participant_num', sort=True):
# nth_participant windows from persons_timetable
participant_windows = persons_timetable.slice_windows_by_row_id(nth_participants.person_id)
# assign them joint_tour_timetable
joint_tour_timetable.assign_footprints(nth_participants.tour_id, participant_windows)
return joint_tour_timetable
def vectorize_joint_tour_scheduling(
joint_tours, joint_tour_participants,
persons_merged, alts, persons_timetable,
spec, model_settings,
estimator,
chunk_size=0, trace_label=None):
"""
Like vectorize_tour_scheduling but specifically for joint tours
joint tours have a few peculiarities necessitating separate treatment:
Timetable has to be initialized to set all timeperiods...
Parameters
----------
tours : DataFrame
DataFrame of tours containing tour attributes, as well as a person_id
column to define the nth tour for each person.
persons_merged : DataFrame
DataFrame of persons containing attributes referenced by expressions in spec
alts : DataFrame
DataFrame of alternatives which represent time slots. Will be passed to
interaction_simulate in batches for each nth tour.
spec : DataFrame
The spec which will be passed to interaction_simulate.
(or dict of specs keyed on tour_type if tour_types is not None)
model_settings : dict
Returns
-------
choices : Series
A Series of choices where the index is the index of the tours
DataFrame and the values are the index of the alts DataFrame.
persons_timetable : TimeTable
timetable updated with joint tours (caller should replace_table for it to persist)
"""
trace_label = tracing.extend_trace_label(trace_label, 'vectorize_joint_tour_scheduling')
assert len(joint_tours.index) > 0
assert 'tour_num' in joint_tours.columns
assert 'tour_type' in joint_tours.columns
timetable_window_id_col = None
tour_owner_id_col = 'household_id'
logsum_tour_purpose = None # FIXME logsums not currently supported
choice_list = []
    # keep a series of the most recent tours for each person
# initialize with first trip from alts
previous_tour_by_householdid = pd.Series(alts.index[0], index=joint_tours.household_id.unique())
# tours must be scheduled in increasing trip_num order
# second trip of type must be in group immediately following first
# this ought to have been ensured when tours are created (tour_frequency.process_tours)
# print "participant windows before scheduling\n%s" % \
# persons_timetable.slice_windows_by_row_id(joint_tour_participants.person_id)
for tour_num, nth_tours in joint_tours.groupby('tour_num', sort=True):
tour_trace_label = tracing.extend_trace_label(trace_label, f'tour_{tour_num}')
tour_chunk_tag = tracing.extend_trace_label(trace_label, f"tour_{1 if tour_num == 1 else 'n'}")
# no more than one tour per household per call to schedule_tours
assert not nth_tours.household_id.duplicated().any()
nth_participants = \
joint_tour_participants[joint_tour_participants.tour_id.isin(nth_tours.index)]
timetable = build_joint_tour_timetables(
nth_tours, nth_participants,
persons_timetable, alts)
choices = \
schedule_tours(nth_tours,
persons_merged, alts,
spec,
logsum_tour_purpose,
model_settings,
timetable, timetable_window_id_col,
previous_tour_by_householdid, tour_owner_id_col,
estimator,
chunk_size, tour_trace_label, tour_chunk_tag)
# - update timetables of all joint tour participants
persons_timetable.assign(
nth_participants.person_id,
reindex(choices, nth_participants.tour_id))
choice_list.append(choices)
choices = pd.concat(choice_list)
return choices
|
StarcoderdataPython
|
4935913
|
from functools import partial
import itertools as it
import more_itertools as mit
import operator as op
with open('input') as fh:
initial_state = [int(x) for x in fh.readline().split(',')]
program = initial_state.copy()
inputs = [1]
operations = {1: op.add, 2: op.mul, 3: inputs.pop, 4: print}
parameter_counts = {1: 2, 2: 2, 3: 0, 4: 1}
program_modifiers = dict.fromkeys((1, 2, 3, 4), partial(op.setitem, program))
index_modifiers = dict.fromkeys((1, 2, 3, 4), lambda i, v: i + 1)
parameter_modes = {0: partial(op.getitem, program), 1: lambda x: x}
def run():
index = 0
while index < len(program):
instruction = str(program[index])
index += 1
op_code = int(instruction[-2:])
if op_code == 99:
break
if parameter_counts[op_code]:
parameters = program[index : index + parameter_counts[op_code]]
modes = mit.padded(reversed(instruction[:-2]), 0, n=len(parameters))
parameters = [parameter_modes[int(m)](int(p)) for m, p in zip(modes, parameters)]
index += parameter_counts[op_code]
else:
parameters = ()
value = operations[op_code](*parameters)
if value is not None:
program_modifiers[op_code](program[index], value)
index = index_modifiers[op_code](index, value)
print('Part 1:')
run()
operations[5] = operations[6] = lambda *args: args
parameter_counts[5] = parameter_counts[6] = 2
program_modifiers[5] = program_modifiers[6] = lambda i, v: None
index_modifiers[5] = lambda i, v: v[1] if v[0] else i
index_modifiers[6] = lambda i, v: v[1] if not v[0] else i
operations[7] = op.lt
operations[8] = op.eq
parameter_counts[7] = parameter_counts[8] = 2
program_modifiers[7] = program_modifiers[8] = partial(op.setitem, program)
index_modifiers[7] = index_modifiers[8] = lambda i, v: i + 1
program[:] = initial_state
inputs[:] = [5]
print('Part 2:')
run()
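# Worked decoding example (taken from the day-5 puzzle statement, not part of this solution):
# instruction 1002 in program [1002, 4, 3, 4, 33] gives op_code 02 (multiply) and modes '10'
# read right-to-left, i.e. parameter 1 in position mode (program[4] == 33) and parameter 2 in
# immediate mode (3), so value = 33 * 3 = 99, which run() stores at the destination index held
# in program[index] (here 4), turning that cell into a halt (99).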
|
StarcoderdataPython
|
4922028
|
from tokenize_output.tokenize_output import dict_keys_values
def test_dict_keys_values():
assert dict_keys_values([{'abc':{'b':{'c':123}}, 'd':[[1,2,3], None, True, {'e':1}]},4]) == {'keys': ['abc', 'b', 'c', 'd', 'e'], 'values': [123, 1, 2, 3, True, 1, 4]}
|
StarcoderdataPython
|
6524427
|
###########################################################################
#
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
'''
Bulkdozer
Bulkdozer is a tool that can reduce trafficking time in Campaign Manager by up to 80%% by providing automated bulk editing capabilities.
Open the <a href='https://docs.google.com/spreadsheets/d/1hLrpsapXY5jsbgQRcSpjH04avvdcortufI0OXSJZQH8' target='_blank'>Bulkdozer 0.23</a> feed.
Make your own copy of the feed by clicking the File -> Make a copy... menu in the feed.
Give it a meaningful name including the version, your name, and team to help you identify it and ensure you are using the correct version.
Under the Account ID field below, enter your Campaign Manager Network ID.
Under Sheet URL, enter the URL of your copy of the feed that you just created in the steps above.
Go to the Store tab of your new feed, and enter your profile ID in the profile Id field (cell B2). Your profile ID is visible in Campaign Manager by clicking your avatar on the top right corner.
Click the 'Save' button below.
After clicking 'Save', copy this page's URL from your browser address bar, and paste it in the Store tab for the recipe_url field (cell B5) of your sheet.
Bulkdozer is ready for use.
Stay up to date on new releases and other general announcements by joining <a href='https://groups.google.com/forum/#!forum/bulkdozer-announcements' target='_blank'>Bulkdozer announcements</a>.
Review the <a href='https://github.com/google/starthinker/blob/master/tutorials/Bulkdozer/Installation_and_User_guides.md' target='_blank'>Bulkdozer documentation</a>.
'''
from starthinker_airflow.factory import DAG_Factory
USER_CONN_ID = "google_cloud_default" # The connection to use for user authentication.
GCP_CONN_ID = "" # The connection to use for service authentication.
INPUTS = {
'recipe_timezone': 'America/Chicago', # Timezone for report dates.
'account_id': '', # Campaign Manager Network ID (optional if profile id provided)
'dcm_profile_id': '', # Campaign Manager Profile ID (optional if account id provided)
'sheet_url': '', # Feed Sheet URL
}
TASKS = [
{
'traffic': {
'hour': [
],
'account_id': {
'field': {
'name': 'account_id',
'kind': 'string',
'order': 1,
'description': 'Campaign Manager Network ID (optional if profile id provided)',
'default': ''
}
},
'dcm_profile_id': {
'field': {
'name': 'dcm_profile_id',
'kind': 'string',
'order': 1,
'description': 'Campaign Manager Profile ID (optional if account id provided)',
'default': ''
}
},
'auth': 'user',
'sheet_url': {
'field': {
'name': 'sheet_url',
'kind': 'string',
'order': 2,
'description': 'Feed Sheet URL',
'default': ''
}
},
'timezone': {
'field': {
'name': 'recipe_timezone',
'kind': 'timezone',
'description': 'Timezone for report dates.',
'default': 'America/Chicago'
}
}
}
}
]
DAG_FACTORY = DAG_Factory('bulkdozer', { 'tasks':TASKS }, INPUTS)
DAG_FACTORY.apply_credentails(USER_CONN_ID, GCP_CONN_ID)
DAG = DAG_FACTORY.execute()
if __name__ == "__main__":
DAG_FACTORY.print_commandline()
|
StarcoderdataPython
|
1780713
|
#!/usr/bin/python3
import requests
from urllib.request import urlopen
from requests import get
import time
import logging
# Credentials
username = 'username goes here'
password = '<PASSWORD>'
hostname = 'sub.example.com'
timer = 30
ip = ''
logging.basicConfig(filename="DynDNS.log",
format='%(asctime)s %(message)s',
filemode='w')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
while True:
try:
current_ip = urlopen('https://api.ipify.org/').read().decode('utf-8')
except:
logger.error('SOMETHING WENT WRONG!!!')
time.sleep(timer)
else:
if ip != current_ip:
# Use this line for IPv4
url = 'https://{}:{}@domains.google.com/nic/update?hostname={}&myip={}'.format(
username, password, hostname, current_ip)
resp = requests.post(url)
out = resp.content.decode('utf-8')
if ('good' in out or 'nochg' in out):
ip = current_ip
logger.info('****{} UPDATE****'.format(hostname))
logger.info('DynDNS Response {}'.format(out))
# Use this line for IPv6
# url = 'https://{}:{}@domains.google.com/nic/update?hostname={}'.format(username,password,hostname)
time.sleep(timer)
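# Response handling note (based on the documented Google Domains dynamic DNS API): a
# successful update returns 'good <ip>' and an unchanged record returns 'nochg <ip>',
# e.g. 'good 203.0.113.7' (example address), which is why the check above accepts either
# substring before caching the new IP.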
|
StarcoderdataPython
|
9799828
|
# repo: ezrankayamba/noxyt_bulkpay, file: backend_rest/payments/tasks.py
from background_task import background
from payments import models
def load_files():
print("Loading files...")
batch = models.Batch.objects.filter(status=1).first()
count = 0
while(batch):
batch.status = 2
batch.save()
count += 1
print(f'Batch processed: {count} - {batch.id}')
batch = models.Batch.objects.filter(status=1).first()
print(f'Initial processing completed by processing {count} batches')
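# Usage sketch (assumption, not in the original file): the `background` import suggests
# load_files was meant to run off the request cycle via django-background-tasks, e.g.
# decorated as `@background(schedule=60)` so that calling load_files() merely queues a
# task for the worker started with `python manage.py process_tasks`.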
|
StarcoderdataPython
|
71032
|
# repo: edwinsteele/weather-analyser, file: retrievers/wunderground_retriever.py
from base_retrievers import AbstractRetriever
from models import Observation, SingleForecast
import decimal
import datetime
import json
__author__ = 'esteele'
class WundergroundRetriever(AbstractRetriever):
"""
http://www.wunderground.com/weather/api/d/docs
"local_epoch":"1364800751"
"temp_c":22
"""
def __init__(self, api_key):
super(WundergroundRetriever, self).__init__()
self.api_key = api_key
@property
def source(self):
return "Wunderground"
@property
def observation_reload_delay(self):
return 30
@property
def forecast_reload_delay(self):
return 60
def generate_observation_request_for_location(self, location):
return "http://api.wunderground.com/api/%s" \
"/conditions/q/%s/%s.json" % (self.api_key,
location.WUNDERGROUND_MAJOR_LOC,
location.WUNDERGROUND_MINOR_LOC)
def generate_forecast_request_for_location(self, location):
# They have a different URL for hourly forecasts!
return "http://api.wunderground.com/api/%s" \
"/forecast10day/q/%s/%s.json" % (self.api_key,
location.WUNDERGROUND_MAJOR_LOC,
location.WUNDERGROUND_MINOR_LOC)
def parse_observation_response(self, result):
most_recent_ob = json.loads(
result, parse_int=decimal.Decimal)["current_observation"]
ob_datetime = datetime.datetime.fromtimestamp(
float(most_recent_ob["local_epoch"]))
return Observation(self.source, ob_datetime, most_recent_ob["temp_c"])
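    # Sketch of the mapping above (values taken from the class docstring): a payload whose
    # current_observation contains "local_epoch": "1364800751" and "temp_c": 22 becomes
    # Observation('Wunderground', a datetime around 2013-04-01 shifted to the machine's
    # local timezone, 22).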
def parse_forecast_response(self, result):
forecast_results = []
json_result = json.loads(
result,
parse_float=decimal.Decimal)
issue_date_best_guess = datetime.datetime.now() #XXX ????
for daily_forecast in json_result["forecast"]["simpleforecast"]["forecastday"]:
start_time = datetime.datetime.fromtimestamp(float(daily_forecast["date"]["epoch"]))
issue_date_best_guess = min(issue_date_best_guess, start_time) #????
forecast_results.append(SingleForecast(
self.source,
SingleForecast.DAILY_FORECAST_TYPE,
start_time,
start_time + datetime.timedelta(hours=23, minutes=59),
issue_date_best_guess,
daily_forecast["low"]["celsius"],
daily_forecast["high"]["celsius"]
))
# Do hourly forecasts later
return forecast_results
|
StarcoderdataPython
|
9621537
|
# Generated by Django 2.0.1 on 2020-07-21 17:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('picture', '0005_image_thumbnail'),
]
operations = [
migrations.AlterField(
model_name='album',
name='name',
field=models.CharField(default='No Name', max_length=64, verbose_name='相册名'),
),
migrations.AlterField(
model_name='image',
name='img',
field=models.ImageField(default='picture/cover.jpeg', upload_to='picture/%Y/%m/%d/', verbose_name='照片'),
),
migrations.AlterField(
model_name='image',
name='thumbnail',
field=models.URLField(blank=True),
),
]
|
StarcoderdataPython
|
3210227
|
#!/usr/bin/env python3
# Copyright 2019 Google, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import logging
import re
import sys
import time
import googleapiclient
import googleapiclient.discovery
DISK_REGEXP = r'^https:\/\/www\.googleapis\.com\/compute\/v1\/projects\/(.*?)\/zones\/(.*?)\/disks\/(.*?)$'
def main():
parser = argparse.ArgumentParser(
description=
'''Update disks attached to a GCE instance to customer supplied disk-type '''
)
parser.add_argument('--project',
required=True,
dest='project',
action='store',
type=str,
help='Project containing the GCE instance.')
parser.add_argument('--zone',
required=True,
dest='zone',
action='store',
type=str,
help='Zone containing the GCE instance.')
parser.add_argument('--instance',
required=True,
dest='instance',
action='store',
type=str,
help='Instance name.')
parser.add_argument('--disktype',
required=True,
dest='disktype',
action='store',
type=str,
help='New disk that will replace the old one')
parser.add_argument(
'--destructive',
dest='destructive',
action='store_const',
const=True,
default=False,
help=
'Upon completion, delete source disks and snapshots created during migration process.'
)
args = parser.parse_args()
create_newdisk_process(args.project, args.zone, args.instance,
args.disktype, args.destructive)
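# Example invocation (hypothetical project/zone/instance names and script filename;
# pd-ssd is one of the standard GCE disk types, which the script expands to the full
# diskTypes URL before attaching):
#   python3 update_disk_type.py --project my-project --zone us-central1-a \
#       --instance my-instance --disktype pd-ssd --destructive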
def create_newdisk_process(project, zone, instance, disktype, destructive):
start = time.time()
region = zone.rpartition("-")[0]
compute = googleapiclient.discovery.build('compute', 'v1')
stop_instance(compute, project, zone, instance)
disks = get_instance_disks(compute, project, zone, instance)
disktype = 'https://www.googleapis.com/compute/v1/projects/{0}/zones/{1}/diskTypes/'.format(
project, zone) + disktype
for source_disk in disks:
disk_url = source_disk['source']
boot = source_disk['boot']
auto_delete = source_disk['autoDelete']
deviceName = source_disk['deviceName'][0:46]
existing_disk_name = re.search(DISK_REGEXP, disk_url).group(3)
snapshot_name = '{}-update-disk-{}'.format(
existing_disk_name[0:39], int(datetime.datetime.now().timestamp()))
new_disk_name = '{}-disk-{}'.format(
existing_disk_name[0:46], int(datetime.datetime.now().timestamp()))
create_snapshot(compute, project, zone, existing_disk_name,
snapshot_name)
create_disk(compute, project, region, zone, snapshot_name,
new_disk_name, disktype)
detach_disk(compute, project, zone, instance, deviceName)
attach_disk(compute, project, zone, instance, new_disk_name, boot,
auto_delete, deviceName)
if destructive:
delete_disk(compute, project, zone, existing_disk_name)
delete_snapshot(compute, project, snapshot_name)
start_instance(compute, project, zone, instance)
end = time.time()
logging.info('Migration took %s seconds.', end - start)
def get_disk_type(compute, project, zone, disk_name):
logging.debug('Getting project=%s, zone=%s, disk_name=%s metadata', project,
zone, disk_name)
result = compute.disks().get(project=project, zone=zone,
disk=disk_name).execute()
logging.debug(
'Getting project=%s, zone=%s, disk_name=%s metadata complete.', project,
zone, disk_name)
return result['type']
def get_instance_disks(compute, project, zone, instance):
logging.debug('Getting project=%s, zone=%s, instance=%s disks', project,
zone, instance)
result = compute.instances().get(project=project,
zone=zone,
instance=instance).execute()
logging.debug('Getting project=%s, zone=%s, instance=%s disks complete.',
project, zone, instance)
return result['disks']
def create_snapshot(compute, project, zone, disk, snapshot_name):
body = {
'name': snapshot_name,
}
logging.debug('Creating snapshot of disk project=%s, zone=%s, disk=%s',
project, zone, disk)
operation = compute.disks().createSnapshot(project=project,
zone=zone,
disk=disk,
body=body).execute()
result = wait_for_zonal_operation(compute, project, zone, operation)
logging.debug('Snapshotting of disk project=%s, zone=%s, disk=%s complete.',
project, zone, disk)
return result
def delete_snapshot(compute, project, snapshot_name):
logging.debug('Deleting snapshot project=%s, snapshot_name=%s', project,
snapshot_name)
operation = compute.snapshots().delete(project=project,
snapshot=snapshot_name).execute()
result = wait_for_global_operation(compute, project, operation)
logging.debug('Deleting snapshot project=%s, snapshot_name=%s complete.',
project, snapshot_name)
return result
def attach_disk(compute, project, zone, instance, disk, boot, auto_delete,
deviceName):
""" Attaches disk to instance.
Requries iam.serviceAccountUser
"""
disk_url = 'projects/{0}/zones/{1}/disks/{2}'.format(project, zone, disk)
body = {
'autoDelete': auto_delete,
'boot': boot,
'deviceName': deviceName,
'source': disk_url,
}
logging.debug('Attaching disk project=%s, zone=%s, instance=%s, disk=%s',
project, zone, instance, disk_url)
operation = compute.instances().attachDisk(project=project,
zone=zone,
instance=instance,
body=body).execute()
result = wait_for_zonal_operation(compute, project, zone, operation)
logging.debug(
'Attaching disk project=%s, zone=%s, instance=%s, disk=%s complete.',
project, zone, instance, disk_url)
return result
def detach_disk(compute, project, zone, instance, disk):
logging.debug('Detaching disk project=%s, zone=%s, instance=%s, disk=%s',
project, zone, instance, disk)
operation = compute.instances().detachDisk(project=project,
zone=zone,
instance=instance,
deviceName=disk).execute()
result = wait_for_zonal_operation(compute, project, zone, operation)
logging.debug(
'Detaching disk project=%s, zone=%s, instance=%s, disk=%s complete.',
project, zone, instance, disk)
return result
def delete_disk(compute, project, zone, disk):
logging.debug('Deleting disk project=%s, zone=%s, disk=%s', project, zone,
disk)
operation = compute.disks().delete(project=project, zone=zone,
disk=disk).execute()
result = wait_for_zonal_operation(compute, project, zone, operation)
logging.debug('Deleting disk project=%s, zone=%s, disk=%s complete.',
project, zone, disk)
return result
def create_disk(compute, project, region, zone, snapshot_name, disk_name,
disk_type):
"""Creates a new user supplied disk-type from snapshot"""
source_snapshot = 'projects/{0}/global/snapshots/{1}'.format(
project, snapshot_name)
body = {
'name': disk_name,
'sourceSnapshot': source_snapshot,
'type': disk_type
}
logging.debug(
        'Creating new disk project=%s, zone=%s, name=%s source_snapshot=%s',
project, zone, disk_name, source_snapshot)
operation = compute.disks().insert(project=project, zone=zone,
body=body).execute()
result = wait_for_zonal_operation(compute, project, zone, operation)
logging.debug(
        'Creating new disk project=%s, zone=%s, name=%s source_snapshot=%s complete.',
project, zone, disk_name, source_snapshot)
return result
def start_instance(compute, project, zone, instance):
logging.debug('Starting project=%s, zone=%s, instance=%s', project, zone,
instance)
operation = compute.instances().start(project=project,
zone=zone,
instance=instance).execute()
result = wait_for_zonal_operation(compute, project, zone, operation)
logging.debug('Starting project=%s, zone=%s, instance=%s complete.',
project, zone, instance)
return result
def stop_instance(compute, project, zone, instance):
logging.debug('Stopping project=%s, zone=%s, instance=%s', project, zone,
instance)
operation = compute.instances().stop(project=project,
zone=zone,
instance=instance).execute()
result = wait_for_zonal_operation(compute, project, zone, operation)
logging.debug('Stopping project=%s, zone=%s, instance=%s complete.',
project, zone, instance)
return result
def wait_for_global_operation(compute, project, operation):
"""Helper for waiting for global operation to complete."""
operation = operation['name']
def build():
return compute.globalOperations().get(project=project,
operation=operation)
return _wait_for_operation(operation, build)
def wait_for_zonal_operation(compute, project, zone, operation):
"""Helper for waiting for zonal operation to complete."""
operation = operation['name']
def build():
return compute.zoneOperations().get(project=project,
zone=zone,
operation=operation)
return _wait_for_operation(operation, build)
def _wait_for_operation(operation, build_request):
"""Helper for waiting for operation to complete."""
logging.debug('Waiting for %s', operation)
while True:
sys.stdout.flush()
result = build_request().execute()
if result['status'] == 'DONE':
logging.debug('done!')
if 'error' in result:
logging.error('finished with an error')
logging.error('Error %s', result['error'])
raise Exception(result['error'])
return result
time.sleep(5)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1660061
|
# repo: JBlaschke/lcls2, file: psana/psana/graphqt/PSPopupSelectExp.py
#------------------------------
# Module PSPopupSelectExp...
#------------------------------
from PyQt5.QtWidgets import QDialog, QListWidget, QPushButton, QListWidgetItem,\
QVBoxLayout, QHBoxLayout, QTabBar
from PyQt5.QtCore import Qt, QPoint, QMargins, QEvent
from PyQt5.QtGui import QFont, QColor, QCursor
#------------------------------
def years(lst_exp) :
years = []
for exp in lst_exp :
year = exp[-2:]
if year in years : continue
if not year.isdigit() : continue
years.append(year)
return ['20%s'%y for y in sorted(years)]
#------------------------------
def years_and_runs(lst_exp) :
years = []
runs = []
for exp in lst_exp :
if len(exp) != 8 : continue
year = exp[-2:]
if year in years : continue
if not year.isdigit() : continue
years.append(year)
for exp in lst_exp :
if len(exp) != 9 : continue
run = exp[-2:]
if run in runs : continue
if not run.isdigit() : continue
runs.append(run)
return ['20%s'%y for y in sorted(years)], ['Run:%s'%r for r in sorted(runs)]
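# Minimal sketch (hypothetical experiment names): years_and_runs(['xpptut15', 'cxilr0916'])
# treats the 8-character name as a year entry and the 9-character name as a run entry,
# returning (['2015'], ['Run:16']) from the last two digits of each.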
#------------------------------
def lst_exp_for_year(lst_exp, year) :
str_year = year if isinstance(year,str) else '%4d'%year
pattern = str_year[-2:] # two last digits if the year
return [exp for exp in lst_exp if exp[-2:]==pattern]
#------------------------------
class PSPopupSelectExp(QDialog) :
"""
"""
def __init__(self, parent=None, lst_exp=[]):
QDialog.__init__(self, parent)
self.name_sel = None
self.list = QListWidget(parent)
self.fill_list(lst_exp)
# Confirmation buttons
#self.but_cancel = QPushButton('&Cancel')
#self.but_apply = QPushButton('&Apply')
#cp.setIcons()
#self.but_cancel.setIcon(cp.icon_button_cancel)
#self.but_apply .setIcon(cp.icon_button_ok)
#self.connect(self.but_cancel, QtCore.SIGNAL('clicked()'), self.onCancel)
#self.connect(self.but_apply, QtCore.SIGNAL('clicked()'), self.onApply)
#self.hbox = QVBoxLayout()
#self.hbox.addWidget(self.but_cancel)
#self.hbox.addWidget(self.but_apply)
##self.hbox.addStretch(1)
vbox = QVBoxLayout()
vbox.addWidget(self.list)
#vbox.addLayout(self.hbox)
self.setLayout(vbox)
self.list.itemClicked.connect(self.onItemClick)
self.showToolTips()
self.setStyle()
def fill_list_v0(self, lst_exp) :
for exp in sorted(lst_exp) :
item = QListWidgetItem(exp, self.list)
self.list.sortItems(Qt.AscendingOrder)
def fill_list_v1(self, lst_exp) :
self.years = sorted(years(lst_exp))
for year in self.years :
item = QListWidgetItem(year, self.list)
item.setFont(QFont('Courier', 14, QFont.Bold))
item.setFlags(Qt.NoItemFlags)
#item.setFlags(Qt.NoItemFlags ^ Qt.ItemIsEnabled ^ Qt.ItemIsSelectable)
for exp in sorted(lst_exp_for_year(lst_exp, year)) :
item = QListWidgetItem(exp, self.list)
item.setFont(QFont('Monospace', 11, QFont.Normal)) # Bold))
def fill_list(self, lst_exp) :
self.years, self.runs = years_and_runs(lst_exp)
for year in self.years :
item = QListWidgetItem(year, self.list)
item.setFont(QFont('Courier', 14, QFont.Bold))
item.setFlags(Qt.NoItemFlags)
#item.setFlags(Qt.NoItemFlags ^ Qt.ItemIsEnabled ^ Qt.ItemIsSelectable)
for exp in sorted(lst_exp_for_year(lst_exp, year)) :
if len(exp) != 8 : continue
item = QListWidgetItem(exp, self.list)
item.setFont(QFont('Monospace', 11, QFont.Normal)) # Bold))
for run in self.runs :
item = QListWidgetItem(run, self.list)
item.setFont(QFont('Courier', 14, QFont.Bold))
item.setFlags(Qt.NoItemFlags)
#item.setFlags(Qt.NoItemFlags ^ Qt.ItemIsEnabled ^ Qt.ItemIsSelectable)
for exp in sorted(lst_exp_for_year(lst_exp, run)) :
if len(exp) != 9 : continue
item = QListWidgetItem(exp, self.list)
item.setFont(QFont('Monospace', 11, QFont.Normal)) # Bold))
def setStyle(self):
self.setWindowTitle('Select experiment')
self.setFixedWidth(120)
self.setMinimumHeight(600)
#self.setMaximumWidth(600)
#self.setStyleSheet(cp.styleBkgd)
self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)
self.layout().setContentsMargins(2,2,2,2)
#self.setStyleSheet(cp.styleBkgd)
#self.but_cancel.setStyleSheet(cp.styleButton)
#self.but_apply.setStyleSheet(cp.styleButton)
self.move(QCursor.pos().__add__(QPoint(-110,-50)))
def showToolTips(self):
#self.but_apply.setToolTip('Apply selection')
#self.but_cancel.setToolTip('Cancel selection')
self.setToolTip('Select experiment')
def onItemClick(self, item):
#if item.isSelected(): item.setSelected(False)
#widg = self.list.itemWidget(item)
#item.checkState()
self.name_sel = item.text()
if self.name_sel in self.years : return # ignore selection of year
if self.name_sel in self.runs : return # ignore selection of run
#print(self.name_sel)
#logger.debug('Selected experiment %s' % self.name_sel, __name__)
self.accept()
def event(self, e):
"""Intercepts mouse clicks outside popup window"""
#print('event.type', e.type())
if e.type() == QEvent.WindowDeactivate :
self.reject()
return QDialog.event(self, e)
def closeEvent(self, event):
#logger.info('closeEvent', __name__)
self.reject()
def selectedName(self):
return self.name_sel
def onCancel(self):
#logger.debug('onCancel', __name__)
self.reject()
def onApply(self):
#logger.debug('onApply', __name__)
self.accept()
#------------------------------
#----------- TESTS ------------
#------------------------------
if __name__ == "__main__" :
def select_experiment_v1(parent, lst_exp) :
w = PSPopupSelectExp(parent, lst_exp)
##w.show()
resp=w.exec_()
if resp == QDialog.Accepted : return w.selectedName()
elif resp == QDialog.Rejected : return None
else : return None
#------------------------------
def test_all(tname) :
import os
from PyQt5.QtWidgets import QApplication
lst_exp = sorted(os.listdir('/reg/d/psdm/SXR/'))
#lst_exp = sorted(os.listdir('/reg/d/psdm/CXI/'))
#print('lst_exps:', lst_exp)
        print('years from the list of experiments', years(lst_exp))
        print('years and runs from the list of experiments', str(years_and_runs(lst_exp)))
print('experiments for 2016:', lst_exp_for_year(lst_exp, '2016'))
app = QApplication(sys.argv)
exp_name = 'N/A'
if tname == '1': exp_name = select_experiment_v1(None, lst_exp)
print('exp_name = %s' % exp_name)
del app
#------------------------------
if __name__ == "__main__" :
import sys; global sys
tname = sys.argv[1] if len(sys.argv) > 1 else '1'
print(50*'_', '\nTest %s' % tname)
test_all(tname)
sys.exit('End of Test %s' % tname)
#------------------------------
|
StarcoderdataPython
|
11287012
|
<reponame>jojonki/QA-LSTM<filename>train.py
'''
LSTM-based Deep Learning Models for Non-factoid Answer Selection
<NAME>, <NAME>, <NAME>, <NAME>, ICLR 2016
https://arxiv.org/abs/1511.04108
'''
import os
import random
import argparse
from tqdm import tqdm
import numpy as np
import torch
from gensim.models.keyedvectors import KeyedVectors
from utils import load_data, load_data2, load_vocabulary, Config, load_embd_weights
from utils import make_vector
from models import QA_LSTM
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=128, help='input batch size')
parser.add_argument('--start_epoch', type=int, default=0, help='resume epoch count, default=0')
parser.add_argument('--n_epochs', type=int, default=4, help='number of training epochs')
parser.add_argument('--embd_size', type=int, default=300, help='word embedding size')
parser.add_argument('--hidden_size', type=int, default=141, help='hidden size of one-directional LSTM')
parser.add_argument('--max_sent_len', type=int, default=200, help='max sentence length')
parser.add_argument('--margin', type=float, default=0.2, help='margin for loss function')
parser.add_argument('--use_pickle', type=int, default=0, help='load dataset from pickles')
parser.add_argument('--test', type=int, default=0, help='1 for test, 0 for training')
parser.add_argument('--seed', type=int, default=1111, help='random seed')
parser.add_argument('--resume', default='./checkpoints/model_best.tar', type=str, metavar='PATH', help='path to saved params')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
PAD = '<PAD>'
id_to_word, label_to_ans, label_to_ans_text = load_vocabulary('./V2/vocabulary', './V2/InsuranceQA.label2answer.token.encoded')
w2i = {w: i for i, w in enumerate(id_to_word.values(), 1)}
w2i[PAD] = 0
vocab_size = len(w2i)
print('vocab_size:', vocab_size)
train_data = load_data('./V2/InsuranceQA.question.anslabel.token.500.pool.solr.train.encoded', id_to_word, label_to_ans_text)
test_data = load_data2('./V2/InsuranceQA.question.anslabel.token.500.pool.solr.test.encoded', id_to_word, label_to_ans_text)
print('n_train:', len(train_data))
print('n_test:', len(test_data))
args.vocab_size = vocab_size
args.pre_embd = None
print('loading a word2vec binary...')
model_path = './GoogleNews-vectors-negative300.bin'
word2vec = KeyedVectors.load_word2vec_format(model_path, binary=True)
print('loaded!')
pre_embd = load_embd_weights(word2vec, vocab_size, args.embd_size, w2i)
# save_pickle(pre_embd, 'pre_embd.pickle')
args.pre_embd = pre_embd
def save_checkpoint(state, filename):
print('save model!', filename)
torch.save(state, filename)
def loss_fn(pos_sim, neg_sim):
loss = args.margin - pos_sim + neg_sim
if loss.data[0] < 0:
loss.data[0] = 0
return loss
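# An equivalent way to write the margin ranking objective without mutating
# .data (a sketch for reference; the training loop below keeps using loss_fn):
def loss_fn_clamped(pos_sim, neg_sim, margin=args.margin):
    # hinge loss: max(0, margin - pos_sim + neg_sim)
    return torch.clamp(margin - pos_sim + neg_sim, min=0.0)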
def train(model, data, test_data, optimizer, n_epochs=4, batch_size=256):
for epoch in range(n_epochs):
model.train()
print('epoch', epoch)
        random.shuffle(data) # TODO use indices
losses = []
for i, d in enumerate(tqdm(data)):
q, pos, negs = d[0], d[1], d[2]
vec_q = make_vector([q], w2i, len(q))
vec_pos = make_vector([pos], w2i, len(pos))
pos_sim = model(vec_q, vec_pos)
for _ in range(50):
neg = random.choice(negs)
vec_neg = make_vector([neg], w2i, len(neg))
neg_sim = model(vec_q, vec_neg)
loss = loss_fn(pos_sim, neg_sim)
if loss.data[0] != 0:
losses.append(loss)
break
if len(losses) == batch_size or i == len(data) - 1:
loss = torch.mean(torch.stack(losses, 0).squeeze(), 0)
print(loss.data[0])
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses = []
filename = '{}/Epoch-{}.model'.format('./checkpoints', epoch)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
}, filename=filename)
test(model, test_data)
def test(model, data):
acc, total = 0, 0
for d in data:
q = d[0]
print('q', ' '.join(q))
labels = d[1]
cands = d[2]
# preprare answer labels
label_indices = [cands.index(l) for l in labels if l in cands]
# build data
q = make_vector([q], w2i, len(q))
cands = [label_to_ans_text[c] for c in cands] # id to text
max_cand_len = min(args.max_sent_len, max([len(c) for c in cands]))
cands = make_vector(cands, w2i, max_cand_len)
# predict
scores = [model(q, c.unsqueeze(0)).data[0] for c in cands]
pred_idx = np.argmax(scores)
if pred_idx in label_indices:
print('correct')
acc += 1
else:
print('wrong')
total += 1
print('Test Acc:', 100*acc/total, '%')
model = QA_LSTM(args)
if torch.cuda.is_available():
model.cuda()
# optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=0.01)
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()))
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
# best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer']) # TODO ?
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
train(model, train_data, test_data, optimizer)
# test(model, test_data)
|
StarcoderdataPython
|
6617070
|
<filename>seimas/migrations/0022_auto_20180816_1903.py<gh_stars>1-10
# Generated by Django 2.1 on 2018-08-16 19:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('seimas', '0021_politiciangame'),
]
operations = [
migrations.AlterModelOptions(
name='politiciangame',
options={'ordering': ['-created_at'], 'verbose_name_plural': 'Politicians game'},
),
]
|
StarcoderdataPython
|
11270550
|
<reponame>anhuaxiang/compose<filename>composeml/label_times.py<gh_stars>1-10
import json
import os
import pandas as pd
from composeml.label_plots import LabelPlots
def read_csv(path, filename='label_times.csv', load_settings=True):
"""Read label times in csv format from disk.
Args:
path (str) : Directory on disk to read from.
filename (str) : Filename for label times. Default value is `label_times.csv`.
load_settings (bool) : Whether to load the settings used to make the label times.
Returns:
LabelTimes : Deserialized label times.
"""
file = os.path.join(path, filename)
assert os.path.exists(file), "data not found: '%s'" % file
data = pd.read_csv(file, index_col='id')
label_times = LabelTimes(data=data)
if load_settings:
label_times = label_times._load_settings(path)
return label_times
def read_parquet(path, filename='label_times.parquet', load_settings=True):
"""Read label times in parquet format from disk.
Args:
path (str) : Directory on disk to read from.
filename (str) : Filename for label times. Default value is `label_times.parquet`.
load_settings (bool) : Whether to load the settings used to make the label times.
Returns:
LabelTimes : Deserialized label times.
"""
file = os.path.join(path, filename)
assert os.path.exists(file), "data not found: '%s'" % file
data = pd.read_parquet(file)
label_times = LabelTimes(data=data)
if load_settings:
label_times = label_times._load_settings(path)
return label_times
def read_pickle(path, filename='label_times.pickle', load_settings=True):
"""Read label times in parquet format from disk.
Args:
path (str) : Directory on disk to read from.
        filename (str) : Filename for label times. Default value is `label_times.pickle`.
load_settings (bool) : Whether to load the settings used to make the label times.
Returns:
LabelTimes : Deserialized label times.
"""
file = os.path.join(path, filename)
assert os.path.exists(file), "data not found: '%s'" % file
data = pd.read_pickle(file)
label_times = LabelTimes(data=data)
if load_settings:
label_times = label_times._load_settings(path)
return label_times
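# Example round trip (a sketch; assumes `lt` is a LabelTimes produced by a
# label maker and that the target directory is writable):
#
#     lt.to_csv('label_times_dir')        # writes label_times.csv + settings.json
#     lt2 = read_csv('label_times_dir')   # restores both the data and the settings
#     assert lt.equals(lt2)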
class LabelTimes(pd.DataFrame):
"""A data frame containing labels made by a label maker.
Attributes:
settings
"""
_metadata = ['settings']
def __init__(self, data=None, target_entity=None, name=None, label_type=None, settings=None, *args, **kwargs):
super().__init__(data=data, *args, **kwargs)
if label_type is not None:
error = 'label type must be "continuous" or "discrete"'
assert label_type in ['continuous', 'discrete'], error
self.settings = settings or {
'target_entity': target_entity,
'labeling_function': name,
'label_type': label_type,
'transforms': [],
}
self.plot = LabelPlots(self)
def __finalize__(self, other, method=None, **kwargs):
"""Propagate metadata from other label times.
Args:
other (LabelTimes) : The label times from which to get the attributes from.
method (str) : A passed method name for optionally taking different types of propagation actions based on this value.
"""
if method == 'concat':
other = other.objs[0]
for key in self._metadata:
value = getattr(other, key, None)
setattr(self, key, value)
return self
return super().__finalize__(other=other, method=method, **kwargs)
@property
def _constructor(self):
return LabelTimes
@property
def name(self):
"""Get name of label times."""
return self.settings.get('labeling_function')
@name.setter
def name(self, value):
"""Set name of label times."""
self.settings['labeling_function'] = value
@property
def target_entity(self):
"""Get target entity of label times."""
return self.settings.get('target_entity')
@target_entity.setter
def target_entity(self, value):
"""Set target entity of label times."""
self.settings['target_entity'] = value
@property
def label_type(self):
"""Get label type."""
return self.settings.get('label_type')
@label_type.setter
def label_type(self, value):
"""Set label type."""
self.settings['label_type'] = value
@property
def transforms(self):
"""Get transforms of label times."""
return self.settings.get('transforms', [])
@transforms.setter
def transforms(self, value):
"""Set transforms of label times."""
self.settings['transforms'] = value
@property
def is_discrete(self):
"""Whether labels are discrete."""
if self.label_type is None:
self.label_type = self.infer_type()
return self.label_type == 'discrete'
@property
def distribution(self):
"""Returns label distribution if labels are discrete."""
if self.is_discrete:
labels = self.assign(count=1)
labels = labels.groupby(self.name)
distribution = labels['count'].count()
return distribution
@property
def count(self):
"""Returns label count per instance."""
count = self.groupby(self.target_entity)
count = count[self.name].count()
count = count.to_frame('count')
return count
@property
def count_by_time(self):
"""Returns label count across cutoff times."""
if self.is_discrete:
keys = ['cutoff_time', self.name]
value = self.groupby(keys).cutoff_time.count()
value = value.unstack(self.name).fillna(0)
value = value.cumsum()
return value
value = self.groupby('cutoff_time')
value = value[self.name].count()
value = value.cumsum()
return value
def describe(self):
"""Prints out label info with transform settings that reproduce labels."""
if self.name is not None and self.is_discrete:
print('Label Distribution\n' + '-' * 18, end='\n')
distribution = self[self.name].value_counts()
distribution.index = distribution.index.astype('str')
distribution.sort_index(inplace=True)
distribution['Total:'] = distribution.sum()
print(distribution.to_string(), end='\n\n\n')
settings = pd.Series(self.settings)
transforms = settings.pop('transforms')
print('Settings\n' + '-' * 8, end='\n')
if settings.isnull().all():
print('No settings', end='\n\n\n')
else:
settings.sort_index(inplace=True)
print(settings.to_string(), end='\n\n\n')
print('Transforms\n' + '-' * 10, end='\n')
for step, transform in enumerate(transforms):
transform = pd.Series(transform)
transform.sort_index(inplace=True)
name = transform.pop('transform')
transform = transform.add_prefix(' - ')
transform = transform.add_suffix(':')
transform = transform.to_string()
header = '{}. {}\n'.format(step + 1, name)
print(header + transform, end='\n\n')
if len(transforms) == 0:
print('No transforms applied', end='\n\n')
def copy(self):
"""
Makes a copy of this object.
Returns:
LabelTimes : Copy of label times.
"""
label_times = super().copy()
label_times.settings = self.settings.copy()
label_times.transforms = self.transforms.copy()
return label_times
def threshold(self, value, inplace=False):
"""
Creates binary labels by testing if labels are above threshold.
Args:
value (float) : Value of threshold.
inplace (bool) : Modify labels in place.
Returns:
labels (LabelTimes) : Instance of labels.
"""
labels = self if inplace else self.copy()
labels[self.name] = labels[self.name].gt(value)
labels.label_type = 'discrete'
labels.settings['label_type'] = 'discrete'
transform = {'transform': 'threshold', 'value': value}
labels.transforms.append(transform)
if not inplace:
return labels
def apply_lead(self, value, inplace=False):
"""
Shifts the label times earlier for predicting in advance.
Args:
value (str) : Time to shift earlier.
inplace (bool) : Modify labels in place.
Returns:
labels (LabelTimes) : Instance of labels.
"""
labels = self if inplace else self.copy()
labels['cutoff_time'] = labels['cutoff_time'].sub(pd.Timedelta(value))
transform = {'transform': 'apply_lead', 'value': value}
labels.transforms.append(transform)
if not inplace:
return labels
def bin(self, bins, quantiles=False, labels=None, right=True):
"""
Bin labels into discrete intervals.
Args:
bins (int or array) : The criteria to bin by.
* bins (int) : Number of bins either equal-width or quantile-based.
If `quantiles` is `False`, defines the number of equal-width bins.
The range is extended by .1% on each side to include the minimum and maximum values.
If `quantiles` is `True`, defines the number of quantiles (e.g. 10 for deciles, 4 for quartiles, etc.)
* bins (array) : Bin edges either user defined or quantile-based.
If `quantiles` is `False`, defines the bin edges allowing for non-uniform width. No extension is done.
If `quantiles` is `True`, defines the bin edges usings an array of quantiles (e.g. [0, .25, .5, .75, 1.] for quartiles)
quantiles (bool) : Determines whether to use a quantile-based discretization function.
labels (array) : Specifies the labels for the returned bins. Must be the same length as the resulting bins.
right (bool) : Indicates whether bins includes the rightmost edge or not. Does not apply to quantile-based bins.
Returns:
LabelTimes : Instance of labels.
Examples:
.. _equal-widths:
Using bins of `equal-widths`_:
>>> labels.bin(2).head(2).T
label_id 0 1
customer_id 1 1
cutoff_time 2014-01-01 00:45:00 2014-01-01 00:48:00
my_labeling_function (157.5, 283.46] (31.288, 157.5]
.. _custom-widths:
Using bins of `custom-widths`_:
>>> values = labels.bin([0, 200, 400])
>>> values.head(2).T
label_id 0 1
customer_id 1 1
cutoff_time 2014-01-01 00:45:00 2014-01-01 00:48:00
my_labeling_function (200, 400] (0, 200]
.. _quantile-based:
Using `quantile-based`_ bins:
>>> values = labels.bin(4, quantiles=True) # (i.e. quartiles)
>>> values.head(2).T
label_id 0 1
customer_id 1 1
cutoff_time 2014-01-01 00:45:00 2014-01-01 00:48:00
my_labeling_function (137.44, 241.062] (43.848, 137.44]
.. _labels:
Assigning `labels`_ to bins:
>>> values = labels.bin(3, labels=['low', 'medium', 'high'])
>>> values.head(2).T
label_id 0 1
customer_id 1 1
cutoff_time 2014-01-01 00:45:00 2014-01-01 00:48:00
my_labeling_function high low
""" # noqa
label_times = self.copy()
values = label_times[self.name].values
if quantiles:
label_times[self.name] = pd.qcut(values, q=bins, labels=labels)
else:
label_times[self.name] = pd.cut(values, bins=bins, labels=labels, right=right)
transform = {
'transform': 'bin',
'bins': bins,
'quantiles': quantiles,
'labels': labels,
'right': right,
}
label_times.transforms.append(transform)
label_times.label_type = 'discrete'
return label_times
def sample(self, n=None, frac=None, random_state=None, replace=False):
"""
Return a random sample of labels.
Args:
n (int or dict) : Sample number of labels. A dictionary returns
the number of samples to each label. Cannot be used with frac.
frac (float or dict) : Sample fraction of labels. A dictionary returns
the sample fraction to each label. Cannot be used with n.
random_state (int) : Seed for the random number generator.
replace (bool) : Sample with or without replacement. Default value is False.
Returns:
LabelTimes : Random sample of labels.
Examples:
Create mock data:
>>> labels = {'labels': list('AABBBAA')}
>>> labels = LabelTimes(labels, name='labels')
>>> labels
labels
0 A
1 A
2 B
3 B
4 B
5 A
6 A
Sample number of labels:
>>> labels.sample(n=3, random_state=0).sort_index()
labels
1 A
2 B
6 A
Sample number per label:
>>> n_per_label = {'A': 1, 'B': 2}
>>> labels.sample(n=n_per_label, random_state=0).sort_index()
labels
3 B
4 B
5 A
Sample fraction of labels:
>>> labels.sample(frac=.4, random_state=2).sort_index()
labels
1 A
3 B
4 B
Sample fraction per label:
>>> frac_per_label = {'A': .5, 'B': .34}
>>> labels.sample(frac=frac_per_label, random_state=2).sort_index()
labels
4 B
5 A
6 A
""" # noqa
if isinstance(n, int):
sample = super().sample(n=n, random_state=random_state, replace=replace)
return sample
if isinstance(n, dict):
sample_per_label = []
for label, n, in n.items():
label = self[self[self.name] == label]
sample = label.sample(n=n, random_state=random_state, replace=replace)
sample_per_label.append(sample)
sample = pd.concat(sample_per_label, axis=0, sort=False)
return sample
if isinstance(frac, float):
sample = super().sample(frac=frac, random_state=random_state, replace=replace)
return sample
if isinstance(frac, dict):
sample_per_label = []
for label, frac, in frac.items():
label = self[self[self.name] == label]
sample = label.sample(frac=frac, random_state=random_state, replace=replace)
sample_per_label.append(sample)
sample = pd.concat(sample_per_label, axis=0, sort=False)
return sample
def infer_type(self):
"""Infer label type.
Returns:
str : Inferred label type. Either "continuous" or "discrete".
"""
dtype = self[self.name].dtype
is_discrete = pd.api.types.is_bool_dtype(dtype)
is_discrete = is_discrete or pd.api.types.is_categorical_dtype(dtype)
is_discrete = is_discrete or pd.api.types.is_object_dtype(dtype)
if is_discrete:
return 'discrete'
return 'continuous'
def equals(self, other):
"""Determines if two label time objects are the same.
Args:
other (LabelTimes) : Other label time object for comparison.
Returns:
bool : Whether label time objects are the same.
"""
return super().equals(other) and self.settings == other.settings
def _load_settings(self, path):
"""Read the settings in json format from disk.
Args:
path (str) : Directory on disk to read from.
"""
file = os.path.join(path, 'settings.json')
assert os.path.exists(file), 'settings not found'
with open(file, 'r') as file:
settings = json.load(file)
if 'dtypes' in settings:
dtypes = settings.pop('dtypes')
self = LabelTimes(self.astype(dtypes))
self.settings.update(settings)
return self
def _save_settings(self, path):
"""Write the settings in json format to disk.
Args:
path (str) : Directory on disk to write to.
"""
dtypes = self.dtypes.astype('str')
self.settings['dtypes'] = dtypes.to_dict()
file = os.path.join(path, 'settings.json')
with open(file, 'w') as file:
json.dump(self.settings, file)
del self.settings['dtypes']
def to_csv(self, path, filename='label_times.csv', save_settings=True):
"""Write label times in csv format to disk.
Args:
path (str) : Location on disk to write to (will be created as a directory).
filename (str) : Filename for label times. Default value is `label_times.csv`.
save_settings (bool) : Whether to save the settings used to make the label times.
"""
os.makedirs(path, exist_ok=True)
file = os.path.join(path, filename)
super().to_csv(file)
if save_settings:
self._save_settings(path)
def to_parquet(self, path, filename='label_times.parquet', save_settings=True):
"""Write label times in parquet format to disk.
Args:
path (str) : Location on disk to write to (will be created as a directory).
filename (str) : Filename for label times. Default value is `label_times.parquet`.
save_settings (bool) : Whether to save the settings used to make the label times.
"""
os.makedirs(path, exist_ok=True)
file = os.path.join(path, filename)
super().to_parquet(file, compression=None, engine='auto')
if save_settings:
self._save_settings(path)
def to_pickle(self, path, filename='label_times.pickle', save_settings=True):
"""Write label times in pickle format to disk.
Args:
path (str) : Location on disk to write to (will be created as a directory).
filename (str) : Filename for label times. Default value is `label_times.pickle`.
save_settings (bool) : Whether to save the settings used to make the label times.
"""
os.makedirs(path, exist_ok=True)
file = os.path.join(path, filename)
super().to_pickle(file)
if save_settings:
self._save_settings(path)
|
StarcoderdataPython
|
77453
|
import warnings
import cv2
import imageio
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
from PIL import Image
import model
import opt
import train
import pdb
def set_deterministic():
import random
import numpy
import torch
torch.manual_seed(0)
random.seed(0)
numpy.random.seed(0)
torch.backends.cudnn.benchmark = False
def adjust_jupyter_argv():
import sys
sys.argv = sys.argv[:1]
def write_mp4(name, frames, fps=10):
imageio.mimwrite(name + ".mp4", frames, "mp4", fps=fps)
def overlay_image(im, im_overlay, coord=(100, 70)):
# assumes that im is 3 channel and im_overlay 4 (with alpha)
alpha = im_overlay[:, :, 3]
offset_rows = im_overlay.shape[0]
offset_cols = im_overlay.shape[1]
row = coord[0]
col = coord[1]
im[row : row + offset_rows, col : col + offset_cols, :] = (
1 - alpha[:, :, None]
) * im[row : row + offset_rows, col : col + offset_cols, :] + alpha[
:, :, None
] * im_overlay[
:, :, :3
]
return im
def get_parameters(models):
"""Get all model parameters recursively."""
parameters = []
if isinstance(models, list):
for model in models:
parameters += get_parameters(model)
elif isinstance(models, dict):
for model in models.values():
parameters += get_parameters(model)
else:
# single pytorch model
parameters += list(models.parameters())
return parameters
def visualize_depth(depth, cmap=cv2.COLORMAP_JET):
x = depth.cpu().numpy()
x = np.nan_to_num(x) # change nan to 0
mi = np.min(x) # get minimum depth
ma = np.max(x)
x = (x - mi) / (ma - mi + 1e-8) # normalize to 0~1
x = (255 * x).astype(np.uint8)
x_ = Image.fromarray(cv2.applyColorMap(x, cmap))
x_ = torchvision.transforms.ToTensor()(x_) # (3, H, W)
return x_
def assign_appearance(ids_train, ids_unassigned):
# described in experiments, (3) NeRF-W: reassign each test embedding to closest train embedding
ids = sorted(ids_train + ids_unassigned)
g = {}
for id in ids_unassigned:
pos = ids.index(id)
if pos == 0:
# then only possible to assign to next embedding
id_reassign = ids[1]
elif pos == len(ids) - 1:
# then only possible to assign to previous embedding
id_reassign = ids[pos - 1]
else:
            # otherwise the one that is closest according to frame index
id_prev = ids[pos - 1]
id_next = ids[pos + 1]
id_reassign = min(
(abs(ids[pos] - id_prev), id_prev), (abs(ids[pos] - id_next), id_next)
)[1]
g[ids[pos]] = id_reassign
return g
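# Worked example: ids_train=[0, 2, 5], ids_unassigned=[1, 6] gives {1: 0, 6: 5}.
# id 1 is equidistant from 0 and 2, and the min() over (distance, id) tuples
# breaks the tie in favour of the smaller neighbour id; id 6 is last in the
# sorted list, so it can only map to the previous id, 5.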
def init_model(ckpt_path, dataset):
ckpt = torch.load(ckpt_path, map_location="cpu")
opt_hp = opt.get_opts(dataset.vid)
for j in ckpt["hyper_parameters"]:
setattr(opt_hp, j, ckpt["hyper_parameters"][j])
model = train.NeuralDiffSystem(
opt_hp, train_dataset=dataset, val_dataset=dataset
).cuda()
model.load_state_dict(ckpt["state_dict"])
g_test = assign_appearance(dataset.img_ids_train, dataset.img_ids_test)
g_val = assign_appearance(dataset.img_ids_train, dataset.img_ids_val)
for g in [g_test, g_val]:
for i, i_train in g.items():
model.embedding_a.weight.data[i] = model.embedding_a.weight.data[
i_train
]
return model
|
StarcoderdataPython
|
6653816
|
<reponame>S-Stephen/mock-idp<filename>mockidp/saml/response.py
# coding: utf-8
import base64
import time
import pkg_resources
from jinja2 import Environment, PackageLoader, select_autoescape
from lxml import etree
from signxml import XMLSigner
from mockidp.core.config import get_service_provider
env = Environment(
loader=PackageLoader('mockidp', 'templates'),
autoescape=select_autoescape(['html', 'xml']),
)
def saml_timestamp(epoch):
return time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(epoch))
env.filters['timestamp'] = saml_timestamp
def read_bytes(path):
filename = pkg_resources.resource_filename('mockidp', path)
return open(filename, 'rb').read()
def sign_assertions(response_str):
""" Return signed response string """
response_element = etree.fromstring(response_str)
cert = read_bytes("keys/cert.pem")
key = read_bytes("keys/key.pem")
for e in response_element.findall('{urn:oasis:names:tc:SAML:2.0:assertion}Assertion'):
signer = XMLSigner(c14n_algorithm="http://www.w3.org/2001/10/xml-exc-c14n#",
signature_algorithm='rsa-sha1', digest_algorithm='sha1')
signed_e = signer.sign(e, key=key, cert=cert)
response_element.replace(e, signed_e)
return etree.tostring(response_element, pretty_print=True)
def create_auth_response(config, session):
rendered_response = render_response(session, session.user)
signed_response = sign_assertions(rendered_response)
encoded_response = base64.b64encode(signed_response).decode('utf-8')
service_provider = get_service_provider(config, session.sp_entity_id)
url = service_provider['response_url']
return url, encoded_response
def render_response(session, user):
template = env.get_template('saml_response.xml')
params = dict(
session=session,
user=user
)
response = template.render(params)
return response
def create_logout_response(config, session):
rendered_response = render_logout_response(config, session.user, session)
signed_response = sign_assertions(rendered_response)
encoded_response = base64.b64encode(signed_response).decode('utf-8')
service_provider = get_service_provider(config, session.sp_entity_id)
url = service_provider['logout_url']
return url, encoded_response
def render_logout_response(config, user, session):
template = env.get_template('saml/logout_response.xml')
params = dict(
config=config,
session=session,
user=user
)
response = template.render(params)
return response
|
StarcoderdataPython
|
11335850
|
<reponame>pdubucq/steamturbines<filename>tools.py
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 21 10:00:15 2018
Gist to show how to write to an excel file without deleting its contents
@author: <NAME>
"""
import pandas as pd
import numpy as np
from openpyxl import load_workbook
def update_excel(df, filename, sheet_name):
book = load_workbook(filename)
writer = pd.ExcelWriter(filename, engine='openpyxl')
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
df.to_excel(writer, sheet_name)
writer.save()
def read_config(filename):
full_params=pd.read_excel(filename, sheet_name='params', index_col=0)
params=full_params['Wert'].transpose()
ts=pd.read_excel(filename, sheet_name='timeseries', index_col=0)
return params, ts
if __name__ == "__main__":
#%%
df=pd.DataFrame(np.random.normal(size=(100,1))).cumsum()
update_excel(df, 'test.xlsx', 'python_output')
|
StarcoderdataPython
|
23434
|
<gh_stars>0
import xxhash
import numpy as np
from base.grid import SimpleGRID
import scipy.sparse as SP
h = xxhash.xxh64()
s_to_i = lambda x,size : size*x[0]+x[1]
i_to_s = lambda x,size : (x%size,x//size)
def hash(x):
h.reset()
h.update(x)
return h.digest()
class Indexer(object):
def __init__(self):
self.total = 0
self.dict = {}
def get(self,hs):
val = self.dict.get(hs,-1)
if val == -1:
val = self.total
self.dict[hs] = val
self.total += 1
return val
def reset(self):
self.__init__()
class HashIndexer(object):
def __init__(self):
self.total = 0
self.dict = {}
def get(self,state):
hs=hash(state)
val = self.dict.get(hs,-1)
if val == -1:
val = self.total
self.dict[hs] = val
self.total += 1
return val
def reset(self):
self.__init__()
def get_graph(size):
env = SimpleGRID(grid_size=size,max_time=5000)
input_shape = env.observation_space.shape
min_batch = size**2-size
indexer = Indexer()
W = np.zeros((min_batch,min_batch))
states = np.zeros(min_batch).astype(int)
data = np.zeros((min_batch,)+input_shape)
while indexer.total<min_batch:
done = False
s = env.reset()
#s = s.transpose(2,0,1)#np.expand_dims(s,axis=0)
i = indexer.get(s_to_i(env.get_cat(),size))
states[i] = s_to_i(env.get_cat(),size)
data[states[i]] = s
while not done:
s,r,done = env.step(np.random.randint(4))
#s = np.expand_dims(s,axis=0)
#s = s.transpose(-1,0,1)
j = indexer.get(s_to_i(env.get_cat(),size))
states[j] = s_to_i(env.get_cat(),size)
data[states[j]] = s
W[states[i],states[j]] = W[states[j],states[i]] = 1
if r==1:
print(s_to_i(env.get_cat(),size),indexer.total)
i = j
return data, W
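# Example (a sketch; SimpleGRID comes from this repo's base.grid module):
#
#     data, W = get_graph(4)
#     # `data` holds one observation per indexed state (size**2 - size = 12
#     # states for size=4) and `W` is the symmetric 12x12 adjacency matrix
#     # accumulated from random-walk transitions.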
class GraphBuilder(object):
def __init__(self, env, action_set, batch_size):
self.env = env
self.action_set = action_set
self.h = xxhash.xxh64()
self.max_size = batch_size
self.indices = set()
self._total = 0
self.dict = {}
self.states = []
self.prev = 0
self.roll = self.roller()
def submit(self,state, new=False):
hs = self.hash(state)
val = self.dict.get(hs,-1)
if val == -1:
self.states.append(state)
val = self._total
self.dict[hs] = self._total
self._total += 1
if not new:
self.indices.add((self.prev,val))
self.prev = val
def reset(self):
self.indices = set()
self._total = 0
self.dict = {}
self.states = []
self.prev = 0
def roller(self):
done = True
while True:
self.reset()
while not self.full:
if done:
s = self.env.reset()
self.submit(s.copy(), new=done)
done = False
while not done and not self.full:
s,_,done,_ = self.env.step(np.random.choice(self.action_set))
self.submit(s.copy())
S,W = self.get_graph()
W = W.toarray()
#W = (W+W.T)/2
W = np.maximum(W,W.T)
#np.fill_diagonal(W, 1)
yield S, W
def get(self):
return self.roll.__next__()
def hash(self,x):
self.h.reset()
self.h.update(x)
return self.h.digest()
def get_graph(self):
if not self.full:
raise "Graph not full Yet"
indices = np.array(list(self.indices))
rows = indices[:,0]
cols = indices[:,1]
data = np.ones(len(rows))
return np.array(self.states),SP.coo_matrix((data, (rows, cols)),shape=(self.max_size, self.max_size))
@property
def size(self):
return self._total
@property
def full(self):
return self.size == self.max_size
|
StarcoderdataPython
|
5124028
|
<reponame>stfc-aeg/odin-timeslice
"""Demo adapter for ODIN control Timeslice
This class implements a simple adapter used for demonstration purposes in a
<NAME>, STFC Application Engineering
"""
import logging
import tornado
import time
import os
from os import path
from concurrent import futures
import smtplib
import email
import ssl
from tornado.ioloop import IOLoop
from tornado.concurrent import run_on_executor
from tornado.escape import json_decode
from odin.adapters.adapter import ApiAdapter, ApiAdapterResponse, request_types, response_types
from odin.adapters.parameter_tree import ParameterTree, ParameterTreeError
from odin._version import get_versions
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
class TimesliceAdapter(ApiAdapter):
"""System info adapter class for the ODIN server.
This adapter provides ODIN clients with information about the server and the system that it is
running on.
"""
def __init__(self, **kwargs):
"""Initialize the TimesliceAdapter object.
This constructor initializes the TimesliceAdapter object.
:param kwargs: keyword arguments specifying options
"""
# Intialise superclass
super(TimesliceAdapter, self).__init__(**kwargs)
rendered_files = (self.options.get('rendered_files'))
config_message = (self.options.get('config_message'))
self.timeslice = Timeslice(rendered_files,config_message)
logging.debug('TimesliceAdapter loaded')
@response_types('application/json', default='application/json')
def get(self, path, request):
"""Handle an HTTP GET request.
This method handles an HTTP GET request, returning a JSON response.
:param path: URI path of request
:param request: HTTP request object
:return: an ApiAdapterResponse object containing the appropriate response
"""
try:
response = self.timeslice.get(path)
status_code = 200
except ParameterTreeError as e:
response = {'error': str(e)}
status_code = 400
content_type = 'application/json'
return ApiAdapterResponse(response, content_type=content_type,
status_code=status_code)
@request_types('application/json')
@response_types('application/json', default='application/json')
def put(self, path, request):
"""Handle an HTTP PUT request.
This method handles an HTTP PUT request, returning a JSON response.
:param path: URI path of request
:param request: HTTP request object
:return: an ApiAdapterResponse object containing the appropriate response
"""
content_type = 'application/json'
try:
data = json_decode(request.body)
self.timeslice.set(path, data)
response = self.timeslice.get(path)
status_code = 200
except TimesliceError as e:
response = {'error': str(e)}
status_code = 400
except (TypeError, ValueError) as e:
response = {'error': 'Failed to decode PUT request body: {}'.format(str(e))}
status_code = 400
logging.debug(response)
return ApiAdapterResponse(response, content_type=content_type,
status_code=status_code)
def delete(self, path, request):
"""Handle an HTTP DELETE request.
This method handles an HTTP DELETE request, returning a JSON response.
:param path: URI path of request
:param request: HTTP request object
:return: an ApiAdapterResponse object containing the appropriate response
"""
response = 'TimesliceAdapter: DELETE on path {}'.format(path)
status_code = 200
logging.debug(response)
return ApiAdapterResponse(response, status_code=status_code)
class TimesliceError(Exception):
"""Simple exception class for PSCUData to wrap lower-level exceptions."""
pass
class Timeslice():
"""Timeslice - class that extracts and stores information about system-level parameters."""
# Thread executor used for background tasks
executor = futures.ThreadPoolExecutor(max_workers=1)
def __init__(self, rendered_files, config_message,):
"""Initialise the Timeslice object.
        This constructor initialises the Timeslice object, building a parameter tree and
launching a background task if enabled
"""
self.rendered_files = rendered_files
self.config_message = config_message
self.access_codes = []
self.files = []
self.email_address = ""
# Store initialisation time
self.init_time = time.time()
# Get package version information
version_info = get_versions()
# Store all information in a parameter tree
self.param_tree = ParameterTree({
'odin_version': version_info['version'],
'tornado_version': tornado.version,
'server_uptime': (self.get_server_uptime, None),
'access_codes': (lambda: self.access_codes, None),
'add_access_code': ("", self.add_task_access_code),
'rendered_files': (lambda: self.rendered_files,None),
'config_message': (lambda: self.config_message,None),
'clear_access_codes' : (False, self.clear_access_codes),
'clear_email' : (False, self.clear_email),
'email_address' : (lambda: self.email_address, None),
'add_email_address' : ("", self.add_email_address),
'send_email_new' : (False, self.send_email_new),
'files': (lambda: self.files, None),
})
def get_server_uptime(self):
"""Get the uptime for the ODIN server.
This method returns the current uptime for the ODIN server.
"""
return time.time() - self.init_time
def get(self, path):
"""Get the parameter tree.
This method returns the parameter tree for use by clients via the Timeslice adapter.
:param path: path to retrieve from tree
"""
return self.param_tree.get(path)
def set(self, path, data):
"""Set parameters in the parameter tree.
This method simply wraps underlying ParameterTree method so that an exceptions can be
re-raised with an appropriate TimesliceError.
:param path: path of parameter tree to set values for
:param data: dictionary of new data values to set in the parameter tree
"""
try:
self.param_tree.set(path, data)
except ParameterTreeError as e:
raise TimesliceError(e)
def add_task_access_code(self, access_code):
"""Validate and store entered access codes to be sent in an email
When an access code is entered, first the code checks that the entered access code
isn't already in the access codes list in order to avoid duplication of attachments
in the email.
If the code isn't a duplicate the system then checks that it relates to an existing
file. If the file exists then the code is added to the list of access codes being stored
and the files list recieves both the access code and the file path in order to attach
the mp4 file to the email.
Otherwise the system sends out an error message
"""
if access_code in self.access_codes:
raise TimesliceError("This code is already stored")
file_path = os.path.join(self.rendered_files, access_code + '.mp4')
logging.debug("Testing if file {} exists".format(file_path))
if os.path.isfile(os.path.join(file_path)):
logging.debug("adding access code %s", access_code)
self.access_codes.append(access_code)
self.files.append(file_path)
logging.debug(self.access_codes)
logging.debug(self.files)
else:
raise TimesliceError("This access code does not match any stored videos, please try again")
def clear_access_codes(self, clear):
""" This empties both the access codes list and the files list used for attaching mp4
files.
"""
logging.debug("Setting list clear to %s", clear)
self.access_codes = []
self.files = []
logging.debug(self.access_codes)
def clear_email(self, clear):
""" This empties the stored email address when the page loads"""
self.email_address = None
logging.debug("clearing email: %s",clear)
def add_email_address(self, email_address):
"""This sets the email address for videos to be sent to
"""
self.email_address = email_address
logging.debug("Email address recieved: %s", email_address)
def send_email_new(self, send):
"""This is the code that actually collects the various pieces of entered information
and uses them to send an email out to the timeslice user
"""
config_message = self.config_message
subject = "Timeslice videos"
body = (config_message).format(self.email_address, self.access_codes)
sender_email = "<NAME> <<EMAIL>>"
receiver_email = '{0}'.format(self.email_address)
message = MIMEMultipart()
message["From"] = sender_email
message["To"] = receiver_email
message["Subject"] = subject
message.attach(MIMEText(body, "plain"))
files_list = self.access_codes
logging.debug(files_list)
for file_number, a_file in enumerate(files_list, start=1):
a_file = os.path.join(self.rendered_files, a_file + '.mp4')
attachment = open(a_file, "rb")
filename = 'STFC-Timeslice-video-{}'.format(file_number)+'.mp4'
part = MIMEBase("application", "octet-stream")
part.set_payload(attachment.read())
encoders.encode_base64(part)
part.add_header(
"Content-Disposition",
f"attachment; filename= {filename}",
)
message.attach(part)
try:
smtp_obj = smtplib.SMTP('outbox.rl.ac.uk')
smtp_obj.sendmail(sender_email, receiver_email, message.as_string())
logging.debug("Yay, we sent mail")
except smtplib.SMTPException as error:
logging.debug("Boo, emailing failed: {}".format(str(error)))
|
StarcoderdataPython
|
1780657
|
class MarbleDecoration:
def maxLength(self, R, G, B):
def m(a, b):
return 2*min(a, b) + 1 - int(a == b)
return max(m(R, G), m(R, B), m(G, B))
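# Reasoning sketch: an alternating two-colour row can use at most min(a, b)
# marbles of the scarcer colour, giving length 2*min(a, b) + 1 when one colour
# has a spare marble to cap both ends, and 2*min(a, b) when a == b.
# Example: maxLength(2, 4, 0) = max(m(2,4), m(2,0), m(4,0)) = max(5, 1, 1) = 5.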
|
StarcoderdataPython
|
6688573
|
# flake8: noqa
from .core import Bower
from .error import Error
from .autoversion import (filesystem_second_autoversion,
filesystem_microsecond_autoversion)
from .utility import module_relative_path
from .publisher import PublisherTween
from .injector import InjectorTween
from .renderer import render_inline_js, render_inline_css
|
StarcoderdataPython
|
5084137
|
from setuptools import setup
setup(name='feature_selection',
version='0.1',
description='Small and simple python package to run filter and wrapper feature selection methods',
url='https://github.com/FabianIsensee/FeatureSelection',
author='<NAME>, Division of Medical Image Computing, German Cancer Research Center',
author_email='<EMAIL>',
license='MIT',
packages=['feature_selection']
)
|
StarcoderdataPython
|
133476
|
<reponame>ryan-rozario/vyper<gh_stars>0
import copy
from vyper import (
ast as vy_ast,
)
from vyper.exceptions import (
StructureException,
TypeMismatch,
VariableDeclarationException,
)
from vyper.parser.context import (
Context,
)
from vyper.parser.expr import (
Expr,
)
from vyper.parser.memory_allocator import (
MemoryAllocator,
)
from vyper.types.types import (
BaseType,
ByteArrayType,
)
from vyper.utils import (
SizeLimits,
is_instances,
)
class Constants(object):
def __init__(self):
self._constants = dict()
self._constants_ast = dict()
def __contains__(self, key):
return key in self._constants
def unroll_constant(self, const, global_ctx):
ann_expr = None
expr = Expr.parse_value_expr(
const.value,
Context(
vars=None,
global_ctx=global_ctx,
origcode=const.full_source_code,
memory_allocator=MemoryAllocator()
),
)
annotation_type = global_ctx.parse_type(const.annotation.args[0], None)
fail = False
if is_instances([expr.typ, annotation_type], ByteArrayType):
if expr.typ.maxlen < annotation_type.maxlen:
return const
fail = True
elif expr.typ != annotation_type:
fail = True
# special case for literals, which can be uint256 types as well.
is_special_case_uint256_literal = (
is_instances([expr.typ, annotation_type], BaseType)
) and (
[annotation_type.typ, expr.typ.typ] == ['uint256', 'int128']
) and SizeLimits.in_bounds('uint256', expr.value)
is_special_case_int256_literal = (
is_instances([expr.typ, annotation_type], BaseType)
) and (
[annotation_type.typ, expr.typ.typ] == ['int128', 'int128']
) and SizeLimits.in_bounds('int128', expr.value)
if is_special_case_uint256_literal or is_special_case_int256_literal:
fail = False
if fail:
raise TypeMismatch(
f"Invalid value for constant type, expected {annotation_type} got "
f"{expr.typ} instead",
const.value,
)
ann_expr = copy.deepcopy(expr)
ann_expr.typ = annotation_type
ann_expr.typ.is_literal = expr.typ.is_literal # Annotation type doesn't have literal set.
return ann_expr
def add_constant(self, item, global_ctx):
args = item.annotation.args
if not item.value:
raise StructureException('Constants must express a value!', item)
is_correctly_formatted_struct = (
len(args) == 1 and isinstance(args[0], (vy_ast.Subscript, vy_ast.Name, vy_ast.Call))
) and item.target
if is_correctly_formatted_struct:
c_name = item.target.id
if global_ctx.is_valid_varname(c_name, item):
self._constants[c_name] = self.unroll_constant(item, global_ctx)
self._constants_ast[c_name] = item.value
# TODO: the previous `if` has no else which will result in this
# *silently* existing without doing anything. is this intended
# behavior.
else:
raise StructureException('Incorrectly formatted struct', item)
def ast_is_constant(self, ast_node):
return isinstance(ast_node, vy_ast.Name) and ast_node.id in self._constants
def is_constant_of_base_type(self, ast_node, base_types):
base_types = (base_types) if not isinstance(base_types, tuple) else base_types
valid = self.ast_is_constant(ast_node)
if not valid:
return False
const = self._constants[ast_node.id]
if isinstance(const.typ, BaseType) and const.typ.typ in base_types:
return True
return False
def get_constant(self, const_name, context):
""" Return unrolled const """
# check if value is compatible with
const = self._constants[const_name]
if isinstance(const, vy_ast.AnnAssign): # Handle ByteArrays.
if context:
expr = Expr(const.value, context).lll_node
return expr
else:
raise VariableDeclarationException(
f"ByteArray: Can not be used outside of a function context: {const_name}"
)
# Other types are already unwrapped, no need
return self._constants[const_name]
|
StarcoderdataPython
|
4801302
|
<filename>myWeb/myWeb/model/User.py
from myWeb import db
class User(db.Model):
__tablename__='b_user'
id=db.Column(db.Integer,primary_key=True)
username=db.Column(db.String(10),unique=True)
password=db.Column(db.String(16))
def __init__(self,username,password):
self.username=username
self.password=password
def __repr__(self):
return '<user %r>' % self.username
|
StarcoderdataPython
|
244564
|
<filename>270 Closest Binary Search Tree Value.py
"""
Premium Question
"""
import sys
__author__ = 'Daniel'
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def closestValue(self, root, target):
"""
Divide the problem into 2 parts:
1. find the value just smaller than target
2. find the value just larger than target
:type root: TreeNode
:type target: float
:rtype: int
"""
lo = [-sys.float_info.max]
self.find(root, target, lo, True)
hi = [sys.float_info.max]
self.find(root, target, hi, False)
if hi[0] - target < target - lo[0]:
return int(hi[0])
else:
return int(lo[0])
def find(self, root, target, ret, lower=True):
if not root:
return
if root.val == target:
ret[0] = root.val
return
if root.val < target:
if lower: ret[0] = max(ret[0], root.val)
self.find(root.right, target, ret, lower)
else:
if not lower: ret[0] = min(ret[0], root.val)
self.find(root.left, target, ret, lower)
if __name__ == "__main__":
assert Solution().closestValue(TreeNode(2147483647), 0.0) == 2147483647
|
StarcoderdataPython
|
11306780
|
from rest_framework import routers
from .views import ImageViewSet
from django.urls import path, include
router = routers.DefaultRouter()
router.register('images', ImageViewSet)
urlpatterns = [
path('', include(router.urls))
]
|
StarcoderdataPython
|
4896751
|
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import types
from typing import Any, Dict, Optional, Tuple
import tensorflow as tf
from nncf import NNCFConfig
from nncf.api.compression import CompressionAlgorithmBuilder
from nncf.api.compression import CompressionAlgorithmController
from nncf.common.compression import BaseCompressionAlgorithmController as BaseController
from nncf.config.structures import ModelEvaluationArgs
from nncf.config.utils import is_accuracy_aware_training
from nncf.tensorflow.accuracy_aware_training.keras_model_utils import accuracy_aware_fit
from nncf.config.extractors import extract_algorithm_names
from nncf.tensorflow.algorithm_selector import NoCompressionAlgorithmBuilder
from nncf.tensorflow.algorithm_selector import get_compression_algorithm_builder
from nncf.tensorflow.api.composite_compression import TFCompositeCompressionAlgorithmBuilder
from nncf.tensorflow.helpers.utils import get_built_model
def create_compression_algorithm_builder(config: NNCFConfig,
should_init: bool) -> CompressionAlgorithmBuilder:
"""
Factory to create an instance of the compression algorithm builder
by NNCFConfig.
:param config: An instance of NNCFConfig that defines compression methods.
:param should_init: The flag indicates that the generated compression builder
will initialize (True) or not (False) the training parameters of the model
during model building.
:return: An instance of the `CompressionAlgorithmBuilder`
"""
algo_names = extract_algorithm_names(config)
number_compression_algorithms = len(algo_names)
if number_compression_algorithms == 0:
return NoCompressionAlgorithmBuilder(config, should_init)
if number_compression_algorithms == 1:
algo_name = next(iter(algo_names))
return get_compression_algorithm_builder(algo_name)(config, should_init)
return TFCompositeCompressionAlgorithmBuilder(config, should_init)
def create_compressed_model(model: tf.keras.Model,
config: NNCFConfig,
compression_state: Optional[Dict[str, Any]] = None) \
-> Tuple[CompressionAlgorithmController, tf.keras.Model]:
"""
The main function used to produce a model ready for compression fine-tuning
from an original TensorFlow Keras model and a configuration object.
:param model: The original model. Should have its parameters already loaded
from a checkpoint or another source.
:param config: A configuration object used to determine the exact compression
modifications to be applied to the model.
:param compression_state: compression state to unambiguously restore the compressed model.
Includes builder and controller states. If it is specified, trainable parameter initialization will be skipped
during building.
:return: A tuple (compression_ctrl, compressed_model) where
- compression_ctrl: The controller of the compression algorithm.
- compressed_model: The model with additional modifications
necessary to enable algorithm-specific compression during fine-tuning.
"""
model = get_built_model(model, config)
original_model_accuracy = None
if is_accuracy_aware_training(config, compression_config_passed=True):
if config.has_extra_struct(ModelEvaluationArgs):
evaluation_args = config.get_extra_struct(ModelEvaluationArgs)
original_model_accuracy = evaluation_args.eval_fn(model)
builder = create_compression_algorithm_builder(config, should_init=not compression_state)
if compression_state:
builder.load_state(compression_state[BaseController.BUILDER_STATE])
compressed_model = builder.apply_to(model)
compression_ctrl = builder.build_controller(compressed_model)
compressed_model.original_model_accuracy = original_model_accuracy
if isinstance(compressed_model, tf.keras.Model):
compressed_model.accuracy_aware_fit = types.MethodType(accuracy_aware_fit, compressed_model)
return compression_ctrl, compressed_model
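# Example usage (a minimal sketch; `keras_model` and `nncf_config` stand in for
# a built tf.keras.Model and an NNCFConfig instance loaded elsewhere):
#
#     compression_ctrl, compressed_model = create_compressed_model(keras_model,
#                                                                  nncf_config)
#     # `compressed_model` is then fine-tuned with the usual Keras workflow,
#     # while `compression_ctrl` exposes the algorithm-specific state.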
|
StarcoderdataPython
|
8160943
|
<reponame>Eladhi/VI_Glow
import numpy as np
import torch
from tqdm import tqdm
from torchvision.utils import make_grid
from torch.utils.data import DataLoader
from misc import util
class Inferer:
def __init__(self, hps, graph, devices, data_device):
"""
Network inferer
:param hps: hyper-parameters for this network
:type hps: dict
:param graph: model graph
:type graph: torch.nn.Module
:param devices: list of usable devices for model running
:type devices: list
:param data_device: list of usable devices for data loading
:type data_device: str or int
"""
# general
self.hps = hps
# state
self.graph = graph
self.graph.eval()
self.devices = devices
self.use_cuda = 'cpu' not in self.devices
# data
self.data_device = data_device
self.batch_size = self.graph.h_top.shape[0]
self.num_classes = self.hps.dataset.num_classes
# ablation
self.y_condition = self.hps.ablation.y_condition
def sample(self, z, y_onehot, eps_std=0.5):
"""
Sample image
:param z: latent feature vector
:type z: torch.Tensor or None
:param y_onehot: one-hot vector of label
:type y_onehot: torch.Tensor or None
:param eps_std: standard deviation of eps
:type eps_std: float
:return: generated image
:rtype: torch.Tensor
"""
with torch.no_grad():
# generate sample from model
img = self.graph(z=z, y_onehot=y_onehot, eps_std=eps_std, reverse=True)
# create image grid
grid = make_grid(img)
return grid
def encode(self, img):
"""
Encode input image to latent features
:param img: input image
:type img: torch.Tensor or np.numpy or Image.Image
:return: latent features
:rtype: torch.Tensor
"""
with torch.no_grad():
if not torch.is_tensor(img):
img = util.image_to_tensor(
img,
shape=self.hps.model.image_shape)
img = util.make_batch(img, self.batch_size)
elif len(img.shape) == 3:
img = util.make_batch(img, self.batch_size)
if self.use_cuda:
img = img.cuda()
z, _, _ = self.graph(img)
return z[0, :, :, :]
def decode(self, z):
"""
:param z: input latent feature vector
:type z: torch.Tensor
:return: decoded image
:rtype: torch.Tensor
"""
with torch.no_grad():
if len(z.shape) == 3:
z = util.make_batch(z, self.batch_size)
if self.use_cuda:
z = z.cuda()
img = self.graph(z=z, y_onehot=None, reverse=True)[0, :, :, :]
return img
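    # Example (a sketch; `inferer` is an Inferer built from a trained graph and
    # `pil_image` any input image):
    #
    #     z = inferer.encode(pil_image)   # image -> latent features
    #     recon = inferer.decode(z)       # latent features -> image tensor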
def compute_attribute_delta(self, dataset):
"""
Compute feature vector deltaz of different attributes
:param dataset: dataset for training model
:type dataset: torch.utils.data.Dataset
:return:
:rtype:
"""
with torch.no_grad():
# initialize variables
attrs_z_pos = np.zeros([self.num_classes, *self.graph.flow.output_shapes[-1][1:]])
attrs_z_neg = np.zeros([self.num_classes, *self.graph.flow.output_shapes[-1][1:]])
num_z_pos = np.zeros(self.num_classes)
num_z_neg = np.zeros(self.num_classes)
deltaz = np.zeros([self.num_classes, *self.graph.flow.output_shapes[-1][1:]])
data_loader = DataLoader(dataset, batch_size=self.batch_size,
num_workers=self.hps.dataset.num_workers,
shuffle=True,
drop_last=True)
progress = tqdm(data_loader)
for idx, batch in enumerate(progress):
# extract batch data
assert 'y_onehot' in batch.keys(), 'Compute attribute deltaz needs "y_onehot" in batch data'
for i in batch:
batch[i] = batch[i].to(self.data_device)
x = batch['x']
y_onehot = batch['y_onehot']
# decode latent features
z, _, _ = self.graph(x)
# append to latent feature list by attributes
                for i in range(x.shape[0]):  # iterate over samples (len(batch) would count dict keys)
for cls in range(self.num_classes):
if y_onehot[i, cls] > 0:
attrs_z_pos[cls] += z[i]
num_z_pos[cls] += 1
else:
attrs_z_neg[cls] += z[i]
num_z_neg[cls] += 1
# compute deltaz
num_z_pos = [max(1., float(num)) for num in num_z_pos]
num_z_neg = [max(1., float(num)) for num in num_z_neg]
for cls in range(self.num_classes):
mean_z_pos = attrs_z_pos[cls] / num_z_pos[cls]
mean_z_neg = attrs_z_neg[cls] / num_z_neg[cls]
deltaz[cls] = mean_z_pos - mean_z_neg
return deltaz
def apply_attribute_delta(self, img, deltaz, interpolation):
"""
Apply attribute delta to image by given interpolation vector
:param img: given image
:type img: torch.Tensor or np.numpy or Image.Image
:param deltaz: delta vector of attributes in latent space
:type deltaz: np.ndarray
:param interpolation: interpolation vector
:type interpolation: torch.Tensor or np.ndarray or list[float]
:return: processed image
:rtype: torch.Tensor
"""
if isinstance(deltaz, np.ndarray):
deltaz = torch.Tensor(deltaz)
assert len(interpolation) == self.num_classes
assert deltaz.shape == torch.Size([self.num_classes,
*self.graph.flow.output_shapes[-1][1:]])
# encode
z = self.encode(img)
# perform interpolation
z_interpolated = z.clone()
for i in range(len(interpolation)):
z_delta = deltaz[i].mul(interpolation[i])
if self.use_cuda:
z_delta = z_delta.cuda()
z_interpolated += z_delta
# decode
img = self.decode(z_interpolated)
return img
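# A minimal usage sketch (added for illustration): `hps`, `graph`, `dataset` and the
# example image are assumptions about objects built elsewhere in this project, so the
# lines are left commented rather than executed here.
#
#     inferer = Inferer(hps, graph, devices=['cuda:0'], data_device=0)
#     z = inferer.encode(img)                              # image -> latent tensor
#     deltaz = inferer.compute_attribute_delta(dataset)    # per-class latent deltas
#     edited = inferer.apply_attribute_delta(
#         img, deltaz, interpolation=[0.0] * inferer.num_classes)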
|
StarcoderdataPython
|
3299395
|
<reponame>tungwenyang/sc-projects
"""
SC101 Baby Names Project
Adapted from <NAME>'s Baby Names assignment by
<NAME>.
This program plots the historical trend of a list of
names from a given dict of baby name data onto the canvas.
"""
import tkinter
import babynames
import babygraphicsgui as gui
FILENAMES = [
'data/full/baby-1900.txt', 'data/full/baby-1910.txt',
'data/full/baby-1920.txt', 'data/full/baby-1930.txt',
'data/full/baby-1940.txt', 'data/full/baby-1950.txt',
'data/full/baby-1960.txt', 'data/full/baby-1970.txt',
'data/full/baby-1980.txt', 'data/full/baby-1990.txt',
'data/full/baby-2000.txt', 'data/full/baby-2010.txt'
]
CANVAS_WIDTH = 1000
CANVAS_HEIGHT = 600
YEARS = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
GRAPH_MARGIN_SIZE = 20
COLORS = ['red', 'purple', 'green', 'blue']
TEXT_DX = 2
LINE_WIDTH = 2
MAX_RANK = 1000
def get_x_coordinate(width, year_index):
"""
Given the width of the canvas and the index of the current year
in the YEARS list, returns the x coordinate of the vertical
line associated with that year.
Input:
width (int): The width of the canvas
year_index (int): The index of the current year in the YEARS list
Returns:
x_coordinate (int): The x coordinate of the vertical line associated
with the specified year.
"""
return GRAPH_MARGIN_SIZE + year_index * (width - 2 * GRAPH_MARGIN_SIZE) / len(YEARS)
def draw_fixed_lines(canvas):
"""
Erases all existing information on the given canvas and then
draws the fixed background lines on it.
Input:
canvas (Tkinter Canvas): The canvas on which we are drawing.
Returns:
This function does not return any value.
"""
canvas.delete('all') # delete all existing lines from the canvas
# Write your code below this line
#################################
canvas.create_line(GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, CANVAS_WIDTH - GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE)
canvas.create_line(GRAPH_MARGIN_SIZE, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, CANVAS_WIDTH - GRAPH_MARGIN_SIZE,
CANVAS_HEIGHT - GRAPH_MARGIN_SIZE)
canvas.create_line(GRAPH_MARGIN_SIZE, 0, GRAPH_MARGIN_SIZE, CANVAS_HEIGHT)
for i in range(len(YEARS)):
canvas.create_line(get_x_coordinate(CANVAS_WIDTH, i), 0, get_x_coordinate(CANVAS_WIDTH, i), CANVAS_HEIGHT)
canvas.create_text(get_x_coordinate(CANVAS_WIDTH, i) + TEXT_DX, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE,
text=YEARS[i], anchor=tkinter.NW)
def draw_names(canvas, name_data, lookup_names):
"""
    Given a dict of baby name data and a list of names, plots
the historical trend of those names onto the canvas.
Input:
canvas (Tkinter Canvas): The canvas on which we are drawing.
name_data (dict): Dictionary holding baby name data
lookup_names (List[str]): A list of names whose data you want to plot
Returns:
This function does not return any value.
"""
draw_fixed_lines(canvas) # draw the fixed background grid
# Write your code below this line
#################################
for i in range(len(lookup_names)):
d = name_data[lookup_names[i]]
last_x = 0
last_y = 0
for j in range(len(YEARS)):
# x coordinate
current_x = get_x_coordinate(CANVAS_WIDTH, j)
# y coordinate
if str(YEARS[j]) in d:
current_y = get_y_coordinate(CANVAS_HEIGHT, d[str(YEARS[j])])
else:
current_y = CANVAS_HEIGHT - GRAPH_MARGIN_SIZE
# create line if not YEAR[0]
if j > 0:
canvas.create_line(last_x, last_y, current_x, current_y, width=LINE_WIDTH, fill=COLORS[i % len(COLORS)])
# create text
if current_y == CANVAS_HEIGHT - GRAPH_MARGIN_SIZE:
canvas.create_text(current_x + TEXT_DX, current_y, text=(lookup_names[i], '*'),
anchor=tkinter.SW, fill=COLORS[i % len(COLORS)])
else:
canvas.create_text(current_x + TEXT_DX, current_y, text=(lookup_names[i], d[str(YEARS[j])]),
anchor=tkinter.SW, fill=COLORS[i % len(COLORS)])
# assign current_x,_y as last_x,_y for next loop
last_x, last_y = current_x, current_y
def get_y_coordinate(height, rank):
"""
    Given the height of the canvas and the rank of a name in the current year,
    returns the y coordinate of the plotted data point.
Input:
height (int): The height of the canvas
rank (str): The rank of the current year in the name_data[name] dict
Returns:
y_coordinate (int): The y coordinate associated with the rank of specified year.
"""
if int(rank) <= MAX_RANK:
return GRAPH_MARGIN_SIZE + int(rank) * (height - 2 * GRAPH_MARGIN_SIZE) / MAX_RANK
else:
return height - GRAPH_MARGIN_SIZE
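# Illustrative sanity check of the coordinate mapping (added sketch; the values follow
# directly from the constants above): with CANVAS_WIDTH = 1000 and GRAPH_MARGIN_SIZE = 20,
# the vertical line for year index 1 sits at x = 20 + 960 / 12 = 100, and rank 1000 maps
# to the bottom margin y = 600 - 20 = 580.
#
#     get_x_coordinate(CANVAS_WIDTH, 1)        # -> 100.0
#     get_y_coordinate(CANVAS_HEIGHT, '1000')  # -> 580.0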
# main() code is provided, feel free to read through it but DO NOT MODIFY
def main():
# Load data
name_data = babynames.read_files(FILENAMES)
# Create the window and the canvas
top = tkinter.Tk()
top.wm_title('Baby Names')
canvas = gui.make_gui(top, CANVAS_WIDTH, CANVAS_HEIGHT, name_data, draw_names, babynames.search_names)
# Call draw_fixed_lines() once at startup so we have the lines
# even before the user types anything.
draw_fixed_lines(canvas)
# This line starts the graphical loop that is responsible for
# processing user interactions and plotting data
top.mainloop()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
97446
|
import pyautogui
from pynput.mouse import Button, Listener
from datetime import *
def clicou(x,y, botao, pressionado):
    if pressionado:  # only react to button presses, not releases
im1 = pyautogui.screenshot()
im1.save(f'{datetime.now()}.png')
listener = Listener(on_click=clicou)
listener.start()
listener.join()
|
StarcoderdataPython
|
325074
|
<filename>docs/exts/sphinxtr/pluginparameters.py
__docformat__ = 'reStructuredText'
import sys
import os.path
import csv
from docutils import nodes
from docutils.utils import SystemMessagePropagation
from docutils.parsers.rst import Directive, Parser
from docutils.parsers.rst.directives.tables import Table
from docutils.statemachine import ViewList
from sphinx.util.nodes import nested_parse_with_titles
class PluginParameters(Table):
"""
    Implement tables whose data is encoded as a uniform two-level bullet list,
    used to describe Mitsuba 2 plugin parameters.
"""
option_spec = {}
def run(self):
if not self.content:
error = self.state_machine.reporter.error(
'The "%s" directive is empty; content required.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return [error]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
num_cols = self.check_list_content(node)
# Hardcode this:
col_widths = [20, 15, 65]
table_data = [[item.children for item in row_list[0]]
for row_list in node[0]]
header_rows = self.options.get('header-rows', 1)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(table_data, header_rows-1, stub_columns)
table_node = self.build_table_from_list(table_data, col_widths,
header_rows, stub_columns)
table_node['classes'] += self.options.get('class', ['paramstable'])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
def check_list_content(self, node):
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'exactly one bullet list expected.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
list_node = node[0]
# Check for a uniform two-level bullet list:
for item_index in range(len(list_node)):
item = list_node[item_index]
if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'two-level bullet list expected, but row %s does not '
'contain a second-level bullet list.'
% (self.name, item_index + 1), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif item_index:
# ATTN pychecker users: num_cols is guaranteed to be set in the
# "else" clause below for item_index==0, before this branch is
# triggered.
if len(item[0]) != num_cols:
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'uniform two-level bullet list expected, but row %s '
'does not contain the same number of items as row 1 '
'(%s vs %s).'
% (self.name, item_index + 1, len(item[0]), num_cols),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
else:
num_cols = len(item[0])
return num_cols
def build_table_from_list(self, table_data, col_widths, header_rows, stub_columns):
table = nodes.table()
tgroup = nodes.tgroup(cols=len(col_widths))
table += tgroup
for col_width in col_widths:
colspec = nodes.colspec(colwidth=col_width)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
rows = []
# Append first row
header_text = ['Parameter', 'Type', 'Description']
header_row_node = nodes.row()
for text in header_text:
entry = nodes.entry()
entry += [nodes.paragraph(text=text)]
header_row_node += entry
rows.append(header_row_node)
for row in table_data:
row_node = nodes.row()
for i, cell in enumerate(row):
entry = nodes.entry()
                # force the first column to be written in paramtype style
if i == 0:
rst = ViewList()
rst.append(""":paramtype:`{name}`""".format(name=str(cell[0][0])), "", 0)
parsed_node = nodes.section()
parsed_node.document = self.state.document
nested_parse_with_titles(self.state, rst, parsed_node)
entry += [parsed_node[0]]
else:
entry += cell
row_node += entry
rows.append(row_node)
if header_rows:
thead = nodes.thead()
thead.extend(rows[:header_rows])
tgroup += thead
tbody = nodes.tbody()
tbody.extend(rows[header_rows:])
tgroup += tbody
return table
def setup(app):
app.add_directive('pluginparameters', PluginParameters)
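# Hedged usage sketch: in an .rst page this directive is expected to wrap a uniform
# two-level bullet list with three entries per row (parameter, type, description).
# The parameter names below are invented for illustration only.
#
# .. pluginparameters::
#
#    * - filename
#      - string
#      - Path of the bitmap file to load
#    * - gamma
#      - float
#      - Gamma value applied when decoding the image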
|
StarcoderdataPython
|
11333113
|
#!/usr/bin/env python
'''
This script converts data in panoptic COCO format to semantic segmentation. All
segments with the same semantic class in one image are combined together.
Additional option:
- using option '--things_others' the script combine all segments of thing
classes into one segment with semantic class 'other'.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import argparse
import numpy as np
import json
import time
import multiprocessing
from collections import defaultdict
import PIL.Image as Image
from panopticapi.utils import get_traceback, rgb2id, save_json
try:
# set up path for pycocotools
# sys.path.append('./cocoapi-master/PythonAPI/')
from pycocotools import mask as COCOmask
except Exception:
raise Exception(
"Please install pycocotools module from https://github.com/cocodataset/cocoapi")
OTHER_CLASS_ID = 183
@get_traceback
def extract_semantic_single_core(proc_id,
annotations_set,
segmentations_folder,
output_json_file,
semantic_seg_folder,
categories,
save_as_png,
things_other):
annotation_semantic_seg = []
for working_idx, annotation in enumerate(annotations_set):
if working_idx % 100 == 0:
print('Core: {}, {} from {} images processed'.format(proc_id,
working_idx,
len(annotations_set)))
try:
pan_format = np.array(
Image.open(os.path.join(
segmentations_folder, annotation['file_name'])),
dtype=np.uint32
)
except IOError:
raise KeyError('no prediction png file for id: {}'.format(
annotation['image_id']))
pan = rgb2id(pan_format)
semantic = np.zeros(pan.shape, dtype=np.uint8)
RLE_per_category = defaultdict(list)
for segm_info in annotation['segments_info']:
cat_id = segm_info['category_id']
if things_other and categories[cat_id]['isthing'] == 1:
cat_id = OTHER_CLASS_ID
mask = pan == segm_info['id']
if save_as_png:
semantic[mask] = cat_id
else:
RLE = COCOmask.encode(np.asfortranarray(mask.astype('uint8')))
RLE['counts'] = RLE['counts'].decode('utf8')
RLE_per_category[cat_id].append(RLE)
if save_as_png:
Image.fromarray(semantic).save(os.path.join(
semantic_seg_folder, annotation['file_name']))
else:
for cat_id, RLE_list in RLE_per_category.items():
if len(RLE_list) == 1:
RLE = RLE_list[0]
else:
RLE = COCOmask.merge(RLE_list)
semantic_seg_record = {}
semantic_seg_record["image_id"] = annotation['image_id']
semantic_seg_record["category_id"] = cat_id
semantic_seg_record["segmentation"] = RLE
semantic_seg_record["area"] = int(COCOmask.area(RLE))
semantic_seg_record["bbox"] = list(COCOmask.toBbox(RLE))
semantic_seg_record["iscrowd"] = 0
annotation_semantic_seg.append(semantic_seg_record)
print('Core: {}, all {} images processed'.format(
proc_id, len(annotations_set)))
return annotation_semantic_seg
def extract_semantic(input_json_file,
segmentations_folder,
output_json_file,
semantic_seg_folder,
categories_json_file,
things_other):
start_time = time.time()
with open(input_json_file, 'r') as f:
d_coco = json.load(f)
annotations = d_coco['annotations']
if segmentations_folder is None:
segmentations_folder = input_json_file.rsplit('.', 1)[0]
print("EXTRACTING FROM...")
print("COCO panoptic format:")
print("\tSegmentation folder: {}".format(segmentations_folder))
print("\tJSON file: {}".format(input_json_file))
print("SEMANTIC SEGMENTATION")
if output_json_file is not None and semantic_seg_folder is not None:
raise Exception("'--output_json_file' and '--semantic_seg_folder' \
options cannot be used together")
save_as_png = False
if output_json_file is None:
if semantic_seg_folder is None:
raise Exception("One of '--output_json_file' and '--semantic_seg_folder' \
                options must be specified")
else:
save_as_png = True
print("in PNG format:")
print("\tFolder with semnatic segmentations: {}".format(
semantic_seg_folder))
if not os.path.isdir(semantic_seg_folder):
print("Creating folder {} for semantic segmentation PNGs".format(
semantic_seg_folder))
os.mkdir(semantic_seg_folder)
else:
print("in COCO detection format:")
print("\tJSON file: {}".format(output_json_file))
if things_other:
print("Merging all things categories into 'other' category")
print('\n')
with open(categories_json_file, 'r') as f:
categories_list = json.load(f)
categories = {category['id']: category for category in categories_list}
cpu_num = multiprocessing.cpu_count()
annotations_split = np.array_split(annotations, cpu_num)
print("Number of cores: {}, images per core: {}".format(
cpu_num, len(annotations_split[0])))
workers = multiprocessing.Pool(processes=cpu_num)
processes = []
for proc_id, annotations_set in enumerate(annotations_split):
p = workers.apply_async(extract_semantic_single_core,
(proc_id, annotations_set, segmentations_folder,
output_json_file, semantic_seg_folder,
categories, save_as_png, things_other))
processes.append(p)
annotations_coco_semantic_seg = []
for p in processes:
annotations_coco_semantic_seg.extend(p.get())
if not save_as_png:
for idx, ann in enumerate(annotations_coco_semantic_seg):
ann['id'] = idx
d_coco['annotations'] = annotations_coco_semantic_seg
categories_coco_semantic_seg = []
for category in categories_list:
if things_other and category['isthing'] == 1:
continue
category.pop('isthing')
category.pop('color')
categories_coco_semantic_seg.append(category)
if things_other:
categories_coco_semantic_seg.append({'id': OTHER_CLASS_ID,
'name': 'other',
'supercategory': 'other'})
d_coco['categories'] = categories_coco_semantic_seg
save_json(d_coco, output_json_file)
t_delta = time.time() - start_time
print("Time elapsed: {:0.2f} seconds".format(t_delta))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="This script converts data in panoptic COCO format to \
semantic segmentation. All segments with the same semantic class in one \
image are combined together. See this file's head for more information."
)
parser.add_argument('--input_json_file', type=str,
help="JSON file with panoptic data")
parser.add_argument(
'--segmentations_folder', type=str, default=None, help="Folder with \
panoptic COCO format segmentations. Default: X if input_json_file is \
X.json"
)
parser.add_argument('--output_json_file', type=str, default=None,
help="JSON file with semantic data. If '--output_json_file' \
is specified, resulting semantic segmentation will be \
stored as a JSON file in COCO stuff format (see \
http://cocodataset.org/#format-data for details).")
parser.add_argument('--semantic_seg_folder', type=str, default=None,
help="Folder for semantic segmentation. If '--semantic_seg_folder' \
is specified, resulting semantic segmentation will be \
stored in the specified folder in PNG format.")
parser.add_argument('--categories_json_file', type=str,
help="JSON file with Panoptic COCO categories information",
default='./panoptic_coco_categories.json')
parser.add_argument('--things_other', action='store_true',
help="Is set, all things classes are merged into one \
'other' class")
args = parser.parse_args()
extract_semantic(args.input_json_file,
args.segmentations_folder,
args.output_json_file,
args.semantic_seg_folder,
args.categories_json_file,
args.things_other)
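# Hedged example invocation (the script and file names are hypothetical; exactly one of
# --output_json_file / --semantic_seg_folder must be given, as enforced above):
#
#   python extract_semantic.py \
#       --input_json_file panoptic_val2017.json \
#       --semantic_seg_folder semantic_val2017/ \
#       --categories_json_file panoptic_coco_categories.json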
|
StarcoderdataPython
|
11312284
|
<reponame>soren5/bee_bot
import pprint
import json
import time
import os
def create_report(filename, dirname, report, extra):
pp = pprint.PrettyPrinter(indent=4)
message = pp.pformat(report)
extra = pp.pformat(extra)
todate = time.localtime()
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", todate)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(dirname + filename, 'w') as fp:
json.dump([timestamp, message, extra], fp)
def get_progress_data(current_iteration, max_iteration, start_time):
progress_data = {}
progress_percentage = float(current_iteration + 1) / float(max_iteration)
time_elapsed = float(time.time() - start_time)
time_estimate = float(time_elapsed) / progress_percentage
    hours_elapsed = int(time_elapsed / 60 / 60)
    minutes_elapsed = int(time_elapsed / 60 % 60)
    seconds_elapsed = int(time_elapsed % 60)
    hours_left = int(time_estimate / 60 / 60)
    minutes_left = int(time_estimate / 60 % 60)
    seconds_left = int(time_estimate % 60)
progress_data['progress_percentage'] = str(progress_percentage)
progress_data['time_elapsed'] = str(hours_elapsed) + ':' + str(minutes_elapsed) + ':' + str(seconds_elapsed)
progress_data['time_left'] = str(hours_left) + ':' + str(minutes_left) + ':' + str(seconds_left)
return progress_data
#create_report("test.json", 'reports/', ["This is an example report"], ["this is additional hidden information"])
|
StarcoderdataPython
|
4855183
|
from django.shortcuts import render
from index.models import *
from django.views.generic import ListView
from django.conf import settings
_ = settings.RANKING_VIEW
def rankingView(request):
"""path '' handler """
    # trending songs (ordered by search count)
searchs = Dynamic.objects.select_related('song').order_by('-search').all()[:_['SEARCHS']]
labels = Label.objects.all()
    # song list information
t = request.GET.get('type', '')
if t:
dynamics = \
Dynamic.objects.select_related('song').filter(song__label=t).order_by('-plays').all()[:_['TOP']]
else:
dynamics = Dynamic.objects.select_related('song').order_by('-plays').all()[:_['TOP']]
return render(request, 'ranking.html', locals())
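# Hedged wiring sketch (the URLconf below is an assumption, not part of this file):
#
#     # index/urls.py
#     from django.urls import path
#     from .views import rankingView
#     urlpatterns = [path('', rankingView, name='ranking')]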
|
StarcoderdataPython
|
1705345
|
import os
import simur
import sys
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def usage():
print(f'{sys.argv[0]} vcs reporoot relpath revision')
print(f' e.g. {sys.argv[0]} svn https://barbar/svn/SVNrepo trunk/main.c 3')
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def handle_svn(reporoot, relpath, revision):
url = reporoot + '/' + relpath
command = f'svn cat {url}@{revision}'
reply, exit_code = simur.run_process(command, True, extra_dir=None,
as_text=False)
return reply
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def handle_local_git(reporoot, revision):
# Make a pushd to the git dir
curr_dir = os.getcwd()
os.chdir(reporoot)
command = f'git show {revision}'
reply, exit_code = simur.run_process(command, True, extra_dir=reporoot,
as_text=False)
os.chdir(curr_dir)
return reply
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def handle_remote_git(reporoot, revision):
git_dir = simur.find_and_update_git_cache(reporoot)
if not git_dir:
reply = f'Could not find a .git dir from {reporoot}\n'
reply += f'when looking in {git_dir}'
else:
curr_dir = os.getcwd()
os.chdir(git_dir)
command = f'git show {revision}'
reply, exit_code = simur.run_process(command, True, extra_dir=git_dir,
as_text=False)
os.chdir(curr_dir)
return reply
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def handle_git(reporoot, revision):
if os.path.exists(reporoot):
reply = handle_local_git(reporoot, revision)
else:
reply = handle_remote_git(reporoot, revision)
return reply
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def main():
if len(sys.argv) < 5:
print("Too few arguments")
usage()
return 3
vcs = sys.argv[1]
reporoot = sys.argv[2]
relpath = sys.argv[3]
revision = sys.argv[4]
if vcs == 'svn':
reply = handle_svn(reporoot, relpath, revision)
elif vcs == 'git':
reply = handle_git(reporoot, revision)
else:
print(f'Cannot handle {vcs}, only svn and git\n')
usage()
return 3
sys.stdout.buffer.write(reply)
return 0
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
sys.exit(main())
|
StarcoderdataPython
|
9664091
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import base58
import uuid
# helper function for constructing paths to resource files.
def resource_path(relative):
if hasattr(sys, "_MEIPASS"):
return os.path.join(sys._MEIPASS, relative)
abspath = os.path.abspath(os.path.join(__file__, "..", "..", ".."))
abspath = os.path.dirname(abspath)
#print("abspath: ", abspath)
return os.path.join(abspath, relative)
# helper function for constructing paths to resource files.
def base_path():
if hasattr(sys, "_MEIPASS"):
return sys._MEIPASS
else:
# assumes this file is located at src/eavatar/util/__init__.py
abspath = os.path.abspath(os.path.join(__file__, "..", "..", ".."))
abspath = os.path.dirname(abspath)
return abspath
def is_frozen():
""" Checks if in frozen mode.
:return:
"""
return hasattr(sys, "frozen")
def new_object_id():
""" Generates a new object ID in base58_check format.
:return:
"""
    oid = uuid.uuid1().bytes  # the .bytes property works on both Python 2 and 3 (get_bytes() is Python 2 only)
return base58.b58encode_check(oid)
|
StarcoderdataPython
|
1672561
|
#!/usr/bin/env python
from pyknon.plot import plot2, plot2_bw
from pyknon.simplemusic import inversion
def plot_color():
n1 = [11, 10, 7]
for x in range(12):
plot2(n1, inversion(n1, x), "ex-inversion-plot-{0:02}.ps".format(x))
n2 = [1, 3, 7, 9, 4]
plot2(n2, inversion(n2, 9), "ex-inversion-plot.ps")
def plot_black_and_white():
n1 = [11, 10, 7]
for x in range(12):
plot2_bw(n1, inversion(n1, x), "ex-inversion-plot-bw-{0:02}.ps".format(x))
n2 = [1, 3, 7, 9, 4]
plot2_bw(n2, inversion(n2, 9), "ex-inversion-plot-bw.ps")
if __name__ == "__main__":
plot_color()
plot_black_and_white()
|
StarcoderdataPython
|
8184419
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Testing suite for COS method.
"""
from __future__ import print_function, division
import unittest as ut
import numpy as np
import scipy.stats as scs
from impvol import impvol_bisection, blackscholes_norm, lfmoneyness
from fangoosterlee import (cosmethod, cfinverse, GBM, GBMParam,
VarGamma, VarGammaParam,
Heston, HestonParam, ARG, ARGParam)
class COSTestCase(ut.TestCase):
"""Test COS method."""
def test_gbm(self):
"""Test GBM model."""
price, strike = 100, 110
riskfree, maturity = .01, 30/365
call = True
put = np.logical_not(call)
moneyness = lfmoneyness(price, strike, riskfree, maturity)
sigma = .15
model = GBM(GBMParam(sigma=sigma), riskfree, maturity)
premium = cosmethod(model, moneyness=moneyness, call=call)
premium_true = blackscholes_norm(moneyness, maturity, sigma, call)
impvol_model = impvol_bisection(moneyness, maturity, premium, call)
self.assertEqual(premium.shape, (1,))
np.testing.assert_array_almost_equal(premium, premium_true, 3)
np.testing.assert_array_almost_equal(impvol_model, sigma, 2)
moneyness = np.linspace(0, .1, 10)
premium = cosmethod(model, moneyness=moneyness, call=call)
premium_true = blackscholes_norm(moneyness, maturity, sigma, call)
impvol_model = impvol_bisection(moneyness, maturity, premium, call)
impvol_true = np.ones_like(impvol_model) * sigma
self.assertEqual(premium.shape, moneyness.shape)
np.testing.assert_array_almost_equal(premium, premium_true, 2)
np.testing.assert_array_almost_equal(impvol_model, impvol_true, 2)
riskfree = np.zeros_like(moneyness)
premium = cosmethod(model, moneyness=moneyness, call=call)
premium_true = blackscholes_norm(moneyness, maturity, sigma, call)
impvol_model = impvol_bisection(moneyness, maturity, premium, call)
self.assertEqual(premium.shape, moneyness.shape)
np.testing.assert_array_almost_equal(premium, premium_true, 3)
np.testing.assert_array_almost_equal(impvol_model, sigma, 2)
moneyness = np.linspace(-.1, 0, 10)
premium = cosmethod(model, moneyness=moneyness, call=put)
premium_true = blackscholes_norm(moneyness, maturity, sigma, put)
impvol_model = impvol_bisection(moneyness, maturity, premium, put)
np.testing.assert_array_almost_equal(premium, premium_true, 2)
np.testing.assert_array_almost_equal(impvol_model, impvol_true, 2)
def test_vargamma(self):
"""Test VarGamma model."""
price, strike = 100, 90
riskfree, maturity = 0, 30/365
moneyness = np.log(strike/price) - riskfree * maturity
nu = .2
theta = -.14
sigma = .25
param = VarGammaParam(theta=theta, nu=nu, sigma=sigma)
model = VarGamma(param, riskfree, maturity)
premium = cosmethod(model, moneyness=moneyness, call=True)
self.assertEqual(premium.shape, (1,))
moneyness = np.linspace(-.1, .1, 10)
premium = cosmethod(model, moneyness=moneyness, call=True)
self.assertEqual(premium.shape, moneyness.shape)
riskfree = np.zeros_like(moneyness)
premium = cosmethod(model, moneyness=moneyness, call=True)
self.assertEqual(premium.shape, moneyness.shape)
def test_heston(self):
"""Test Heston model."""
price, strike = 100, 90
riskfree, maturity = 0, 30/365
moneyness = np.log(strike/price) - riskfree * maturity
lm = 1.5768
mu = .12**2
eta = .5751
rho = -.0
sigma = .12**2
param = HestonParam(lm=lm, mu=mu, eta=eta, rho=rho, sigma=sigma)
model = Heston(param, riskfree, maturity)
premium = cosmethod(model, moneyness=moneyness, call=True)
self.assertEqual(premium.shape, (1,))
moneyness = np.linspace(-.1, .1, 10)
premium = cosmethod(model, moneyness=moneyness, call=True)
self.assertEqual(premium.shape, moneyness.shape)
riskfree = np.zeros_like(moneyness)
premium = cosmethod(model, moneyness=moneyness, call=True)
self.assertEqual(premium.shape, moneyness.shape)
def test_argamma(self):
"""Test ARG model."""
price, strike = 100, 90
riskfree, maturity = 0, 30/365
moneyness = np.log(strike/price) - riskfree * maturity
rho = .55
delta = .75
mu = .2**2/365
sigma = .2**2/365
phi = -.0
theta1 = -16.0
theta2 = 20.95
param = ARGParam(rho=rho, delta=delta, mu=mu, sigma=sigma,
phi=phi, theta1=theta1, theta2=theta2)
model = ARG(param, riskfree, maturity)
premium = cosmethod(model, moneyness=moneyness, call=True)
self.assertEqual(premium.shape, (1,))
moneyness = np.linspace(-.1, .1, 10)
premium = cosmethod(model, moneyness=moneyness, call=True)
self.assertEqual(premium.shape, moneyness.shape)
riskfree = np.zeros_like(moneyness)
premium = cosmethod(model, moneyness=moneyness, call=True)
self.assertEqual(premium.shape, moneyness.shape)
def test_cfinverse(self):
"""Test Fourier inversion."""
riskfree, maturity = 0, 30/365
sigma = .15
points = int(1e5)
model = GBM(GBMParam(sigma=sigma), riskfree, maturity)
grid, density = cfinverse(model.charfun, points=points,
alim=-1e5, blim=1e5)
loc = (riskfree - sigma**2/2) * maturity
scale = sigma**2 * maturity
norm_density = scs.norm.pdf(grid, loc=loc, scale=scale**.5)
self.assertEqual(grid.shape, (points,))
self.assertEqual(density.shape, (points,))
good = np.abs(grid) < 2
np.testing.assert_array_almost_equal(density[good], norm_density[good],
decimal=2)
if __name__ == '__main__':
ut.main()
|
StarcoderdataPython
|
8183835
|
<filename>lib/utils.py
import pandas as pd
import os
import shutil
import pdb
def make_data_folder(mode):
csv_file = "sample/"+mode+"_clean.csv"
df=pd.read_csv(csv_file)
print(df.head())
directory= "sample/ImageData/"
    # pdb.set_trace()  # debugging breakpoint disabled so the copy loop can run unattended
for index,rows in df.iterrows():
img_folder =directory+str(rows['landmark_id'])
if not os.path.exists(img_folder):
print("Creating Directory {}".format(img_folder))
os.makedirs(img_folder)
else:
print("Folder {} exists!".format(img_folder))
img = "sample/"+mode+"/"+str(rows["id"])+".jpg"
print("Copying image {}".format(img))
shutil.copy2(img,img_folder)
if __name__=="__main__":
make_data_folder("train")
|
StarcoderdataPython
|
3515919
|
import sys
from je_api_testka.utils.test_record.test_record_class import test_record_instance
from je_api_testka.utils.exception.exceptions import HTMLException
from je_api_testka.utils.exception.exception_tag import html_generate_no_data_tag
from threading import Lock
lock = Lock()
_html_string_head = \
"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8"/>
        <title>Test Report</title>
<style>
body{
font-size: 100%;
}
h1{
font-size: 2em;
}
.main_table {
margin: 0 auto;
border-collapse: collapse;
width: 75%;
font-size: 1.5em;
}
.success_table_head {
border: 3px solid #262626;
background-color: aqua;
font-family: "Times New Roman", sans-serif;
text-align: center;
}
.failure_table_head {
border: 3px solid #262626;
background-color: #f84c5f;
font-family: "Times New Roman", sans-serif;
text-align: center;
}
.table_data_field_title {
border: 3px solid #262626;
padding: 0;
margin: 0;
background-color: #dedede;
font-family: "Times New Roman", sans-serif;
text-align: center;
width: 25%;
}
.table_data_field_text {
border: 3px solid #262626;
padding: 0;
margin: 0;
background-color: #dedede;
font-family: "Times New Roman", sans-serif;
text-align: left;
width: 75%;
}
.text {
text-align: center;
font-family: "Times New Roman", sans-serif;
}
</style>
</head>
<body>
<h1 class="text">
Test Report
</h1>
""".strip()
_html_string_bottom = \
"""
</body>
</html>
""".strip()
_success_table = \
r"""
<table class="main_table">
<thead>
<tr>
<th colspan="2" class="success_table_head">Test Report</th>
</tr>
</thead>
<tbody>
<tr>
<td class="table_data_field_title">status_code</td>
<td class="table_data_field_text">{status_code}</td>
</tr>
<tr>
<td class="table_data_field_title">text</td>
<td class="table_data_field_text">{text}</td>
</tr>
<tr>
<td class="table_data_field_title">content</td>
<td class="table_data_field_text">{content}</td>
</tr>
<tr>
<td class="table_data_field_title">headers</td>
<td class="table_data_field_text">{headers}</td>
</tr>
<tr>
<td class="table_data_field_title">history</td>
<td class="table_data_field_text">{history}</td>
</tr>
<tr>
<td class="table_data_field_title">encoding</td>
<td class="table_data_field_text">{encoding}</td>
</tr>
<tr>
<td class="table_data_field_title">cookies</td>
<td class="table_data_field_text">{cookies}</td>
</tr>
<tr>
<td class="table_data_field_title">elapsed</td>
<td class="table_data_field_text">{elapsed}</td>
</tr>
<tr>
<td class="table_data_field_title">request_time_sec</td>
<td class="table_data_field_text">{request_time_sec}</td>
</tr>
<tr>
<td class="table_data_field_title">request_method</td>
<td class="table_data_field_text">{request_method}</td>
</tr>
<tr>
<td class="table_data_field_title">request_url</td>
<td class="table_data_field_text">{request_url}</td>
</tr>
<tr>
<td class="table_data_field_title">request_body</td>
<td class="table_data_field_text">{request_body}</td>
</tr>
<tr>
<td class="table_data_field_title">start_time</td>
<td class="table_data_field_text">{start_time}</td>
</tr>
<tr>
<td class="table_data_field_title">end_time</td>
<td class="table_data_field_text">{end_time}</td>
</tr>
</tbody>
</table>
<br>
""".strip()
_failure_table = \
r"""
<table class="main_table">
<thead>
<tr>
<th colspan="2" class="failure_table_head">Test Report</th>
</tr>
</thead>
<tbody>
<tr>
<td class="table_data_field_title">http_method</td>
<td class="table_data_field_text">{http_method}</td>
</tr>
<tr>
<td class="table_data_field_title">test_url</td>
<td class="table_data_field_text">{test_url}</td>
</tr>
<tr>
<td class="table_data_field_title">soap</td>
<td class="table_data_field_text">{soap}</td>
</tr>
<tr>
<td class="table_data_field_title">record_request_info</td>
<td class="table_data_field_text">{record_request_info}</td>
</tr>
<tr>
<td class="table_data_field_title">clean_record</td>
<td class="table_data_field_text">{clean_record}</td>
</tr>
<tr>
<td class="table_data_field_title">result_check_dict</td>
<td class="table_data_field_text">{result_check_dict}</td>
</tr>
<tr>
<td class="table_data_field_title">error</td>
<td class="table_data_field_text">{error}</td>
</tr>
</tbody>
</table>
<br>
""".strip()
def generate_html(html_name: str = "default_name"):
"""
    :param html_name: name of the HTML report file to write (the .html extension is appended)
    :return: (success_list, failure_list) with the rendered table fragments
"""
if len(test_record_instance.test_record_list) == 0 and len(test_record_instance.error_record_list) == 0:
raise HTMLException(html_generate_no_data_tag)
else:
success_list = list()
for record_data in test_record_instance.test_record_list:
success_list.append(
_success_table.format(
status_code=record_data.get("status_code"),
text=record_data.get("text"),
content=str(record_data.get("content"), encoding="utf-8"),
headers=record_data.get("headers"),
history=record_data.get("history"),
encoding=record_data.get("encoding"),
cookies=record_data.get("cookies"),
elapsed=record_data.get("elapsed"),
request_time_sec=record_data.get("request_time_sec"),
request_method=record_data.get("request_method"),
request_url=record_data.get("request_url"),
request_body=record_data.get("request_body"),
start_time=record_data.get("start_time"),
end_time=record_data.get("end_time"),
)
)
failure_list = list()
if len(test_record_instance.error_record_list) == 0:
pass
else:
for record_data in test_record_instance.error_record_list:
failure_list.append(
_failure_table.format(
http_method=record_data[0].get("http_method"),
test_url=record_data[0].get("test_url"),
soap=record_data[0].get("soap"),
record_request_info=record_data[0].get("record_request_info"),
clean_record=record_data[0].get("clean_record"),
result_check_dict=record_data[0].get("result_check_dict"),
error=record_data[1]
),
)
try:
lock.acquire()
with open(html_name + ".html", "w+") as file_to_write:
file_to_write.writelines(
_html_string_head
)
for success in success_list:
file_to_write.write(success)
for failure in failure_list:
file_to_write.write(failure)
file_to_write.writelines(
_html_string_bottom
)
except Exception as error:
print(repr(error), file=sys.stderr)
finally:
lock.release()
return success_list, failure_list
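# Hedged usage sketch: generate_html only renders whatever the shared
# test_record_instance already holds, so requests must have been recorded through
# this package's request machinery beforehand (the report name is arbitrary):
#
#     success_rows, failure_rows = generate_html("api_test_report")
#     # -> writes api_test_report.html into the current working directory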
|
StarcoderdataPython
|
5040528
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 ft=python
# author : Prakash [प्रकाश]
# date : 2019-09-08 23:33
from .asctouni import converter
from .translit import translit
from .asctouni.converter import Converter
from .translit.translit import Translit
|
StarcoderdataPython
|
1697899
|
<filename>hydromet_forecasting/evaluating.py
# -*- encoding: UTF-8 -*-
from numpy import nan, isnan, arange, corrcoef, mean
import numpy as np
from matplotlib import pyplot as plt
import pandas
from hydromet_forecasting.timeseries import FixedIndexTimeseries
from string import Template
import base64
import tempfile
from os import path
from collections import OrderedDict, defaultdict
from babel.dates import format_date, get_month_names
from utils import to_str, activate
from plot_utils import PlotUtils
class Evaluator(object):
"""Evaluator class for a predicted timeseries that is given as an FixedIndexTimeseries instance and has annual seasonality.
This class enables to evaluate the performance of an forecast by means of comparing the observed and the forecasted
timeseries. Those series need to be instances of the FixedIndexTimeseries class. The Evaluator class implements
various methods to analyse the series statistically. The method write_html writes a report to a specified path.
Attributes:
forecast: the forecasted timeseries
y_adj: the observed timeseries, reduced to the index of the forecasted timeseries
"""
_rel_error = None
_p = None
def __init__(self, y, forecast):
"""Initialising the Evaluator Instance
Args:
y: A FixedIndexTimeseries instance of the observed data
forecast: A FixedIndexTimeseries instance of the forecasted data of the same mode as y.
Returns:
The Evaluator instance
Raises:
ValueError: When the two timeseries are not of the same mode.
            InsufficientData: If the length of the forecasted timeseries is not sufficient for evaluation. At least two
complete years of data must be provided in order to compute standard deviations etc.
"""
if not y.mode == forecast.mode:
raise ValueError("The target timeseries is not of the same mode as the forecasted timeseries.")
self.y = y
self.forecast = forecast
self.y_adj=FixedIndexTimeseries(self.y.timeseries[self.forecast.timeseries.index], mode=self.y.mode)
datagroups = [len(self.forecast.data_by_index(i)) for i in range(1,self.forecast.maxindex+1)]
if min(datagroups)<2:
raise self.__InsufficientData("The length of the forecasted timeseries is not sufficient.")
def computeP(self, annualindex=None):
""" Returns the P value (Percentage of forecasts with error/stdev > 0.674)
Args:
annualindex (int): default None
Returns:
A list of values for each period of the year of the forecasted timeseries if annualindex is None, else one value.
            NaN is returned if the value could not be determined (e.g. not enough data)
Raises:
None
"""
P = []
indexrange = range(1, self.y_adj.maxindex+1) if annualindex is None else [annualindex]
for index in indexrange:
allowed_error = 0.674 * self.y.stdev_s(index)
try:
error = abs(self.forecast.data_by_index(index) - self.y_adj.data_by_index(index))
error = error.dropna()
good = sum(error <= allowed_error)
P.append(float(good) / len(error))
except:
P.append(nan)
return P
def computeRelError(self, annualindex=None):
""" Returns the relative error value of the forecast (error / stdev.sample)
Args:
None
Returns:
A list of values for each period of the year of the forecasted timeseries if annualindex is None, else one value.
            NaN is returned if the value could not be determined (e.g. not enough data)
Raises:
None
"""
relerror = []
indexrange = range(1, self.y_adj.maxindex + 1) if annualindex is None else [annualindex]
for index in indexrange:
stdev = self.y.stdev_s(index)
try:
error = abs(self.forecast.data_by_index(index) - self.y_adj.data_by_index(index))
error = error.dropna()
relerror.append(error.values/stdev)
except:
relerror.append(nan)
return relerror
def trainingdata_count(self, annualindex = None):
""" Returns the number of training data for each period of the year
Args:
annualindex (int): the annualindex for which trainingdata shall be counted. default=None
Returns:
A list of values for each period of the year of the forecasted timeseries if annualindex is None, else a single integer
Raises:
None
"""
if annualindex is None:
count = list()
indexrange = range(1, self.y_adj.maxindex + 1)
for index in indexrange:
count.append(len(self.forecast.data_by_index(index)))
else:
count = len(self.forecast.data_by_index(annualindex))
return count
def plot_y_stats(self):
norm = self.y.norm()
stdev = self.y.stdev_s()
upper = [norm[i]+stdev[i] for i in range(0,len(stdev))]
lower = [norm[i]-stdev[i] for i in range(0,len(stdev))]
fig, ax = PlotUtils.prepare_figure(len(stdev))
[ax.plot(self.y.data_by_year(year).values, label='individual years', color='blue', alpha=.2) for year in
range(self.y.timeseries.index[0].year, self.y.timeseries.index[-1].year + 1)]
ax.plot(upper, color='black')
ax.plot(lower, color='black',label="+/- STDEV")
ax.plot(norm,label="NORM", color='red')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[-3:], labels[-3:])
plt.ylabel(self.y.label)
return fig
def plot_trainingdata(self):
count = self.trainingdata_count()
fig, ax = PlotUtils.prepare_figure(len(count))
        ax.bar(range(0, len(count)), count, width=0.7)
plt.ylabel(_("Number of training data"))
return fig
def summary_table(self, frequency):
header, indices = self.period_header_and_indices(frequency, len(self.rel_error))
data = OrderedDict((
(header, indices),
(_('Number of training data'), self.trainingdata_count()),
(_('Minimum'), self.y.min()),
(_('Average'), np.round(self.y.norm(), 2)),
(_('Maximum'), np.round(self.y.max(), 2)),
(_('+/- d'), np.round(self.y.stdev_s(), 2)),
(_('P%'), np.round(self.p, 2) * 100),
(_('ScaledError'), [np.round(mean(x), 2) for x in self.rel_error]),
))
df = pandas.DataFrame(data)
return df.to_html(justify='justify-all', index=False)
def p_plot_table(self, frequency):
header, indices = self.period_header_and_indices(frequency, len(self.p))
data = OrderedDict((
(header, indices),
(_('P%'), self.p),
))
df = pandas.DataFrame(data)
return df.to_html(index=False)
def rel_error_table(self, frequency):
header, indices = self.period_header_and_indices(frequency, len(self.rel_error))
data = OrderedDict((
(header, indices),
(_('ScaledError'), [mean(x) for x in self.rel_error]),
))
df = pandas.DataFrame(data)
return df.to_html(index=False)
def get_spacers(self, frequency, language):
spacer_1 = defaultdict(lambda: '0px')
spacer_2 = defaultdict(lambda: '0px')
spacer_3 = defaultdict(lambda: '0px')
spacer_4 = defaultdict(lambda: '0px')
spacer_5 = defaultdict(lambda: '0px')
# TODO fine tune if needed
# spacer_1['monthly_ru'] = '600px'
# spacer_2['monthly_ru'] = '800px'
# spacer_3['monthly_ru'] = '30px'
# spacer_4['monthly_ru'] = '500px'
# spacer_5['monthly_ru'] = '30px'
#
# spacer_2['monthly_en'] = '100px'
# spacer_3['monthly_en'] = '200px'
# spacer_4['monthly_en'] = '500px'
# spacer_5['monthly_en'] = '30px'
#
# spacer_1['decade_en'] = '100px'
# spacer_2['decade_en'] = '100px'
#
# spacer_1['decade_ru'] = '100px'
# spacer_2['decade_ru'] = '100px'
#
# spacer_1['fiveday_en'] = '200px'
# spacer_2['fiveday_en'] = '300px'
# spacer_4['fiveday_en'] = '100px'
#
# spacer_1['fiveday_ru'] = '100px'
# spacer_2['fiveday_ru'] = '300px'
# spacer_4['fiveday_ru'] = '100px'
return {
'SPACER_1': spacer_1['_'.join((frequency, language))],
'SPACER_2': spacer_2['_'.join((frequency, language))],
'SPACER_3': spacer_3['_'.join((frequency, language))],
'SPACER_4': spacer_4['_'.join((frequency, language))],
'SPACER_5': spacer_5['_'.join((frequency, language))],
}
@staticmethod
def load_template_file(filename='template.html'):
template_path = path.join(path.dirname(__file__), filename)
with open(template_path, 'r') as template_path:
page = Template(template_path.read())
return page
def write_html(
self,
username,
organization,
site_code,
site_name,
forecast_model_name,
forecast_method,
forecast_model_params,
forecast_method_params,
filename=None,
htmlpage=None,
language='en'
):
activate(language)
if self.y.mode == 'p':
frequency = 'fiveday'
elif self.y.mode == 'd':
frequency = 'decade'
elif self.y.mode == 'm':
frequency = 'monthly'
page = self.load_template_file()
scatter_plot = PlotUtils.plot_ts_comparison(
self.y_adj.timeseries,
self.forecast.timeseries,
frequency,
language=language,
)
scaled_error_title = _('Scaled Error [RMSE/STDEV]')
scaled_error_plot = PlotUtils.plot_rel_error(self.rel_error, frequency, title=scaled_error_title)
scaled_error_table = self.rel_error_table(frequency)
p_plot_title = _('P% Plot')
p_plot_plot = PlotUtils.plot_p(self.p, frequency, title=p_plot_title)
p_plot_table = self.p_plot_table(frequency)
quality_assessment_table = self.summary_table(frequency)
report_data = {
'SITE_INFO': _('Station: {code} - {name}').format(
code=to_str(site_code),
name=to_str(site_name)
),
'USERNAME': username,
'ORGANIZATION': organization,
'TITLE': _('Forecast Model Training Report'),
'REPORT_DATE': format_date(format='long', locale=language),
'PLOTS_HEADER': _('{frequency} Forecast Model Quality Assessment').format(
frequency=frequency.capitalize()),
'SCATTER_PLOT_LABEL': _('Scatter Plot: Observed versus Predicted values'),
'SCALED_ERROR_LABEL': scaled_error_title,
'P_PLOT_LABEL': p_plot_title,
'QUALITY_ASSESSMENT_LABEL': _('Quality Assessment'),
'SCATTER_PLOT_IMAGE': scatter_plot,
'SCALED_ERROR_PLOT_IMAGE': scaled_error_plot,
'SCALED_ERROR_TABLE': scaled_error_table,
'P_PLOT_IMAGE': p_plot_plot,
'P_PLOT_TABLE': p_plot_table,
'QUALITY_ASSESSMENT_TABLE': quality_assessment_table,
'FORECAST_MODEL_INFO': _('Forecast model info:'),
'FORECAST_MODEL_NAME': _('Name: ') + forecast_model_name,
'FORECAST_METHOD': _('Method: ') + forecast_method,
'FORECAST_MODEL_PARAMS': _('Model parameters: ') + str(forecast_model_params),
'FORECAST_METHOD_PARAMS': _('Method parameters: ') + str(forecast_method_params),
}
report_data.update(self.get_spacers(frequency, language))
self.encode_utf8(report_data)
if filename:
htmlpage = open(filename, 'w')
htmlpage.write(page.safe_substitute(**report_data))
htmlpage.close()
return filename
elif htmlpage:
htmlpage.write(page.safe_substitute(**report_data))
return htmlpage
@staticmethod
def period_header_and_indices(frequency, num_of_data):
if frequency == 'fiveday':
header = _('pentade').capitalize()
elif frequency == 'decade':
header = _('decade').capitalize()
elif frequency == 'monthly':
header = _('Month')
return header, [x + 1 for x in range(num_of_data)]
@classmethod
def encode_utf8(cls, template_vars):
for key, value in template_vars.iteritems():
template_vars[key] = to_str(value)
@property
def rel_error(self):
if self._rel_error is None:
self._rel_error = self.computeRelError()
return self._rel_error
@property
def p(self):
if self._p is None:
self._p = self.computeP()
return self._p
class __InsufficientData(Exception):
pass
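# Hedged usage sketch (``observed`` and ``predicted`` stand for FixedIndexTimeseries
# instances of the same mode built elsewhere; all names below are placeholders):
#
#     evaluator = Evaluator(observed, predicted)
#     p_values = evaluator.computeP()              # share of forecasts within 0.674 * stdev, per period
#     scaled_errors = evaluator.computeRelError()
#     evaluator.write_html(username, organization, site_code, site_name,
#                          forecast_model_name, forecast_method,
#                          forecast_model_params, forecast_method_params,
#                          filename='training_report.html', language='en')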
class SeasonalEvaluator(object):
def __init__(self, featurenames,selectedfeatures,modelEvaluators):
self.featurenames = featurenames
self.selectedfeatures = selectedfeatures
self.modelEvaluators = modelEvaluators
self.score = [mean(CV.computeRelError()) for CV in self.modelEvaluators]
def __prepare_figure(self, width=12, height=3):
fig, ax = plt.subplots(1, 1)
fig.set_figwidth(width)
fig.set_figheight(height)
fig.subplots_adjust(left=0.06, bottom=0.16, right=0.94, top=0.92)
return fig, ax
def __table_summary(self):
data = dict({
_('Minimum'): mean(self.modelEvaluators[0].y.min()),
_('Average'): self.modelEvaluators[0].y.norm(),
_('Maximum'): self.modelEvaluators[0].y.max(),
_('+/- d'): self.modelEvaluators[0].y.stdev_s()
})
df=pandas.DataFrame(data)
return df.to_html(index=False)
def model_table(self):
feature_selection = OrderedDict()
for i, name in enumerate(self.featurenames):
# translate substrings
for x in ('disch', 'precip', 'temp', 'snow'):
name = name.replace(x.capitalize(), _(x).capitalize())
feature = [selected_feature[i] for selected_feature in self.selectedfeatures]
feature_dict = OrderedDict(((name, feature), ))
feature_selection.update(feature_dict)
data = OrderedDict((
(
_('Number of training data'),
[CV.trainingdata_count()[0] for CV in self.modelEvaluators]
),
(
_('Error/STDEV'),
[round(mean(CV.computeRelError()[0]), 2) for CV in self.modelEvaluators]
),
(
_('P%'),
[round(CV.computeP()[0], 2) for CV in self.modelEvaluators]),
))
data.update(feature_selection)
df = pandas.DataFrame(data)
df = df.sort_values(by=[_('Error/STDEV')])
df.insert(0, column=_('Rank'), value=[x + 1 for x in range(len(self.modelEvaluators))])
return df
def __model_htmltable(self):
# pandas.set_option('display.max_colwidth', 50)
return self.model_table().to_html(justify='justify-all', index=False)
def plot_timeseries(self):
fig, ax = self.__prepare_figure()
[ax.plot(
CV.forecast.timeseries,
color='red',
label=_("individual forecasts").decode('utf-8'),
alpha=.2
) for CV in self.modelEvaluators]
df_concat = pandas.concat(([CV.forecast.timeseries for CV in self.modelEvaluators]))
by_row_index = df_concat.groupby(df_concat.index)
df_means = by_row_index.mean()
ax.plot(df_means, color='black', label=_('mean forecast').decode('utf-8'))
ax.plot(
self.modelEvaluators[0].y.timeseries,
color='green',
label=_('observed').decode('utf-8')
)
ax.set_xlabel(_('Year').decode('utf-8'))
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[-3:], labels[-3:])
return fig
@staticmethod
def load_template_file(filename='template_seasonal.html'):
template_path = path.join(path.dirname(__file__), filename)
with open(template_path, 'r') as template_path:
page = Template(template_path.read())
return page
@classmethod
def encode_utf8(cls, template_vars):
for key, value in template_vars.iteritems():
template_vars[key] = to_str(value)
def write_html(
self,
username,
organization,
site_code,
site_name,
forecast_model_name,
forecast_method,
forecast_model_params,
forecast_method_params,
filename=None,
htmlpage=None,
language='en'
):
activate(language)
page = self.load_template_file()
timeseries_plot = self.__encode_figure(self.plot_timeseries())
quality_assessment_table = self.__table_summary()
model_table = self.__model_htmltable()
report_data = {
'SITE_INFO': _('Station: {code} - {name}').format(
code=to_str(site_code),
name=to_str(site_name)
),
'USERNAME': username,
'ORGANIZATION': organization,
'TITLE': _('Forecast Model Training Report'),
'REPORT_DATE': format_date(format='long', locale=language),
'PLOTS_HEADER': _('{frequency} Forecast Model Quality Assessment').format(
frequency=_('seasonal').capitalize()),
'MODEL_TABLE_LABEL': _('Model table'),
'MODEL_TABLE': model_table,
'QUALITY_ASSESSMENT_LABEL': _('Quality Assessment'),
'QUALITY_ASSESSMENT_TABLE': quality_assessment_table,
'TIMESERIES_LABEL': _('Timeseries plot'),
'TIMESERIES_PLOT': timeseries_plot,
'FORECAST_MODEL_INFO': _('Forecast model info:'),
'FORECAST_MODEL_NAME': _('Name: ') + forecast_model_name,
'FORECAST_METHOD': _('Method: ') + forecast_method,
'FORECAST_MODEL_PARAMS': _('Model parameters: ') + str(forecast_model_params),
'FORECAST_METHOD_PARAMS': _('Method parameters: ') + str(forecast_method_params),
}
self.encode_utf8(report_data)
if filename:
htmlpage = open(filename, 'w')
htmlpage.write(page.safe_substitute(**report_data))
htmlpage.close()
return filename
elif htmlpage:
htmlpage.write(page.safe_substitute(**report_data))
return htmlpage
def __encode_figure(self, fig):
with tempfile.TemporaryFile(suffix=".png") as tmpfile:
fig.savefig(tmpfile, format="png")
tmpfile.seek(0)
encoded = base64.b64encode(tmpfile.read())
tmpfile.close()
return encoded
|
StarcoderdataPython
|
8009277
|
from distutils.core import setup
import py2exe
import matplotlib
#========================================================================================================================
setup(
windows=[{"script":"ServiceApplication.py","dest_base":"ServiceApplication"}],
data_files=matplotlib.get_py2exe_datafiles(),
options = {
'py2exe': {
'optimize': 2,
'dist_dir':'ServiceApplication',
'includes': ['logging'],
#~ 'excludes': ['OpenGL',],
"dll_excludes": ["MSVCP90.dll","OLEACC.dll",]
}
},
)#End of setup
#==========================================================================================================================
# Excluding OLEACC.dll is necessary for wxPython. Another option is to delete this DLL from the compiled folder.
# MSVCP90.dll also needs to be excluded, otherwise py2exe fails during compilation with: error: [Errno 2] No such file or directory: 'MSVCP90.dll'
|
StarcoderdataPython
|
9730235
|
<gh_stars>0
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Modals for edit objects."""
from lib import decorator
from lib.constants import locator
from lib.page.modal import base as modal_base, delete_object
from lib.utils import selenium_utils
class _EditModal(modal_base.BaseModal):
"""Modal for edit objects."""
def delete_object(self):
"""Return: delete_object.DeleteObjectModal
"""
self._driver.find_element(*locator.ModalEditObject.BUTTON_DELETE).click()
return delete_object.DeleteObjectModal(self._driver)
@decorator.handle_alert
def save_and_close(self):
"""Edit object and close Edit modal."""
self.button_save_and_close.click()
selenium_utils.wait_until_not_present(
self._driver, self._locator_button_save)
class Assessments(modal_base.AsmtsModal, _EditModal):
"""Assessments edit modal."""
class Programs(modal_base.ProgramsModal, _EditModal):
"""Programs edit modal."""
class Controls(modal_base.ControlsModal, _EditModal):
"""Controls edit modal."""
class Risks(modal_base.RisksModal, _EditModal):
"""Risks edit modal."""
class OrgGroups(modal_base.OrgGroupsModal, _EditModal):
"""Org Groups edit modal."""
class Processes(modal_base.ProcessesModal, _EditModal):
"""Processes edit modal."""
class DataAssets(modal_base.RisksModal, _EditModal):
"""Data Assets edit modal."""
class Systems(modal_base.RisksModal, _EditModal):
"""Systems edit modal."""
class Products(modal_base.ProductsModal, _EditModal):
"""Products edit modal."""
class Projects(modal_base.ProjectsModal, _EditModal):
"""Projects edit modal."""
|
StarcoderdataPython
|
11297700
|
<gh_stars>0
# Generated by Django 3.2.12 on 2022-03-29 07:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipes', '0006_recipeingredientimage'),
]
operations = [
migrations.AlterField(
model_name='recipeingredientimage',
name='image',
field=models.ImageField(upload_to='recipes/'),
),
]
|
StarcoderdataPython
|
1878297
|
<gh_stars>10-100
"""scrapli_community.siemens.roxii"""
from scrapli_community.siemens.roxii.siemens_roxii import SCRAPLI_PLATFORM
__all__ = ("SCRAPLI_PLATFORM",)
|
StarcoderdataPython
|
6447605
|
<filename>morphounit/plots/plot_feats_pop_morp_stats.py
# For data manipulation
import os
from scipy import stats
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib
matplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend.
from matplotlib import pyplot as plt
class FeatsPop_MorphStats:
"""
Displays data in dictionary of cells population morpho-features
in the form of correlation, countour and distribution plots
"""
def __init__(self, testObj):
self.testObj = testObj
self.prefix_filename_lreg = "prediction_lreg_"
self.prefix_filename_stats = "prediction_stats_"
self.prefix_filename_stats_all = "prediction_allPlots_"
self.filepath_list = list()
def FeaturesPop_dict_DFrame(self):
"""
Formats a dictionary of cells population morpho-features
into a dictionary of DataFrame structures
"""
pop_prediction_raw = self.testObj.prediction_cells_dict
prediction_raw = list(pop_prediction_raw.values())[0]
dict_pred_CellPart_df = dict()
for CellPart in prediction_raw.keys():
dict_pred_CellPart_df[CellPart] = pd.DataFrame(prediction_raw[CellPart])
return dict_pred_CellPart_df
def corrfunc(self, x, y, **kws):
r, p = stats.pearsonr(x, y)
ax = plt.gca()
ax.annotate(f"r = {r:.2E}\n(p ={p:.2E})", xy=(.1, .9), xycoords=ax.transAxes)
def df_drop_features(self, df=None, threshold_corr=0.95, threshold_var=0.05):
'''Drops one in any pair of highly correlated features of a DataFrame,
as the calculation of some quantities may not be posible. Besides,
columns with no variance are excluded. For instance,
joint kernel distributions estimates (kde) for two highly correlated
features may not be computed. The same holds for features with low variability.
The cutoffs for correlation (or variance) to be considered as too high (or too low)
are given by 'threshold_corr' ('threshold_var').
Note: Adapted from
https://chrisalbon.com/machine_learning/feature_selection/
drop_highly_correlated_features/
'''
feats_to_drop = list()
# Compute the correlation DataFrame of the original DataFrame of feature values
corr_matrix_df = df.corr().abs()
# Build a copy with the elements below the first diagonal as False
        # note: plain bool is used here; the np.bool alias was removed in NumPy 1.24
        upper = corr_matrix_df.where(np.triu(np.ones(corr_matrix_df.shape), k=1).astype(bool))
# Find and collect one feature name in any pair of correlated features
features_hcorr = [column for column in df.columns
if any(upper[column] > threshold_corr)]
feats_to_drop.extend(features_hcorr)
# Compute the variance DataSeries of the original DataFrame of feature values
var_series = df.var().abs()
# Find and collect all features with low variance
feats_no_var = [column for column in df.columns
if var_series[column] < threshold_var]
feats_to_drop.extend(feats_no_var)
# Drop all those disposable (maybe superfluous) features found
df.drop(df[feats_to_drop], axis=1, inplace=True)
return df
def FeaturesPop_Linreg_plots(self, dict_pred_CellPart_df=None):
'''Plots a histogram for values of each morpho-feature, in the diagonal,
together with a kernel density estimation (kde) for that histogram.
Linear correlation results are shown below and above the diagonal for
        the same pair of morpho-features (i.e., numerical results are symmetric).
'''
for CellPart, prediction_raw_df in list(dict_pred_CellPart_df.items()):
data = self.df_drop_features(df=prediction_raw_df)
g = sns.pairplot(data, height=5, aspect=1, diag_kind="kde")
# g = sns.PairGrid(data, size=5, aspect=1, palette=["red"])
# g.map(sns.regplot)
# g.map(self.corrfunc)
g.map_upper(sns.regplot)
g.map_upper(self.corrfunc)
g.map_diag(sns.distplot, kde=True)
g.map_lower(sns.regplot)
g.map_lower(self.corrfunc)
plt.subplots_adjust(top=0.95)
g.fig.suptitle('Cell part: '+CellPart, fontsize=17)
filepath = os.path.join(self.testObj.path_test_output, self.prefix_filename_lreg + CellPart + '_FSI_pop.pdf')
plt.savefig(filepath, dpi=600, )
self.filepath_list.append(filepath)
def FeaturesPop_ContourLinreg_plots(self, dict_pred_CellPart_df=None):
'''Plots a histogram for values of each morpho-feature, in the diagonal,
together with a kernel density estimation (kde) for that histogram.
Linear correlation results and contour (kde) plots for the same pair of
        morpho-features are shown above and below the diagonal, respectively.
        Note that some morpho-features are excluded from the analysis when their
        correlation is high, as their (kde) contour plots cannot be computed.
'''
for CellPart, prediction_raw_df in list(dict_pred_CellPart_df.items()):
data = self.df_drop_features(df=prediction_raw_df)
g = sns.pairplot(data, height=5, aspect=1, diag_kind="kde")
# g = sns.PairGrid(data, size=5, aspect=1, palette=["red"]
g.map_upper(sns.regplot)
g.map_upper(self.corrfunc)
# g.map_upper(plt.reg, s=10)
g.map_diag(sns.distplot, kde=True)
g.map_lower(sns.kdeplot, cmap="Blues_d", n_levels=8)
plt.subplots_adjust(top=0.95)
g.fig.suptitle('Cell part: ' + CellPart, fontsize=17)
filepath = os.path.join(self.testObj.path_test_output, self.prefix_filename_stats_all + CellPart + '_FSI_pop.pdf')
plt.savefig(filepath, dpi=600, )
self.filepath_list.append(filepath)
def create(self):
Dict_CellPart_DFrame_pred = self.FeaturesPop_dict_DFrame()
self.FeaturesPop_Linreg_plots(dict_pred_CellPart_df=Dict_CellPart_DFrame_pred)
self.FeaturesPop_ContourLinreg_plots(dict_pred_CellPart_df=Dict_CellPart_DFrame_pred)
return self.filepath_list
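# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). FeatsPop_MorphStats
# only reads `prediction_cells_dict` and `path_test_output` from the test
# object, so a SimpleNamespace stand-in is enough for illustration; the
# population/feature names and random values below are assumptions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from types import SimpleNamespace

    rng = np.random.default_rng(0)
    fake_prediction = {
        "population_1": {
            "soma": {"diameter": rng.random(30).tolist(),
                     "surface_area": rng.random(30).tolist()},
        }
    }
    fake_test = SimpleNamespace(prediction_cells_dict=fake_prediction,
                                path_test_output=".")
    plot_files = FeatsPop_MorphStats(fake_test).create()
    print(plot_files)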
|
StarcoderdataPython
|
5126595
|
import pymongo
import pprint
import sys
if len(sys.argv) > 1 :
host=sys.argv[1]
else:
host="mongodb://localhost:27017"
client = pymongo.MongoClient(host=host) # defaults to mongodb://localhost:27017
blogDatabase = client["blog"]
usersCollection = blogDatabase["users"]
articlesCollection = blogDatabase[ "articles" ]
author = "jdrumgoole"
article = { "title" : "This is my first post",
"body" : "The is the longer body text for my blog post. We can add lots of text here.",
"username" : author,
"tags" : [ "joe", "general", "Ireland", "admin" ]
}
#
# Lets check if our author exists
#
if usersCollection.find_one( { "username" : author }) :
doc = articlesCollection.insert_one( article )
pprint.pprint( article )
else:
raise ValueError( "Author %s does not exist as a user" % author )
|
StarcoderdataPython
|
120977
|
<gh_stars>0
import argparse
from pwn import cyclic_metasploit_find
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--address", type=str, required=True, help="EIP hex value"
)
parser.add_argument(
"-c",
"--prepend_cmd",
type=str,
required=False,
default="",
help="EIP hex value",
)
args = parser.parse_args()
hex_val = int(args.address, 16)
offset = cyclic_metasploit_find(hex_val)
cmd = args.prepend_cmd
offset_eip = offset + len(cmd)
offset_esp = offset_eip + 4 # + 4 bytes after EIP
print(f"Offset of {args.address}: {offset}")
print(f"Offset of EIP + cmd len : {offset_eip}")
print(f"Offset of EIP + cmd len + 4 = ESP: {offset_esp}")
|
StarcoderdataPython
|
242327
|
<gh_stars>1-10
import argparse
from endless_sky.datafile import DataFile
from re import sub
parser = argparse.ArgumentParser()
parser.add_argument("locations")
parser.add_argument("links")
args = parser.parse_args()
links = {}
for i in DataFile(args.links).root.filter_first("system"):
links[i.tokens[1]] = []
for j in i.filter_first("link"):
links[i.tokens[1]].append(j.tokens[1])
from pprint import pprint
with open(args.locations) as f:
contents = f.read()
contents = sub(r"\tlink .+\n", "", contents)
with open(args.locations + ".linkless.txt", "w") as f:
f.write(contents)
systems = []
for i in DataFile(args.links).root.filter_first("system"):
systems.append(i.tokens[1])
for i in systems:
if not links.get(i):
continue
for j in links[i]:
contents = contents.replace(f'system "{i}"', f'system "{i}"\n\tlink "{j}"')
contents = contents.replace(f'system {i}\n', f'system {i}\n\tlink "{j}"\n')
with open(args.locations + ".merged.txt", "w") as f:
f.write(contents)
|
StarcoderdataPython
|
1619132
|
"""
Tests for opencadd.structure.superposition.engines.mmligner
"""
import os
import pytest
from opencadd.structure.superposition.api import Structure
from opencadd.structure.superposition.engines.mmligner import MMLignerAligner
def test_mmligner_instantiation():
aligner = MMLignerAligner()
@pytest.mark.skipif(
"GITHUB_ACTIONS" in os.environ, reason="FIXME: MMLigner in conda-forge is not yet patched"
)
def test_mmligner_alignment():
aligner = MMLignerAligner()
structures = [Structure.from_pdbid(pdb_id) for pdb_id in ["4u3y", "4u40"]]
result = aligner.calculate(structures)
# Check API compliance
assert "superposed" in result
assert "scores" in result
assert "rmsd" in result["scores"]
assert "metadata" in result
# Check RMSD value for these PDBs
    # pytest.approx objects are always truthy; compare against the expected value
    assert result["scores"]["rmsd"] == pytest.approx(2.279, abs=1e-3)
|
StarcoderdataPython
|
9783452
|
from pdf2image import convert_from_path
from PIL import Image
import os
PAPERS_DIR = "./data/papers/"
PAPERS_IMG_DIR = "./data/paper_img/"
IM_SIZE = 256, 256
papers = os.listdir(PAPERS_DIR)
for paper in papers:
images = convert_from_path(PAPERS_DIR + paper)
for im in images:
im.thumbnail(IM_SIZE, Image.ANTIALIAS)
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths)
max_height = max(heights)
new_im = Image.new('RGB', (total_width, max_height))
x_offset = 0
for im in images:
new_im.paste(im, (x_offset,0))
x_offset += im.size[0]
new_im.save(PAPERS_IMG_DIR + paper +'.jpg')
|
StarcoderdataPython
|
6431115
|
<filename>datajob/stepfunctions/stepfunctions_execute.py
import json
import time
from datetime import datetime
from typing import Union
import boto3
from stepfunctions.workflow import Execution
from stepfunctions.workflow import Workflow
from datajob import console
from datajob import logger
from datajob.datajob_execution_input import DataJobExecutionInput
CURRENT_DATE = datetime.utcnow()
MAX_CHARS = 63
def find_state_machine_arn(state_machine: str) -> str:
"""lookup the state machine arn based on the state machine name."""
workflows = Workflow.list_workflows()
state_machine_object = [
workflow for workflow in workflows if workflow.get("name") == state_machine
]
if len(state_machine_object) == 1:
logger.debug(f"we have found one statemachine {state_machine_object[0]}")
return state_machine_object[0].get("stateMachineArn")
elif len(state_machine_object) == 0:
logger.error(f"statemachine {state_machine} not found.")
raise LookupError("no statemachine found.")
else:
logger.error(f"more than one statemachine found with name {state_machine}.")
raise Exception(
"more than one statemachine found. Something strange is going on ..."
)
def _describe_stack_resources(sfn_arn: str) -> dict:
"""Describe stack resources for a stepfunctions workflow arn."""
return boto3.client("cloudformation").describe_stack_resources(
PhysicalResourceId=sfn_arn
)
def _find_cloudformation_stack_name_for_sfn_workflow(sfn_arn: str) -> str:
"""Find the cloudformation stackname for a stepfunction workflow.
Args:
sfn_arn: the AWS ARN of stepfunctions workflow
Returns: Stackname
"""
logger.debug(f"looking for the stack for step functions arn {sfn_arn}")
stack_resources = _describe_stack_resources(sfn_arn=sfn_arn)
stepfunctions_resource = [
element
for element in stack_resources.get("StackResources")
if element.get("PhysicalResourceId") == sfn_arn
]
stack_name = stepfunctions_resource[0]["StackName"]
logger.debug(f"found stack {stack_name}")
return stack_name
def _describe_stacks(stack_name: str) -> dict:
return boto3.client("cloudformation").describe_stacks(StackName=stack_name)
def _generate_unique_name(
name: str,
max_chars: int = MAX_CHARS,
unique_identifier: datetime = CURRENT_DATE,
datetime_format: str = "%Y%m%dT%H%M%S",
):
"""Generate a unique name by adding a datetime behind the name.
Args:
name: the name we want to make unique
max_chars: the maximum number of characters a unique name can have.
        unique_identifier: the datetime appended to the name to make it unique.
        datetime_format: the format of the datetime that gets appended to the name.
Returns: the name as the unique name.
"""
current_date_as_string = unique_identifier.strftime(datetime_format)
total_length = len(current_date_as_string) + len(name)
difference = max_chars - total_length
if difference < 0:
logger.debug(
f"the length of the unique name is {total_length}. Max chars is {max_chars}. Removing last {difference} chars from name"
)
name = name[: difference - 1]
unique_name = f"{name}-{current_date_as_string}"
logger.debug(f"generated unique name is {unique_name}")
return unique_name
def _get_execution_input_from_stack(stack_name: str) -> Union[dict, None]:
"""Look for the execution input in the outputs of this stack. If present
generate unique names for the ExecutionInput and return the dict. If not
present return None.
Args:
stack_name: name of the cloudformation stack.
Returns: ExecutionInput as a dict or None
"""
logger.debug(f"looking for execution input in {stack_name}")
stack = _describe_stacks(stack_name=stack_name)
outputs = stack.get("Stacks")[0].get("Outputs")
if outputs:
for output in outputs:
if (
output.get("OutputKey")
== DataJobExecutionInput().DATAJOB_EXECUTION_INPUT
):
execution_inputs = json.loads(output.get("OutputValue"))
return_value = {
execution_input: _generate_unique_name(execution_input)
for execution_input in execution_inputs
}
console.log("execution input found: \n" f"{return_value}")
return return_value
logger.debug("no execution input found.")
def get_execution_input(sfn_arn: str) -> Union[dict, None]:
"""Get execution input dict for a workflow.
- we will first find the cloudformation stack name based on the stepfunctions workflow arn.
- then we try to get the execution input schema from the cloudformation stack "Outputs".
Args:
sfn_arn: arn of the stepfunctions workflow
Returns: ExecutionInput or None
"""
stack_name = _find_cloudformation_stack_name_for_sfn_workflow(sfn_arn=sfn_arn)
return _get_execution_input_from_stack(stack_name=stack_name)
def get_status(execution: Execution):
"""get the status of a stepfunctions workflow execution."""
time.sleep(1)
description = execution.describe()
return description.get("status")
def execute(state_machine_arn: str, execution_input: Union[dict, None]):
"""execute statemachine based on the name."""
workflow = Workflow.attach(state_machine_arn)
return workflow.execute(inputs=execution_input)
|
StarcoderdataPython
|
3498878
|
from flask import request
from banal import ensure_list
from followthemoney import model
from marshmallow import Schema, post_dump, pre_load
from marshmallow.fields import Nested, Integer, String, List
from marshmallow.fields import Dict, Boolean
from marshmallow.validate import Length
from aleph.core import url_for
from aleph.logic.util import document_url, entity_url
from aleph.serializers.common import BaseSchema, flatten_id
from aleph.serializers.common import SchemaName, PartialDate
from aleph.serializers.common import Country, Language
from aleph.serializers.roles import RoleReferenceSchema
from aleph.serializers.collections import CollectionSchema
from aleph.model import Role, Document, Entity, Collection
class ShallowCombinedSchema(BaseSchema):
collection_id = String()
# Joint entity/document attributes
collection = Nested(CollectionSchema())
schema = SchemaName()
schemata = List(SchemaName())
names = List(String())
addresses = List(String())
phones = List(String())
emails = List(String())
identifiers = List(String())
countries = List(Country())
dates = List(PartialDate())
bulk = Boolean()
# Entity attributes
foreign_id = String()
name = String()
entities = List(String())
properties = Dict()
# Document attributes
status = String()
content_hash = String()
uploader_id = String()
uploader = Nested(RoleReferenceSchema())
error_message = String()
title = String()
summary = String()
languages = List(Language())
keywords = List(String())
date = PartialDate()
authored_at = PartialDate()
modified_at = PartialDate()
published_at = PartialDate()
retrieved_at = PartialDate()
file_name = String()
file_size = Integer()
author = String()
generator = String()
mime_type = String()
extension = String()
encoding = String()
source_url = String()
pdf_version = String()
columns = List(String())
headers = Dict()
children = Integer()
# TODO: is this a separate endpoint?
text = String()
html = String()
def document_links(self, data, pk, schemata):
links = {
'self': url_for('documents_api.view', document_id=pk),
'tags': url_for('entities_api.tags', id=pk),
'ui': document_url(pk)
}
if data.get('content_hash'):
links['file'] = url_for('documents_api.file',
document_id=pk,
_authorize=True)
if schemata.intersection([Document.SCHEMA_PDF]):
links['pdf'] = url_for('documents_api.pdf',
document_id=pk,
_authorize=True)
if schemata.intersection([Document.SCHEMA_PDF, Document.SCHEMA_TABLE]):
links['records'] = url_for('documents_api.records', document_id=pk)
if schemata.intersection([Document.SCHEMA_FOLDER]):
query = (('filter:parent.id', pk),)
links['children'] = url_for('documents_api.index', _query=query)
return links
def entity_links(self, data, pk, schemata):
return {
'self': url_for('entities_api.view', id=pk),
# 'similar': url_for('entities_api.similar', id=pk),
# 'documents': url_for('entities_api.documents', id=pk),
'references': url_for('entities_api.references', id=pk),
'tags': url_for('entities_api.tags', id=pk),
'ui': entity_url(pk)
}
@post_dump
def hypermedia(self, data):
pk = str(data.get('id'))
collection = data.get('collection', {})
collection_id = collection.get('id')
collection_id = collection_id or data.get('collection_id')
schemata = set(data.get('schemata', []))
if Document.SCHEMA in schemata:
data['links'] = self.document_links(data, pk, schemata)
else:
data['links'] = self.entity_links(data, pk, schemata)
if data.get('bulk'):
data['writeable'] = False
else:
data['writeable'] = request.authz.can_write(collection_id)
return data
class CombinedSchema(ShallowCombinedSchema):
EXPAND = [
('collection_id', Collection, 'collection', CollectionSchema, False),
('entities', Entity, '_related', ShallowCombinedSchema, True),
('uploader_id', Role, 'uploader', RoleReferenceSchema, False),
('parent', Document, 'parent', ShallowCombinedSchema, False),
]
related = List(Nested(ShallowCombinedSchema()))
parent = Nested(ShallowCombinedSchema())
@post_dump(pass_many=True)
def expand(self, objs, many=False):
super(ShallowCombinedSchema, self).expand(objs, many=many)
for obj in ensure_list(objs):
# This will replace entity IDs for related entities with the
# actual entity objects which have been retrieved because they
# were all listed in the 'entities' reverse index.
related = obj.pop('_related', [])
schema = model.get(obj.get('schema'))
if schema is None or not len(related):
continue
related = {r.get('id'): r for r in related}
properties = obj.get('properties')
for name, prop in schema.properties.items():
if name not in properties or prop.type_name != 'entity':
continue
values = ensure_list(properties.get(name))
values = [related.get(v) for v in values if v in related]
properties[name] = values
class EntityUpdateSchema(Schema):
name = String(allow_none=True)
schema = SchemaName(required=True)
properties = Dict()
class EntityCreateSchema(EntityUpdateSchema):
collection_id = String(required=True)
foreign_id = String()
@pre_load
def flatten_collection(self, data):
flatten_id(data, 'collection_id', 'collection')
class DocumentParentSchema(Schema):
id = String(allow_none=True)
foreign_id = String(allow_none=True)
class DocumentUpdateSchema(Schema):
title = String(allow_none=True)
summary = String(allow_none=True)
countries = List(Country())
languages = List(Language())
keywords = List(String(validate=Length(min=1, max=5000)))
date = PartialDate(allow_none=True)
authored_at = PartialDate(allow_none=True)
modified_at = PartialDate(allow_none=True)
published_at = PartialDate(allow_none=True)
retrieved_at = PartialDate(allow_none=True)
file_name = String(allow_none=True)
author = String(allow_none=True)
generator = String(allow_none=True)
mime_type = String(allow_none=True)
source_url = String(allow_none=True)
class DocumentCreateSchema(DocumentUpdateSchema):
parent = Nested(DocumentParentSchema())
|
StarcoderdataPython
|
193165
|
<filename>src/cisco_gnmi/client.py
"""Copyright 2019 Cisco Systems
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
The contents of this file are licensed under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with the
License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""
"""Python gNMI wrapper to ease usage of gNMI."""
import logging
from xml.etree.ElementPath import xpath_tokenizer_re
from six import string_types
from . import proto
from . import util
LOGGER = logging.getLogger(__name__)
logger = LOGGER
class Client(object):
"""gNMI gRPC wrapper client to ease usage of gNMI.
Returns relatively raw response data. Response data may be accessed according
to the gNMI specification.
Methods
-------
capabilities()
Retrieve meta information about version, supported models, etc.
get(...)
Get a snapshot of config, state, operational, or all forms of data.
set(...)
Update, replace, or delete configuration.
subscribe(...)
Stream snapshots of data from the device.
Examples
--------
>>> import grpc
>>> from cisco_gnmi import Client
>>> from cisco_gnmi.auth import CiscoAuthPlugin
>>> channel = grpc.secure_channel(
... '127.0.0.1:9339',
... grpc.composite_channel_credentials(
... grpc.ssl_channel_credentials(),
... grpc.metadata_call_credentials(
... CiscoAuthPlugin(
... 'admin',
... 'its_a_secret'
... )
... )
... )
... )
>>> client = Client(channel)
>>> capabilities = client.capabilities()
>>> print(capabilities)
"""
"""Defining property due to gRPC timeout being based on a C long type.
Should really define this based on architecture.
32-bit C long max value. "Infinity".
"""
_C_MAX_LONG = 2147483647
# gNMI uses nanoseconds, baseline to seconds
_NS_IN_S = int(1e9)
def __init__(self, grpc_channel, timeout=_C_MAX_LONG, default_call_metadata=None):
"""gNMI initialization wrapper which simply wraps some aspects of the gNMI stub.
Parameters
----------
grpc_channel : grpc.Channel
The gRPC channel to initialize the gNMI stub with.
Use ClientBuilder if unfamiliar with gRPC.
timeout : uint
Timeout for gRPC functionality.
default_call_metadata : list
Metadata to be sent with each gRPC call.
"""
self.service = proto.gnmi_pb2_grpc.gNMIStub(grpc_channel)
self.default_call_metadata = default_call_metadata
self._channel = grpc_channel
def capabilities(self):
"""Capabilities allows the client to retrieve the set of capabilities that
is supported by the target. This allows the target to validate the
service version that is implemented and retrieve the set of models that
the target supports. The models can then be specified in subsequent RPCs
to restrict the set of data that is utilized.
Reference: gNMI Specification Section 3.2
Returns
-------
proto.gnmi_pb2.CapabilityResponse
"""
message = proto.gnmi_pb2.CapabilityRequest()
LOGGER.debug(str(message))
response = self.service.Capabilities(
message, metadata=self.default_call_metadata
)
return response
def get(
self,
paths,
prefix=None,
data_type="ALL",
encoding="JSON_IETF",
use_models=None,
extension=None,
):
"""A snapshot of the requested data that exists on the target.
Parameters
----------
paths : iterable of proto.gnmi_pb2.Path
An iterable of Paths to request data of.
prefix : proto.gnmi_pb2.Path, optional
A path to prefix all Paths in paths
data_type : proto.gnmi_pb2.GetRequest.DataType, optional
A member of the GetRequest.DataType enum to specify what datastore to target
[ALL, CONFIG, STATE, OPERATIONAL]
encoding : proto.gnmi_pb2.Encoding, optional
A member of the proto.gnmi_pb2.Encoding enum specifying desired encoding of returned data
[JSON, BYTES, PROTO, ASCII, JSON_IETF]
use_models : iterable of proto.gnmi_pb2.ModelData, optional
extension : iterable of proto.gnmi_ext.Extension, optional
Returns
-------
proto.gnmi_pb2.GetResponse
"""
data_type = util.validate_proto_enum(
"data_type",
data_type,
"GetRequest.DataType",
proto.gnmi_pb2.GetRequest.DataType,
)
encoding = util.validate_proto_enum(
"encoding", encoding, "Encoding", proto.gnmi_pb2.Encoding
)
request = proto.gnmi_pb2.GetRequest()
if not isinstance(paths, (list, set, map)):
raise Exception("paths must be an iterable containing Path(s)!")
request.path.extend(paths)
request.type = data_type
request.encoding = encoding
if prefix:
request.prefix = prefix
if use_models:
request.use_models = use_models
if extension:
request.extension = extension
LOGGER.debug(str(request))
get_response = self.service.Get(request, metadata=self.default_call_metadata)
return get_response
def set(
self, prefix=None, updates=None, replaces=None, deletes=None, extensions=None
):
"""Modifications to the configuration of the target.
Parameters
----------
prefix : proto.gnmi_pb2.Path, optional
The Path to prefix all other Paths defined within other messages
updates : iterable of iterable of proto.gnmi_pb2.Update, optional
The Updates to update configuration with.
replaces : iterable of proto.gnmi_pb2.Update, optional
The Updates which replaces other configuration.
The main difference between replace and update is replace will remove non-referenced nodes.
deletes : iterable of proto.gnmi_pb2.Path, optional
The Paths which refers to elements for deletion.
extensions : iterable of proto.gnmi_ext.Extension, optional
Returns
-------
proto.gnmi_pb2.SetResponse
"""
request = proto.gnmi_pb2.SetRequest()
if prefix:
request.prefix.CopyFrom(prefix)
test_list = [updates, replaces, deletes]
if not any(test_list):
raise Exception("At least update, replace, or delete must be specified!")
for item in test_list:
if not item:
continue
if not isinstance(item, (list, set)):
raise Exception("updates, replaces, and deletes must be iterables!")
if updates:
request.update.extend(updates)
if replaces:
            request.replace.extend(replaces)  # SetRequest proto field is "replace"
if deletes:
request.delete.extend(deletes)
if extensions:
request.extension.extend(extensions)
LOGGER.debug(str(request))
response = self.service.Set(request, metadata=self.default_call_metadata)
return response
def subscribe(self, request_iter, extensions=None):
"""Subscribe allows a client to request the target to send it values
of particular paths within the data tree. These values may be streamed
at a particular cadence (STREAM), sent one off on a long-lived channel
(POLL), or sent as a one-off retrieval (ONCE).
Reference: gNMI Specification Section 3.5
Parameters
----------
request_iter : iterable of proto.gnmi_pb2.SubscriptionList or proto.gnmi_pb2.Poll or proto.gnmi_pb2.AliasList
The requests to embed as the SubscribeRequest, oneof the above.
subscribe RPC is a streaming request thus can arbitrarily generate SubscribeRequests into request_iter
to use the same bi-directional streaming connection if already open.
extensions : iterable of proto.gnmi_ext.Extension, optional
Returns
-------
generator of SubscriptionResponse
"""
def validate_request(request):
subscribe_request = proto.gnmi_pb2.SubscribeRequest()
if isinstance(request, proto.gnmi_pb2.SubscriptionList):
subscribe_request.subscribe.CopyFrom(request)
elif isinstance(request, proto.gnmi_pb2.Poll):
subscribe_request.poll.CopyFrom(request)
elif isinstance(request, proto.gnmi_pb2.AliasList):
subscribe_request.aliases.CopyFrom(request)
else:
raise Exception(
"request must be a SubscriptionList, Poll, or AliasList!"
)
if extensions:
subscribe_request.extensions.extend(extensions)
LOGGER.debug(str(subscribe_request))
return subscribe_request
response_stream = self.service.Subscribe(
(validate_request(request) for request in request_iter),
metadata=self.default_call_metadata,
)
return response_stream
def subscribe_xpaths(
self,
xpath_subscriptions,
request_mode="STREAM",
sub_mode="SAMPLE",
encoding="JSON",
sample_interval=_NS_IN_S * 10,
suppress_redundant=False,
heartbeat_interval=None,
prefix=None,
):
"""A convenience wrapper of subscribe() which aids in building of SubscriptionRequest
        with request as subscribe SubscriptionList. This method accepts an iterable of plain XPath strings,
dictionaries with Subscription attributes for more granularity, or already built Subscription
objects and builds the SubscriptionList. Fields not supplied will be defaulted with the default arguments
to the method.
Generates a single SubscribeRequest.
Parameters
----------
xpath_subscriptions : str or iterable of str, dict, Subscription
An iterable which is parsed to form the Subscriptions in the SubscriptionList to be passed
to SubscriptionRequest. Strings are parsed as XPaths and defaulted with the default arguments,
dictionaries are treated as dicts of args to pass to the Subscribe init, and Subscription is
treated as simply a pre-made Subscription.
request_mode : proto.gnmi_pb2.SubscriptionList.Mode, optional
Indicates whether STREAM to stream from target,
ONCE to stream once (like a get),
POLL to respond to POLL.
[STREAM, ONCE, POLL]
sub_mode : proto.gnmi_pb2.SubscriptionMode, optional
The default SubscriptionMode on a per Subscription basis in the SubscriptionList.
TARGET_DEFINED indicates that the target (like device/destination) should stream
information however it knows best. This instructs the target to decide between ON_CHANGE
or SAMPLE - e.g. the device gNMI server may understand that we only need RIB updates
            on an ON_CHANGE basis as opposed to SAMPLE, and we don't have to explicitly state our
desired behavior.
ON_CHANGE only streams updates when changes occur.
SAMPLE will stream the subscription at a regular cadence/interval.
[TARGET_DEFINED, ON_CHANGE, SAMPLE]
encoding : proto.gnmi_pb2.Encoding, optional
A member of the proto.gnmi_pb2.Encoding enum specifying desired encoding of returned data
[JSON, BYTES, PROTO, ASCII, JSON_IETF]
sample_interval : int, optional
Default nanoseconds for SAMPLE to occur.
Defaults to 10 seconds.
suppress_redundant : bool, optional
Indicates whether values that have not changed should be sent in a SAMPLE subscription.
heartbeat_interval : int, optional
Specifies the maximum allowable silent period in nanoseconds when
suppress_redundant is in use. The target should send a value at least once
in the period specified. Also applies in ON_CHANGE.
prefix : proto.gnmi_pb2.Path, optional
            A common path prepended to all path elements in the message. This reduces message size by
            removing redundant path elements. Smaller message == improved throughput.
Returns
-------
subscribe()
"""
subscription_list = proto.gnmi_pb2.SubscriptionList()
subscription_list.mode = util.validate_proto_enum(
"mode",
request_mode,
"SubscriptionList.Mode",
proto.gnmi_pb2.SubscriptionList.Mode,
)
subscription_list.encoding = util.validate_proto_enum(
"encoding", encoding, "Encoding", proto.gnmi_pb2.Encoding
)
if prefix:
subscription_list.prefix.CopyFrom(prefix)
if isinstance(
xpath_subscriptions, (string_types, dict, proto.gnmi_pb2.Subscription)
):
xpath_subscriptions = [xpath_subscriptions]
subscriptions = []
for xpath_subscription in xpath_subscriptions:
subscription = None
if isinstance(xpath_subscription, proto.gnmi_pb2.Subscription):
subscription = xpath_subscription
elif isinstance(xpath_subscription, string_types):
subscription = proto.gnmi_pb2.Subscription()
subscription.path.CopyFrom(
self.parse_xpath_to_gnmi_path(xpath_subscription)
)
subscription.mode = util.validate_proto_enum(
"sub_mode",
sub_mode,
"SubscriptionMode",
proto.gnmi_pb2.SubscriptionMode,
)
if sub_mode == "SAMPLE":
subscription.sample_interval = sample_interval
elif isinstance(xpath_subscription, dict):
subscription_dict = {}
if "path" not in xpath_subscription.keys():
raise Exception("path must be specified in dict!")
if isinstance(xpath_subscription["path"], proto.gnmi_pb2.Path):
subscription_dict["path"] = xpath_subscription["path"]
elif isinstance(xpath_subscription["path"], string_types):
subscription_dict["path"] = self.parse_xpath_to_gnmi_path(
xpath_subscription["path"]
)
else:
raise Exception("path must be string or Path proto!")
sub_mode_name = (
sub_mode
if "mode" not in xpath_subscription.keys()
else xpath_subscription["mode"]
)
subscription_dict["mode"] = util.validate_proto_enum(
"sub_mode",
sub_mode,
"SubscriptionMode",
proto.gnmi_pb2.SubscriptionMode,
)
if sub_mode_name == "SAMPLE":
subscription_dict["sample_interval"] = (
sample_interval
if "sample_interval" not in xpath_subscription.keys()
else xpath_subscription["sample_interval"]
)
if "suppress_redundant" in xpath_subscription.keys():
subscription_dict["suppress_redundant"] = xpath_subscription[
"suppress_redundant"
]
if sub_mode_name != "TARGET_DEFINED":
if "heartbeat_interval" in xpath_subscription.keys():
subscription_dict["heartbeat_interval"] = xpath_subscription[
"heartbeat_interval"
]
subscription = proto.gnmi_pb2.Subscription(**subscription_dict)
else:
raise Exception("path must be string, dict, or Subscription proto!")
subscriptions.append(subscription)
subscription_list.subscription.extend(subscriptions)
return self.subscribe([subscription_list])
@classmethod
def parse_xpath_to_gnmi_path(cls, xpath, origin=None):
"""Parses an XPath to proto.gnmi_pb2.Path.
This function should be overridden by any child classes for origin logic.
Effectively wraps the std XML XPath tokenizer and traverses
the identified groups. Parsing robustness needs to be validated.
Probably best to formalize as a state machine sometime.
TODO: Formalize tokenizer traversal via state machine.
"""
if not isinstance(xpath, string_types):
raise Exception("xpath must be a string!")
path = proto.gnmi_pb2.Path()
if origin:
if not isinstance(origin, string_types):
raise Exception("origin must be a string!")
path.origin = origin
curr_elem = proto.gnmi_pb2.PathElem()
in_filter = False
just_filtered = False
curr_key = None
# TODO: Lazy
xpath = xpath.strip("/")
xpath_elements = xpath_tokenizer_re.findall(xpath)
path_elems = []
for index, element in enumerate(xpath_elements):
# stripped initial /, so this indicates a completed element
if element[0] == "/":
if not curr_elem.name:
raise Exception(
"Current PathElem has no name yet is trying to be pushed to path! Invalid XPath?"
)
path_elems.append(curr_elem)
curr_elem = proto.gnmi_pb2.PathElem()
continue
# We are entering a filter
elif element[0] == "[":
in_filter = True
continue
# We are exiting a filter
elif element[0] == "]":
in_filter = False
continue
# If we're not in a filter then we're a PathElem name
elif not in_filter:
curr_elem.name = element[1]
# Skip blank spaces
elif not any([element[0], element[1]]):
continue
# If we're in the filter and just completed a filter expr,
# "and" as a junction should just be ignored.
elif in_filter and just_filtered and element[1] == "and":
just_filtered = False
continue
# Otherwise we're in a filter and this term is a key name
elif curr_key is None:
curr_key = element[1]
continue
# Otherwise we're an operator or the key value
elif curr_key is not None:
# I think = is the only possible thing to support with PathElem syntax as is
if element[0] in [">", "<"]:
raise Exception("Only = supported as filter operand!")
if element[0] == "=":
continue
else:
# We have a full key here, put it in the map
if curr_key in curr_elem.key.keys():
raise Exception("Key already in key map!")
curr_elem.key[curr_key] = element[0].strip("'\"")
curr_key = None
just_filtered = True
# Keys/filters in general should be totally cleaned up at this point.
if curr_key:
raise Exception("Hanging key filter! Incomplete XPath?")
# If we have a dangling element that hasn't been completed due to no
# / element then let's just append the final element.
if curr_elem:
path_elems.append(curr_elem)
curr_elem = None
if any([curr_elem, curr_key, in_filter]):
raise Exception("Unfinished elements in XPath parsing!")
path.elem.extend(path_elems)
return path
|
StarcoderdataPython
|
3534139
|
<reponame>zzpwahaha/VimbaCamJILA
# Copyright (C) 2018--2019 <NAME>
# Copyright (C) 2018--2019 Steward Observatory
#
# This file is part of ehtplot.
#
# ehtplot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ehtplot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ehtplot. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from matplotlib.colors import ListedColormap
from matplotlib.cm import register_cmap
from ehtplot.color.ctab import list_ctab, load_ctab
def unmodified(name):
chars = set("0123456789flus")
return "_" not in name or not set(name.rsplit("_", 1)[1]) <= chars
def register(name=None, cmap=None, path=None):
if name is None:
# Self-call to register all colormaps in "ehtplot/color/"
for name in list_ctab(path=path):
register(name=name, cmap=cmap, path=path)
else:
if cmap is None:
cmap = ListedColormap(load_ctab(name, path=path))
# Register the colormap
register_cmap(name=name, cmap=cmap)
# Register the reversed colormap
register_cmap(name=name + ("_r" if unmodified(name) else "r"),
cmap=cmap.reversed())
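# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): after register() has
# run, every colormap shipped in ehtplot/color/ resolves by name through
# matplotlib. The name used below is discovered at runtime via list_ctab()
# rather than hard-coded, since the available ctab names are not listed here.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from matplotlib.cm import get_cmap

    register()                      # registers every ctab plus its reversed variant
    first_name = list_ctab()[0]     # pick an arbitrary registered colormap name
    print(first_name, get_cmap(first_name)(0.5))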
|
StarcoderdataPython
|
3413652
|
from PySide import QtGui, QtCore
from PIL import Image, ImageQt, ImageDraw
import os, json
import numpy
import svgwrite
#import PointCloud
from PointCloud import Point2, PointCloud, intersect_line
from gcode import Mach3 as Gc
import re
class Viewer(QtGui.QMainWindow):
def __init__(self, parameters, scale=Point2(1.0,1.0)):
super(Viewer, self).__init__()
#self.layout = QtGui.QBoxLayout(QtGui.QBoxLayout.TopToBottom, None)
self.layout = QtGui.QVBoxLayout()
self.setLayout(self.layout)
self.layout.setContentsMargins(0, 0, 0, 0)
self.parameters = parameters
self.multiWidget = QtGui.QScrollArea(self)
self.multiWidget.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
self.bl = QtGui.QVBoxLayout(self.multiWidget)
#self.bn = QtGui.QPushButton(self, text="Hello")
#self.bn.setSizePolicy(QtGui.QSizePolicy.Expanding,
# QtGui.QSizePolicy.Expanding)
#self.layout.addWidget(self.bn)
self.imageLabel = QtGui.QLabel(self.multiWidget)
self.imageLabel.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
self.imageLabel.setScaledContents(True)
self.bl.addWidget(self.imageLabel)
#self.imageLabel.setStyleSheet("border: 0px")
#self.imageLabel.setContentsMargins(0, 0, 0, 0)
#self.imageLabel.setText("nothing loaded")
self.imageLabel2 = QtGui.QLabel(self.multiWidget)
self.imageLabel2.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Minimum)
self.imageLabel2.setScaledContents(False)
#self.imageLabel2.setStyleSheet("border: 0px")
#self.imageLabel2.setContentsMargins(0, 0, 0, 0)
self.bl.addWidget(self.imageLabel2)
self.imageLabel3 = QtGui.QLabel(self.multiWidget)
#self.imageLabel3.setSizePolicy(QtGui.QSizePolicy.Ignored,
# QtGui.QSizePolicy.Ignored)
#self.imageLabel3.setScaledContents(False)
#self.imageLabel3.setStyleSheet("border: 0px")
#self.imageLabel3.setContentsMargins(0, 0, 0, 0)
self.bl.addWidget(self.imageLabel3)
self.cutoffSlider = QtGui.QSlider(self.multiWidget, orientation=QtCore.Qt.Horizontal)
self.cutoffSlider.sliderReleased.connect(self.updateNailsImage)
self.bl.addWidget(self.cutoffSlider)
self.setCentralWidget(self.multiWidget)
#self.setCentralWidget(self.imageLabel)
self.setWindowTitle("NailedIt - analyser")
self.resize(600, 600)
self.scaleFactor = scale
self.deviation = []
self.debug_cnt = 1
self.minNailDist = 2.8 # keep this distance to the nails
self.timer = QtCore.QTimer(self)
self.connect(self.timer, QtCore.SIGNAL("timeout()"), self.debug)
self.timer.setSingleShot(True)
#self.timer.start(0)
# menu
fileMenu = self.menuBar().addMenu("File")
fileMenu.addAction("Open", self.openFile)
fileMenu.addSeparator()
fileMenu.addAction("Export Nails SVG", self.saveNailsSVG)
fileMenu.addAction("Generate Gcode", self.generateGcode)
optionsMenu = self.menuBar().addMenu("Options")
self.showNailsAction = QtGui.QAction("show nails", optionsMenu, checkable=True, triggered=self.updateNailsImage)
optionsMenu.addAction(self.showNailsAction)
self.showOverlaps = QtGui.QAction("calculate overlaps", optionsMenu, checkable=True, triggered=self.updateNailsImage)
optionsMenu.addAction(self.showOverlaps)
self.reversePaths = QtGui.QAction("reverse path", optionsMenu, checkable=True, triggered=self.reverseTriggered)
optionsMenu.addAction(self.reversePaths)
optionsMenu.addAction("check nail distance", self.checkNails)
optionsMenu.addAction("calculate COG", self.calculateCOG)
#self.layout.addWidget(self.menu)
#self.layout.addWidget(self.scrollArea)
self.showImage(Image.new("RGB", (500,500)))
#if "image" in self.parameters:
# self.showImage(self.parameters["image"])
def reverseTriggered(self):
self.deviation = []
self.updateNailsImage()
def showImage(self, image, slot=0):
if slot == 0:
self.qim = ImageQt.ImageQt(image) # don't let python clean up the data
self.imageLabel.setPixmap(QtGui.QPixmap.fromImage(self.qim))
self.imageLabel.adjustSize()
elif slot == 1:
self.qim2 = ImageQt.ImageQt(image) # don't let python clean up the data
self.imageLabel2.setPixmap(QtGui.QPixmap.fromImage(self.qim2))
self.imageLabel2.adjustSize()
elif slot == 2:
self.qim3 = ImageQt.ImageQt(image) # don't let python clean up the data
self.imageLabel3.setPixmap(QtGui.QPixmap.fromImage(self.qim3))
self.imageLabel3.adjustSize()
def checkNails(self):
if "Nails" in self.parameters:
nails = self.parameters["Nails"]["3:nails"]
pc = PointCloud(10,10)
print "list", nails
pc.addFromList(nails)
print pc.p
img = self.parameters["image"]
draw = ImageDraw.Draw(img)
min_dist = 1000
for i,p in enumerate(pc.p):
np, d = pc.closestPoint(p.x, p.y, i)
min_dist = min(min_dist, d)
if d < self.minNailDist:
draw.rectangle((p.x-3, p.y - 3, p.x + 3, p.y + 3), outline=(255,0,0))
print "minDist:", min_dist * 1000.0 * self.parameters["Nails"]["2:parameters:"]["ppi"], "mm"
self.showImage(img)
def openFile(self):
filename = QtGui.QFileDialog.getOpenFileName(self, "Open Nailedit File", "./", "Nailedit (*.json)")[0]
if filename:
js = load_nailedit(filename)
if js:
self.parameters["Nails"] = js
print "num nails:", len(js["3:nails"]), "string length:", js['1:summary']["thread length"]
self.parameters["filename"] = filename
self.setWindowTitle("NailedIt - analyser (%s)"%os.path.basename(filename))
self.cutoffSlider.setMaximum(len(js["4:thread"]))
self.cutoffSlider.setSliderPosition(self.cutoffSlider.maximum())
self.deviation = []
self.updateNailsImage()
def updateDeviation(self):
if "Nails" in self.parameters and self.deviation:
nails = self.parameters["Nails"]
w = nails['2:parameters:']["proc_width"]
h = nails['2:parameters:']["proc_height"]
img = Image.new("RGB", (w, 100), (255,255,255))
draw = ImageDraw.Draw(img, "RGB")
for x in xrange(w):
d = int(float(x)/w * len(self.deviation))
v = float(self.deviation[d])/self.deviation[0] * 100
draw.line((x, 100, x, 100-v), fill=(255,0,0), width=1)
self.showImage(img, slot=1)
def debug(self):
#################################################################################################
if "Nails" in self.parameters:
js = self.parameters["Nails"]
nails = js["3:nails"]
path = js["4:thread"]
img = self.parameters["image"].copy()
inters = 0
up_to = self.debug_cnt
p4 = Point2(nails[path[up_to]][0], nails[path[up_to]][1])
p3 = Point2(nails[path[up_to-1]][0], nails[path[up_to-1]][1])
draw = ImageDraw.Draw(img, "RGBA")
if up_to > 1:
p1 = Point2(nails[path[0]][0], nails[path[0]][1])
for c in path[1:up_to]:
p2 = Point2(nails[c][0], nails[c][1])
s, t = intersect_line(p1, p2, p3, p4)
if 0 < s < 1 and 0 < t < 1:
inters += 1
draw.line((p1.x, p1.y, p2.x, p2.y), (0,255,0,255))
p1 = p2
draw.line((p3.x, p3.y, p4.x, p4.y), (255,0,0, 255), width=2)
self.debug_cnt += 1
self.showImage(img)
print "intersects", inters
self.timer.start(1000 if self.debug_cnt % 10 == 0 else 50)
def updateNailsImage(self):
if "Nails" in self.parameters:
nails = self.parameters["Nails"]
w = nails['2:parameters:']["proc_width"]
h = nails['2:parameters:']["proc_height"]
img = self.parameters["image"] = Image.new("RGB", (w, h))
trg = Image.open(nails['2:parameters:']["inputImagePath"])
if trg:
trg = trg.resize(img.size)
ret = draw_nails(nails, img, showNails=self.showNailsAction.isChecked(), targetImage=trg if not self.deviation else None, lastString=self.cutoffSlider.value(), reversed=self.reversePaths.isChecked())
if ret:
self.deviation = ret
self.showImage(self.parameters["image"])
self.updateDeviation()
if self.showOverlaps.isChecked():
ovl = self.parameters["overlap"] = Image.new("RGB", (w, h))
draw_overlap(nails, ovl, lastString=self.cutoffSlider.value(), reversed=self.reversePaths.isChecked())
self.showImage(ovl, slot=2)
def saveNailsSVG(self):
if "Nails" in self.parameters:
filename = QtGui.QFileDialog.getSaveFileName(self, "Export Nails", "./", "SVG (*.svg)")[0]
if filename:
save_nails_SVG(self.parameters["Nails"], filename, self.scaleFactor)
def generateGcode(self):
if not "Nails" in self.parameters:
return
filename = QtGui.QFileDialog.getSaveFileName(self, "Generate Gcode", "./", "gcode (*.tap)")[0]
if not filename:
return
js = self.parameters["Nails"]
nails = js["3:nails"]
path = js["4:thread"]
w = js["2:parameters:"]["proc_width"]
h = js["2:parameters:"]["proc_height"]
sf = Point2(1,1) * self.scaleFactor * 1000.0 * js["2:parameters:"]["ppi"] # pixels to millimeters
origin = Point2(0,0)
pc = PointCloud(1,1)
pc.addFromList(nails)
cp = pc.closestPoint(origin.x, origin.y)
print "origin", nails[cp[0]]
engine = Gc(nails, path[:self.cutoffSlider.value()], scaleFactor=sf, origin=pc.p[cp[0]])
code = engine.generateStringPath(os.path.basename(self.parameters["filename"]), startPosition=Point2(w*sf.x*.5, -5), minNailDistance=self.minNailDist)
#code = engine.generateStringPath(os.path.basename(self.parameters["filename"]), nails, path[:400], self.minNailDist, scaleFactor=sf, origin=Point2(0,0), startPosition=Point2(0.5, -0.1)*w)
spl = os.path.splitext(filename)
filename = spl[0]+"_string"+spl[1]
with open(filename, "w") as f:
f.write(code)
f.close()
print "written gcode to", filename
img = self.drawGcode(self.parameters["image"], code, Point2(1.0/sf.x,1.0/sf.y), Point2(0,0))
self.showImage(img)
code = engine.generateDrillPattern(os.path.basename(self.parameters["filename"]), -6.0)
filename = spl[0]+"_drills"+spl[1]
with open(filename, "w") as f:
f.write(code)
f.close()
print "written gcode to", filename
def calculateCOG(self):
nailWeight = 9.9 / 100 #grams per nail
threadWeightPerMeter = 40.0/1000 # g / m
canvas_weight = 838 # grams
if "Nails" in self.parameters:
js = self.parameters["Nails"]
w = js["2:parameters:"]["proc_width"]
h = js["2:parameters:"]["proc_height"]
sf = Point2(1, 1) * self.scaleFactor * 1000.0 * js["2:parameters:"]["ppi"] # pixels to millimeters
origin = Point2(0, 0)
pc = PointCloud(1,1)
pc.addFromList(js["3:nails"])
#cp = pc.closestPoint(origin.x, origin.y)
#origin = pc.p[cp[0]]
pc.translate(-origin.x, -origin.y)
pc.scale(sf.x, sf.y)
# cog nails
nails_cog = Point2(0,0)
nails_mass = nailWeight * len(pc.p)
for p in pc.p:
nails_cog += p
nails_cog = nails_cog / len(pc.p)
# cog thread
path = js["4:thread"]
cp = pc.p[path[0]]
totalThreadLen = 0
thread_cog = Point2(0,0)
for pid in path[1:]:
nxt = pc.p[pid]
l = cp.dist(nxt)
totalThreadLen += l
thread_cog += (cp+nxt)*0.5*l
cp = nxt
thread_cog /= totalThreadLen
thread_mass = totalThreadLen / 1000 * threadWeightPerMeter
# canvas cog
canvas_cog = Point2(float(js["2:parameters:"]["proc_width"])*sf.x, float(js["2:parameters:"]["proc_height"])*sf.y)*0.5
print "canvas:", canvas_weight, "g"
print "nails:", nails_mass, "g"
print "thread:", thread_mass, "g"
print "canvas cog:", canvas_cog
print "nails cog:", nails_cog
print "thread cog:", thread_cog
combined_cog = (canvas_cog*canvas_weight + nails_cog * nails_mass + thread_cog * thread_mass)
combined_cog /= canvas_weight + nails_mass + thread_mass
print "overall COG", combined_cog
def drawGcode(self, img, code, scaleFact, origin):
drw = ImageDraw.Draw(img)
mpx,mpy,mpz = 0,0,0
lines = code.split('\n')
for line in lines:
x = re.search(r"(Y)([0-9.-]+)", line)
x = float(x.group(2)) if x else mpx
y = re.search(r"(X)([0-9.-]+)", line)
y = float(y.group(2)) if y else mpy
z = re.search(r"(Z)([0-9.-]+)", line)
z = float(z.group(2)) if z else mpz
i = re.search(r"(J)([0-9.-]+)", line)
i = float(i.group(2)) if i else None
j = re.search(r"(I)([0-9.-]+)", line)
j = float(j.group(2)) if j else None
if line.startswith("G0 "):
drw.line((mpx * scaleFact.x + origin.x, mpy * scaleFact.y + origin.y,
x * scaleFact.x + origin.x, y * scaleFact.y + origin.y), (0,0,255))
mpx, mpy, mpz = x, y, z
elif line.startswith("G1 "):
drw.line((mpx * scaleFact.x + origin.x, mpy * scaleFact.y + origin.y,
x * scaleFact.x + origin.x, y * scaleFact.y + origin.y), (255,50,0))
mpx, mpy, mpz = x, y, z
elif line.startswith("G2 ") or line.startswith("G3 "):
r = Point2(Point2(i,j).length() * scaleFact.x, Point2(i,j).length() * scaleFact.y)
drw.arc(((mpx+i) * scaleFact.x + origin.x - abs(r.x), (mpy+j) * scaleFact.y + origin.y - abs(r.x),
(mpx+i) * scaleFact.x + origin.x + abs(r.x), (mpy+j) * scaleFact.y + origin.y + abs(r.x)), 0, 360, (255,0,0))
mpx, mpy, mpz = x, y, z
return img
def load_nailedit(filepath):
print 'loading "%s"'%filepath
with open(filepath, 'r') as f:
js = json.load(f)
f.close()
print 'done'
return js
def save_nails_SVG(nails, filename, scale):
svg = svgwrite.Drawing(filename, profile='tiny')
pnts = nails["3:nails"]
r = 1
ptmm = nails["2:parameters:"]["ppi"] * 1000
if "nailDiameter" in nails["2:parameters:"]:
r = nails["2:parameters:"]["nailDiameter"] * ptmm
for p in pnts:
svg.add(svg.circle((p[0]*ptmm*scale.x*svgwrite.mm, p[1]*ptmm*scale.y*svgwrite.mm), r*svgwrite.mm))
svg.save()
print "saved as ", filename
print "sc:", scale, "dim:", nails["2:parameters:"]["proc_width"]*ptmm*scale, "x", nails["2:parameters:"]["proc_height"]*ptmm*scale
def draw_nails(nails, img, showNails=True, lastString=10 ** 7, targetImage=None, reversed=False):
pnts = nails["3:nails"]
path = nails["4:thread"] if not reversed else nails["4:thread"][::-1]
params = nails["2:parameters:"]
backgroundCol = params["backgroundColor"]
if not isinstance(backgroundCol, list):
backgroundCol = (backgroundCol, backgroundCol, backgroundCol)
else:
backgroundCol = tuple(backgroundCol)
stringCol = params["threadColor"]
if len(stringCol) == 2:
stringCol = [stringCol[0], stringCol[0], stringCol[0], stringCol[1]]
print stringCol
# over sampling
mmpp = params["ppi"]/0.001 #mm per pixel
threadThickness = 0.3 # mm
oversampling = 3
w = params["proc_width"] * mmpp
h = params["proc_height"] * mmpp
iw = int(w / threadThickness * oversampling)
ih = int(h / threadThickness * oversampling)
img_hi = Image.new("RGB", (int(iw), int(ih)))
scl = mmpp / threadThickness * oversampling
stringCol[3] = 255
width = oversampling
print "buffer img %dx%d"%(iw,ih), "thread:",
draw = ImageDraw.Draw(img_hi, "RGBA")
calcDeviation = targetImage != None
if calcDeviation:
lastString = 10 ** 7
target_np = numpy.array(targetImage.getchannel("R"), dtype="float32")
dev = []
current_p = path[0]
for i, next_p in enumerate(path[1:lastString]):
draw.line((pnts[current_p][0]*scl, pnts[current_p][1]*scl, pnts[next_p][0]*scl, pnts[next_p][1]*scl), fill=tuple(stringCol), width=oversampling)
if calcDeviation and not i % 50:
current_np = numpy.array(img_hi.resize(targetImage.size, resample=Image.BICUBIC).getchannel("R"), dtype="float32")
deviation = numpy.subtract(target_np, current_np, out=current_np)
dev.append( numpy.sum(deviation) / 255 )
if i % 1000 == 0:
print 'drawing',i,"/",len(path)
current_p = next_p
# resize to target image size
img.paste(img_hi.resize(img.size, resample=Image.BICUBIC))
if showNails:
draw = ImageDraw.Draw(img, "RGBA")
for pt in pnts:
draw.rectangle([pt[0] - 2, pt[1] - 2, pt[0] + 2, pt[1] + 2], fill=(255,120,0,100), outline=(255, 255, 0, 255))
if calcDeviation:
return dev
def draw_overlap(nails, img, lastString=10 ** 7, reversed=False):
pnts = nails["3:nails"]
path = nails["4:thread"] if not reversed else nails["4:thread"][::-1]
params = nails["2:parameters:"]
img_line = Image.new("F", img.size)
draw = ImageDraw.Draw(img_line)
overlap_np = numpy.array(img_line)
nailConnects = [0]*len(pnts)
current_p = path[0]
nailConnects[current_p] += 1
for i, next_p in enumerate(path[1:lastString]):
img_line.paste(0, (0,0)+img_line.size)
draw.line((pnts[current_p][0], pnts[current_p][1], pnts[next_p][0], pnts[next_p][1]), fill=1.0, width=1)
draw.rectangle((pnts[current_p][0]-1, pnts[current_p][1]-1, pnts[current_p][0]+1, pnts[current_p][1]+1), fill=0.0)
draw.rectangle((pnts[next_p][0]-1, pnts[next_p][1]-1, pnts[next_p][0]+1, pnts[next_p][1]+1), fill=0.0)
current_np = numpy.array(img_line, dtype="float32")
numpy.add(current_np, overlap_np, out=overlap_np)
if i % 1000 == 0:
print 'overlap',i,"/",len(path)
nailConnects[next_p] += 1
current_p = next_p
# resize to target image size
print "max overlaps", overlap_np.max(), "max nail connects", max(nailConnects)
overlap_np = (overlap_np / overlap_np.max()) * 255.0
overlap = Image.fromarray(overlap_np.astype("uint8"))
overlap = Image.merge("RGB", (overlap, overlap, overlap))
redlut = tuple(max(0, (i-85)*3) if i < 170 else 255 for i in xrange(256))
greenlut = tuple(max(0, (i-170)*3) for i in xrange(256))
bluelut = tuple( i*3 if i < 85 else max(0,255-i*3) for i in xrange(256) )
overlap = overlap.point((redlut + greenlut + bluelut))
img.paste(overlap)
if __name__ == '__main__':
import sys
filepath = "Q:\\Projects\\code\\nailedit\\t26.json"
#nails = load_nailedit(filepath)
#img = Image.new("RGB", (nails['2:parameters:']["proc_width"], nails["2:parameters:"]["proc_height"]))
#draw_nails(nails, img)
app = QtGui.QApplication(sys.argv)
params = {
#"image": img
}
imageViewer = Viewer(params, 29.0/30.0)
imageViewer.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
1608238
|
"""NDG XACML package for functions
NERC DataGrid
"""
__author__ = "<NAME>"
__date__ = "26/03/10"
__copyright__ = "(C) 2010 Science and Technology Facilities Council"
__contact__ = "<EMAIL>"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "<EMAIL>"
__revision__ = "$Id$"
from abc import ABCMeta, abstractmethod
from datetime import datetime, timedelta
import traceback
import logging
log = logging.getLogger(__name__)
from ndg.xacml.core.attributevalue import (AttributeValue,
AttributeValueClassFactory)
from ndg.xacml.utils import VettedDict, _isIterable
from ndg.xacml.utils.factory import callModuleObject
# Mapping for function name prefixes that are not real types, but play a similar
# role
SPECIAL_TYPE_MAP = {
'url-string': 'AnyURI',
'xpath-node': 'String'}
class AbstractFunction(object, metaclass=ABCMeta):
"""Abstract Base class for all XACML matching functions
@cvar FUNCTION_NS: namespace for the given function
@type FUNCTION_NS: NoneType (must be string in derived type)
@cvar V1_0_FUNCTION_NS: XACML 1.0 function namespace prefix
@type V1_0_FUNCTION_NS: string
@cvar V2_0_FUNCTION_NS: XACML 2.0 function namespace prefix
@type V2_0_FUNCTION_NS: string
"""
FUNCTION_NS = None
V1_0_FUNCTION_NS = "urn:oasis:names:tc:xacml:1.0:function:"
V2_0_FUNCTION_NS = "urn:oasis:names:tc:xacml:2.0:function:"
def __init__(self):
"""
@raise TypeError: if FUNCTION_NS not set correctly
"""
if self.__class__.FUNCTION_NS is None:
raise TypeError('"FUNCTION_NS" class variable must be defined in '
'derived classes')
@abstractmethod
def evaluate(self, *inputs):
"""Evaluate the function from the given input arguments and context
@param inputs: input arguments need to evaluate the function
@type inputs: tuple
@return: derived type should return True for match, False otherwise
@rtype: bool (derived type), NoneType for THIS implementation
"""
class XacmlFunctionNames(object):
"""XACML standard match function names
@cvar FUNCTION_NAMES: list of all the XACML function URNs
@type FUNCTION_NAMES: tuple
"""
FUNCTION_NAMES = (
'urn:oasis:names:tc:xacml:1.0:function:string-equal',
'urn:oasis:names:tc:xacml:1.0:function:boolean-equal',
'urn:oasis:names:tc:xacml:1.0:function:integer-equal',
'urn:oasis:names:tc:xacml:1.0:function:double-equal',
'urn:oasis:names:tc:xacml:1.0:function:date-equal',
'urn:oasis:names:tc:xacml:1.0:function:time-equal',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-equal',
'urn:oasis:names:tc:xacml:1.0:function:dayTimeDuration-equal',
'urn:oasis:names:tc:xacml:1.0:function:yearMonthDuration-equal',
'urn:oasis:names:tc:xacml:1.0:function:anyURI-equal',
'urn:oasis:names:tc:xacml:1.0:function:x500Name-equal',
'urn:oasis:names:tc:xacml:1.0:function:rfc822Name-equal',
'urn:oasis:names:tc:xacml:1.0:function:hexBinary-equal',
'urn:oasis:names:tc:xacml:1.0:function:base64Binary-equal',
'urn:oasis:names:tc:xacml:1.0:function:integer-add',
'urn:oasis:names:tc:xacml:1.0:function:double-add',
'urn:oasis:names:tc:xacml:1.0:function:integer-subtract',
'urn:oasis:names:tc:xacml:1.0:function:double-subtract',
'urn:oasis:names:tc:xacml:1.0:function:integer-multiply',
'urn:oasis:names:tc:xacml:1.0:function:double-multiply',
'urn:oasis:names:tc:xacml:1.0:function:integer-divide',
'urn:oasis:names:tc:xacml:1.0:function:double-divide',
'urn:oasis:names:tc:xacml:1.0:function:integer-mod',
'urn:oasis:names:tc:xacml:1.0:function:integer-abs',
'urn:oasis:names:tc:xacml:1.0:function:double-abs',
'urn:oasis:names:tc:xacml:1.0:function:round',
'urn:oasis:names:tc:xacml:1.0:function:floor',
'urn:oasis:names:tc:xacml:1.0:function:string-normalize-space',
'urn:oasis:names:tc:xacml:1.0:function:string-normalize-to-lower-case',
'urn:oasis:names:tc:xacml:1.0:function:double-to-integer',
'urn:oasis:names:tc:xacml:1.0:function:integer-to-double',
'urn:oasis:names:tc:xacml:1.0:function:or',
'urn:oasis:names:tc:xacml:1.0:function:and',
'urn:oasis:names:tc:xacml:1.0:function:n-of',
'urn:oasis:names:tc:xacml:1.0:function:not',
'urn:oasis:names:tc:xacml:1.0:function:integer-greater-than',
'urn:oasis:names:tc:xacml:1.0:function:integer-greater-than-or-equal',
'urn:oasis:names:tc:xacml:1.0:function:integer-less-than',
'urn:oasis:names:tc:xacml:1.0:function:integer-less-than-or-equal',
'urn:oasis:names:tc:xacml:1.0:function:double-greater-than',
'urn:oasis:names:tc:xacml:1.0:function:double-greater-than-or-equal',
'urn:oasis:names:tc:xacml:1.0:function:double-less-than',
'urn:oasis:names:tc:xacml:1.0:function:double-less-than-or-equal',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-add-dayTimeDuration',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-add-yearMonthDuration',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-subtract-dayTimeDuration',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-subtract-yearMonthDuration',
'urn:oasis:names:tc:xacml:1.0:function:date-add-yearMonthDuration',
'urn:oasis:names:tc:xacml:1.0:function:date-subtract-yearMonthDuration',
'urn:oasis:names:tc:xacml:1.0:function:string-greater-than',
'urn:oasis:names:tc:xacml:1.0:function:string-greater-than-or-equal',
'urn:oasis:names:tc:xacml:1.0:function:string-less-than',
'urn:oasis:names:tc:xacml:1.0:function:string-less-than-or-equal',
'urn:oasis:names:tc:xacml:1.0:function:time-greater-than',
'urn:oasis:names:tc:xacml:1.0:function:time-greater-than-or-equal',
'urn:oasis:names:tc:xacml:1.0:function:time-less-than',
'urn:oasis:names:tc:xacml:1.0:function:time-less-than-or-equal',
'urn:oasis:names:tc:xacml:2.0:function:time-in-range',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-greater-than',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-greater-than-or-equal',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-less-than',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-less-than-or-equal',
'urn:oasis:names:tc:xacml:1.0:function:date-greater-than',
'urn:oasis:names:tc:xacml:1.0:function:date-greater-than-or-equal',
'urn:oasis:names:tc:xacml:1.0:function:date-less-than',
'urn:oasis:names:tc:xacml:1.0:function:date-less-than-or-equal',
'urn:oasis:names:tc:xacml:1.0:function:string-one-and-only',
'urn:oasis:names:tc:xacml:1.0:function:string-bag-size',
'urn:oasis:names:tc:xacml:1.0:function:string-is-in',
'urn:oasis:names:tc:xacml:1.0:function:string-bag',
'urn:oasis:names:tc:xacml:1.0:function:boolean-one-and-only',
'urn:oasis:names:tc:xacml:1.0:function:boolean-bag-size',
'urn:oasis:names:tc:xacml:1.0:function:boolean-is-in',
'urn:oasis:names:tc:xacml:1.0:function:boolean-bag',
'urn:oasis:names:tc:xacml:1.0:function:integer-one-and-only',
'urn:oasis:names:tc:xacml:1.0:function:integer-bag-size',
'urn:oasis:names:tc:xacml:1.0:function:integer-is-in',
'urn:oasis:names:tc:xacml:1.0:function:integer-bag',
'urn:oasis:names:tc:xacml:1.0:function:double-one-and-only',
'urn:oasis:names:tc:xacml:1.0:function:double-bag-size',
'urn:oasis:names:tc:xacml:1.0:function:double-is-in',
'urn:oasis:names:tc:xacml:1.0:function:double-bag',
'urn:oasis:names:tc:xacml:1.0:function:time-one-and-only',
'urn:oasis:names:tc:xacml:1.0:function:time-bag-size',
'urn:oasis:names:tc:xacml:1.0:function:time-is-in',
'urn:oasis:names:tc:xacml:1.0:function:time-bag',
'urn:oasis:names:tc:xacml:1.0:function:date-one-and-only',
'urn:oasis:names:tc:xacml:1.0:function:date-bag-size',
'urn:oasis:names:tc:xacml:1.0:function:date-is-in',
'urn:oasis:names:tc:xacml:1.0:function:date-bag',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-one-and-only',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-bag-size',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-is-in',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-bag',
'urn:oasis:names:tc:xacml:1.0:function:anyURI-one-and-only',
'urn:oasis:names:tc:xacml:1.0:function:anyURI-bag-size',
'urn:oasis:names:tc:xacml:1.0:function:anyURI-is-in',
'urn:oasis:names:tc:xacml:1.0:function:anyURI-bag',
'urn:oasis:names:tc:xacml:1.0:function:hexBinary-one-and-only',
'urn:oasis:names:tc:xacml:1.0:function:hexBinary-bag-size',
'urn:oasis:names:tc:xacml:1.0:function:hexBinary-is-in',
'urn:oasis:names:tc:xacml:1.0:function:hexBinary-bag',
'urn:oasis:names:tc:xacml:1.0:function:base64Binary-one-and-only',
'urn:oasis:names:tc:xacml:1.0:function:base64Binary-bag-size',
'urn:oasis:names:tc:xacml:1.0:function:base64Binary-is-in',
'urn:oasis:names:tc:xacml:1.0:function:base64Binary-bag',
'urn:oasis:names:tc:xacml:1.0:function:dayTimeDuration-one-and-only',
'urn:oasis:names:tc:xacml:1.0:function:dayTimeDuration-bag-size',
'urn:oasis:names:tc:xacml:1.0:function:dayTimeDuration-is-in',
'urn:oasis:names:tc:xacml:1.0:function:dayTimeDuration-bag',
'urn:oasis:names:tc:xacml:1.0:function:yearMonthDuration-one-and-only',
'urn:oasis:names:tc:xacml:1.0:function:yearMonthDuration-bag-size',
'urn:oasis:names:tc:xacml:1.0:function:yearMonthDuration-is-in',
'urn:oasis:names:tc:xacml:1.0:function:yearMonthDuration-bag',
'urn:oasis:names:tc:xacml:1.0:function:x500Name-one-and-only',
'urn:oasis:names:tc:xacml:1.0:function:x500Name-bag-size',
'urn:oasis:names:tc:xacml:1.0:function:x500Name-is-in',
'urn:oasis:names:tc:xacml:1.0:function:x500Name-bag',
'urn:oasis:names:tc:xacml:1.0:function:rfc822Name-one-and-only',
'urn:oasis:names:tc:xacml:1.0:function:rfc822Name-bag-size',
'urn:oasis:names:tc:xacml:1.0:function:rfc822Name-is-in',
'urn:oasis:names:tc:xacml:1.0:function:rfc822Name-bag',
'urn:oasis:names:tc:xacml:2.0:function:string-concatenate',
'urn:oasis:names:tc:xacml:2.0:function:uri-string-concatenate',
'urn:oasis:names:tc:xacml:1.0:function:any-of',
'urn:oasis:names:tc:xacml:1.0:function:all-of',
'urn:oasis:names:tc:xacml:1.0:function:any-of-any',
'urn:oasis:names:tc:xacml:1.0:function:all-of-any',
'urn:oasis:names:tc:xacml:1.0:function:any-of-all',
'urn:oasis:names:tc:xacml:1.0:function:all-of-all',
'urn:oasis:names:tc:xacml:1.0:function:map',
'urn:oasis:names:tc:xacml:1.0:function:x500Name-match',
'urn:oasis:names:tc:xacml:1.0:function:rfc822Name-match',
'urn:oasis:names:tc:xacml:1.0:function:string-regexp-match',
'urn:oasis:names:tc:xacml:2.0:function:anyURI-regexp-match',
'urn:oasis:names:tc:xacml:2.0:function:ipAddress-regexp-match',
'urn:oasis:names:tc:xacml:2.0:function:dnsName-regexp-match',
'urn:oasis:names:tc:xacml:2.0:function:rfc822Name-regexp-match',
'urn:oasis:names:tc:xacml:2.0:function:x500Name-regexp-match',
'urn:oasis:names:tc:xacml:1.0:function:xpath-node-count',
'urn:oasis:names:tc:xacml:1.0:function:xpath-node-equal',
'urn:oasis:names:tc:xacml:1.0:function:xpath-node-match',
'urn:oasis:names:tc:xacml:1.0:function:string-intersection',
'urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of',
'urn:oasis:names:tc:xacml:1.0:function:string-union',
'urn:oasis:names:tc:xacml:1.0:function:string-subset',
'urn:oasis:names:tc:xacml:1.0:function:string-set-equals',
'urn:oasis:names:tc:xacml:1.0:function:boolean-intersection',
'urn:oasis:names:tc:xacml:1.0:function:boolean-at-least-one-member-of',
'urn:oasis:names:tc:xacml:1.0:function:boolean-union',
'urn:oasis:names:tc:xacml:1.0:function:boolean-subset',
'urn:oasis:names:tc:xacml:1.0:function:boolean-set-equals',
'urn:oasis:names:tc:xacml:1.0:function:integer-intersection',
'urn:oasis:names:tc:xacml:1.0:function:integer-at-least-one-member-of',
'urn:oasis:names:tc:xacml:1.0:function:integer-union',
'urn:oasis:names:tc:xacml:1.0:function:integer-subset',
'urn:oasis:names:tc:xacml:1.0:function:integer-set-equals',
'urn:oasis:names:tc:xacml:1.0:function:double-intersection',
'urn:oasis:names:tc:xacml:1.0:function:double-at-least-one-member-of',
'urn:oasis:names:tc:xacml:1.0:function:double-union',
'urn:oasis:names:tc:xacml:1.0:function:double-subset',
'urn:oasis:names:tc:xacml:1.0:function:double-set-equals',
'urn:oasis:names:tc:xacml:1.0:function:time-intersection',
'urn:oasis:names:tc:xacml:1.0:function:time-at-least-one-member-of',
'urn:oasis:names:tc:xacml:1.0:function:time-union',
'urn:oasis:names:tc:xacml:1.0:function:time-subset',
'urn:oasis:names:tc:xacml:1.0:function:time-set-equals',
'urn:oasis:names:tc:xacml:1.0:function:date-intersection',
'urn:oasis:names:tc:xacml:1.0:function:date-at-least-one-member-of',
'urn:oasis:names:tc:xacml:1.0:function:date-union',
'urn:oasis:names:tc:xacml:1.0:function:date-subset',
'urn:oasis:names:tc:xacml:1.0:function:date-set-equals',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-intersection',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-at-least-one-member-of',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-union',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-subset',
'urn:oasis:names:tc:xacml:1.0:function:dateTime-set-equals',
'urn:oasis:names:tc:xacml:1.0:function:anyURI-intersection',
'urn:oasis:names:tc:xacml:1.0:function:anyURI-at-least-one-member-of',
'urn:oasis:names:tc:xacml:1.0:function:anyURI-union',
'urn:oasis:names:tc:xacml:1.0:function:anyURI-subset',
'urn:oasis:names:tc:xacml:1.0:function:anyURI-set-equals',
'urn:oasis:names:tc:xacml:1.0:function:hexBinary-intersection',
'urn:oasis:names:tc:xacml:1.0:function:hexBinary-at-least-one-member-of',
'urn:oasis:names:tc:xacml:1.0:function:hexBinary-union',
'urn:oasis:names:tc:xacml:1.0:function:hexBinary-subset',
'urn:oasis:names:tc:xacml:1.0:function:hexBinary-set-equals',
'urn:oasis:names:tc:xacml:1.0:function:base64Binary-intersection',
'urn:oasis:names:tc:xacml:1.0:function:base64Binary-at-least-one-member-of',
'urn:oasis:names:tc:xacml:1.0:function:base64Binary-union',
'urn:oasis:names:tc:xacml:1.0:function:base64Binary-subset',
'urn:oasis:names:tc:xacml:1.0:function:base64Binary-set-equals',
'urn:oasis:names:tc:xacml:1.0:function:dayTimeDuration-intersection',
'urn:oasis:names:tc:xacml:1.0:function:dayTimeDuration-at-least-one-member-of',
'urn:oasis:names:tc:xacml:1.0:function:dayTimeDuration-union',
'urn:oasis:names:tc:xacml:1.0:function:dayTimeDuration-subset',
'urn:oasis:names:tc:xacml:1.0:function:dayTimeDuration-set-equals',
'urn:oasis:names:tc:xacml:1.0:function:yearMonthDuration-intersection',
'urn:oasis:names:tc:xacml:1.0:function:yearMonthDuration-at-least-one-member-of',
'urn:oasis:names:tc:xacml:1.0:function:yearMonthDuration-union',
'urn:oasis:names:tc:xacml:1.0:function:yearMonthDuration-subset',
'urn:oasis:names:tc:xacml:1.0:function:yearMonthDuration-set-equals',
'urn:oasis:names:tc:xacml:1.0:function:x500Name-intersection',
'urn:oasis:names:tc:xacml:1.0:function:x500Name-at-least-one-member-of',
'urn:oasis:names:tc:xacml:1.0:function:x500Name-union',
'urn:oasis:names:tc:xacml:1.0:function:x500Name-subset',
'urn:oasis:names:tc:xacml:1.0:function:x500Name-set-equals',
'urn:oasis:names:tc:xacml:1.0:function:rfc822Name-intersection',
'urn:oasis:names:tc:xacml:1.0:function:rfc822Name-at-least-one-member-of',
'urn:oasis:names:tc:xacml:1.0:function:rfc822Name-union',
'urn:oasis:names:tc:xacml:1.0:function:rfc822Name-subset',
'urn:oasis:names:tc:xacml:1.0:function:rfc822Name-set-equals',
)
from ndg.xacml import XacmlError
class UnsupportedFunctionError(XacmlError):
"""Encountered a function type that is not recognised as part of the XACML
specification and is not supported in this implementation"""
class UnsupportedStdFunctionError(UnsupportedFunctionError):
"""Encountered a function type that is not supported even though it is
part of the XACML specification"""
def unsupportedFunctionErrorFactory(identifier, msg=None):
"""Factory function to return an unsupported function exception based on
the function identifier passed in
@param identifier: XACML function namespace to check
@type identifier: basestring
@return: unsupported function exception instance
@rtype: UnsupportedFunctionError or UnsupportedStdFunctionError depending
on the identifier passed
"""
    if identifier in XacmlFunctionNames.FUNCTION_NAMES:
        if msg is None:
            msg = "%s: %s" % (UnsupportedStdFunctionError.__doc__, identifier)
        return UnsupportedStdFunctionError(msg)
    else:
        if msg is None:
            msg = "%s: %s" % (UnsupportedFunctionError.__doc__, identifier)
        return UnsupportedFunctionError(msg)
class OverwritingStdFunctionError(XacmlError):
"""Attempting to overwrite a standard function namespace with a custom one
(probably from load_custom_function method)"""
class FunctionClassFactoryInterface(object, metaclass=ABCMeta):
    """Interface class for function module class factory class
    """
@abstractmethod
def __call__(self, identifier):
'''Create class for the given XACML function identifier
@param identifier: XACML function identifier
@type identifier: basestring
@return: at least one member of class corresponding to the given input
identifier
@rtype: AbstractFunction derived type or NoneType if no match is found
'''
return None
class FunctionClassFactoryBase(FunctionClassFactoryInterface):
"""Base implementation for XACML Function Class Factory. There should be
one derived type for each function family implemented in sub-modules of
ndg.xacml.core.functions
e.g.
for urn:oasis:names:tc:xacml:1.0:function:<type>-at-least-one-member-of a
class factory should exist,
ndg.xacml.core.functions.v1.at_least_one_member_of.FunctionClassFactory
which will be capable of returning a type derived from AbstractFunction:
<type>AtLeastOneMemberOf
e.g. StringAtLeastOneMemberOf, BooleanAtLeastOneMemberOf.
    This class is for convenience only; some function factories are better
    derived directly from FunctionClassFactoryInterface
Derived classes MUST define these class variables:
@cvar FUNCTION_NAMES: list of function identifiers that this factory can
produce classes for e.g.:
('urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of', ...)
@type FUNCTION_NAMES: NoneType (but list in derived class)
@cvar FUNCTION_NS_SUFFIX: urn suffix for the family of function to define
e.g. -at-least-one-member-of is the suffix for the URN:
urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of
@type FUNCTION_NS_SUFFIX: NoneType (but basestring in derived class)
@cvar FUNCTION_BASE_CLASS: base class for this family of functions e.g for
urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of,
ndg.xacml.core.functions.v1.at_least_one_member_of.AtLeastOneMemberOfBase
@type FUNCTION_BASE_CLASS: NoneType (but AbstractFunction derived type in
derived function factory class)
"""
FUNCTION_NS_SUFFIX = None
FUNCTION_NAMES = None
FUNCTION_BASE_CLASS = None
URN_SEP = ':'
FUNCTION_NAME_SEP = '-'
__slots__ = ('__map', 'attributeValueClassFactory', 'functionSuffix')
def __init__(self):
'''This class is in fact abstract - derived types must define the
FUNCTION_NS_SUFFIX and FUNCTION_BASE_CLASS class variables
'''
if None in (self.__class__.FUNCTION_NS_SUFFIX,
self.__class__.FUNCTION_BASE_CLASS):
raise TypeError('"FUNCTION_NS_SUFFIX" and "FUNCTION_BASE_CLASS" '
'must be defined in a derived implementation of '
'FunctionClassFactoryBase. See '
'FunctionClassFactoryBase.__doc__ contents')
if not _isIterable(self.__class__.FUNCTION_NAMES):
raise TypeError('"FUNCTION_NAMES" class variable must be an '
'iterable of string type function identifiers; got '
'%r' % self.__class__.FUNCTION_NAMES)
self.__map = {}
# Enables creation of matching attribute types to relevant to the
# function classes
self.attributeValueClassFactory = AttributeValueClassFactory()
functionSuffixParts = self.__class__.FUNCTION_NS_SUFFIX.split(
self.__class__.FUNCTION_NAME_SEP)
self.functionSuffix = ''.join([n[0].upper() + n[1:]
for n in functionSuffixParts if n])
def initAllFunctionClasses(self):
"""Create classes for all functions for a data type e.g. a derived class
could implement a factory for <type>-at-least-one-member-of functions:
string-at-least-one-member-of, boolean-at-least-one-member-of, etc.
Function classes are placed in a look-up table __map for the __call__()
method to access
In practice, there shouldn't be a need to load all the functions in
one go. The __call__ method loads functions and caches them as needed.
"""
for identifier in self.__class__.FUNCTION_NAMES:
self.loadFunction(identifier)
def loadFunction(self, identifier):
"""Create a class for the given function namespace and cache it in the
function class look-up table for future requests. Note that this call
overwrites any existing entry in the cache whereas __call__ will try
to use an entry in the cache if it already exists
@param identifier: XACML function namespace
@type identifier: basestring
"""
# str.capitalize doesn't do what's required: need to capitalize the
# first letter of the word BUT retain camel case for the rest of it
_capitalize = lambda s: s[0].upper() + s[1:]
# Extract the function name and the type portion of the function
# name in order to make an implementation of a class to handle it
functionName = identifier.split(self.__class__.URN_SEP)[-1]
typePart = functionName.split(self.__class__.FUNCTION_NS_SUFFIX)[0]
# Attempt to infer from the function name the associated type
typeName = _capitalize(typePart)
# Remove any hyphens converting to camel case
if '-' in typeName:
typeName = ''.join([_capitalize(i) for i in typeName.split('-')])
typeURI = AttributeValue.TYPE_URI_MAP.get(typeName)
if typeURI is None:
# Ugly hack to allow for functions that start with a prefix that
# isn't a real type.
if typePart in SPECIAL_TYPE_MAP:
typeURI = AttributeValue.TYPE_URI_MAP[
SPECIAL_TYPE_MAP[typePart]]
else:
raise TypeError('No AttributeValue.TYPE_URI_MAP entry for '
'%r type' % typePart)
_type = self.attributeValueClassFactory(typeURI)
if _type is None:
raise TypeError('No AttributeValue.TYPE_MAP entry for %r type' %
typeName)
className = typeName + self.functionSuffix
classVars = {
'TYPE': _type,
'FUNCTION_NS': identifier
}
functionClass = type(className,
(self.__class__.FUNCTION_BASE_CLASS, ),
classVars)
self.__map[identifier] = functionClass
def __call__(self, identifier):
"""Return the class for the given XACML type function identifier
@param identifier: XACML *-at-least-one-member-of type function
identifier
@type identifier: basestring
@return: at least one member of class corresponding to the given input
identifier
@rtype: AtLeastOneMemberOfBase derived type or None if no match is
found
"""
# Check the cache first
functionClass = self.__map.get(identifier)
if functionClass is None:
# No class set in the cache - try loading the new class and updating
# the cache.
self.loadFunction(identifier)
# This should result in a safe retrieval from the cache because of the
# above check - None return would result otherwise.
return self.__map.get(identifier)
class FunctionMapError(Exception):
"""Generic Error exception class for FunctionMap"""
class FunctionMapConfigError(FunctionMapError):
"""Configuration related exception for FunctionMap"""
class FunctionMap(VettedDict):
"""Map function IDs to their class implementations in the various function
sub-modules. It provide a layer over the various
FunctionClassFactoryInterface implementations so that a function class can
be obtained directly from a given XACML function URN.
@cvar FUNCTION_PKG_PREFIX: python package path for functions package
@type FUNCTION_PKG_PREFIX: string
@cvar V1_0_PKG_PREFIX: python package path for XACML 1.0 functions package
@type V1_0_PKG_PREFIX: string
@cvar V2_0_PKG_PREFIX: python package path for XACML 2.0 functions package
@type V2_0_PKG_PREFIX: string
@cvar SUPPORTED_NSS: mapping of function URN prefix to Python package
@type SUPPORTED_NSS: dict
@cvar FUNCTION_CLASS_FACTORY_CLASSNAME: standard name for class factory
which should be present in each generic function module. This factory is
invoked to create the function class for any given function URN related to
that module
@type FUNCTION_CLASS_FACTORY_CLASSNAME: string
"""
FUNCTION_PKG_PREFIX = 'ndg.xacml.core.functions.'
V1_0_PKG_PREFIX = FUNCTION_PKG_PREFIX + 'v1.'
V2_0_PKG_PREFIX = FUNCTION_PKG_PREFIX + 'v2.'
SUPPORTED_NSS = {
AbstractFunction.V1_0_FUNCTION_NS: V1_0_PKG_PREFIX,
AbstractFunction.V2_0_FUNCTION_NS: V2_0_PKG_PREFIX
}
# Each function module is expected to have a class factory for obtaining
# a class for the given function identifier associated with that module
FUNCTION_CLASS_FACTORY_CLASSNAME = 'FunctionClassFactory'
def __init__(self):
"""Force type for dictionary key value pairs: function values must be
of AbstractFunction derived type and ID keys string type
"""
        # Filters are defined as staticmethods but referenced via self here to
        # enable a derived class to override them as standard methods without
        # needing to redefine this __init__ method
        VettedDict.__init__(self, self.keyFilter, self.valueFilter)
        # This class maintains a list of XACML function URN -> Function class
# mappings. This additional dict enables caching of class factories
# used to obtain the function classes. There is one class factory per
# function module e.g. ndg.xacml.core.functions.v1.equal contains a
# class factory which creates the various
# urn:oasis:names:tc:xacml:1.0:function:<type>-equal function classes
self.__classFactoryMap = {}
self.__custom_class_factory_map = {}
@staticmethod
def keyFilter(key):
"""Enforce string type keys
@param key: function URN
@type key: basestring
@return: True for valid key type
@rtype: bool
@raise TypeError: invalid key type
"""
if not isinstance(key, str):
raise TypeError('Expecting %r type for key; got %r' %
(str, type(key)))
return True
@staticmethod
def valueFilter(value):
"""Enforce AbstractFunction derived types for match functions
        @param value: function class to be registered
        @type value: ndg.xacml.core.functions.AbstractFunction / NotImplemented
        @return: True for valid function type
        @rtype: bool
        @raise TypeError: invalid value type
"""
if value is NotImplemented:
return True
elif not issubclass(value, AbstractFunction):
raise TypeError('Expecting %r derived type for value; got %r' %
(AbstractFunction, value))
return True
def loadAllCore(self):
"""Load all core XACML functions"""
for functionNs in XacmlFunctionNames.FUNCTION_NAMES:
self.loadFunction(functionNs)
def loadFunction(self, functionNs):
"""Get package to retrieve function class for the given XACML function
namespace
@param functionNs: XACML function namespace
@type functionNs: basestring
"""
# Try map for custom function class
if functionNs in self:
return self[functionNs]
# else try the class factory - there is one factory per family of
# functions e.g. bag functions, at least one member of functions etc.
functionFactory = self.__classFactoryMap.get(functionNs)
if functionFactory is not None:
# Get function class from previously cached factory
self[functionNs] = functionFactory(functionNs)
return
# No Factory has been cached for this function yet
cls = FunctionMap
classPath = None
for namespacePrefix, pkgNamePrefix in list(cls.SUPPORTED_NSS.items()):
if functionNs.startswith(namespacePrefix):
# Namespace is recognised - translate into a path to a
# function class in the right functions package
functionName = functionNs.split(namespacePrefix)[-1]
functionNameParts = functionName.split('-')
if len(functionNameParts) == 1:
moduleName = functionNameParts[0]
else:
prefix = None
# Ugly hack to allow for functions that start with a prefix
# that isn't a real type.
for pfx in SPECIAL_TYPE_MAP.keys():
pfxsep = pfx + '-'
if functionName.startswith(pfxsep):
prefix = pfxsep
break
if prefix:
suffix = functionName[len(prefix):]
moduleName = '_'.join(suffix.split('-')).lower()
else:
moduleName = '_'.join(functionNameParts[1:]).lower()
classPath = pkgNamePrefix + moduleName + '.' + \
cls.FUNCTION_CLASS_FACTORY_CLASSNAME
break
if classPath is None:
raise FunctionMapConfigError('Namespace for function not '
'recognised: %r' % functionNs)
# Try instantiating the function class and loading it into the map
try:
functionFactory = callModuleObject(classPath)
except (ImportError, AttributeError) as e:
log.error("Error importing function factory class %r for function "
"identifier %r: %s", classPath, functionNs, str(e))
            # No implementation exists - record it as NotImplemented
self[functionNs] = NotImplemented
else:
function = functionFactory(functionNs)
if function is None:
raise unsupportedFunctionErrorFactory(functionNs)
self[functionNs] = function
self.__classFactoryMap[functionNs] = functionFactory
def load_custom_function(self,
function_ns,
function_factory=None,
function_factory_path=None):
"""Add a user defined function to the list of functions supported"""
if function_ns in XacmlFunctionNames.FUNCTION_NAMES:
            raise OverwritingStdFunctionError("Attempting to overwrite the "
                                              "standard function namespace %r "
                                              "with a new custom function" %
                                              function_ns)
if function_factory is None:
if not isinstance(function_factory_path, str):
raise TypeError('Expecting "function_factory_path" keyword '
'set to string function factory path; got %r' %
function_factory_path)
try:
function_factory = callModuleObject(function_factory_path)
except (ImportError, AttributeError) as e:
log.error("Error importing function factory class %r for custom "
"function identifier %r: %s", function_factory_path,
function_ns, str(e))
raise
function = function_factory(function_ns)
if function is None:
raise unsupportedFunctionErrorFactory(function_ns)
self[function_ns] = function
self.__custom_class_factory_map[function_ns] = function_factory
def __getitem__(self, key):
"""Override base class implementation to load and cache function classes
if they don't otherwise exist
@param key: function URN
@type key: basestring
@return: function class
@rtype: ndg.xacml.core.functions.AbstractFunction / NotImplemented
"""
functionClass = VettedDict.get(self, key)
if functionClass is None:
self.loadFunction(key)
return VettedDict.__getitem__(self, key)
def get(self, key, *arg):
"""Likewise to __getitem__, enable loading and caching of function
classes if they don't otherwise exist
@param key: XACML function URN
@type key: basestring
@param arg: set a single additional argument if required which is
used as the default value should the key not be found in the map
@type arg: tuple
@return: function class
@rtype: ndg.xacml.core.functions.AbstractFunction / NotImplemented
"""
functionClass = VettedDict.get(self, key, *arg)
if functionClass is None:
self.loadFunction(key)
return VettedDict.get(self, key, *arg)
else:
return functionClass
# Function map singleton used by match and apply classes - add new keys to
# this dictionary to enable support for custom functions
functionMap = FunctionMap()
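if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module):
    # indexing the singleton by a standard XACML URN lazily imports the
    # relevant function sub-module and caches the resulting class.
    _string_equal_cls = functionMap[
        'urn:oasis:names:tc:xacml:1.0:function:string-equal']
    print(_string_equal_cls)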
|
StarcoderdataPython
|
9614708
|
<gh_stars>0
# website-to-note
# simple python app that, given a url from the clipboard, scrapes the web page for its title and returns useful information for use in docs, notetaking apps, etc.
# by <NAME>
# github.com/wisehackermonkey
# <EMAIL>
# 20200420
import requests
import pyperclip
import validators
# script for getting the date in format yyyymmdd for easy use
from dateformate import current_date
# scrape url for title
# credit goes to this stackoverflow post
# https://stackoverflow.com/questions/51233/how-can-i-retrieve-the-page-title-of-a-webpage-using-python
def get_website_title(url):
    if not url:
        print("no url given")
        return -1
    # send a browser-like User-Agent so sites that reject default clients still respond
    headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:51.0) Gecko/20100101 Firefox/51.0'}
    n = requests.get(url, headers=headers)
    al = n.text
    title = al[al.find('<title>') + 7 : al.find('</title>')]
    return title
def main():
# grab the contents of the clipboard
clipboard_url = pyperclip.paste()
#check if the clipboard has a url
if not validators.url(clipboard_url):
print("Clipboard Does not contain a URL")
return -1
# format the title, url, and date for the website in a string
result = f"""
{get_website_title(clipboard_url)}
{clipboard_url}
{current_date()}
"""
print(result)
#copy result back into clipboard
pyperclip.copy(result)
if __name__ == "__main__":
print("website-to-note by <NAME> 20200420")
# start of the program
main()
|
StarcoderdataPython
|
237330
|
# Generated by Django 3.1 on 2021-08-12 15:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("corpus", "0015_add_document_date"),
]
operations = [
migrations.RenameField(
model_name="document",
old_name="probable_languages",
new_name="secondary_languages",
),
migrations.AlterField(
model_name="document",
name="languages",
field=models.ManyToManyField(
blank=True, to="corpus.LanguageScript", verbose_name="Primary Languages"
),
),
migrations.AlterField(
model_name="document",
name="secondary_languages",
field=models.ManyToManyField(
blank=True,
limit_choices_to=models.Q(_negated=True, language__exact="Unknown"),
related_name="secondary_document",
to="corpus.LanguageScript",
),
),
]
|
StarcoderdataPython
|
4918694
|
<reponame>black-perl/priest<filename>priest/__init__.py
'''
_ _
_ __ _ __(_) ___ ___| |_
| '_ \| '__| |/ _ \/ __| __|
| |_) | | | | __/\__ \ |_
| .__/|_| |_|\___||___/\__|
|_|
Generate wishes from your command line with full customization.
Usage:
======
>>> from priest import morning
>>> morning()
# => Generates a morning message in your native language
Flexibility :
-------------
>>> morning(lang='en',pic=True)
# => Generates a picture message in English language
Now :
-----
>>> from priest import now
>>> now()
# => Automatically gets the timezone and predicts whether it is morning,afternoon,evening
# or night and generates a message accordingly in your native language
For more : github.com/black-perl/priest
'''
import os
__dir__ = os.path.abspath(os.path.dirname(__file__))
__version__ = 0.5
from priest.lib.core import *
|
StarcoderdataPython
|
9697063
|
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "BOS"
addresses_name = "2021-03-25T12:39:18.697471/Bolsover Democracy_Club__06May2021.tsv"
stations_name = "2021-03-25T12:39:18.697471/Bolsover Democracy_Club__06May2021.tsv"
elections = ["2021-05-06"]
csv_delimiter = "\t"
def station_record_to_dict(self, record):
# Bolsover Parish Rooms, Hornscroft Road, Bolsover, Chesterfield
if record.polling_place_id == "4511":
record = record._replace(polling_place_postcode="S44 6HG")
# The Shoulder at Hardstoft, Hardstoft, Chesterfield, Derbyshire
if record.polling_place_id == "4490":
record = record._replace(polling_place_postcode="S45 8AX")
return super().station_record_to_dict(record)
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
if uprn in [
"10013062215", # 3 ROTHERHAM ROAD, SCARCLIFFE, CHESTERFIELD
"10013062214", # 1 ROTHERHAM ROAD, SCARCLIFFE, CHESTERFIELD
"10013068383", # LIVING ACCOMMODATION HORSE AND GROOM MANSFIELD ROAD, SCARCLIFFE
"10013072767", # THE FLAT THE CASTLEWOOD PUBLIC HOUSE CARTER LANE EAST, SOUTH NORMANTON
"200004519933", # APEX INSULATION SUPPLIES LTD, SAWPIT LANE, TIBSHELF, ALFRETON
]:
return None
if record.addressline6 in ["NG20 8FJ", "S44 6QH"]:
return None
return super().address_record_to_dict(record)
|
StarcoderdataPython
|
11276185
|
import math
def xp_for_level_up(level):
return math.floor(level * math.sqrt(level) * 10)
if __name__ == "__main__":
XP_GAIN = 300
level = int(input())
xp_needed = int(input())
nb_puzzles = int(input())
    # total XP banked toward the next level: puzzle rewards plus progress already made
    xp_gained = (nb_puzzles * XP_GAIN) + (xp_for_level_up(level) - xp_needed)
    # spend the banked XP on successive level-ups while it still covers a full level
    while xp_gained >= xp_for_level_up(level):
        xp_gained -= xp_for_level_up(level)
        level += 1
    xp_needed = xp_for_level_up(level) - xp_gained
print(level)
print(xp_needed)
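# Worked example (illustrative, not part of the original submission):
# xp_for_level_up(2) == floor(2 * sqrt(2) * 10) == 28, so a level-2 player who
# still needs 10 XP and solves 1 puzzle banks 300 + (28 - 10) = 318 XP, pays
# 28 + 51 + 80 + 111 = 270 XP to climb from level 2 to level 6, and is left
# needing 146 - 48 = 98 XP for level 7.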
|
StarcoderdataPython
|
1981185
|
<gh_stars>10-100
from django.contrib.sitemaps import Sitemap
from django.urls import reverse
from .models import (
Category, Product, NoticeMessage, Testimonials
)
class StaticSitemap(Sitemap):
priority = 0.7
changefreq = 'daily'
def items(self):
return [
('shop:home', {'store': 'default'}),
('help:faq-list', {'store': 'default'}),
('help:guide', {'store': 'default'}),
('shop:gamemeca-ranking', {'store': 'default'}),
('shop:gamemeca-news', {'store': 'default', 'slug': 'latest'}),
('shop:gamemeca-news', {'store': 'default', 'slug': 'top'}),
('shop:gamemeca-news', {'store': 'default', 'slug': 'review'}),
('shop:gamemeca-news', {'store': 'default', 'slug': 'preview'}),
('shop:gamemeca-news', {'store': 'default', 'slug': 'feature'}),
('site_terms', {}),
('site_privacy', {}),
]
def location(self, item):
(name, kwargs) = item
return reverse(name, kwargs=kwargs)
class ProductCategorySitemap(Sitemap):
priority = 1.0
changefreq = 'daily'
def items(self):
"""
Retrieve leaf nodes
Category.objects.filter(children__isnull=True)
Category.objects.filter(lft=F('rght') - 1)
"""
categories = []
for category in Category.objects.filter(level__gt=0):
categories.append(('shop:product-category', {'store': 'default', 'slug': category.slug}))
return categories
def location(self, item):
(name, kwargs) = item
return reverse(name, kwargs=kwargs)
class ProductSitemap(Sitemap):
priority = 0.9
changefreq = 'daily'
def items(self):
products = []
for product in Product.objects.store('default').enabled().available():
products.append(('shop:product-detail', {'store': 'default', 'pk': product.pk, 'code': product.code}))
return products
def location(self, item):
(name, kwargs) = item
return reverse(name, kwargs=kwargs)
class NoticeMessageSitemap(Sitemap):
priority = 1.0
changefreq = 'daily'
def items(self):
messages = []
for message in NoticeMessage.objects.filter(store__code='default', is_removed=False).order_by('-created'):
messages.append(('help:notice-detail', {'store': 'default', 'pk': message.pk}))
return messages
def location(self, item):
(name, kwargs) = item
return reverse(name, kwargs=kwargs)
class TestimonialsSitemap(Sitemap):
priority = 1.0
changefreq = 'daily'
def items(self):
messages = []
for message in Testimonials.objects.filter(store__code='default', is_removed=False).order_by('-created'):
messages.append(('help:testimonials-detail', {'store': 'default', 'pk': message.pk}))
return messages
def location(self, item):
(name, kwargs) = item
return reverse(name, kwargs=kwargs)
|
StarcoderdataPython
|
6414929
|
import os, time, re
import sublime
import sublime_plugin
import glob
import os
from xml.etree import ElementTree
current_path = None
#
class CreatePolicyFromTemplateCommand(sublime_plugin.WindowCommand):
ROOT_DIR_PREFIX = '[root: '
ROOT_DIR_SUFFIX = ']'
INPUT_PANEL_CAPTION = 'File name:'
def run(self):
if not self.find_root():
return
self.find_templates()
self.show_quick_panel(self.templates, self.template_selected)
def create_and_open_file(self, path):
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
        open(path, 'w').close()
global template
template = {
'content': self.replace_variables(self.get_content(path)),
'filename': os.path.basename(path),
'path': os.path.dirname(path)
}
global current_path
view = self.window.open_file(path)
current_path = view.file_name()
if not view.is_loading():
populate_file(view)
def get_content(self, path):
content = ''
try:
content = self.template.find("content").text
except:
pass
try:
path = os.path.abspath(os.path.join(os.path.dirname(self.template_path), self.template.find("file").text))
content = open(path).read()
print(content)
except:
pass
return content
def find_root(self):
folders = self.window.folders()
if len(folders) == 0:
sublime.error_message('Could not find project root')
return False
self.root = folders[0]
self.rel_path_start = len(self.root) + 1
return True
def construct_excluded_pattern(self):
patterns = [pat.replace('|', '\\') for pat in self.get_setting('excluded_dir_patterns')]
self.excluded = re.compile('^(?:' + '|'.join(patterns) + ')$')
def get_setting(self, key):
settings = None
view = self.window.active_view()
if view:
settings = self.window.active_view().settings()
if settings and settings.has('PolicyTemplates') and key in settings.get('PolicyTemplates'):
# Get project-specific setting
results = settings.get('PolicyTemplates')[key]
else:
# Get user-specific or default setting
settings = sublime.load_settings('PolicyTemplates.sublime-settings')
results = settings.get(key)
return results
def find_templates(self):
self.templates = []
self.template_paths = []
for root, dirnames, filenames in os.walk(sublime.packages_path()):
for filename in filenames:
if filename.endswith(".file-template"):
print(filename)
self.template_paths.append(os.path.join(root, filename))
self.templates.append(os.path.basename(root) + ": " + os.path.splitext(filename)[0])
def template_selected(self, selected_index):
if selected_index != -1:
self.template_path = self.template_paths[selected_index]
#print self.template_path
tree = ElementTree.parse(open(self.template_path))
self.template = tree
self.construct_excluded_pattern()
self.build_relative_paths()
#self.move_current_directory_to_top()
self.show_quick_panel(self.relative_paths, self.dir_selected)
def build_relative_paths(self):
self.relative_paths = []
try:
path = self.template.find("path").text
except:
path = ""
if len(path) > 0:
self.relative_paths = [ "Default: " + self.template.find("path").text ]
self.relative_paths.append( self.ROOT_DIR_PREFIX + os.path.split(self.root)[-1] + self.ROOT_DIR_SUFFIX )
for base, dirs, files in os.walk(self.root):
dirs_copy = dirs[:]
[dirs.remove(dir) for dir in dirs_copy if self.excluded.search(dir)]
for dir in dirs:
relative_path = os.path.join(base, dir)[self.rel_path_start:]
self.relative_paths.append(relative_path)
def move_current_directory_to_top(self):
view = self.window.active_view()
if view:
cur_dir = os.path.dirname(view.file_name())[self.rel_path_start:]
for path in self.relative_paths:
if path == cur_dir:
i = self.relative_paths.index(path)
self.relative_paths.insert(0, self.relative_paths.pop(i))
break
def dir_selected(self, selected_index):
if selected_index != -1:
self.selected_dir = self.relative_paths[selected_index]
filename = ''
if len(self.template.find("filename").text) > 0:
filename = self.template.find("filename").text
try:
self.arguments = list(self.template.find("arguments"))
except:
self.arguments = []
self.variables = {}
self.next_argument()
def next_argument(self):
if len(self.arguments) > 0 :
self.argument = self.arguments.pop(0)
caption = self.argument.text
self.window.show_input_panel(caption, '', self.process_argument, None, None)
else:
self.file_name_input()
def process_argument(self, value):
self.variables[self.argument.tag] = value
self.next_argument()
def replace_variables(self, text):
for variable in self.variables.keys():
text = text.replace( "$" + variable, self.variables[variable] )
return text
def file_name_input(self):
file_name = self.template.find("filename").text
file_name = self.replace_variables(file_name)
dir = self.selected_dir
if self.selected_dir.startswith(self.ROOT_DIR_PREFIX):
dir = ''
if self.selected_dir.startswith("Default: "):
dir = self.template.find("path").text
dir = self.replace_variables(dir)
full_path = os.path.join(self.root, dir, file_name)
if os.path.lexists(full_path):
sublime.error_message('Policy already exists:\n%s' % full_path)
return
else:
self.create_and_open_file(full_path)
def show_quick_panel(self, options, done):
sublime.set_timeout(lambda: self.window.show_quick_panel(options, done), 10)
class PolicyTemplatesListener(sublime_plugin.EventListener):
def on_load(self, view):
global current_path
if view.file_name() == current_path:
populate_file(view)
current_path = None
def populate_file(view):
global template
view.run_command("insert_snippet", {'contents': template["content"]})
|
StarcoderdataPython
|
1811631
|
# Copyright (c) 2020 the original author or authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pytest
from sample_images import IMG_DIR
from src.services.facescan.scanner.facenet.facenet import Facenet2018
from src.services.facescan.scanner.facescanner import FaceScanner
from src.services.facescan.scanner.facescanners import TESTED_SCANNERS
from src.services.facescan.scanner.insightface.insightface import InsightFace
from src.services.facescan.scanner.test._scanner_cache import get_scanner
from src.services.utils.pyutils import first_and_only
DIFFERENCE_THRESHOLD = {
InsightFace: 400,
Facenet2018: 0.2
}
def embeddings_are_equal(embedding1, embedding2, difference_threshold):
difference = sum(((a - b) ** 2 for a, b in zip(embedding1, embedding2)))
print(f"Embedding difference: {difference}, difference threshold: {difference_threshold}")
return difference < difference_threshold
@pytest.mark.integration
@pytest.mark.parametrize('scanner_cls', TESTED_SCANNERS)
def test__given_same_face_images__when_scanned__then_returns_same_embeddings(scanner_cls):
scanner: FaceScanner = get_scanner(scanner_cls)
img1 = IMG_DIR / '007_B.jpg'
img2 = IMG_DIR / '008_B.jpg'
emb1 = first_and_only(scanner.scan(img1)).embedding
emb2 = first_and_only(scanner.scan(img2)).embedding
assert embeddings_are_equal(emb1, emb2, DIFFERENCE_THRESHOLD[scanner_cls])
@pytest.mark.integration
@pytest.mark.parametrize('scanner_cls', TESTED_SCANNERS)
def test__given_diff_face_images__when_scanned__then_returns_diff_embeddings(scanner_cls):
scanner: FaceScanner = get_scanner(scanner_cls)
img1 = IMG_DIR / '007_B.jpg'
img2 = IMG_DIR / '009_C.jpg'
emb1 = first_and_only(scanner.scan(img1)).embedding
emb2 = first_and_only(scanner.scan(img2)).embedding
assert not embeddings_are_equal(emb1, emb2, DIFFERENCE_THRESHOLD[scanner_cls])
|
StarcoderdataPython
|
6436132
|
import logging
import math
from typing import Set, Dict
import torch
from psob_authorship.features.java.ast.Ast import FileAst
from psob_authorship.features.java.ast_metrics.StatementsMetricsCalculator import StatementsMetricsCalculator
from psob_authorship.features.java.ast_metrics.VariableMetricsCalculator import VariableMetricsCalculator
class AstMetricsCalculator:
LOGGER = logging.getLogger('metrics_calculator')
def __init__(self, language_config, ast_path, character_number_for_file: Dict[str, int], filenames=None) -> None:
self.LOGGER.info("Started calculating ast metrics")
super().__init__()
self.LOGGER.info("Started loading ast to memory")
self.asts = FileAst.load_asts_from_files(ast_path, filenames)
self.LOGGER.info("End loading ast to memory")
self.variable_metrics_calculator = VariableMetricsCalculator(language_config, self.asts)
self.statements_metrics_calculator = StatementsMetricsCalculator(language_config, self.asts,
character_number_for_file)
self.LOGGER.info("End calculating ast metrics")
def maximum_depth_of_an_ast(self, filepaths: Set[str]) -> torch.Tensor:
"""
Max depth of files ast.
:param filepaths: paths to files for which metric should be calculated
:return: files ast max depth.
"""
return torch.tensor([
math.log10(float(max([self.asts[filepath].depth for filepath in filepaths])))
])
def get_metrics(self, filepaths: Set[str]) -> torch.Tensor:
return torch.cat((
self.variable_metrics_calculator.get_metrics(filepaths),
self.statements_metrics_calculator.get_metrics(filepaths),
self.maximum_depth_of_an_ast(filepaths)
))
@staticmethod
def get_metric_names():
return VariableMetricsCalculator.get_metrics_names() + StatementsMetricsCalculator.get_metrics_names() + \
["maximum_depth_of_an_ast"]
|
StarcoderdataPython
|
301881
|
<filename>src/data/113.py
n, q = map(int, input().split())
tree = [[] for _ in range(n)]
for _ in range(n - 1):
a, b = map(int, input().split())
a -= 1
b -= 1
tree[a].append(b)
tree[b].append(a)
from collections import deque
dq = deque()
dq.append(0)
dist = [-1] * n
dist[0] = 1
while dq:
now = dq.popleft()
for next in tree[now]:
if dist[next] != -1: continue
dist[next] = dist[now] + 1
dq.append(next)
for _ in range(q):
c, d = map(int, input().split())
c -= 1
d -= 1
c2 = dist[c] % 2
d2 = dist[d] % 2
if c2 ^ d2: print('Road')
else: print('Town')
|
StarcoderdataPython
|
6467109
|
<gh_stars>1-10
from setuptools import setup, find_packages
setup(
name='Python Template',
version='0.1',
description='Python Template',
url='https://weisslab.cs.ucl.ac.uk/WEISS/SoftwareArchitecture/PythonTemplate',
author='<NAME>',
author_email='<EMAIL>',
license='BSD-3 license',
packages=find_packages(
exclude=[
'doc',
'tests',
]
)
)
|
StarcoderdataPython
|
1627164
|
<gh_stars>1000+
# Copyright 2015 ClusterHQ Inc. See LICENSE file for details.
"""
Tests for ``flocker.node.agents.cinder``.
"""
from ..cinder import _openstack_verify_from_config, _get_compute_id
from ....common import ipaddress_from_string
from ....testtools import TestCase
class VerifyTests(TestCase):
"""
Tests for _openstack_verify_from_config.
"""
def test_verify_not_set(self):
"""
HTTPS connections are verified using system CA's if not
overridden.
"""
config = {
'backend': 'openstack',
'auth_plugin': 'password',
}
self.assertEqual(_openstack_verify_from_config(**config), True)
def test_verify_ca_path(self):
"""
HTTPS connections are verified using a CA bundle if
``verify_ca_path`` is provided.
"""
config = {
'backend': 'openstack',
'auth_plugin': 'password',
'verify_peer': True,
'verify_ca_path': '/a/path'
}
self.assertEqual(_openstack_verify_from_config(**config), '/a/path')
def test_verify_false(self):
"""
HTTPS connections are not verified if ``verify_peer`` is false.
"""
config = {
'backend': 'openstack',
'auth_plugin': 'password',
'verify_peer': False,
}
self.assertEqual(_openstack_verify_from_config(**config), False)
def test_verify_false_ca_path(self):
"""
HTTPS connections are not verified if ``verify_peer`` is false,
even if a ``verify_ca_path`` is provided.
"""
config = {
'backend': 'openstack',
'auth_plugin': 'password',
'verify_peer': False,
'verify_ca_path': '/a/path'
}
self.assertEqual(_openstack_verify_from_config(**config), False)
class GetComputeIdTests(TestCase):
"""
Tests for ``_get_compute_id``.
"""
def test_local_ips_equal_reported_ips(self):
"""
If local IPs are the same as a node's IPs that node is the one
chosen.
"""
local_ips_1 = {ipaddress_from_string("192.0.0.1"),
ipaddress_from_string("10.0.0.1")}
local_ips_2 = {ipaddress_from_string("10.0.0.2")}
reported_ips = {u"server1": local_ips_1,
u"server2": local_ips_2}
self.assertEqual(
(_get_compute_id(local_ips_1, reported_ips),
_get_compute_id(local_ips_2, reported_ips)),
(u"server1", u"server2"))
def test_local_ips_superset_of_reported_ips(self):
"""
If local IPs are a superset of a node's IPs that node is the one
chosen.
We expect local IPs to include addresses like 127.0.0.1 which
won't show up in reported IPs for remote nodes.
"""
local_ips_1 = {ipaddress_from_string("192.0.0.1"),
ipaddress_from_string("10.0.0.1")}
local_ips_2 = {ipaddress_from_string("10.0.0.2"),
ipaddress_from_string("192.0.0.2")}
reported_ips = {u"server1": {ipaddress_from_string("192.0.0.1")},
u"server2": {ipaddress_from_string("192.0.0.2")}}
self.assertEqual(
(_get_compute_id(local_ips_1, reported_ips),
_get_compute_id(local_ips_2, reported_ips)),
(u"server1", u"server2"))
def test_local_ips_subset_of_reported_ips(self):
"""
If local IPs are a subset of a node's IPs that node is the one chosen.
Floating IPs will show up in reported IPs for remote nodes but are
not known to the local machine.
"""
local_ips_1 = {ipaddress_from_string("192.0.0.1")}
local_ips_2 = {ipaddress_from_string("192.0.0.2")}
reported_ips = {u"server1": {ipaddress_from_string("192.0.0.1"),
ipaddress_from_string("10.0.0.1")},
u"server2": {ipaddress_from_string("192.0.0.2"),
ipaddress_from_string("10.0.0.2")}}
self.assertEqual(
(_get_compute_id(local_ips_1, reported_ips),
_get_compute_id(local_ips_2, reported_ips)),
(u"server1", u"server2"))
def test_local_ips_intersection_of_reported_ips(self):
"""
If local IPs intersect with a node's IPs that node is the one chosen.
This can happen if there are floating IPs reported for remote node
and local list includes things like 127.0.0.1.
"""
local_ips_1 = {ipaddress_from_string("192.0.0.1"),
ipaddress_from_string("127.0.0.1")}
local_ips_2 = {ipaddress_from_string("192.0.0.2"),
ipaddress_from_string("127.0.0.1")}
reported_ips = {u"server1": {ipaddress_from_string("192.0.0.1"),
ipaddress_from_string("10.0.0.1")},
u"server2": {ipaddress_from_string("192.0.0.2"),
ipaddress_from_string("10.0.0.2")}}
self.assertEqual(
(_get_compute_id(local_ips_1, reported_ips),
_get_compute_id(local_ips_2, reported_ips)),
(u"server1", u"server2"))
def test_unknown_ip(self):
"""
A ``KeyError`` is raised if ID can't be calculated.
"""
local_ips = {ipaddress_from_string("192.0.0.1"),
ipaddress_from_string("10.0.0.1")}
reported_ips = {u"server2": {ipaddress_from_string("192.0.0.2")}}
self.assertRaises(KeyError, _get_compute_id, local_ips, reported_ips)
def test_reported_ips_empty(self):
"""
If reported IPs are blank that node is never chosen.
"""
local_ips = {ipaddress_from_string("192.0.0.1"),
ipaddress_from_string("10.0.0.1")}
reported_ips = {u"server2": {ipaddress_from_string("192.0.0.1")},
u"server1": set()}
self.assertEqual(_get_compute_id(local_ips, reported_ips), u"server2")
|
StarcoderdataPython
|
3356665
|
import abc
import asyncio
import collections
import contextlib
import fcntl
import functools
import inspect
import io
import mmap
import operator
import os
import pathlib
import random
import signal
import socket
import stat
import struct
import subprocess
import tempfile
import termios
import time
import types
import typing
import warnings
from typing import Any, BinaryIO, Callable, Dict, Generator, List, Optional, Set, Tuple
import pytest
import trio
from setuptools import Distribution
from hades import constants
from hades.leases.server import Server
# Available since CPython 3.10
F_GETPIPE_SZ = getattr(fcntl, "F_GETPIPE_SZ", 1032)
AncillaryData = List[Tuple[int, int, bytes]]
RECVMSG = Tuple[bytes, AncillaryData, int]
Result = Tuple[
int,
bytes,
bytes,
Optional[List[RECVMSG]],
Optional[bytes],
]
T = typing.TypeVar('T')
SIZEOF_INT = struct.calcsize("i")
ucred = struct.Struct("iII")
TIMEOUT = 1.0
RECVMSG_FLAGS = socket.MSG_CMSG_CLOEXEC
@pytest.fixture(scope="session")
def socket_path() -> bytes:
return os.fsencode(tempfile.mktemp(prefix="hades-", suffix=".sock"))
def read_int_sysctl(variable: str) -> int:
with (pathlib.PosixPath("/proc/sys") / variable).open("rb", 0) as f:
return int(f.read())
@pytest.fixture(scope="session")
def optmem_max() -> int:
return read_int_sysctl("net/core/optmem_max")
@pytest.fixture(scope="session")
def wmem_default() -> int:
return read_int_sysctl("net/core/wmem_default")
@pytest.fixture(scope="session")
def uid():
return os.getuid()
@pytest.fixture(scope="session")
def gid():
return os.getgid()
@pytest.fixture(scope="class")
def server(socket_path) -> socket.socket:
with contextlib.ExitStack() as stack:
type_ = socket.SOCK_STREAM | socket.SOCK_NONBLOCK | socket.SOCK_CLOEXEC
sock = stack.enter_context(socket.socket(socket.AF_UNIX, type_))
sock.bind(socket_path)
stack.callback(os.unlink, socket_path)
sock.listen()
yield sock
def test_short_write_possible(wmem_default):
"""On Linux only the sender can influence the size of a Unix stream socket
buffer."""
got = os.sysconf("SC_ARG_MAX")
expected = wmem_default + mmap.PAGESIZE
assert got > expected, "Cannot test short writes"
@contextlib.contextmanager
def chdir(directory):
prev_cwd = os.getcwd()
os.chdir(directory)
try:
yield directory
finally:
os.chdir(prev_cwd)
@pytest.fixture(scope="session")
def executable(request) -> pathlib.PosixPath:
"""Let setuptools compute the path to the built executable"""
with chdir(request.config.rootdir) as root_dir:
command = "build"
distribution = Distribution({
"script_name": __file__,
"script_args": [command],
})
distribution.parse_config_files()
distribution.parse_command_line()
command = distribution.get_command_obj(command)
command.ensure_finalized()
return (
pathlib.PosixPath(root_dir).absolute()
/ command.build_platlib
/ "hades-dhcp-script"
)
def test_executable_exists(executable: pathlib.PosixPath):
assert executable.exists()
class ChildStopped(Exception):
pass
class TimeoutExceeded(Exception):
pass
@contextlib.contextmanager
def pipe():
r, w = os.pipe2(os.O_CLOEXEC | os.O_NONBLOCK)
r = os.fdopen(r, "rb", buffering=0, closefd=True)
w = os.fdopen(w, "wb", buffering=0, closefd=True)
with r, w:
yield r, w
def drain_pipe(stream: io.FileIO, buffer: typing.BinaryIO) -> Optional[int]:
chunk = stream.readall()
if chunk is not None:
buffer.write(chunk)
return len(chunk)
else:
return None
async def read_pipe(stream: trio.abc.ReceiveStream, buffer: io.BytesIO):
async for chunk in stream:
buffer.write(chunk)
async def receive_messages(
client: trio.SocketStream,
wmem_default: int,
optmem_max: int,
messages: List[RECVMSG],
):
while (r := await client.socket.recvmsg(
wmem_default,
optmem_max,
RECVMSG_FLAGS,
))[0]:
messages.append(r[:3])
async def send_reply(client: trio.SocketStream, reply: bytes) -> bytes:
reply = memoryview(reply)
sent_total = 0
try:
while (
sent_total < len(reply)
and (sent := await client.socket.send(reply[sent_total:]))
):
sent_total += sent
except BrokenPipeError:
pass
return reply[:sent_total].tobytes()
async def track_process(process: trio.Process):
await process.wait()
raise ChildStopped()
async def run_with_trio(
executable: pathlib.PosixPath,
argv: List[bytes],
environ: Dict[bytes, bytes],
stdin: Tuple[io.FileIO, io.FileIO],
stdout: Tuple[io.FileIO, io.FileIO],
stderr: Tuple[io.FileIO, io.FileIO],
server: socket.socket,
reply: bytes,
wmem_default: int,
optmem_max: int,
uid: int,
gid: int,
) -> Result:
stdout_content = io.BytesIO()
stderr_content = io.BytesIO()
messages = None
sent = None
with trio.move_on_after(TIMEOUT):
process = await trio.open_process(
argv,
executable=executable,
env=environ,
bufsize=0,
text=False,
encoding=None,
stdin=os.dup(stdin[0].fileno()),
stdout=os.dup(stdout[1].fileno()),
stderr=os.dup(stderr[1].fileno()),
)
try:
async with trio.open_nursery() as nursery:
nursery: trio.Nursery = nursery
# Read stdout/stderr in the background
nursery.start_soon(track_process, process)
nursery.start_soon(read_pipe, trio.lowlevel.FdStream(os.dup(stdout[0].fileno())), stdout_content)
nursery.start_soon(read_pipe, trio.lowlevel.FdStream(os.dup(stderr[0].fileno())), stderr_content)
server = trio.socket.from_stdlib_socket(server)
client, _ = await server.accept()
client = trio.SocketStream(client)
credentials = ucred.unpack(client.getsockopt(
socket.SOL_SOCKET, socket.SO_PEERCRED, ucred.size,
))
assert (process.pid, uid, gid) == credentials
messages = []
await receive_messages(client, wmem_default, optmem_max, messages)
sent = await send_reply(client, reply)
await client.send_eof()
except ChildStopped:
pass
else:
process.kill()
drain_pipe(stdout[0], stdout_content)
drain_pipe(stderr[0], stderr_content)
return process.returncode, stdout_content.getvalue(), stderr_content.getvalue(), messages, sent
class BaseRun(abc.ABC):
RECVMSG_FLAGS = socket.MSG_CMSG_CLOEXEC
TIMEOUT = 5.0
@pytest.fixture(scope="class")
def environ(self, server: socket.socket) -> Dict[bytes, bytes]:
path = os.fsencode(server.getsockname())
return collections.OrderedDict((
(b"HADES_AUTH_DHCP_SCRIPT_SOCKET", path),
))
@pytest.fixture(scope="class")
def stdin(self) -> Tuple[io.FileIO, io.FileIO]:
with pipe() as p:
yield p
@pytest.fixture(scope="class")
def stdout(self) -> Tuple[io.FileIO, io.FileIO]:
with pipe() as p:
yield p
@pytest.fixture(scope="class")
def stderr(self) -> Tuple[io.FileIO, io.FileIO]:
with pipe() as p:
yield p
@pytest.fixture(scope="class")
def result(
self,
executable: pathlib.PosixPath,
argv: List[bytes],
environ: Dict[bytes, bytes],
stdin: Tuple[io.FileIO, io.FileIO],
stdout: Tuple[io.FileIO, io.FileIO],
stderr: Tuple[io.FileIO, io.FileIO],
server: socket.socket,
reply: bytes,
wmem_default: int,
optmem_max: int,
uid: int,
gid: int,
) -> Result:
return trio.run(
run_with_trio,
executable,
argv,
environ,
stdin,
stdout,
stderr,
server,
reply,
wmem_default,
optmem_max,
uid,
gid,
)
@pytest.fixture(scope="class")
def status(self, result: Result) -> int:
return result[0]
@pytest.fixture(scope="class")
def stdout_content(self, result: Result) -> bytes:
return result[1]
@pytest.fixture(scope="class")
def stderr_content(self, result: Result) -> bytes:
return result[2]
@pytest.fixture(scope="class")
def messages(self, result: Result) -> Optional[List[RECVMSG]]:
return result[3]
@pytest.fixture(scope="class")
def sent(self, result: Result) -> Optional[bytes]:
return result[4]
@property
@abc.abstractmethod
def expected_status(self) -> int:
pass
@pytest.fixture(scope="class")
def reply(self) -> bytes:
return struct.pack("b", self.expected_status)
def test_status(self, status: int):
assert status == self.expected_status
@property
@abc.abstractmethod
def expected_stdout(self) -> bytes:
pass
def test_stdout_content(self, stdout_content: bytes):
assert stdout_content == self.expected_stdout
@property
@abc.abstractmethod
def expected_stderr(self) -> bytes:
pass
def test_stderr_content(self, stderr_content: bytes):
assert stderr_content == self.expected_stderr
class SuccessfulRun(BaseRun, abc.ABC):
@property
def expected_status(self):
return os.EX_OK
class NoStdoutOutputRun(BaseRun, abc.ABC):
@property
def expected_stdout(self) -> bytes:
return b""
class PrematureExitRun(NoStdoutOutputRun, abc.ABC):
@property
def expected_stderr(self) -> bytes:
return inspect.cleandoc(
f"""
hades-dhcp-script ARGS...
Sends its command-line arguments, environment variables starting
with DNSMASQ_ and the stdin/stdout file descriptors to the UNIX
socket set via the HADES_AUTH_DHCP_SCRIPT_SOCKET environment
variable (defaults to {constants.AUTH_DHCP_SCRIPT_SOCKET}).
See the -6, --dhcp-script options of dnsmasq for details.
"""
).encode("ascii")
def test_messages(self, messages: Optional[List[RECVMSG]]):
assert messages is None
def test_sent(self, sent: Optional[bytes]):
assert sent is None
class TestUsageExit(PrematureExitRun):
@property
def expected_status(self) -> int:
return os.EX_USAGE
@pytest.fixture(scope="session")
def argv(self, executable: pathlib.PosixPath) -> List[bytes]:
return [bytes(executable)]
class TestHelpExit(PrematureExitRun, SuccessfulRun):
@pytest.fixture(
scope="session",
params=[[b"-h"], [b"--help"], [b"help"]]
)
def argv(self, request, executable: pathlib.PosixPath) -> List[bytes]:
return [bytes(executable)] + request.param
class ConnectedRun(BaseRun, abc.ABC):
@pytest.fixture(scope="class")
def messages(self, result: Result) -> List[RECVMSG]:
messages = result[3]
if messages is None:
pytest.fail("No messages")
return messages
@pytest.fixture(scope="class")
def file_descriptors(
self,
messages: List[RECVMSG],
) -> Generator[List[io.FileIO], None, None]:
streams = []
with contextlib.ExitStack() as stack:
for _, ancdata, _ in messages:
streams.extend(
stack.enter_context(stream)
for stream in Server.parse_ancillary_data(ancdata)
)
# Make received file descriptors non-blocking
for stream in streams:
os.set_blocking(stream.fileno(), False)
yield streams
@pytest.fixture(scope="class")
def passed_stdin(self, file_descriptors: List[io.FileIO]):
if len(file_descriptors) != 3:
pytest.fail("Wrong number of file descriptors")
return file_descriptors[0]
@pytest.fixture(scope="class")
def passed_stdout(self, file_descriptors: List[io.FileIO]):
if len(file_descriptors) != 3:
pytest.fail("Wrong number of file descriptors")
return file_descriptors[1]
@pytest.fixture(scope="class")
def passed_stderr(self, file_descriptors: List[io.FileIO]):
if len(file_descriptors) != 3:
pytest.fail("Wrong number of file descriptors")
return file_descriptors[2]
def test_sent(self, sent: Optional[bytes], reply: bytes):
assert sent == reply
def test_flags(self, messages: Optional[List[RECVMSG]]):
got = [flags for _, _, flags in messages]
expected = [self.RECVMSG_FLAGS for _, _, _ in messages]
assert got == expected
def test_ancillary_data(self, messages: Optional[List[RECVMSG]]):
expected = [
(
socket.SOL_SOCKET,
socket.SCM_RIGHTS,
len(cmsg_data) - len(cmsg_data) % SIZEOF_INT,
)
for _, ancdata, _ in messages
for _, _, cmsg_data in ancdata
]
got = [
(cmsg_level, cmsg_type, len(cmsg_data))
for _, ancdata, _ in messages
for cmsg_level, cmsg_type, cmsg_data in ancdata
]
assert got == expected
@pytest.mark.xfail(raises=BlockingIOError)
def test_file_descriptor_count(
self,
file_descriptors: List[BinaryIO],
):
assert len(file_descriptors) == 3
@staticmethod
def assert_file(our_file: io.FileIO, passed_file: io.FileIO):
our_readable = our_file.readable()
got_mode = passed_file.mode
our_stat = os.fstat(our_file.fileno())
passed_stat = os.fstat(passed_file.fileno())
is_fifo = stat.S_ISFIFO(passed_stat.st_mode)
expected_mode = "wb" if our_readable else "rb"
reader = our_file if our_readable else passed_file
writer = passed_file if our_readable else our_file
# Verify that we have a pipe with its two ends
if is_fifo and our_stat == passed_stat:
pipe_size = fcntl.fcntl(writer.fileno(), F_GETPIPE_SZ)
# Check for pending bytes in the pipe
pending_bytes = bytearray(SIZEOF_INT)
fcntl.ioctl(reader.fileno(), termios.FIONREAD, pending_bytes)
pending_bytes = struct.unpack_from("=i", pending_bytes)[0]
test_size = min(mmap.PAGESIZE, pipe_size - pending_bytes)
expected_bytes = random.randbytes(test_size)
writer.write(expected_bytes)
writer.flush()
got_bytes = reader.read(pipe_size)
else:
expected_bytes = None
got_bytes = None
assert (
got_mode, is_fifo, passed_stat, got_bytes
) == (
expected_mode, True, our_stat, expected_bytes
)
@pytest.mark.xfail(raises=BlockingIOError)
def test_passed_stdin(
self,
stdin: Tuple[io.FileIO, io.FileIO],
passed_stdin: io.FileIO,
):
self.assert_file(stdin[1], passed_stdin)
@pytest.mark.xfail(raises=BlockingIOError)
def test_passed_stdout(
self,
stdout: Tuple[io.FileIO, io.FileIO],
passed_stdout: io.FileIO,
):
self.assert_file(stdout[0], passed_stdout)
@pytest.mark.xfail(raises=BlockingIOError)
def test_passed_stderr(
self,
stderr: Tuple[io.FileIO, io.FileIO],
passed_stderr: io.FileIO,
):
self.assert_file(stderr[0], passed_stderr)
def test_message(
self,
argv: List[bytes],
environ: Dict[bytes, bytes],
messages: Optional[List[RECVMSG]],
):
got = b"".join(map(operator.itemgetter(0), messages))
expected = b"".join([
struct.pack("=i", len(argv)),
] + [
arg + b"\x00" for arg in argv
] + [
struct.pack("=i", len(environ)),
] + [
b"%b=%b\x00" % i for i in environ.items()
])
assert got == expected
class TestSuccess(ConnectedRun, SuccessfulRun, NoStdoutOutputRun):
@property
def expected_stderr(self) -> bytes:
return b""
@pytest.fixture(scope="session")
def argv(
self,
executable: pathlib.PosixPath,
wmem_default: int,
) -> List[bytes]:
random_args = random.randbytes(2 * wmem_default).split(b"\x00")
return [
bytes(executable),
b"add",
] + random_args
@pytest.fixture(scope="class")
def environ(self, server: socket.socket) -> Dict[bytes, bytes]:
path = os.fsencode(server.getsockname())
return collections.OrderedDict((
(b"NON_DNSMASQ_PREFIX_ENV", b"1"),
(b"DNSMASQ_PREFIX_ENV", b"2"),
(b"DNSMASQ_PREFIX_WITH_WHITESPACE", b" \twith\t whitespace\t "),
(b"DNSMASQ_CHARACTERS", bytes(range(0x01, 0x100))),
(b"HADES_AUTH_DHCP_SCRIPT_SOCKET", path),
))
class TestExitStatus(ConnectedRun, NoStdoutOutputRun):
@property
def expected_stderr(self) -> bytes:
return b""
@pytest.fixture(scope="session")
def argv(self, executable: pathlib.PosixPath) -> List[bytes]:
return [
bytes(executable),
b"test"
]
@property
def expected_status(self) -> int:
return 5
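# Hedged illustration (added for clarity, not part of the original test suite): the
# tests above verify that hades-dhcp-script forwards its argv/environment plus the
# stdin/stdout/stderr file descriptors over an AF_UNIX socket as SCM_RIGHTS
# ancillary data. The helper below is a minimal sketch of that fd-passing mechanism
# using the stdlib socket.send_fds/recv_fds wrappers (Python 3.9+); it is never
# called by the tests and the payload byte is illustrative.
def _sketch_fd_passing() -> None:
    parent, child = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        # Sender side: one payload byte plus the three standard descriptors.
        socket.send_fds(parent, [b"x"], [0, 1, 2])
        # Receiver side: duplicated descriptor numbers come back in `fds`.
        data, fds, _flags, _addr = socket.recv_fds(child, 1, 3)
        assert data == b"x" and len(fds) == 3
        for fd in fds:
            os.close(fd)
    finally:
        parent.close()
        child.close()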
|
StarcoderdataPython
|
9650395
|
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2011, <NAME>
#
# Distributed under the terms of the Lesser GNU General Public License (LGPL)
#-----------------------------------------------------------------------------
'''
Created on Jun 8, 2011
@author: evan
'''
from lxml import etree
from kayako.core.lib import UnsetParameter
from kayako.core.object import KayakoObject
from kayako.objects.ticket.ticket_note import TicketNote
from kayako.objects.ticket.ticket_post import TicketPost
from kayako.objects.ticket.ticket_time_track import TicketTimeTrack
from kayako.exception import KayakoRequestError, KayakoResponseError
class Ticket(KayakoObject):
'''
Kayako Ticket API Object.
subject The Ticket Subject
fullname Full Name of creator
email Email Address of creator
contents The contents of the first ticket post
departmentid The Department ID
ticketstatusid The Ticket Status ID
ticketpriorityid The Ticket Priority ID
tickettypeid The Ticket Type ID
userid The User ID, if the ticket is to be created as a user.
staffid The Staff ID, if the ticket is to be created as a staff
ownerstaffid The Owner Staff ID, if you want to set an Owner for this
ticket
type The ticket type: 'default' or 'phone'
'''
controller = '/Tickets/Ticket'
__parameters__ = [
'id',
'subject',
'fullname',
'email',
'contents',
'departmentid',
'ticketstatusid',
'ticketpriorityid', # synonym for priorityid
'tickettypeid',
'userid',
'staffid',
'ownerstaffid',
'type',
'flagtype',
'displayid',
'statusid',
'typeid',
'userorganization',
'userorganizationid',
'ownerstaffname',
'lastreplier',
'creationtime',
'lastactivity',
'laststaffreply',
'lastuserreply',
'slaplanid',
'nextreplydue',
'resolutiondue',
'replies',
'ipaddress',
'creator',
'creationmode',
'creationtype',
'isescalated',
'escalationruleid',
'tags',
'watchers',
'workflows',
'notes',
'posts',
'timetracks',
]
__required_add_parameters__ = ['subject', 'fullname', 'email', 'contents', 'departmentid', 'ticketstatusid', 'ticketpriorityid', 'tickettypeid', ]
__add_parameters__ = ['subject', 'fullname', 'email', 'contents', 'departmentid', 'ticketstatusid', 'ticketpriorityid', 'tickettypeid', 'userid', 'staffid', 'ownerstaffid', 'type']
__save_parameters__ = ['subject', 'fullname', 'email', 'departmentid', 'ticketstatusid', 'ticketpriorityid', 'ownerstaffid', 'userid', ]
@classmethod
def _parse_ticket(cls, api, ticket_tree):
ticketid = cls._parse_int(ticket_tree.get('id'))
workflows = [dict(id=workflow_node.get('id'), title=workflow_node.get('title')) for workflow_node in ticket_tree.findall('workflow')]
watchers = [dict(staffid=watcher_node.get('staffid'), name=watcher_node.get('name')) for watcher_node in ticket_tree.findall('watcher')]
notes = [TicketNote(api, **TicketNote._parse_ticket_note(ticket_note_tree, ticketid)) for ticket_note_tree in ticket_tree.findall('note') if ticket_note_tree.get('type') == 'ticket']
timetracks = [TicketTimeTrack(api, **TicketTimeTrack._parse_ticket_time_track(ticket_time_track_tree, ticketid)) for ticket_time_track_tree in ticket_tree.findall('note') if ticket_time_track_tree.get('type') == 'timetrack']
posts = []
posts_node = ticket_tree.find('posts')
if posts_node is not None:
posts = [TicketPost(api, **TicketPost._parse_ticket_post(ticket_post_tree, ticketid)) for ticket_post_tree in posts_node.findall('post')]
params = dict(
id=ticketid,
subject=cls._get_string(ticket_tree.find('subject')),
fullname=cls._get_string(ticket_tree.find('fullname')),
email=cls._get_string(ticket_tree.find('email')),
departmentid=cls._get_int(ticket_tree.find('departmentid')),
ticketstatusid=cls._get_int(ticket_tree.find('ticketstatusid'), required=False),
ticketpriorityid=cls._get_int(ticket_tree.find('priorityid')), # Note the difference, request param is ticketpriorityid, response is priorityid
tickettypeid=cls._get_int(ticket_tree.find('tickettypeid'), required=False),
userid=cls._get_int(ticket_tree.find('userid')),
ownerstaffid=cls._get_int(ticket_tree.find('ownerstaffid')),
flagtype=cls._parse_int(ticket_tree.get('flagtype'), 'flagtype'),
displayid=cls._get_string(ticket_tree.find('displayid')),
statusid=cls._get_int(ticket_tree.find('statusid')),
typeid=cls._get_int(ticket_tree.find('typeid')),
userorganization=cls._get_string(ticket_tree.find('userorganization')),
userorganizationid=cls._get_int(ticket_tree.find('userorganizationid'), required=False),
ownerstaffname=cls._get_string(ticket_tree.find('ownerstaffname')),
lastreplier=cls._get_string(ticket_tree.find('lastreplier')),
creationtime=cls._get_date(ticket_tree.find('creationtime')),
lastactivity=cls._get_date(ticket_tree.find('lastactivity')),
laststaffreply=cls._get_date(ticket_tree.find('laststaffreply')),
lastuserreply=cls._get_date(ticket_tree.find('lastuserreply')),
slaplanid=cls._get_int(ticket_tree.find('slaplanid')),
nextreplydue=cls._get_date(ticket_tree.find('nextreplydue')),
resolutiondue=cls._get_date(ticket_tree.find('resolutiondue')),
replies=cls._get_int(ticket_tree.find('replies')),
ipaddress=cls._get_string(ticket_tree.find('ipaddress')),
creator=cls._get_int(ticket_tree.find('creator')),
creationmode=cls._get_int(ticket_tree.find('creationmode')),
creationtype=cls._get_int(ticket_tree.find('creationtype')),
isescalated=cls._get_boolean(ticket_tree.find('isescalated')),
escalationruleid=cls._get_int(ticket_tree.find('escalationruleid')),
tags=cls._get_string(ticket_tree.find('tags')),
watchers=watchers,
workflows=workflows,
notes=notes,
posts=posts,
timetracks=timetracks,
)
return params
def _update_from_response(self, ticket_tree):
ticketid = self._parse_int(ticket_tree.get('id'))
if ticketid is not None:
self.id = ticketid
priority_node = ticket_tree.find('priorityid')
if priority_node is not None:
self.ticketpriorityid = self._get_int(priority_node)
for int_node in ['departmentid', 'userid', 'ownerstaffid', 'flagtype', 'statusid', 'slaplanid', 'replies', 'creator', 'creationmode', 'creationtype', 'escalationruleid', 'ticketstatusid', 'tickettypeid', 'userorganizationid' ]:
node = ticket_tree.find(int_node)
if node is not None:
setattr(self, int_node, self._get_int(node, required=False))
for str_node in ['subject', 'email', 'displayid', 'userorganization', 'ownerstaffname', 'lastreplier', 'ipaddress', 'tags']:
node = ticket_tree.find(str_node)
if node is not None:
setattr(self, str_node, self._get_string(node))
for bool_node in ['isescalated']:
node = ticket_tree.find(bool_node)
if node is not None:
setattr(self, bool_node, self._get_boolean(node, required=False))
for date_node in ['creationtime', 'lastactivity', 'lastuserreply', 'nextreplydue', 'resolutiondue', ]:
node = ticket_tree.find(date_node)
if node is not None:
setattr(self, date_node, self._get_date(node, required=False))
@classmethod
def get_all(cls, api, departmentid, ticketstatusid= -1, ownerstaffid= -1, userid= -1):
'''
Get all of the tickets filtered by the parameters:
Lists are converted to comma-separated values.
Required:
departmentid Filter the tickets by the specified department id, you can specify multiple id's by separating the values using a comma. Example: 1,2,3
Optional:
ticketstatusid Filter the tickets by the specified ticket status id, you can specify multiple id's by separating the values using a comma. Example: 1,2,3
ownerstaffid Filter the tickets by the specified owner staff id, you can specify multiple id's by separating the values using a comma. Example: 1,2,3
userid Filter the tickets by the specified user id, you can specify multiple id's by separating the values using a comma. Example: 1,2,3
'''
if isinstance(departmentid, (list, tuple)):
departmentid = ','.join([str(id_item) for id_item in departmentid])
if isinstance(ticketstatusid, (list, tuple)):
ticketstatusid = ','.join([str(id_item) for id_item in ticketstatusid])
if isinstance(ownerstaffid, (list, tuple)):
ownerstaffid = ','.join([str(id_item) for id_item in ownerstaffid])
if isinstance(userid, (list, tuple)):
userid = ','.join([str(id_item) for id_item in userid])
response = api._request('%s/ListAll/%s/%s/%s/%s/' % (cls.controller, departmentid, ticketstatusid, ownerstaffid, userid), 'GET')
tree = etree.parse(response)
return [Ticket(api, **cls._parse_ticket(api, ticket_tree)) for ticket_tree in tree.findall('ticket')]
@classmethod
def get(cls, api, id):
try:
response = api._request('%s/%s/' % (cls.controller, id), 'GET')
except KayakoResponseError, error:
if 'HTTP Error 404' in str(error):
return None
else:
raise
tree = etree.parse(response)
node = tree.find('ticket')
if node is None:
return None
params = cls._parse_ticket(api, node)
return Ticket(api, **params)
def add(self):
'''
Add this Ticket.
Requires:
subject The Ticket Subject
fullname Full Name of creator
email Email Address of creator
contents The contents of the first ticket post
departmentid The Department ID
ticketstatusid The Ticket Status ID
ticketpriorityid The Ticket Priority ID
tickettypeid The Ticket Type ID
At least one of these must be present:
userid The User ID, if the ticket is to be created as a user.
staffid The Staff ID, if the ticket is to be created as a staff
Optional:
ownerstaffid The Owner Staff ID, if you want to set an Owner for this ticket
type The ticket type: 'default' or 'phone'
'''
if self.id is not UnsetParameter:
raise KayakoRequestError('Cannot add a pre-existing %s. Use save instead. (id: %s)' % (self.__class__.__name__, self.id))
parameters = self.add_parameters
for required_parameter in self.__required_add_parameters__:
if required_parameter not in parameters:
raise KayakoRequestError('Cannot add %s: Missing required field: %s.' % (self.__class__.__name__, required_parameter))
if 'userid' not in parameters and 'staffid' not in parameters:
raise KayakoRequestError('To add a Ticket, at least one of the following parameters must be set: userid, staffid. (id: %s)' % self.id)
response = self.api._request(self.controller, 'POST', **parameters)
tree = etree.parse(response)
node = tree.find('ticket')
self._update_from_response(node)
def save(self):
'''
Save this ticket.
Saves only the following:
subject The Ticket Subject
fullname Full Name of creator
email Email Address of creator
departmentid The Department ID
ticketstatusid The Ticket Status ID
ticketpriorityid The Ticket Priority ID
tickettypeid The Ticket Type ID
ownerstaffid The Owner Staff ID, if you want to set an Owner for this ticket
userid The User ID, if you want to change the user for this ticket
'''
response = self._save('%s/%s/' % (self.controller, self.id))
tree = etree.parse(response)
node = tree.find('ticket')
self._update_from_response(node)
def delete(self):
self._delete('%s/%s/' % (self.controller, self.id))
def __str__(self):
return '<Ticket (%s): %s - %s>' % (self.id, 'UNSUBMITTED' if not self.displayid else self.displayid, self.subject)
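# Hedged usage sketch (added for illustration, not part of the original module):
# assuming `api` is an already-configured Kayako API connection object compatible
# with this class, the classmethods above can be used as below. The department and
# status ids are placeholders.
def _example_ticket_queries(api):
    # Department ids may be given as a list; get_all joins them with commas.
    open_tickets = Ticket.get_all(api, departmentid=[1, 2, 3], ticketstatusid=1)
    for ticket in open_tickets:
        print(ticket)  # uses Ticket.__str__
    # get() returns None when the ticket does not exist (HTTP 404).
    ticket = Ticket.get(api, 42)
    if ticket is not None and ticket.subject:
        ticket.subject = ticket.subject.strip()
        ticket.save()
    return ticket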
|
StarcoderdataPython
|
163562
|
from netapp.snapshot.snapshot_schedule_info import SnapshotScheduleInfo
from netapp.netapp_object import NetAppObject
class SnapshotPolicyInfo(NetAppObject):
"""
A typedef containing information about the Snapshot Scheduling
Policies.
When returned as part of the output, all elements of this typedef
are reported, unless limited by a set of desired attributes
specified by the caller.
<p>
When used as input to specify desired attributes to return,
omitting a given element indicates that it shall not be returned
in the output. In contrast, by providing an element (even with
no value) the caller ensures that a value for that element will
be returned, given that the value can be retrieved.
<p>
When used as input to specify queries, any element can be omitted
in which case the resulting set of objects is not constrained by
any specific value of that attribute.
"""
_comment = None
@property
def comment(self):
"""
A human readable description associated with the snapshot
policy. The maximum length of this field can be 255
characters.
Attributes: optional-for-create, modifiable
"""
return self._comment
@comment.setter
def comment(self, val):
if val != None:
self.validate('comment', val)
self._comment = val
_policy_owner = None
@property
def policy_owner(self):
"""
Owner of the policy
Attributes: non-creatable, non-modifiable
"""
return self._policy_owner
@policy_owner.setter
def policy_owner(self, val):
if val != None:
self.validate('policy_owner', val)
self._policy_owner = val
_vserver_name = None
@property
def vserver_name(self):
"""
Vserver Name
Attributes: key, non-creatable, non-modifiable
"""
return self._vserver_name
@vserver_name.setter
def vserver_name(self, val):
if val != None:
self.validate('vserver_name', val)
self._vserver_name = val
_enabled = None
@property
def enabled(self):
"""
The state of the snapshot policy. If true, the snapshot
policy is enabled and scheduled snapshots will be created
on the volume associated with this policy.
Attributes: required-for-create, modifiable
"""
return self._enabled
@enabled.setter
def enabled(self, val):
if val != None:
self.validate('enabled', val)
self._enabled = val
_total_schedules = None
@property
def total_schedules(self):
"""
Total Number of Schedules in this Policy
Attributes: non-creatable, non-modifiable
"""
return self._total_schedules
@total_schedules.setter
def total_schedules(self, val):
if val != None:
self.validate('total_schedules', val)
self._total_schedules = val
_snapshot_policy_schedules = None
@property
def snapshot_policy_schedules(self):
"""
Information about individual snapshot schedules
"""
return self._snapshot_policy_schedules
@snapshot_policy_schedules.setter
def snapshot_policy_schedules(self, val):
if val != None:
self.validate('snapshot_policy_schedules', val)
self._snapshot_policy_schedules = val
_policy = None
@property
def policy(self):
"""
A human readable string describing the name of the
snapshot scheduling policy.
Attributes: key, required-for-create, non-modifiable
"""
return self._policy
@policy.setter
def policy(self, val):
if val != None:
self.validate('policy', val)
self._policy = val
@staticmethod
def get_api_name():
return "snapshot-policy-info"
@staticmethod
def get_desired_attrs():
return [
'comment',
'policy-owner',
'vserver-name',
'enabled',
'total-schedules',
'snapshot-policy-schedules',
'policy',
]
def describe_properties(self):
return {
'comment': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'policy_owner': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'vserver_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'enabled': { 'class': bool, 'is_list': False, 'required': 'optional' },
'total_schedules': { 'class': int, 'is_list': False, 'required': 'optional' },
'snapshot_policy_schedules': { 'class': SnapshotScheduleInfo, 'is_list': True, 'required': 'optional' },
'policy': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
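# Hedged usage sketch (added for illustration, not part of the original module):
# the static helpers above describe how this typedef maps onto the ONTAP API.
# Constructing an instance with no arguments is an assumption about the
# NetAppObject base class, which is not shown here.
def _example_snapshot_policy_info():
    print(SnapshotPolicyInfo.get_api_name())       # 'snapshot-policy-info'
    print(SnapshotPolicyInfo.get_desired_attrs())  # list of attribute names
    info = SnapshotPolicyInfo()                    # assumption: no-arg constructor
    info.policy = 'default'                        # key, required-for-create
    info.enabled = True                            # required-for-create
    info.comment = 'hourly and daily snapshots'
    return info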
|
StarcoderdataPython
|
5139696
|
import pandas as pd
import xgboost as xgb
from notecoin.huobi.model import BaseModel
class XgboostModel(BaseModel):
def __init__(self, windows=-15, *args, **kwargs):
super(XgboostModel, self).__init__(*args, **kwargs)
self.model = None
self.windows = windows
def solve(self, df, train=True):
df2 = df.copy()
df2 = df2.sort_values(['id'])
df2 = df2.reset_index(drop=True)
def ma_n(n):
df2['MA' + str(n)] = df2['open'].rolling(window=n).mean()
def shift_n(name, n):
df2[name + '_' + str(n)] = df2[name].shift(n)
[ma_n(i) for i in (2, 3, 4, 5, 10, 15, 20, 25, 30)]
cols = [col for col in df2.columns]
[[shift_n(name, n) for name in cols if (name in ('MA5', 'open') or 'MA' in name)] for n in
(1, 2, 3, 4, 5, 10, 15, 30)]
df2['label'] = df2['open'].shift(self.windows)
del df2['symbol'], df2['id']
if train:
df2.dropna(axis=0, how='any', inplace=True)
x = df2[df2.columns[1:-1]].values
y = df2['label'].values
return x, y
else:
del df2['label']
df2.dropna(axis=0, how='any', inplace=True)
x = df2[df2.columns[1:]].values
return x
def train(self, df, *args, **kwargs):
x, y = self.solve(df)
self.model = xgb.XGBRegressor(n_jobs=1).fit(x, y)
def predict(self, df, *args, **kwargs):
x = self.solve(df, train=False)
return self.model.predict(x)
class MultiXgboostModel(BaseModel):
def __init__(self, windows=None, *args, **kwargs):
super(MultiXgboostModel, self).__init__(*args, **kwargs)
if windows is None:
windows = [-5, -10, -15]
self.model = None
self.windows = windows
def solve(self, df, train=True):
df2 = df.copy()
df2 = df2.sort_values(['id'])
df2 = df2.reset_index(drop=True)
def ma_n(n):
df2['MA' + str(n)] = df2['open'].rolling(window=n).mean()
def shift_n(name, n):
df2[name + '_' + str(n)] = df2[name].shift(n)
[ma_n(i) for i in (2, 3, 4, 5, 10, 15, 20, 25, 30)]
cols = [col for col in df2.columns]
[[shift_n(name, n) for name in cols if (name in ('MA5', 'open') or 'MA' in name)] for n in
(1, 2, 3, 4, 5, 10, 15, 30)]
#del df2['symbol'], df2['id']
if train:
cols = []
df2['open2'] = df2['open'] - df2['open'].shift()
df2['open2'] = df2['open2'].apply(lambda x: 1 if x > 0 else 0)
for window in self.windows:
col = f'label{window}'
df2[col] = df2['open2'].shift(window)
cols.append(col)
df2.dropna(axis=0, how='any', inplace=True)
x = df2[df2.columns[1:-1 - len(cols)]].values
y = df2[cols].values
return df2, x, y
else:
df2.dropna(axis=0, how='any', inplace=True)
x = df2[df2.columns[1:]].values
return df2, x
def train(self, df, *args, **kwargs):
df2, x, y = self.solve(df)
from sklearn.multiclass import OneVsRestClassifier
from xgboost import XGBClassifier
self.model = OneVsRestClassifier(
XGBClassifier(eval_metric=['logloss', 'auc', 'error'], use_label_encoder=False))
self.model.fit(x, y)
def predict(self, df, *args, **kwargs):
df2, x = self.solve(df, train=False)
return df2, self.model.predict(x)
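# Hedged illustration (added for clarity, not part of the original module): the
# solve() methods above build their features from rolling means of 'open' plus
# lagged copies of those columns. A standalone pandas sketch of that feature
# construction, on any frame that has an 'open' column, looks like this:
def _sketch_ma_features(df):
    out = df.copy()
    # Rolling means over several window sizes, as in solve().
    for n in (2, 3, 4, 5, 10, 15, 20, 25, 30):
        out['MA' + str(n)] = out['open'].rolling(window=n).mean()
    # Lagged copies of 'open' and every MA column.
    for name in [c for c in out.columns if c == 'open' or c.startswith('MA')]:
        for lag in (1, 2, 3, 4, 5, 10, 15, 30):
            out[name + '_' + str(lag)] = out[name].shift(lag)
    # Rows without a full history are dropped, mirroring dropna() in solve().
    return out.dropna(axis=0, how='any')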
|
StarcoderdataPython
|
5129772
|
import sys
import os
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import argparse
import time
import shutil
import cv2
# for pretrained model
import torchvision.models as models
import glob
# load models and pretrained selector network
from models.nets import *
from models.selector import *
from data import *
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from torchvision.utils import save_image
import torch.utils.data
import torch.utils.data.distributed
import matplotlib.pyplot as plt
import random
def make_glyph (args):
# generator initialize
generator = Generator (args.latent_dim)
output = []
save_fpaths = glob.glob(args.pretrained_location + '/*.pth.tar')
save_fpaths = [os.path.abspath(f) for f in save_fpaths]
exp_name = save_fpaths[0].split('/')[-2]
for idx, path in enumerate(save_fpaths):
checkpoint = torch.load(path, map_location=torch.device('cpu'))
prefix = 'module.'
n_clip = len(prefix)
gen = checkpoint['gen_model']
adapted_gen = {k[n_clip:]: v for k, v in gen.items() if k.startswith(prefix)}
generator.load_state_dict(adapted_gen)
target_input = plt.imread(args.input_location) # 64*(64*5)*3
if (len(target_input.shape)==2):
target_input = np.stack((target_input,)*3, axis=-1) # gray -> rgb
target_input = torch.from_numpy(target_input).float() # 64*(64*5)*3
target_input = torch.unsqueeze(target_input.permute(2,0,1), 0) # 1*3*64*(64*5)
alphabet_list = 'abcdefghijklmnopqrstuvwxyz'
alphabet_num = 5
rand_word = ''.join(random.sample(alphabet_list, alphabet_num))
position_list = alphabet_position(rand_word)
source_list = []
for p in position_list:
source_list.append(target_input[:,:,:,64*(p-1):64*p])
source_input = torch.cat(source_list, dim=3) # b*3*64*(64*5)
glyph_address = args.input_location.replace("_colorGrad64", '64')[:-5] + '0.png'
glyph_input = plt.imread(glyph_address)
if (len(glyph_input.shape)==2):
glyph_input = np.stack((glyph_input,)*3, axis=-1) # gray -> rgb
glyph_input = torch.from_numpy(glyph_input).float() # 64*(64*26)*3
glyph_input = torch.unsqueeze(glyph_input.permute(2,0,1), 0) # 1*3*64*(64*26)
save_image (source_input, 'source_test.png')
save_image (glyph_input, 'glyph_test.png')
with torch.no_grad():
output.append(generator(source_input, glyph_input))
print('Generation from {}-th save file done!'.format(idx))
return torch.squeeze(torch.stack(output, dim=0)), exp_name
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input_location',
help='location of input source',
type=str,
default='source_input.png')
parser.add_argument('--pretrained_location',
help='location of pretrained model',
type=str,
default='results/ckpt.pt')
parser.add_argument('--output_folder',
help='output folder',
type=str,
default='../outputs/')
parser.add_argument('--output_name',
help='location of output png',
type=str,
default='test.png')
parser.add_argument('--latent_dim',
help='latent vector dimension in generator',
type=int,
default=1024)
parser.add_argument('--color_path',
help='path for style data sources',
type=str,
default='datasets/Capitals_colorGrad64/train/')
parser.add_argument('--noncolor_path',
help='path for glyph data sources',
type=str,
default='datasets/Capitals64/BASE/')
parser.add_argument('--batch_size',
help='batch size',
type=int,
default=1)
args = parser.parse_args()
output_dir = os.path.abspath(args.output_folder)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
whatwemade, expname = make_glyph(args) # 1*3*64*(64*26)
oname, ftype = args.output_name.split('.')
for idx in range(whatwemade.shape[0]):
save_image(whatwemade[idx], output_dir + '/' + oname + '_' + expname + '_' + str(idx) + '.' + ftype)
print ("Congratulations!! {} saved:)".format(oname + '_' + expname + '_' + str(idx) + '.' + ftype))
|
StarcoderdataPython
|
3506946
|
def listify(x, n=1):
ret = None
if isinstance(x, list):
ret = x
else:
ret = [x] * n
return ret
|
StarcoderdataPython
|
134516
|
<filename>sskl_webui/my_hisapi.py
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import json
import redis
import requests
from frappe.model.document import Document
from frappe.utils import cint
from frappe import throw, msgprint, _, _dict
from iot.iot.doctype.iot_hdb_settings.iot_hdb_settings import IOTHDBSettings
@frappe.whitelist()
def taghisdata(sn=None, vsn=None, fields=None, condition=None):
vsn = vsn or sn
fields = fields or "*"
doc = frappe.get_doc('IOT Device', sn)
doc.has_permission("read")
inf_server = IOTHDBSettings.get_influxdb_server()
if not inf_server:
frappe.logger(__name__).error("InfluxDB Configuration missing in IOTHDBSettings")
return 500
query = 'SELECT ' + fields + ' FROM "' + vsn + '"'
if condition:
query = query + " WHERE " + condition
else:
query = query + " LIMIT 1000"
domain = frappe.get_value("Cloud Company", doc.company, "domain")
r = requests.session().get(inf_server + "/query", params={"q": query, "db": domain}, timeout=10)
if r.status_code == 200:
res = r.json()["results"][0]['series'][0]['values']
taghis = []
for i in range(0, len(res)):
hisvalue = {}
if len(res[i]) == 5:
hisvalue = {'name': res[i][1], 'value': res[i][3], 'time': res[i][0], 'quality': 0}
taghis.append(hisvalue)
elif len(res[i]) == 6:
hisvalue = {'name': res[i][1], 'value': res[i][4], 'time': res[i][0], 'quality': 0}
taghis.append(hisvalue)
#print(taghis)
return taghis or r.json()
@frappe.whitelist()
def iot_device_tree(sn=None):
sn = sn or frappe.form_dict.get('sn')
doc = frappe.get_doc('IOT Device', sn)
doc.has_permission("read")
client = redis.Redis.from_url(IOTHDBSettings.get_redis_server() + "/1")
return client.lrange(sn, 0, -1)
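# Hedged illustration (added for clarity, not part of the original module): for
# placeholder values vsn='GW001.data', fields='*' and no condition, taghisdata()
# above ends up issuing roughly
#   GET <influxdb_server>/query?q=SELECT * FROM "GW001.data" LIMIT 1000&db=<company domain>
# and maps each returned row to a {'name', 'value', 'time', 'quality'} dict.
# A standalone sketch of the query-string construction:
def _sketch_influx_query(vsn, fields="*", condition=None):
    query = 'SELECT ' + fields + ' FROM "' + vsn + '"'
    if condition:
        query += " WHERE " + condition
    else:
        query += " LIMIT 1000"
    return query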
|
StarcoderdataPython
|
8065227
|
<filename>Wurm/PyWurm/Leg.py<gh_stars>0
from ControlledSystem import ControlledSystem
from math import sin, cos, sqrt, pi
# The leg can only enter and exit stance at I_max
class Leg(ControlledSystem):
def __init__( self, I_min, I_max, I_dot_max, a, k, c, phi, b ):
# Used to validate states and controls...
self.I_min = I_min
self.I_max = I_max
self.I_dot_max = I_dot_max
# System Parameters
self.b = b
self.A = 2*a*k*c*sin(phi / 2)
self.B = 4*a*a*k*cos(phi / 4)
def f(self, x, u):
th = x["th"]
th_dot = x["th_dot"]
I = x["I"]
I_dot = u
return {"th": th_dot,
"th_dot": - ( self.b * th_dot + self.A * sin(th) + self.B * sin(th / 2) ) / I,
"I": I_dot}
import pygame
def drawGait(screen, leg, traj, t):
T = traj.timeline[-1][0] # Time for a half step.
th_0 = traj.timeline[0][1]["th"]
t_residue = t
n = 0
state = False # Describes which legs are moving...
while t_residue > T:
t_residue -= T
n += 1
state = not state
if state:
state = 1
else:
state = 0
x = traj.estimate(t_residue)
def b_th(i,n):
if n > 1:
return -th_0 + b_th(i, n-1)
return i * pi / 3
def th(i):
if i % 2 == state:
return b_th(i,n)
return b_th(i,n) + x["th"]
center = (250, 250)
c_x, c_y = center
l_max = 50 * sqrt( leg.I_max )
pygame.draw.circle(screen, (0,0,0), center, 5)
def leg_state(i):
if i % 2 == state:
l = l_max
planted = True
else:
l = 50 * sqrt( x["I"] )
planted = False
theta = th(i)
return l, theta, planted
def draw_leg(i):
l, theta, planted = leg_state(i)
pole_end = (int(c_x + l_max * sin(theta)), int(c_y + l_max * cos(theta)))
bob = (int(c_x + l * sin(theta)), int(c_y + l * cos(theta)))
pygame.draw.line(screen, (0,0,0), center, pole_end,3)
pygame.draw.circle(screen, (0,0,255), bob, 14, 0)
if planted:
pygame.draw.circle(screen, (0,0,0), pole_end, 20, 3)
def draw_spring(start):
end = (start + 1) % 6
th_start = th(start)
th_end = th(end)
a = 50
att_start = (int(c_x + a * sin(th_start)), int(c_y + a * cos(th_start)))
att_end = (int(c_x + a * sin(th_end)), int(c_y + a * cos(th_end)))
length = sqrt( (att_start[0] - att_end[0]) ** 2 + (att_start[1] - att_end[1]) ** 2 ) / 50.0
def color(l):
if l < 1:
alpha = 1 - l
return (50 + alpha*175,50,50)
alpha = (l - 1) / sqrt(3)
return (50,50+alpha*175,50)
pygame.draw.line(screen, color(length), att_start, att_end,3)
for i in range(6):
draw_leg(i)
draw_spring(i)
# TODO: Draw springs.
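# Hedged illustration (added for clarity, not part of the original module): Leg.f()
# returns the state derivative as a dict, so a simple fixed-step Euler integrator
# over that dict can be written as below. The parameter values and the constant
# control u in the example comment are made up.
def _sketch_euler(leg, x0, u, dt=0.001, steps=1000):
    x = dict(x0)
    trajectory = [dict(x)]
    for _ in range(steps):
        dx = leg.f(x, u)
        # Euler step applied key-wise to the state dict.
        x = {key: x[key] + dt * dx[key] for key in x}
        trajectory.append(dict(x))
    return trajectory
# Example (illustrative numbers only):
#   leg = Leg(I_min=0.5, I_max=1.0, I_dot_max=1.0, a=1.0, k=10.0, c=1.0, phi=pi/3, b=0.1)
#   _sketch_euler(leg, {"th": 0.1, "th_dot": 0.0, "I": 1.0}, u=0.0)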
|
StarcoderdataPython
|
11265843
|
from urllib.parse import urlencode, parse_qs
from django.conf import settings
from urllib.request import urlopen
import json
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, BadData
from .exceptions import QQAPIException
from . import constants
import logging
# Logger
logger = logging.getLogger('django')
class QQOauth(object):
"""QQ登录的工具类,内部封装业务逻辑的过程"""
def __init__(self, client_id=None, client_secret=None, redirect_uri=None, state=None):
"""构造方法:接受在够到对象时初次传入的参数"""
self.client_id = client_id or settings.QQ_CLIENT_ID
self.client_secret = client_secret or settings.QQ_CLIENT_SECRET
self.redirect_uri = redirect_uri or settings.QQ_REDIRECT_URI
self.state = state or settings.QQ_STATE
def get_login_url(self):
"""获取login_url
# login_url = https://graph.qq.com/oauth2.0/authorize?response_type=code&client_id=101474184
&redirect_uri=xx&state=next参数&scope=get_user_info
"""
# Prepare the url
url = 'https://graph.qq.com/oauth2.0/authorize?'
# Prepare the request parameters
params = {
'response_type':'code',
'client_id':self.client_id,
'redirect_uri':self.redirect_uri,
'state':self.state,
'scope':'get_user_info'
}
# Convert the params dict into a query string
query_params = urlencode(params)
# Append the query string to the url
login_url = url + query_params
return login_url
def get_access_token(self, code):
"""
使用code获取access_token
:param code: authorization code
:return: access_toekn
"""
# 准备url
url = 'https://graph.qq.com/oauth2.0/token?'
# 准备参数
params = {
'grant_type': 'authorization_code',
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': code,
'redirect_uri': self.redirect_uri
}
# Build the full request url
# url = url + urlencode(params)
url += urlencode(params)
# Meiduo mall sends a GET request to the QQ server to obtain the access_token
try:
# response_data = (bytes)'access_token=FE04************************CCE2&expires_in=7776000&refresh_token=88E4************************BE14'
response_data = urlopen(url).read()
# (str)'access_token=FE04************************CCE2&expires_in=7776000&refresh_token=<PASSWORD>'
response_str = response_data.decode()
# Convert response_str into a dict so the access_token is easy to read
response_dict = parse_qs(response_str)
# Read the access_token
access_token = response_dict.get('access_token')[0]
except Exception as e:
logger.error(e)
# When an exception occurs inside a utility method, raise it directly; whoever uses the utility catches and handles it
# Analogous to BookInfo.objects.get(id=13468954890965468909765) ==> DoesNotExist
raise QQAPIException('Failed to get access_token')
# Return the access_token
return access_token
def get_openid(self, access_token):
"""
使用access_token获取openid
:param access_token: 获取openid的凭据
:return: openid
"""
# 准备url
url = 'https://graph.qq.com/oauth2.0/me?access_token=%s' % access_token
response_str = ''
try:
# Send a GET request to get the openid
# (bytes)'callback( {"client_id":"YOUR_APPID","openid":"YOUR_OPENID"} );'
response_data = urlopen(url).read()
# (str)'callback( {"client_id":"YOUR_APPID","openid":"YOUR_OPENID"} );'
response_str = response_data.decode()
# Slice the string to extract the JSON substring from response_str
# Returned data: callback( {"client_id":"YOUR_APPID","openid":"YOUR_OPENID"} )\n;
response_dict = json.loads(response_str[10:-4])
# Get the openid
openid = response_dict.get('openid')
except Exception as e:
# On error, the QQ server returns "code=xxx&msg=xxx"
err_data = parse_qs(response_str)
logger.error(e)
raise QQAPIException('code=%s msg=%s' % (err_data.get('code'), err_data.get('msg')))
return openid
@staticmethod
def generate_save_user_token(openid):
"""
生成保存用户数据的token
:param openid: 用户的openid
:return: token
"""
serializer = Serializer(settings.SECRET_KEY, expires_in=constants.SAVE_QQ_USER_TOKEN_EXPIRES)
data = {'openid': openid}
token = serializer.dumps(data)
return token.decode()
@staticmethod
def check_save_user_token(token):
"""
检验保存用户数据的token
:param token: token
:return: openid or None
"""
serializer = Serializer(settings.SECRET_KEY, expires_in=constants.SAVE_QQ_USER_TOKEN_EXPIRES)
try:
data = serializer.loads(token)
except BadData:
return None
else:
return data.get('openid')
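# Hedged usage sketch (added for illustration, not part of the original module):
# the intended OAuth flow with this helper. The state value and the authorization
# code are placeholders; client id/secret and redirect uri fall back to settings.
def _example_qq_login_flow(code):
    oauth = QQOauth(state='/')                    # other params come from settings
    login_url = oauth.get_login_url()             # redirect the user here first
    access_token = oauth.get_access_token(code)   # code comes back on the callback
    openid = oauth.get_openid(access_token)
    token = QQOauth.generate_save_user_token(openid)
    assert QQOauth.check_save_user_token(token) == openid
    return login_url, openid, token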
|
StarcoderdataPython
|
9625165
|
<reponame>lynnyi/clustering_on_transcript_compatibility_counts
from sklearn.metrics.pairwise import pairwise_distances
from scipy.stats import entropy
import pickle
import numpy as np
import sys
import multiprocessing as mp
import itertools
print(len(sys.argv))
if len(sys.argv)!=4:
print ('usage is \n python get_pairwise_distances.py ip-file op-file num-processes')
exit(1)
#def jensen_shannon(pqtuple):
# p=pqtuple[0]
# q=pqtuple[1]
def jensen_shannon(p,q):
# pshape=np.shape(p)
# qshape=np.shape(q)
# assert pshape[1]==1
# assert qshape[1]==1
# assert pshape[0]==qshape[0]
# assert min(p) >= 0
# assert min(q) >= 0
#assert sum(p)<=1 + np.finfo(float).eps and sum(p) >= 1- np.finfo(float).eps
#assert sum(q)<=1 + np.finfo(float).eps and sum(q) >= 1- np.finfo(float).eps
m=0.5*p+0.5*q
p = np.transpose(p[p > 0])
q = np.transpose(q[q > 0])
m = np.transpose(m[m > 0])
if entropy(m)-0.5*entropy(q)-0.5*entropy(p) < 0:
print (entropy(m)-0.5*entropy(q)-0.5*entropy(p))
print ('m = '+ str(entropy(m)))
print ('q = '+ str(entropy(q)))
print ('p = '+ str(entropy(p)))
return np.sqrt(entropy(m)-0.5*entropy(q)-0.5*entropy(p))
print (sys.argv[1])
print (sys.argv[2])
num_jobs=int(sys.argv[3])
with open(sys.argv[1], 'rb') as infile:
X = pickle.load(infile)
print (np.shape(X))
#Y=X
#num_rows=np.shape(Y)[0]
#for ind in range(num_rows):
# print(X[ind].sum(), ind)
D = pairwise_distances(X,metric=jensen_shannon,n_jobs=num_jobs)
#D=np.zeros((num_rows,num_rows))
#for i in range(num_rows):
# print(i)
# pool=mp.Pool(processes=40)
# pqtuple=itertools.product([X[i,:]], X)
# D[i,:]=pool.map(jensen_shannon,pqtuple)
with open(sys.argv[2],'wb') as outfile:
pickle.dump(D, outfile, pickle.HIGHEST_PROTOCOL)
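# Hedged sanity check (added for illustration, not part of the original script):
# the Jensen-Shannon distance above should be 0 for identical distributions,
# symmetric in its arguments, and bounded by sqrt(log(2)) when entropies use
# natural logarithms (scipy's default). The test vectors are made up.
def _check_jensen_shannon():
    p = np.array([0.5, 0.5, 0.0])
    q = np.array([0.0, 0.5, 0.5])
    assert abs(jensen_shannon(p, p)) < 1e-12
    assert abs(jensen_shannon(p, q) - jensen_shannon(q, p)) < 1e-12
    assert jensen_shannon(p, q) <= np.sqrt(np.log(2)) + 1e-12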
|
StarcoderdataPython
|
9780404
|
<gh_stars>0
# PROJECT : kungfucms
# TIME : 19-2-8 下午10:20
# AUTHOR : <NAME>
# EMAIL : <EMAIL>
# CELL : 13811754531
# WECHAT : 13811754531
# https://github.com/youngershen/
|
StarcoderdataPython
|
11398006
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/blogin/Projects/fix-masternode-tool/src/ui/ui_upd_mn_service_dlg.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_UpdMnServiceDlg(object):
def setupUi(self, UpdMnServiceDlg):
UpdMnServiceDlg.setObjectName("UpdMnServiceDlg")
UpdMnServiceDlg.resize(600, 311)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(UpdMnServiceDlg.sizePolicy().hasHeightForWidth())
UpdMnServiceDlg.setSizePolicy(sizePolicy)
self.verticalLayout = QtWidgets.QVBoxLayout(UpdMnServiceDlg)
self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.verticalLayout.setContentsMargins(-1, -1, -1, 0)
self.verticalLayout.setSpacing(12)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(UpdMnServiceDlg)
self.label.setWordWrap(True)
self.label.setOpenExternalLinks(True)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.line_2 = QtWidgets.QFrame(UpdMnServiceDlg)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.verticalLayout.addWidget(self.line_2)
self.stackedWidget = QtWidgets.QStackedWidget(UpdMnServiceDlg)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.stackedWidget.sizePolicy().hasHeightForWidth())
self.stackedWidget.setSizePolicy(sizePolicy)
self.stackedWidget.setObjectName("stackedWidget")
self.page0 = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.page0.sizePolicy().hasHeightForWidth())
self.page0.setSizePolicy(sizePolicy)
self.page0.setObjectName("page0")
self.gridLayout = QtWidgets.QGridLayout(self.page0)
self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setVerticalSpacing(8)
self.gridLayout.setObjectName("gridLayout")
self.lblOperatorPayout = QtWidgets.QLabel(self.page0)
self.lblOperatorPayout.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lblOperatorPayout.setObjectName("lblOperatorPayout")
self.gridLayout.addWidget(self.lblOperatorPayout, 2, 0, 1, 1)
self.layOperatorPayout = QtWidgets.QHBoxLayout()
self.layOperatorPayout.setSpacing(8)
self.layOperatorPayout.setObjectName("layOperatorPayout")
self.edtOperatorPayoutAddress = QtWidgets.QLineEdit(self.page0)
self.edtOperatorPayoutAddress.setPlaceholderText("")
self.edtOperatorPayoutAddress.setClearButtonEnabled(True)
self.edtOperatorPayoutAddress.setObjectName("edtOperatorPayoutAddress")
self.layOperatorPayout.addWidget(self.edtOperatorPayoutAddress)
self.gridLayout.addLayout(self.layOperatorPayout, 2, 1, 1, 1)
self.lblIP = QtWidgets.QLabel(self.page0)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.lblIP.setFont(font)
self.lblIP.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lblIP.setObjectName("lblIP")
self.gridLayout.addWidget(self.lblIP, 1, 0, 1, 1)
self.lblOperatorPayoutMsg = QtWidgets.QLabel(self.page0)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lblOperatorPayoutMsg.sizePolicy().hasHeightForWidth())
self.lblOperatorPayoutMsg.setSizePolicy(sizePolicy)
self.lblOperatorPayoutMsg.setText("")
self.lblOperatorPayoutMsg.setWordWrap(True)
self.lblOperatorPayoutMsg.setOpenExternalLinks(True)
self.lblOperatorPayoutMsg.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextSelectableByMouse)
self.lblOperatorPayoutMsg.setObjectName("lblOperatorPayoutMsg")
self.gridLayout.addWidget(self.lblOperatorPayoutMsg, 3, 1, 1, 1)
self.layPayoutAddress = QtWidgets.QHBoxLayout()
self.layPayoutAddress.setSpacing(8)
self.layPayoutAddress.setObjectName("layPayoutAddress")
self.edtIP = QtWidgets.QLineEdit(self.page0)
self.edtIP.setPlaceholderText("")
self.edtIP.setClearButtonEnabled(True)
self.edtIP.setObjectName("edtIP")
self.layPayoutAddress.addWidget(self.edtIP)
self.label_2 = QtWidgets.QLabel(self.page0)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.layPayoutAddress.addWidget(self.label_2)
self.edtPort = QtWidgets.QLineEdit(self.page0)
self.edtPort.setMaximumSize(QtCore.QSize(100, 16777215))
self.edtPort.setClearButtonEnabled(True)
self.edtPort.setObjectName("edtPort")
self.layPayoutAddress.addWidget(self.edtPort)
self.gridLayout.addLayout(self.layPayoutAddress, 1, 1, 1, 1)
self.stackedWidget.addWidget(self.page0)
self.verticalLayout.addWidget(self.stackedWidget)
self.line = QtWidgets.QFrame(UpdMnServiceDlg)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout.addWidget(self.line)
self.lblManualCommands = QtWidgets.QLabel(UpdMnServiceDlg)
self.lblManualCommands.setText("")
self.lblManualCommands.setObjectName("lblManualCommands")
self.verticalLayout.addWidget(self.lblManualCommands)
self.edtManualCommands = QtWidgets.QTextBrowser(UpdMnServiceDlg)
self.edtManualCommands.setOpenExternalLinks(True)
self.edtManualCommands.setOpenLinks(True)
self.edtManualCommands.setObjectName("edtManualCommands")
self.verticalLayout.addWidget(self.edtManualCommands)
self.frame = QtWidgets.QFrame(UpdMnServiceDlg)
self.frame.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.frame)
self.horizontalLayout.setContentsMargins(12, 12, 12, 12)
self.horizontalLayout.setObjectName("horizontalLayout")
self.btnCancel = QtWidgets.QPushButton(self.frame)
self.btnCancel.setAutoDefault(False)
self.btnCancel.setObjectName("btnCancel")
self.horizontalLayout.addWidget(self.btnCancel)
self.lblDocumentation = QtWidgets.QLabel(self.frame)
self.lblDocumentation.setText("")
self.lblDocumentation.setOpenExternalLinks(True)
self.lblDocumentation.setObjectName("lblDocumentation")
self.horizontalLayout.addWidget(self.lblDocumentation)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.btnSendUpdateTx = QtWidgets.QPushButton(self.frame)
self.btnSendUpdateTx.setAutoDefault(False)
self.btnSendUpdateTx.setObjectName("btnSendUpdateTx")
self.horizontalLayout.addWidget(self.btnSendUpdateTx)
self.btnClose = QtWidgets.QPushButton(self.frame)
self.btnClose.setObjectName("btnClose")
self.horizontalLayout.addWidget(self.btnClose)
self.verticalLayout.addWidget(self.frame)
self.retranslateUi(UpdMnServiceDlg)
self.stackedWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(UpdMnServiceDlg)
UpdMnServiceDlg.setTabOrder(self.edtIP, self.edtPort)
UpdMnServiceDlg.setTabOrder(self.edtPort, self.edtOperatorPayoutAddress)
UpdMnServiceDlg.setTabOrder(self.edtOperatorPayoutAddress, self.edtManualCommands)
UpdMnServiceDlg.setTabOrder(self.edtManualCommands, self.btnSendUpdateTx)
UpdMnServiceDlg.setTabOrder(self.btnSendUpdateTx, self.btnClose)
UpdMnServiceDlg.setTabOrder(self.btnClose, self.btnCancel)
def retranslateUi(self, UpdMnServiceDlg):
_translate = QtCore.QCoreApplication.translate
UpdMnServiceDlg.setWindowTitle(_translate("UpdMnServiceDlg", "Update service"))
self.label.setText(_translate("UpdMnServiceDlg", "The transaction type associated with this action (ProUpServTx) is used to update information relating to the operator (<a href=\"https://docs.fix.org/en/stable/masternodes/maintenance.html#proupservtx\">details</a>)."))
self.lblOperatorPayout.setText(_translate("UpdMnServiceDlg", "<b>Operator Payout address</b> "))
self.lblIP.setText(_translate("UpdMnServiceDlg", "IP"))
self.label_2.setText(_translate("UpdMnServiceDlg", "Port"))
self.btnCancel.setText(_translate("UpdMnServiceDlg", "Cancel"))
self.btnSendUpdateTx.setText(_translate("UpdMnServiceDlg", "Send Update Transaction"))
self.btnClose.setText(_translate("UpdMnServiceDlg", "Close"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
UpdMnServiceDlg = QtWidgets.QDialog()
ui = Ui_UpdMnServiceDlg()
ui.setupUi(UpdMnServiceDlg)
UpdMnServiceDlg.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
3228967
|
<reponame>cyandterry/Python-Study
"""
Find the contiguous subarray within an array (containing at least one number) which has the largest sum.
For example, given the array [−2,1,−3,4,−1,2,1,−5,4],
the contiguous subarray [4,−1,2,1] has the largest sum = 6.
click to show more practice.
More practice:
If you have figured out the O(n) solution, try coding another solution using the divide and conquer approach, which is more subtle.
"""
class Solution:
# @param A, a list of integers
# @return an integer
def maxSubArray(self, A):
return self.maxSubArray_2(A)
def maxSubArray_1(self, A):
max_sum = A[0]
cur_sum = 0
for num in A:
cur_sum += num
max_sum = max(max_sum, cur_sum)
if cur_sum < 0:
cur_sum = 0
return max_sum
def maxSubArray_2(self, A):
res = A[0]
dp = A[0]
for num in A[1:]:
dp = max(num, dp+num)
res = max(res, dp)
return res
# Note
# 1. dp[i] means the maximum subarray sum ending at A[i]
# 2. dp[0] = A[0]
# 3. dp[i] = max(A[i], A[i] + dp[i-1]): if the dp value ending at A[i-1] is negative we drop it, otherwise we keep adding to it
# 4. answer = max over dp[0..N-1]
# Because we don't need to store every dp[i], the code keeps only a single dp variable
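# Usage check (added for illustration), using the example from the problem statement:
if __name__ == '__main__':
    s = Solution()
    assert s.maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
    assert s.maxSubArray_1([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
    print('max subarray sum:', s.maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))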
|
StarcoderdataPython
|