blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b0af71064e926490ac415e9930d72e7cccec1d8c | 7464f15c33c74454f2a98dceb7f603919abba4d1 | /happy.py | 01383a2a50c7506bb341600a3deaf9076a692953 | [] | no_license | willingc/my-bit | 374bece797c59956e500504cd62940a2c1718013 | 535768dcb09297f1028e0e111fd062b91e8032c6 | refs/heads/master | 2016-08-08T21:26:22.119643 | 2015-11-30T03:23:59 | 2015-11-30T03:23:59 | 47,053,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | """
happy.py
by Carol Willing
November 28, 2015
Public Domain
Use this to display a 'Happy Face' image on micro:bit's 5x5 pixel grid of LEDs.
Remember... Writing a program is similar to planning a birthday party.
Program Birthday party
------- --------------
'Prepare' Prepare the room with balloons; order food; pick up a cake.
'Do' Do things during the party -- sing, dance, play videogames.
'Clean' Clean the table. Tidy up after the party. Take out the rubbish.
"""
from microbit import *
# Prepare. Put the preinstalled images into user friendly variables
my_happy_face = Image.HAPPY
my_sad_face = Image.SAD
# Do things! ----> Show the images on the display.
# sleep() takes milliseconds: hold happy 8 s, sad 8 s, then happy again 4 s.
display.show(my_happy_face)
sleep(8000)
display.show(my_sad_face)
sleep(8000)
display.show(my_happy_face)
sleep(4000)
# Clean up stuff. Display 'BYE' and clear display. (Clean your room too.)
display.scroll("BYE")
display.clear()
| [
"[email protected]"
] | |
e5a9e28f6005491c144002425c212dd0d5803423 | a2e11ec88ef3c83b9f07129e76a3681a676d164f | /sessionproject3/sessionproject3/wsgi.py | a7a02fa35437ef303c13922290ff105dce0051b2 | [] | no_license | qwertypool/lofo | dadd7cd5b149a3a200b7111d803b1d0195d76642 | 3bc7bd125e7ea5a67f51dd6dd654e38a5f218055 | refs/heads/master | 2022-05-18T09:31:11.456634 | 2020-04-18T14:47:44 | 2020-04-18T14:47:44 | 256,773,858 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for sessionproject3 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module unless the environment
# already specifies one.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sessionproject3.settings')
# WSGI entry point used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"[email protected]"
] | |
c70b445d6d1bb1da816fcacacadb68decd13d563 | b424a13f032d5a607e6df4dd78bc47ad1d06a147 | /astroquery/simbad/tests/test_simbad.py | fe66d82dc76fea148ff9163e36a89ec61940870a | [] | no_license | EnjoyLifeFund/macSierra-py36-pkgs | 1e7eeb9b55415da6eb12465d67730d76e9cc619a | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | refs/heads/master | 2021-01-20T10:23:50.044019 | 2017-09-05T02:53:26 | 2017-09-05T02:53:26 | 90,333,987 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17,899 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import re
from astropy.extern import six
import pytest
import astropy.units as u
from astropy.table import Table
import numpy as np
from ... import simbad
from ...utils.testing_tools import MockResponse
from ...utils import commons
from ...exceptions import TableParseError
from .test_simbad_remote import multicoords
# Coordinate fixtures in several reference frames used by the parametrized
# tests below.
GALACTIC_COORDS = commons.GalacticCoordGenerator(l=-67.02084, b=-29.75447,
                                                 unit=(u.deg, u.deg))
ICRS_COORDS = commons.ICRSCoordGenerator("05h35m17.3s -05h23m28s")
FK4_COORDS = commons.FK4CoordGenerator(ra=84.90759, dec=-80.89403,
                                       unit=(u.deg, u.deg))
FK5_COORDS = commons.FK5CoordGenerator(ra=83.82207, dec=-80.86667,
                                       unit=(u.deg, u.deg))
# Map from the first word after ``query`` in a mocked SIMBAD script to the
# canned response file that MockResponseSimbad serves.
DATA_FILES = {
    'id': 'query_id.data',
    'coo': 'query_coo.data',
    'cat': 'query_cat.data',
    'bibobj': 'query_bibobj.data',
    'bibcode': 'query_bibcode.data',
    'objectids': 'query_objectids.data',
    'error': 'query_error.data',
    'sample': 'query_sample.data',
    'region': 'query_sample_region.data',
}
class MockResponseSimbad(MockResponse):
    """Mock HTTP response that serves canned SIMBAD data.

    The first word after ``query`` in the submitted script selects which
    file from ``DATA_FILES`` is returned as the response content.
    """

    query_regex = re.compile(r'query\s+([a-z]+)\s+')

    def __init__(self, script, cache=True, **kwargs):
        # preserve, e.g., headers.  ``cache`` is accepted for signature
        # compatibility with the real request path but is unused here.
        super(MockResponseSimbad, self).__init__(**kwargs)
        self.content = self.get_content(script)

    def get_content(self, script):
        """Return the canned response bytes for *script*.

        Returns ``None`` when the script contains no recognized
        ``query <type>`` statement (matching the original behavior).
        """
        match = self.query_regex.search(script)
        if match:
            filename = DATA_FILES[match.group(1)]
            # Context manager closes the data file promptly instead of
            # leaking the handle until garbage collection.
            with open(data_path(filename), "rb") as f:
                return f.read()
def data_path(filename):
    """Return the path of *filename* inside this test module's data dir."""
    return os.path.join(os.path.dirname(__file__), 'data', filename)
@pytest.fixture
def patch_post(request):
    """Fixture that reroutes Simbad's HTTP layer to canned mock responses."""
    try:
        mp = request.getfixturevalue("monkeypatch")
    except AttributeError:  # pytest < 3 used a different accessor name
        mp = request.getfuncargvalue("monkeypatch")
    mp.setattr(simbad.SimbadClass, '_request', post_mockreturn)
    return mp
def post_mockreturn(self, method, url, data, timeout, **kwargs):
    """Stand-in for ``SimbadClass._request`` returning a MockResponseSimbad.

    Also records the submitted payload on ``self._last_query`` so tests can
    inspect the script that would have been POSTed to SIMBAD.
    """
    response = MockResponseSimbad(data['script'], **kwargs)
    # Minimal attribute holder mimicking the real "last query" object.
    class last_query(object):
        pass
    self._last_query = last_query()
    self._last_query.data = data
    return response
@pytest.mark.parametrize(('radius', 'expected_radius'),
                         [('5d0m0s', '5.0d'),
                          ('5d', '5.0d'),
                          ('5.0d', '5.0d'),
                          (5 * u.deg, '5.0d'),
                          (5.0 * u.deg, '5.0d'),
                          (1.2 * u.deg, '1.2d'),
                          (0.5 * u.deg, '30.0m'),
                          ('0d1m12s', '1.2m'),
                          (0.003 * u.deg, '10.8s'),
                          ('0d0m15s', '15.0s')
                          ])
def test_parse_radius(radius, expected_radius):
    """Radii in several formats are normalized to SIMBAD's d/m/s strings."""
    actual = simbad.core._parse_radius(radius)
    assert actual == expected_radius
@pytest.mark.parametrize(('ra', 'dec', 'expected_ra', 'expected_dec'),
                         [(ICRS_COORDS.ra, ICRS_COORDS.dec, u'5:35:17.3',
                           u'-80:52:00')
                          ])
def test_to_simbad_format(ra, dec, expected_ra, expected_dec):
    """RA/Dec quantities are rendered as SIMBAD sexagesimal strings."""
    actual_ra, actual_dec = simbad.core._to_simbad_format(ra, dec)
    assert (actual_ra, actual_dec) == (expected_ra, expected_dec)
@pytest.mark.parametrize(('coordinates', 'expected_frame'),
                         [(GALACTIC_COORDS, 'GAL'),
                          (ICRS_COORDS, 'ICRS'),
                          (FK4_COORDS, 'FK4'),
                          (FK5_COORDS, 'FK5')
                          ])
def test_get_frame_coordinates(coordinates, expected_frame):
    """Frame detection maps each coordinate object to its SIMBAD frame tag."""
    actual_frame = simbad.core._get_frame_coords(coordinates)[2]
    assert actual_frame == expected_frame
    if actual_frame == 'GAL':
        # Galactic l is reported modulo 360, so compare in that space.
        l, b = simbad.core._get_frame_coords(coordinates)[:2]
        np.testing.assert_almost_equal(float(l) % 360, -67.02084 % 360)
        np.testing.assert_almost_equal(float(b), -29.75447)
def test_parse_result():
    """A good payload parses to a Table; a bad one raises TableParseError."""
    result1 = simbad.core.Simbad._parse_result(
        MockResponseSimbad('query id '), simbad.core.SimbadVOTableResult)
    assert isinstance(result1, Table)
    with pytest.raises(TableParseError) as ex:
        simbad.core.Simbad._parse_result(MockResponseSimbad('query error '),
                                         simbad.core.SimbadVOTableResult)
    assert str(ex.value) == ('Failed to parse SIMBAD result! The raw response '
                             'can be found in self.last_response, and the '
                             'error in self.last_table_parse_error. '
                             'The attempted parsed result is in '
                             'self.last_parsed_result.\n Exception: 7:115: '
                             'no element found')
    assert isinstance(simbad.Simbad.last_response.text, six.string_types)
    assert isinstance(simbad.Simbad.last_response.content, six.binary_type)
# Default field list, captured once for the expected-script fixtures below.
votable_fields = ",".join(simbad.core.Simbad.get_votable_fields())
@pytest.mark.parametrize(('args', 'kwargs', 'expected_script'),
                         [(["m [0-9]"], dict(wildcard=True,
                                             caller='query_object_async'),
                           ("\nvotable {" + votable_fields + "}\n"
                            "votable open\n"
                            "query id wildcard m [0-9] \n"
                            "votable close"
                            )),
                          (["2006ApJ"], dict(caller='query_bibcode_async',
                                             get_raw=True),
                           ("\n\nquery bibcode 2006ApJ \n"))
                          ])
def test_args_to_payload(args, kwargs, expected_script):
    """Query arguments are serialized into the exact SIMBAD script text."""
    script = simbad.Simbad._args_to_payload(*args, **kwargs)['script']
    assert script == expected_script
@pytest.mark.parametrize(('epoch', 'equinox'),
                         [(2000, 'thousand'),
                          ('J-2000', None),
                          (None, '10e3b')
                          ])
def test_validation(epoch, equinox):
    """Malformed epoch/equinox values are rejected with ValueError."""
    with pytest.raises(ValueError):
        # only one of these has to raise an exception
        if equinox is not None:
            simbad.core.validate_equinox(equinox)
        if epoch is not None:
            simbad.core.validate_epoch(epoch)
@pytest.mark.parametrize(('bibcode', 'wildcard'),
                         [('2006ApJ*', True),
                          ('2005A&A.430.165F', None)
                          ])
def test_query_bibcode_async(patch_post, bibcode, wildcard):
    """Class-level and instance-level bibcode queries return the same data."""
    response1 = simbad.core.Simbad.query_bibcode_async(bibcode,
                                                       wildcard=wildcard)
    response2 = simbad.core.Simbad().query_bibcode_async(bibcode,
                                                         wildcard=wildcard)
    assert response1 is not None and response2 is not None
    assert response1.content == response2.content
def test_query_bibcode_class(patch_post):
    """Bibcode query on the class object parses to a Table."""
    result1 = simbad.core.Simbad.query_bibcode("2006ApJ*", wildcard=True)
    assert isinstance(result1, Table)
def test_query_bibcode_instance(patch_post):
    """Bibcode query on an instance parses to a Table."""
    S = simbad.core.Simbad()
    result2 = S.query_bibcode("2006ApJ*", wildcard=True)
    assert isinstance(result2, Table)
def test_query_objectids_async(patch_post):
    """Object-ID queries agree between class and instance call styles."""
    response1 = simbad.core.Simbad.query_objectids_async('Polaris')
    response2 = simbad.core.Simbad().query_objectids_async('Polaris')
    assert response1 is not None and response2 is not None
    assert response1.content == response2.content
def test_query_objectids(patch_post):
    """Object-ID queries parse to Tables in both call styles."""
    result1 = simbad.core.Simbad.query_objectids('Polaris')
    result2 = simbad.core.Simbad().query_objectids('Polaris')
    assert isinstance(result1, Table)
    assert isinstance(result2, Table)
def test_query_bibobj_async(patch_post):
    """Bib-object queries agree between class and instance call styles."""
    response1 = simbad.core.Simbad.query_bibobj_async('2005A&A.430.165F')
    response2 = simbad.core.Simbad().query_bibobj_async('2005A&A.430.165F')
    assert response1 is not None and response2 is not None
    assert response1.content == response2.content
def test_query_bibobj(patch_post):
    """Bib-object queries parse to Tables in both call styles."""
    result1 = simbad.core.Simbad.query_bibobj('2005A&A.430.165F')
    result2 = simbad.core.Simbad().query_bibobj('2005A&A.430.165F')
    assert isinstance(result1, Table)
    assert isinstance(result2, Table)
def test_query_catalog_async(patch_post):
    """Catalog queries agree between class and instance call styles."""
    response1 = simbad.core.Simbad.query_catalog_async('m')
    response2 = simbad.core.Simbad().query_catalog_async('m')
    assert response1 is not None and response2 is not None
    assert response1.content == response2.content
def test_query_catalog(patch_post):
    """Catalog queries parse to Tables in both call styles."""
    result1 = simbad.core.Simbad.query_catalog('m')
    result2 = simbad.core.Simbad().query_catalog('m')
    assert isinstance(result1, Table)
    assert isinstance(result2, Table)
@pytest.mark.parametrize(('coordinates', 'radius', 'equinox', 'epoch'),
                         [(ICRS_COORDS, None, 2000.0, 'J2000'),
                          (GALACTIC_COORDS, 5 * u.deg, 2000.0, 'J2000'),
                          (FK4_COORDS, '5d0m0s', 2000.0, 'J2000'),
                          (FK5_COORDS, None, 2000.0, 'J2000'),
                          (multicoords, 0.5*u.arcsec, 2000.0, 'J2000'),
                          ])
def test_query_region_async(patch_post, coordinates, radius, equinox, epoch):
    """Region queries agree between class and instance call styles."""
    response1 = simbad.core.Simbad.query_region_async(
        coordinates, radius=radius, equinox=equinox, epoch=epoch)
    response2 = simbad.core.Simbad().query_region_async(
        coordinates, radius=radius, equinox=equinox, epoch=epoch)
    assert response1 is not None and response2 is not None
    assert response1.content == response2.content
@pytest.mark.parametrize(('coordinates', 'radius', 'equinox', 'epoch'),
                         [(ICRS_COORDS, None, 2000.0, 'J2000'),
                          (GALACTIC_COORDS, 5 * u.deg, 2000.0, 'J2000'),
                          (FK4_COORDS, '5d0m0s', 2000.0, 'J2000'),
                          (FK5_COORDS, None, 2000.0, 'J2000')
                          ])
def test_query_region(patch_post, coordinates, radius, equinox, epoch):
    """Region queries parse to Tables in both call styles."""
    result1 = simbad.core.Simbad.query_region(coordinates, radius=radius,
                                              equinox=equinox, epoch=epoch)
    result2 = simbad.core.Simbad().query_region(coordinates, radius=radius,
                                                equinox=equinox, epoch=epoch)
    assert isinstance(result1, Table)
    assert isinstance(result2, Table)
@pytest.mark.parametrize(('coordinates', 'radius', 'equinox', 'epoch'),
                         [(ICRS_COORDS, 0, 2000.0, 'J2000')])
def test_query_region_radius_error(patch_post, coordinates, radius,
                                   equinox, epoch):
    """A bare-number radius (no unit) must raise UnitsError."""
    with pytest.raises(u.UnitsError):
        simbad.core.Simbad.query_region(
            coordinates, radius=radius, equinox=equinox, epoch=epoch)
    with pytest.raises(u.UnitsError):
        simbad.core.Simbad().query_region(
            coordinates, radius=radius, equinox=equinox, epoch=epoch)
@pytest.mark.parametrize(('coordinates', 'radius', 'equinox', 'epoch'),
                         [(ICRS_COORDS, "0d", 2000.0, 'J2000'),
                          (GALACTIC_COORDS, 1.0 * u.marcsec, 2000.0, 'J2000')
                          ])
def test_query_region_small_radius(patch_post, coordinates, radius,
                                   equinox, epoch):
    """Zero or sub-milliarcsecond radii are still accepted and parse OK."""
    result1 = simbad.core.Simbad.query_region(coordinates, radius=radius,
                                              equinox=equinox, epoch=epoch)
    result2 = simbad.core.Simbad().query_region(coordinates, radius=radius,
                                                equinox=equinox, epoch=epoch)
    assert isinstance(result1, Table)
    assert isinstance(result2, Table)
@pytest.mark.parametrize(('object_name', 'wildcard'),
                         [("m1", None),
                          ("m [0-9]", True)
                          ])
def test_query_object_async(patch_post, object_name, wildcard):
    """Object queries agree between class and instance call styles."""
    response1 = simbad.core.Simbad.query_object_async(object_name,
                                                      wildcard=wildcard)
    response2 = simbad.core.Simbad().query_object_async(object_name,
                                                        wildcard=wildcard)
    assert response1 is not None and response2 is not None
    assert response1.content == response2.content
@pytest.mark.parametrize(('object_name', 'wildcard'),
                         [("m1", None),
                          ("m [0-9]", True),
                          ])
def test_query_object(patch_post, object_name, wildcard):
    """Object queries parse to Tables in both call styles."""
    result1 = simbad.core.Simbad.query_object(object_name,
                                              wildcard=wildcard)
    result2 = simbad.core.Simbad().query_object(object_name,
                                                wildcard=wildcard)
    assert isinstance(result1, Table)
    assert isinstance(result2, Table)
def test_list_votable_fields():
    """Listing available VOTable fields must not raise."""
    simbad.core.Simbad.list_votable_fields()
    simbad.core.Simbad().list_votable_fields()
def test_get_field_description():
    """Known field names are described; unknown names raise."""
    simbad.core.Simbad.get_field_description('bibcodelist(y1-y2)')
    simbad.core.Simbad().get_field_description('bibcodelist(y1-y2)')
    with pytest.raises(Exception):
        simbad.core.Simbad.get_field_description('xyz')
def test_votable_fields():
    """Adding/removing/resetting VOTable fields behaves idempotently."""
    simbad.core.Simbad.add_votable_fields('rot', 'ze', 'z')
    assert (set(simbad.core.Simbad.get_votable_fields()) ==
            set(['main_id', 'coordinates', 'rot', 'ze', 'z']))
    try:
        simbad.core.Simbad.add_votable_fields('z')
    except KeyError:
        pass  # this is the expected response: duplicates are rejected
    assert (set(simbad.core.Simbad.get_votable_fields()) ==
            set(['main_id', 'coordinates', 'rot', 'ze', 'z']))
    simbad.core.Simbad.remove_votable_fields('rot', 'main_id', 'coordinates')
    assert set(simbad.core.Simbad.get_votable_fields()) == set(['ze', 'z'])
    # Removing already-absent fields is a no-op.
    simbad.core.Simbad.remove_votable_fields('rot', 'main_id', 'coordinates')
    assert set(simbad.core.Simbad.get_votable_fields()) == set(['ze', 'z'])
    # Removing everything restores the default field set.
    simbad.core.Simbad.remove_votable_fields('ze', 'z')
    assert (set(simbad.core.Simbad.get_votable_fields()) ==
            set(['main_id', 'coordinates']))
    simbad.core.Simbad.add_votable_fields('rot', 'ze', 'z')
    simbad.core.Simbad.reset_votable_fields()
    assert (set(simbad.core.Simbad.get_votable_fields()) ==
            set(['main_id', 'coordinates']))
def test_query_criteria1(patch_post):
    """Criteria queries embed the region criterion in the sent script."""
    Simbad = simbad.core.Simbad()
    result = Simbad.query_criteria(
        "region(box, GAL, 49.89 -0.3, 0.5d 0.5d)", otype='HII')
    assert isinstance(result, Table)
    assert "region(box, GAL, 49.89 -0.3, 0.5d 0.5d)" in Simbad._last_query.data['script']
def test_query_criteria2(patch_post):
    """Keyword criteria appear in the script; custom fields are honored."""
    S = simbad.core.Simbad()
    S.add_votable_fields('ra(d)', 'dec(d)')
    S.remove_votable_fields('coordinates')
    assert S.get_votable_fields() == ['main_id', 'ra(d)', 'dec(d)']
    result = S.query_criteria(otype='SNR')
    assert isinstance(result, Table)
    assert 'otype=SNR' in S._last_query.data['script']
def test_simbad_settings1():
    """remove_votable_fields without strip_params keeps parametrized names."""
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
    simbad.core.Simbad.add_votable_fields('ra', 'dec(5)')
    simbad.core.Simbad.remove_votable_fields('ra', 'dec')
    assert (simbad.Simbad.get_votable_fields() ==
            ['main_id', 'coordinates', 'dec(5)'])
    simbad.core.Simbad.reset_votable_fields()
def test_simbad_settings2():
    """strip_params=True also removes parametrized variants like dec(5)."""
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
    simbad.core.Simbad.add_votable_fields('ra', 'dec(5)')
    simbad.core.Simbad.remove_votable_fields('ra', 'dec', strip_params=True)
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
def test_regression_votablesettings():
    """Same base field with different parameters may coexist (regression)."""
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
    simbad.core.Simbad.add_votable_fields('ra', 'dec(5)')
    # this is now allowed:
    simbad.core.Simbad.add_votable_fields('ra(d)', 'dec(d)')
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates',
                                                  'ra', 'dec(5)', 'ra(d)',
                                                  'dec(d)']
    # cleanup
    simbad.core.Simbad.remove_votable_fields('ra', 'dec', strip_params=True)
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
def test_regression_votablesettings2():
    """Multiple fluxdata(...) filters may coexist and strip together."""
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
    simbad.core.Simbad.add_votable_fields('fluxdata(J)')
    simbad.core.Simbad.add_votable_fields('fluxdata(H)')
    simbad.core.Simbad.add_votable_fields('fluxdata(K)')
    assert (simbad.Simbad.get_votable_fields() ==
            ['main_id', 'coordinates',
             'fluxdata(J)', 'fluxdata(H)', 'fluxdata(K)'])
    simbad.core.Simbad.remove_votable_fields('fluxdata', strip_params=True)
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
def test_regression_issue388():
    # This is a python-3 issue: content needs to be decoded?
    response = MockResponseSimbad('\nvotable {main_id,coordinates}\nvotable '
                                  'open\nquery id m1 \nvotable close')
    with open(data_path('m1.data'), "rb") as f:
        response.content = f.read()
    parsed_table = simbad.Simbad._parse_result(response,
                                               simbad.core.SimbadVOTableResult)
    assert parsed_table['MAIN_ID'][0] == b'M 1'
    assert len(parsed_table) == 1
| [
"[email protected]"
] | |
f8f8a93e2b53a4b74d0c41930fd04e417f2189c8 | 2f418a0f2fcca40f84ec0863b31ff974b574350c | /scripts/addons_extern/cut_mesh-master/op_slice/slice_datastructure.py | 6c86f20d47db1178d36c9ecde0f011a0e1296f6c | [] | no_license | JT-a/blenderpython279 | 57a81b55564218f3b1417c2ffa97f5161897ec79 | 04846c82f794c22f87d677d9eb8cec1d05c48cda | refs/heads/master | 2021-06-25T06:58:07.670613 | 2017-09-11T11:14:36 | 2017-09-11T11:14:36 | 103,723,697 | 4 | 2 | null | 2017-09-16T04:09:31 | 2017-09-16T04:09:31 | null | UTF-8 | Python | false | false | 7,750 | py | '''
Created on Oct 8, 2015
@author: Patrick
'''
import time
import bpy
import bmesh
from mathutils import Vector, Matrix, kdtree
from mathutils.bvhtree import BVHTree
from mathutils.geometry import intersect_point_line, intersect_line_plane
from bpy_extras import view3d_utils
from ..bmesh_fns import grow_selection_to_find_face, flood_selection_faces, edge_loops_from_bmedges
from ..cut_algorithms import cross_section_2seeds_ver1, path_between_2_points
from ..geodesic import geodesic_walk, continue_geodesic_walk, gradient_descent
from .. import common_drawing
class Slice(object):
    '''
    A class which manages user placed points on an object to create a
    piecewise path of geodesics, adapted to the objects surface.

    NOTE(review): several methods call ``bversion()``, which is neither
    defined nor imported in this module -- presumably it comes from a
    helper in the original add-on.  Confirm before using stand-alone.
    NOTE(review): ``geo_data`` is only initialized by ``reset_vars`` /
    ``click_add_seed``, not by ``__init__``.
    '''
    def __init__(self,context, cut_object):
        # Build a private BMesh copy of the object's mesh and a BVH tree
        # over it for fast ray casting.
        self.cut_ob = cut_object
        self.bme = bmesh.new()
        self.bme.from_mesh(cut_object.data)
        self.bme.verts.ensure_lookup_table()
        self.bme.edges.ensure_lookup_table()
        self.bme.faces.ensure_lookup_table()
        # N-gon triangulation is currently disabled (kept for reference).
        #non_tris = [f for f in self.bme.faces if len(f.verts) > 3]
        #bmesh.ops.triangulate(self.bme, faces = non_tris, quad_method = 0, ngon_method = 0)
        #non_tris = [f for f in self.bme.faces if len(f.verts) > 3]
        #if len(non_tris):
        #geom = bmesh.ops.connect_verts_concave(self.bme, non_tris)
        self.bme.verts.ensure_lookup_table()
        self.bme.edges.ensure_lookup_table()
        self.bme.faces.ensure_lookup_table()
        self.bvh = BVHTree.FromBMesh(self.bme)
        # Seed/target faces and their object-space hit locations, plus the
        # computed path between them.
        self.seed = None
        self.seed_loc = None
        self.target = None
        self.target_loc = None
        self.path = []
    def reset_vars(self):
        '''
        Clear seed/target state, geodesic caches and the current path.
        '''
        self.seed = None
        self.seed_loc = None
        self.target = None
        self.target_loc = None
        self.geo_data = [dict(), set(), set(), set()] #geos, fixed, close, far
        self.path = []
    def grab_initiate(self):
        # Begin dragging the target point; snapshot the current state so a
        # cancel can restore it.  Returns False when there is no target yet.
        if self.target != None :
            self.grab_undo_loc = self.target_loc
            self.target_undo = self.target
            self.path_undo = self.path
            return True
        else:
            return False
    def grab_mouse_move(self,context,x,y):
        # Cast a ray from the mouse position into the scene and, if the cut
        # object is hit, move the target there and recompute the path.
        region = context.region
        rv3d = context.region_data
        coord = x, y
        view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
        ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)
        ray_target = ray_origin + (view_vector * 1000)
        mx = self.cut_ob.matrix_world
        imx = mx.inverted()
        # Blender changed the ray_cast signature/semantics at 2.77.
        if bversion() < '002.077.000':
            loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target)
        else:
            res, loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target - imx * ray_origin)
        # Debug: compare the object ray cast against the BVH ray cast.
        loc2, no2, face_ind2, d = self.bvh.ray_cast(imx * ray_origin, view_vector)
        if loc != None and loc2 != None:
            print((loc - loc2).length)
        if face_ind == -1:
            self.grab_cancel()
            return
        self.target = self.bme.faces[face_ind]
        self.target_loc = loc
        vrts, eds, ed_cross, f_cross, error = path_between_2_points(self.bme, self.bvh, mx,mx* self.seed_loc,mx*self.target_loc,
                                        max_tests = 10000, debug = True,
                                        prev_face = None, use_limit = True)
        # On error the previous path is kept (deliberately not cleared).
        if not error:
            self.path = vrts
        #else:
            #self.path = []
    def grab_cancel(self):
        # Restore the state captured by grab_initiate.
        self.target_loc = self.grab_undo_loc
        self.target = self.target_undo
        self.path = self.path_undo
        return
    def grab_confirm(self):
        # Commit the drag and drop the undo snapshot.
        self.grab_undo_loc = None
        self.target_undo = None
        self.path_undo = []
        return
    def click_add_seed(self,context,x,y):
        '''
        x,y = event.mouse_region_x, event.mouse_region_y

        Ray cast under the mouse and set the hit face/location as the path
        seed (start point), resetting the geodesic caches.
        '''
        region = context.region
        rv3d = context.region_data
        coord = x, y
        view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
        ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)
        ray_target = ray_origin + (view_vector * 1000)
        mx = self.cut_ob.matrix_world
        imx = mx.inverted()
        if bversion() < '002.077.000':
            loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target)
        else:
            res, loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target - imx * ray_origin)
        if face_ind == -1:
            self.selected = -1
            return
        self.seed = self.bme.faces[face_ind]
        self.seed_loc = loc
        self.geo_data = [dict(), set(), set(), set()]
    def click_add_target(self, context, x, y):
        # Ray cast under the mouse, set the hit as the path target, and
        # compute the surface path from seed to target.
        region = context.region
        rv3d = context.region_data
        coord = x, y
        view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
        ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)
        ray_target = ray_origin + (view_vector * 1000)
        mx = self.cut_ob.matrix_world
        imx = mx.inverted()
        if bversion() < '002.077.000':
            loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target)
        else:
            res, loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target - imx * ray_origin)
        if face_ind == -1: return
        self.target = self.bme.faces[face_ind]
        self.target_loc = loc
        vrts, eds, ed_cross, f_cross, error = path_between_2_points(self.bme, self.bvh, mx,mx* self.seed_loc,mx*self.target_loc,
                                        max_tests = 10000, debug = True,
                                        prev_face = None, use_limit = True)
        # Unlike grab_mouse_move, a failed cut here clears the path.
        if not error:
            self.path = vrts
        else:
            self.path = []
        return
    def draw(self,context):
        # Draw the current path (blue line), seed (red dot) and target
        # (green dot) in world space.
        if len(self.path):
            mx = self.cut_ob.matrix_world
            pts = [mx * v for v in self.path]
            common_drawing.draw_polyline_from_3dpoints(context, pts, (.2,.1,.8,1), 3, 'GL_LINE')
        if self.seed_loc != None:
            mx = self.cut_ob.matrix_world
            common_drawing.draw_3d_points(context, [mx * self.seed_loc], 8, color = (1,0,0,1))
        if self.target_loc != None:
            mx = self.cut_ob.matrix_world
            common_drawing.draw_3d_points(context, [mx * self.target_loc], 8, color = (0,1,0,1))
class PolyCutPoint(object):
    """A user-placed cut point on the target mesh surface.

    Holds the 3D location plus surface data (normal, owning face, and the
    set of nearby faces) that is filled in after construction.
    """

    def __init__(self, co):
        """Store *co* and start with empty surface data."""
        self.co = co
        self.face_region = set()
        self.face = None
        self.no = None

    def find_closest_non_manifold(self):
        """Not implemented yet: always reports that nothing was found."""
        return None
class NonManifoldEndpoint(object):
    """An endpoint lying on a non-manifold (border) edge of the mesh.

    Parameters
    ----------
    co : the endpoint location.
    ed : a BMEdge-like object; must be a border edge, i.e. have exactly
         one linked face.

    Raises
    ------
    ValueError
        If *ed* is not a border edge.  The original code did ``return
        None`` from ``__init__``, which does NOT abort construction -- it
        silently produced a half-initialized instance with no attributes
        set, deferring the crash to first attribute access.  Failing
        loudly at construction is strictly safer.
    """
    def __init__(self, co, ed):
        if len(ed.link_faces) != 1:
            raise ValueError(
                "NonManifoldEndpoint requires an edge with exactly one "
                "linked face; got %d" % len(ed.link_faces))
        self.co = co
        self.ed = ed
        self.face = ed.link_faces[0]
self.face = ed.link_faces[0]
| [
"[email protected]"
] | |
33a16862ec2f40db072c68c1e4c243096bce805a | abb614790bdf41c7db9d09dfdea4385f78c2be52 | /rtk-RQA/rtk/hardware/component/connection/Socket.py | c1454c5a9c43e324ac69b5e3c374fd2decff5864 | [
"BSD-3-Clause"
] | permissive | codacy-badger/rtk | f981bb75aadef6aaeb5a6fa427d0a3a158626a2a | bdb9392164b0b32b0da53f8632cbe6e3be808b12 | refs/heads/master | 2020-03-19T02:46:10.320241 | 2017-10-26T20:08:12 | 2017-10-26T20:08:12 | 135,659,105 | 0 | 0 | null | 2018-06-01T02:43:23 | 2018-06-01T02:43:23 | null | UTF-8 | Python | false | false | 5,321 | py | #!/usr/bin/env python
"""
######################################################
Hardware.Component.Connection Package IC Socket Module
######################################################
"""
# -*- coding: utf-8 -*-
#
# rtk.hardware.component.connection.Socket.py is part of the RTK
# Project
#
# All rights reserved.
import gettext
import locale
try:
import Configuration
import Utilities
from hardware.component.connection.Connection import Model as Connection
except ImportError: # pragma: no cover
import rtk.Configuration as Configuration
import rtk.Utilities as Utilities
from rtk.hardware.component.connection.Connection import Model as \
Connection
__author__ = 'Andrew Rowland'
__email__ = '[email protected]'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2007 - 2015 Andrew "weibullguy" Rowland'
# Add localization support.
try:
    locale.setlocale(locale.LC_ALL, Configuration.LOCALE)
except locale.Error:  # pragma: no cover
    # Fall back to the system default if the configured locale is invalid.
    locale.setlocale(locale.LC_ALL, '')
_ = gettext.gettext
class Socket(Connection):
    """
    The Socket connection data model contains the attributes and methods of an
    IC socket connection component. The attributes of an IC socket connection
    are:

    :cvar int subcategory: the Connection subcategory.

    :ivar float base_hr: the MIL-HDBK-217FN2 base/generic hazard rate.
    :ivar str reason: the reason(s) the Connection is overstressed.
    :ivar float piE: the MIL-HDBK-217FN2 operating environment factor.

    Hazard Rate Models:
        # MIL-HDBK-217FN2, section 15.3.
    """

    # MIL-HDBK-217FN2 hazard rate calculation variables.
    # Quality factors, environment factors and parts-count base hazard
    # rates, indexed by (quality - 1) / (environment_active - 1).
    # ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
    _piQ = [1.0, 2.0]
    _piE = [1.0, 3.0, 14.0, 6.0, 18.0, 8.0, 12.0, 11.0, 13.0, 25.0, 0.5, 14.0,
            36.0, 650.0]
    _lambdab_count = [0.0019, 0.0058, 0.027, 0.012, 0.035, 0.015, 0.023, 0.021,
                      0.025, 0.048, 0.00097, 0.027, 0.070, 1.3]
    # ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----

    subcategory = 74  # Subcategory ID in the common DB.

    def __init__(self):
        """
        Method to initialize a IC Socket connection data model instance.
        """
        super(Socket, self).__init__()
        # Define private dictionary attributes.
        # Define private list attributes.
        # Define private scalar attributes.
        # Define public dictionary attributes.
        # Define public list attributes.
        # Define public scalar attributes.
        self.n_active_contacts = 0
        self.piP = 0.0
        self.base_hr = 0.00042
    def set_attributes(self, values):
        """
        Method to set the Multi-Pin Connection data model attributes.

        :param tuple values: tuple of values to assign to the instance
                             attributes.  Indices 133 and 134 carry piP and
                             the active-contact count respectively.
        :return: (_code, _msg); the error code and error message.
        :rtype: tuple
        """
        _code = 0
        _msg = ''
        (_code, _msg) = Connection.set_attributes(self, values[:133])
        try:
            self.base_hr = 0.00042
            self.piP = float(values[133])
            self.n_active_contacts = int(values[134])
        except IndexError as _err:
            _code = Utilities.error_handler(_err.args)
            _msg = "ERROR: Insufficient input values."
        except(TypeError, ValueError) as _err:
            _code = Utilities.error_handler(_err.args)
            _msg = "ERROR: Converting one or more inputs to correct data type."
        return(_code, _msg)
    def get_attributes(self):
        """
        Method to retrieve the current values of the Multi-Pin Connection data
        model attributes.

        :return: the parent attributes extended with (piP,
                 n_active_contacts)
        :rtype: tuple
        """
        _values = Connection.get_attributes(self)
        _values = _values + (self.piP, self.n_active_contacts)
        return _values
    def calculate_part(self):
        """
        Method to calculate the hazard rate for the Multi-Pin Connection data
        model.

        :return: False if successful or True if an error is encountered.
        :rtype: bool
        """
        from math import exp
        self.hazard_rate_model = {}
        if self.hazard_rate_type == 1:
            # Parts-count prediction: lambdab * piQ.
            self.hazard_rate_model['equation'] = 'lambdab * piQ'
            # Quality factor.
            self.piQ = self._piQ[self.quality - 1]
        elif self.hazard_rate_type == 2:
            # Part-stress prediction: lambdab * piE * piP.
            self.hazard_rate_model['equation'] = 'lambdab * piE * piP'
            # Active pins correction factor (MIL-HDBK-217FN2 15.3); zero
            # when fewer than two active contacts.
            if self.n_active_contacts >= 2:
                self.piP = exp(((self.n_active_contacts - 1) / 10.0)**0.51064)
            else:
                self.piP = 0.0
            self.hazard_rate_model['piP'] = self.piP
            # Environmental correction factor.
            self.piE = self._piE[self.environment_active - 1]
        return Connection.calculate_part(self)
| [
"[email protected]"
] | |
4c54b23822c77598fc8746f24f4c1bf18cdad087 | d9fb6c246965cbf290186268298859ddb913ee6e | /190813/03_mod.py | 3a21a5da1950eb762f029d3aa591e49c9be98f49 | [] | no_license | 91hongppie/algorithm | 1ca6d54de6eab252c708bf83835ace8a109d73fc | 4c2fa8178e0ef7afbf0b736387f05cbada72f95d | refs/heads/master | 2020-07-20T22:17:40.700366 | 2020-06-29T00:06:11 | 2020-06-29T00:06:11 | 206,717,677 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | import sys
sys.stdin = open('sample_input_03.txt', 'r')
N = int(input())
for i in range(1, N+1):
play = list(map(int, input().split()))
test_words = [[] for i in range(play[0])]
for j in range(play[0]):
test_words[j] = list(map(str, input()))
for m in range(play[0]):
for n in range(play[0]):
mo_list = test_words[m][n:play[0]:] | [
"[email protected]"
] | |
cd4f12206ec91523ba27cb33a771f3673c839cd1 | cc129db64fc64d1cb9a99526583771c10e245deb | /tests/test_det_next_michigan_development_corporation.py | da9a98ab1e31ab67be68a83440ae713aa016e955 | [
"MIT"
] | permissive | avelosa/city-scrapers-det | a42df36b7d2e98f7be68ae17e22c03af7a20280c | 964b941b67fb5113cda5e2bebd2ba288ac1422d7 | refs/heads/main | 2023-02-02T01:19:07.396737 | 2020-09-29T16:52:11 | 2020-09-29T16:52:11 | 300,441,174 | 1 | 0 | MIT | 2020-10-01T22:30:23 | 2020-10-01T22:30:22 | null | UTF-8 | Python | false | false | 4,826 | py | from datetime import datetime
from os.path import dirname, join
import pytest
import scrapy
from city_scrapers_core.constants import BOARD, PASSED, TENTATIVE
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from scrapy.settings import Settings
from city_scrapers.spiders.det_next_michigan_development_corporation import (
DetNextMichiganDevelopmentCorporationSpider,
)
# Expected venue and meeting title shared by several assertions below.
LOCATION = {
    "name": "DEGC, Guardian Building",
    "address": "500 Griswold St, Suite 2200, Detroit, MI 48226",
}
TITLE = "Board of Directors"
test_response = file_response(
    join(dirname(__file__), "files", "det_next_michigan_development_corporation.html"),
    url="http://www.degc.org/public-authorities/d-nmdc/",
)
# Freeze "now" so status/id computation is deterministic.
freezer = freeze_time("2018-07-26")
spider = DetNextMichiganDevelopmentCorporationSpider()
spider.settings = Settings(values={"CITY_SCRAPERS_ARCHIVE": False})
freezer.start()
parsed_items = [item for item in spider._next_meetings(test_response)]
freezer.stop()
def test_initial_request_count():
    """Parsing the index page yields follow-up requests for both year pages."""
    freezer.start()
    items = list(spider.parse(test_response))
    freezer.stop()
    assert len(items) == 3
    urls = {r.url for r in items if isinstance(r, scrapy.Request)}
    assert urls == {
        "http://www.degc.org/public-authorities/d-nmdc/fy-2017-2018-meetings/",
        "http://www.degc.org/public-authorities/d-nmdc/dnmdc-fy-2016-2017-meetings/",
    }
def test_title():
assert parsed_items[0]["title"] == TITLE
def test_description():
assert parsed_items[0]["description"] == ""
def test_start():
assert parsed_items[0]["start"] == datetime(2018, 9, 11, 9)
def test_end():
assert parsed_items[0]["end"] is None
def test_id():
assert (
parsed_items[0]["id"]
== "det_next_michigan_development_corporation/201809110900/x/board_of_directors"
)
def test_status():
assert parsed_items[0]["status"] == TENTATIVE
def test_location():
assert parsed_items[0]["location"] == LOCATION
def test_sources():
assert parsed_items[0]["source"] == "http://www.degc.org/public-authorities/d-nmdc/"
def test_links():
assert parsed_items[0]["links"] == []
@pytest.mark.parametrize("item", parsed_items)
def test_all_day(item):
assert item["all_day"] is False
@pytest.mark.parametrize("item", parsed_items)
def test_classification(item):
assert item["classification"] == BOARD
# previous meetings e.g.
# http://www.degc.org/public-authorities/ldfa/fy-2017-2018-meetings/
# Fixture for the archived ("previous meetings") page; parsed items are sorted
# newest-first so index 0 is always the most recent past meeting.
test_prev_response = file_response(
    join(
        dirname(__file__),
        "files",
        "det_next_michigan_development_corporation_prev.html",
    ),
    url="http://www.degc.org/public-authorities/d-nmdc/dnmdc-fy-2016-2017-meetings",
)
freezer.start()
parsed_prev_items = [item for item in spider._parse_prev_meetings(test_prev_response)]
parsed_prev_items = sorted(parsed_prev_items, key=lambda x: x["start"], reverse=True)
freezer.stop()
def test_prev_request_count():
    """_prev_meetings() requests both archived fiscal-year meeting pages."""
    freezer.start()
    items = list(spider._prev_meetings(test_response))
    freezer.stop()
    urls = {r.url for r in items if isinstance(r, scrapy.Request)}
    # The length check is subsumed by the set comparison below; kept verbatim.
    assert len(urls) == 2
    assert urls == {
        "http://www.degc.org/public-authorities/d-nmdc/fy-2017-2018-meetings/",
        "http://www.degc.org/public-authorities/d-nmdc/dnmdc-fy-2016-2017-meetings/",
    }
def test_prev_meeting_count():
assert len(parsed_prev_items) == 1
def test_prev_title():
assert parsed_prev_items[0]["title"] == TITLE
def test_prev_description():
assert parsed_prev_items[0]["description"] == ""
def test_prev_start():
assert parsed_prev_items[0]["start"] == datetime(2017, 8, 8, 9)
def test_prev_end():
assert parsed_prev_items[0]["end"] is None
def test_prev_id():
assert (
parsed_prev_items[0]["id"]
== "det_next_michigan_development_corporation/201708080900/x/board_of_directors"
)
def test_prev_status():
assert parsed_prev_items[0]["status"] == PASSED
def test_prev_location():
assert parsed_prev_items[0]["location"] == LOCATION
def test_prev_source():
assert (
parsed_prev_items[0]["source"]
== "http://www.degc.org/public-authorities/d-nmdc/dnmdc-fy-2016-2017-meetings"
)
def test_prev_links():
assert parsed_prev_items[0]["links"] == [
{
"href": "http://www.degc.org/wp-content/uploads/2016-08-09-DNMDC-Special-Board-Meeting-Agenda-4-1.pdf", # noqa
"title": "D-NMDC Agenda",
},
]
@pytest.mark.parametrize("item", parsed_prev_items)
def test_prev_all_day(item):
assert item["all_day"] is False
@pytest.mark.parametrize("item", parsed_prev_items)
def test_prev_classification(item):
assert item["classification"] == BOARD
| [
"[email protected]"
] | |
4212426d83cef5a31b6993b1859aa096f5a86957 | c7bb490ef96fda51a946478a4f584814e1665a6a | /backend/urls.py | 06c33f1ea3c2e43ed3c886400d353b67ec87d687 | [] | no_license | pawanpaudel93/motion-planning-dashboard | e70acc9737cdedf0fd0beac0a0700cc88f9c2559 | 642f5955d518747dfc14f1f22a93ef20784329d8 | refs/heads/master | 2023-03-11T14:33:31.643898 | 2021-02-28T11:26:16 | 2021-02-28T11:26:16 | 340,398,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | """MPD URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
"""
from django.contrib import admin
from django.urls import path, include, re_path
from rest_framework import routers
from .api import urls as api_urls
from .api.views import index_view
router = routers.DefaultRouter()
urlpatterns = [
path('api/v1/', include(api_urls)),
path('admin/', admin.site.urls),
re_path(r'^.*$', index_view, name='index')
]
| [
"[email protected]"
] | |
e8c4c60a57463e9f15f1b88dd4eda1629eea2dfc | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /2JHYavYqynX8ZCmMG_5.py | f3bd9ad800ee0f88625397c941672c01b7288b50 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py |
def ascii_sort(lst):
    """Return whichever of the first two strings in *lst* has the smaller
    sum of character code points; ties favour ``lst[0]``."""
    first_weight = sum(map(ord, lst[0]))
    second_weight = sum(map(ord, lst[1]))
    return lst[1] if first_weight > second_weight else lst[0]
| [
"[email protected]"
] | |
0bc44e39ed3c0411a6484900df8dc4ccda28fa3a | 67b0379a12a60e9f26232b81047de3470c4a9ff9 | /profile/migrations/0042_auto_20170225_1639.py | 6f002bfd9f51f8ca97ff8153953db520d0afe6e9 | [] | no_license | vintkor/whitemandarin | 8ea9022b889fac718e0858873a07c586cf8da729 | 5afcfc5eef1bb1cc2febf519b04a4819a7b9648f | refs/heads/master | 2021-05-06T03:35:09.367375 | 2017-12-20T15:43:08 | 2017-12-20T15:43:08 | 114,904,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-25 14:39
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated: alters User.date_of_birth to carry a default.

    NOTE(review): the default is a constant frozen at makemigrations time
    (2017-02-25 14:39 UTC, matching this migration's timestamp), which
    suggests the model declared ``default=datetime.now()`` — calling the
    function instead of passing the callable. Confirm whether the model
    intends a dynamic "today" default.
    """
    dependencies = [
        ('profile', '0041_auto_20170217_1405'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='date_of_birth',
            field=models.DateField(default=datetime.datetime(2017, 2, 25, 14, 39, 18, 342403, tzinfo=utc)),
        ),
    ]
| [
"[email protected]"
] | |
6eb0d30982c51c95fe8b185a70ce7a5e912cdd20 | 2da72c9f9bbb0b5db33710cddbdee28503e5a606 | /UCI/pyQT-matplot-example 2.py | 0228e2bce7c9d982c2ca7970f732c4860c0e6cc5 | [] | no_license | gddickinson/python_code | 2e71fb22b929cb26c2a1456b11dc515af048c441 | dbb20e171fb556e122350fb40e12cc76adbb9a66 | refs/heads/master | 2022-10-26T15:20:40.709820 | 2022-10-11T16:06:27 | 2022-10-11T16:06:27 | 44,060,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,689 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 23 16:50:19 2015
@author: George
"""
import sys
import random

from PyQt4 import QtGui

import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
# Bug fix: NavigationToolbar2QTAgg lives in the Qt4Agg backend module, not at
# the top level of the matplotlib package — the original
# `from matplotlib import NavigationToolbar2QTAgg` raised ImportError on load.
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
class Window(QtGui.QDialog):
    """PyQt4 dialog embedding a matplotlib figure.

    Layout, top to bottom: navigation toolbar, figure canvas, and a "Plot"
    button that redraws the axes with fresh random data on every click.
    """
    def __init__(self, parent=None):
        """Build the widget tree and wire the button to :meth:`plot`."""
        super(Window, self).__init__(parent)
        # a figure instance to plot on
        self.figure = plt.figure()
        # this is the Canvas Widget that displays the `figure`
        # it takes the `figure` instance as a parameter to __init__
        self.canvas = FigureCanvas(self.figure)
        # this is the Navigation widget
        # it takes the Canvas widget and a parent
        self.toolbar = NavigationToolbar(self.canvas, self)
        # Just some button connected to `plot` method
        self.button = QtGui.QPushButton('Plot')
        self.button.clicked.connect(self.plot)
        # set the layout
        layout = QtGui.QVBoxLayout()
        layout.addWidget(self.toolbar)
        layout.addWidget(self.canvas)
        layout.addWidget(self.button)
        self.setLayout(layout)
    def plot(self):
        ''' plot some random stuff '''
        # random data
        data = [random.random() for i in range(10)]
        # create an axis
        ax = self.figure.add_subplot(111)
        # discards the old graph
        ax.hold(False)  # NOTE(review): Axes.hold was removed in matplotlib 2.x
        # plot data
        ax.plot(data, '*-')
        # refresh canvas
        self.canvas.draw()
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
main = Window()
main.show()
sys.exit(app.exec_()) | [
"[email protected]"
] | |
bd8527aee37e224f869349bec2f6fb2bdadc1d5b | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /.history/예외처리_20200709144804.py | 9b8a16ecb397905296a8e33b88abcd084eadb309 | [] | no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | try:
print("나누기 전용 계산기입니다.")
num1 = int(input("첫 번째 숫자를 입력하세요 : "))
num2 = int(input("두 번째 숫자를 입력하세요 : "))
print("{0} / {1} = {2}".format(n)) | [
"[email protected]"
] | |
cc878c320008f8db66aa030c2f2f6bc3e205a9cc | 6d1728bf105a7d6481d0bbca2b88f4478e0632d9 | /study/ch1/area.py | 1a498690da37f4f891110371603717db2e529035 | [] | no_license | Phantomn/Python | 00c63aceb2d4aa0db71fe5e33fe8b5159b41aadd | 12808adf4b52c60cfe94befb6daa1e8187224beb | refs/heads/Python | 2022-11-09T16:49:49.165884 | 2019-08-05T07:30:07 | 2019-08-05T07:30:07 | 44,149,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | horizon=0
# Prompt for the rectangle's two side lengths and print its integer area.
vertical=0  # NOTE(review): dead initialiser — reassigned from input() below
print("Input horizon length : ", end="")
horizon=int(input())
print("Input vertical length : ",end="")
vertical=int(input())
print("rectangle is %d."%(horizon*vertical))
"[email protected]"
] | |
0baadeafe82ed3f2330579af9aeb7806db738dc3 | 7f8c24fe161fee3f32e206e013ea89fc8eb9a50a | /example_api/urls.py | 4c07dd5d1421c42a6038b536a60b6f7e7826f9cc | [] | no_license | vnitikesh/rest-registration | a04f4cf643766d3844e7a63e0616157d1c1f1e9a | 0578589f6cb9b9138fa5915395bf616de57eaf0b | refs/heads/main | 2023-02-18T12:32:40.392439 | 2021-01-21T23:55:23 | 2021-01-21T23:55:23 | 331,453,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | from django.urls import path
from . import views
from rest_framework.routers import DefaultRouter
urlpatterns = [
path('category/', views.CategoryListView.as_view(), name = 'category-list'),
path('category/<int:pk>/', views.CategoryDetailView.as_view(), name = 'category-detail'),
path('product/', views.ProductRecordView.as_view(), name = 'product-list'),
path('cart/', views.CartViewSet.as_view(), name = 'cart'),
path('checkout/', views.CheckoutView.as_view(), name = 'checkout'),
#path('order/', views.OrderViewSet.as_view(), name = 'order')
]
| [
"[email protected]"
] | |
637aebc9dc0ee30985a63efc692a3f892fbed308 | c6f9a46393048add6fad888d382978b9be12dd4c | /python/ql/test/experimental/dataflow/strange-pointsto-interaction-investigation/src/urandom_problem.py | d4a06529cf60991084b7d954d234703134c192b9 | [
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] | permissive | luchua-bc/ql | 6e9480e8c92cbb12570fcc7f65366bfdd54dad06 | a1d9228a66cb80329041fa8d95b08ce5697dec54 | refs/heads/master | 2023-01-23T17:11:54.776916 | 2022-07-20T14:36:37 | 2022-07-20T14:36:37 | 248,313,302 | 4 | 0 | MIT | 2023-01-16T09:13:30 | 2020-03-18T18:35:48 | CodeQL | UTF-8 | Python | false | false | 920 | py | # These are defined so that we can evaluate the test code.
NONSOURCE = "not a source"
SOURCE = "source"
def is_source(x):  # truthy only for the sentinel values these flow tests treat as tainted
    return x == "source" or x == b"source" or x == 42 or x == 42.0 or x == 42j
def SINK(x):  # expected sink: a source value reaching it means the flow was found
    if is_source(x):
        print("OK")
    else:
        print("Unexpected flow", x)
def SINK_F(x):  # negative sink: a source value reaching it indicates a false positive
    if is_source(x):
        print("Unexpected flow", x)
    else:
        print("OK")
# ------------------------------------------------------------------------------
# Actual tests
# ------------------------------------------------------------------------------
def give_src():  # taint origin tracked by the inline `# $ flow=...` expectations below
    return SOURCE
foo = give_src()
SINK(foo) # $ flow="SOURCE, l:-3 -> foo"
import os
cond = os.urandom(1)[0] > 128 # $ unresolved_call=os.urandom(..)
if cond:
pass
if cond:
pass
foo = give_src() # $ unresolved_call=give_src()
SINK(foo) # $ unresolved_call=SINK(..) MISSING: flow="SOURCE, l:-15 -> foo"
| [
"[email protected]"
] | |
bd1236dee44cc218e34f71aa057ce6aeaae640d8 | 4f365fbdfd4701c3a294dfba17c1377d4eb369d8 | /jinja2htmlcompress.py | 507c7509a9a3a8418fcb4ce187fb21809e76fc26 | [
"BSD-3-Clause"
] | permissive | Orvillar/jinja2-htmlcompress | 4e725f9b6ceb6f327d4247d7dab6f55d344039ea | b34dc409762aaf205ccd59e37ad4b3dc5331904d | refs/heads/master | 2020-04-07T16:06:54.607802 | 2018-11-21T08:31:21 | 2018-11-21T08:31:21 | 158,515,466 | 0 | 0 | NOASSERTION | 2018-11-21T08:29:20 | 2018-11-21T08:29:19 | null | UTF-8 | Python | false | false | 6,354 | py | # -*- coding: utf-8 -*-
"""
jinja2htmlcompress
~~~~~~~~~~~~~~~~~~
A Jinja2 extension that eliminates useless whitespace at template
compilation time without extra overhead.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
from jinja2.ext import Extension
from jinja2.lexer import Token, describe_token
from jinja2 import TemplateSyntaxError
_tag_re = re.compile(r'(?:<(/?)([a-zA-Z0-9_-]+)\s*|(>\s*))(?s)')
_ws_normalize_re = re.compile(r'[ \t\r\n]+')
class StreamProcessContext(object):
def __init__(self, stream):
self.stream = stream
self.token = None
self.stack = []
def fail(self, message):
raise TemplateSyntaxError(message, self.token.lineno,
self.stream.name, self.stream.filename)
def _make_dict_from_listing(listing):
rv = {}
for keys, value in listing:
for key in keys:
rv[key] = value
return rv
class HTMLCompress(Extension):
isolated_elements = set(['script', 'style', 'noscript', 'textarea'])
void_elements = set(['br', 'img', 'area', 'hr', 'param', 'input',
'embed', 'col'])
block_elements = set(['div', 'p', 'form', 'ul', 'ol', 'li', 'table', 'tr',
'tbody', 'thead', 'tfoot', 'tr', 'td', 'th', 'dl',
'dt', 'dd', 'blockquote', 'h1', 'h2', 'h3', 'h4',
'h5', 'h6', 'pre'])
breaking_rules = _make_dict_from_listing([
(['p'], set(['#block'])),
(['li'], set(['li'])),
(['td', 'th'], set(['td', 'th', 'tr', 'tbody', 'thead', 'tfoot'])),
(['tr'], set(['tr', 'tbody', 'thead', 'tfoot'])),
(['thead', 'tbody', 'tfoot'], set(['thead', 'tbody', 'tfoot'])),
(['dd', 'dt'], set(['dl', 'dt', 'dd']))
])
def is_isolated(self, stack):
for tag in reversed(stack):
if tag in self.isolated_elements:
return True
return False
def is_breaking(self, tag, other_tag):
breaking = self.breaking_rules.get(other_tag)
return breaking and (tag in breaking or
('#block' in breaking and tag in self.block_elements))
def enter_tag(self, tag, ctx):
while ctx.stack and self.is_breaking(tag, ctx.stack[-1]):
self.leave_tag(ctx.stack[-1], ctx)
if tag not in self.void_elements:
ctx.stack.append(tag)
def leave_tag(self, tag, ctx):
if not ctx.stack:
ctx.fail('Tried to leave "%s" but something closed '
'it already' % tag)
if tag == ctx.stack[-1]:
ctx.stack.pop()
return
for idx, other_tag in enumerate(reversed(ctx.stack)):
if other_tag == tag:
for num in xrange(idx + 1):
ctx.stack.pop()
elif not self.breaking_rules.get(other_tag):
break
def normalize(self, ctx):
pos = 0
buffer = []
def write_data(value):
if not self.is_isolated(ctx.stack):
value = _ws_normalize_re.sub(' ', value.strip())
buffer.append(value)
for match in _tag_re.finditer(ctx.token.value):
closes, tag, sole = match.groups()
preamble = ctx.token.value[pos:match.start()]
write_data(preamble)
if sole:
write_data(sole)
else:
buffer.append(match.group())
(closes and self.leave_tag or self.enter_tag)(tag, ctx)
pos = match.end()
write_data(ctx.token.value[pos:])
return u''.join(buffer)
def filter_stream(self, stream):
ctx = StreamProcessContext(stream)
for token in stream:
if token.type != 'data':
yield token
continue
ctx.token = token
value = self.normalize(ctx)
yield Token(token.lineno, 'data', value)
class SelectiveHTMLCompress(HTMLCompress):
def filter_stream(self, stream):
ctx = StreamProcessContext(stream)
strip_depth = 0
while 1:
if stream.current.type == 'block_begin':
if stream.look().test('name:strip') or \
stream.look().test('name:endstrip'):
stream.skip()
if stream.current.value == 'strip':
strip_depth += 1
else:
strip_depth -= 1
if strip_depth < 0:
ctx.fail('Unexpected tag endstrip')
stream.skip()
if stream.current.type != 'block_end':
ctx.fail('expected end of block, got %s' %
describe_token(stream.current))
stream.skip()
if strip_depth > 0 and stream.current.type == 'data':
ctx.token = stream.current
value = self.normalize(ctx)
yield Token(stream.current.lineno, 'data', value)
else:
yield stream.current
stream.next()
def test():
from jinja2 import Environment
env = Environment(extensions=[HTMLCompress])
tmpl = env.from_string('''
<html>
<head>
<title>{{ title }}</title>
</head>
<script type=text/javascript>
if (foo < 42) {
document.write('Foo < Bar');
}
</script>
<body>
<li><a href="{{ href }}">{{ title }}</a><br>Test Foo
<li><a href="{{ href }}">{{ title }}</a><img src=test.png>
</body>
</html>
''')
print tmpl.render(title=42, href='index.html')
env = Environment(extensions=[SelectiveHTMLCompress])
tmpl = env.from_string('''
Normal <span> unchanged </span> stuff
{% strip %}Stripped <span class=foo > test </span>
<a href="foo"> test </a> {{ foo }}
Normal <stuff> again {{ foo }} </stuff>
<p>
Foo<br>Bar
Baz
<p>
Moep <span>Test</span> Moep
</p>
{% endstrip %}
''')
print tmpl.render(foo=42)
if __name__ == '__main__':
test()
| [
"[email protected]"
] | |
68bda07db08e3d6b58a8cbb0bf86ce63b584f900 | 5a1f77b71892745656ec9a47e58a078a49eb787f | /4_Backwoods_Forest/140-A_Fine_Mint/fine_mint.py | f17553bc8c35e99e05fe9b3bbd9916adfeaa85f8 | [
"MIT"
] | permissive | ripssr/Code-Combat | 78776e7e67c033d131e699dfeffb72ca09fd798e | fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef | refs/heads/master | 2020-06-11T20:17:59.817187 | 2019-07-21T09:46:04 | 2019-07-21T09:46:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | def pickUpCoin():
coin = hero.findNearestItem()
if coin:
hero.moveXY(coin.pos.x, coin.pos.y)
def attackEnemy():
    # Fight the closest enemy, preferring the cleave ability whenever its
    # cooldown has elapsed; `hero` is the CodeCombat-provided global.
    enemy = hero.findNearestEnemy()
    if enemy:
        if hero.isReady("cleave"):
            hero.cleave(enemy)
        else:
            hero.attack(enemy)
# Game loop: fight first, then collect loot, forever.
while True:
    attackEnemy()
    pickUpCoin()
| [
"[email protected]"
] | |
203c4c5c65469b178d194de6b85feec2a5037e9a | 129941a1fb7c0bbd9969f0dd8843b057ce9f3666 | /VAJets/PKUTreeMaker/test/Wcrab/crab3_analysismu.py | 09dc3efeef0cc17499456da57454ef8dcc335da1 | [] | no_license | PKUHEPEWK/VBS_WGamma | 7cf43f136dd92777ab7a8a742c163e222b1f4dbf | 0f94abb2d4303b1c08d62971a74f25b100cbe042 | refs/heads/master | 2020-03-25T04:36:21.119377 | 2019-07-15T02:56:32 | 2019-07-15T02:56:32 | 143,404,007 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | from WMCore.Configuration import Configuration
config = Configuration()
config.section_("General")
config.General.requestName = 'SMu16B-v1'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles =['Summer16_23Sep2016BCDV4_DATA_L1FastJet_AK4PFchs.txt','Summer16_23Sep2016BCDV4_DATA_L2Relative_AK4PFchs.txt','Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK4PFchs.txt','Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK4PFchs.txt','Summer16_23Sep2016BCDV4_DATA_L1FastJet_AK4PFPuppi.txt','Summer16_23Sep2016BCDV4_DATA_L2Relative_AK4PFPuppi.txt','Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK4PFPuppi.txt','Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK4PFPuppi.txt']
# Name of the CMSSW configuration file
config.JobType.psetName = 'analysis_data.py'
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
config.Data.inputDataset = '/SingleMuon/Run2016B-03Feb2017_ver2-v2/MINIAOD'
config.Data.inputDBS = 'global'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 40
config.Data.lumiMask = 'Cert_271036-284044_13TeV_23Sep2016ReReco_Collisions16_JSON.txt'
#config.Data.runRange = '246908-258750'
#config.Data.outLFNDirBase = '/store/user/%s/' % (getUsernameFromSiteDB())
config.Data.publication = False
config.Data.outputDatasetTag = 'SMu16B-v1'
config.section_("Site")
config.Site.storageSite = 'T3_US_FNALLPC' #T2_CN_Beijing'
| [
"[email protected]"
] | |
a7b174b85eba3c6f121e88eb9985de14f93428b9 | 14ac991bba2eb7d59a1d76db792b7689316f8060 | /leetcode/00179.py | 2097fd3046480dd7c91a1af857c955626b82b82d | [] | no_license | munagekar/cp | bde88fa565a7e2158ebe0f2611c4718a3d2970f1 | c25d29f68943e3721233e177abe13068e5f40e4b | refs/heads/master | 2021-07-04T05:00:02.511874 | 2021-05-30T14:30:05 | 2021-05-30T14:30:05 | 240,286,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | from itertools import zip_longest
from functools import cmp_to_key
def cmp(a, b):
    """Three-way comparator for "largest number" ordering of digit strings.

    Returns 1 when *a* should sort after *b* (concatenation ``a + b`` beats
    ``b + a``), -1 when it should sort before, and 0 when the concatenations
    are equal. The original never returned 0, making the comparator
    inconsistent for equivalent keys (``cmp(a, b) == cmp(b, a) == -1``).
    """
    if a + b > b + a:
        return 1
    if a + b < b + a:
        return -1
    return 0
class Solution:
def largestNumber(self, nums: List[int]) -> str:
nums = map(str, nums)
nums = sorted(nums, key=cmp_to_key(cmp), reverse=True)
nums = "".join(nums)
return nums.lstrip("0") or "0" | [
"[email protected]"
] | |
62a61d7f251b2dd796c2a0864e338c6272236b1a | 87828431072e3c60a92dc274b078d7cf1e5705be | /back_python/account/migrations/0001_initial.py | 34d3acacd2cf509d472797922ba4727ed9535d39 | [] | no_license | cash2one/habit | 90adfd80427a0c0d04104ea5cf8123cf025b2d8b | 3782e498e1e40d6b638aaf2c7c1ac087c0739a36 | refs/heads/master | 2021-01-19T12:32:51.627847 | 2017-04-11T15:41:28 | 2017-04-11T15:41:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,302 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-25 08:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Account ledger table.

    Do not hand-edit field definitions here; generate a follow-up migration
    instead so already-applied databases stay consistent.
    """
    initial = True
    dependencies = [
        ('activity', '0013_auto_20170125_1649'),
    ]
    operations = [
        migrations.CreateModel(
            name='Account',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tradeDate', models.DateField(auto_now=True, verbose_name='时间')),
                ('tradeType', models.CharField(choices=[('fee', '套餐服务费'), ('deposit', '押金'), ('milyInput', '套餐囤米'), ('milyInputByDeposit', '押金囤米'), ('milyOutput', '米粒打赏'), ('milyOutputByDonate', '米粒捐赠'), ('feedBack', '打卡奖励米粒'), ('feedBackReturnDeposit', '打卡返还押金'), ('aveDeposit', '平均分配懒人押金')], max_length=50, verbose_name='类型')),
                ('fee', models.IntegerField(default=0, verbose_name='套餐服务费')),
                ('deposit', models.IntegerField(default=0, verbose_name='囤米押金')),
                ('milyInput', models.IntegerField(default=0, verbose_name='套餐囤米')),
                ('milyInputByDeposit', models.IntegerField(default=0, verbose_name='押金囤米')),
                ('milyOutput', models.IntegerField(default=0, verbose_name='米粒打赏')),
                ('milyOutputByDonate', models.IntegerField(default=0, verbose_name='米粒捐赠')),
                ('feedBack', models.IntegerField(default=0, verbose_name='打卡奖励米粒')),
                ('feedBackReturnDeposit', models.IntegerField(default=0, verbose_name='打卡奖励押金')),
                ('aveDeposit', models.IntegerField(default=0, verbose_name='平均分配懒人押金')),
                ('createdTime', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('updatedTime', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
                ('activity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='activity.Activity', verbose_name='活动')),
            ],
        ),
    ]
"[email protected]"
] | |
72e87ff5fac87b45a4fbe10d20bbd6dc95907e38 | 242ebcb7220c2e16c141a6bea4a09c7cb5e4287d | /accounts/forms.py | 83f3c4a31f7b0a3a43e78a73a2980318f2d55c71 | [] | no_license | olivx/estudos_crud | 06ed8c269a4c36db3579daf6d6aef5e7d49dc5f9 | 24af031ed44a7c6cf567368556d368fe58ab1090 | refs/heads/master | 2021-01-11T09:28:49.355388 | 2017-03-03T15:17:25 | 2017-03-03T15:17:25 | 81,199,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,126 | py | from django import forms
from django.contrib.auth import authenticate
from accounts.models import User
from django.utils.translation import ugettext_lazy as _
class RegisterForm(forms.ModelForm):
    """Signup form: username/email plus a double-entered password."""

    password1 = forms.CharField(max_length=30, widget=forms.PasswordInput, required=True)
    password2 = forms.CharField(max_length=30, widget=forms.PasswordInput, required=True)

    def clean_password2(self):
        """Validate that both password entries match.

        Fixes vs. original: uses ``.get()`` because ``password1`` is absent
        from ``cleaned_data`` when it failed its own validation (the direct
        index raised KeyError), and returns the field's own value — the
        Django ``clean_<field>`` convention — instead of the whole
        ``cleaned_data`` dict, which Django would have stored as the cleaned
        value of ``password2``.
        """
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(_("The two password fields didn't match."))
        return password2

    class Meta:
        model = User
        fields = ('username', 'email', 'password1', 'password2')

    def save(self, commit=True):
        """Create the user, hashing the password via ``set_password()``.

        Bug fix: the original assigned the username into ``user.email`` and
        then immediately overwrote it with the real email, so the username
        was never set by these lines.
        """
        user = super(RegisterForm, self).save(commit=False)
        user.username = self.cleaned_data['username']
        user.email = self.cleaned_data['email']
        user.set_password(self.cleaned_data['password1'])
        if commit:
            user.save()
        return user
class AuthenticanUserForm(forms.Form):
    # NOTE(review): class name is misspelled ("Authentican" -> "Authentication");
    # left unchanged because views may import it under this exact name.
    email = forms.EmailField(label='Email', max_length=30, required=True)
    password = forms.CharField(label='Password', max_length=30, required=True, widget=forms.PasswordInput)
    # User-facing validation messages, keyed by error code.
    # NOTE(review): the 'email_confirmation' text contains typos ("the your
    # eamil"); it is a runtime string, so fixing it is a deliberate follow-up.
    error_messages = {
        'invalid_login': _(
            "Please enter a correct %(email)s and password. Note that both "
            "fields may be case-sensitive."
        ),
        'inactive': _("This account is inactive."),
        'email_confirmation': _(
            'this email is not confirmed yet, please confirm the your eamil and try again'
        ),
    }
    def clean(self):
        """Authenticate with the submitted credentials.

        On success the user object is stashed on ``self.user`` for the view
        to log in; on failure an ``invalid_login`` form error is raised.
        """
        email = self.cleaned_data.get('email')
        password = self.cleaned_data.get('password')
        if email and password:
            self.user = authenticate(email=email, password=password)
            if self.user is None:
                raise forms.ValidationError(
                    self.error_messages['invalid_login'],
                    code='invalid_login',
                    params={'email': 'Email'},
                )
        return self.cleaned_data
    def confirm_login_allowed(self, user):
        """
        Controls whether the given User may log in. This is a policy setting,
        independent of end-user authentication. This default behavior is to
        allow login by active users, and reject login by inactive users.
        If the given user cannot log in, this method should raise a
        ``forms.ValidationError``.
        If the given user may log in, this method should return None.
        """
        # NOTE(review): clean() above never calls this hook, so these checks
        # only run if the view invokes confirm_login_allowed() itself — confirm.
        if not user.is_active:
            raise forms.ValidationError(
                self.error_messages['inactive'],
                code='inactive',
            )
        if not user.profile.email_confirmation:
            raise forms.ValidationError(
                self.error_messages['email_confirmation'],
                code='email_confirmation'
            )
    class Meta:
        # NOTE(review): Meta has no effect on a plain forms.Form (it is a
        # ModelForm concept); retained verbatim.
        fields = ('email', 'password')
| [
"[email protected]"
] | |
1cd644fe4370089fe5cf86ae2fc2d3fa316e8e2e | e629d61db2f08f66cf46d934ab0f87fa1666de05 | /backend/lively_heart_25130/urls.py | 5c32c3d6b9e17dce8e7eb899ed0a90b4b5455ae7 | [] | no_license | crowdbotics-apps/lively-heart-25130 | ec80559da8d6b168df1ce75415c5d6b916c97ee1 | ed33785297cbb8f794034de1bc3c7fb81bdbe048 | refs/heads/master | 2023-03-24T16:57:41.146127 | 2021-03-19T21:41:18 | 2021-03-19T21:41:18 | 349,561,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,235 | py | """lively_heart_25130 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Lively Heart"
admin.site.site_title = "Lively Heart Admin Portal"
admin.site.index_title = "Lively Heart Admin"
# swagger
api_info = openapi.Info(
title="Lively Heart API",
default_version="v1",
description="API documentation for Lively Heart App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
| [
"[email protected]"
] | |
ef679fa89caf7d38e7aa2766c74680ff885e8be4 | ae9bb7babce2a0349ae932985cf418a03057c670 | /ProgramAndDataStructure/list/__init__.py | 50e5397d5291650f1e1f4a4e99a244b430ba0f89 | [] | no_license | Veraun/HogwartsSDET17-1 | d2592fcb4c9c63724c19bcf9edde349ebcd2c8af | 6648dbfb640b065ff2c76cb6889a8f9e4f124b91 | refs/heads/main | 2023-07-02T05:20:32.161248 | 2021-08-06T03:55:13 | 2021-08-06T03:55:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | '''
#!/usr/bin/python3
# -*- coding: utf-8 -*-
@author: wangwei
@project: HogwartsSDET17
@file: __init__.py.py
@time: 2021/5/20 19:54
@Email: Warron.Wang
''' | [
"[email protected]"
] | |
337238a653f2c421c1f017238cbef58842b56a43 | 567ecf4ea5afbd7eb3003f7e14e00c7b9289b9c6 | /ax/storage/json_store/decoders.py | 7a586e03ddb3b32b0a5780c941e67e791e29d11a | [
"MIT"
] | permissive | danielrjiang/Ax | f55ef168a59381b5a03c6d51bc394f6c72ed0f39 | 43014b28683b3037b5c7307869cb9b75ca31ffb6 | refs/heads/master | 2023-03-31T12:19:47.118558 | 2019-12-02T16:47:39 | 2019-12-02T16:49:36 | 225,493,047 | 0 | 0 | MIT | 2019-12-03T00:09:52 | 2019-12-03T00:09:51 | null | UTF-8 | Python | false | false | 3,501 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from datetime import datetime
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from ax.core.arm import Arm
from ax.core.base_trial import TrialStatus
from ax.core.batch_trial import AbandonedArm, BatchTrial, GeneratorRunStruct
from ax.core.generator_run import GeneratorRun
from ax.core.runner import Runner
from ax.core.trial import Trial
if TYPE_CHECKING:
# import as module to make sphinx-autodoc-typehints happy
from ax import core # noqa F401 # pragma: no cover
def batch_trial_from_json(
    experiment: "core.experiment.Experiment",
    index: int,
    trial_type: Optional[str],
    status: TrialStatus,
    time_created: datetime,
    time_completed: Optional[datetime],
    time_staged: Optional[datetime],
    time_run_started: Optional[datetime],
    abandoned_reason: Optional[str],
    run_metadata: Optional[Dict[str, Any]],
    generator_run_structs: List[GeneratorRunStruct],
    runner: Optional[Runner],
    abandoned_arms_metadata: Dict[str, AbandonedArm],
    num_arms_created: int,
    status_quo: Optional[Arm],
    status_quo_weight_override: float,
    optimize_for_power: Optional[bool],
) -> BatchTrial:
    """Load Ax BatchTrial from JSON.

    Other classes don't need explicit deserializers, because we can just use
    their constructors (see decoder.py). However, the constructor for Batch
    does not allow us to exactly recreate an existing object.
    """
    # The constructor only accepts `experiment`; all remaining state is
    # restored by assigning the private attributes directly (see docstring).
    batch = BatchTrial(experiment=experiment)
    batch._index = index
    batch._trial_type = trial_type
    batch._status = status
    batch._time_created = time_created
    batch._time_completed = time_completed
    batch._time_staged = time_staged
    batch._time_run_started = time_run_started
    batch._abandoned_reason = abandoned_reason
    batch._run_metadata = run_metadata or {}  # normalize missing metadata to {}
    batch._generator_run_structs = generator_run_structs
    batch._runner = runner
    batch._abandoned_arms_metadata = abandoned_arms_metadata
    batch._num_arms_created = num_arms_created
    batch._status_quo = status_quo
    batch._status_quo_weight_override = status_quo_weight_override
    batch.optimize_for_power = optimize_for_power
    return batch
def trial_from_json(
    experiment: "core.experiment.Experiment",
    index: int,
    trial_type: Optional[str],
    status: TrialStatus,
    time_created: datetime,
    time_completed: Optional[datetime],
    time_staged: Optional[datetime],
    time_run_started: Optional[datetime],
    abandoned_reason: Optional[str],
    run_metadata: Optional[Dict[str, Any]],
    generator_run: GeneratorRun,
    runner: Optional[Runner],
    num_arms_created: int,
) -> Trial:
    """Reconstruct an Ax ``Trial`` from its serialized JSON fields.

    The public ``Trial`` constructor cannot recreate an existing trial
    exactly (see decoder.py), so deserialized state is assigned straight
    to the instance's private attributes.
    """
    trial = Trial(experiment=experiment, generator_run=generator_run)
    restored_state = {
        "_index": index,
        "_trial_type": trial_type,
        "_status": status,
        "_time_created": time_created,
        "_time_completed": time_completed,
        "_time_staged": time_staged,
        "_time_run_started": time_run_started,
        "_abandoned_reason": abandoned_reason,
        # Normalize a missing metadata mapping to an empty dict.
        "_run_metadata": run_metadata or {},
        "_runner": runner,
        "_num_arms_created": num_arms_created,
    }
    for attr_name, attr_value in restored_state.items():
        setattr(trial, attr_name, attr_value)
    return trial
| [
"[email protected]"
] | |
856043c72dfa18187c13e630e6c9e58fcc3c660b | a56a74b362b9263289aad96098bd0f7d798570a2 | /venv/lib/python3.8/site-packages/matplotlib/_pylab_helpers.py | 2407b573c4aabbe64132bc3a0ae71163132785bc | [
"MIT"
] | permissive | yoonkt200/ml-theory-python | 5812d06841d30e1068f6592b5730a40e87801313 | 7643136230fd4f291b6e3dbf9fa562c3737901a2 | refs/heads/master | 2022-12-21T14:53:21.624453 | 2021-02-02T09:33:07 | 2021-02-02T09:33:07 | 132,319,537 | 13 | 14 | MIT | 2022-12-19T17:23:57 | 2018-05-06T08:17:45 | Python | UTF-8 | Python | false | false | 3,445 | py | """
Manage figures for pyplot interface.
"""
import atexit
import gc
class Gcf:
    """
    Singleton to manage a set of integer-numbered figures.
    This class is never instantiated; it consists of two class
    attributes (a list and a dictionary), and a set of static
    methods that operate on those attributes, accessing them
    directly as class attributes.
    Attributes
    ----------
    figs
        dictionary of the form {*num*: *manager*, ...}
    _activeQue
        list of *managers*, with active one at the end
    """
    # Shared class-level state; all methods mutate these two containers.
    _activeQue = []
    figs = {}
    @classmethod
    def get_fig_manager(cls, num):
        """
        If figure manager *num* exists, make it the active
        figure and return the manager; otherwise return *None*.
        """
        manager = cls.figs.get(num, None)
        if manager is not None:
            cls.set_active(manager)
        return manager
    @classmethod
    def destroy(cls, num):
        """
        Try to remove all traces of figure *num*.
        In the interactive backends, this is bound to the
        window "destroy" and "delete" events.
        """
        if not cls.has_fignum(num):
            return
        manager = cls.figs[num]
        # Disconnect the GUI close-event callback before tearing down,
        # so destroying the window does not re-enter this method.
        manager.canvas.mpl_disconnect(manager._cidgcf)
        cls._activeQue.remove(manager)
        del cls.figs[num]
        manager.destroy()
        # Encourage prompt reclamation of the (potentially large) figure.
        gc.collect(1)
    @classmethod
    def destroy_fig(cls, fig):
        "*fig* is a Figure instance"
        # Look up the figure number owned by *fig*'s manager, if any.
        num = next((manager.num for manager in cls.figs.values()
                    if manager.canvas.figure == fig), None)
        if num is not None:
            cls.destroy(num)
    @classmethod
    def destroy_all(cls):
        # this is need to ensure that gc is available in corner cases
        # where modules are being torn down after install with easy_install
        import gc # noqa
        # Iterate over a copy: manager.destroy() can mutate cls.figs.
        for manager in list(cls.figs.values()):
            manager.canvas.mpl_disconnect(manager._cidgcf)
            manager.destroy()
        cls._activeQue = []
        cls.figs.clear()
        gc.collect(1)
    @classmethod
    def has_fignum(cls, num):
        """
        Return *True* if figure *num* exists.
        """
        return num in cls.figs
    @classmethod
    def get_all_fig_managers(cls):
        """
        Return a list of figure managers.
        """
        return list(cls.figs.values())
    @classmethod
    def get_num_fig_managers(cls):
        """
        Return the number of figures being managed.
        """
        return len(cls.figs)
    @classmethod
    def get_active(cls):
        """
        Return the manager of the active figure, or *None*.
        """
        # The active manager is kept at the end of the queue.
        if len(cls._activeQue) == 0:
            return None
        else:
            return cls._activeQue[-1]
    @classmethod
    def set_active(cls, manager):
        """
        Make the figure corresponding to *manager* the active one.
        """
        # Rebuild the queue without *manager* so it appears exactly once,
        # then append it so it becomes the active (last) entry.
        oldQue = cls._activeQue[:]
        cls._activeQue = [m for m in oldQue if m != manager]
        cls._activeQue.append(manager)
        cls.figs[manager.num] = manager
    @classmethod
    def draw_all(cls, force=False):
        """
        Redraw all figures registered with the pyplot
        state machine.
        """
        # Unless *force* is set, only figures marked stale are redrawn.
        for f_mgr in cls.get_all_fig_managers():
            if force or f_mgr.canvas.figure.stale:
                f_mgr.canvas.draw_idle()
# Ensure all GUI windows are torn down cleanly at interpreter exit.
atexit.register(Gcf.destroy_all)
| [
"[email protected]"
] | |
ee1a31f88eeb3c7e9f45e9d6e74e4f4ac8581dbf | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_381/ch15_2020_09_14_14_10_44_836878.py | 03149e67069cc3803fab0866de1f386bfbe66feb | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | def chris(nome):
if chris == nome:
return 'Todo mundo odeia o Chris'
else:
return 'Olá, {0}'.format(nome)
nome = input('Qual seu nome?')
| [
"[email protected]"
] | |
af6c43ee70e8b9d1ae987c97a80ae8707f4b001e | 59dbbdf5d29d2490ec8a697dc137aa7456479e89 | /usage/meta.py | 492a6b0640a3b1458ec28a2c5f9d8bdf040928ea | [
"Apache-2.0"
] | permissive | absalon-james/usage | 15d424599528bec7d3184a72b5e9754c325e46ed | a67ceddda8a14244526b3b3a40c0c3feec7035d2 | refs/heads/master | 2021-01-21T14:58:06.023114 | 2016-10-03T20:56:16 | 2016-10-03T20:57:46 | 57,158,746 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | version = '0.1.2'
description = "Python tool for collecting usage information from ceilometer."
| [
"[email protected]"
] | |
7a1df63cd632b5b6f4ccaeaeee6eff6164e582d7 | bffcfa6103ee72d7ac394c14aa861e60616c7ab8 | /pytorch3d/datasets/__init__.py | 1687213018a29e5d75a4c5490368d52e5f4d893a | [
"BSD-3-Clause"
] | permissive | Amit2016-17/pytorch3d | ccac686bc1a3caeb4bd0f38519fbcb83f816501d | 7944d24d4872bdb01b821450840049e28d0ce12b | refs/heads/master | 2022-11-25T10:40:14.409087 | 2020-08-05T13:58:53 | 2020-08-05T14:00:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .r2n2 import R2N2, BlenderCamera
from .shapenet import ShapeNetCore
from .utils import collate_batched_meshes
__all__ = [k for k in globals().keys() if not k.startswith("_")]
| [
"[email protected]"
] | |
7dfcead14cfcc41518ec35eaa9c96ca9cfbc0be3 | 8fb846f4f4ac5fd417489d731eae8a8a1bdc77c3 | /rllab/misc/console.py | b32d21a249a3d389e0aef97f641591cdb13bb35a | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | zhongwen/rllab | 0a9f9ea2d8995037b83aaae5853a299d5cf9e432 | d8239c05179fcc55d865db7ce933defa3baae24d | refs/heads/master | 2021-01-14T08:36:37.272071 | 2016-08-17T12:29:00 | 2016-08-17T12:29:00 | 65,801,245 | 1 | 1 | null | 2016-08-16T08:18:47 | 2016-08-16T08:18:46 | null | UTF-8 | Python | false | false | 5,514 | py | import sys
import time
import os
import errno
import shlex
import pydoc
import inspect
# ANSI foreground color codes; add 10 for the background (highlight) variant.
color2num = {
    'gray': 30,
    'red': 31,
    'green': 32,
    'yellow': 33,
    'blue': 34,
    'magenta': 35,
    'cyan': 36,
    'white': 37,
    'crimson': 38,
}


def colorize(string, color, bold=False, highlight=False):
    """Return *string* wrapped in ANSI escape sequences for terminal display.

    *color* must be a key of ``color2num``.  ``highlight`` selects the
    background variant of the color; ``bold`` adds the bold attribute.
    """
    code = color2num[color]
    if highlight:
        code += 10
    attributes = [str(code)]
    if bold:
        attributes.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attributes), string)
def mkdir_p(path):
    """Create *path* recursively, like ``mkdir -p`` (no error if it exists)."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # Swallow the error only when the path already exists as a directory;
        # any other failure (permissions, a file in the way, ...) propagates.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def log(s):  # , send_telegram=False
    """Print *s* to stdout and flush immediately so output is not buffered."""
    # print() call form works on both Python 2 and 3; the original
    # ``print s`` statement form is a SyntaxError under Python 3.
    print(s)
    sys.stdout.flush()
class SimpleMessage(object):
    """Context manager that prints *msg* on entry and reports the elapsed
    wall-clock time through *logger* on exit."""

    def __init__(self, msg, logger=None):
        self.msg = msg
        # Resolve the default lazily: the old eager ``logger=log`` default
        # froze one reference to the module-level ``log`` at import time.
        self.logger = log if logger is None else logger

    def __enter__(self):
        # print() call form is valid on Python 2 and 3; the original
        # ``print self.msg`` statement was Python-2 only.
        print(self.msg)
        self.tstart = time.time()

    def __exit__(self, etype, *args):
        # Note in the log line whether the body raised an exception.
        maybe_exc = "" if etype is None else " (with exception)"
        self.logger("done%s in %.3f seconds" %
                    (maybe_exc, time.time() - self.tstart))
# Global nesting depth shared by all Message context managers.
MESSAGE_DEPTH = 0


class Message(object):
    """Context manager printing colorized, tab-indented enter/exit messages;
    nesting of Message blocks is tracked through the global MESSAGE_DEPTH."""

    def __init__(self, msg):
        self.msg = msg

    def __enter__(self):
        global MESSAGE_DEPTH  # pylint: disable=W0603
        # print() call form is valid on both Python 2 and 3; the original
        # ``print colorize(...)`` statement form breaks under Python 3.
        print(colorize('\t' * MESSAGE_DEPTH + '=: ' + self.msg, 'magenta'))
        self.tstart = time.time()
        MESSAGE_DEPTH += 1

    def __exit__(self, etype, *args):
        global MESSAGE_DEPTH  # pylint: disable=W0603
        MESSAGE_DEPTH -= 1
        maybe_exc = "" if etype is None else " (with exception)"
        print(colorize('\t' * MESSAGE_DEPTH + "done%s in %.3f seconds" % (maybe_exc, time.time() - self.tstart), 'magenta'))
def prefix_log(prefix, logger=None):
    """Return a logger callable that prepends *prefix* to every message.

    *logger* defaults to the module-level ``log``; the lookup happens at
    call time instead of being frozen into the signature at import time
    (the old ``logger=log`` default bound one reference eagerly).
    """
    if logger is None:
        logger = log
    return lambda s: logger(prefix + s)
def tee_log(file_name):
    """Return a logger that echoes through ``log`` and also appends each
    message, newline-terminated, to *file_name*.

    The file handle is intentionally kept open for the lifetime of the
    returned logger.
    """
    out_file = open(file_name, 'w+')

    def logger(s):
        log(s)
        out_file.write(s)
        out_file.write('\n')
        out_file.flush()
    return logger
def collect_args():
    """Parse ``--name value`` pairs from ``sys.argv[1:]`` into a dict,
    stripping the leading ``--`` from each name.
    """
    tokens = shlex.split(' '.join(sys.argv[1:]))
    names, values = tokens[::2], tokens[1::2]
    return dict((name[2:], value) for name, value in zip(names, values))
def type_hint(arg_name, arg_type):
    """Decorator factory recording ``arg_name -> arg_type`` in the wrapped
    function's ``__tweak_type_hint_meta__`` mapping (consumed by tweakfun).
    """
    def decorate(f):
        meta = getattr(f, '__tweak_type_hint_meta__', None)
        if meta is None:
            meta = {}
            f.__tweak_type_hint_meta__ = meta
        meta[arg_name] = arg_type
        return f
    return decorate
def tweak(fun_or_val, identifier=None):
    """Dispatch to ``tweakfun`` for callables and ``tweakval`` otherwise."""
    if not callable(fun_or_val):
        return tweakval(fun_or_val, identifier)
    return tweakfun(fun_or_val, identifier)
def tweakval(val, identifier):
    """Return *val*, possibly overridden by a ``--<identifier> <value>``
    command-line argument, with the replacement cast to ``type(val)``.

    Raises ValueError when no identifier is given.
    """
    if not identifier:
        raise ValueError('Must provide an identifier for tweakval to work')
    args = collect_args()
    # dict.items() instead of the Python-2-only iteritems(): identical
    # behaviour here, and the code keeps working under Python 3.
    for k, v in args.items():
        stripped = k.replace('-', '_')
        if stripped == identifier:
            log('replacing %s in %s with %s' % (stripped, str(val), str(v)))
            return type(val)(v)
    return val
def tweakfun(fun, alt=None):
    """Make the arguments (or the function itself) tweakable from command line.
    See tests/test_misc_console.py for examples.
    NOTE: this only works for the initial launched process, since other processes
    will get different argv. What this means is that tweak() calls wrapped in a function
    to be invoked in a child process might not behave properly.
    """
    # NOTE(review): Python-2 specific code — relies on im_class,
    # dict.iteritems(), inspect.getargspec and list-concatenation of
    # dict.items(); porting to Python 3 requires changes throughout.
    cls = getattr(fun, 'im_class', None)
    method_name = fun.__name__
    # Build the command-line prefix: explicit alt name, Class.method, or
    # the bare function name.
    if alt:
        cmd_prefix = alt
    elif cls:
        cmd_prefix = cls + '.' + method_name
    else:
        cmd_prefix = method_name
    cmd_prefix = cmd_prefix.lower()
    args = collect_args()
    # ``--<prefix> some.dotted.path`` replaces the function itself.
    if cmd_prefix in args:
        fun = pydoc.locate(args[cmd_prefix])
    if type(fun) == type:
        argspec = inspect.getargspec(fun.__init__)
    else:
        argspec = inspect.getargspec(fun)
    # TODO handle list arguments
    # Map trailing parameters to their default values.
    defaults = dict(
        zip(argspec.args[-len(argspec.defaults or []):], argspec.defaults or []))
    replaced_kwargs = {}
    cmd_prefix += '-'
    # Explicit type hints (from @type_hint) take priority over inferring
    # the type from the parameter's default value.
    if type(fun) == type:
        meta = getattr(fun.__init__, '__tweak_type_hint_meta__', {})
    else:
        meta = getattr(fun, '__tweak_type_hint_meta__', {})
    for k, v in args.iteritems():
        if k.startswith(cmd_prefix):
            stripped = k[len(cmd_prefix):].replace('-', '_')
            if stripped in meta:
                log('replacing %s in %s with %s' % (stripped, str(fun), str(v)))
                replaced_kwargs[stripped] = meta[stripped](v)
            elif stripped not in argspec.args:
                raise ValueError(
                    '%s is not an explicit parameter of %s' % (stripped, str(fun)))
            elif stripped not in defaults:
                raise ValueError(
                    '%s does not have a default value in method %s' % (stripped, str(fun)))
            elif defaults[stripped] is None:
                raise ValueError(
                    'Cannot infer type of %s in method %s from None value' % (stripped, str(fun)))
            else:
                log('replacing %s in %s with %s' % (stripped, str(fun), str(v)))
                # TODO more proper conversions
                replaced_kwargs[stripped] = type(defaults[stripped])(v)
    # Wrapper that merges positional args, caller kwargs and the
    # command-line overrides into one keyword dict.
    def tweaked(*args, **kwargs):
        all_kw = dict(zip(argspec[0], args) +
                      kwargs.items() + replaced_kwargs.items())
        return fun(**all_kw)
    return tweaked
| [
"[email protected]"
] | |
2a4a81a565fab19cc75a574eb4d85c9994bb0767 | c67f2d0677f8870bc1d970891bbe31345ea55ce2 | /zippy/lib-python/3/test/test_file.py | e9a1ceeeb8a48606fa1ad65140adac2fd3689d05 | [
"BSD-3-Clause"
] | permissive | securesystemslab/zippy | a5a1ecf5c688504d8d16128ce901406ffd6f32c2 | ff0e84ac99442c2c55fe1d285332cfd4e185e089 | refs/heads/master | 2022-07-05T23:45:36.330407 | 2018-07-10T22:17:32 | 2018-07-10T22:17:32 | 67,824,983 | 324 | 27 | null | null | null | null | UTF-8 | Python | false | false | 11,928 | py | import sys
import os
import unittest
from array import array
from weakref import proxy
import io
import _pyio as pyio
from test.support import TESTFN, run_unittest, gc_collect
from collections import UserList
class AutoFileTests(unittest.TestCase):
    # file tests for which a test file is automatically set up
    # ``self.open`` is supplied by the concrete subclasses as either
    # ``io.open`` (C implementation) or ``_pyio.open`` (pure Python).
    def setUp(self):
        self.f = self.open(TESTFN, 'wb')
    def tearDown(self):
        if self.f:
            self.f.close()
        os.remove(TESTFN)
    def testWeakRefs(self):
        # verify weak references
        p = proxy(self.f)
        p.write(b'teststring')
        self.assertEqual(self.f.tell(), p.tell())
        self.f.close()
        self.f = None
        gc_collect()
        # After the referent is collected, the proxy must be dead.
        self.assertRaises(ReferenceError, getattr, p, 'tell')
    def testAttributes(self):
        # verify expected attributes exist
        f = self.f
        f.name # merely shouldn't blow up
        f.mode # ditto
        f.closed # ditto
    def testReadinto(self):
        # verify readinto
        self.f.write(b'12')
        self.f.close()
        a = array('b', b'x'*10)
        self.f = self.open(TESTFN, 'rb')
        n = self.f.readinto(a)
        self.assertEqual(b'12', a.tobytes()[:n])
    def testReadinto_text(self):
        # verify readinto refuses text files
        a = array('b', b'x'*10)
        self.f.close()
        self.f = self.open(TESTFN, 'r')
        if hasattr(self.f, "readinto"):
            self.assertRaises(TypeError, self.f.readinto, a)
    def testWritelinesUserList(self):
        # verify writelines with instance sequence
        l = UserList([b'1', b'2'])
        self.f.writelines(l)
        self.f.close()
        self.f = self.open(TESTFN, 'rb')
        buf = self.f.read()
        self.assertEqual(buf, b'12')
    def testWritelinesIntegers(self):
        # verify writelines with integers
        self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
    def testWritelinesIntegersUserList(self):
        # verify writelines with integers in UserList
        l = UserList([1,2,3])
        self.assertRaises(TypeError, self.f.writelines, l)
    def testWritelinesNonString(self):
        # verify writelines with non-string object
        class NonString:
            pass
        self.assertRaises(TypeError, self.f.writelines,
                          [NonString(), NonString()])
    def testErrors(self):
        f = self.f
        self.assertEqual(f.name, TESTFN)
        self.assertTrue(not f.isatty())
        self.assertTrue(not f.closed)
        if hasattr(f, "readinto"):
            self.assertRaises((IOError, TypeError), f.readinto, "")
        f.close()
        self.assertTrue(f.closed)
    def testMethods(self):
        # Every one of these must raise ValueError once the file is closed.
        methods = [('fileno', ()),
                   ('flush', ()),
                   ('isatty', ()),
                   ('__next__', ()),
                   ('read', ()),
                   ('write', (b"",)),
                   ('readline', ()),
                   ('readlines', ()),
                   ('seek', (0,)),
                   ('tell', ()),
                   ('write', (b"",)),
                   ('writelines', ([],)),
                   ('__iter__', ()),
                   ]
        methods.append(('truncate', ()))
        # __exit__ should close the file
        self.f.__exit__(None, None, None)
        self.assertTrue(self.f.closed)
        for methodname, args in methods:
            method = getattr(self.f, methodname)
            # should raise on closed file
            self.assertRaises(ValueError, method, *args)
        # file is closed, __exit__ shouldn't do anything
        self.assertEqual(self.f.__exit__(None, None, None), None)
        # it must also return None if an exception was given
        try:
            1/0
        except:
            self.assertEqual(self.f.__exit__(*sys.exc_info()), None)
    def testReadWhenWriting(self):
        # A file opened write-only must refuse read().
        self.assertRaises(IOError, self.f.read)
class CAutoFileTests(AutoFileTests):
    # Run AutoFileTests against the C-accelerated ``io`` implementation.
    open = io.open
class PyAutoFileTests(AutoFileTests):
    # Run AutoFileTests against the pure-Python ``_pyio`` implementation.
    open = staticmethod(pyio.open)
class OtherFileTests(unittest.TestCase):
    """File-object tests that manage TESTFN themselves rather than relying
    on an automatic fixture; ``self.open`` is supplied by subclasses."""
    def testModeStrings(self):
        # check invalid mode strings
        for mode in ("", "aU", "wU+"):
            try:
                f = self.open(TESTFN, mode)
            except ValueError:
                pass
            else:
                f.close()
                self.fail('%r is an invalid file mode' % mode)
    def testStdin(self):
        # This causes the interpreter to exit on OSF1 v5.1.
        if sys.platform != 'osf1V5':
            self.assertRaises((IOError, ValueError), sys.stdin.seek, -1)
        else:
            print((
                ' Skipping sys.stdin.seek(-1), it may crash the interpreter.'
                ' Test manually.'), file=sys.__stdout__)
        self.assertRaises((IOError, ValueError), sys.stdin.truncate)
    def testBadModeArgument(self):
        # verify that we get a sensible error message for bad mode argument
        bad_mode = "qwerty"
        try:
            f = self.open(TESTFN, bad_mode)
        except ValueError as msg:
            if msg.args[0] != 0:
                s = str(msg)
                if TESTFN in s or bad_mode not in s:
                    self.fail("bad error message for invalid mode: %s" % s)
            # if msg.args[0] == 0, we're probably on Windows where there may be
            # no obvious way to discover why open() failed.
        else:
            f.close()
            self.fail("no error for invalid mode: %s" % bad_mode)
    def testSetBufferSize(self):
        # make sure that explicitly setting the buffer size doesn't cause
        # misbehaviour especially with repeated close() calls
        for s in (-1, 0, 1, 512):
            try:
                f = self.open(TESTFN, 'wb', s)
                f.write(str(s).encode("ascii"))
                f.close()
                f.close()
                f = self.open(TESTFN, 'rb', s)
                d = int(f.read().decode("ascii"))
                f.close()
                f.close()
            except IOError as msg:
                self.fail('error setting buffer size %d: %s' % (s, str(msg)))
            self.assertEqual(d, s)
    def testTruncateOnWindows(self):
        # SF bug <http://www.python.org/sf/801631>
        # "file.truncate fault on windows"
        os.unlink(TESTFN)
        f = self.open(TESTFN, 'wb')
        try:
            f.write(b'12345678901') # 11 bytes
            f.close()
            f = self.open(TESTFN,'rb+')
            data = f.read(5)
            if data != b'12345':
                self.fail("Read on file opened for update failed %r" % data)
            if f.tell() != 5:
                self.fail("File pos after read wrong %d" % f.tell())
            f.truncate()
            if f.tell() != 5:
                self.fail("File pos after ftruncate wrong %d" % f.tell())
            f.close()
            size = os.path.getsize(TESTFN)
            if size != 5:
                self.fail("File size after ftruncate wrong %d" % size)
        finally:
            f.close()
            os.unlink(TESTFN)
    def testIteration(self):
        # Test the complex interaction when mixing file-iteration and the
        # various read* methods.
        dataoffset = 16384
        filler = b"ham\n"
        assert not dataoffset % len(filler), \
            "dataoffset must be multiple of len(filler)"
        nchunks = dataoffset // len(filler)
        testlines = [
            b"spam, spam and eggs\n",
            b"eggs, spam, ham and spam\n",
            b"saussages, spam, spam and eggs\n",
            b"spam, ham, spam and eggs\n",
            b"spam, spam, spam, spam, spam, ham, spam\n",
            b"wonderful spaaaaaam.\n"
        ]
        methods = [("readline", ()), ("read", ()), ("readlines", ()),
                   ("readinto", (array("b", b" "*100),))]
        try:
            # Prepare the testfile
            bag = self.open(TESTFN, "wb")
            bag.write(filler * nchunks)
            bag.writelines(testlines)
            bag.close()
            # Test for appropriate errors mixing read* and iteration
            for methodname, args in methods:
                f = self.open(TESTFN, 'rb')
                if next(f) != filler:
                    # BUGFIX: this used to read ``self.fail, "Broken
                    # testfile"`` — a no-op tuple expression that never
                    # actually failed the test.
                    self.fail("Broken testfile")
                meth = getattr(f, methodname)
                meth(*args) # This simply shouldn't fail
                f.close()
            # Test to see if harmless (by accident) mixing of read* and
            # iteration still works. This depends on the size of the internal
            # iteration buffer (currently 8192,) but we can test it in a
            # flexible manner. Each line in the bag o' ham is 4 bytes
            # ("h", "a", "m", "\n"), so 4096 lines of that should get us
            # exactly on the buffer boundary for any power-of-2 buffersize
            # between 4 and 16384 (inclusive).
            f = self.open(TESTFN, 'rb')
            for i in range(nchunks):
                next(f)
            testline = testlines.pop(0)
            try:
                line = f.readline()
            except ValueError:
                self.fail("readline() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if line != testline:
                self.fail("readline() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            testline = testlines.pop(0)
            buf = array("b", b"\x00" * len(testline))
            try:
                f.readinto(buf)
            except ValueError:
                self.fail("readinto() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            line = buf.tobytes()
            if line != testline:
                self.fail("readinto() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            testline = testlines.pop(0)
            try:
                line = f.read(len(testline))
            except ValueError:
                self.fail("read() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if line != testline:
                self.fail("read() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            try:
                lines = f.readlines()
            except ValueError:
                self.fail("readlines() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if lines != testlines:
                self.fail("readlines() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            f.close()
            # Reading after iteration hit EOF shouldn't hurt either
            f = self.open(TESTFN, 'rb')
            try:
                for line in f:
                    pass
                try:
                    f.readline()
                    f.readinto(buf)
                    f.read()
                    f.readlines()
                except ValueError:
                    self.fail("read* failed after next() consumed file")
            finally:
                f.close()
        finally:
            os.unlink(TESTFN)
class COtherFileTests(OtherFileTests):
    # Run OtherFileTests against the C-accelerated ``io`` implementation.
    open = io.open
class PyOtherFileTests(OtherFileTests):
    # Run OtherFileTests against the pure-Python ``_pyio`` implementation.
    open = staticmethod(pyio.open)
def test_main():
    """Run the C (io) and pure-Python (_pyio) variants of both test classes."""
    # Historically, these tests have been sloppy about removing TESTFN.
    # So get rid of it no matter what.
    try:
        run_unittest(CAutoFileTests, PyAutoFileTests,
                     COtherFileTests, PyOtherFileTests)
    finally:
        if os.path.exists(TESTFN):
            os.unlink(TESTFN)
if __name__ == '__main__':
    test_main()
| [
"[email protected]"
] | |
81adc9b89c325fae8eb969a4530b965c9f2ee337 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-ice/aliyunsdkice/request/v20201109/DescribeQueryConfigsRequest.py | d4de87a4f55304fb57408fb816c6f72a3b5b2c81 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 1,437 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkice.endpoint import endpoint_data
class DescribeQueryConfigsRequest(RpcRequest):
    """RPC request for the ICE ``DescribeQueryConfigs`` API (version 2020-11-09)."""

    def __init__(self):
        """Initialize the request and attach regional endpoint data when present."""
        RpcRequest.__init__(self, 'ICE', '2020-11-09', 'DescribeQueryConfigs','ice')
        self.set_method('POST')
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_Type(self):  # String
        """Return the ``Type`` query parameter."""
        return self.get_query_params().get('Type')

    def set_Type(self, Type):  # String
        """Set the ``Type`` query parameter."""
        self.add_query_param('Type', Type)
| [
"[email protected]"
] | |
d01f9d1b57765a72c85ec040eab037e9d12c89bb | ca77e9e45d666771c7b0897e7e3093b3d3c12f65 | /scripts/trigger/add_prices.py | ec79a8be575fe0f59c9b16754b18afc1910a7a29 | [] | no_license | 2gDigitalPost/custom | 46175d3a3fc4c3be21dc20203ff0a48fb93b5639 | 6a3a804ef4ef6178044b70ad1e4bc5c56ab42d8d | refs/heads/master | 2020-04-04T07:40:17.962611 | 2016-12-28T18:35:28 | 2016-12-28T18:35:28 | 39,648,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,857 | py | """
This file was generated automatically from a custom script found in Project -> Script Editor.
The custom script was moved to a file so that it could be integrated with GitHub.
"""
__author__ = 'Topher.Hughes'
__date__ = '04/08/2015'
import traceback
def _to_float(price_str):
    """Convert a nullable price value to a number; None/'' count as 0."""
    if price_str in (None, ''):
        return 0
    return float(price_str)


def _accumulate_price(server, sobject, amount):
    """Add *amount* to *sobject*'s current price and persist the new total."""
    new_price = _to_float(sobject.get('price')) + amount
    server.update(sobject.get('__search_key__'), {'price': new_price})


def main(server=None, input=None):
    """
    The main function of the custom script. Trickles a project's price up
    to its title and then to its order (DEPRECATED, legacy behaviour kept).

    :param server: the TacticServerStub object
    :param input: a dict with data like search_key, search_type, sobject, and update_data
    :return: None
    """
    if not input:
        input = {}
    try:
        # CUSTOM_SCRIPT00035 — the repeated "parse price, add, update"
        # pattern is factored into _to_float/_accumulate_price; the
        # unused 'search_key' lookup from the original was dropped.
        sobj = input.get('sobject')
        price = _to_float(sobj.get('price'))
        proj = server.eval("@SOBJECT(twog/proj['code','%s'])" % sobj.get('proj_code'))[0]
        _accumulate_price(server, proj, price)
        title = server.eval("@SOBJECT(twog/title['code','%s'])" % proj.get('title_code'))[0]
        _accumulate_price(server, title, price)
        order = server.eval("@SOBJECT(twog/order['code','%s'])" % title.get('order_code'))[0]
        _accumulate_price(server, order, price)
    except AttributeError as e:
        traceback.print_exc()
        # print() call form works on both Python 2 and 3.
        print(str(e) + '\nMost likely the server object does not exist.')
        raise e
    except KeyError as e:
        traceback.print_exc()
        print(str(e) + '\nMost likely the input dictionary does not exist.')
        raise e
    except Exception as e:
        traceback.print_exc()
        print(str(e))
        raise e
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
1da98ce1969f888ec8962c9239a84d4f7a580f78 | b72dbc51279d3e59cb6410367b671f8a956314c1 | /leet_code/leet_372.py | 5c1d0057a5ac67543ab059922519a69fe52287d6 | [] | no_license | ddobokki/coding-test-practice | 7b16d20403bb1714d97adfd1f47aa7d3ccd7ea4b | c88d981a1d43b986169f7884ff3ef1498e768fc8 | refs/heads/main | 2023-07-08T15:09:32.269059 | 2021-08-08T12:19:44 | 2021-08-08T12:19:44 | 344,116,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | from typing import List
class Solution:
    def superPow(self, a: int, b: List[int]) -> int:
        """Return ``a ** b mod 1337`` where *b* is a list of decimal digits."""
        # Bases 0 and 1 are fixed points of exponentiation.
        if a == 0 or a == 1:
            return a
        exponent_digits = "".join(map(str, b))
        # Three-argument pow performs fast modular exponentiation.
        return int(pow(a, int(exponent_digits), 1337))
| [
"[email protected]"
] | |
4f02cd88aa3d26c3be1bbb4b45c2049a6e8a6317 | 9ab9d9a3883471763edbceea59a0e83170581b5f | /eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-i686-ucs4.egg/EGG-INFO/scripts/bed_extend_to.py | 2985cc3497acf222c69151a76b253624baa01752 | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | asmmhossain/phyG | 24dc211dad5b3e89c87ff384e841f2e98bbd52db | 023f505b705ab953f502cbc55e90612047867583 | refs/heads/master | 2022-11-21T12:43:46.172725 | 2014-02-14T12:33:08 | 2014-02-14T12:33:08 | 13,800,552 | 0 | 1 | NOASSERTION | 2020-07-25T21:05:41 | 2013-10-23T11:04:25 | Python | UTF-8 | Python | false | false | 1,132 | py | #!/afs/bx.psu.edu/project/pythons/linux-i686-ucs4/bin/python2.7
"""
Read BED file and extend each record to the specified minimum length. If chromosome
size information is provided trim extended intervals.
usage: %prog amount [ chrom_file ] < bed_file
"""
import sys
from bx.intervals.io import GenomicIntervalReader
# Minimum interval length, taken from the command line.
length = int( sys.argv[1] )
chrom_len = None
if len( sys.argv ) > 2:
    # Optional chromosome-sizes file: whitespace-separated
    # "<chrom> <size>" lines, used to clip extended intervals.
    chrom_len = dict( ( fields[0], int( fields[1] ) ) for fields in map( str.split, open( sys.argv[2] ) ) )
for interval in GenomicIntervalReader( sys.stdin ):
    if interval.end - interval.start < length:
        start = interval.start
        end = interval.end
        # Extend in positive direction on strand
        if interval.strand == "+":
            end = start + length
        else:
            start = end - length
        # Trim to [0, chromosome length]
        if start < 0:
            start = 0
        if chrom_len and end > chrom_len[interval.chrom]:
            end = chrom_len[interval.chrom]
        # Set new start and end
        interval.start = start
        interval.end = end
    # Output possibly adjusted interval
    print interval
| [
"[email protected]"
] | |
cccee8c95ce17bb44043b1a20a899ac4161055be | ee22ec2076a79e8de3011377fe205bc87163ab9f | /src/basic-c3/func-let.py | 8c9c6ff3fea14adfbe60b86692ad4981a5710241 | [] | no_license | n18018/programming-term2 | 039a95c67372a38a34e2aa8c5975045a9fc731be | 86c455269eed312def529604e1ac3b00f476226c | refs/heads/master | 2020-03-22T08:59:29.545280 | 2018-08-29T07:57:37 | 2018-08-29T07:57:37 | 139,806,131 | 0 | 0 | null | 2018-07-05T06:42:11 | 2018-07-05T06:42:11 | null | UTF-8 | Python | false | false | 326 | py | # 関数を定義
def mul_func(a, b):
    """Return the product of *a* and *b*."""
    product = a * b
    return product
def div_func(a, b):
    """Return *a* divided by *b* (true division)."""
    quotient = a / b
    return quotient
# Assign the mul_func function to a variable (functions are first-class objects)
func = mul_func
# Call the function through the variable it was assigned to
result = func(2, 3)
print(result)
# The same idea, assigning the div_func function to a variable
func2 = div_func
result = func2(10, 5)
print(result)
| [
"[email protected]"
] | |
2a1f5e4881e26548e1ce7fdb9043a6c590f91749 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/devtestlab/latest/get_lab.py | f41e214f30cdd3cfa752fbedcad88a1a4eccd182 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 16,776 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetLabResult',
'AwaitableGetLabResult',
'get_lab',
]
@pulumi.output_type
class GetLabResult:
    """
    A lab.
    """
    # Auto-generated result type for the getLab invoke. __init__ validates
    # the raw payload and attaches each field with pulumi.set(); the
    # @property accessors below expose the fields read-only via pulumi.get().
    def __init__(__self__, announcement=None, artifacts_storage_account=None, created_date=None, default_premium_storage_account=None, default_storage_account=None, environment_permission=None, extended_properties=None, lab_storage_type=None, load_balancer_id=None, location=None, mandatory_artifacts_resource_ids_linux=None, mandatory_artifacts_resource_ids_windows=None, name=None, network_security_group_id=None, premium_data_disk_storage_account=None, premium_data_disks=None, provisioning_state=None, public_ip_id=None, support=None, tags=None, type=None, unique_identifier=None, vault_name=None, vm_creation_resource_group=None):
        # Each argument is type-checked only when truthy (None/empty values
        # skip validation) and then stored on the instance.
        if announcement and not isinstance(announcement, dict):
            raise TypeError("Expected argument 'announcement' to be a dict")
        pulumi.set(__self__, "announcement", announcement)
        if artifacts_storage_account and not isinstance(artifacts_storage_account, str):
            raise TypeError("Expected argument 'artifacts_storage_account' to be a str")
        pulumi.set(__self__, "artifacts_storage_account", artifacts_storage_account)
        if created_date and not isinstance(created_date, str):
            raise TypeError("Expected argument 'created_date' to be a str")
        pulumi.set(__self__, "created_date", created_date)
        if default_premium_storage_account and not isinstance(default_premium_storage_account, str):
            raise TypeError("Expected argument 'default_premium_storage_account' to be a str")
        pulumi.set(__self__, "default_premium_storage_account", default_premium_storage_account)
        if default_storage_account and not isinstance(default_storage_account, str):
            raise TypeError("Expected argument 'default_storage_account' to be a str")
        pulumi.set(__self__, "default_storage_account", default_storage_account)
        if environment_permission and not isinstance(environment_permission, str):
            raise TypeError("Expected argument 'environment_permission' to be a str")
        pulumi.set(__self__, "environment_permission", environment_permission)
        if extended_properties and not isinstance(extended_properties, dict):
            raise TypeError("Expected argument 'extended_properties' to be a dict")
        pulumi.set(__self__, "extended_properties", extended_properties)
        if lab_storage_type and not isinstance(lab_storage_type, str):
            raise TypeError("Expected argument 'lab_storage_type' to be a str")
        pulumi.set(__self__, "lab_storage_type", lab_storage_type)
        if load_balancer_id and not isinstance(load_balancer_id, str):
            raise TypeError("Expected argument 'load_balancer_id' to be a str")
        pulumi.set(__self__, "load_balancer_id", load_balancer_id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if mandatory_artifacts_resource_ids_linux and not isinstance(mandatory_artifacts_resource_ids_linux, list):
            raise TypeError("Expected argument 'mandatory_artifacts_resource_ids_linux' to be a list")
        pulumi.set(__self__, "mandatory_artifacts_resource_ids_linux", mandatory_artifacts_resource_ids_linux)
        if mandatory_artifacts_resource_ids_windows and not isinstance(mandatory_artifacts_resource_ids_windows, list):
            raise TypeError("Expected argument 'mandatory_artifacts_resource_ids_windows' to be a list")
        pulumi.set(__self__, "mandatory_artifacts_resource_ids_windows", mandatory_artifacts_resource_ids_windows)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if network_security_group_id and not isinstance(network_security_group_id, str):
            raise TypeError("Expected argument 'network_security_group_id' to be a str")
        pulumi.set(__self__, "network_security_group_id", network_security_group_id)
        if premium_data_disk_storage_account and not isinstance(premium_data_disk_storage_account, str):
            raise TypeError("Expected argument 'premium_data_disk_storage_account' to be a str")
        pulumi.set(__self__, "premium_data_disk_storage_account", premium_data_disk_storage_account)
        if premium_data_disks and not isinstance(premium_data_disks, str):
            raise TypeError("Expected argument 'premium_data_disks' to be a str")
        pulumi.set(__self__, "premium_data_disks", premium_data_disks)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if public_ip_id and not isinstance(public_ip_id, str):
            raise TypeError("Expected argument 'public_ip_id' to be a str")
        pulumi.set(__self__, "public_ip_id", public_ip_id)
        if support and not isinstance(support, dict):
            raise TypeError("Expected argument 'support' to be a dict")
        pulumi.set(__self__, "support", support)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if unique_identifier and not isinstance(unique_identifier, str):
            raise TypeError("Expected argument 'unique_identifier' to be a str")
        pulumi.set(__self__, "unique_identifier", unique_identifier)
        if vault_name and not isinstance(vault_name, str):
            raise TypeError("Expected argument 'vault_name' to be a str")
        pulumi.set(__self__, "vault_name", vault_name)
        if vm_creation_resource_group and not isinstance(vm_creation_resource_group, str):
            raise TypeError("Expected argument 'vm_creation_resource_group' to be a str")
        pulumi.set(__self__, "vm_creation_resource_group", vm_creation_resource_group)
    @property
    @pulumi.getter
    def announcement(self) -> Optional['outputs.LabAnnouncementPropertiesResponse']:
        """
        The properties of any lab announcement associated with this lab
        """
        return pulumi.get(self, "announcement")
    @property
    @pulumi.getter(name="artifactsStorageAccount")
    def artifacts_storage_account(self) -> str:
        """
        The lab's artifact storage account.
        """
        return pulumi.get(self, "artifacts_storage_account")
    @property
    @pulumi.getter(name="createdDate")
    def created_date(self) -> str:
        """
        The creation date of the lab.
        """
        return pulumi.get(self, "created_date")
    @property
    @pulumi.getter(name="defaultPremiumStorageAccount")
    def default_premium_storage_account(self) -> str:
        """
        The lab's default premium storage account.
        """
        return pulumi.get(self, "default_premium_storage_account")
    @property
    @pulumi.getter(name="defaultStorageAccount")
    def default_storage_account(self) -> str:
        """
        The lab's default storage account.
        """
        return pulumi.get(self, "default_storage_account")
    @property
    @pulumi.getter(name="environmentPermission")
    def environment_permission(self) -> Optional[str]:
        """
        The access rights to be granted to the user when provisioning an environment
        """
        return pulumi.get(self, "environment_permission")
    @property
    @pulumi.getter(name="extendedProperties")
    def extended_properties(self) -> Optional[Mapping[str, str]]:
        """
        Extended properties of the lab used for experimental features
        """
        return pulumi.get(self, "extended_properties")
    @property
    @pulumi.getter(name="labStorageType")
    def lab_storage_type(self) -> Optional[str]:
        """
        Type of storage used by the lab. It can be either Premium or Standard. Default is Premium.
        """
        return pulumi.get(self, "lab_storage_type")
    @property
    @pulumi.getter(name="loadBalancerId")
    def load_balancer_id(self) -> str:
        """
        The load balancer used to for lab VMs that use shared IP address.
        """
        return pulumi.get(self, "load_balancer_id")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        The location of the resource.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="mandatoryArtifactsResourceIdsLinux")
    def mandatory_artifacts_resource_ids_linux(self) -> Optional[Sequence[str]]:
        """
        The ordered list of artifact resource IDs that should be applied on all Linux VM creations by default, prior to the artifacts specified by the user.
        """
        return pulumi.get(self, "mandatory_artifacts_resource_ids_linux")
    @property
    @pulumi.getter(name="mandatoryArtifactsResourceIdsWindows")
    def mandatory_artifacts_resource_ids_windows(self) -> Optional[Sequence[str]]:
        """
        The ordered list of artifact resource IDs that should be applied on all Windows VM creations by default, prior to the artifacts specified by the user.
        """
        return pulumi.get(self, "mandatory_artifacts_resource_ids_windows")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="networkSecurityGroupId")
    def network_security_group_id(self) -> str:
        """
        The Network Security Group attached to the lab VMs Network interfaces to restrict open ports.
        """
        return pulumi.get(self, "network_security_group_id")
    @property
    @pulumi.getter(name="premiumDataDiskStorageAccount")
    def premium_data_disk_storage_account(self) -> str:
        """
        The lab's premium data disk storage account.
        """
        return pulumi.get(self, "premium_data_disk_storage_account")
    @property
    @pulumi.getter(name="premiumDataDisks")
    def premium_data_disks(self) -> Optional[str]:
        """
        The setting to enable usage of premium data disks.
        When its value is 'Enabled', creation of standard or premium data disks is allowed.
        When its value is 'Disabled', only creation of standard data disks is allowed.
        """
        return pulumi.get(self, "premium_data_disks")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning status of the resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="publicIpId")
    def public_ip_id(self) -> str:
        """
        The public IP address for the lab's load balancer.
        """
        return pulumi.get(self, "public_ip_id")
    @property
    @pulumi.getter
    def support(self) -> Optional['outputs.LabSupportPropertiesResponse']:
        """
        The properties of any lab support message associated with this lab
        """
        return pulumi.get(self, "support")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        The tags of the resource.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="uniqueIdentifier")
    def unique_identifier(self) -> str:
        """
        The unique immutable identifier of a resource (Guid).
        """
        return pulumi.get(self, "unique_identifier")
    @property
    @pulumi.getter(name="vaultName")
    def vault_name(self) -> str:
        """
        The lab's Key vault.
        """
        return pulumi.get(self, "vault_name")
    @property
    @pulumi.getter(name="vmCreationResourceGroup")
    def vm_creation_resource_group(self) -> str:
        """
        The resource group in which all new lab virtual machines will be created. To let DevTest Labs manage resource group creation, set this value to null.
        """
        return pulumi.get(self, "vm_creation_resource_group")
class AwaitableGetLabResult(GetLabResult):
    """Awaitable wrapper so the invoke result can be used with ``await``.

    ``__await__`` must be a generator; the unreachable ``yield`` below
    exists only to make Python compile it as one, after which it
    immediately returns a plain :class:`GetLabResult` copy of itself.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetLabResult(
            announcement=self.announcement,
            artifacts_storage_account=self.artifacts_storage_account,
            created_date=self.created_date,
            default_premium_storage_account=self.default_premium_storage_account,
            default_storage_account=self.default_storage_account,
            environment_permission=self.environment_permission,
            extended_properties=self.extended_properties,
            lab_storage_type=self.lab_storage_type,
            load_balancer_id=self.load_balancer_id,
            location=self.location,
            mandatory_artifacts_resource_ids_linux=self.mandatory_artifacts_resource_ids_linux,
            mandatory_artifacts_resource_ids_windows=self.mandatory_artifacts_resource_ids_windows,
            name=self.name,
            network_security_group_id=self.network_security_group_id,
            premium_data_disk_storage_account=self.premium_data_disk_storage_account,
            premium_data_disks=self.premium_data_disks,
            provisioning_state=self.provisioning_state,
            public_ip_id=self.public_ip_id,
            support=self.support,
            tags=self.tags,
            type=self.type,
            unique_identifier=self.unique_identifier,
            vault_name=self.vault_name,
            vm_creation_resource_group=self.vm_creation_resource_group)
def get_lab(expand: Optional[str] = None,
            name: Optional[str] = None,
            resource_group_name: Optional[str] = None,
            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLabResult:
    """
    Use this data source to access information about an existing resource.

    Calls the ``azure-nextgen:devtestlab/latest:getLab`` provider invoke and
    wraps the typed result in an awaitable.

    :param str expand: Specify the $expand query. Example: 'properties($select=defaultStorageAccount)'
    :param str name: The name of the lab.
    :param str resource_group_name: The name of the resource group.
    """
    # Provider arguments are keyed by their camelCase wire names.
    __args__ = dict()
    __args__['expand'] = expand
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # The invoke deserializes directly into GetLabResult via typ=...
    __ret__ = pulumi.runtime.invoke('azure-nextgen:devtestlab/latest:getLab', __args__, opts=opts, typ=GetLabResult).value
    # Re-wrap field-by-field in the awaitable subclass.
    return AwaitableGetLabResult(
        announcement=__ret__.announcement,
        artifacts_storage_account=__ret__.artifacts_storage_account,
        created_date=__ret__.created_date,
        default_premium_storage_account=__ret__.default_premium_storage_account,
        default_storage_account=__ret__.default_storage_account,
        environment_permission=__ret__.environment_permission,
        extended_properties=__ret__.extended_properties,
        lab_storage_type=__ret__.lab_storage_type,
        load_balancer_id=__ret__.load_balancer_id,
        location=__ret__.location,
        mandatory_artifacts_resource_ids_linux=__ret__.mandatory_artifacts_resource_ids_linux,
        mandatory_artifacts_resource_ids_windows=__ret__.mandatory_artifacts_resource_ids_windows,
        name=__ret__.name,
        network_security_group_id=__ret__.network_security_group_id,
        premium_data_disk_storage_account=__ret__.premium_data_disk_storage_account,
        premium_data_disks=__ret__.premium_data_disks,
        provisioning_state=__ret__.provisioning_state,
        public_ip_id=__ret__.public_ip_id,
        support=__ret__.support,
        tags=__ret__.tags,
        type=__ret__.type,
        unique_identifier=__ret__.unique_identifier,
        vault_name=__ret__.vault_name,
        vm_creation_resource_group=__ret__.vm_creation_resource_group)
| [
"[email protected]"
] | |
ae4cb13734a0740053a6c4093337ac9c7f2ab6d8 | de707c94c91f554d549e604737b72e6c86eb0755 | /supervised_learning/0x02-tensorflow/7-evaluate.py | 16e4666e5785a1670cb87f5a081e939092818dc2 | [] | no_license | ejonakodra/holbertonschool-machine_learning-1 | 885cf89c1737573228071e4dc8e26304f393bc30 | 8834b201ca84937365e4dcc0fac978656cdf5293 | refs/heads/main | 2023-07-10T09:11:01.298863 | 2021-08-11T03:43:59 | 2021-08-11T03:43:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | #!/usr/bin/env python3
"""
Defines a function that evaluates output of
neural network classifier
"""
import tensorflow as tf
def evaluate(X, Y, save_path):
    """
    Evaluates output of neural network

    parameters:
        X [numpy.ndarray]: contains the input data to evaluate
        Y [numpy.ndarray]: contains the one-hot labels for X
        save_path [string]: location to load the model from

    returns:
        the network's prediction, accuracy, and loss, respectively
    """
    with tf.Session() as sess:
        # Rebuild the saved graph and restore the trained parameters.
        saver = tf.train.import_meta_graph(save_path + '.meta')
        saver.restore(sess, save_path)
        # Recover the placeholders and output tensors stored in the
        # graph's named collections at training time.
        x = tf.get_collection('x')[0]
        y = tf.get_collection('y')[0]
        y_pred = tf.get_collection('y_pred')[0]
        accuracy = tf.get_collection('accuracy')[0]
        loss = tf.get_collection('loss')[0]
        # Fetch all three tensors in a single run: the original code ran
        # three separate sess.run calls, i.e. three forward passes over
        # the same feed for identical results.
        prediction, accuracy, loss = sess.run(
            [y_pred, accuracy, loss], feed_dict={x: X, y: Y})
        return (prediction, accuracy, loss)
| [
"[email protected]"
] | |
3b7f9e6dbe9c7e658110923f1a4756af7ddbc9ba | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_274/ch134_2020_04_01_11_05_19_507472.py | ead9069b851dccdce2014878c391a3cdbe73018b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | def verifica_quadrado_perfeito(n):
m=n
i=2
while m > -1:
m=m-i
i=i+2
if m**2 == n:
return True
else:
return False | [
"[email protected]"
] | |
81264f2bcadaa766a81e3a63ef481439ed76116f | e20ed90b9be7a0bcdc1603929d65b2375a224bf6 | /generated-libraries/python/netapp/fpolicy/event_name.py | 476ecd9c31bd1a9cacb1652502a4f667427125da | [
"MIT"
] | permissive | radekg/netapp-ontap-lib-gen | 530ec3248cff5ead37dc2aa47ced300b7585361b | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | refs/heads/master | 2016-09-06T17:41:23.263133 | 2015-01-14T17:40:46 | 2015-01-14T17:40:46 | 29,256,898 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | class EventName(basestring):
"""
Event name
"""
@staticmethod
def get_api_name():
return "event-name"
| [
"[email protected]"
] | |
77e13c60ab887ef65af5d208fbcad6ac63b78f87 | f067b46c0bd8bf4fbc2471c42c4a74cb08359bd5 | /server/config/settings/components/thumb.py | 6b24b0bc3fdca3195cad3f422c9b6577525857e0 | [] | no_license | bopo/project-template | ffbf3516c9f486fadb46a767688cb26badda6a3d | 0eedd18c236b66516e543750673934f4932555ca | refs/heads/develop | 2020-03-23T01:57:29.471378 | 2018-07-17T05:38:48 | 2018-07-17T05:38:48 | 140,947,688 | 0 | 1 | null | 2022-04-21T04:40:56 | 2018-07-14T13:18:49 | Python | UTF-8 | Python | false | false | 1,330 | py | # -*- coding: utf-8 -*-
# INSTALLED_APPS += [
# "django_thumbor",
# ]
# INSTALLED_APPS += ('easy_thumbnails',)
# THUMBNAIL_ALIASES = {
# '': {
# 'avatar': {'size': (50, 50), 'crop': True},
# },
# }
# THUMB_LIST = '500x500'
# THUMB_DETAIL = '800x800'
# The host serving the thumbor resized images
THUMBOR_SERVER = 'http://localhost:8888'
# The prefix for the host serving the original images
# This must be a resolvable address to allow thumbor to reach the images
THUMBOR_MEDIA_URL = 'http://localhost:8888/media'
# If you want the static to be handled by django thumbor
# default as False, set True to handle it if you host your statics
THUMBOR_STATIC_ENABLED = False
# The prefix for the host serving the original static images
# this must be a resolvable address to allow thumbor to reach the images
THUMBOR_STATIC_URL = 'http://localhost:8888/static'
# The same security key used in the thumbor service to
# match the URL construction
THUMBOR_SECURITY_KEY = 'MY_SECURE_KEY'
# Default arguments passed to the `generate_url` helper or
# the `thumbor_url` templatetag
THUMBOR_ARGUMENTS = {}
# An alias represents a named set of arguments to the generate_url function
# or thumbor_url template tag. Use it to share general thumbnail
# configurations without repeating yourself.
THUMBOR_ALIASES = {}
| [
"[email protected]"
] | |
368933543f3030bfc38b32795b89b4bccf0c2b47 | d8c1419eba8aeec8c203e819aae46475b744a66f | /archive/main.py | e6f339fa6539de2f2ff591d9c551fbb00f096b86 | [] | no_license | StefenYin/yeadon | a66aa1808ef662c76cd5d96db0f74cd25e3abcc7 | 03ae2c5881795e44f1890818fcb3530ba3c6feac | refs/heads/master | 2021-01-17T08:50:12.313236 | 2012-12-18T19:54:52 | 2012-12-18T19:54:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | import stadium as stad
#import segment
import human as hum
import matplotlib.pyplot as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import data
import densities
# INPUTS ARE 95 MEASUREMENTS, DENSITIES, AND ORIENTATION ANGLES
# read input file of 95 measurements
# create solid objects
# create segment objects
# create human object
# plot human, no angles
# read in angles file
# plot human, with joint angles
# plot human conforming to a bicycle
# SECOND ITERATION: MOVE FROM FILE INPUTS (FOR ANGLES ONLY) TO QT GUI
# Whole-body orientation angles (3 values); all zero -> default pose.
externalangles = np.zeros( 3 )
externalangles[0] = 0  # redundant (np.zeros is already 0); kept as written
# 18 joint angles, created but never passed to the model below --
# presumably for the planned "read in angles file" step above. TODO confirm.
jointangles = np.zeros( 18 )
print "Creating human object."  # Python 2 print statement
H = hum.human(externalangles)
# draw() presumably renders via matplotlib/Axes3D (imported above) --
# confirm in the human module.
H.draw()
| [
"[email protected]"
] | |
14b6673a73fd4152a4af7be21d6eb6e4233c7f7e | 944401a6292baa2d23b9738898e0b0cb199d0795 | /color_quantization/octree/img_quality_assessment(IQA)/psnr/rgb_cs/rgb_psnr_sky.py | cdd42100b4527e977262e59e0ed94e2810f09ba1 | [] | no_license | sunnyweilai/Finding-Theme-Color-Palettes | cc84c93ce58abdd1802431c41bd59181d7a4f75b | 4c38b112f5c40b43d6ec126e415b609c7fdc1f39 | refs/heads/master | 2022-12-21T09:41:31.187411 | 2019-04-30T14:50:17 | 2019-04-30T14:50:17 | 184,273,925 | 1 | 0 | null | 2022-12-07T03:46:55 | 2019-04-30T14:09:52 | Python | UTF-8 | Python | false | false | 1,028 | py | """
image quality assessment (IQA) of the quantized images and the original image in RGB color space
----- method: PSNR
----- version 1.0 ("skimage" library)
----- http://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.compare_psnr
"""
import numpy as np
from PIL import Image
import glob
import csv
import skimage.measure as skm
# ---- obtain the original and quantized images
temp_img = np.array(Image.open('../../../../img/sky.jpg'))
quantized_img_path_list = []
quantized_img_path_list = glob.glob(r'../../../img/sky/rgb_cs/quantized_img/*.png')
quantized_img_path_list.sort()
# ---- compute PSNR
score_list = []
for i in quantized_img_path_list:
quantized_img = np.array(Image.open(i))
score = skm.compare_psnr(temp_img, quantized_img)
score_list.append(score)
# print(score_list)
# ---- save psnr score to csv file
csvfile = "sky_psnr.csv"
with open(csvfile, "w") as output:
writer = csv.writer(output, lineterminator='\n')
for val in score_list:
writer.writerow([val]) | [
"[email protected]"
] | |
4e3c0ef1f25cdcd986f146665468ac1c76395c52 | fac16ad71ac9b09afc9abf0528a98171ac02afc4 | /payment/payments/migrations/0003_category_product.py | ada7734a9c41e562f17f56d3edb03d1a44dd48c7 | [] | no_license | evansmwendwa/payment_gateway | 96dbaf3728ebe4e0875152c96ecfbe7b7004dd98 | afdeab38524ded46d1e557bab696afca9c387e7b | refs/heads/master | 2020-03-10T09:38:25.395169 | 2018-04-12T23:52:34 | 2018-04-12T23:52:34 | 129,314,383 | 0 | 0 | null | 2018-04-12T21:44:34 | 2018-04-12T21:44:33 | null | UTF-8 | Python | false | false | 1,097 | py | # Generated by Django 2.0.3 on 2018-03-26 03:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('payments', '0002_auto_20180326_0248'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('categoryName', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('productName', models.CharField(max_length=100)),
('productPrice', models.IntegerField()),
('productBrand', models.CharField(max_length=100)),
('productCategory', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='payments.Category')),
],
),
]
| [
"[email protected]"
] | |
454cbfb46c7d918fb69092033e9e5117676beb29 | 6eef7d400474384c9e36cafbbae95e3c34dbb6ad | /manage.py | 9546f991d846e27cec4ace859f5bbc2dda3e97ad | [] | no_license | codeAligned/clinvitae | 61d3c160e9dbc65d548818292681a27501d330ce | 4a75c14113dc562991c7d2d1a5812d2db91e2da0 | refs/heads/master | 2020-05-17T12:02:33.514187 | 2019-02-21T06:47:35 | 2019-02-21T06:47:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ben_kremer_clinvitae.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
cd0c89d314658b289357e3eaf240900c29f54130 | 7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3 | /aoj/aoj-icpc/300/1305.py | e3d262fb73a310e2dafd28e76451fa6d53bedd63 | [] | no_license | roiti46/Contest | c0c35478cd80f675965d10b1a371e44084f9b6ee | c4b850d76796c5388d2e0d2234f90dc8acfaadfa | refs/heads/master | 2021-01-17T13:23:30.551754 | 2017-12-10T13:06:42 | 2017-12-10T13:06:42 | 27,001,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | while 1:
n = input()
if n == 0: break
ans = {}
for i in range(n):
group,name = raw_input().split(":")
if i == 0: first = group
ans[group] = set(name[:-1].split(","))
while 1:
for key in ans:
flag = 0
if key == first: continue
for key1 in ans:
if key in ans[key1]:
ans[key1] |= ans[key]
ans[key1].discard(key)
flag = 1
if flag:
del ans[key]
break
if flag == 0: break
print len(ans[first])
| [
"[email protected]"
] | |
9c455ce4b8af925afea25a90680844bd0cd02e46 | 301b039050c00a9efa4f3a5635e8b633f8adf988 | /caffe2/python/layers/functional.py | 08612d21a4babfe8b412473834b03ea02a2621a1 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | sunpan822/caffe2 | 9704b6fe556d272fbedfd6edfdb796f6a8f02970 | a3c56d892eb85054b4e7cbd1cf0a0d07422ae796 | refs/heads/master | 2020-04-12T14:31:45.919799 | 2019-04-19T04:10:40 | 2019-04-19T04:10:40 | 162,555,100 | 1 | 0 | Apache-2.0 | 2018-12-20T09:14:48 | 2018-12-20T09:14:47 | null | UTF-8 | Python | false | false | 5,022 | py | # @package functional
# Module caffe2.python.layers.functional
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema, scope, workspace
from caffe2.python.layers.layers import (
ModelLayer,
)
import caffe2.proto.caffe2_pb2 as caffe2_pb2
import numpy as np
import six
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Functional(ModelLayer):
    """Layer that wraps an arbitrary net-building function.

    ``function(net, input_record, output_schema, **kwargs)`` is called to
    emit the actual operators; this class only derives the output schema,
    either from explicit dtypes or by a fake execution of the function.
    """
    def __init__(self, model, input_record, output_names_or_num, function,
                 name='functional', output_dtypes=None, **kwargs):
        # allow coercion
        input_record = schema.as_record(input_record)
        super(Functional, self).__init__(model, name, input_record, **kwargs)
        self._function = function
        self._kwargs = kwargs
        # A Struct is returned when the caller asked for a list of outputs
        # or for more than one output; a single output yields a Scalar.
        return_struct = (
            isinstance(output_names_or_num, list) or
            (isinstance(output_names_or_num, six.integer_types) and
             output_names_or_num != 1)
        )
        with scope.NameScope(self.name, reset=True):
            if isinstance(output_names_or_num, int):
                struct_output_schema = schema.NewRecord(
                    model.net, schema.RawTuple(output_names_or_num))
            elif isinstance(output_names_or_num, schema.Field):
                # Caller supplied a ready-made schema: clone it (keeping the
                # blobs) and skip all inference below.
                self.output_schema = output_names_or_num.clone(keep_blobs=True)
                return
            else:
                # One name or a list of names: every output starts untyped.
                if not isinstance(output_names_or_num, list):
                    output_names_or_num = [output_names_or_num]
                out_tuple = [(out, np.void) for out in output_names_or_num]
                struct_output_schema = schema.NewRecord(
                    model.net, schema.Struct(*out_tuple))
        num_outputs = len(struct_output_schema.field_blobs())
        # functional layer returns Struct if more than one outputs or output is
        # a list, otherwise Scalar
        if return_struct:
            self.output_schema = struct_output_schema
        else:
            self.output_schema = struct_output_schema[0]
        # If output_dtypes is provided, use it for output schema. Otherwise
        # the shape and type will be inferred.
        if output_dtypes is not None:
            if not isinstance(output_dtypes, list):
                output_dtypes = [output_dtypes] * num_outputs
            assert len(output_dtypes) == num_outputs
            for dtype, scalar in zip(output_dtypes,
                                     self.output_schema.all_scalars()):
                scalar.set_type(dtype)
            return
        # Fake execution of the function to infer shapes and types automatically
        had_issues = False
        try:
            # Build a throwaway net, run the wrapped function on it, and ask
            # Caffe2's static analysis for the resulting shapes/types.
            type_net = core.Net('_temp_type_and_shape_inference_net')
            schema.InitEmptyRecord(type_net, input_record, enforce_types=True)
            function(type_net, self.input_record, self.output_schema, **kwargs)
            (shapes, types) = workspace.InferShapesAndTypes([type_net], {})
            for i in range(num_outputs):
                scalar_schema = (self.output_schema[i] if return_struct
                                 else self.output_schema)
                blob = scalar_schema()
                if blob not in types or blob not in shapes:
                    had_issues = True
                    continue
                if shapes[blob] == []:
                    # Scalar type
                    shape = tuple()
                elif shapes[blob][0] == 0:
                    # Leading 0 marks the (variable) batch dimension; strip it.
                    shape = tuple(shapes[blob][1:])
                else:
                    logger.warning("unexpeced shape: {}".format(shapes[blob]))
                    # If batch dimension is not first - give up on shape
                    # inference for that blob
                    had_issues = True
                    continue
                # TODO(amalevich): Move it to some shared library
                dtype = None
                if types[blob] == caffe2_pb2.TensorProto.DOUBLE:
                    dtype = (np.float64, shape)
                elif types[blob] == caffe2_pb2.TensorProto.FLOAT:
                    dtype = (np.float32, shape)
                elif types[blob] == caffe2_pb2.TensorProto.INT32:
                    dtype = (np.int32, shape)
                elif types[blob] == caffe2_pb2.TensorProto.INT64:
                    dtype = (np.int64, shape)
                elif types[blob] == caffe2_pb2.TensorProto.FLOAT16:
                    dtype = (np.float16, shape)
                if dtype is not None:
                    scalar_schema.set_type(dtype)
        except TypeError as ex:
            # The fake run may legitimately fail (e.g. ops that cannot run on
            # empty records); fall back to untyped outputs with a warning.
            had_issues = True
            logger.warning(str(ex))
        if had_issues:
            logger.warning(
                "Type inference had problems for layer: {}".format(self.name))
    def add_ops(self, net):
        # Delegate operator emission entirely to the wrapped function.
        self._function(
            net, self.input_record, self.output_schema, **(self._kwargs))
| [
"[email protected]"
] | |
1de7c275d0299c2c4771f2e76446f0388e3b6064 | 57dbcfe5fe149b5353d42d687ebacfee36f16551 | /sambam/sam_strip_tags.py | 07dd6983bcdc366b975a62036992193da80974d7 | [
"MIT"
] | permissive | peterjc/picobio | 74d3f570a6344dc3fbd3ddca46d65c4292ce0ee7 | 63a5f8b5670afc3680bdeac0d9663d8fcbe904c1 | refs/heads/master | 2023-09-06T04:26:31.955632 | 2023-08-31T14:12:25 | 2023-08-31T14:12:25 | 2,184,466 | 34 | 14 | null | null | null | null | UTF-8 | Python | false | false | 3,572 | py | #!/usr/bin/env python
"""Python script to remove tags from SAM/BAM files.
This script is designed to be used as part of a Unix pipeline. It
takes as optional command line arguments a white list of tags to
preserve (or a black list of tags to remove). It reads SAM format
data from stdin, and writes SAM format data to stdout.
Simple usage with SAM files, keeping only read-group tags:
$ ./sam_strip_tags.py RG < original.sam > only_RG.sam
Simple usage with BAM files with conversion to/from SAM via samtools:
$ samtools view -h original.bam | ./sam_strip_tags.py RG | samtools view -S -b - > only_RG.bam
If your SAM/BAM files lack @SQ headers, you may need to give
samtools the reference FASTA file as well.
To remove particular tags (a black list rather than a white list)
include the switch -v (for invert, like the grep option). For example,
to remove any original quality (OC) tags, use:
$ ./sam_strip_tags.py -v OQ < original.sam > no_OQ.sam
Likewise with BAM files via samtools,
$ samtools view -h original.bam | ./sam_strip_tags.py -v OQ | samtools view -S -b - > no_OQ.bam
Copyright Peter Cock 2012. All rights reserved. See:
https://github.com/peterjc/picobio
"""
import sys
# Build a single tag-filtering predicate from the command line: with -v the
# named tags are a black list (remove them), otherwise a white list (keep
# only them).  The original duplicated the whole stdin loop in each branch.
if "-v" in sys.argv[1:]:
    black_list = set(x.strip() for x in sys.argv[1:] if x != "-v")
    sys.stderr.write("Removing these tags: %s\n" % ", ".join(black_list))

    def keep_tag(code):
        # True if a tag whose two-letter code is ``code`` should survive.
        return code not in black_list
else:
    white_list = set(x.strip() for x in sys.argv[1:])
    sys.stderr.write("Keeping only these tags: %s\n" % ", ".join(white_list))

    def keep_tag(code):
        return code in white_list

for line in sys.stdin:
    if line[0] != "@":
        # Read line: the first 11 columns are mandatory; column 12, when
        # present, holds all the optional TAG:TYPE:VALUE fields.  The
        # original unpacked exactly 12 fields and therefore crashed with a
        # ValueError on legal tag-less reads; handle both shapes here.
        fields = line.rstrip().split("\t", 11)
        if len(fields) == 12:
            fields[11] = "\t".join(
                t for t in fields[11].split("\t") if keep_tag(t[:2]))
        line = "\t".join(fields) + "\n"
    # Header (@) lines pass through untouched.
    sys.stdout.write(line)
"[email protected]"
] | |
6fa53185e2e05b9e6e6db365d3d3defaf82130cf | f8e64dd069b2d65f1b9af53e03c42d97301e9a1d | /apps/currency/forms/fields.py | 9c65327d4b8701519fd5e5bf2100f8c390ed6e36 | [] | no_license | grengojbo/django-currency | 8daef53e442d7409f02c68dec48ff535b1712377 | 26e26cfb09ae8e62851a81bc8d821e1530eef20c | refs/heads/master | 2021-04-12T04:32:53.928776 | 2010-04-28T17:00:39 | 2010-04-28T17:00:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | from django.utils.translation import ugettext_lazy as _
from django import forms
from widgets import InputMoneyWidget
from currency.money import Money
from currency.models import Currency
__all__ = ('MoneyField',)
class MoneyField(forms.DecimalField):
def __init__(self, currency_widget=None, *args, **kwargs):
self.widget = InputMoneyWidget(currency_widget=currency_widget)
super(MoneyField, self).__init__(*args, **kwargs)
def clean(self, value):
if not isinstance(value, tuple):
raise Exception("Invalid value provided for MoneyField.clean (expected tupple)")
amount = super(MoneyField, self).clean(value[0])
currency = Currency.objects.get_currency(value[1])
if not currency:
raise forms.ValidationError(_(u'Input currency'))
return Money(amount=amount, currency=currency) | [
"[email protected]"
] | |
b59c5b90bec745c23ed7e23d949ecbabbe82375a | 4762b15498e642b39edfff3745e9ea134f081893 | /workshop_admin/moodle/migrations/0002_statement.py | 302a1756920a5a26ec21dd32551a7dd89f96533f | [] | no_license | Informatinks/informatics-back | d1d29e7297e547a8749b8da4d6c70565495fc509 | be298f72c072023be004895faf88cff9806650f6 | refs/heads/master | 2022-12-10T05:33:34.637043 | 2019-11-01T16:29:12 | 2019-11-01T16:29:12 | 171,288,054 | 0 | 3 | null | 2022-12-08T04:53:26 | 2019-02-18T13:20:53 | Python | UTF-8 | Python | false | false | 671 | py | # Generated by Django 2.2.1 on 2019-07-02 11:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('moodle', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Statement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('summary', models.TextField()),
],
options={
'db_table': 'mdl_statements',
'managed': False,
},
),
]
| [
"[email protected]"
] | |
118e3b71b782fa295f2a247d81a815d8673f60c5 | b4982d7ffb9e65db8432e7728f89fa2dd4878aa6 | /Object Oriented Concept/encapsulation.py | f8aed772cf8b60e7fcaabebf4a62a52ede6aebd2 | [] | no_license | anupjungkarki/30-Days-Python-Challenge | 1d8e794235ac60e098f704cefa2c4a461134e8a4 | 96be38590a159d59397b122f8ee171574f5a556c | refs/heads/master | 2023-06-04T13:24:16.250487 | 2022-12-05T09:46:26 | 2022-12-05T09:46:26 | 327,277,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,465 | py | # Encapsulation is one of the method of the fundamental concept in object oriented programming(OOP).Other programming have access specifier
# to handle with the private data but in python private data is easily access from the outside of the class so Encapsulation helps to
# restrict to access data and variable outside the class.
# Here access of private key is possible
class Car:
def __init__(self, name, mileage):
self._name = name
self.mileage = mileage
def description(self):
return f'The{self._name} car gives the mileage of {self.mileage} km/1hr'
obj = Car('BMW 7-Series', 39.53)
# accessing the protected variable by class method
print(obj.description())
# accessing the protected variable directly from outside
print(obj._name)
print(obj.mileage)
# Now lets work some encapsulation method
class Car:
def __init__(self, name, mileage):
self.__name = name # Private Variable
self.mileage = mileage
def description(self):
return f'The {self.__name} car given the mileage of {self.mileage} km/1hr'
obj = Car('BMW 7-Series', 39.53)
# Accessing the private variable by class method
print(obj.description())
# Accessing the private variable directly from the outside
# print(obj.__name)
# print(obj.mileage)
# It give an error while trying to access from the outside the class but we can also access by using Name MANGLING
# print(obj.mileage)
# print(obj._car__name) # mangled name
| [
"[email protected]"
] | |
009c97483cd7634d38ffeac4a1744beaae479f57 | ae7d5d11351af9201ce6181c48b8c60363c7ed00 | /packages/data/setup.py | 28faa87c3d988024ce6993d21ad79eeb365f0a85 | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | natefoo/galaxy | 818037d03f39ccfb3714c7e784fd64d7ad8f4d2e | 64150c5bd803e75ed032e9f15acd003bae92b5ef | refs/heads/master | 2023-08-17T02:57:02.580487 | 2020-03-26T13:33:01 | 2020-03-26T13:33:01 | 31,212,836 | 2 | 1 | NOASSERTION | 2019-04-25T12:30:28 | 2015-02-23T15:01:46 | Python | UTF-8 | Python | false | false | 3,207 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import ast
import os
import re
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
SOURCE_DIR = "galaxy"
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('%s/project_galaxy_data.py' % SOURCE_DIR, 'rb') as f:
init_contents = f.read().decode('utf-8')
def get_var(var_name):
pattern = re.compile(r'%s\s+=\s+(.*)' % var_name)
match = pattern.search(init_contents).group(1)
return str(ast.literal_eval(match))
version = get_var("__version__")
PROJECT_NAME = get_var("PROJECT_NAME")
PROJECT_URL = get_var("PROJECT_URL")
PROJECT_AUTHOR = get_var("PROJECT_AUTHOR")
PROJECT_EMAIL = get_var("PROJECT_EMAIL")
PROJECT_DESCRIPTION = get_var("PROJECT_DESCRIPTION")
TEST_DIR = 'tests'
PACKAGES = [
'galaxy',
'galaxy.datatypes',
'galaxy.datatypes.dataproviders',
'galaxy.datatypes.display_applications',
'galaxy.datatypes.util',
'galaxy.datatypes.test',
'galaxy.model',
'galaxy.model.dataset_collections',
'galaxy.model.migrate',
'galaxy.model.orm',
'galaxy.model.store',
'galaxy.model.tool_shed_install',
'galaxy.quota',
'galaxy.security',
]
ENTRY_POINTS = '''
[console_scripts]
galaxy-build-objects=galaxy.model.store.build_objects:main
galaxy-manage-db=galaxy.model.orm.scripts:manage_db
'''
PACKAGE_DATA = {
# Be sure to update MANIFEST.in for source dist.
'galaxy': [
'datatypes/test/*',
],
}
PACKAGE_DIR = {
SOURCE_DIR: SOURCE_DIR,
}
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
if os.path.exists("requirements.txt"):
requirements = open("requirements.txt").read().split("\n")
else:
# In tox, it will cover them anyway.
requirements = []
test_requirements = [
# TODO: put package test requirements here
]
setup(
name=PROJECT_NAME,
version=version,
description=PROJECT_DESCRIPTION,
long_description=readme + '\n\n' + history,
long_description_content_type='text/x-rst',
author=PROJECT_AUTHOR,
author_email=PROJECT_EMAIL,
url=PROJECT_URL,
packages=PACKAGES,
entry_points=ENTRY_POINTS,
package_data=PACKAGE_DATA,
package_dir=PACKAGE_DIR,
include_package_data=True,
install_requires=requirements,
license="AFL",
zip_safe=False,
keywords='galaxy',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Environment :: Console',
'License :: OSI Approved :: Academic Free License (AFL)',
'Operating System :: POSIX',
'Topic :: Software Development',
'Topic :: Software Development :: Code Generators',
'Topic :: Software Development :: Testing',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
test_suite=TEST_DIR,
tests_require=test_requirements
)
| [
"[email protected]"
] | |
d72a1163acfa6e897a9e131e9d3523083253254c | 0268f4c895f9f54e93fc7e3d2b0334206a4e6d9e | /day14/03-tk.py | a2ce5018023977ebed3408b81989034151538d9e | [] | no_license | zhangzongyan/python0702 | adebccacf26e300ec7a681bdf0f7ab7bdf228eeb | 7dcb6133d241fdf97b0812b9f25933ab389d2663 | refs/heads/master | 2020-03-22T21:05:51.218502 | 2018-08-15T09:54:42 | 2018-08-15T09:54:42 | 140,656,620 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | py |
import tkinter as tk
# 按钮触发的方法
def click_button():
print("已点击")
def click_button2():
print("再次点击")
root = tk.Tk()
root.geometry("400x600")
root.title("这是一个测试窗口")
#root.minsize(width=400, height=300)
#root.maxsize(width=400, height=300)
#root.resizable(width=0,height=0) # width 0不可伸缩, 1可伸缩
'''
# 按钮类Button
button = tk.Button(root, text="确定", fg="red", bg = "black", command=click_button)
button["fg"] = "blue"
button["text"] = "退出"
button.config(fg="yellow")
button.pack(side="top", expand=0) # pack布局
button.invoke() # 触发按钮
button.config(command = click_button2)
button2 = tk.Button(root, text="退出")
button2.pack(side="left", expand=0)
'''
# 网格布局
b1 = tk.Button(root, text="1")
b2 = tk.Button(root, text="2")
b3 = tk.Button(root, text="3")
b4 = tk.Button(root, text="4")
b5 = tk.Button(root, text="5")
b1.grid(row = 1, column=1)
b2.grid(row = 1, column=0)
b3.grid(row = 1, column=2)
b4.grid(row = 2, column=0, columnspan=2)
b5.grid(row = 2, column=2)
'''
#place
b1 = tk.Button(root, text="1")
b2 = tk.Button(root, text="2")
b1.place(x=0, y= 0)
b2.place(x=100, y=100)
'''
root.mainloop() # 不结束
| [
"[email protected]"
] | |
f0e927d2314e78e9861ea2b166aa51741f5e0816 | 648796da46791794ee5de7a8004da437c840323e | /pipeline_update/pipe_tools.py | 7c911ec5f4ef1987dbd89864e8381f7f40df112d | [] | no_license | YulianaGomez/ml_pp | 86530a2ee26bb2f39117ec6a458368a5c1c74104 | 3891350e1ef6fbf2fd29a792387182601f94c250 | refs/heads/master | 2020-03-07T19:09:25.958025 | 2018-05-25T22:34:28 | 2018-05-25T22:34:28 | 127,663,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,470 | py |
import numpy as np
import pdb
import itertools as it
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as dates
from sklearn.metrics import f1_score
import pandas as pd
import os
import sys
import datetime
import glob
import re
import graphviz
import seaborn as sns
import numpy as np
from scipy.stats import norm
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import f1_score
from sklearn.preprocessing import StandardScaler
from scipy import stats
import warnings
warnings.filterwarnings('ignore')
import statsmodels.api as sm
from patsy import dmatrices
from sklearn.metrics import roc_auc_score
from sklearn import tree
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import train_test_split
from sklearn import metrics
from sklearn.cross_validation import cross_val_score
import json
"""
Homework 2: ML Pipeline
Looking at data regarding credit distress and trying to predict who will
have credit distress in the next two years. Below is a pipeline of various
ml tools that can be used to analyze, explore, and clean data.
author: Yuliana Zamora
Date: April 17, 2018
"""
# Reading csv data from file - must be in same directory
def load_data(csv_file,nrows=None):
return pd.read_csv(csv_file,nrows=nrows)
#converts a string that is camelCase into snake_case
#https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
def camel_case(column_name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', column_name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
#Give data with specific column
def histogram(data_frame):
sns.distplot(data_frame)
plt.show()
#Given specific column or row, returns statistical summary
def summary(data_frame):
return data_frame.describe()
#Creating a correlation heat map from data set where var_name is the
#variable which has the most correlation
def cor_heat(data_frame,var_name):
corrmat = data_frame.corr()
k = 12
cols = corrmat.nlargest(k, var_name)[var_name].index
cm = np.corrcoef(data_frame[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
plt.show()
#Scatter plots of desired variables in list
def plotCorr(dataFrame, list):
sns.set()
sns.pairplot(dataFrame[list], size = 2.5)
return plt.show()
#Shows data is missing, we should delete the corresponding variable and pretend it never existed - threshold as parameter
def miss_data(data_frame):
total = data_frame.isnull().sum().sort_values(ascending=False)
percent = (data_frame.isnull().sum()/data_frame.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
return missing_data.head(20)
#Dealing with missing data
def clean_miss(data_frame):
missing_data = miss_data(data_frame)
data_frame = data_frame.drop((missing_data[missing_data['Total'] > 1]).index,1)
data_frame.isnull().sum().max() #just checking that there's no missing data missing...
return data_frame
#Univariate analysis - scaling data, prints out low range and high range
def scale(data_frame, var_scale):
data_scaled = StandardScaler().fit_transform(data_frame[var_scale][:,np.newaxis]);
low_range = data_scaled[data_scaled[:,0].argsort()][:10]
high_range= data_scaled[data_scaled[:,0].argsort()][-10:]
print('outer range (low) of the distribution:')
print(low_range)
print('\nouter range (high) of the distribution:')
print(high_range)
#Bivariate analysis
def bivariate(data_frame, var_1,var_2):
varx = var_1
vary = var_2
data = pd.concat([data_frame[varx], data_frame[vary]], axis=1)
data.plot.scatter(x=varx, y=vary, ylim=(0,100));
plt.show()
#histogram and normal probability plot
def norm_plot(data_frame,var_name):
sns.distplot(data_frame[var_name], fit=norm);
fig = plt.figure()
res = stats.probplot((data_frame)[var_name], plot=plt)
plt.show()
#Fill in empty values
def fill_empty(data_frame,var, new_var):
return data_frame[var].fillna(new_var)
#Discretize continuous variables
def descretize(data_frame, var, num):
return pd.cut(data_frame[var],num,retbins=True)
#Creating dummy variables from categorical variables
def dummy_var(data_frame, var):
return pd.get_dummies(data_frame[var])
#Creating dictionary with no repeated column items
def column_dic(data_frame):
dict = {line[:1]:line[1:].split()[0] for line in data_frame}
print (dict)
#Logistic regression = iv, independent variable, var_list - dependent variables
def logReg(data_frame, IV, var_list):
#organizing variable list to independent and dependent variables
#taking care of hyphen if first word contains it
if '-' in var_list[0]:
formula = IV + "~"+'Q("'+var_list[0]+'")'
else:
formula = IV + "~"+var_list[0]
#taking care of the rest of the potential hyphens
for i in range(1, len(var_list)):
if '-' in var_list[i]:
formula = formula + "+"+'Q("'+var_list[i]+'")'
else:
formula = formula + "+"+ var_list[i]
y, X = dmatrices(formula,data_frame, return_type="dataframe")
y = np.ravel(y)
model = LogisticRegression()
model = model.fit(X, y)
print (pd.DataFrame(list(zip(X.columns, np.transpose(model.coef_)))))
return model.score(X,y)
#Nearest Neighbors -
def knearest(data_frame,train, test):
#data_frame = data_frame.reshape(-1,1)
X = data_frame[train].reshape(-1,1)
Y = data_frame[test].reshape(-1,1)
X_train = X[:100]
Y_train = Y[:100]
X_validate = X[100:]
Y_validate = Y[100:]
neighbor = KNeighborsClassifier(n_neighbors = 2, weights ='uniform')
neighbor.fit(X_train, Y_train)
predicted = neighbor.predict(X_validate)
print (predicted)
def merging_data(dataframe_1,dataframe_2):
return pd.merge(dataframe_1,dataframe_2)
def merging_data2(dataframe_1,dataframe_2):
dataframe_1['fully_funded'] = 1
return dataframe_1
def get_combos(param_grid_dict):
all = sorted(param_grid_dict)
all_combos=[]
combinations = it.product(*(param_grid_dict[Name] for Name in all))
for i in combinations:
lil_combo = {}
for iter,key in enumerate(all):
lil_combo[key] = i[iter]
all_combos.append(lil_combo)
return (all_combos)
#change items into binary columns
def to_binary(df,array_col):
for i in array_col:
print(i)
#df[i] = df[i].apply(lambda x: 1 if x == 't' else (0 if x =='f' else np.nan))
df[i] = df[i].apply(lambda x: 1 if x == 't' else 0)
return df
#analyzing results from classifiers
def get_metrics(y_pred, val_Y):
metric_results ={}
#loss = f1_score(y_pred,val_Y)
perf_metrics = [.01,.02,.05,.10,.20,.30,.50]
for i in perf_metrics:
#pdb.set_trace()
metric_results["precision at" + str([i])] = precision_score(val_Y, y_pred[:,0] > 1 - i)
metric_results["recall at" + str([i])] = recall_score(val_Y, y_pred[:,0] > 1 - i)
metric_results["F1 at" + str([i])] = f1_score(val_Y, y_pred[:,0] > 1 - i)
metric_results["ROC"] = roc_auc_score(val_Y, y_pred[:,0])
prec,rec,thresh = precision_recall_curve(val_Y, y_pred[:,0])
metric_results["PREC"] = prec.tolist()
metric_results["REC"] = rec.tolist()
metric_results["THRESH"] = thresh.tolist()
return (metric_results)
#plotting precisison and recal graphs, input one column for y_pred in class_comp method
def plot_precision_recall(val_Y,y_pred,model_name,output_type):
#pdb.set_trace()
prec,rec,thresh = precision_recall_curve(val_Y, y_pred)
prec = prec[:-1]
recall_curve = rec[:-1]
pct_above_per_thresh = []
number_scored = len(y_pred)
for value in thresh:
num_above_thresh = len(y_pred[y_pred>=value])
pct_above_thresh = num_above_thresh / float(len(y_pred))
if pct_above_thresh <= 1:
pct_above_per_thresh.append(pct_above_thresh)
else:
pdb.set_trace()
pct_above_per_thresh = np.array(pct_above_per_thresh)
plt.clf()
fig, ax1 = plt.subplots()
ax1.plot(pct_above_per_thresh, prec, 'b')
print("PLOTTING STUFF")
print(pct_above_per_thresh)
print(prec[:-1])
ax1.set_xlabel('percent of population')
ax1.set_ylabel('precision', color='b')
ax2 = ax1.twinx()
ax2.plot(pct_above_per_thresh, recall_curve, 'r')
ax2.set_ylabel('recall', color='r')
ax1.set_ylim([0,1])
ax1.set_ylim([0,1])
ax2.set_xlim([0,1])
name = model_name
plt.title(name)
#pdb.set_trace()
if (output_type == 'save'):
plt.savefig(name)
elif (output_type == 'show'):
plt.show()
else:
plt.show()
def temp_val(data_frame,target,features):
models_params = {
LogisticRegression: {'C':[10**-1,10**-2,10**-3],'penalty':['l1','l2']},
KNeighborsClassifier:{'n_neighbors':[5,10,25,100], 'p':[1,2,3],'n_jobs':[2]},
DecisionTreeClassifier:{'max_depth': [5,10,15],'min_samples_leaf':[2,5,10]},
RandomForestClassifier:{'n_estimators':[100] , 'criterion':['gini','entropy'], 'max_features':['sqrt','log2'] , 'max_depth':[5,10],'n_jobs':[4], 'min_samples_leaf':[10,50,100]},
GradientBoostingClassifier:{'learning_rate':[.1,.01],'n_estimators':[100] ,'max_features':['sqrt','log2'] , 'max_depth':[1,2,3]},
BaggingClassifier:{'max_samples':[.1,.25,.65], 'n_jobs':[4]},
#SVC:{'kernel':['linear','rbf'],'gamma':[10,1,.1,.01], 'C':[10,1,.1,.01], 'probability':[True]}
}
# start time of our data
#start_time = '2002-09-13'
start_time_date = data_frame['date_posted'].min()
#last date of data including labels and outcomes that we have
#end_time = '2014-05-12'
end_time_date = data_frame['date_posted'].max()
#how far out do we want to predict (let's say in months for now)
prediction_windows = [1]
#how often is this prediction being made? every day? every month? once a year?
update_window = 12
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
#start_time_date = datetime.strptime(start_time, '%Y-%m-%d')
#end_time_date = datetime.strptime(end_time, '%Y-%m-%d')
for prediction_window in prediction_windows:
print(start_time_date,end_time_date)
test_end_time = end_time_date
while (test_end_time >= start_time_date + 2 * relativedelta(months=+prediction_window)):
test_start_time = test_end_time - relativedelta(months=+prediction_window)
train_end_time = test_start_time - relativedelta(days=+1) # minus 1 day
train_start_time = train_end_time - relativedelta(months=+prediction_window)
while (train_start_time >= start_time_date ):
#pdb.set_trace()
print (train_start_time,train_end_time,test_start_time,test_end_time, prediction_window)
train_start_time -= relativedelta(months=+prediction_window)
# call function to get data
train_set, test_set = extract_train_test_sets(train_start_time, train_end_time, test_start_time, test_end_time,data_frame)
#pdb.set_trace()
class_comp(train_set,test_set,target,features,models_params)
# fit on train data
# predict on test data
test_end_time -= relativedelta(months=+update_window)
#Splitting the data for training and testing sets
def extract_train_test_sets(train_start_time, train_end_time, test_start_time, test_end_time, df):
train_set = df[(df['date_posted'] > train_start_time) & (df['date_posted']<train_end_time)]
test_set = df[(df['date_posted'] > test_start_time) & (df['date_posted']<test_end_time)]
return train_set, test_set
def class_comp(train_set,test_set,target,features,models_params):
out = open("out.txt","a")
X = train_set[features]
y = train_set[target]
metrics = {}
#validation
val_X = test_set[features]
val_Y = test_set[target]
for m, m_param in models_params.items():
listofparam = get_combos(m_param)
print("start training for {0}".format(m))
out.write("start training for {0}\n".format(m))
for params in listofparam:
print (params)
out.write(json.dumps(params))
model = m(**params)
model.fit(X,y)
#y_pred vector of prob estimates
#val_y are true values
y_pred = model.predict_proba(val_X)
metrics[m] = get_metrics(y_pred,val_Y)
print("this is valy")
print (val_Y)
print("this is y_pred")
print (y_pred)
plot_precision_recall(val_Y, y_pred[:,0],model,'show')
out.write("----------------------------\n")
out.write("Using %s classifier \n" % models_params)
out.write(json.dumps(metrics[m]))
| [
"[email protected]"
] | |
15a05b515ac2a5fa114c23136a7a3cd7a6f74e1d | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/clips_pattern/pattern-master/pattern/text/de/inflect.py | 69c40f1e7abcdb645f18e27579ac930f4b905f6d | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 29,115 | py | #### PATTERN | DE | INFLECT ########################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2012 University of Antwerp, Belgium
# Author: Tom De Smedt <[email protected]>
# License: BSD (see LICENSE.txt for details).
####################################################################################################
# Regular expressions-based rules for German word inflection:
# - pluralization and singularization of nouns and adjectives,
# - conjugation of verbs,
# - attributive and predicative of adjectives,
# - comparative and superlative of adjectives.
# Accuracy (measured on CELEX German morphology word forms):
# 75% for gender()
# 72% for pluralize()
# 84% for singularize() (for nominative)
# 87% for Verbs.find_lemma()
# 87% for Verbs.find_lexeme()
# 98% for predicative
import os
import sys
import re
try:
MODULE = os.path.dirname(os.path.realpath(__file__))
except:
MODULE = ""
sys.path.insert(0, os.path.join(MODULE, "..", "..", "..", ".."))
from pattern.text import Verbs as _Verbs
from pattern.text import (
INFINITIVE, PRESENT, PAST, FUTURE,
FIRST, SECOND, THIRD,
SINGULAR, PLURAL, SG, PL,
INDICATIVE, IMPERATIVE, SUBJUNCTIVE,
PROGRESSIVE,
PARTICIPLE, GERUND
)
sys.path.pop(0)
VERB, NOUN, ADJECTIVE, ADVERB = "VB", "NN", "JJ", "RB"
VOWELS = "aeiouy"
re_vowel = re.compile(r"a|e|i|o|u|y", re.I)
is_vowel = lambda ch: ch in VOWELS
#### ARTICLE #######################################################################################
# German inflection of depends on gender, role and number + the determiner (if any).
# Inflection gender.
# Masculine is the most common, so it is the default for all functions.
MASCULINE, FEMININE, NEUTER, PLURAL = \
MALE, FEMALE, NEUTRAL, PLURAL = \
M, F, N, PL = "m", "f", "n", "p"
# Inflection role.
# - nom = subject, "Der Hund bellt" (the dog barks).
# - acc = object, "Das Mädchen küsst den Hund" (the girl kisses the dog).
# - dat = object (indirect), "Der Mann gibt einen Knochen zum Hund" (the man gives the dog a bone).
# - gen = property, "die Knochen des Hundes" (the dog's bone).
NOMINATIVE, ACCUSATIVE, DATIVE, GENITIVE = SUBJECT, OBJECT, INDIRECT, PROPERTY = \
"nominative", "accusative", "dative", "genitive"
article_definite = {
("m", "nom"): "der", ("f", "nom"): "die", ("n", "nom"): "das", ("p", "nom"): "die",
("m", "acc"): "den", ("f", "acc"): "die", ("n", "acc"): "das", ("p", "acc"): "die",
("m", "dat"): "dem", ("f", "dat"): "der", ("n", "dat"): "dem", ("p", "dat"): "den",
("m", "gen"): "des", ("f", "gen"): "der", ("n", "gen"): "des", ("p", "gen"): "der",
}
article_indefinite = {
("m", "nom"): "ein" , ("f", "nom"): "eine" , ("n", "nom"): "ein" , ("p", "nom"): "eine",
("m", "acc"): "einen", ("f", "acc"): "eine" , ("n", "acc"): "ein" , ("p", "acc"): "eine",
("m", "dat"): "einem", ("f", "dat"): "einer", ("n", "dat"): "einem", ("p", "dat"): "einen",
("m", "gen"): "eines", ("f", "gen"): "einer", ("n", "gen"): "eines", ("p", "gen"): "einer",
}
def definite_article(word, gender=MALE, role=SUBJECT):
""" Returns the definite article (der/die/das/die) for a given word.
"""
return article_definite.get((gender[:1].lower(), role[:3].lower()))
def indefinite_article(word, gender=MALE, role=SUBJECT):
""" Returns the indefinite article (ein) for a given word.
"""
return article_indefinite.get((gender[:1].lower(), role[:3].lower()))
DEFINITE = "definite"
INDEFINITE = "indefinite"
def article(word, function=INDEFINITE, gender=MALE, role=SUBJECT):
""" Returns the indefinite (ein) or definite (der/die/das/die) article for the given word.
"""
return function == DEFINITE \
and definite_article(word, gender, role) \
or indefinite_article(word, gender, role)
_article = article
def referenced(word, article=INDEFINITE, gender=MALE, role=SUBJECT):
""" Returns a string with the article + the word.
"""
return "%s %s" % (_article(word, article, gender, role), word)
#### GENDER #########################################################################################
gender_masculine = (
"ant", "ast", "ich", "ig", "ismus", "ling", "or", "us"
)
gender_feminine = (
"a", "anz", "ei", "enz", "heit", "ie", "ik", "in", "keit", "schaf", "sion", "sis",
u"tät", "tion", "ung", "ur"
)
gender_neuter = (
"chen", "icht", "il", "it", "lein", "ma", "ment", "tel", "tum", "um","al", "an", "ar",
u"ät", "ent", "ett", "ier", "iv", "o", "on", "nis", "sal"
)
gender_majority_vote = {
MASCULINE: (
"ab", "af", "ag", "ak", "am", "an", "ar", "at", "au", "ch", "ck", "eb", "ef", "eg",
"el", "er", "es", "ex", "ff", "go", "hn", "hs", "ib", "if", "ig", "ir", "kt", "lf",
"li", "ll", "lm", "ls", "lt", "mi", "nd", "nk", "nn", "nt", "od", "of", "og", "or",
"pf", "ph", "pp", "ps", "rb", "rd", "rf", "rg", "ri", "rl", "rm", "rr", "rs", "rt",
"rz", "ss", "st", "tz", "ub", "uf", "ug", "uh", "un", "us", "ut", "xt", "zt"
),
FEMININE: (
"be", "ce", "da", "de", "dt", "ee", "ei", "et", "eu", "fe", "ft", "ge", "he", "hr",
"ht", "ia", "ie", "ik", "in", "it", "iz", "ka", "ke", "la", "le", "me", "na", "ne",
"ng", "nz", "on", "pe", "ra", "re", "se", "ta", "te", "ue", "ur", "ve", "ze"
),
NEUTER: (
"ad", "al", "as", "do", "ed", "eh", "em", "en", "hl", "id", "il", "im", "io", "is",
"iv", "ix", "ld", "lk", "lo", "lz", "ma", "md", "mm", "mt", "no", "ns", "ol", "om",
"op", "os", "ot", "pt", "rk", "rn", "ro", "to", "tt", "ul", "um", "uz"
)
}
def gender(word, pos=NOUN):
""" Returns the gender (MALE, FEMALE or NEUTRAL) for nouns (majority vote).
Returns None for words that are not nouns.
"""
w = word.lower()
if pos == NOUN:
# Default rules (baseline = 32%).
if w.endswith(gender_masculine):
return MASCULINE
if w.endswith(gender_feminine):
return FEMININE
if w.endswith(gender_neuter):
return NEUTER
# Majority vote.
for g in gender_majority_vote:
if w.endswith(gender_majority_vote[g]):
return g
#### PLURALIZE ######################################################################################
plural_inflections = [
("aal", u"äle" ), ("aat", "aaten"), ( "abe", "aben" ), ("ach", u"ächer"), ("ade", "aden" ),
("age", "agen" ), ("ahn", "ahnen"), ( "ahr", "ahre" ), ("akt", "akte" ), ("ale", "alen" ),
("ame", "amen" ), ("amt", u"ämter"), ( "ane", "anen" ), ("ang", u"änge" ), ("ank", u"änke" ),
("ann", u"änner" ), ("ant", "anten"), ( "aph", "aphen"), ("are", "aren" ), ("arn", "arne" ),
("ase", "asen" ), ("ate", "aten" ), ( "att", u"ätter"), ("atz", u"ätze" ), ("aum", "äume" ),
("aus", u"äuser" ), ("bad", u"bäder"), ( "bel", "bel" ), ("ben", "ben" ), ("ber", "ber" ),
("bot", "bote" ), ("che", "chen" ), ( "chs", "chse" ), ("cke", "cken" ), ("del", "del" ),
("den", "den" ), ("der", "der" ), ( "ebe", "ebe" ), ("ede", "eden" ), ("ehl", "ehle" ),
("ehr", "ehr" ), ("eil", "eile" ), ( "eim", "eime" ), ("eis", "eise" ), ("eit", "eit" ),
("ekt", "ekte" ), ("eld", "elder"), ( "ell", "elle" ), ("ene", "enen" ), ("enz", "enzen" ),
("erd", "erde" ), ("ere", "eren" ), ( "erk", "erke" ), ("ern", "erne" ), ("ert", "erte" ),
("ese", "esen" ), ("ess", "esse" ), ( "est", "este" ), ("etz", "etze" ), ("eug", "euge" ),
("eur", "eure" ), ("fel", "fel" ), ( "fen", "fen" ), ("fer", "fer" ), ("ffe", "ffen" ),
("gel", "gel" ), ("gen", "gen" ), ( "ger", "ger" ), ("gie", "gie" ), ("hen", "hen" ),
("her", "her" ), ("hie", "hien" ), ( "hle", "hlen" ), ("hme", "hmen" ), ("hne", "hnen" ),
("hof", u"höfe" ), ("hre", "hren" ), ( "hrt", "hrten"), ("hse", "hsen" ), ("hte", "hten" ),
("ich", "iche" ), ("ick", "icke" ), ( "ide", "iden" ), ("ieb", "iebe" ), ("ief", "iefe" ),
("ieg", "iege" ), ("iel", "iele" ), ( "ien", "ium" ), ("iet", "iete" ), ("ife", "ifen" ),
("iff", "iffe" ), ("ift", "iften"), ( "ige", "igen" ), ("ika", "ikum" ), ("ild", "ilder" ),
("ilm", "ilme" ), ("ine", "inen" ), ( "ing", "inge" ), ("ion", "ionen"), ("ise", "isen" ),
("iss", "isse" ), ("ist", "isten"), ( "ite", "iten" ), ("itt", "itte" ), ("itz", "itze" ),
("ium", "ium" ), ("kel", "kel" ), ( "ken", "ken" ), ("ker", "ker" ), ("lag", u"läge" ),
("lan", u"läne" ), ("lar", "lare" ), ( "lei", "leien"), ("len", "len" ), ("ler", "ler" ),
("lge", "lgen" ), ("lie", "lien" ), ( "lle", "llen" ), ("mel", "mel" ), ("mer", "mer" ),
("mme", "mmen" ), ("mpe", "mpen" ), ( "mpf", "mpfe" ), ("mus", "mus" ), ("mut", "mut" ),
("nat", "nate" ), ("nde", "nden" ), ( "nen", "nen" ), ("ner", "ner" ), ("nge", "ngen" ),
("nie", "nien" ), ("nis", "nisse"), ( "nke", "nken" ), ("nkt", "nkte" ), ("nne", "nnen" ),
("nst", "nste" ), ("nte", "nten" ), ( "nze", "nzen" ), ("ock", u"öcke" ), ("ode", "oden" ),
("off", "offe" ), ("oge", "ogen" ), ( "ohn", u"öhne" ), ("ohr", "ohre" ), ("olz", u"ölzer" ),
("one", "onen" ), ("oot", "oote" ), ( "opf", u"öpfe" ), ("ord", "orde" ), ("orm", "ormen" ),
("orn", u"örner" ), ("ose", "osen" ), ( "ote", "oten" ), ("pel", "pel" ), ("pen", "pen" ),
("per", "per" ), ("pie", "pien" ), ( "ppe", "ppen" ), ("rag", u"räge" ), ("rau", u"raün" ),
("rbe", "rben" ), ("rde", "rden" ), ( "rei", "reien"), ("rer", "rer" ), ("rie", "rien" ),
("rin", "rinnen"), ("rke", "rken" ), ( "rot", "rote" ), ("rre", "rren" ), ("rte", "rten" ),
("ruf", "rufe" ), ("rzt", "rzte" ), ( "sel", "sel" ), ("sen", "sen" ), ("ser", "ser" ),
("sie", "sien" ), ("sik", "sik" ), ( "sse", "ssen" ), ("ste", "sten" ), ("tag", "tage" ),
("tel", "tel" ), ("ten", "ten" ), ( "ter", "ter" ), ("tie", "tien" ), ("tin", "tinnen"),
("tiv", "tive" ), ("tor", "toren"), ( "tte", "tten" ), ("tum", "tum" ), ("tur", "turen" ),
("tze", "tzen" ), ("ube", "uben" ), ( "ude", "uden" ), ("ufe", "ufen" ), ("uge", "ugen" ),
("uhr", "uhren" ), ("ule", "ulen" ), ( "ume", "umen" ), ("ung", "ungen"), ("use", "usen" ),
("uss", u"üsse" ), ("ute", "uten" ), ( "utz", "utz" ), ("ver", "ver" ), ("weg", "wege" ),
("zer", "zer" ), ("zug", u"züge" ), (u"ück", u"ücke" )
]
def pluralize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}):
""" Returns the plural of a given word.
The inflection is based on probability rather than gender and role.
"""
w = word.lower().capitalize()
if word in custom:
return custom[word]
if pos == NOUN:
for a, b in plural_inflections:
if w.endswith(a):
return w[:-len(a)] + b
# Default rules (baseline = 69%).
if w.startswith("ge"):
return w
if w.endswith("gie"):
return w
if w.endswith("e"):
return w + "n"
if w.endswith("ien"):
return w[:-2] + "um"
if w.endswith(("au", "ein", "eit", "er", "en", "el", "chen", "mus", u"tät", "tik", "tum", "u")):
return w
if w.endswith(("ant", "ei", "enz", "ion", "ist", "or", "schaft", "tur", "ung")):
return w + "en"
if w.endswith("in"):
return w + "nen"
if w.endswith("nis"):
return w + "se"
if w.endswith(("eld", "ild", "ind")):
return w + "er"
if w.endswith("o"):
return w + "s"
if w.endswith("a"):
return w[:-1] + "en"
# Inflect common umlaut vowels: Kopf => Köpfe.
if w.endswith(("all", "and", "ang", "ank", "atz", "auf", "ock", "opf", "uch", "uss")):
umlaut = w[-3]
umlaut = umlaut.replace("a", u"ä")
umlaut = umlaut.replace("o", u"ö")
umlaut = umlaut.replace("u", u"ü")
return w[:-3] + umlaut + w[-2:] + "e"
for a, b in (
("ag", u"äge"),
("ann", u"änner"),
("aum", u"äume"),
("aus", u"äuser"),
("zug", u"züge")):
if w.endswith(a):
return w[:-len(a)] + b
return w + "e"
return w
#### SINGULARIZE ###################################################################################

# Suffix map trained on word-frequency data: (plural suffix, singular suffix).
# singularize() scans this list in order, so longer suffixes are listed first.
singular_inflections = [
    ( "innen", "in" ), (u"täten", u"tät"), ( "ahnen", "ahn"), ( "enten", "ent"), (u"räser", "ras"),
    ( "hrten", "hrt"), (u"ücher", "uch"), (u"örner", "orn"), (u"änder", "and"), (u"ürmer", "urm"),
    ( "ahlen", "ahl"), ( "uhren", "uhr"), (u"ätter", "att"), ( "suren", "sur"), ( "chten", "cht"),
    ( "kuren", "kur"), ( "erzen", "erz"), (u"güter", "gut"), ( "soren", "sor"), (u"änner", "ann"),
    (u"äuser", "aus"), ( "taten", "tat"), ( "isten", "ist"), (u"bäder", "bad"), (u"ämter", "amt"),
    ( "eiten", "eit"), ( "raten", "rat"), ( "ormen", "orm"), ( "ionen", "ion"), ( "nisse", "nis"),
    (u"ölzer", "olz"), ( "ungen", "ung"), (u"läser", "las"), (u"ächer", "ach"), ( "urten", "urt"),
    ( "enzen", "enz"), ( "aaten", "aat"), ( "aphen", "aph"), (u"öcher", "och"), (u"türen", u"tür"),
    ( "sonen", "son"), (u"ühren", u"ühr"), (u"ühner", "uhn"), ( "toren", "tor"), (u"örter", "ort"),
    ( "anten", "ant"), (u"räder", "rad"), ( "turen", "tur"), (u"äuler", "aul"), ( u"änze", "anz"),
    (  "tten", "tte"), (  "mben", "mbe"), ( u"ädte", "adt"), (  "llen", "lle"), (  "ysen", "yse"),
    (  "rben", "rbe"), (  "hsen", "hse"), ( u"raün", "rau"), (  "rven", "rve"), (  "rken", "rke"),
    ( u"ünge", "ung"), ( u"üten", u"üte"), (  "usen", "use"), (  "tien", "tie"), ( u"läne", "lan"),
    (  "iben", "ibe"), (  "ifen", "ife"), (  "ssen", "sse"), (  "gien", "gie"), (  "eten", "ete"),
    (  "rden", "rde"), ( u"öhne", "ohn"), ( u"ärte", "art"), (  "ncen", "nce"), ( u"ünde", "und"),
    (  "uben", "ube"), (  "lben", "lbe"), ( u"üsse", "uss"), (  "agen", "age"), ( u"räge", "rag"),
    (  "ogen", "oge"), (  "anen", "ane"), (  "sken", "ske"), (  "eden", "ede"), ( u"össe", "oss"),
    ( u"ürme", "urm"), (  "ggen", "gge"), ( u"üren", u"üre"), (  "nten", "nte"), ( u"ühle", u"ühl"),
    ( u"änge", "ang"), (  "mmen", "mme"), (  "igen", "ige"), (  "nken", "nke"), ( u"äcke", "ack"),
    (  "oden", "ode"), (  "oben", "obe"), ( u"ähne", "ahn"), ( u"änke", "ank"), (  "inen", "ine"),
    (  "seen", "see"), ( u"äfte", "aft"), (  "ulen", "ule"), ( u"äste", "ast"), (  "hren", "hre"),
    ( u"öcke", "ock"), (  "aben", "abe"), ( u"öpfe", "opf"), (  "ugen", "uge"), (  "lien", "lie"),
    ( u"ände", "and"), ( u"ücke", u"ück"), (  "asen", "ase"), (  "aden", "ade"), (  "dien", "die"),
    (  "aren", "are"), (  "tzen", "tze"), ( u"züge", "zug"), ( u"üfte", "uft"), (  "hien", "hie"),
    (  "nden", "nde"), ( u"älle", "all"), (  "hmen", "hme"), (  "ffen", "ffe"), (  "rmen", "rma"),
    (  "olen", "ole"), (  "sten", "ste"), (  "amen", "ame"), ( u"höfe", "hof"), ( u"üste", "ust"),
    (  "hnen", "hne"), ( u"ähte", "aht"), (  "umen", "ume"), (  "nnen", "nne"), (  "alen", "ale"),
    (  "mpen", "mpe"), (  "mien", "mie"), (  "rten", "rte"), (  "rien", "rie"), ( u"äute", "aut"),
    (  "uden", "ude"), (  "lgen", "lge"), (  "ngen", "nge"), (  "iden", "ide"), ( u"ässe", "ass"),
    (  "osen", "ose"), (  "lken", "lke"), (  "eren", "ere"), ( u"üche", "uch"), ( u"lüge", "lug"),
    (  "hlen", "hle"), (  "isen", "ise"), ( u"ären", u"äre"), ( u"töne", "ton"), (  "onen", "one"),
    (  "rnen", "rne"), ( u"üsen", u"üse"), ( u"haün", "hau"), (  "pien", "pie"), (  "ihen", "ihe"),
    ( u"ürfe", "urf"), (  "esen", "ese"), ( u"ätze", "atz"), (  "sien", "sie"), ( u"läge", "lag"),
    (  "iven", "ive"), ( u"ämme", "amm"), ( u"äufe", "auf"), (  "ppen", "ppe"), (  "enen", "ene"),
    (  "lfen", "lfe"), ( u"äume", "aum"), (  "nien", "nie"), (  "unen", "une"), (  "cken", "cke"),
    (  "oten", "ote"), (   "mie", "mie"), (   "rie", "rie"), (   "sis", "sen"), (   "rin", "rin"),
    (   "ein", "ein"), (   "age", "age"), (   "ern", "ern"), (   "ber", "ber"), (   "ion", "ion"),
    (   "inn", "inn"), (   "ben", "ben"), (  u"äse", u"äse"), (   "eis", "eis"), (   "hme", "hme"),
    (   "iss", "iss"), (   "hen", "hen"), (   "fer", "fer"), (   "gie", "gie"), (   "fen", "fen"),
    (   "her", "her"), (   "ker", "ker"), (   "nie", "nie"), (   "mer", "mer"), (   "ler", "ler"),
    (   "men", "men"), (   "ass", "ass"), (   "ner", "ner"), (   "per", "per"), (   "rer", "rer"),
    (   "mus", "mus"), (   "abe", "abe"), (   "ter", "ter"), (   "ser", "ser"), (  u"äle", "aal"),
    (   "hie", "hie"), (   "ger", "ger"), (   "tus", "tus"), (   "gen", "gen"), (   "ier", "ier"),
    (   "ver", "ver"), (   "zer", "zer"),
]

# Irregular singulars that the suffix rules above would get wrong.
singular = {
    u"Löwen": u"Löwe",
}
def singularize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}):
    """ Returns the singular of a given word.
        The inflection is based on probability rather than gender and role.
    """
    noun = word.lower().capitalize()
    if word in custom:
        return custom[word]
    if word in singular:
        # Known irregular form.
        return singular[word]
    if pos != NOUN:
        # Only nouns are inflected.
        return noun
    # Trained suffix table takes precedence over the fallback below.
    for suffix, singular_suffix in singular_inflections:
        if noun.endswith(suffix):
            return noun[:-len(suffix)] + singular_suffix
    # Default rule: strip known plural suffixes (baseline = 51%).
    for plural_suffix in ("nen", "en", "n", "e", "er", "s"):
        if noun.endswith(plural_suffix):
            noun = noun[:-len(plural_suffix)]
            break
    # Corrections (these add about 1% accuracy):
    if noun.endswith(("rr", "rv", "nz")):
        return noun + "e"
    return noun
#### VERB CONJUGATION ##############################################################################
# The verb table was trained on CELEX and contains the top 2000 most frequent verbs.

# Inseparable prefixes: they stay attached to the stem in all forms
# (e.g. "verstehen" => "ich verstehe").
prefix_inseparable = (
    "be", "emp", "ent", "er", "ge", "miss", u"über", "unter", "ver", "voll", "wider", "zer"
)
# Separable prefixes: they detach in finite forms
# (e.g. "niedersetzen" => "ich setze nieder").
prefix_separable = (
    "ab", "an", "auf", "aus", "bei", "durch", "ein", "fort", "mit", "nach", "vor", "weg",
    u"zurück", "zusammen", "zu", "dabei", "daran", "da", "empor", "entgegen", "entlang",
    "fehl", "fest", u"gegenüber", "gleich", "herab", "heran", "herauf", "heraus", "herum",
    "her", "hinweg", "hinzu", "hin", "los", "nieder", "statt", "umher", "um", "weg",
    "weiter", "wieder", "zwischen"
) + ( # There are many more...
    "dort", "fertig", "frei", "gut", "heim", "hoch", "klein", "klar", "nahe", "offen", "richtig"
)
prefixes = prefix_inseparable + prefix_separable
def encode_sz(s):
    """ Replaces each Eszett (ß) in the given string with "ss". """
    return "ss".join(s.split(u"ß"))
def decode_sz(s):
    """ Replaces each "ss" in the given string with an Eszett (ß). """
    return u"ß".join(s.split("ss"))
class Verbs(_Verbs):
    # German verb conjugation backed by the trained lexicon in de-verbs.txt,
    # with rule-based fallbacks for unknown verbs.

    def __init__(self):
        # format maps lexicon columns to tense slots; default fills missing
        # slots from another column (e.g. slot 6 falls back to column 4).
        _Verbs.__init__(self, os.path.join(MODULE, "de-verbs.txt"),
            language = "de",
              format = [0, 1, 2, 3, 4, 5, 8, 17, 18, 19, 20, 21, 24, 52, 54, 53, 55, 56, 58, 59, 67, 68, 70, 71],
             default = {6: 4, 22: 20, 57: 55, 60: 58, 69: 67, 72: 70}
            )

    def find_lemma(self, verb):
        """ Returns the base form of the given inflected verb, using a rule-based approach.
        """
        v = verb.lower()
        # Common prefixes: be-finden and emp-finden probably inflect like finden.
        if not (v.startswith("ge") and v.endswith("t")): # Probably gerund.
            for prefix in prefixes:
                if v.startswith(prefix) and v[len(prefix):] in self.inflections:
                    return prefix + self.inflections[v[len(prefix):]]
        # Common suffixes: setze nieder => niedersetzen.
        b, suffix = " " in v and v.split()[:2] or (v, "")
        # Infinitive -ln: trommeln.
        if b.endswith(("ln", "rn")):
            return b
        # Lemmatize regular inflections (longest candidate suffix first).
        for x in ("test", "est", "end", "ten", "tet", "en", "et", "te", "st", "e", "t"):
            if b.endswith(x): b = b[:-len(x)]; break
        # Subjunctive: hielte => halten, schnitte => schneiden.
        for x, y in (
          ("ieb", "eib"), ( "ied", "eid"), ( "ief", "auf" ), ( "ieg", "eig" ), ("iel", "alt"),
          ("ien", "ein"), ("iess", "ass"), (u"ieß", u"aß" ), ( "iff", "eif" ), ("iss", "eiss"),
          (u"iß", u"eiß"), (  "it", "eid"), ( "oss", "iess"), (u"öss", "iess")):
            if b.endswith(x): b = b[:-len(x)] + y; break
        # Undo double substitutions produced by the rules above.
        b = b.replace("eeiss", "eiss")
        b = b.replace("eeid", "eit")
        # Subjunctive: wechselte => wechseln
        if not b.endswith(("e", "l")) and not (b.endswith("er") and len(b) >= 3 and not b[-3] in VOWELS):
            b = b + "e"
        # abknallst != abknalln => abknallen
        if b.endswith(("hl", "ll", "ul", "eil")):
            b = b + "e"
        # Strip ge- from (likely) gerund:
        if b.startswith("ge") and v.endswith("t"):
            b = b[2:]
        # Corrections (these add about 1.5% accuracy):
        if b.endswith(("lnde", "rnde")):
            b = b[:-3]
        if b.endswith(("ae", "al", u"öe", u"üe")):
            b = b.rstrip("e") + "te"
        if b.endswith(u"äl"):
            b = b + "e"
        return suffix + b + "n"

    def find_lexeme(self, verb):
        """ For a regular verb (base form), returns the forms using a rule-based approach.
        """
        v = verb.lower()
        # Stem = infinitive minus -en, -ln, -rn.
        b = b0 = re.sub("en$", "", re.sub("ln$", "l", re.sub("rn$", "r", v)))
        # Split common prefixes.
        x, x1, x2 = "", "", ""
        for prefix in prefix_separable:
            if v.startswith(prefix):
                b, x = b[len(prefix):], prefix
                x1 = (" " + x).rstrip()   # " auf" — appended to finite forms.
                x2 = x + "ge"             # "aufge" — used for the past participle.
                break
        # Present tense 1sg and subjunctive -el: handeln => ich handle, du handlest.
        pl = b.endswith("el") and b[:-2]+"l" or b
        # Present tense 1pl -el: handeln => wir handeln
        pw = v.endswith(("ln", "rn")) and v or b+"en"
        # Present tense ending in -d or -t gets -e:
        pr = b.endswith(("d", "t")) and b+"e" or b
        # Present tense 2sg gets -st, unless stem ends with -s or -z.
        p2 = pr.endswith(("s","z")) and pr+"t" or pr+"st"
        # Present participle: spiel + -end, arbeiten + -d:
        pp = v.endswith(("en", "ln", "rn")) and v+"d" or v+"end"
        # Past tense regular:
        pt = encode_sz(pr) + "t"
        # Past participle: haushalten => hausgehalten
        ge = (v.startswith(prefix_inseparable) or b.endswith(("r","t"))) and pt or "ge"+pt
        ge = x and x+"ge"+pt or ge
        # Present subjunctive: stem + -e, -est, -en, -et:
        s1 = encode_sz(pl)
        # Past subjunctive: past (usually with Umlaut) + -e, -est, -en, -et:
        s2 = encode_sz(pt)
        # Construct the lexeme (slot order must match the format list in __init__):
        lexeme = a = [
            v,
            pl+"e"+x1, p2+x1, pr+"t"+x1, pw+x1, pr+"t"+x1, pp,            # present
            pt+"e"+x1, pt+"est"+x1, pt+"e"+x1, pt+"en"+x1, pt+"et"+x1, ge, # past
            b+"e"+x1, pr+"t"+x1, x+pw,                                     # imperative
            s1+"e"+x1, s1+"est"+x1, s1+"en"+x1, s1+"et"+x1,                # subjunctive I
            s2+"e"+x1, s2+"est"+x1, s2+"en"+x1, s2+"et"+x1                 # subjunctive II
        ]
        # Encode Eszett (ß) and attempt to retrieve from the lexicon.
        # Decode Eszett for present and imperative.
        if encode_sz(v) in self:
            a = self[encode_sz(v)]
            a = [decode_sz(v) for v in a[:7]] + a[7:13] + [decode_sz(v) for v in a[13:20]] + a[20:]
        # Since the lexicon does not contain imperative for all verbs, don't simply return it.
        # Instead, update the rule-based lexeme with inflections from the lexicon.
        return [a[i] or lexeme[i] for i in range(len(a))]

    def tenses(self, verb, parse=True):
        """ Returns a list of possible tenses for the given inflected verb.
        """
        tenses = _Verbs.tenses(self, verb, parse)
        if len(tenses) == 0:
            # auswirkte => wirkte aus
            for prefix in prefix_separable:
                if verb.startswith(prefix):
                    tenses = _Verbs.tenses(self, verb[len(prefix):] + " " + prefix, parse)
                    break
        return tenses
# Module-level singleton; loads the verb lexicon once at import time.
verbs = Verbs()

# Convenience aliases so callers can use module-level functions.
conjugate, lemma, lexeme, tenses = \
    verbs.conjugate, verbs.lemma, verbs.lexeme, verbs.tenses
#### ATTRIBUTIVE & PREDICATIVE #####################################################################

# Adjective endings keyed on (gender, case), where gender is one of
# m/f/n/p (masculine, feminine, neuter, plural) and case is nom/acc/dat/gen.

# Strong inflection: no article.
adjectives_strong = {
    ("m", "nom"): "er", ("f", "nom"): "e" , ("n", "nom"): "es", ("p", "nom"): "e",
    ("m", "acc"): "en", ("f", "acc"): "e" , ("n", "acc"): "es", ("p", "acc"): "e",
    ("m", "dat"): "em", ("f", "dat"): "er", ("n", "dat"): "em", ("p", "dat"): "en",
    ("m", "gen"): "en", ("f", "gen"): "er", ("n", "gen"): "en", ("p", "gen"): "er",
}

# Mixed inflection: after indefinite article ein & kein and possessive determiners.
adjectives_mixed = {
    ("m", "nom"): "er", ("f", "nom"): "e" , ("n", "nom"): "es", ("p", "nom"): "en",
    ("m", "acc"): "en", ("f", "acc"): "e" , ("n", "acc"): "es", ("p", "acc"): "en",
    ("m", "dat"): "en", ("f", "dat"): "en", ("n", "dat"): "en", ("p", "dat"): "en",
    ("m", "gen"): "en", ("f", "gen"): "en", ("n", "gen"): "en", ("p", "gen"): "en",
}

# Weak inflection: after definite article.
adjectives_weak = {
    ("m", "nom"): "e",  ("f", "nom"): "e" , ("n", "nom"): "e",  ("p", "nom"): "en",
    ("m", "acc"): "en", ("f", "acc"): "e" , ("n", "acc"): "e",  ("p", "acc"): "en",
    ("m", "dat"): "en", ("f", "dat"): "en", ("n", "dat"): "en", ("p", "dat"): "en",
    ("m", "gen"): "en", ("f", "gen"): "en", ("n", "gen"): "en", ("p", "gen"): "en",
}

# Uninflected + exceptions.
adjective_attributive = {
    "etwas" : "etwas",
    "genug" : "genug",
    "viel"  : "viel",
    "wenig" : "wenig"
}
def attributive(adjective, gender=MALE, role=SUBJECT, article=None):
    """ For a predicative adjective, returns the attributive form (lowercase).
        In German, the attributive is formed with -e, -em, -en, -er or -es,
        depending on gender (masculine, feminine, neuter or plural) and role
        (nominative, accusative, dative, genitive).
    """
    form = adjective.lower()
    gen = gender[:1].lower()
    case = role[:3].lower()
    art = article.lower() if article else None
    if form in adjective_attributive:
        return adjective_attributive[form]
    # No article, personal pronoun or indefinite quantifier => strong inflection.
    if art is None \
    or art in ("mir", "dir", "ihm") \
    or art in ("ein", "etwas", "mehr") \
    or art.startswith(("all", "mehrer", "wenig", "viel")):
        return form + adjectives_strong.get((gen, case), "")
    # Indefinite article or possessive determiner => mixed inflection.
    if art.startswith(("ein", "kein")) \
    or art.startswith(("mein", "dein", "sein", "ihr", "Ihr", "unser", "euer")):
        return form + adjectives_mixed.get((gen, case), "")
    # Definite article or demonstrative => weak inflection.
    if art in ("arm", "alt", "all", "der", "die", "das", "den", "dem", "des") \
    or art.startswith((
       "derselb", "derjenig", "jed", "jeglich", "jen", "manch",
       "dies", "solch", "welch")):
        return form + adjectives_weak.get((gen, case), "")
    # Default to strong inflection.
    return form + adjectives_strong.get((gen, case), "")
def predicative(adjective):
    """ Returns the predicative adjective (lowercase).
        In German, the attributive form preceding a noun is always used:
        "ein kleiner Junge" => strong, masculine, nominative,
        "eine schöne Frau" => mixed, feminine, nominative,
        "der kleine Prinz" => weak, masculine, nominative, etc.
        The predicative is useful for lemmatization.
    """
    w = adjective.lower()
    if len(w) <= 3:
        # Too short to carry an inflection suffix.
        return w
    for suffix in ("em", "en", "er", "es", "e"):
        if not w.endswith(suffix):
            continue
        # Strip the suffix, but never shorten the word below three characters.
        stem = w[:max(-len(suffix), -(len(w) - 3))]
        if stem.endswith("bl"):    # plausibles => plausibel
            stem = stem[:-1] + "el"
        elif stem.endswith("pr"):  # propres => proper
            stem = stem[:-1] + "er"
        return stem
    return w
#### COMPARATIVE & SUPERLATIVE #####################################################################

# Suffixes inserted by grade() between the adjective stem and its inflection.
COMPARATIVE = "er"
SUPERLATIVE = "st"
def grade(adjective, suffix=COMPARATIVE):
    """ Returns the comparative or superlative form of the given (inflected) adjective.
    """
    stem = predicative(adjective)
    # After a stem in -s or -ß the superlative "st" loses its "s":
    # groß => großt, schön => schönst.
    if suffix == SUPERLATIVE and stem.endswith(("s", u"ß")):
        suffix = suffix[1:]
    # Insert the suffix between stem and inflection:
    # große => großere, schönes => schöneres.
    split = len(stem)
    return adjective[:split] + suffix + adjective[split:]
def comparative(adjective):
    """ Returns the comparative form (-er) of the given adjective. """
    return grade(adjective, COMPARATIVE)
def superlative(adjective):
    """ Returns the superlative form (-st) of the given adjective. """
    return grade(adjective, SUPERLATIVE)
#print(comparative(u"schönes"))
#print(superlative(u"schönes"))
#print(superlative(u"große"))
| [
"[email protected]"
] | |
c3d0c6798414ea088eb7b3efc5bd017d1d44eda3 | 55267c377da7a2a6676978d958e07c07bfc9d9b6 | /nbutil.py | 395b05b0e7c54b1f0b25ec174c5bb9c33908ef84 | [] | no_license | larsks/netbox-data-scripts | 54916afab045bed663c2a08ca90f102bf7efeeaa | 91aa6554aa815bdfc894a500037e942962c16705 | refs/heads/master | 2023-01-11T16:50:50.551000 | 2020-11-11T22:33:14 | 2020-11-11T22:33:14 | 309,502,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,594 | py | import click
import json
import logging
import pynetbox
import resources
import netbox
# Module logger; basicConfig runs at import time so the CLI always logs DEBUG.
LOG = logging.getLogger(__name__)
logging.basicConfig(level='DEBUG')
@click.group(
    context_settings=dict(auto_envvar_prefix='NETBOX'))
@click.option('--url', '-u')
@click.option('--token', '-t')
@click.pass_context
def main(ctx, url, token):
    # Root CLI group: stores a configured Netbox client on the click context
    # so subcommands can reach it via ctx.obj. auto_envvar_prefix means
    # --url/--token may also come from NETBOX_URL / NETBOX_TOKEN.
    ctx.obj = netbox.Netbox(url, token=token)
def _get_or_create(endpoint, filters, obj, label):
    """Return the first Netbox API object matching *filters*, creating it
    from *obj* (a resources.* instance) when no match exists."""
    try:
        return endpoint.filter(**filters)[0]
    except IndexError:
        LOG.info('create new %s %s', label, obj)
        return endpoint.create(**obj.to_dict())


@main.command()
@click.option('--site', '-s', required=True)
@click.option('--device-role', '-r')
@click.argument('factfiles', nargs=-1)
@click.pass_context
def load(ctx, site, device_role, factfiles):
    """Load physical devices and their interfaces into Netbox from Ansible
    fact files, creating sites/manufacturers/device types/roles on demand.

    NOTE(review): --device-role is accepted but currently unused; the role
    comes from the parsed facts — confirm whether it should override.
    """
    api = ctx.obj
    devices = []

    # Parse every fact file; skip anything that is not a physical host.
    for factfile in factfiles:
        with open(factfile) as fd:
            facts = json.load(fd)

        if 'ansible_facts' not in facts:
            LOG.warning('invalid fact file: %s', factfile)
            continue

        if facts['ansible_facts'].get('ansible_virtualization_role') != 'host':
            LOG.warning('skipping virtual machine: %s', factfile)
            continue

        try:
            dev = resources.device.from_ansible_facts(facts['ansible_facts'])
        except KeyError as err:
            LOG.warning('failed loading device from %s: missing %s',
                        factfile, err)
        else:
            devices.append(dev)

    for dev in devices:
        try:
            _dev = api.dcim.devices.filter(name=dev.name)[0]
        except IndexError:
            LOG.info('adding %s', dev)

            try:
                _site = api.dcim.sites.filter(name=site)[0]
            except IndexError:
                _site = api.dcim.sites.create(name=site)

            manufacturer = _get_or_create(
                api.dcim.manufacturers,
                dict(name=dev.device_type.manufacturer),
                resources.manufacturer(name=dev.device_type.manufacturer),
                'manufacturer')

            devtype = _get_or_create(
                api.dcim.device_types,
                dict(manufacturer_name=manufacturer.name,
                     model=dev.device_type.model),
                resources.device_type(manufacturer=manufacturer.id,
                                      model=dev.device_type.model),
                'device type')

            devrole = _get_or_create(
                api.dcim.device_roles,
                dict(name=dev.device_role),
                resources.device_role(name=dev.device_role),
                'device role')

            dev.site = _site.id
            dev.device_type = devtype.id
            dev.device_role = devrole.id

            try:
                _dev = api.dcim.devices.create(**dev.to_dict())
            except pynetbox.core.query.RequestError as err:
                # BUG FIX: this branch used to call breakpoint() (hanging any
                # non-interactive run) and then fell through with _dev unbound,
                # raising NameError below. Log the failure and skip the device.
                LOG.error('failed to create device %s: %s', dev, err)
                continue

        for interface in dev.interfaces.interfaces:
            try:
                _iface = api.dcim.interfaces.filter(
                    device_id=_dev.id, name=interface.name)[0]
            except IndexError:
                LOG.info('create new interface %s on %s', interface, dev)
                _iface = api.dcim.interfaces.create(
                    device=_dev.id, **interface.to_dict())
# Script entry point.
if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
2157fa5f00a7ea2f2da78c201b0648401aa85d19 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_attending.py | 42c967da90aa064ad1ee81dd35207c570ee2ae1f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
# class header
class _ATTENDING():
def __init__(self,):
self.name = "ATTENDING"
self.definitions = attend
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['attend']
| [
"[email protected]"
] | |
b14c2b98a07fad5acc877d946f624a0191ab7c48 | 3cfd5edbacb48d5197d709f52f77433194cedf2a | /app/middlewares/acl.py | 72dd97eb8c38bb3d704106b06790ff099a0bf2a5 | [] | no_license | pikoUsername/A-Search | 1ebb3062a930225cc3a7e5a515f77371aed862b6 | 59377c4e8cb6d0af09375aca1c03f35c371a212f | refs/heads/master | 2023-02-18T19:10:01.007817 | 2021-01-18T14:10:48 | 2021-01-18T14:10:48 | 325,986,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | from typing import Optional
from aiogram import types
from aiogram.dispatcher.middlewares import BaseMiddleware
from ..models import dbc, User, Chat
class AclMiddleware(BaseMiddleware):
    # aiogram middleware: guarantees that every incoming update has matching
    # User/Chat rows in the database and injects them into the handler data.

    async def setup_chat(self, data: dict, tg_user: types.User, tg_chat: Optional[types.Chat] = None):
        """Look up (or lazily create) the DB User/Chat for this update and
        store them under data["user"] / data["chat"]."""
        user_id = tg_user.id
        # Private conversations have no separate chat; fall back to the user id.
        chat_id = tg_chat.id if tg_chat else tg_user.id
        user = await User.get(user_id)
        if not user:
            user = await dbc.add_new_user(tg_user)
        chat = await Chat.get(chat_id)
        if not chat:
            chat = await dbc.add_new_chat(tg_chat)
        data["user"] = user
        data["chat"] = chat

    async def on_pre_process_message(self, message: types.Message, data: dict):
        await self.setup_chat(data, message.from_user, message.chat)

    async def on_pre_process_callback_query(self, query: types.CallbackQuery, data: dict):
        # Callback queries may arrive without an attached message (no chat).
        await self.setup_chat(data, query.from_user, query.message.chat if query.message else None)
| [
"[email protected]"
] | |
63b8925658c1f05ca2b3c52b232b086acf5307c0 | f2b5889d73cc9fcfd58a2dc807253bd4796849b5 | /naginpy/pipeforward.py | a4893a4b1d3370e7b48d50c402601de681886f75 | [
"MIT"
] | permissive | dalejung/naginpy | e290cb2d26728c625d9b4199dbf1956fe1f6a0c9 | bbc2b380a278a129449ee170fb22efa7f687b6e8 | refs/heads/master | 2020-12-25T18:17:16.498018 | 2018-08-19T18:14:12 | 2018-08-19T18:14:12 | 23,586,699 | 4 | 1 | MIT | 2018-08-19T06:29:59 | 2014-09-02T16:40:21 | Python | UTF-8 | Python | false | false | 419 | py | """
df = value %>%
sum %>%
filter(is_na) %>%
summarize
df = value |>
sum |>
filter(is_na) |>
summarize
with PipeForward(value) as df:
_ = value
_ = sum(_)
_ = filter(_, is_na)
_ = summarize(_)
df = _
with PipeForward(value):
sum
filter(10)
summarize
with value.pipe():
"""
# NOTE(review): exploratory sketch of a pipe-forward DSL — `value` is never
# defined, so importing this module raises NameError. Kept verbatim as design
# notes; the docstring above shows the intended R-style `%>%` semantics.
with value.pipe():
    sum #>>
    filter(10) #>>
    summarize

value >> sum
| [
"[email protected]"
] | |
cc6aeb11c159d67d3188ad48a3943fd5c5bb5b57 | 34bf67017440fe47658559f91fe153c153a359f4 | /126.py | ab76eec45e690df7ee056355c5e29df63513c5d3 | [] | no_license | KevinWangTHU/LeetCode | 1be5f8f1ab587eea5365abb940785c9fe26f5214 | a7916e0818b0853ec75e24724bde94c49234c7dc | refs/heads/master | 2021-05-04T10:16:26.666260 | 2017-08-09T04:17:12 | 2017-08-09T04:18:49 | 53,427,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,917 | py | import collections, string
class Solution(object):
    # LeetCode 126 "Word Ladder II" — bidirectional BFS (Python 2 era code).
    def findLadders(self, beginWord, endWord, wordlist):
        """
        :type beginWord: str
        :type endWord: str
        :type wordlist: Set[str]
        :rtype: List[List[int]]
        """
        # tree maps each word to its successor words on shortest ladders;
        # paths are reconstructed from it once the two frontiers meet.
        def construct_paths(source, dest, tree):
            if source == dest:
                return [[source]]
            return [[source] + path for succ in tree[source] # path can be [] - for failed trials.
                    for path in construct_paths(succ, dest, tree)]

        def add_path(tree, word, neigh, is_forw):
            # Always record the edge in forward (begin -> end) orientation.
            if is_forw:
                tree[word] += neigh,
            else:
                tree[neigh] += word,

        def bfs_level(cur, other, tree, is_forw, wordlist):
            if not cur:
                return False
            # Expand the smaller frontier first (bidirectional BFS).
            if len(cur) > len(other):
                return bfs_level(other, cur, tree, not is_forw, wordlist)
            # Words on either frontier can no longer be revisited.
            for word in (cur | other):
                wordlist.discard(word)
            next, done = set(), False
            while cur:
                word = cur.pop()
                # All one-letter mutations of word.
                for neigh in [word[:idx] + c + word[idx+1:]
                              for c in string.ascii_lowercase
                              for idx in range(len(word))]:
                    if neigh in other:
                        done = True
                        add_path(tree, word, neigh, is_forw)
                    if not done and neigh in wordlist:
                        next.add(neigh)
                        add_path(tree, word, neigh, is_forw)
            return done or bfs_level(next, other, tree, is_forw, wordlist)

        tree, paths = collections.defaultdict(list), []
        is_found = bfs_level(set([beginWord]), set([endWord]), tree, True, wordlist)
        return construct_paths(beginWord, endWord, tree)
# Smoke test (Python 2 print statement; wordlist passed as a set).
s=Solution()
print s.findLadders("hit", "dog", {"hog", "hig", "hip"})
| [
"[email protected]"
] | |
4e8773dfd7c43372b1e0e2487c9908b3ce02e2ec | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02695/s928241641.py | 8a3650d94b02032a7e04ac7856e18f47bbcccc2d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | import copy
def gen_mono_inc_seqs(N, M, cur_list, cur_len):
if cur_len == N:
return cur_list
result = []
for l in cur_list:
last_val = l[len(l)-1]
for i in range(last_val, M+1):
tmp = copy.copy(l)
tmp.append(i)
result.append(tmp)
return gen_mono_inc_seqs(N, M, result, cur_len+1)
def mono_inc_seqs(N, M):
    """Return every non-decreasing sequence of length N over the values 1..M."""
    seeds = [[start] for start in range(1, M + 1)]
    return gen_mono_inc_seqs(N, M, seeds, 1)
# Read N, M, Q and the Q constraint quadruples (a, b, c, d) from stdin
# (competitive-programming input format).
N, M, Q = map(int, input().split())
a, b, c, d = [0] * Q, [0] * Q, [0] * Q, [0] * Q
for i in range(Q):
    a_, b_, c_, d_ = map(int, input().split())
    a[i], b[i], c[i], d[i] = a_, b_, c_, d_

# Brute force: score every non-decreasing sequence; a constraint i awards
# d[i] points when seq[b]-seq[a] == c. Keep the best total.
max_result = -1
seqs = mono_inc_seqs(N, M)
for seq in seqs:
    tmp = 0
    for i in range(Q):
        if seq[b[i]-1] - seq[a[i]-1] == c[i]:
            tmp += d[i]
    max_result = max(max_result, tmp)

print(max_result)
| [
"[email protected]"
] | |
efcacf5019e593a4bf64f6c3a04e37e1c9331b44 | c6588d0e7d361dba019743cacfde83f65fbf26b8 | /x12/5030/435005030.py | a57f914a95dac66f74356e3869e7f5bc1cf84657 | [] | no_license | djfurman/bots-grammars | 64d3b3a3cd3bd95d625a82204c3d89db6934947c | a88a02355aa4ca900a7b527b16a1b0f78fbc220c | refs/heads/master | 2021-01-12T06:59:53.488468 | 2016-12-19T18:37:57 | 2016-12-19T18:37:57 | 76,887,027 | 0 | 0 | null | 2016-12-19T18:30:43 | 2016-12-19T18:30:43 | null | UTF-8 | Python | false | false | 879 | py | from bots.botsconfig import *
from records005030 import recorddefs
# Bots EDI translator grammar for an X12 transaction set (v5030 records):
# envelope syntax plus the segment tree (segment ID, min/max occurrences,
# nested LEVELs for loops).
syntax = {
    'version' : '00403', #version of ISA to send
    'functionalgroup' : 'RK',
    }

structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
    {ID: 'SID', MIN: 1, MAX: 9999, LEVEL: [
        {ID: 'N9', MIN: 0, MAX: 30},
        {ID: 'DTM', MIN: 0, MAX: 10},
        {ID: 'LQ', MIN: 0, MAX: 100, LEVEL: [
            {ID: 'MSG', MIN: 0, MAX: 100},
        ]},
        {ID: 'LX', MIN: 0, MAX: 4, LEVEL: [
            {ID: 'N9', MIN: 0, MAX: 50},
            {ID: 'LH3', MIN: 0, MAX: 100},
            {ID: 'LH2', MIN: 0, MAX: 8},
            {ID: 'LFH', MIN: 0, MAX: 20},
            {ID: 'LEP', MIN: 0, MAX: 3},
            {ID: 'LH4', MIN: 0, MAX: 4},
            {ID: 'CRC', MIN: 0, MAX: 5},
        ]},
    ]},
    {ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"[email protected]"
] | |
dc9a696d53a940224de5525365420e23e1c82e96 | 5077fc5d82caa3b3ed5ce0e062bfe75cd4037ebc | /forever_thinking/bilibili获取封面.py | 260f9926d90e3490e4b217ca8bb4cc9d9081eb75 | [] | no_license | asswecanfat/git_place | ee10e1057d8307d3c72f57291b5bcb6d0579017e | 244ff0de11ffbe1aa9f20308e43af39486507f6f | refs/heads/master | 2021-07-18T23:06:14.324164 | 2020-09-02T12:15:27 | 2020-09-02T12:15:27 | 210,833,462 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,107 | py | import requests
from bs4 import BeautifulSoup
from attr import attrib, attrs
import json
import re
import random
import os
@attrs
class BiliBili(object):
    """Scraper that resolves a Bilibili video's cover image from the search
    results page and downloads it to a local file."""
    # NOTE(review): attrs evaluates these defaults once, so the headers dict
    # is shared by all instances — confirm that is acceptable.
    file_path = attrib(default=r'C:\Users\10248\Desktop\1.txt')
    pic_path = attrib(default=r'C:\Users\10248\Desktop')
    source_wab_url = attrib(default='https://search.bilibili.com/all?keyword=')
    headers = attrib(default={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                                            'AppleWebKit/537.36 (KHTML, like Gecko) '
                                            'Chrome/75.0.3770.142 Safari/537.36'})

    def update_url(self, av_num):
        """Append the av number (and search-source marker) to the search URL."""
        self.source_wab_url = '{}{}{}'.format(self.source_wab_url, av_num, '&from_source=banner_search')

    def get_url_data(self, url):
        """GET the URL with the browser-like headers; returns the Response."""
        return requests.get(url, headers=self.headers)  # reponse

    def download_cover(self):
        """Fetch the search page, extract the cover URL and save the image
        under pic_path with a random, non-clashing numeric filename."""
        reponse = self.get_url_data(self.source_wab_url)
        # BUG FIX: the original called self.source_wab_url.__init__(), which
        # is a no-op on an existing str, so the URL was never reset after a
        # download. Reassign the base search URL so update_url() can be
        # called again on the same instance.
        self.source_wab_url = 'https://search.bilibili.com/all?keyword='
        # self.test_save_data(reponse)
        pic_url = '{}{}'.format(' http:', self.deal_web_data(reponse))
        final_pic_path = r'{}\{}'.format(self.pic_path, str(random.randint(0, 1000)) + '.jpg')
        while os.path.exists(final_pic_path):
            final_pic_path = r'{}\{}'.format(self.pic_path, str(random.randint(0, 1000)) + '.jpg')
        with open(final_pic_path, 'wb') as f:
            f.write(self.get_url_data(pic_url).content)
        print('封面获取成功!')

    def deal_web_data(self, reponse):
        """Parse the search page and return the first video's cover URL
        (taken from the embedded JSON in the 7th <script> tag)."""
        soup = BeautifulSoup(reponse.text, 'lxml')
        point = soup.find_all('script')
        # print(point[6])
        real_data = re.split(r'=|;\(', point[6].text)[1]
        # print(real_data)
        now = json.loads(real_data)
        # print(now['allData']['video'][0]['pic'])
        return now['allData']['video'][0]['pic']

    def test_save_data(self, reponse):
        """Debug helper: dump the raw response body to file_path."""
        with open(self.file_path, 'wb') as f:
            f.write(reponse.content)
# Interactive entry point: prompt for an av number and fetch its cover.
if __name__ == '__main__':
    bi = BiliBili()
    av_n = input('请输入av号:')
    bi.update_url(av_n)
    bi.download_cover()
| [
"[email protected]"
] | |
afca61d5d8ba52a219c2ad7064268eca41cd96c6 | 495ce92166457a6d5818d786a6a3303d3280fcd0 | /src/registration/urls.py | ac889db2d836112cd2cb69c66483cb85276e9187 | [] | no_license | patrickhusi/django-inspectional-registration | 616e7d44716c41b09b32c30415a1cf86d3b7324f | c0aee3ddc4f1a5e870643a605d8a9575b3a7520f | refs/heads/master | 2020-12-25T22:57:45.123082 | 2015-08-01T00:19:32 | 2015-08-01T00:19:32 | 39,487,644 | 0 | 0 | null | 2015-07-22T05:35:21 | 2015-07-22T05:35:21 | null | UTF-8 | Python | false | false | 2,964 | py | # coding=utf-8
"""
URLconf for django-inspectional-registration
"""
__author__ = 'Alisue <[email protected]>'
from registration.compat import url
from registration.compat import patterns
from registration.views import RegistrationView
from registration.views import RegistrationClosedView
from registration.views import RegistrationCompleteView
from registration.views import ActivationView
from registration.views import ActivationCompleteView
# URL routes for the registration/activation workflow.
urlpatterns = patterns('',
    url(r'^activate/complete/$', ActivationCompleteView.as_view(),
        name='registration_activation_complete'),
    url(r'^activate/(?P<activation_key>\w+)/$', ActivationView.as_view(),
        name='registration_activate'),
    url(r'^register/$', RegistrationView.as_view(),
        name='registration_register'),
    url(r'^register/closed/$', RegistrationClosedView.as_view(),
        name='registration_disallowed'),
    url(r'^register/complete/$', RegistrationCompleteView.as_view(),
        name='registration_complete'),
)
# django.contrib.auth
from registration.conf import settings
from django.contrib.auth import views as auth_views
# Optionally expose django.contrib.auth views (login/logout/password flows)
# under configurable URL-name prefix/suffix.
if settings.REGISTRATION_DJANGO_AUTH_URLS_ENABLE:
    prefix = settings.REGISTRATION_DJANGO_AUTH_URL_NAMES_PREFIX
    suffix = settings.REGISTRATION_DJANGO_AUTH_URL_NAMES_SUFFIX

    import django
    # Django 1.6 changed password_reset_confirm from base36 to base64 uids.
    if django.VERSION >= (1, 6):
        uidb = r"(?P<uidb64>[0-9A-Za-z_\-]+)"
        token = r"(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})"
        password_reset_confirm_rule = (
            r"^password/reset/confirm/%s/%s/$" % (uidb, token)
        )
    else:
        uidb = r"(?P<uidb36>[0-9A-Za-z]+)"
        token = r"(?P<token>.+)"
        password_reset_confirm_rule = (
            r"^password/reset/confirm/%s-%s/$" % (uidb, token)
        )
    urlpatterns += patterns('',
        url(r'^login/$', auth_views.login,
            {'template_name': 'registration/login.html'},
            name=prefix+'login'+suffix),
        url(r'^logout/$', auth_views.logout,
            {'template_name': 'registration/logout.html'},
            name=prefix+'logout'+suffix),
        url(r'^password/change/$', auth_views.password_change,
            name=prefix+'password_change'+suffix),
        url(r'^password/change/done/$', auth_views.password_change_done,
            name=prefix+'password_change_done'+suffix),
        url(r'^password/reset/$', auth_views.password_reset,
            name=prefix+'password_reset'+suffix, kwargs=dict(
                post_reset_redirect=prefix+'password_reset_done'+suffix)),
        url(password_reset_confirm_rule,
            auth_views.password_reset_confirm,
            name=prefix+'password_reset_confirm'+suffix),
        url(r'^password/reset/complete/$', auth_views.password_reset_complete,
            name=prefix+'password_reset_complete'+suffix),
        url(r'^password/reset/done/$', auth_views.password_reset_done,
            name=prefix+'password_reset_done'+suffix),
    )
| [
"[email protected]"
] | |
5d241edba0322488b4b7f84cee1a16c8cd0b1bd6 | cdd0fa35e6867932d9821b54f3e9897306139d1a | /myPracticeProblems/ordered_dict.py | ac21f387d95bb5f5a10a305313ea69109d20cc7d | [] | no_license | jisshub/python-development | cfd4246981999d5bc8cfe4cc15a57ebfada2691e | 392e7362bf8e83930d410984e985d73a0a2f40d1 | refs/heads/master | 2021-01-05T02:25:12.896814 | 2020-03-23T16:05:25 | 2020-03-23T16:05:25 | 240,844,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | from collections import OrderedDict
ordered_dict = OrderedDict()
ordered_dict["jissmon"] = 33
ordered_dict["jissmon"] = 33
ordered_dict["jissmon"] = 33
ordered_dict["jissmon"] = 33
ordered_dict["jissmon"] = 33
print(ordered_dict)
new_dict = dict()
new_dict["a"] = 44
new_dict["a"] = 44
new_dict["b"] = 44
print(new_dict)
| [
"[email protected]"
] | |
b0b42a8618f56c00d5b0d03cce3873bd96adb26e | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/scatter3d/line/_showscale.py | 534d53f00aee0a02ffb55e951c76e575cebf5dfe | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 425 | py | import _plotly_utils.basevalidators
class ShowscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name='showscale', parent_name='scatter3d.line', **kwargs
):
super(ShowscaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
role='info',
**kwargs
)
| [
"[email protected]"
] | |
b1a1e15b3a0558a5a77872235e3522ea33bab5cc | 43ab33b2f50e47f5dbe322daa03c86a99e5ee77c | /rcc/models/jaxb_element.py | 49e4e3b8f1e30a23cafa6a6b5a8c3fbc12ef4791 | [] | no_license | Sage-Bionetworks/rcc-client | c770432de2d2950e00f7c7bd2bac22f3a81c2061 | 57c4a621aecd3a2f3f9faaa94f53b2727992a01a | refs/heads/main | 2023-02-23T05:55:39.279352 | 2021-01-21T02:06:08 | 2021-01-21T02:06:08 | 331,486,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,874 | py | # coding: utf-8
"""
nPhase REST Resource
REDCap REST API v.2 # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from rcc.configuration import Configuration
class JAXBElement(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'QName',
'value': 'object',
'nil': 'bool',
'global_scope': 'bool',
'type_substituted': 'bool'
}
attribute_map = {
'name': 'name',
'value': 'value',
'nil': 'nil',
'global_scope': 'globalScope',
'type_substituted': 'typeSubstituted'
}
def __init__(self, name=None, value=None, nil=None, global_scope=None, type_substituted=None, local_vars_configuration=None): # noqa: E501
"""JAXBElement - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._value = None
self._nil = None
self._global_scope = None
self._type_substituted = None
self.discriminator = None
if name is not None:
self.name = name
if value is not None:
self.value = value
if nil is not None:
self.nil = nil
if global_scope is not None:
self.global_scope = global_scope
if type_substituted is not None:
self.type_substituted = type_substituted
@property
def name(self):
"""Gets the name of this JAXBElement. # noqa: E501
:return: The name of this JAXBElement. # noqa: E501
:rtype: QName
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this JAXBElement.
:param name: The name of this JAXBElement. # noqa: E501
:type: QName
"""
self._name = name
@property
def value(self):
"""Gets the value of this JAXBElement. # noqa: E501
:return: The value of this JAXBElement. # noqa: E501
:rtype: object
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this JAXBElement.
:param value: The value of this JAXBElement. # noqa: E501
:type: object
"""
self._value = value
@property
def nil(self):
"""Gets the nil of this JAXBElement. # noqa: E501
:return: The nil of this JAXBElement. # noqa: E501
:rtype: bool
"""
return self._nil
@nil.setter
def nil(self, nil):
"""Sets the nil of this JAXBElement.
:param nil: The nil of this JAXBElement. # noqa: E501
:type: bool
"""
self._nil = nil
@property
def global_scope(self):
"""Gets the global_scope of this JAXBElement. # noqa: E501
:return: The global_scope of this JAXBElement. # noqa: E501
:rtype: bool
"""
return self._global_scope
@global_scope.setter
def global_scope(self, global_scope):
"""Sets the global_scope of this JAXBElement.
:param global_scope: The global_scope of this JAXBElement. # noqa: E501
:type: bool
"""
self._global_scope = global_scope
@property
def type_substituted(self):
"""Gets the type_substituted of this JAXBElement. # noqa: E501
:return: The type_substituted of this JAXBElement. # noqa: E501
:rtype: bool
"""
return self._type_substituted
@type_substituted.setter
def type_substituted(self, type_substituted):
"""Sets the type_substituted of this JAXBElement.
:param type_substituted: The type_substituted of this JAXBElement. # noqa: E501
:type: bool
"""
self._type_substituted = type_substituted
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, JAXBElement):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, JAXBElement):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
] | |
46a6be98cd37c203fd6efd53b180795a67a6b079 | ecff7ab1d962ff895b3e9a0b4239329dd03ce966 | /webpage_text/__init__.py | b20daaa188f87b44418af0b010d45a46826360d1 | [
"MIT"
] | permissive | MSLNZ/pr-webpage-text | ea91e138b3e476688a07210e2b0625cb23538ff8 | 7790e8bbeb5cfbb9c0d7ac508903acd7414ff9d5 | refs/heads/main | 2022-09-15T12:26:29.947169 | 2022-08-05T21:21:26 | 2022-08-05T21:21:26 | 227,973,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,399 | py | import os
import re
import sys
import argparse
import configparser
from gevent import monkey
monkey.patch_all()
import gevent
from gevent import pywsgi
import requests
from flask import (
Flask,
Markup,
render_template,
request,
send_from_directory,
)
gevent.get_hub().NOT_ERROR += (KeyboardInterrupt,)
PORT = 1683
endpoint_dict = {}
default_dict = {}
default_endpoint = 'defaults'
app = Flask(__name__)
@app.errorhandler(404)
def page_not_found(*args):
return render_template('page_not_found.html', names=endpoint_dict.keys(), url=request.host_url), 404
@app.route('/favicon.ico')
def favicon():
return send_from_directory('static', 'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route('/<name>', methods=['GET', 'PUT'])
def page(name):
if name not in endpoint_dict:
return page_not_found()
if request.method == 'PUT':
data = request.json
data['text'] = Markup(re.sub(r'\n|\\n', '<br>', data['text']))
endpoint_dict[name].update(data)
return render_template('page.html', title=name, **endpoint_dict[name])
@app.route('/'+default_endpoint, methods=['GET'])
def defaults():
return default_dict
def run(*args):
"""Run the web server.
This function is only meant to be called from the command line via the
`webpage-text` entry point (see setup.py).
"""
host = '0.0.0.0'
text = ''
size = 100
refresh = 1.0
use_flask = False
enable_log = False
parser = argparse.ArgumentParser(description='Start a web server to display text on a web page.')
parser.add_argument(
'-c', '--config',
help='path to a configuration file (INI format)'
)
parser.add_argument(
'-H', '--host', default=host,
help='hostname or IP address of the server [default={}]'.format(host)
)
parser.add_argument(
'-p', '--port', default=PORT, type=int,
help='port to run the server on [default={}]'.format(PORT)
)
parser.add_argument(
'-e', '--endpoints', nargs='*',
help='the names of the URL endpoints'
)
parser.add_argument(
'-t', '--text', default=text, nargs='*',
help='initial text to display at each endpoint [default={!r}]'.format(text)
)
parser.add_argument(
'-s', '--size', default=size, type=int,
help='font size (in px) of the text [default={}]'.format(size)
)
parser.add_argument(
'-r', '--refresh', default=refresh, type=float,
help='number of seconds for a web browser to wait before automatically '
'refreshing the web page [default={}]'.format(refresh)
)
parser.add_argument(
'-l', '--log', action='store_true', help='show INFO log messages from the gevent WSGI server'
)
parser.add_argument(
'-f', '--flask', action='store_true', help='use the flask development server in debug mode'
)
if not args:
args = sys.argv[1:]
args = parser.parse_args(args)
if args.config is not None:
if not os.path.isfile(args.config):
sys.exit('FileNotFoundError: ' + args.config)
ini = configparser.ConfigParser()
ini.read(args.config)
host = ini.get('server', 'host', fallback=host)
port = ini.getint('server', 'port', fallback=PORT)
endpoints = [e.strip() for e in ini.get('server', 'endpoints', fallback='').split(',') if e.strip()]
use_flask = ini.getboolean('server', 'use_flask', fallback=use_flask)
enable_log = ini.getboolean('server', 'enable_log', fallback=enable_log)
text = ini.get('text', 'initial', fallback=text)
size = ini.getint('text', 'size', fallback=size)
refresh = ini.getfloat('text', 'refresh', fallback=refresh)
else:
host = args.host
port = args.port
endpoints = args.endpoints
use_flask = args.flask
enable_log = args.log
text = ' '.join(args.text) if args.text else args.text
size = args.size
refresh = args.refresh
if not endpoints:
sys.exit('You must specify at least 1 endpoint')
for endpoint in endpoints:
if endpoint == default_endpoint:
sys.exit('The name of an endpoint cannot be {!r} because this name is reserved'.format(default_endpoint))
print('Added endpoint http://{}:{}/{}'.format(host, port, endpoint))
endpoint_dict[endpoint] = {'text': text, 'size': size, 'refresh': refresh}
default_dict['size'] = size
default_dict['refresh'] = refresh
if use_flask:
# use the development server from flask
app.run(host=host, port=port, debug=True)
else:
print('Server running on http://{}:{}/ (Press CTRL+C to quit)'.format(host, port))
log = 'default' if enable_log else None
server = pywsgi.WSGIServer((host, port), application=app.wsgi_app, log=log)
try:
server.serve_forever()
except KeyboardInterrupt:
pass
def put(text, endpoint, host='127.0.0.1', port=PORT, size=None, refresh=None):
"""Update the text that is displayed on a web page.
The URL of the web page to update follows the ``http://host:port/endpoint`` nomenclature.
Parameters
----------
text : str
The text to display on the web page.
endpoint : str
The endpoint of the web page's URL.
host : str, optional
The hostname or IP address of the web server.
port : int, optional
The port number of the web server.
size : int, optional
The font size of the `text`.
refresh : float, optional
The number of second a web browser will wait before it automatically refreshes.
"""
url = 'http://{}:{}/'.format(host, port)
try:
default = default_dict[url]
except KeyError:
default = requests.get(url+default_endpoint).json()
default_dict[url] = {'size': default['size'], 'refresh': default['refresh']}
if size is None:
size = default['size']
if refresh is None:
refresh = default['refresh']
reply = requests.put(url+endpoint.lstrip('/'), json={'text': text, 'size': size, 'refresh': refresh})
if not reply.ok:
matches = re.findall(r'/(\w+)</p>', reply.content.decode())
raise ValueError('Invalid endpoint {!r}. Must be one of: {}'.format(endpoint, ', '.join(matches)))
| [
"[email protected]"
] | |
20f48de587f36ac22f7b751403edee7311221783 | 49536aafb22a77a6caf249c7fadef46d63d24dfe | /tensorflow/tensorflow/python/ops/linalg/linalg.py | 22c87ea697b7d702dec0fb5fe037ea1157fdaf58 | [
"Apache-2.0"
] | permissive | wangzhi01/deeplearning-1 | 4e5ad93f0d9ecd302b74352f80fe1fa6ae70bf0d | 46ab82253d956953b8aa98e97ceb6cd290e82288 | refs/heads/master | 2020-05-28T03:14:55.687567 | 2018-09-12T16:52:09 | 2018-09-12T16:52:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,785 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public API for tf.linalg namespace."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.ops.linalg.linalg_impl import *
from tensorflow.python.ops.linalg.linear_operator import *
from tensorflow.python.ops.linalg.linear_operator_composition import *
from tensorflow.python.ops.linalg.linear_operator_diag import *
from tensorflow.python.ops.linalg.linear_operator_full_matrix import *
from tensorflow.python.ops.linalg.linear_operator_identity import *
from tensorflow.python.ops.linalg.linear_operator_low_rank_update import *
from tensorflow.python.ops.linalg.linear_operator_lower_triangular import *
# pylint: enable=wildcard-import
# Linear algebra ops.
band_part = array_ops.matrix_band_part
cholesky = linalg_ops.cholesky
cholesky_solve = linalg_ops.cholesky_solve
det = linalg_ops.matrix_determinant
# pylint: disable=protected-access
slogdet = gen_linalg_ops._log_matrix_determinant
# pylint: disable=protected-access
diag = array_ops.matrix_diag
diag_part = array_ops.matrix_diag_part
eigh = linalg_ops.self_adjoint_eig
eigvalsh = linalg_ops.self_adjoint_eigvals
einsum = special_math_ops.einsum
eye = linalg_ops.eye
inv = linalg_ops.matrix_inverse
lstsq = linalg_ops.matrix_solve_ls
norm = linalg_ops.norm
qr = linalg_ops.qr
set_diag = array_ops.matrix_set_diag
solve = linalg_ops.matrix_solve
svd = linalg_ops.svd
tensordot = math_ops.tensordot
trace = math_ops.trace
transpose = array_ops.matrix_transpose
triangular_solve = linalg_ops.matrix_triangular_solve
# Seal API.
del absolute_import
del array_ops
del division
del gen_linalg_ops
del linalg_ops
del math_ops
del ops
del print_function
del special_math_ops
| [
"[email protected]"
] | |
cd5ab0ff640c9c8555b6af3aad71c70091b91ec4 | 2760effda15d884af413ca2a35809d03fabea377 | /lc-1222.py | fb44d86b4ecfc652aaac148671173ef0b40bbe00 | [] | no_license | UtsavRaychaudhuri/leetcode | 31943b98ad89d96d72ee4b6b1d1c8d70429d1e1f | 77a13580fd6231830558b1cf8c84f8b3b62b99d0 | refs/heads/master | 2020-11-27T18:02:23.712639 | 2020-09-29T19:39:49 | 2020-09-29T19:39:49 | 229,552,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,248 | py | class Solution(object):
def __init__(self):
self.outarray=[]
def queensAttacktheKing(self, queens, king):
"""
:type queens: List[List[int]]
:type king: List[int]
:rtype: List[List[int]]
"""
self.checkleft(king,queens)
self.checkup(king,queens)
self.checkdown(king,queens)
self.checkright(king,queens)
self.checkdiagonal(king,queens)
return self.outarray
def checkleft(self,king,queens):
j=king[1]
for i in range(king[0],-1,-1):
if [i,j] in queens:
self.outarray.append([i,j])
break
def checkright(self,king,queens):
i=king[0]
for j in range(king[1],10):
if [i,j] in queens:
self.outarray.append([i,j])
break
def checkup(self,king,queens):
j=king[1]
for i in range(king[0],10):
if [i,j] in queens:
self.outarray.append([i,j])
break
def checkdown(self,king,queens):
i=king[0]
for j in range(king[1],-1,-1):
if [i,j] in queens:
self.outarray.append([i,j])
break
def checkdiagonal(self,king,queens):
i=king[0]
j=king[1]
while(i>=0 and j>=0):
if [i,j] in queens and [i,j] not in self.outarray:
self.outarray.append([i,j])
break
i-=1
j-=1
i,j=king[0],king[1]
while(i<=9 and j<=9):
if [i,j] in queens and [i,j] not in self.outarray:
self.outarray.append([i,j])
break
i+=1
j+=1
i,j=king[0],king[1]
while(j>=0 and i<=9):
if [i,j] in queens and [i,j] not in self.outarray:
self.outarray.append([i,j])
break
i+=1
j-=1
i,j=king[0],king[1]
while(i>=0 and j<=9):
if [i,j] in queens and [i,j] not in self.outarray:
self.outarray.append([i,j])
break
j+=1
i-=1
| [
"[email protected]"
] | |
09267857397c18219dcb468ef2b121a2fea8f574 | c83e356d265a1d294733885c373d0a4c258c2d5e | /mayan/apps/locales/managers.py | b80a4b8368ef07497f74fee837058582ac4e31a0 | [
"Apache-2.0"
] | permissive | TrellixVulnTeam/fall-2021-hw2-451-unavailable-for-legal-reasons_6YX3 | 4160809d2c96707a196b8c94ea9e4df1a119d96a | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | refs/heads/master | 2023-08-21T23:36:41.230179 | 2021-10-02T03:51:12 | 2021-10-02T03:51:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | from django.contrib.auth import get_user_model
from django.db import models
class UserLocaleProfileManager(models.Manager):
def get_by_natural_key(self, user_natural_key):
User = get_user_model()
try:
user = User.objects.get_by_natural_key(user_natural_key)
except User.DoesNotExist:
raise self.model.DoesNotExist
return self.get(user__pk=user.pk)
| [
"[email protected]"
] | |
498488d0e02adf53cce7096cd9c7afa81a6a5814 | 64267b1f7ca193b0fab949089b86bc7a60e5b859 | /slehome/account/migrations/0046_auto_20150130_0600.py | 4d7e8d0246f4262cdd73c9abdd7338982e3d2674 | [] | no_license | hongdangodori/slehome | 6a9f2b4526c2783932627b982df0540762570bff | 3e558c78c3943dadf0ec485738a0cc98dea64353 | refs/heads/master | 2021-01-17T12:00:34.221088 | 2015-02-06T13:44:00 | 2015-02-06T13:44:00 | 28,847,585 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0045_auto_20150130_0558'),
]
operations = [
migrations.AlterField(
model_name='basicmemberinformation',
name='auth_key',
field=models.CharField(default='43f9a685bc7146b4ecc63bdf9bc3e5136b7543f436a42e4a2f2ae749ffb0c6db', max_length=64),
preserve_default=True,
),
]
| [
"[email protected]"
] | |
7e198ce9b23e20291e65927d4cb4929ce449664b | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-kafka/huaweicloudsdkkafka/v2/model/create_reassignment_task_response.py | 9d3d17bcac4d36154a7f4e3ba477519e68ca9043 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,225 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateReassignmentTaskResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'job_id': 'str'
}
attribute_map = {
'job_id': 'job_id'
}
def __init__(self, job_id=None):
"""CreateReassignmentTaskResponse
The model defined in huaweicloud sdk
:param job_id: 任务ID。
:type job_id: str
"""
super(CreateReassignmentTaskResponse, self).__init__()
self._job_id = None
self.discriminator = None
if job_id is not None:
self.job_id = job_id
@property
def job_id(self):
"""Gets the job_id of this CreateReassignmentTaskResponse.
任务ID。
:return: The job_id of this CreateReassignmentTaskResponse.
:rtype: str
"""
return self._job_id
@job_id.setter
def job_id(self, job_id):
"""Sets the job_id of this CreateReassignmentTaskResponse.
任务ID。
:param job_id: The job_id of this CreateReassignmentTaskResponse.
:type job_id: str
"""
self._job_id = job_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateReassignmentTaskResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
69ed92de644fca515a276845a1ab3c88f930d96c | ecf6fe6aa87b2c3f041acc30fab11b0cafe3dd46 | /architecture_py/archi_v3_4.py | c44736bffc6e0190265c5c5a8ec71479998ec8b7 | [] | no_license | antgratia/Memoire_code | 73c7806c4576c2e73e00d9a84b1063a2c8f6b559 | 2cdc1339ea24896a6628238f6467edff80f98166 | refs/heads/main | 2023-06-20T16:19:07.041464 | 2021-07-13T11:53:48 | 2021-07-13T11:53:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,719 | py |
import numpy as np
import os
from keras import backend as K
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.models import Sequential, Model,load_model
from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D, GlobalAveragePooling2D, MaxPool2D, Concatenate, Dropout
from tensorflow.keras.initializers import glorot_uniform
from tensorflow.keras.utils import plot_model
import tensorflow as tf
import sys
import traceback
import csv
from time import time
type_archi = 'ALL'
epsilon = 0.0
dropout_rate = 0.4
axis = 3
compress_factor = 0.5
# load dataset
(train_x, train_y), (test_x, test_y) = keras.datasets.cifar10.load_data()
# normalize to range 0-1
train_x = train_x / 255.0
test_x = test_x / 255.0
val_x = train_x[:5000]
val_y = train_y[:5000]
# init training time
training_time = 0
# init result test/train
test_result_loss = ""
test_result_acc = ""
train_result_loss = ""
train_result_acc = ""
nb_layers = "not build"
def id_block(X, f, filters, activation):
X_shortcut = X
X = Conv2D(filters=filters, kernel_size=(1, 1), strides=(1, 1), padding='same', kernel_initializer=glorot_uniform(seed=0))(X)
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X = Activation(activation)(X)
X = Conv2D(filters=filters, kernel_size=(f, f), strides=(1, 1), padding='same', kernel_initializer=glorot_uniform(seed=0))(X)
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X = Add()([X, X_shortcut])# SKIP Connection
X = Activation(activation)(X)
return X
def conv_block(X, f, filters, activation, s=2):
X_shortcut = X
X = Conv2D(filters=filters, kernel_size=(1, 1), strides=(s, s), padding='valid', kernel_initializer=glorot_uniform(seed=0))(X)
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X = Activation(activation)(X)
X = Conv2D(filters=filters, kernel_size=(f, f), strides=(1, 1), padding='same', kernel_initializer=glorot_uniform(seed=0))(X)
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X_shortcut = Conv2D(filters=filters, kernel_size=(1, 1), strides=(s, s), padding='valid', kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
if epsilon != 0:
X_shortcut = BatchNormalization(epsilon = epsilon, axis=axis)(X_shortcut)
X = Add()([X, X_shortcut])
X = Activation(activation)(X)
return X
def denseBlock(X, f, nb_filter, nb_layer, padding, activation):
x_input = X
for _ in range(0,nb_layer):
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X = Activation(activation)(X)
X = Conv2D(filters=nb_filter, kernel_size=(f, f), strides=(1, 1), padding=padding)(X)
if dropout_rate != 0:
X = Dropout(dropout_rate)(X)
X = Concatenate()([X, x_input])
return X
def transition_block(X, f, nb_filter, padding, activation, op, stride):
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X = Activation(activation)(X)
X = Conv2D(filters=nb_filter, kernel_size=(f, f), strides=(1, 1), padding=padding)(X)
if dropout_rate != 0:
X = Dropout(dropout_rate)(X)
if (op == 'avg'):
X = AveragePooling2D(pool_size = f, strides=stride, padding=padding)(X)
else :
X = MaxPooling2D(pool_size=f, strides=stride, padding=padding)(X)
return X
try:
def getModel():
X_input = X = Input([32, 32, 3])
X = Conv2D(18, kernel_size=5, strides=5, activation='relu', padding='valid')(X)
X = conv_block(X, 2, 36, 'selu', 1)
X = Conv2D(72, kernel_size=7, strides=2, activation='relu', padding='same')(X)
X = conv_block(X, 7, 144, 'tanh', 7)
X = GlobalMaxPooling2D()(X)
X = Dense(10, activation='softmax')(X)
model = Model(inputs=X_input, outputs=X)
return model
model = getModel()
#plot_model(model, show_shapes=True, to_file="../architecture_img/archi_v3_4.png")
model.compile(optimizer='adam', loss=keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])
start = time()
es = tf.keras.callbacks.EarlyStopping(monitor='loss', verbose=1, restore_best_weights=True, patience=1)
list_cb = [es]
history = model.fit(train_x, train_y, epochs=50, batch_size=64, validation_split=0.3, callbacks=list_cb)
training_time = time()-start
print(model.evaluate(test_x, test_y))
log_file = open("../architecture_log/archi_v3_4.log" , "w")
# save test result
log_file.write('test result : ' + str(model.evaluate(test_x, test_y)))
test_result_loss = model.evaluate(test_x, test_y)[0]
test_result_acc = model.evaluate(test_x, test_y)[1]
# save train result
log_file.write('train result : ' + str(model.evaluate(test_x, test_y)))
log_file.write('History train result : ' + str(history.history))
train_result_loss = model.evaluate(train_x, train_y)[0]
train_result_acc = model.evaluate(train_x, train_y)[1]
print('OK: file ../architecture_log/archi_v3_4.log has been create')
nb_layers = len(model.layers)
log_file.close()
except:
print('error: file ../architecture_log/archi_v3_4_error.log has been create')
error_file = open("../architecture_log/archi_v3_4_error.log" , "w")
traceback.print_exc(file=error_file)
result_loss = "Error"
result_acc = "Error"
error_file.close()
finally:
file = open('../architecture_results_v3.csv', 'a', newline ='')
with file:
# identifying header
header = ['file_name', 'training_time(s)', 'test_result_loss', 'test_result_acc', 'train_result_acc', 'train_result_loss', 'nb_layers', 'epochs', 'type_archi']
writer = csv.DictWriter(file, fieldnames = header)
# writing data row-wise into the csv file
# writer.writeheader()
writer.writerow({'file_name' : 'archi_v3_4',
'training_time(s)': training_time,
'test_result_loss': test_result_loss,
'test_result_acc': test_result_acc,
'train_result_acc': train_result_acc,
'train_result_loss': train_result_loss,
'nb_layers': nb_layers,
'epochs' : len(history.history['loss']),
'type_archi': type_archi})
print('add line into architecture_results_v3.csv')
file.close()
| [
"[email protected]"
] | |
4a0714091ddd90df0ea8c7a0b01751aad0843151 | 398089ec2210e1b6a12aecf8ed91cdeced6b36fc | /employer/views.py | 37965cec7645b608559570279fbd8da925ea939d | [
"Apache-2.0"
] | permissive | WilliamQLiu/job-waffle | 7ca8cb6357884e99a9c054bbd25d10222816dde7 | 59e4bc550dc1c2131fa427f188fbc2bb287aa938 | refs/heads/master | 2022-05-04T12:18:53.018609 | 2015-04-10T03:18:34 | 2015-04-10T03:18:34 | 27,843,538 | 1 | 1 | Apache-2.0 | 2021-06-10T17:29:08 | 2014-12-10T22:48:48 | JavaScript | UTF-8 | Python | false | false | 7,702 | py | """
A view takes a web request and returns a web response
The response can be a web page, a redirect, a 404 error, etc
GET is used for requests that do not affect the state of the system
POST is used for making changes in the database
Under the hood, Django exposes HTTP POST and GET data as a 'QueryDict',
a Django subclass of the standard Python dict
"""
from __future__ import absolute_import
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.shortcuts import render, render_to_response, RequestContext, Http404
from django.utils.decorators import method_decorator # Allow LoggedInMixin
from django.views.generic import TemplateView, View, ListView, UpdateView, DeleteView, CreateView
from django.http import HttpResponse, HttpResponseRedirect
from django.utils import timezone
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
import django_filters
# For debugging
from django.http.request import QueryDict
from django.utils.datastructures import MultiValueDict
import logging
from .models import Job
from .forms import JobForm, JobSearchForm
from .serializers import JobSerializer
from rest_framework import viewsets, authentication, permissions, filters
from haystack.query import SearchQuerySet
from haystack.inputs import AutoQuery, Exact, Clean, Raw
# Debugging: Log levels (DEBUG, INFO, WARNING, ERROR, CRITICAL)
logger = logging.getLogger(__name__) # get instance of a logger
class LoggedInMixin(object):
    """View mixin that restricts a class-based view to authenticated users.

    Wraps ``dispatch`` with Django's ``login_required`` decorator, so
    anonymous requests are redirected to the login page before any
    HTTP-method handler runs.
    """

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(LoggedInMixin, self).dispatch(request, *args, **kwargs)
def find_job(request):
    """Render the 'Find Job' page with Haystack search results.

    Reads the optional ``query_what`` and ``query_where`` GET parameters
    and, when at least one is non-empty, narrows the form's Haystack
    ``SearchQuerySet`` to entries whose content contains both terms.
    An empty form falls back to the unfiltered form search.
    """
    form = JobSearchForm(request.GET)  # <class 'employer.forms.JobSearchForm'>
    form_search = form.search()  # Haystack SearchQuerySet built by the form

    # Use .get() with defaults: the original `request.GET['...']` lookups
    # raised MultiValueDictKeyError when a request carried only one of the
    # two parameters (the `or` condition let such requests through).
    query_what = request.GET.get('query_what', '')   # "what" search field
    query_where = request.GET.get('query_where', '') # "where" search field

    if query_what or query_where:
        # Combine both fields into a single content filter (AND semantics).
        # For SearchQuerySet filtering details see:
        # http://django-haystack.readthedocs.org/en/v2.3.1/searchqueryset_api.html
        myquery = (query_what + " " + query_where).strip()
        search_results = form_search.filter(content__contains=myquery)
    else:
        search_results = form_search

    # If you want to filter by Model instead of by Haystack's SearchQuerySet
    # my_data = Job.objects.filter(active=True).order_by('timestamp_created')
    context = {'search_results': search_results}
    return render(request, 'find_job.html', context)
def post_job(request):
    """Render and process the 'Post Job' form.

    GET returns a blank form. A valid POST creates a ``Job`` owned by the
    requesting user, flashes a success message, and redirects home. An
    invalid POST falls through and re-renders the bound form with errors.
    """
    if request.method == 'POST':
        form = JobForm(data=request.POST)  # bind submitted data
        if form.is_valid():
            cleaned = form.cleaned_data
            job = Job(
                created_by=request.user,
                company=cleaned['company'],
                location=cleaned['location'],
                timestamp_created=timezone.now(),
                title=cleaned['title'],
                description=cleaned['description'],
                status=cleaned['status'],
                salary_min=cleaned['salary_min'],
                salary_max=cleaned['salary_max'],
            )
            job.save()
            messages.success(request, 'Thanks!')
            return HttpResponseRedirect('/')
    else:  # GET: present an unbound form
        form = JobForm()
    return render(request, 'post_job.html', {'form': form})
def manage_job_posts(request):
    """Render the 'Manage Job Posts' page listing active jobs, oldest first."""
    active_jobs = Job.objects.filter(active=True).order_by('timestamp_created')
    # Template expects the job list under the 'my_data' key.
    return render(request, 'manage_job_posts.html', {'my_data': active_jobs})
class JobCreateView(LoggedInMixin, CreateView):
    """ Allow Users to Create Jobs """
    model = Job
    template_name = "job_create.html"

    def get_success_url(self):
        """ After posting job, go to job management """
        return reverse('job-post')

    def get_context_data(self, **kwargs):
        """Expose the form's POST target URL to the template as 'action'."""
        context = super(JobCreateView, self).get_context_data(**kwargs)
        context['action'] = reverse('job-create')
        return context

    def form_valid(self, form):
        """Attach the requesting user as the job's creator before saving.

        BUG FIX: the Job model's owner field is ``created_by`` (see how
        post_job constructs Job instances); the previous code assigned to
        ``form.instance.user``, which is not a model field, so the creator
        was silently left unset.
        """
        form.instance.created_by = self.request.user
        return super(JobCreateView, self).form_valid(form)
class JobUpdateView(LoggedInMixin, UpdateView):
    """Let a logged-in user edit an existing Job."""
    model = Job
    template_name = 'job_update.html'

    def get_success_url(self):
        # After updating a job, go back to the job-management page.
        return reverse('manage_job_posts')

    def get_queryset(self):
        # Narrow the queryset to exactly the job named by the 'pk' URL kwarg.
        return Job.objects.filter(id=self.kwargs['pk'])
class JobListView(LoggedInMixin, ListView):
    """Display a single Job, selected by the 'pk' URL kwarg."""
    model = Job
    template_name = "job_view.html"

    def get_success_url(self):
        return reverse('job-list')

    def get_queryset(self):
        # Narrow the queryset to exactly the job named by the 'pk' URL kwarg.
        return Job.objects.filter(id=self.kwargs['pk'])
class JobDeleteView(LoggedInMixin, DeleteView):
    """Delete a single Job, selected by the 'pk' URL kwarg."""
    model = Job
    template_name = "job_delete.html"

    def get_success_url(self):
        # After deleting a job, go back to the job-management page.
        return reverse('manage_job_posts')

    def get_queryset(self):
        # Narrow the queryset to exactly the job named by the 'pk' URL kwarg.
        return Job.objects.filter(id=self.kwargs['pk'])
# FOR DJANGO REST FRAMEWORK (DRF)
class DefaultsMixin(object):
    """
    Default settings for view authentication, permissions,
    filtering and pagination
    """
    # Mixed into the DRF viewsets below (e.g. JobViewSet) so every API view
    # shares the same auth / permission / filter / pagination behaviour.
    authentication_classes = (
        authentication.BasicAuthentication,
        authentication.TokenAuthentication,
    )
    # Only authenticated clients may access the API at all.
    permission_classes = (
        permissions.IsAuthenticated, # Access to GET, POST, HEAD, OPTIONS
        #IsReadOnlyRequest,
        #permissions.IsAuthenticatedOrReadOnly
    )
    # Query-param driven filtering (?field=...), free-text search (?search=...)
    # and result ordering (?ordering=...).
    filter_backends = (
        filters.DjangoFilterBackend,
        filters.SearchFilter,
        filters.OrderingFilter,
    )
    # 50 results per page by default; clients may override via ?page_size=...
    # up to a hard cap of 500.
    paginate_by = 50
    paginate_by_param = 'page_size'
    max_paginate_by = 500
# DRF FILTERS
class JobFilter(django_filters.FilterSet):
    """Declarative filter set for the Job API (used by JobViewSet)."""
    # Exact-match filter on the Job.company field.
    # NOTE(review): django-filter renamed the ``name`` kwarg to ``field_name``
    # in 2.0 -- confirm the pinned django-filter version still accepts ``name``.
    company = django_filters.CharFilter(name='company')

    class Meta:
        model = Job
        # Fields exposed for query-parameter filtering.
        fields = ('timestamp_updated', 'company', 'title')
# DRF VIEWSETS
class JobViewSet(DefaultsMixin, viewsets.ModelViewSet):
    """CRUD API endpoints for Job objects.

    Auth, permissions, filter backends and pagination come from
    DefaultsMixin; field filtering is delegated to JobFilter.
    """
    queryset = Job.objects.all()
    serializer_class = JobSerializer
    filter_class = JobFilter
    # BUG FIX: ('name') and ('timestamp_updated') were bare strings -- a
    # one-element tuple needs a trailing comma.  DRF iterates these settings,
    # so a bare string would be treated as per-character field names.
    # NOTE(review): Job rows are built from company/title fields elsewhere in
    # this file -- confirm the model actually has a 'name' field to search on.
    search_fields = ('name',)
    ordering_fields = ('timestamp_updated',)
| [
"[email protected]"
] | |
5682f517f5c1795e283d9fbc3d17cb77b2c67060 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/Wyx_w_M_w_Sob_to_Wz_focus/IN_Sob_k15_EroM/Sob_k15_s001_EroM/pyr_Tcrop255_p60_j15/pyr_5s/L3/step10_a.py | 0112794823a31b19968d9437b4bb2fa90a4fd65b | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,850 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__) ### path of the step10 script currently being executed
code_exe_path_element = code_exe_path.split("\\") ### split the path so we can find which level kong_model2 sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2") ### find which directory level kong_model2 is at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### locate the kong_model2 dir
import sys ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print("   code_exe_path:", code_exe_path)
# print("   code_exe_path_element:", code_exe_path_element)
# print("   code_dir:", code_dir)
# print("   kong_layer:", kong_layer)
# print("   kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### the -1 converts a length into an index
# print("   kong_to_py_layer:", kong_to_py_layer)
### Derive template_dir (the parent folder of result_dir) from how deep this script sits below kong_model2.
### NOTE(review): kong_to_py_layer == 1 leaves template_dir undefined -- confirm that case cannot occur.
if   (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] would strip the "step1x_" prefix; later decided a meaningful name need not be stripped, so changed to 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] would strip "mask_" (added only because a python module name cannot start with a digit); the automatic ordering turned out acceptable, so changed to 0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print("   template_dir:", template_dir) ### e.g. template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_5side_L3 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
### Drop this script's own dir from sys.path and purge cached step09 modules,
### so the step09 variant imported above is re-resolved fresh for this experiment file.
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
### (English translation of the note below) exp_dir is the *parent* folder of
### result_dir; nesting is fine.  e.g. exp_dir = "6_mask_unet/your_name" puts
### results under 6_mask_unet/your_name/result_a, result_b, ...
'''
exp_dir 是 決定 result_dir 的 "上一層"資料夾 名字喔! exp_dir要巢狀也沒問題~
比如:exp_dir = "6_mask_unet/自己命的名字",那 result_dir 就都在:
6_mask_unet/自己命的名字/result_a
6_mask_unet/自己命的名字/result_b
6_mask_unet/自己命的名字/...
'''
use_db_obj = type8_blender_kong_doc3d_in_W_gt_W_ch_norm_v2  # dataset: blender doc3d, W in / W gt, channel-normalized v2
use_loss_obj = [G_sobel_k15_erose_M_loss_info_builder.set_loss_target("UNet_Wz").copy()]  # NOTE(review): loss targets the "UNet_Wz" output head -- confirm against the model builder
#############################################################
### 為了resul_analyze畫空白的圖,建一個empty的 Exp_builder
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
##################################
### 1side1
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_1__2side_1__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
##################################
### 1side2
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_2__2side_1__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_1__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_1__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 "3" 6 10 15 21 28 36 45 55
# 2side2 OK 4
ch032_1side_2__2side_2__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
##################################
### 1side3
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_3__2side_1__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_1__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_1__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 "3" 6 10 15 21 28 36 45 55
# 2side2 OK 4
ch032_1side_3__2side_2__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 "6" 10 15 21 28 36 45 55
# 2side3 OK 10
ch032_1side_3__2side_3__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
##################################
### 1side4
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_4__2side_1__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_1__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_1__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 "3" 6 10 15 21 28 36 45 55
# 2side2 OK 4
ch032_1side_4__2side_2__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 "6" 10 15 21 28 36 45 55
# 2side3 OK 10
ch032_1side_4__2side_3__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 6 "10" 15 21 28 36 45 55
# 2side4 OK 20
ch032_1side_4__2side_4__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
# Script entry point: with no CLI arguments, run one default experiment;
# otherwise evaluate the expression handed over by step10_b_subprocess.py.
if(__name__ == "__main__"):
    print("build exps cost time:", time.time() - start_time)
    if len(sys.argv) < 2:
        ############################################################################################################
        ### Run directly (F5, or `python step10_b1_exp_obj_load_and_train_and_test.py` with no arguments)
        ### so we do NOT fall through to the code below, which is meant for step10_b_subprocss.py.
        ch032_1side_1__2side_1__3side_1_4side_1_5s1.build().run()
        # print('no argument')
        sys.exit()
    ### The code below serves step10_b_subprocess.py; it is equivalent to running
    ### `python step10_b1_exp_obj_load_and_train_and_test.py "<some_exp>.build().run()"` on the command line.
    # NOTE(review): eval() of argv[1] executes arbitrary code -- acceptable only
    # because the sole caller is the trusted step10_b_subprocess.py driver.
    eval(sys.argv[1])
| [
"[email protected]"
] | |
b3e740a0b9efebccd943477359ab43b75987d7c2 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/7/sw5.py | 6ff3c55a6f3707c4e80f76687713728c3404fcd7 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Print the tokens enclosed by standalone '"' tokens, joined by single
    # spaces; a bare pair of quotes prints an empty line.
    # NOTE(review): Python 2 source (print statements). Assumes the quote
    # characters arrive as their own whitespace-separated tokens -- confirm
    # against the sw5 input format.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
    # Interpret fileName line by line: every line must start with the keyword
    # 'sw5'; the remaining tokens are handed to printFunction. Any other
    # first token prints ERROR and stops processing.
    # NOTE(review): a blank line makes data[0] raise IndexError -- assumes
    # the input file contains no empty lines.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'sw5':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
6432cf6c0bb2012d7369a431a646f38b43800201 | 29a4e8ffa77a09c418712bb243e1b4d24336e0c1 | /nbgrader/formgrader/base.py | 326ee4c2852b3763977915e7b9e277acf09f721b | [
"BSD-3-Clause"
] | permissive | silky/nbgrader | f52634438d79df80de077569e94562f08f123f0b | 30f461ee06a03a1e2ed1789016bb49e9f59e61eb | refs/heads/master | 2021-01-18T00:23:18.300627 | 2016-01-08T22:06:45 | 2016-01-08T22:06:45 | 50,624,512 | 1 | 0 | null | 2016-01-29T00:21:36 | 2016-01-29T00:21:36 | null | UTF-8 | Python | false | false | 2,554 | py | import json
import functools
from tornado import web
def authenticated(f):
    """Decorator for handler methods that must pass the formgrader auth check.

    The authenticator may answer True (proceed to the wrapped method),
    False (reply 403 Forbidden), or anything else, which is treated as a
    URL to redirect the client to (e.g. a login page).
    """
    @functools.wraps(f)
    def guard(self, *args, **kwargs):
        verdict = self.auth.authenticate(self.request)
        if verdict is False:
            raise web.HTTPError(403)  # Forbidden
        if verdict is True:
            return f(self, *args, **kwargs)  # Success
        # Any other value is a redirect target.
        self.redirect(verdict, permanent=False)
    return guard
class BaseHandler(web.RequestHandler):
    """Common plumbing for formgrader handlers.

    Provides typed accessors for the shared objects stashed in
    ``self.settings`` by the application, template rendering via the shared
    Jinja2 environment, and custom error pages for selected status codes.
    """

    # Status codes rendered with a custom template; 500 and 502 share one.
    # (Previously three identical render blocks were copy-pasted per code.)
    _error_templates = {
        500: 'gradebook_500.tpl',
        502: 'gradebook_500.tpl',
        403: 'gradebook_403.tpl',
    }

    @property
    def gradebook(self):
        """The shared gradebook object."""
        return self.settings['gradebook']

    @property
    def auth(self):
        """The shared authenticator."""
        return self.settings['auth']

    @property
    def mathjax_url(self):
        """URL MathJax is served from."""
        return self.settings['mathjax_url']

    @property
    def notebook_dir(self):
        """Root directory of the notebooks being graded."""
        return self.settings['notebook_dir']

    @property
    def notebook_dir_format(self):
        """Directory layout format string for notebooks."""
        return self.settings['notebook_dir_format']

    @property
    def nbgrader_step(self):
        """Name of the nbgrader step being served (e.g. autograded)."""
        return self.settings['nbgrader_step']

    @property
    def exporter(self):
        """The nbconvert exporter used to render notebooks."""
        return self.settings['exporter']

    @property
    def log(self):
        """Application logger."""
        return self.settings['log']

    def render(self, name, **ns):
        """Render the Jinja2 template *name* with keyword namespace *ns*."""
        template = self.settings['jinja2_env'].get_template(name)
        return template.render(**ns)

    def write_error(self, status_code, **kwargs):
        """Render a custom page for known error codes; defer to tornado otherwise."""
        template = self._error_templates.get(status_code)
        if template is None:
            return super(BaseHandler, self).write_error(status_code, **kwargs)
        html = self.render(
            template,
            base_url=self.auth.base_url,
            error_code=status_code)
        self.write(html)
        self.finish()
class BaseApiHandler(BaseHandler):

    def get_json_body(self):
        """Return the request body parsed as JSON, or None when it is empty.

        Raises a 400 HTTPError when the body cannot be parsed.
        """
        raw = self.request.body
        if not raw:
            return None
        text = raw.strip().decode('utf-8')
        try:
            return json.loads(text)
        except Exception:
            self.log.debug("Bad JSON: %r", text)
            self.log.error("Couldn't parse JSON", exc_info=True)
            raise web.HTTPError(400, 'Invalid JSON in body of request')
| [
"[email protected]"
] | |
f61b69e59a5f3df86af281eb1cb0ccc016d7d18e | 63d28241de5d5f8f6ea998865124106761eba317 | /beaker/__init__.py | 2d669b3124144289cba77e9e8b1cf96bb56f896c | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] | permissive | isabella232/beaker-1 | e2aba9947d176c4683921b862a6168af68d7e7f6 | 3d5b74a61eaadd28d6917ab039fb9292cbc533ef | refs/heads/master | 2022-01-05T20:44:55.956425 | 2019-06-17T17:07:58 | 2019-06-17T17:07:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | __version__ = '1.9.0+dd.26'
| [
"[email protected]"
] | |
1a90a7a11a31b6d2bd8d513513d6dff28f93aca6 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /DaVinciDev_v38r1p1/Phys/StrippingArchive/python/StrippingArchive/Stripping23/StrippingRD/StrippingD23MuLines.py | f22c2b8963448ca27883ac74c5c22c1eb3680061 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,651 | py | """
Module for construction of D+ -> mu l+ l- lines
Performance
Full.dst:
#########
StrippingReport INFO Event 500000, Good event 500000
| *Decision name*|*Rate,%*|*Accepted*| *Mult*|*ms/evt*|
|!StrippingD23MuD23MuLine | 0.0014| 7| 1.000| 0.112|
|!StrippingD23MuD2MueeLine | 0.0030| 15| 1.000| 0.131|
|!StrippingD23MuD23PiLine | 0.0130| 65| 1.354| 0.033|
MC: D+ -> 3pi (21163012)
########################
StrippingReport INFO Event 100000, Good event 100000
| *Decision name*|*Rate,%*|*Accepted*| *Mult*|*ms/evt*|
|!StrippingD23MuD23PiLine | 0.6500| 650| 1.008| 0.569|
MC: D+ -> K 2pi (21163020)
##########################
StrippingReport INFO Event 100000, Good event 100000
| *Decision name*|*Rate,%*|*Accepted*| *Mult*|*ms/evt*|
|!StrippingD23MuD23PiLine | 0.0130| 13| 1.077| 0.266|
Exported symbols (use python help!):
-
"""
__author__ = ["Oliver Gruenberg"]
__date__ = "19.05.2015"
__version__ = "$Revision: 1.0 $"
#############################################################################
# Fix: __all__ previously advertised "config_default", a name that does not
# exist; the module-level dictionary is named "default_config", so a
# star-import of this module would raise AttributeError.
__all__ = ("D23MuLinesConf",
           "default_config", )
#############################################################################
from Gaudi.Configuration import *
from Configurables import FilterDesktop, CombineParticles, DaVinci__N3BodyDecays
from PhysSelPython.Wrappers import Selection, DataOnDemand
from StrippingConf.StrippingLine import StrippingLine
from StrippingUtils.Utils import LineBuilder
#from StrippingSelections.Utils import checkConfig
from GaudiKernel.PhysicalConstants import c_light
#############################################################################
# Stripping registration dictionary: the framework instantiates BUILDERTYPE
# (D23MuLinesConf) with CONFIG as its cut/prescale settings and routes the
# output to the listed stream. Units are noted per key where applicable.
default_config = {
    "NAME" : "D23Mu",
    "WGs" : [ "RD" ],
    "STREAMS" : [ "Leptonic" ],
    "BUILDERTYPE" : "D23MuLinesConf",
    "CONFIG" : {
    # TrackCuts
    "MinTrIPChi2" : 25.0,
    "MaxTrChi2Dof" : 3.0,
    "MaxTrGhp" : 0.3,
    # CombiCuts
    "MaxDoca" : 0.3, # (mm)
    "mDiffDLoose" : 150, # (MeV)
    "mDiffDTight" : 150, # (MeV)
    # MotherCuts
    "MaxIPChi2" : 25,
    "MinVDChi2" : 225,
    "MaxVtxChi2Dof" : 9,
    "MinDira" : 0.0,
    "MinTau" : 0.1, # (ps)
    # scalings
    "Postscale" : 1,
    "D23MuPrescale" : 1,
    "D2MueePrescale" : 1,
    "D23PiPrescale" : 0.01,
    # Isolation/related-info tools attached to every line built here.
    "CommonRelInfoTools" : [ { "Type": "RelInfoVertexIsolation", "Location":"VtxIsoInfo" },
                             { "Type": "RelInfoVertexIsolationBDT", "Location":"VtxIsoInfoBDT" },
                             { "Type" : "RelInfoBs2MuMuBIsolations",
                               "RecursionLevel" : 0,
                               "Variables" : [],
                               "Location" : "BsMuMuBIsolation",
                               "tracktype" : 3,
                               "makeTrackCuts" : False, },
                             ] # closes CommonRelInfoTools
    } # closes CONFIG
  } # closes default_config
class D23MuLinesConf(LineBuilder) :
    """
    Builder

    Builds three stripping lines that share common track, combination and
    mother (D+) vertex cuts, filled in from the CONFIG dictionary:
      * D+ -> mu mu mu   (signal, includes same-sign combinations)
      * D+ -> mu e e     (signal, includes same-sign combinations)
      * D+ -> pi pi pi   (prescaled normalisation/control channel)
    Each line also attaches per-line cone/track-isolation RelatedInfoTools
    plus the CommonRelInfoTools from the configuration.
    """
    # Keys that the supplied config dictionary must provide (checked by
    # the LineBuilder base class).
    __configuration_keys__ = ( # TrackCuts
                               "MinTrIPChi2",
                               "MaxTrChi2Dof",
                               "MaxTrGhp",
                               # CombiCuts
                               "MaxDoca",
                               "mDiffDLoose",
                               "mDiffDTight",
                               # MotherCuts
                               "MaxIPChi2",
                               "MinVDChi2",
                               "MaxVtxChi2Dof",
                               "MinDira",
                               "MinTau",
                               # scalings
                               "Postscale",
                               "D23MuPrescale",
                               "D2MueePrescale",
                               "D23PiPrescale",
                               "CommonRelInfoTools", )
    def __init__(self, name = "D23Mu", config = default_config) :
        LineBuilder.__init__(self, name, config)
#############################################################################
        # LoKi functor cut strings, interpolated from the config values.
        # TrackCuts: applied to every daughter track.
        self.TrackCuts = """
                         (MIPCHI2DV(PRIMARY) > %(MinTrIPChi2)s)
                         & (TRCHI2DOF < %(MaxTrChi2Dof)s)
                         & (TRGHP < %(MaxTrGhp)s)
                         """ %config
        # Pairwise DOCA cut for the first two daughters (N3BodyDecays).
        self.Combination12Cuts = "(ADOCA(1,2) < %(MaxDoca)s*mm)" %config
        # Full 3-body combination cuts: D+ mass window plus remaining DOCAs.
        self.CombinationCutsLoose = """
                               (ADAMASS(1920*MeV) < %(mDiffDLoose)s*MeV)
                               & (ADOCA(1,3) < %(MaxDoca)s*mm)
                               & (ADOCA(2,3) < %(MaxDoca)s*mm)
                               """ %config
        self.CombinationCutsTight = """
                               (ADAMASS(1920*MeV) < %(mDiffDTight)s*MeV)
                               & (ADOCA(1,3) < %(MaxDoca)s*mm)
                               & (ADOCA(2,3) < %(MaxDoca)s*mm)
                               """ %config
        # MotherCuts: applied to the reconstructed D+ candidate.
        self.MotherCuts = """
                          (BPVIPCHI2() < %(MaxIPChi2)s )
                          & (BPVVDCHI2 > %(MinVDChi2)s )
                          & (VFASPF(VCHI2/VDOF) < %(MaxVtxChi2Dof)s )
                          & (BPVDIRA > %(MinDira)s )
                          & (BPVLTIME() > %(MinTau)s*ps )
                          """ %config
#############################################################################
        # Build the three selections.
        D23Mu_name = name+"D23Mu"
        D2Muee_name = name+"D2Muee"
        D23Pi_name = name+"D23Pi"
        self.selD23Mu = self.makeD23Mu(D23Mu_name)
        self.selD2Muee = self.makeD2Muee(D2Muee_name)
        self.selD23Pi = self.makeD23Pi(D23Pi_name)
#############################################################################
        # D+ -> mu mu mu line: cone isolation at three cone sizes plus
        # track-isolation BDTs for each muon.
        self.D23Mu_Line = StrippingLine(D23Mu_name+"Line",
                                        prescale = config["D23MuPrescale"],
                                        postscale = config["Postscale"],
                                        MDSTFlag = True,
                                        selection = self.selD23Mu,
                                        RelatedInfoTools = [
                                            { "Type" : "RelInfoConeVariables",
                                              "ConeAngle" : 0.5,
                                              "Variables" : [],
                                              "RecursionLevel" : 1,
                                              "Locations" : { self.selD23Mu : "ConeIso05Dp",
                                                              "Phys/StdAllLooseMuons" :
                                                              ["ConeIso05mu1", "ConeIso05mu2", "ConeIso05mu3"], }, },
                                            { "Type" : "RelInfoConeVariables",
                                              "ConeAngle" : 1.0,
                                              "Variables" : [],
                                              "RecursionLevel" : 1,
                                              "Locations" : { self.selD23Mu : "ConeIso10Dp",
                                                              "Phys/StdAllLooseMuons" :
                                                              ["ConeIso10mu1", "ConeIso10mu2", "ConeIso10mu3"], }, },
                                            { "Type" : "RelInfoConeVariables",
                                              "ConeAngle" : 1.5,
                                              "Variables" : [],
                                              "RecursionLevel" : 1,
                                              "Locations" : { self.selD23Mu : "ConeIso15Dp",
                                                              "Phys/StdAllLooseMuons" :
                                                              ["ConeIso15mu1", "ConeIso15mu2", "ConeIso15mu3"], }, },
                                            { "Type": "RelInfoTrackIsolationBDT",
                                              "RecursionLevel" : 1,
                                              "Variables" : 0,
                                              "Locations": { "Phys/StdAllLooseMuons" :
                                                             ["TrackIsoBDTmu1","TrackIsoBDTmu2","TrackIsoBDTmu3"], }, },
                                            { "Type" : "RelInfoBs2MuMuTrackIsolations",
                                              "RecursionLevel" : 1,
                                              "Variables" : [],
                                              "IsoTwoBody" : True,
                                              "Locations" : { "Phys/StdAllLooseMuons" :
                                                              ["BsMuMuTrackIsomu1","BsMuMuTrackIsomu2","BsMuMuTrackIsomu3"] ,}, },
                                            ] + config["CommonRelInfoTools"] # end of RelatedInfoTools
                                        )# closes Strippingline
        # D+ -> mu e e line: same isolation set, split over mu and e legs.
        self.D2Muee_Line = StrippingLine(D2Muee_name+"Line",
                                         prescale = config["D2MueePrescale"],
                                         postscale = config["Postscale"],
                                         MDSTFlag = True,
                                         selection = self.selD2Muee,
                                         RelatedInfoTools = [
                                             { "Type" : "RelInfoConeVariables",
                                               "ConeAngle" : 0.5,
                                               "Variables" : [],
                                               "RecursionLevel" : 1,
                                               "Locations" : { self.selD2Muee : "ConeIso05Dp",
                                                               "Phys/StdAllLooseMuons" : "ConeIso05mu",
                                                               "Phys/StdAllLooseElectrons" : ["ConeIso05e1", "ConeIso05e2"], }, },
                                             { "Type" : "RelInfoConeVariables",
                                               "ConeAngle" : 1.0,
                                               "Variables" : [],
                                               "RecursionLevel" : 1,
                                               "Locations" : { self.selD2Muee : "ConeIso10Dp",
                                                               "Phys/StdAllLooseMuons" : "ConeIso10mu",
                                                               "Phys/StdAllLooseElectrons" : ["ConeIso10e1", "ConeIso10e2"], }, },
                                             { "Type" : "RelInfoConeVariables",
                                               "ConeAngle" : 1.5,
                                               "Variables" : [],
                                               "RecursionLevel" : 1,
                                               "Locations" : { self.selD2Muee : "ConeIso15Dp",
                                                               "Phys/StdAllLooseMuons" : "ConeIso15mu",
                                                               "Phys/StdAllLooseElectrons" : ["ConeIso15e1", "ConeIso15e2"], }, },
                                             { "Type": "RelInfoTrackIsolationBDT",
                                               "RecursionLevel" : 1,
                                               "Variables" : 0,
                                               "Locations": { "Phys/StdAllLooseMuons" : "TrackIsoBDTmu",
                                                              "Phys/StdAllLooseElectrons" : ["TrackIsoBDTe1","TrackIsoBDTe2"], }, },
                                             { "Type" : "RelInfoBs2MuMuTrackIsolations",
                                               "RecursionLevel" : 1,
                                               "Variables" : [],
                                               "IsoTwoBody" : True,
                                               "Locations" : { "Phys/StdAllLooseMuons" : "BsMuMuTrackIsomu",
                                                               "Phys/StdAllLooseElectrons" :
                                                               ["BsMuMuTrackIsoe1","BsMuMuTrackIsoe2"] ,}, },
                                             ] + config["CommonRelInfoTools"] # end of RelatedInfoTools
                                         ) # closes Strippingline
        # D+ -> pi pi pi control line (prescaled in the configuration).
        self.D23Pi_Line = StrippingLine(D23Pi_name+"Line",
                                        prescale = config["D23PiPrescale"],
                                        postscale = config["Postscale"],
                                        MDSTFlag = True,
                                        selection = self.selD23Pi,
                                        RelatedInfoTools = [
                                            { "Type" : "RelInfoConeVariables",
                                              "ConeAngle" : 0.5,
                                              "Variables" : [],
                                              "RecursionLevel" : 1,
                                              "Locations" : { self.selD23Pi : "ConeIso05Dp",
                                                              "Phys/StdAllLoosePions" :
                                                              ["ConeIso05pi1", "ConeIso05pi2", "ConeIso05pi3"], }, },
                                            { "Type" : "RelInfoConeVariables",
                                              "ConeAngle" : 1.0,
                                              "Variables" : [],
                                              "RecursionLevel" : 1,
                                              "Locations" : { self.selD23Pi : "ConeIso10Dp",
                                                              "Phys/StdAllLoosePions" :
                                                              ["ConeIso10pi1", "ConeIso10pi2", "ConeIso10pi3"], }, },
                                            { "Type" : "RelInfoConeVariables",
                                              "ConeAngle" : 1.5,
                                              "Variables" : [],
                                              "RecursionLevel" : 1,
                                              "Locations" : { self.selD23Pi : "ConeIso15Dp",
                                                              "Phys/StdAllLoosePions" :
                                                              ["ConeIso15pi1", "ConeIso15pi2", "ConeIso15pi3"], }, },
                                            { "Type": "RelInfoTrackIsolationBDT",
                                              "RecursionLevel" : 1,
                                              "Variables" : 0,
                                              "Locations": { "Phys/StdAllLoosePions" :
                                                             ["TrackIsoBDTpi1","TrackIsoBDTpi2","TrackIsoBDTpi3"], }, },
                                            { "Type" : "RelInfoBs2MuMuTrackIsolations",
                                              "RecursionLevel" : 1,
                                              "Variables" : [],
                                              "IsoTwoBody" : True,
                                              "Locations" : { "Phys/StdAllLoosePions" :
                                                              ["BsMuMuTrackIsopi1","BsMuMuTrackIsopi2","BsMuMuTrackIsopi3"] ,}, },
                                            ] + config["CommonRelInfoTools"] # end of RelatedInfoTools
                                        ) # closes Strippingline
        #############################################################################
        # Register all three lines with the stripping framework.
        self.registerLine(self.D23Mu_Line)
        self.registerLine(self.D2Muee_Line)
        self.registerLine(self.D23Pi_Line)
    #############################################################################
    def makeD23Mu(self,name):
        """Build the D+ -> mu mu mu selection (opposite- and same-sign)."""
        D23Mu = DaVinci__N3BodyDecays("Combine"+name)
        D23Mu.DecayDescriptors = [ "[D+ -> mu+ mu+ mu-]cc","[D+ -> mu+ mu+ mu+]cc" ]
        D23Mu.DaughtersCuts = { "mu+" : self.TrackCuts }
        D23Mu.Combination12Cut = self.Combination12Cuts
        D23Mu.CombinationCut = self.CombinationCutsLoose
        D23Mu.MotherCut = self.MotherCuts
        _myMuons = DataOnDemand(Location = "Phys/StdLooseMuons/Particles")
        return Selection (name, Algorithm = D23Mu, RequiredSelections = [ _myMuons ])
    #############################################################################
    def makeD2Muee(self,name):
        """Build the D+ -> mu e e selection (all charge combinations)."""
        D2Muee = DaVinci__N3BodyDecays("Combine"+name)
        D2Muee.DecayDescriptors = [ "[D+ -> mu+ e+ e-]cc","[D+ -> mu- e+ e+]cc","[D+ -> mu+ e+ e+]cc" ]
        D2Muee.DaughtersCuts = { "mu+" : self.TrackCuts, "e+" : self.TrackCuts }
        D2Muee.Combination12Cut = self.Combination12Cuts
        D2Muee.CombinationCut = self.CombinationCutsLoose
        D2Muee.MotherCut = self.MotherCuts
        _myMuons = DataOnDemand(Location = "Phys/StdLooseMuons/Particles")
        _myElectrons = DataOnDemand(Location = "Phys/StdLooseElectrons/Particles")
        return Selection (name, Algorithm = D2Muee, RequiredSelections = [ _myMuons, _myElectrons ])
    #############################################################################
    def makeD23Pi(self,name):
        """Build the D+ -> pi pi pi control selection (tight mass window)."""
        D23Pi = DaVinci__N3BodyDecays("Combine"+name)
        D23Pi.DecayDescriptors = [ "[D+ -> pi+ pi+ pi-]cc" ]
        D23Pi.DaughtersCuts = { "pi+" : self.TrackCuts }
        D23Pi.Combination12Cut = self.Combination12Cuts
        D23Pi.CombinationCut = self.CombinationCutsTight
        D23Pi.MotherCut = self.MotherCuts
        _myPions = DataOnDemand(Location = "Phys/StdLoosePions/Particles")
        return Selection (name, Algorithm = D23Pi, RequiredSelections = [ _myPions ])
#############################################################################
| [
"[email protected]"
] | |
6685eda1a70bab345ddc6f996c018feac6a6c741 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03371/s173081591.py | e631cebeb539f9cb5923fd6e498f3a402e717958 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | import sys
# Fast-I/O aliases and contest boilerplate. `readline` reads raw bytes for
# quick parsing; `read`, INF and MOD are unused by this particular solution.
read = sys.stdin.read
readline = sys.stdin.buffer.readline
sys.setrecursionlimit(10 ** 8)
INF = float('inf')
MOD = 10 ** 9 + 7
def main():
    """Solve the 'half and half' pizza problem (AtCoder-style).

    Reads "A B C X Y" from stdin: A and B are whole A-/B-pizza prices, C is
    the price of one AB half-pizza (so 2*C buys one A plus one B), and X, Y
    are the required counts. Prints the minimum total cost.
    """
    # Read directly from stdin so the function is self-contained and testable
    # (previously it relied on the module-level `readline` alias).
    A, B, C, X, Y = map(int, sys.stdin.buffer.readline().split())
    # Option 1: buy only whole pizzas.
    ans = A * X + B * Y
    if X >= Y:
        # Cover the Y common pairs with AB pizzas (rest as whole A), or buy
        # everything as AB pairs and discard the surplus B halves.
        ans = min(ans, C * Y * 2 + A * (X - Y), C * X * 2)
    else:
        ans = min(ans, C * X * 2 + B * (Y - X), C * Y * 2)
    print(ans)
# Entry-point guard so the solver can be imported without running.
if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
21d259e2bd0230f61d9018f3536a28303133178b | e8d4fe2361d71aef6519f666152f14137156159c | /impacket-0.9.11/build/lib.linux-i686-2.6/impacket/dcerpc/dcerpc_v4.py | 5099c5aaaf72e06ee8a13bc334ed7399833f905f | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"Apache-1.1",
"BSD-2-Clause"
] | permissive | kenzshi/DDoSProject | 11d7e676a150964a9f78f1b7e1df4468dd9d973f | 9587a2be7f4773d19a96a35d1128f5041f0472da | refs/heads/master | 2021-01-10T19:48:21.355849 | 2015-03-16T09:52:22 | 2015-03-16T09:52:22 | 30,205,639 | 42 | 32 | null | null | null | null | UTF-8 | Python | false | false | 8,951 | py | # Copyright (c) 2003-2012 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# $Id: dcerpc_v4.py 529 2012-04-29 21:39:46Z [email protected] $
#
# Description:
# Handle basic DCE/RPC protocol, version 4.
#
import array
import socket
import struct
from impacket import ImpactPacket
from impacket import uuid
from impacket import dcerpc
import dcerpc, conv
class DCERPC_RawCall(ImpactPacket.Header):
    # Request body carrying a pre-marshalled stub for operation OP_NUM; the
    # surrounding MSRPCHeader reads OP_NUM when this is nested via contains().
    def __init__(self, op_num, data = ''):
        self.OP_NUM = op_num
        ImpactPacket.Header.__init__(self)
        self.setData(data)
    def setData(self, data):
        # Replace the whole payload with the given byte string.
        self.get_bytes()[:] = array.array('B', data)
    def get_header_size(self):
        # The "header" is the entire payload; there is no fixed-size part.
        return len(self.get_bytes())
class MSRPCHeader(ImpactPacket.Header):
    """DCE/RPC version 4 (connectionless) 80-byte PDU header.

    Accessors read/write little-endian fields in place at fixed offsets
    (layout follows the connectionless header of the DCE 1.1 RPC spec,
    C706 -- note the serial number is split across bytes 7 and 79).
    """
    __SIZE = 80

    def __init__(self, aBuffer = None):
        ImpactPacket.Header.__init__(self, MSRPCHeader.__SIZE)
        self.set_version(4)
        self.set_type(dcerpc.MSRPC_REQUEST)
        self.set_flags((0x08, 0x00))
        self.set_representation((0x10, 0x00, 0x00))
        self.set_serial((0, 0))
##        self.set_if_version(3)
        self.set_seq_num(0)
        self.set_if_hint(0xFFFF)
        self.set_activity_hint(0xFFFF)
        if aBuffer: self.load_header(aBuffer)
    def get_version(self):
        return self.get_byte(0)
    def set_version(self, version):
        self.set_byte(0, version)
    def get_type(self):
        return self.get_byte(1)
    def set_type(self, type):
        self.set_byte(1, type)
    def get_flags(self):
        """ This method returns a tuple in (flags1, flags2) form."""
        return (self.get_byte(2), self.get_byte(3))
    def set_flags(self, flags):
        """ This method takes a tuple in (flags1, flags2) form."""
        self.set_byte(2, flags[0])
        self.set_byte(3, flags[1])
    def get_representation(self):
        """ This method returns the 3-byte data representation (drep) as a tuple."""
        return (self.get_byte(4), self.get_byte(5), self.get_byte(6))
    def set_representation(self, representation):
        """ This method takes a 3-byte data representation (drep) tuple."""
        self.set_byte(4, representation[0])
        self.set_byte(5, representation[1])
        # Fix: byte 6 previously copied representation[1] (copy-paste bug);
        # it must mirror get_representation() and store the third element.
        # Latent until now because the default drep has equal trailing bytes.
        self.set_byte(6, representation[2])
    def get_serial(self):
        """ This method returns a tuple in (high, low) form."""
        return (self.get_byte(7), self.get_byte(79))
    def set_serial(self, serial):
        """ This method takes a tuple in (high, low) form."""
        self.set_byte(7, serial[0])
        self.set_byte(79, serial[1])
    def get_obj_binuuid(self):
        return self.get_bytes().tolist()[8:8+16]
    def set_obj_binuuid(self, binuuid):
        assert 16 == len(binuuid)
        self.get_bytes()[8:8+16] = array.array('B', binuuid)
    def get_if_binuuid(self):
        return self.get_bytes().tolist()[24:24+16]
    def set_if_binuuid(self, binuuid):
        assert 16 == len(binuuid)
        self.get_bytes()[24:24+16] = array.array('B', binuuid)
    def get_activity_binuuid(self):
        return self.get_bytes().tolist()[40:40+16]
    def set_activity_binuuid(self, binuuid):
        assert 16 == len(binuuid)
        self.get_bytes()[40:40+16] = array.array('B', binuuid)
    def get_server_boottime(self):
        return self.get_long(56, '<')
    def set_server_boottime(self, time):
        self.set_long(56, time, '<')
    def get_if_version(self):
        return self.get_long(60, '<')
    def set_if_version(self, version):
        self.set_long(60, version, '<')
    def get_seq_num(self):
        return self.get_long(64, '<')
    def set_seq_num(self, num):
        self.set_long(64, num, '<')
    def get_op_num(self):
        return self.get_word(68, '<')
    def set_op_num(self, op):
        self.set_word(68, op, '<')
    def get_if_hint(self):
        return self.get_word(70, '<')
    def set_if_hint(self, hint):
        self.set_word(70, hint, '<')
    def get_activity_hint(self):
        return self.get_word(72, '<')
    def set_activity_hint(self, hint):
        self.set_word(72, hint, '<')
    def get_frag_len(self):
        return self.get_word(74, '<')
    def set_frag_len(self, len):
        self.set_word(74, len, '<')
    def get_frag_num(self):
        return self.get_word(76, '<')
    def set_frag_num(self, num):
        self.set_word(76, num, '<')
    def get_auth_proto(self):
        return self.get_byte(78)
    def set_auth_proto(self, proto):
        self.set_byte(78, proto)
    def get_header_size(self):
        return MSRPCHeader.__SIZE
    def contains(self, aHeader):
        # Nesting a payload also propagates its opnum and sets frag_len.
        ImpactPacket.Header.contains(self, aHeader)
        if self.child():
            contents_size = self.child().get_size()
            self.set_op_num(self.child().OP_NUM)
            self.set_frag_len(contents_size)
    def get_ctx_id(self):
        # v4 headers carry no context id; kept for API parity with v5.
        # return self.get_word(20, '<')
        return 0
    def set_ctx_id(self, id):
        # self.set_word(20, id, '<')
        pass
class DCERPC_v4(dcerpc.DCERPC):
    """Connectionless (datagram) DCE/RPC v4 engine: fragments outgoing
    requests, answers the CONV (who-are-you) callback during binding and
    strips the 80-byte header from replies."""
    DEFAULT_FRAGMENT_SIZE = 1392
    def __init__(self, transport):
        dcerpc.DCERPC.__init__(self, transport)
        self.__activity_uuid = uuid.generate()
        self.__seq_num = 0
        self._bind = 0 # Don't attempt binding unless it explicitly requested.
        self.set_idempotent(0)
    def set_default_max_fragment_size(self):
        self.set_max_fragment_size(DCERPC_v4.DEFAULT_FRAGMENT_SIZE)
    def bind(self, uuid, bogus_binds = ''):
        """If idempotent is non-zero, the package will be sent with
        that flag enabled. Certain services react by skiping the CONV
        phase during the binding.
        """
        # NOTE(review): the `uuid` parameter shadows the module-level
        # `uuid` import; it is the 20-byte interface uuid+version blob.
        # Binding is deferred until the first packet goes out (see send()).
        self._bind = 1 # Will bind later, when the first packet is transferred.
        self.__if_uuid = uuid[:16]
        self.__if_version = struct.unpack('<L', uuid[16:20])[0]
    def get_idempotent(self):
        return self.__idempotent
    def set_idempotent(self, flag):
        self.__idempotent = flag
    def conv_bind(self):
        # Receive CONV handshake.
        # ImpactDecode: this block.
        data = self._transport.recv()
        rpc = MSRPCHeader(data)
        activity_uuid = rpc.get_activity_binuuid()
        _conv = conv.WhoAreYou(data[rpc.get_header_size():])
        # ImpactDecode
        # Build the WhoAreYou2 response, echoing the server's activity uuid.
        rpc = MSRPCHeader()
        rpc.set_type(dcerpc.MSRPC_RESPONSE)
        rpc.set_if_binuuid(conv.MSRPC_UUID_CONV)
        flags = rpc.get_flags()
        rpc.set_flags((flags[0], 0x04))
        rpc.set_activity_binuuid(activity_uuid)
        _conv = conv.WhoAreYou2()
        rpc.contains(_conv)
        # The CONV response must be sent to the endpoint from where the request was received.
        old_address = self._transport.get_addr()
        peer_address = self._transport.get_recv_addr()
        self._transport.set_addr(peer_address)
        self._transport.send(rpc.get_packet())
        self._transport.set_addr(old_address)
    def send(self, data):
        # Accepts either a new-style MSRPCHeader structure or a raw-call
        # object exposing OP_NUM/get_packet().
        if isinstance(data, dcerpc.MSRPCHeader):
            opnum = data['op_num']
            packet = data['pduData']
        else:
            opnum = data.OP_NUM
            packet = data.get_packet()
        frag_num = 0
        rpc = MSRPCHeader()
        self.set_ctx_id(self._ctx)
        rpc.set_if_binuuid(self.__if_uuid)
        rpc.set_if_version(self.__if_version)
        rpc.set_activity_binuuid(self.__activity_uuid)
        rpc.set_seq_num(self.__seq_num)
        frag = DCERPC_RawCall(opnum)
        if self._max_frag:
            # Fragmented path: slice the stub into _max_frag-sized pieces,
            # flagging the final piece with MSRPC_LASTFRAG.
            offset = 0
            while 1:
                toSend = packet[offset:offset+self._max_frag]
                if not toSend: break
                flags = dcerpc.MSRPC_NOTAFRAG | dcerpc.MSRPC_RECRESPOND
                if self.__idempotent: flags |= dcerpc.MSRPC_NOTFORIDEMP
                offset += len(toSend)
                if offset == len(packet): flags |= dcerpc.MSRPC_LASTFRAG
                rpc.set_flags((flags, 0))
                frag.setData(toSend)
                rpc.contains(frag)
                rpc.set_frag_num(frag_num)
                self._transport.send(rpc.get_packet())
                frag_num += 1
                # Deferred bind: answer CONV after the first fragment.
                if self._bind and not self.__idempotent:
                    self._bind = 0
                    self.conv_bind()
                    self.recv() # Discard RPC_ACK.
        else:
            # Unfragmented path: ship the payload in a single PDU.
            if self.__idempotent:
                rpc.set_flags((dcerpc.MSRPC_NOTFORIDEMP, 0))
            rpc.contains(packet)
            self._transport.send(rpc.get_packet())
            if self._bind and not self.__idempotent:
                self._bind = 0
                self.conv_bind()
                self.recv() # Discard RPC_ACK.
        self.__seq_num += 1
    def recv(self):
        # Return the reply stub with the 80-byte v4 header stripped.
        data = self._transport.recv()
        rpc = MSRPCHeader(data)
        off = rpc.get_header_size()
        return data[off:]
| [
"[email protected]"
] | |
8f26465aaa04312133e55a3be07fa3ebfdaba5af | 3196460db64eded2daa77457643c8dd1ed1ba99e | /codechef/steve/COINS-wrong2.py | b8ec3851c8d755ef25b8402d188b8199eba086e0 | [] | no_license | prototypemagic/proto-mastery | 94c649958792f00ea2a057b63ed0f7717b5ab05d | 45f7ef2e998fa7dbc071f5c42217a83fd9340f51 | refs/heads/master | 2020-05-28T08:55:45.769199 | 2012-09-10T22:12:00 | 2012-09-10T22:12:00 | 3,097,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | #!/usr/bin/env python
# Steve Phillips / elimisteve
# 2012.01.04
# The following is wrong, to say the least, because input == 13 should
# produce output == 13, not 14. As the problem states, you cannot
# exchange Bytelandian coins for other Bytelandian coins.
def naive_max(num):
    # Value from one full exchange: floor(n/2)+floor(n/3)+floor(n/4)
    # (Python 2 integer division).
    # Given in problem description
    return num/2 + num/3 + num/4
def clever_max(num):
    '''Turns every 12 bytelandian coins into 13, plus remainder'''
    # NOT given in problem description
    # Known-incorrect (filename: wrong2, see header comments): the problem
    # forbids exchanging Bytelandian coins for other Bytelandian coins, so
    # this overestimates -- e.g. 13 should yield 13, not 14. Kept deliberately.
    naive = naive_max(num)
    maybe_bigger = (num/12) * 13 + (num % 12) # WRONG!
    return maybe_bigger if maybe_bigger > naive else naive
# Read one coin value per line until EOF or non-numeric input; print the
# best of: keep the coin, exchange it once, or exchange its three parts.
# The bare except doubles as the loop exit (and hides real errors --
# acceptable only for throwaway contest code). Python 2 source (raw_input).
n = 0
while True:
    try:
        n = int( raw_input().strip() )
        print max([n, clever_max(n),
                   clever_max(n/2) + clever_max(n/3) + clever_max(n/4)])
    except:
        break
| [
"[email protected]"
] | |
143fe68d7f6815fea8d18f1fb028024f23bd7c51 | bd02997a44218468b155eda45dd9dd592bb3d124 | /leetcode_course-schedule2.py | ca4b99612fab4cd4749f3814a1054bbfb691055d | [] | no_license | rheehot/ProblemSolving_Python | 88b1eb303ab97624ae6c97e05393352695038d14 | 4d6dc6aea628f0e6e96530646c66216bf489427f | refs/heads/master | 2023-02-13T03:30:07.039231 | 2021-01-04T06:04:11 | 2021-01-04T06:04:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | '''
Problem Solving leetcode course-schedule2
Author: Injun Son
Date: October 25, 2020
'''
import sys
import collections
import heapq
import functools
import itertools
import re
import math
import bisect
from typing import *
def canFinish(numCourses: int, prerequisites: List[List[int]]) -> bool:
graph = collections.defaultdict(list)
# 그래프 구성
for x, y in prerequisites:
graph[x].append(y)
traced = set()
visited = set()
def dfs(i):
# 순환 구조이면 False
if i in traced:
return False
# 이미 방문 했던 노드이면 True
if i in visited:
return True
traced.add(i)
for y in graph[i]:
if not dfs(y):
return False
#탐색 종료 후 순환 노드 삭제
traced.remove(i)
#탐색 종료 후 방문 노드 추가
visited.add(i)
return True
for x in list(graph):
if not dfs(x):
return False
return True
print(canFinish(2, [[1,0]]))
print(canFinish(2, [[1,0], [0,1]])) | [
"[email protected]"
] | |
449b4ad21c79a9864610111fbf3661076f001bc8 | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v10/services/services/customer_asset_service/client.py | eb20ce17a4036bc57b106277613558af2955bb19 | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 21,026 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v10.services.types import customer_asset_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import CustomerAssetServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import CustomerAssetServiceGrpcTransport
class CustomerAssetServiceClientMeta(type):
"""Metaclass for the CustomerAssetService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[CustomerAssetServiceTransport]]
_transport_registry["grpc"] = CustomerAssetServiceGrpcTransport
def get_transport_class(
cls,
label: str = None,
) -> Type[CustomerAssetServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class CustomerAssetServiceClient(metaclass=CustomerAssetServiceClientMeta):
"""Service to manage customer assets."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CustomerAssetServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CustomerAssetServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> CustomerAssetServiceTransport:
"""Returns the transport used by the client instance.
Returns:
CustomerAssetServiceTransport: The transport used by the client
instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def asset_path(
customer_id: str,
asset_id: str,
) -> str:
"""Returns a fully-qualified asset string."""
return "customers/{customer_id}/assets/{asset_id}".format(
customer_id=customer_id,
asset_id=asset_id,
)
@staticmethod
def parse_asset_path(path: str) -> Dict[str, str]:
"""Parses a asset path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/assets/(?P<asset_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def customer_asset_path(
customer_id: str,
asset_id: str,
field_type: str,
) -> str:
"""Returns a fully-qualified customer_asset string."""
return "customers/{customer_id}/customerAssets/{asset_id}~{field_type}".format(
customer_id=customer_id,
asset_id=asset_id,
field_type=field_type,
)
@staticmethod
def parse_customer_asset_path(path: str) -> Dict[str, str]:
"""Parses a customer_asset path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/customerAssets/(?P<asset_id>.+?)~(?P<field_type>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(
billing_account: str,
) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(
folder: str,
) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(
folder=folder,
)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(
organization: str,
) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(
organization=organization,
)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(
project: str,
) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(
project=project,
)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(
project: str,
location: str,
) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, CustomerAssetServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the customer asset service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, CustomerAssetServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, CustomerAssetServiceTransport):
# transport is a CustomerAssetServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def mutate_customer_assets(
self,
request: Union[
customer_asset_service.MutateCustomerAssetsRequest, dict
] = None,
*,
customer_id: str = None,
operations: Sequence[
customer_asset_service.CustomerAssetOperation
] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> customer_asset_service.MutateCustomerAssetsResponse:
r"""Creates, updates, or removes customer assets. Operation statuses
are returned.
List of thrown errors: `AssetLinkError <>`__
`AuthenticationError <>`__ `AuthorizationError <>`__
`FieldError <>`__ `HeaderError <>`__ `InternalError <>`__
`MutateError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v10.services.types.MutateCustomerAssetsRequest, dict]):
The request object. Request message for
[CustomerAssetService.MutateCustomerAssets][google.ads.googleads.v10.services.CustomerAssetService.MutateCustomerAssets].
customer_id (str):
Required. The ID of the customer
whose customer assets are being
modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (Sequence[google.ads.googleads.v10.services.types.CustomerAssetOperation]):
Required. The list of operations to
perform on individual customer assets.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v10.services.types.MutateCustomerAssetsResponse:
Response message for a customer asset
mutate.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([customer_id, operations])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a customer_asset_service.MutateCustomerAssetsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, customer_asset_service.MutateCustomerAssetsRequest
):
request = customer_asset_service.MutateCustomerAssetsRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.mutate_customer_assets
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-ads",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("CustomerAssetServiceClient",)
| [
"[email protected]"
] | |
066e81a0fbe03a8fbc53b78c094138284f850ede | 6c80119e02bb29761fc7854c5a2f2a144451ca5a | /tests/fakeIDP.py | 971281cd5d87940746d418b565f9b43de490a12b | [
"BSD-2-Clause"
] | permissive | josjevv/pysaml2 | c412a21db7a52334bf67feeabc38f877a121f973 | f806786f6dad8fc2b03daa0e1d55682daead3ec8 | refs/heads/master | 2020-12-25T12:17:41.628279 | 2013-04-22T11:45:25 | 2013-04-22T11:45:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,520 | py | from urlparse import parse_qs
from saml2.saml import AUTHN_PASSWORD
from saml2.samlp import attribute_query_from_string, logout_request_from_string
from saml2 import BINDING_HTTP_REDIRECT, pack
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_SOAP
from saml2.server import Server
from saml2.soap import parse_soap_enveloped_saml_attribute_query
from saml2.soap import parse_soap_enveloped_saml_logout_request
from saml2.soap import make_soap_enveloped_saml_thingy
__author__ = 'rolandh'
TYP = {
"GET": [BINDING_HTTP_REDIRECT],
"POST": [BINDING_HTTP_POST, BINDING_SOAP]
}
def unpack_form(_str, ver="SAMLRequest"):
SR_STR = "name=\"%s\" value=\"" % ver
RS_STR = 'name="RelayState" value="'
i = _str.find(SR_STR)
i += len(SR_STR)
j = _str.find('"', i)
sr = _str[i:j]
k = _str.find(RS_STR, j)
k += len(RS_STR)
l = _str.find('"', k)
rs = _str[k:l]
return {ver:sr, "RelayState":rs}
class DummyResponse(object):
def __init__(self, code, data, headers=None):
self.status_code = code
self.text = data
self.headers = headers or []
class FakeIDP(Server):
def __init__(self, config_file=""):
Server.__init__(self, config_file)
#self.sign = False
def receive(self, url, method="GET", **kwargs):
"""
Interface to receive HTTP calls on
:param url:
:param method:
:param kwargs:
:return:
"""
if method == "GET":
path, query = url.split("?")
qs_dict = parse_qs(kwargs["data"])
req = qs_dict["SAMLRequest"][0]
rstate = qs_dict["RelayState"][0]
else:
# Could be either POST or SOAP
path = url
try:
qs_dict = parse_qs(kwargs["data"])
req = qs_dict["SAMLRequest"][0]
rstate = qs_dict["RelayState"][0]
except KeyError:
req = kwargs["data"]
rstate = ""
response = ""
# Get service from path
for key, vals in self.config.getattr("endpoints", "idp").items():
for endp, binding in vals:
if path == endp:
assert binding in TYP[method]
if key == "single_sign_on_service":
return self.authn_request_endpoint(req, binding,
rstate)
elif key == "single_logout_service":
return self.logout_endpoint(req, binding)
for key, vals in self.config.getattr("endpoints", "aa").items():
for endp, binding in vals:
if path == endp:
assert binding in TYP[method]
if key == "attribute_service":
return self.attribute_query_endpoint(req, binding)
return response
def authn_request_endpoint(self, req, binding, relay_state):
req = self.parse_authn_request(req, binding)
if req.message.protocol_binding == BINDING_HTTP_REDIRECT:
_binding = BINDING_HTTP_POST
else:
_binding = req.message.protocol_binding
try:
resp_args = self.response_args(req.message, [_binding])
except Exception:
raise
identity = { "surName":"Hedberg", "givenName": "Roland",
"title": "supertramp", "mail": "[email protected]"}
userid = "Pavill"
authn_resp = self.create_authn_response(identity,
userid=userid,
authn=(AUTHN_PASSWORD,
"http://www.example.com/login"),
**resp_args)
response = "%s" % authn_resp
_dict = pack.factory(_binding, response,
resp_args["destination"], relay_state,
"SAMLResponse")
return DummyResponse(200, **_dict)
def attribute_query_endpoint(self, xml_str, binding):
if binding == BINDING_SOAP:
_str = parse_soap_enveloped_saml_attribute_query(xml_str)
else:
_str = xml_str
aquery = attribute_query_from_string(_str)
extra = {"eduPersonAffiliation": "faculty"}
userid = "Pavill"
name_id = aquery.subject.name_id
attr_resp = self.create_attribute_response(extra, aquery.id,
None,
sp_entity_id=aquery.issuer.text,
name_id=name_id,
attributes=aquery.attribute)
if binding == BINDING_SOAP:
# SOAP packing
#headers = {"content-type": "application/soap+xml"}
soap_message = make_soap_enveloped_saml_thingy(attr_resp)
# if self.sign and self.sec:
# _signed = self.sec.sign_statement_using_xmlsec(soap_message,
# class_name(attr_resp),
# nodeid=attr_resp.id)
# soap_message = _signed
response = "%s" % soap_message
else: # Just POST
response = "%s" % attr_resp
return DummyResponse(200, response)
def logout_endpoint(self, xml_str, binding):
if binding == BINDING_SOAP:
_str = parse_soap_enveloped_saml_logout_request(xml_str)
else:
_str = xml_str
req = logout_request_from_string(_str)
_resp = self.create_logout_response(req, [binding])
if binding == BINDING_SOAP:
# SOAP packing
#headers = {"content-type": "application/soap+xml"}
soap_message = make_soap_enveloped_saml_thingy(_resp)
# if self.sign and self.sec:
# _signed = self.sec.sign_statement_using_xmlsec(soap_message,
# class_name(attr_resp),
# nodeid=attr_resp.id)
# soap_message = _signed
response = "%s" % soap_message
else: # Just POST
response = "%s" % _resp
return DummyResponse(200, response)
| [
"[email protected]"
] | |
94cd40578f30825025b17f2297e50eb9b0f8a635 | c1960138a37d9b87bbc6ebd225ec54e09ede4a33 | /adafruit-circuitpython-bundle-py-20210402/lib/adafruit_epd/ssd1680.py | 66392a74f69fe9a3ecffe574e03a380758bc1e95 | [] | no_license | apalileo/ACCD_PHCR_SP21 | 76d0e27c4203a2e90270cb2d84a75169f5db5240 | 37923f70f4c5536b18f0353470bedab200c67bad | refs/heads/main | 2023-04-07T00:01:35.922061 | 2021-04-15T18:02:22 | 2021-04-15T18:02:22 | 332,101,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,592 | py | # SPDX-FileCopyrightText: 2018 Dean Miller for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_epd.ssd1680` - Adafruit SSD1680 - ePaper display driver
====================================================================================
CircuitPython driver for Adafruit SSD1680 display breakouts
* Author(s): Melissa LeBlanc-Williams
"""
import time
from micropython import const
import adafruit_framebuf
from adafruit_epd.epd import Adafruit_EPD
__version__ = "2.9.3"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_EPD.git"
_SSD1680_DRIVER_CONTROL = const(0x01)
_SSD1680_GATE_VOLTAGE = const(0x03)
_SSD1680_SOURCE_VOLTAGE = const(0x04)
_SSD1680_INIT_SETTING = const(0x08)
_SSD1680_INIT_WRITE_REG = const(0x09)
_SSD1680_INIT_READ_REG = const(0x0A)
_SSD1680_BOOSTER_SOFT_START = const(0x0C)
_SSD1680_DEEP_SLEEP = const(0x10)
_SSD1680_DATA_MODE = const(0x11)
_SSD1680_SW_RESET = const(0x12)
_SSD1680_HV_DETECT = const(0x14)
_SSD1680_VCI_DETECT = const(0x15)
_SSD1680_TEMP_CONTROL = const(0x18)
_SSD1680_TEMP_WRITE = const(0x1A)
_SSD1680_TEMP_READ = const(0x1B)
_SSD1680_EXTTEMP_WRITE = const(0x1C)
_SSD1680_MASTER_ACTIVATE = const(0x20)
_SSD1680_DISP_CTRL1 = const(0x21)
_SSD1680_DISP_CTRL2 = const(0x22)
_SSD1680_WRITE_BWRAM = const(0x24)
_SSD1680_WRITE_REDRAM = const(0x26)
_SSD1680_READ_RAM = const(0x27)
_SSD1680_VCOM_SENSE = const(0x28)
_SSD1680_VCOM_DURATION = const(0x29)
_SSD1680_WRITE_VCOM_OTP = const(0x2A)
_SSD1680_WRITE_VCOM_CTRL = const(0x2B)
_SSD1680_WRITE_VCOM_REG = const(0x2C)
_SSD1680_READ_OTP = const(0x2D)
_SSD1680_READ_USERID = const(0x2E)
_SSD1680_READ_STATUS = const(0x2F)
_SSD1680_WRITE_WS_OTP = const(0x30)
_SSD1680_LOAD_WS_OTP = const(0x31)
_SSD1680_WRITE_LUT = const(0x32)
_SSD1680_CRC_CALC = const(0x34)
_SSD1680_CRC_READ = const(0x35)
_SSD1680_PROG_OTP = const(0x36)
_SSD1680_WRITE_DISPLAY_OPT = const(0x37)
_SSD1680_WRITE_USERID = const(0x38)
_SSD1680_OTP_PROGMODE = const(0x39)
_SSD1680_WRITE_BORDER = const(0x3C)
_SSD1680_END_OPTION = const(0x3F)
_SSD1680_SET_RAMXPOS = const(0x44)
_SSD1680_SET_RAMYPOS = const(0x45)
_SSD1680_AUTOWRITE_RED = const(0x46)
_SSD1680_AUTOWRITE_BW = const(0x47)
_SSD1680_SET_RAMXCOUNT = const(0x4E)
_SSD1680_SET_RAMYCOUNT = const(0x4F)
_SSD1680_NOP = const(0xFF)
class Adafruit_SSD1680(Adafruit_EPD):
"""driver class for Adafruit SSD1680 ePaper display breakouts"""
# pylint: disable=too-many-arguments
def __init__(
self, width, height, spi, *, cs_pin, dc_pin, sramcs_pin, rst_pin, busy_pin
):
super().__init__(
width, height, spi, cs_pin, dc_pin, sramcs_pin, rst_pin, busy_pin
)
if width % 8 != 0:
width += 8 - width % 8
self._buffer1_size = int(width * height / 8)
self._buffer2_size = self._buffer1_size
if sramcs_pin:
self._buffer1 = self.sram.get_view(0)
self._buffer2 = self.sram.get_view(self._buffer1_size)
else:
self._buffer1 = bytearray(self._buffer1_size)
self._buffer2 = bytearray(self._buffer2_size)
self._framebuf1 = adafruit_framebuf.FrameBuffer(
self._buffer1, width, height, buf_format=adafruit_framebuf.MHMSB
)
self._framebuf2 = adafruit_framebuf.FrameBuffer(
self._buffer2, width, height, buf_format=adafruit_framebuf.MHMSB
)
self.set_black_buffer(0, True)
self.set_color_buffer(1, False)
# pylint: enable=too-many-arguments
def begin(self, reset=True):
"""Begin communication with the display and set basic settings"""
if reset:
self.hardware_reset()
self.power_down()
def busy_wait(self):
"""Wait for display to be done with current task, either by polling the
busy pin, or pausing"""
if self._busy:
while self._busy.value:
time.sleep(0.01)
else:
time.sleep(0.5)
def power_up(self):
"""Power up the display in preparation for writing RAM and updating"""
self.hardware_reset()
self.busy_wait()
self.command(_SSD1680_SW_RESET)
self.busy_wait()
# driver output control
self.command(
_SSD1680_DRIVER_CONTROL,
bytearray([self._height - 1, (self._height - 1) >> 8, 0x00]),
)
# data entry mode
self.command(_SSD1680_DATA_MODE, bytearray([0x03]))
# Set voltages
self.command(_SSD1680_WRITE_VCOM_REG, bytearray([0x36]))
self.command(_SSD1680_GATE_VOLTAGE, bytearray([0x17]))
self.command(_SSD1680_SOURCE_VOLTAGE, bytearray([0x41, 0x00, 0x32]))
# Set ram X start/end postion
self.command(_SSD1680_SET_RAMXPOS, bytearray([0x01, 0x10]))
# Set ram Y start/end postion
self.command(
_SSD1680_SET_RAMYPOS,
bytearray([0, 0, self._height - 1, (self._height - 1) >> 8]),
)
# Set border waveform
self.command(_SSD1680_WRITE_BORDER, bytearray([0x05]))
# Set ram X count
self.command(_SSD1680_SET_RAMXCOUNT, bytearray([0x01]))
# Set ram Y count
self.command(_SSD1680_SET_RAMYCOUNT, bytearray([self._height - 1, 0]))
self.busy_wait()
def power_down(self):
"""Power down the display - required when not actively displaying!"""
self.command(_SSD1680_DEEP_SLEEP, bytearray([0x01]))
time.sleep(0.1)
def update(self):
"""Update the display from internal memory"""
self.command(_SSD1680_DISP_CTRL2, bytearray([0xF4]))
self.command(_SSD1680_MASTER_ACTIVATE)
self.busy_wait()
if not self._busy:
time.sleep(3) # wait 3 seconds
def write_ram(self, index):
"""Send the one byte command for starting the RAM write process. Returns
the byte read at the same time over SPI. index is the RAM buffer, can be
0 or 1 for tri-color displays."""
if index == 0:
return self.command(_SSD1680_WRITE_BWRAM, end=False)
if index == 1:
return self.command(_SSD1680_WRITE_REDRAM, end=False)
raise RuntimeError("RAM index must be 0 or 1")
def set_ram_address(self, x, y): # pylint: disable=unused-argument, no-self-use
"""Set the RAM address location, not used on this chipset but required by
the superclass"""
# Set RAM X address counter
self.command(_SSD1680_SET_RAMXCOUNT, bytearray([x + 1]))
# Set RAM Y address counter
self.command(_SSD1680_SET_RAMYCOUNT, bytearray([y, y >> 8]))
| [
"[email protected]"
] | |
2c577726ddb93acc298d9aa48b796d856a11327a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02818/s867983819.py | 672fccfd638abcdbc2e8bfd4c826f7fa452e2450 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | A, B, K = map(int, input().split())
count = 0
if K == 0:
print('%d %d' % (A, B))
elif K <= A:
print('%d %d' % (A-K, B))
elif A < K <= B+A:
print('%d %d' % (0, B-(K-A)))
else:
print('0 0')
| [
"[email protected]"
] | |
5a77ff53b47783a74d0756216f1c09c0dcf2c10e | 8796273a71427c8d9869431926341fbcac54095f | /imdemo/utils/singleton.py | efdc68aae590b919e315b4fbb42972ee95d1400c | [] | no_license | fatelei/im-demo | e2c377a4fc9c7ce5ab31210ed76f1532d537a790 | 032bac4e0cfe7365e389c64a1ce3a5aec7dd9208 | refs/heads/master | 2021-01-09T21:46:21.401059 | 2016-01-17T08:14:55 | 2016-01-17T08:14:55 | 45,176,036 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # -*- coding: utf8 -*-
"""
imdemo.utils.singleton
~~~~~~~~~~~~~~~~~~~~~~
Singleton mode.
"""
def singleton_class(obj):
instances = {}
def wrapper(*args, **kwargs):
name = obj.__name__
if name not in instances:
instance = obj(*args, **kwargs)
instances[name] = instance
return instances[name]
return wrapper
| [
"[email protected]"
] | |
f706442a26c3078a4ba76a8bf393f15c6c2a95f4 | 4e30c855c253cc1d972d29e83edb9d5ef662d30a | /product/models/stock.py | bfd6d68450fd500838468f3728f011573c8ed506 | [
"MIT"
] | permissive | rajeshr188/django-onex | 8b531fc2f519d004d1da64f87b10ffacbd0f2719 | 0a190ca9bcf96cf44f7773686205f2c1f83f3769 | refs/heads/master | 2023-08-21T22:36:43.898564 | 2023-08-15T12:08:24 | 2023-08-15T12:08:24 | 163,012,755 | 2 | 0 | NOASSERTION | 2023-07-22T09:47:28 | 2018-12-24T17:46:35 | Python | UTF-8 | Python | false | false | 17,058 | py | from decimal import Decimal
from django.db import models
from django.db.models import OuterRef, Subquery, Sum
from django.db.models.functions import Coalesce
from django.shortcuts import reverse
from dea.models import Journal
from utils.friendlyid import encode
from ..managers import StockLotManager, StockManager
class Stock(models.Model):
"""
represents stock for each product variant.this stock is used in sale/purchase purposes
"""
created = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
reorderat = models.IntegerField(default=1)
variant = models.ForeignKey(
"product.ProductVariant", on_delete=models.CASCADE, related_name="stocks"
)
objects = StockManager()
class Meta:
ordering = ("-created",)
def __str__(self):
cb = self.current_balance()
return f"{self.variant} {cb['wt']} {cb['qty']}"
def get_absolute_url(self):
return reverse("product_stock_detail", args=(self.pk,))
def get_update_url(self):
return reverse("product_stock_update", args=(self.pk,))
def get_pure_by_melting(self):
bal = self.current_balance()
return bal["wt"] * self.melting
def get_pure_by_cost(self):
bal = self.current_balance()
return bal["wt"] * self.cost
def audit(self):
"""
get last audit cb,totalin,total out and then append following
"""
try:
last_statement = self.stockstatement_set.latest()
except StockStatement.DoesNotExist:
last_statement = None
if last_statement is not None:
ls_wt = last_statement.Closing_wt
ls_qty = last_statement.Closing_qty
else:
ls_wt = 0
ls_qty = 0
stock_in = self.stock_in_txns(last_statement)
stock_out = self.stock_out_txns(last_statement)
cb_wt = ls_wt + (stock_in["wt"] - stock_out["wt"])
cb_qty = ls_qty + (stock_in["qty"] - stock_out["qty"])
return StockStatement.objects.create(
stock=self,
Closing_wt=cb_wt,
Closing_qty=cb_qty,
total_wt_in=stock_in["wt"],
total_qty_in=stock_in["qty"],
total_wt_out=stock_out["wt"],
total_qty_out=stock_out["qty"],
)
def stock_in_txns(self, ls):
"""
return all the In transactions since last audit"""
st = self.stocktransaction_set.all()
if ls:
st = st.filter(created__gte=ls.created)
st = st.filter(movement_type__in=["P", "SR", "AR", "AD", "IN"])
return st.aggregate(
qty=Coalesce(models.Sum("quantity", output_field=models.IntegerField()), 0),
wt=Coalesce(
models.Sum("weight", output_field=models.DecimalField()), Decimal(0.0)
),
)
def stock_out_txns(self, ls):
"""
return all Out Transactions since last audit
"""
st = self.stocktransaction_set.all()
if ls:
st = st.filter(created__gte=ls.created)
st = st.filter(movement_type__in=["PR", "S", "A", "RM", "OT"])
return st.aggregate(
qty=Coalesce(models.Sum("quantity", output_field=models.IntegerField()), 0),
wt=Coalesce(
models.Sum("weight", output_field=models.DecimalField()), Decimal(0.0)
),
)
def current_balance(self):
"""
compute balance from last audit and append following
"""
bal = {}
Closing_wt: Decimal = 0
Closing_qty: int = 0
try:
ls = self.stockstatement_set.latest()
Closing_wt = ls.Closing_wt
Closing_qty = ls.Closing_qty
except StockStatement.DoesNotExist:
ls = None
in_txns = self.stock_in_txns(ls)
out_txns = self.stock_out_txns(ls)
bal["wt"] = Closing_wt + (in_txns["wt"] - out_txns["wt"])
bal["qty"] = Closing_qty + (in_txns["qty"] - out_txns["qty"])
return bal
# def get_age(self):
# """
# returns age of stock in days
# """
# return (self.created - self.updated_on).days
def transact(self, weight, quantity, journal, movement_type):
"""
Modifies weight and quantity associated with the stock based on movement type
Returns none
"""
StockTransaction.objects.create(
journal=journal,
stock=self,
weight=weight,
quantity=quantity,
movement_type_id=movement_type,
)
self.update_status()
def merge_lots(self):
"""
merges all lots in to individual lots representing this stock of its product variant.
single operation to merge lots blindly.
merge only non huid/non-unique lots
"""
all_lots = self.lots.exclude(is_unique=True)
current = all_lots.current_balance()
new_lot = StockLot.objects.create(
wt=current.wt, qty=current.qty, stock=current.stock
)
new_lot.transact(
wt=current.wt, qty=current.qty, journal=None, movement_type="AD"
)
for i in all_lots:
i.transact(wt=current.wt, qty=current.qty, journal=None, movement_type="RM")
return new_lot
class StockLot(models.Model):
"""
StockLot core idea:
1 productV has many lots and all lots[productv] reference one stock
on purchase add to stocklot from purchase_item
on sale choose from stocklot from sale_item
a lot belongs to a purchase and can be split/merged into new lot belonging to same purchase
smaller lots can be stockout'ed and stockin'ed seperately
"""
# should this be mptt?Maybe
created = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
quantity = models.IntegerField(default=0)
weight = models.DecimalField(max_digits=10, decimal_places=3)
barcode = models.CharField(
max_length=155, null=True, blank=True, unique=True, editable=False
)
huid = models.CharField(max_length=6, null=True, blank=True, unique=True)
stock_code = models.CharField(max_length=4, blank=True, null=True)
purchase_touch = models.DecimalField(max_digits=10, decimal_places=3)
purchase_rate = models.DecimalField(
max_digits=10, decimal_places=3, null=True, blank=True
)
is_unique = models.BooleanField(default=False)
status = models.CharField(
max_length=10,
choices=(
("Empty", "Empty"),
("Available", "Available"),
("Sold", "Sold"),
("Approval", "Approval"),
("Return", "Return"),
),
default="Empty",
)
# related fields
stock = models.ForeignKey(Stock, on_delete=models.CASCADE, related_name="lots")
# redundant aint it?
variant = models.ForeignKey(
"product.ProductVariant", on_delete=models.CASCADE, related_name="stock_lots"
)
purchase_item = models.ForeignKey(
"purchase.InvoiceItem",
on_delete=models.CASCADE,
null=True,
blank=True,
related_name="item_lots",
)
objects = StockLotManager()
def __str__(self):
return f"{self.barcode} | {self.huid or ''} | {self.variant} | {self.current_balance()}"
@classmethod
def with_balance(cls):
balance_subquery = (
StockLotBalance.objects.filter(stocklot_id=OuterRef("pk"))
.values("stocklot_id")
.annotate(total_balance=Coalesce(Sum("balance"), 0))
.values("total_balance")
)
queryset = cls.objects.annotate(balance=Subquery(balance_subquery))
return queryset
def generate_barcode(self):
print("generating barcode")
if not self.barcode:
self.barcode = encode(self.pk)
self.save()
def update_status(self):
cb = self.current_balance()
if cb["wt"] <= 0.0 or cb["qty"] <= 0:
self.status = "Empty"
else:
self.status = "Available"
self.save()
def audit(self):
try:
last_statement = self.stockstatement_set.latest()
except StockStatement.DoesNotExist:
last_statement = None
if last_statement is not None:
ls_wt = last_statement.Closing_wt
ls_qty = last_statement.Closing_qty
else:
ls_wt = 0
ls_qty = 0
stock_in = self.stock_in_txns(last_statement)
stock_out = self.stock_out_txns(last_statement)
cb_wt = ls_wt + (stock_in["wt"] - stock_out["wt"])
cb_qty = ls_qty + (stock_in["qty"] - stock_out["qty"])
return StockStatement.objects.create(
stock=self.stock,
stock_batch=self,
Closing_wt=cb_wt,
Closing_qty=cb_qty,
total_wt_in=stock_in["wt"] if stock_in["wt"] else 0.0,
total_qty_in=stock_in["qty"] if stock_in["qty"] else 0,
total_wt_out=stock_out["wt"] if stock_out["wt"] else 0.0,
total_qty_out=stock_out["qty"] if stock_out["qty"] else 0,
)
def stock_in_txns(self, ls):
# filter since last audit
st = self.stocktransaction_set.all()
if ls:
st = st.filter(created__gte=ls.created)
st = st.filter(movement_type__id__in=["P", "SR", "AR", "AD", "IN"])
return st.aggregate(
qty=Coalesce(models.Sum("quantity", output_field=models.IntegerField()), 0),
wt=Coalesce(
models.Sum("weight", output_field=models.DecimalField()), Decimal(0.0)
),
)
def stock_out_txns(self, ls):
# filter since last audit
st = self.stocktransaction_set.all()
if ls:
st = st.filter(created__gte=ls.created)
st = st.filter(movement_type__id__in=["PR", "S", "A", "RM", "OT"])
return st.aggregate(
qty=Coalesce(models.Sum("quantity", output_field=models.IntegerField()), 0),
wt=Coalesce(
models.Sum("weight", output_field=models.DecimalField()), Decimal(0.0)
),
)
def current_balance(self):
# compute cb from last audit and append following
bal = {}
try:
ls = self.stockstatement_set.latest()
Closing_wt = ls.Closing_wt
Closing_qty = ls.Closing_qty
except StockStatement.DoesNotExist:
ls = None
Closing_wt = 0
Closing_qty = 0
in_txns = self.stock_in_txns(ls)
out_txns = self.stock_out_txns(ls)
bal["wt"] = Closing_wt + (in_txns["wt"] - out_txns["wt"])
bal["qty"] = Closing_qty + (in_txns["qty"] - out_txns["qty"])
return bal
def get_total_sold(self):
return self.sold_items.aggregate(
qty=Coalesce(models.Sum("quantity", output_field=models.IntegerField()), 0),
wt=Coalesce(
models.Sum("weight", output_field=models.DecimalField()), Decimal(0.0)
),
)
def transact(self, weight, quantity, journal, movement_type):
"""
Modifies weight and quantity associated with the stock based on movement type
Returns none
"""
StockTransaction.objects.create(
journal=journal,
lot=self,
weight=weight,
quantity=quantity,
movement_type_id=movement_type,
stock=self.stock,
)
self.update_status()
def merge(self, lot: int):
"""
a lots qty and weight remains same troughout its life,
any add/remove/merge/split on a lot is performed via transactions,
and current balance of a lot is derived from transaction.
Return : new_lot:StockLot
"""
if self.variant != lot.variant or self.stock != lot.stock:
raise Exception(
"cannot merge lots from different variant or associated with different stock"
)
new_lot = StockLot(
variant=self.variant,
weight=lot.weight + self.eight,
quantity=lot.quantity + self.quantity,
)
self.transact(self.weight, self.quantity, journal=None, movement_type="RM")
lot.transact(lot.weight, lot.quantity, journal=None, movement_type="RM")
new_lot.transact(
self.weight + lot.weight,
self.quantity + lot.quantity,
journal=None,
movement_type="AD",
)
return new_lot
def split(self, wt: Decimal, qty: int):
"""
split a lot by creating a new lot and transfering the wt & qty to new lot
"""
if not self.is_unique and self.quantity > qty and self.weight > wt:
new_lot = StockLot(variant=self.variant, weight=wt, quantity=qty)
new_lot.transact(wt, qty, journal=None, movement_type="AD")
self.transact(wt, qty, journal=None, movement_type="RM")
return new_lot
raise Exception("Unique lots cant be split")
def get_age(self):
return (timezone.now() - self.created).days
class Movement(models.Model):
"""represents movement_type with direction of stock/lot transaction
ex: [('purchase','+'),('purchase return','-'),('sales','-'),('sale return','+'),
('split','-'),('merge','+')]
"""
id = models.CharField(max_length=3, primary_key=True)
name = models.CharField(max_length=30)
direction = models.CharField(max_length=1, default="+")
class StockTransaction(models.Model):
created = models.DateTimeField(auto_now_add=True)
quantity = models.IntegerField(default=0)
weight = models.DecimalField(max_digits=10, decimal_places=3, default=0)
description = models.TextField()
# relational Fields
# user = models.ForeignKey(CustomUser)
movement_type = models.ForeignKey(Movement, on_delete=models.CASCADE, default="P")
stock = models.ForeignKey(Stock, on_delete=models.CASCADE)
lot = models.ForeignKey(StockLot, on_delete=models.CASCADE, default=1)
journal = models.ForeignKey(Journal, on_delete=models.CASCADE, related_name="stxns")
class Meta:
ordering = ("-created",)
get_latest_by = ["created"]
def __str__(self):
return str(self.pk)
def get_absolute_url(self):
return reverse("product_stocktransaction_detail", args=(self.pk,))
def get_update_url(self):
return reverse("product_stocktransaction_update", args=(self.pk,))
class StockStatement(models.Model):
ss_method = (
("Auto", "Auto"),
("Physical", "Physical"),
)
method = models.CharField(max_length=20, choices=ss_method, default="Auto")
stock = models.ForeignKey(Stock, on_delete=models.CASCADE)
lot = models.ForeignKey(StockLot, on_delete=models.CASCADE, null=True)
created = models.DateTimeField(auto_now=True)
Closing_wt = models.DecimalField(max_digits=14, decimal_places=3)
Closing_qty = models.IntegerField()
total_wt_in = models.DecimalField(max_digits=14, decimal_places=3, default=0.0)
total_wt_out = models.DecimalField(max_digits=14, decimal_places=3, default=0.0)
total_qty_in = models.IntegerField(default=0.0)
total_qty_out = models.IntegerField(default=0.0)
class Meta:
ordering = ("created",)
get_latest_by = ["created"]
def __str__(self):
return f"{self.stock} - qty:{self.Closing_qty} wt:{self.Closing_wt}"
class StockBalance(models.Model):
stock = models.OneToOneField(Stock, on_delete=models.DO_NOTHING, primary_key=True)
Closing_wt = models.DecimalField(max_digits=14, decimal_places=3)
Closing_qty = models.IntegerField()
in_wt = models.DecimalField(max_digits=14, decimal_places=3)
in_qty = models.IntegerField()
out_wt = models.DecimalField(max_digits=14, decimal_places=3)
out_qty = models.IntegerField()
class Meta:
managed = False
db_table = "stock_balance"
def get_qty_bal(self):
return self.Closing_qty + self.in_qty - self.out_qty
def get_wt_bal(self):
return self.Closing_wt + self.in_wt - self.out_wt
class StockLotBalance(models.Model):
lot = models.OneToOneField(StockLot, on_delete=models.DO_NOTHING, primary_key=True)
Closing_wt = models.DecimalField(max_digits=14, decimal_places=3)
Closing_qty = models.IntegerField()
in_wt = models.DecimalField(max_digits=14, decimal_places=3, default=0.0)
in_qty = models.IntegerField(default=0)
out_wt = models.DecimalField(max_digits=14, decimal_places=3, default=0.0)
out_qty = models.IntegerField(default=0)
class Meta:
managed = False
db_table = "stockbatch_balance"
def get_qty_bal(self):
return self.Closing_qty + self.in_qty - self.out_qty
def get_wt_bal(self):
return self.Closing_wt + self.in_wt - self.out_wt
| [
"[email protected]"
] | |
34e948024f0bec94ff0ac644ed0ec34b906fbcf6 | c058f51b99f91faebf27183b2b579e9f96e0d8f5 | /botorch/sampling/index_sampler.py | ac64388a6725fbe6d9d097bcda515413de547a4f | [
"MIT"
] | permissive | pytorch/botorch | 255d62f698cc615c750e9343c278a63c7e96a586 | 4cc5ed59b2e8a9c780f786830c548e05cc74d53c | refs/heads/main | 2023-08-22T15:23:51.071048 | 2023-08-22T05:30:38 | 2023-08-22T05:30:38 | 142,940,093 | 2,891 | 373 | MIT | 2023-09-13T00:16:13 | 2018-07-30T23:59:57 | Jupyter Notebook | UTF-8 | Python | false | false | 2,289 | py | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Sampler to be used with `EnsemblePosteriors` to enable
deterministic optimization of acquisition functions with ensemble models.
"""
from __future__ import annotations
import torch
from botorch.posteriors.ensemble import EnsemblePosterior
from botorch.sampling.base import MCSampler
from torch import Tensor
class IndexSampler(MCSampler):
r"""A sampler that calls `posterior.rsample_from_base_samples` to
generate the samples via index base samples."""
def forward(self, posterior: EnsemblePosterior) -> Tensor:
r"""Draws MC samples from the posterior.
Args:
posterior: The ensemble posterior to sample from.
Returns:
The samples drawn from the posterior.
"""
self._construct_base_samples(posterior=posterior)
samples = posterior.rsample_from_base_samples(
sample_shape=self.sample_shape, base_samples=self.base_samples
)
return samples
def _construct_base_samples(self, posterior: EnsemblePosterior) -> None:
r"""Constructs base samples as indices to sample with them from
the Posterior.
Args:
posterior: The ensemble posterior to construct the base samples
for.
"""
if self.base_samples is None or self.base_samples.shape != self.sample_shape:
with torch.random.fork_rng():
torch.manual_seed(self.seed)
base_samples = torch.multinomial(
posterior.weights,
num_samples=self.sample_shape.numel(),
replacement=True,
).reshape(self.sample_shape)
self.register_buffer("base_samples", base_samples)
if self.base_samples.device != posterior.device:
self.to(device=posterior.device) # pragma: nocover
def _update_base_samples(
self, posterior: EnsemblePosterior, base_sampler: IndexSampler
) -> None:
r"""Null operation just needed for compatibility with
`CachedCholeskyAcquisitionFunction`."""
pass
| [
"[email protected]"
] | |
2bb7d800683997697c30b40167e239a1b671acbd | 9f5fcff2513f2d78f27e5313698dcc47fce1e754 | /Experiment/RL_EA_search/graphnas/rs_trainer.py | 60d453b4a9dc3b195ba7af0fdb0ad1d16b376820 | [
"Apache-2.0"
] | permissive | ncucjm/notebook | c2495f790e9fc2ca55c1c29a8eaa2dc1bfe7463f | 7271a0d1b10cdd6298e223c7ff150d4df031aa76 | refs/heads/master | 2023-07-20T05:55:48.946687 | 2021-01-27T09:12:19 | 2021-01-27T09:12:19 | 202,633,012 | 0 | 0 | null | 2023-07-06T21:28:29 | 2019-08-16T00:58:45 | Jupyter Notebook | UTF-8 | Python | false | false | 1,510 | py | import time
import torch
import numpy as np
from collections import deque
from graphnas.trainer import Trainer
class RandomSearch_Trainer(Trainer):
"""
This class implements a Random Search method, on the Search Space
provided to it.
"""
def __init__(self, args):
super(RandomSearch_Trainer, self).__init__(args)
self.args = args
self.random_seed = args.random_seed
self.cycles = args.cycles
def train(self):
print("\n\n===== Random Search ====")
start_time = time.time()
self.best_ind_acc = 0.0
self.best_ind = []
while self.cycles > 0:
individual = self._generate_random_individual()
ind_actions = self._construct_action([individual])
gnn = self.form_gnn_info(ind_actions[0])
_, ind_acc = \
self.submodel_manager.train(gnn, format=self.args.format)
print("individual:", individual, " val_score:", ind_acc)
if ind_acc > self.best_ind_acc:
self.best_ind = individual.copy()
self.best_ind_acc = ind_acc
end_time = time.time()
total_time = end_time - start_time
print('Total elapsed time: ' + str(total_time))
print('[BEST STRUCTURE]', self.best_ind)
print('[BEST STRUCTURE] Actions: ',
self._construct_action([self.best_ind]))
print('[BEST STRUCTURE] Accuracy: ', self.best_ind_acc)
print("===== Random Search DONE ====")
| [
"[email protected]"
] | |
6e5c4d9328171eeb50e8290adcc1ce764248f029 | 8d472f9facb895dda9e1df81f3bb6c2f81b9c357 | /master/bt5/slapos_jio/SkinTemplateItem/portal_skins/slapos_hal_json_style/Project_hasItem.py | d66daa1f26bb81642f295654c3ff99a0f40cc6d8 | [] | no_license | SlapOS/slapos.core | 852485eed9382685f3df6ba8532f8192bb1389c4 | 369e8d56636e1c59a745e68dc68154abfc5b7840 | refs/heads/master | 2023-08-31T04:42:34.722241 | 2023-08-30T15:13:08 | 2023-08-30T15:13:08 | 1,825,920 | 11 | 4 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | import json
return json.dumps(len(context.Project_getComputeNodeTrackingList()))
| [
"[email protected]"
] | |
d48f8bec41176e377a39ba8177cac60f159340b7 | 297497957c531d81ba286bc91253fbbb78b4d8be | /third_party/python/esprima/esprima/__init__.py | aa2398f4102b2e9d4553bb39f890861fda8ee0ea | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | marco-c/gecko-dev-comments-removed | 7a9dd34045b07e6b22f0c636c0a836b9e639f9d3 | 61942784fb157763e65608e5a29b3729b0aa66fa | refs/heads/master | 2023-08-09T18:55:25.895853 | 2023-08-01T00:40:39 | 2023-08-01T00:40:39 | 211,297,481 | 0 | 0 | NOASSERTION | 2019-09-29T01:27:49 | 2019-09-27T10:44:24 | C++ | UTF-8 | Python | false | false | 154 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
version = '4.0.1'
__version__ = (4, 0, 1)
from .esprima import *
| [
"[email protected]"
] | |
4ec3a3ad39f84c17851919fc61bb7c8ea7077454 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startCirq1255.py | 433c18dd8d62c95a5ce1435e5cbc4fa0ed500276 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,849 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=49
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=3
c.append(cirq.rx(-1.3603096190043806).on(input_qubit[2])) # number=28
c.append(cirq.H.on(input_qubit[1])) # number=4
c.append(cirq.H.on(input_qubit[2])) # number=5
c.append(cirq.H.on(input_qubit[3])) # number=6
c.append(cirq.H.on(input_qubit[4])) # number=21
for i in range(2):
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[3])) # number=34
c.append(cirq.CZ.on(input_qubit[4],input_qubit[3])) # number=35
c.append(cirq.H.on(input_qubit[3])) # number=36
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.H.on(input_qubit[1])) # number=18
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=43
c.append(cirq.X.on(input_qubit[2])) # number=44
c.append(cirq.H.on(input_qubit[2])) # number=46
c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=47
c.append(cirq.H.on(input_qubit[2])) # number=48
c.append(cirq.rx(-1.9697785938008003).on(input_qubit[1])) # number=37
c.append(cirq.H.on(input_qubit[2])) # number=19
c.append(cirq.H.on(input_qubit[3])) # number=20
c.append(cirq.H.on(input_qubit[0])) # number=38
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=39
c.append(cirq.H.on(input_qubit[0])) # number=40
c.append(cirq.X.on(input_qubit[0])) # number=32
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=33
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=24
c.append(cirq.X.on(input_qubit[1])) # number=25
c.append(cirq.X.on(input_qubit[1])) # number=41
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=26
c.append(cirq.X.on(input_qubit[2])) # number=11
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[3])) # number=30
c.append(cirq.X.on(input_qubit[3])) # number=12
c.append(cirq.H.on(input_qubit[2])) # number=42
c.append(cirq.X.on(input_qubit[0])) # number=13
c.append(cirq.X.on(input_qubit[1])) # number=14
c.append(cirq.X.on(input_qubit[2])) # number=15
c.append(cirq.X.on(input_qubit[3])) # number=16
c.append(cirq.X.on(input_qubit[1])) # number=22
c.append(cirq.X.on(input_qubit[1])) # number=23
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 5
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq1255.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"[email protected]"
] | |
5e504538adc68c06ea2082edf5674a0e82a28dc0 | 4f75cc33b4d65d5e4b054fc35b831a388a46c896 | /.history/app_20210903181729.py | d0919a5fe032e1f8eaa9d4770a1d04d5bbe154c3 | [] | no_license | Lr-2002/newpage | c3fe2acc451e24f6408996ea1271c61c321de702 | c589ad974e7100aa9b1c2ccc095a959ff68069b6 | refs/heads/main | 2023-09-03T06:13:53.428236 | 2021-11-23T10:41:21 | 2021-11-23T10:41:21 | 402,606,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,625 | py | from flask import Flask ,render_template,url_for
from flask_sqlalchemy import SQLAlchemy
import os
import sys
import click
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(app.root_path,'data.db')
# / / / / 是文件的绝对路径
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] =False
db = SQLAlchemy(app)
@app.cli.command()
@click.option('--drop',is_flag=True,help = 'Create after drop.')
def initdb(drop):
if drop:
db.drop_all()
db.create_all()
click.echo('Initialize database.')
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20))
class Movie(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(20))
year = db.Column(db.String(4))
# name = 'Grey Li'
# movies = [
# {'title': 'My Neighbor Totoro', 'year': '1988'},
# {'title': 'Dead Poets Society', 'year': '1989'},
# {'title': 'A Perfect World', 'year': '1993'},
# {'title': 'Leon', 'year': '1994'},
# {'title': 'Mahjong', 'year': '1996'},
# {'title': 'Swallowtail Butterfly', 'year': '1996'},
# {'title': 'King of Comedy', 'year': '1999'},
# {'title': 'Devils on the Doorstep', 'year': '1999'},
# {'title': 'WALL-E', 'year': '2008'},
# {'title': 'The Pork of Music', 'year': '2012'},
# ]
# @app.route('/static/<name>')
# def static(name):
# # url_for('static')
# return name
@app.route('/')
def hello():
user =
return render_template('index.html',name=name,movies = movies)
# if __name__ == '__main__':
# app.run()
| [
"[email protected]"
] | |
384496359f968c0c15c69e0e31cf20fe03eecb7d | 5fd297a27951074f3434d45ab7367687a15ad3b1 | /cep/__init__.py | 0499b31d041f05719ad12c78b1179c582ae8bb89 | [] | no_license | Cazuky/cepy | 808e155cdcf70bc25ef87f658e11c53bdb715530 | eca789cf632156a2e61b9d6fb4422056105a1c8d | refs/heads/master | 2021-01-11T14:12:31.974783 | 2012-11-27T17:45:25 | 2012-11-27T17:45:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | from flaskapp import create_application
__version__ = '0.0.0'
| [
"[email protected]"
] | |
88fae0f7f05b8375208a3a5144f972d9792eac4c | 4ed038a638725ac77731b0b97ddd61aa37dd8d89 | /cairis/misc/KaosModel.py | 39259e225455f6f69fa7f7bc60346a9b9a88fa53 | [
"Apache-2.0"
] | permissive | RachelLar/cairis_update | 0b784101c4aff81ff0390328eb615e335301daa2 | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | refs/heads/master | 2021-01-19T06:25:47.644993 | 2016-07-11T20:48:11 | 2016-07-11T20:48:11 | 63,103,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,998 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pydot
from cairis.core.Borg import Borg
from cairis.core.ARM import *
from cairis.core.colourcodes import usabilityColourCode
from cairis.core.colourcodes import threatColourCode
from cairis.core.colourcodes import obstacleColourCode
class KaosModel:
def __init__(self,associations,envName,kaosModelType = 'goal',goalName = '', db_proxy=None, font_name=None, font_size=None):
self.theAssociations = associations
self.theEnvironmentName = envName
self.theGoalName = goalName
b = Borg()
if db_proxy is None or font_size is None or font_name is None:
self.dbProxy = b.dbProxy
self.fontName = b.fontName
self.fontSize = b.fontSize
else:
self.dbProxy = db_proxy
self.fontName = font_name
self.fontSize = font_size
self.theGraph = pydot.Dot()
self.theKaosModel = kaosModelType
if (self.theKaosModel == 'task'):
self.theGraph.set_graph_defaults(rankdir='LR')
else:
self.theGraph.set_graph_defaults(rankdir='BT')
self.theGraphName = b.tmpDir + '/' + self.theKaosModel + '.dot'
def size(self):
return len(self.theAssociations)
def buildNode(self,dimName,objtName):
objtUrl = dimName + '#' + objtName
if (dimName == 'goal'):
self.theGraph.add_node(pydot.Node(objtName,shape='parallelogram',fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
# soft-goal attributes self.theGraph.add_node(pydot.Node(objtName,shape='polygon',style='rounded',sides='6',distortion='-0.537997',orientation='52',skew='-0.960726',fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif (dimName == 'obstacle'):
obsId = self.dbProxy.getDimensionId(objtName,'obstacle')
envId = self.dbProxy.getDimensionId(self.theEnvironmentName,'environment')
self.theGraph.add_node(pydot.Node(objtName,shape='polygon',skew='-0.4',style='filled',pencolor='black',colorscheme='ylorrd9',fillcolor=obstacleColourCode(self.dbProxy.obstacleProbability(obsId,envId)),fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif (dimName == 'domainproperty'):
self.theGraph.add_node(pydot.Node(objtName,shape='house',fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif (dimName == 'requirement'):
self.theGraph.add_node(pydot.Node(objtName,shape='parallelogram',fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif (dimName == 'countermeasure'):
self.theGraph.add_node(pydot.Node(objtName,shape='hexagon',fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif ((dimName == 'role') and (self.theKaosModel != 'task')):
self.theGraph.add_node(pydot.Node(objtName,shape='hexagon',fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif ((dimName == 'role') and (self.theKaosModel == 'task')):
self.theGraph.add_node(pydot.Node(objtName,shape='ellipse',fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif (dimName == 'usecase'):
self.theGraph.add_node(pydot.Node(objtName,shape='ellipse',fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif (dimName == 'task'):
objt = self.dbProxy.dimensionObject(objtName,'task')
if (objt.assumption() == True):
objtLabel = "<<Assumption>>" + objtName
else:
objtLabel = objtName
taskScore = self.dbProxy.taskUsabilityScore(objtName,self.theEnvironmentName)
self.theGraph.add_node(pydot.Node(objtName,label=objtLabel,shape='ellipse',style='filled',color=usabilityColourCode(taskScore),fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif (dimName == 'misusecase'):
ellipseColour = 'black'
if (self.theKaosModel == 'task'):
riskName = objtName[8:]
riskObjt = self.dbProxy.dimensionObject(riskName,'risk')
riskScores = self.dbProxy.riskScore(riskObjt.threat(),riskObjt.vulnerability(),self.theEnvironmentName,riskName)
highestScore = 0
for riskScore in riskScores:
currentScore = riskScore[2]
if (currentScore > highestScore):
highestScore = currentScore
ellipseColour = threatColourCode(highestScore)
self.theGraph.add_node(pydot.Node(objtName,shape='ellipse',style='filled',color=ellipseColour,fontcolor='white',fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif (dimName == 'persona'):
objt = self.dbProxy.dimensionObject(objtName,'persona')
if (objt.assumption() == True):
objtLabel = "<<Assumption>>" + objtName
self.theGraph.add_node(pydot.Node(objtName,label=objtLabel,shape='circle',fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
else:
self.theGraph.add_node(pydot.Node(objtName,shape='circle',fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif (dimName == 'attacker'):
self.theGraph.add_node(pydot.Node(objtName,shape='circle',style='filled',color='black',fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif (dimName == 'response'):
self.theGraph.add_node(pydot.Node(objtName,shape='note',fontname=self.fontName,fontsize=self.fontSize,URL=objtUrl))
elif (dimName == 'asset'):
fontColour = 'black'
nodeColour = 'black'
if (self.theKaosModel == 'task'):
fontColour = 'blue'
nodeColour = 'blue'
self.theGraph.add_node(pydot.Node(objtName,shape='record',fontname=self.fontName,fontsize=self.fontSize,fontcolor=fontColour,color=nodeColour,URL=objtUrl))
else:
raise UnknownNodeType(dimName)
def layout(self,renderer = ''):
if (renderer == ''):
if ((self.theKaosModel == 'goal') or (self.theKaosModel == 'template_goal') or (self.theKaosModel == 'obstacle')):
renderer = 'dot'
if (self.theKaosModel == 'responsibility'):
renderer = 'twopi'
elif (self.theKaosModel == 'task'):
renderer = 'dot'
self.theGraph.write_xdot(self.theGraphName,prog=renderer)
return open(self.theGraphName).read()
  def buildGoalModel(self,isComponent=False):
    """Add the edges (and any missing nodes) for a goal/obstacle model.

    Walks self.theAssociations and draws, per association type:
    - 'obstruct'/'resolve': a single directed edge with a tee arrowhead;
    - 'depend': goal -> small circle (dependency) node -> sub-goal;
    - refinements (and/or/responsible/conflict): sub-goals fan into a
      shared refinement node, which points at the parent goal.

    isComponent -- when True, nodes are built for every association even
                   if no focus goal (self.theGoalName) is set.
    """
    self.nodeNameSet = set([])
    refNodes = set([])
    # the Graph get_edge function doesn't appear to work, so we'll keep a set of edges ourselves.
    edgeSet = set([])
    for association in self.theAssociations:
      goalName = association.goal()
      associationType = association.type()
      subGoalName = association.subGoal()
      alternativeFlag = association.alternative()
      goalDimName = association.goalDimension()
      subGoalDimName = association.subGoalDimension()
      goalEnv = association.environment()
      # NOTE(review): the guards below test nodeNameSet membership, but the
      # set is never updated in this method, so a goal occurring in several
      # associations is built more than once -- confirm pydot tolerates the
      # duplicate add_node calls.
      if ((self.theGoalName != '' or isComponent == True) and goalName not in self.nodeNameSet):
        self.buildNode(goalDimName,goalName)
      if ((self.theGoalName != '' or isComponent == True) and subGoalName not in self.nodeNameSet):
        self.buildNode(subGoalDimName,subGoalName)
      if ((associationType == 'obstruct') or (associationType == 'resolve')):
        if ((subGoalName,goalName) not in edgeSet):
          goalEdge = pydot.Edge(subGoalName,goalName,dir='forward',arrowhead='veetee',weight='1')
          self.theGraph.add_edge(goalEdge)
          edgeSet.add((subGoalName,goalName))
      elif (associationType == 'depend'):
        if ((subGoalName,goalName) not in edgeSet):
          # Intermediate dependency node; its URL doubles as its identifier.
          objtUrl = 'depend#' + goalEnv + '/' + goalName + '/' + subGoalName
          self.theGraph.add_node(pydot.Node(objtUrl,shape='circle',label=' ',height='.2',width='.2',URL=objtUrl))
          edge1 = pydot.Edge(goalName,objtUrl,dir='forward',arrowhead='vee',weight='1')
          self.theGraph.add_edge(edge1)
          edge2 = pydot.Edge(objtUrl,subGoalName,dir='forward',arrowhead='vee',weight='1')
          self.theGraph.add_edge(edge2)
          edgeSet.add((subGoalName,goalName))
      else:
        # Refinement: one node per (goal, association type), shared by all
        # sibling sub-goals -- unless the association is an alternative.
        refNodeName = goalName + '#' + associationType
        # This is probably a good time to see if there is already another goalassociation in the graph for another environment
        assocDirection = 'forward'
        arrowHead = 'vee'
        if ((subGoalName,refNodeName) not in edgeSet):
          objtUrl = 'link#' + goalEnv + '/' + goalName + '/' + subGoalName + '/' + goalDimName + '/' + subGoalDimName
          if (alternativeFlag == 1):
            # Alternatives get a per-sub-goal refinement node of their own.
            refNodeName = goalName + '#' + subGoalName + '#' + associationType
          if (refNodeName not in refNodes):
            # First sighting of this refinement node: style it by type.
            if (associationType == 'and'):
              objtUrl = 'linkand#' + goalEnv + '/' + goalName + '/' + subGoalName + '/' + goalDimName + '/' + subGoalDimName
              self.theGraph.add_node(pydot.Node(refNodeName,shape='circle',label=' ',height='.2',width='.2',URL=objtUrl))
            elif (associationType == 'or'):
              objtUrl = 'linkor#' + goalEnv + '/' + goalName + '/' + subGoalName + '/' + goalDimName + '/' + subGoalDimName
              self.theGraph.add_node(pydot.Node(refNodeName,shape='circle',style='filled',color='black',label=' ',height='.2',width='.2',URL=objtUrl))
            elif (associationType == 'responsible'):
              objtUrl = 'linkresponsible#' + goalEnv + '/' + goalName + '/' + subGoalName + '/' + goalDimName + '/' + subGoalDimName
              self.theGraph.add_node(pydot.Node(refNodeName,shape='circle',style='filled',color='red',label=' ',height='.2',width='.2',URL=objtUrl))
            elif (associationType == 'conflict'):
              objtUrl = 'linkconflict#' + goalEnv + '/' + goalName + '/' + subGoalName + '/' + goalDimName + '/' + subGoalDimName
              self.theGraph.add_node(pydot.Node(refNodeName,shape='circle',color='red',label=' ',height='.2',width='.2',URL=objtUrl))
              # Conflicts are rendered as undirected edges.
              assocDirection = 'none'
              arrowHead = 'none'
            goalEdge = pydot.Edge(refNodeName,goalName,dir=assocDirection,arrowhead=arrowHead,weight='1')
            if ((refNodeName,goalName) not in edgeSet):
              self.theGraph.add_edge(goalEdge)
              edgeSet.add((refNodeName,goalName))
            refNodes.add(refNodeName)
          if ((subGoalName,refNodeName) not in edgeSet):
            self.theGraph.add_edge(pydot.Edge(subGoalName,refNodeName,dir='none',weight='1'))
            edgeSet.add((subGoalName,refNodeName))
        else:
          pass
          # Mark the node with a ? so we know the association properties might vary by environment
          # modifiedRefNodeName = '\"' + refNodeName + '\"'
          # refNode = self.theGraph.get_node(modifiedRefNodeName)
          # refNode.set('label','?')
  def buildTaskModel(self):
    """Add nodes and edges for a task model.

    Each association in self.theAssociations contributes one labelled edge.
    Colour, direction and arrow style encode the association type: red for
    threat/vulnerability links, green for mitigations, blue for task/asset
    links, dashed for use-case/task links.
    """
    self.nodeNameSet = set([])
    edgeSet = set([])
    fontSize = '7.5'
    for association in self.theAssociations:
      goalName = association.goal()
      subGoalName = association.subGoal()
      goalDimName = association.goalDimension()
      subGoalDimName = association.subGoalDimension()
      assocLabel = association.rationale()
      # Defaults: plain black undirected solid edge; specialised below.
      fontColour = 'black'
      edgeColour = 'black'
      edgeStyle = 'solid'
      assocDir = 'none'
      arrowHead = 'none'
      arrowTail = 'none'
      assocType = association.type()
      if (self.theGoalName != '' and goalName not in self.nodeNameSet):
        self.buildNode(goalDimName,goalName)
        self.nodeNameSet.add(goalName)
      if (self.theGoalName != '' and subGoalName not in self.nodeNameSet):
        self.buildNode(subGoalDimName,subGoalName)
        self.nodeNameSet.add(subGoalName)
      if (assocType in ('misusecasethreatasset_association','misusecasevulnerabilityasset_association','taskmisusecasethreat_association','taskmisusecasevulnerability_association')):
        # Threat / vulnerability relationships: red, forward-directed.
        fontColour = 'red'
        edgeColour = 'red'
        assocDir = 'forward'
        arrowHead = 'vee'
      elif (assocType in ('misusecasethreatmitigation_association','misusecasevulnerabilitymitigation_association','taskmisusecasemitigation_association')):
        # Mitigation relationships: green, forward-directed.
        fontColour = 'green'
        edgeColour = 'green'
        assocDir = 'forward'
        arrowHead = 'vee'
      elif (assocType == 'taskasset_association'):
        # Task/asset relationships: blue, arrow on the tail end.
        fontColour = 'blue'
        edgeColour = 'blue'
        arrowTail = 'vee'
      elif (assocType == 'rolepersona_association'):
        arrowHead = 'empty'
      # NOTE(review): the two checks below intentionally override the arrow
      # style chosen by the chain above for asset and mitigation types,
      # moving the arrow from head to tail.
      if (assocType in ('misusecasethreatasset_association','misusecasevulnerabilityasset_association','taskasset_association')):
        arrowHead = 'none'
        arrowTail = 'vee'
      if (assocType == 'taskmisusecasemitigation_association'):
        arrowHead = 'none'
        arrowTail = 'vee'
      if (assocType == 'usecasetask_association'):
        arrowTail = 'vee'
        edgeStyle = 'dashed'
      objtUrl = goalDimName + '#' + subGoalDimName + '#' + assocType
      if ((subGoalName,goalName,assocLabel) not in edgeSet):
        # pydot needs a non-empty label, so substitute a single space.
        if assocLabel == '':
          assocLabel = ' '
        self.theGraph.add_edge(pydot.Edge(subGoalName,goalName,style=edgeStyle,dir=assocDir,arrowhead=arrowHead,arrowtail=arrowTail,label=assocLabel,fontsize=fontSize,weight='1',fontcolor=fontColour,color=edgeColour,URL=objtUrl))
        edgeSet.add((subGoalName,goalName,assocLabel))
def graph(self):
try:
elements = []
if (self.theKaosModel == 'goal' and self.theGoalName == ''):
elements = self.dbProxy.goalModelElements(self.theEnvironmentName)
elif (self.theKaosModel == 'obstacle' and self.theGoalName == ''):
elements = self.dbProxy.obstacleModelElements(self.theEnvironmentName)
elif (self.theKaosModel == 'responsibility' and self.theGoalName == ''):
elements = self.dbProxy.responsibilityModelElements(self.theEnvironmentName)
elif (self.theKaosModel == 'task' and self.theGoalName == ''):
elements = self.dbProxy.taskModelElements(self.theEnvironmentName)
for element in elements:
self.buildNode(element[0],element[1])
if ((self.theKaosModel == 'goal') or (self.theKaosModel == 'responsibility') or (self.theKaosModel == 'obstacle')):
self.buildGoalModel()
elif (self.theKaosModel == 'template_goal'):
self.buildGoalModel(True)
else:
self.buildTaskModel()
return self.layout()
except DatabaseProxyException, errTxt:
raise ARMException(errTxt)
| [
"[email protected]"
] | |
63616405b27720b76566b120e130bee0ac7bae8e | cfa464f5e4ec36b740d6e884f0ca1e170ebd2efb | /0x15-api/1-export_to_CSV.py | 402fb19b0b58ff81079e112f6fdb96aead0b7b14 | [] | no_license | Immaannn2222/holberton-system_engineering-devops | 6ea0c4f3af2943c242e1928a2b4e66932f193a34 | bcf001f3693fc55d54842ad92848ee783edee37a | refs/heads/master | 2020-12-22T21:12:22.507064 | 2020-10-12T19:46:37 | 2020-10-12T19:46:37 | 236,933,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | #!/usr/bin/python3
"""HTTP WITH PYTHON"""
import csv
import requests
from sys import argv
if __name__ == "__main__":
    # Fetch the user's todo list and the user record for the id in argv[1].
    to_do = requests.get('https://jsonplaceholder.typicode.com/todos/',
                         params={"userId": argv[1]})
    user = requests.get('https://jsonplaceholder.typicode.com/users',
                        params={"id": argv[1]})
    list_json = to_do.json()
    user_json = user.json()
    for i in user_json:
        name = i.get("username")
        i_d = i.get('id')
    # newline='' is required by the csv module so it controls line endings
    # itself (prevents blank rows on Windows).
    with open(str(i_d) + '.csv', mode='w', newline='') as f:
        # Bug fix: the original rebound the name 'csv' to the writer object,
        # shadowing the csv module and breaking any later csv.* lookup.
        writer = csv.writer(f, delimiter=',', quoting=csv.QUOTE_ALL)
        for t in list_json:
            status = t.get("completed")
            task_title = t.get("title")
            writer.writerow([i_d, name, status, task_title])
| [
"[email protected]"
] | |
59a610eb83b8706f74f0002b97f722652d711751 | 83c57f25a1c8b29bb84078340efabaf527a9452e | /pytest/xiaoniu88/pipelines.py | bcf58746076d6fa5a9859ffc60911edbe065bfe3 | [] | no_license | ifzz/py | df06cf5da5920dae979c2c8454bfa02c36dfeeb1 | 2305e651613725ca51d6a87306f3ef83d6c51939 | refs/heads/master | 2021-01-18T08:32:21.256271 | 2016-03-11T10:30:28 | 2016-03-11T10:30:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class Xiaoniu88Pipeline(object):
    """No-op Scrapy item pipeline: hands every scraped item back unchanged."""

    def process_item(self, item, spider):
        """Return *item* untouched so later pipeline stages receive it."""
        return item
| [
"[email protected]"
] | |
cca8684959f85d2e036f5c9887666fd2e912318b | 5865cc1b70db72b7a9a9a07547f05a1f47959bb1 | /supervised_learning/0x02-tensorflow/0-create_placeholders.py | 030bfd54225f665c91036d412c47e2ec3b3197fd | [] | no_license | nildiert/holbertonschool-machine_learning | c8cefc3a784348f09128c0f4d82d65b9d56000c5 | 273f81feaa14fe24ac4db5d82be0d13299e857b8 | refs/heads/master | 2020-12-21T12:27:48.280880 | 2020-09-25T17:58:33 | 2020-09-25T17:58:33 | 236,429,499 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | #!/usr/bin/env python3
""" This function creates two placeholders """
import tensorflow as tf
def create_placeholders(nx, classes):
    """Create the two float32 placeholders for a classification network.

    nx -- number of input features per example.
    classes -- number of output classes (width of the one-hot labels).

    Returns the tuple (x, y) of tf.placeholder tensors named 'x' and 'y',
    each with an unconstrained batch dimension.
    """
    data_ph = tf.placeholder(tf.float32, shape=(None, nx), name='x')
    label_ph = tf.placeholder(tf.float32, shape=(None, classes), name='y')
    return data_ph, label_ph
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.