blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
933b59e302f98e982ead78cbe8328132cfbe6402
|
6f04a6ef99c581ed2f0519c897f254a7b63fb61d
|
/rastervision/utils/zxy2geotiff.py
|
80210424cc55d2fa376a4eef16bfa35762587c46
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
dgketchum/raster-vision
|
18030c9a8bfe99386aa95adbf8e3ec51d204947f
|
fe74bef30daa5821023946576b00c584ddc56de8
|
refs/heads/master
| 2020-08-30T13:56:08.598240 | 2019-11-03T17:38:33 | 2019-11-03T17:38:33 | 218,400,435 | 3 | 1 |
NOASSERTION
| 2019-10-29T23:09:57 | 2019-10-29T23:09:57 | null |
UTF-8
|
Python
| false | false | 7,481 |
py
|
import tempfile
from PIL import Image
import numpy as np
import click
import mercantile
import rasterio
from rasterio.windows import Window
import pyproj
from rastervision.utils.files import (download_if_needed, get_local_path,
upload_or_copy)
from rastervision.command.aux.cogify_command import create_cog
def lnglat2merc(lng, lat):
    """Convert lng, lat point to x/y Web Mercator tuple.

    Uses the legacy pyproj ``init=`` keyword API. EPSG:4326 is lng/lat
    WGS84; EPSG:3857 is Web Mercator.
    """
    return pyproj.transform(
        pyproj.Proj(init='epsg:4326'), pyproj.Proj(init='epsg:3857'), lng, lat)
def merc2lnglat(x, y):
    """Convert x, y Web Mercator point to lng/lat tuple.

    Inverse of lnglat2merc; same legacy pyproj ``init=`` API.
    """
    return pyproj.transform(
        pyproj.Proj(init='epsg:3857'), pyproj.Proj(init='epsg:4326'), x, y)
def merc2pixel(tile_x, tile_y, zoom, merc_x, merc_y, tile_sz=256):
    """Convert Web Mercator point to pixel coordinates.

    This is within the coordinate frame of a single ZXY tile.

    Args:
        tile_x: (int) x coordinate of ZXY tile
        tile_y: (int) y coordinate of ZXY tile
        zoom: (int) zoom level of ZXY tile
        merc_x: (float) Web Mercator x axis of point
        merc_y: (float) Web Mercator y axis of point
        tile_sz: (int) size of ZXY tile

    Returns:
        (pix_x, pix_y) tuple of ints: the point's pixel offset measured
        from the tile's top-left corner.
    """
    tile_merc_bounds = mercantile.xy_bounds(tile_x, tile_y, zoom)
    # Pixel y grows downward, so measure from the tile's top edge.
    pix_y = int(
        round(tile_sz * ((tile_merc_bounds.top - merc_y) /
                         (tile_merc_bounds.top - tile_merc_bounds.bottom))))
    pix_x = int(
        round(tile_sz * ((merc_x - tile_merc_bounds.left) /
                         (tile_merc_bounds.right - tile_merc_bounds.left))))
    return (pix_x, pix_y)
def _zxy2geotiff(tile_schema, zoom, bounds, output_uri, make_cog=False):
    """Generates a GeoTIFF of a bounded region from a ZXY tile server.

    Args:
        tile_schema: (str) the URI schema for zxy tiles (ie. a slippy map tile server)
            of the form /tileserver-uri/{z}/{x}/{y}.png. If {-y} is used, the tiles
            are assumed to be indexed using TMS coordinates, where the y axis starts
            at the southernmost point. The URI can be for http, S3, or the local
            file system.
        zoom: (int) the zoom level to use when retrieving tiles
        bounds: (list) a list of length 4 containing min_lat, min_lng,
            max_lat, max_lng
        output_uri: (str) where to save the GeoTIFF. The URI can be for http, S3, or the
            local file system
        make_cog: (bool) if True, convert the output to a Cloud Optimized
            GeoTIFF before uploading; otherwise upload/copy it as-is
    """
    min_lat, min_lng, max_lat, max_lng = bounds
    if min_lat >= max_lat:
        raise ValueError('min_lat must be < max_lat')
    if min_lng >= max_lng:
        raise ValueError('min_lng must be < max_lng')

    is_tms = False
    if '{-y}' in tile_schema:
        # Normalize the schema to {y}; individual y indices are flipped to
        # TMS numbering inside the tile loop below.
        tile_schema = tile_schema.replace('{-y}', '{y}')
        is_tms = True

    tmp_dir_obj = tempfile.TemporaryDirectory()
    tmp_dir = tmp_dir_obj.name

    # Get range of tiles that cover bounds.
    output_path = get_local_path(output_uri, tmp_dir)
    tile_sz = 256
    t = mercantile.tile(min_lng, max_lat, zoom)
    xmin, ymin = t.x, t.y
    t = mercantile.tile(max_lng, min_lat, zoom)
    xmax, ymax = t.x, t.y

    # The supplied bounds are contained within the "tile bounds" -- ie. the
    # bounds of the set of tiles that covers the supplied bounds. Therefore,
    # we need to crop out the imagery that lies within the supplied bounds.
    # We do this by computing a top, bottom, left, and right offset in pixel
    # units of the supplied bounds against the tile bounds. Getting the offsets
    # in pixel units involves converting lng/lat to web mercator units since we
    # assume that is the CRS of the tiles. These offsets are then used to crop
    # individual tiles and place them correctly into the output raster.
    nw_merc_x, nw_merc_y = lnglat2merc(min_lng, max_lat)
    left_pix_offset, top_pix_offset = merc2pixel(xmin, ymin, zoom, nw_merc_x,
                                                 nw_merc_y)

    se_merc_x, se_merc_y = lnglat2merc(max_lng, min_lat)
    se_left_pix_offset, se_top_pix_offset = merc2pixel(xmax, ymax, zoom,
                                                       se_merc_x, se_merc_y)
    right_pix_offset = tile_sz - se_left_pix_offset
    bottom_pix_offset = tile_sz - se_top_pix_offset

    uncropped_height = tile_sz * (ymax - ymin + 1)
    uncropped_width = tile_sz * (xmax - xmin + 1)
    height = uncropped_height - top_pix_offset - bottom_pix_offset
    width = uncropped_width - left_pix_offset - right_pix_offset

    # Georeference the output in Web Mercator, the assumed CRS of the tiles.
    transform = rasterio.transform.from_bounds(nw_merc_x, se_merc_y, se_merc_x,
                                               nw_merc_y, width, height)
    with rasterio.open(
            output_path,
            'w',
            driver='GTiff',
            height=height,
            width=width,
            count=3,
            crs='epsg:3857',
            transform=transform,
            dtype=rasterio.uint8) as dataset:
        out_x = 0
        for xi, x in enumerate(range(xmin, xmax + 1)):
            # Edge tiles are cropped by the computed offsets; interior
            # tiles are written whole.
            tile_xmin, tile_xmax = 0, tile_sz - 1
            if x == xmin:
                tile_xmin += left_pix_offset
            if x == xmax:
                tile_xmax -= right_pix_offset
            window_width = tile_xmax - tile_xmin + 1

            out_y = 0
            for yi, y in enumerate(range(ymin, ymax + 1)):
                tile_ymin, tile_ymax = 0, tile_sz - 1
                if y == ymin:
                    tile_ymin += top_pix_offset
                if y == ymax:
                    tile_ymax -= bottom_pix_offset
                window_height = tile_ymax - tile_ymin + 1

                # Convert from xyz to tms if needed.
                # https://gist.github.com/tmcw/4954720
                if is_tms:
                    y = (2**zoom) - y - 1
                tile_uri = tile_schema.format(x=x, y=y, z=zoom)
                tile_path = download_if_needed(tile_uri, tmp_dir)
                img = np.array(Image.open(tile_path))
                # Crop to the portion of the tile inside the bounds.
                img = img[tile_ymin:tile_ymax + 1, tile_xmin:tile_xmax + 1, :]
                window = Window(out_x, out_y, window_width, window_height)
                # Write bands-first (C, H, W), keeping only RGB and
                # dropping any alpha channel.
                dataset.write(
                    np.transpose(img[:, :, 0:3], (2, 0, 1)), window=window)
                out_y += window_height
            out_x += window_width

    if make_cog:
        create_cog(output_path, output_uri, tmp_dir)
    else:
        upload_or_copy(output_path, output_uri)
@click.command()
@click.argument('tile_schema')
@click.argument('zoom')
@click.argument('bounds')
@click.argument('output_uri')
@click.option('--make-cog', is_flag=True, default=False)
def zxy2geotiff(tile_schema, zoom, bounds, output_uri, make_cog):
    """Generates a GeoTIFF of a bounded region from a ZXY tile server.

    TILE_SCHEMA: the URI schema for zxy tiles (ie. a slippy map tile server) of
    the form /tileserver-uri/{z}/{x}/{y}.png. If {-y} is used, the tiles are
    assumed to be indexed using TMS coordinates, where the y axis starts at
    the southernmost point. The URI can be for http, S3, or the local file
    system.
    ZOOM: the zoom level to use when retrieving tiles
    BOUNDS: a space-separated string containing min_lat, min_lng, max_lat,
    max_lng
    OUTPUT_URI: where to save the GeoTIFF. The URI can be for http, S3, or the
    local file system.
    """
    # split() with no separator tolerates repeated/leading/trailing
    # whitespace; split(' ') produced empty tokens and a float() crash on
    # a double space.
    bounds = [float(x) for x in bounds.split()]
    _zxy2geotiff(tile_schema, int(zoom), bounds, output_uri, make_cog=make_cog)


if __name__ == '__main__':
    zxy2geotiff()
|
[
"[email protected]"
] | |
30459cc5e6a093410d325a173ea9cba76452b99a
|
3b2940c38412e5216527e35093396470060cca2f
|
/top/api/rest/HotelSoldOrdersIncrementGetRequest.py
|
08229a2b1227b49f5b2a06b967fb59b0da52b1e9
|
[] |
no_license
|
akingthink/goods
|
842eb09daddc2611868b01ebd6e330e5dd7d50be
|
ffdb5868a8df5c2935fc6142edcdf4c661c84dca
|
refs/heads/master
| 2021-01-10T14:22:54.061570 | 2016-03-04T09:48:24 | 2016-03-04T09:48:24 | 45,093,302 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 523 |
py
|
'''
Created by auto_sdk on 2015-01-20 12:44:31
'''
from top.api.base import RestApi
class HotelSoldOrdersIncrementGetRequest(RestApi):
    """TOP request object for taobao.hotel.sold.orders.increment.get."""

    # Request parameters recognized by this API; each starts unset (None).
    _PARAMS = ('end_modified', 'need_guest', 'need_message', 'page_no',
               'page_size', 'start_modified', 'status', 'use_has_next')

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        for param in self._PARAMS:
            setattr(self, param, None)

    def getapiname(self):
        """Return the TOP API method name for this request."""
        return 'taobao.hotel.sold.orders.increment.get'
|
[
"[email protected]"
] | |
24bce9adfd9986c448487e74e16658ad17c265dd
|
786de89be635eb21295070a6a3452f3a7fe6712c
|
/poster/tags/V00-00-01/SConscript
|
b190c174d8a3bdc4f9abefb1be153557a06627e1
|
[] |
no_license
|
connectthefuture/psdmrepo
|
85267cfe8d54564f99e17035efe931077c8f7a37
|
f32870a987a7493e7bf0f0a5c1712a5a030ef199
|
refs/heads/master
| 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,985 |
#------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# SConscript file for package poster
#------------------------------------------------------------------------
# Do not delete following line, it must be present in
# SConscript file for any SIT project
Import('*')

import os

from SConsTools.standardExternalPackage import standardExternalPackage

#
# For the standard external packages which contain includes, libraries,
# and applications it is usually sufficient to call standardExternalPackage()
# giving few keyword arguments. Here is a complete list of arguments:
#
#   PREFIX   - top directory of the external package
#   INCDIR   - include directory, absolute or relative to PREFIX
#   INCLUDES - include files to copy (space-separated list of patterns)
#   PYDIR    - Python src directory, absolute or relative to PREFIX
#   LINKPY   - Python files to link (patterns), or all files if not present
#   PYDIRSEP - if present and evaluates to True installs python code to a
#              separate directory arch/$SIT_ARCH/python/<package>
#   LIBDIR   - libraries directory, absolute or relative to PREFIX
#   COPYLIBS - library names to copy
#   LINKLIBS - library names to link, or all libs if LINKLIBS and COPYLIBS are empty
#   BINDIR   - binaries directory, absolute or relative to PREFIX
#   LINKBINS - binary names to link, or all binaries if not present
#   PKGLIBS  - names of libraries that have to be linked for this package
#   DEPS     - names of other packages that we depend upon
#   PKGINFO  - package information, such as RPM package name

# here is an example setting up a fictional package
pkg = "poster"
pkg_ver = "0.8.1"
# Package lives under $SIT_EXTERNAL_SW/poster/0.8.1; its python code is
# installed to a separate per-arch directory (PYDIRSEP).
PREFIX = os.path.join('$SIT_EXTERNAL_SW', pkg, pkg_ver)
PYDIR = os.path.join("lib", '$PYTHON', "site-packages", pkg)
PYDIRSEP = True
PKGINFO = (pkg, pkg_ver, '$PYTHON', '$SIT_ARCH.found')
# standardExternalPackage picks up PREFIX/PYDIR/PYDIRSEP/PKGINFO from locals().
standardExternalPackage ( pkg, **locals() )
|
[
"[email protected]@b967ad99-d558-0410-b138-e0f6c56caec7"
] |
[email protected]@b967ad99-d558-0410-b138-e0f6c56caec7
|
|
9217751689c20a44cbffa776fd1f9c8aabb36593
|
5a396f14b3689273aaf1a6e20dcb0853d78a9f04
|
/GetSharedWithDomainTeamDriveACLs.py
|
0c114d9b0daa023ea4ef045d01a424197485f1cf
|
[] |
no_license
|
NosIreland/GAM-Scripts3
|
642b4dd827189352afd8357a41b576d6acf159bc
|
de3ee3007e6906c5b6d28fef8aea27827646db00
|
refs/heads/master
| 2023-03-04T21:58:44.594405 | 2021-02-18T14:39:20 | 2021-02-18T14:39:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,744 |
py
|
#!/usr/bin/env python3
"""
# Purpose: For a Google Drive User(s), delete all drive file ACLs for Team Drive files shared with a list of specified domains
# Note: This script requires Advanced GAM:
# https://github.com/taers232c/GAMADV-XTD3
# Customize: Set DOMAIN_LIST and DESIRED_ALLOWFILEDISCOVERY
# Python: Use python or python3 below as appropriate to your system; verify that you have version 3
# $ python -V or python3 -V
# Python 3.x.y
# Usage:
# For all Team Drives, start at step 1; For Team Drives selected by user/group/OU, start at step 7
# All Team Drives
# 1: Get all Team Drives.
# $ gam redirect csv ./TeamDrives.csv print teamdrives fields id,name
# 2: Get ACLs for all Team Drives
# $ gam redirect csv ./TeamDriveACLs.csv multiprocess csv TeamDrives.csv gam print drivefileacls ~id fields emailaddress,role,type
# 3: Customize GetTeamDriveOrganizers.py for this task:
# Set DOMAIN_LIST as required
# Set ONE_ORGANIZER = True
# Set SHOW_GROUP_ORGANIZERS = False
# Set SHOW_USER_ORGANIZERS = True
# 4: From that list of ACLs, output a CSV file with headers "id,name,organizers"
# that shows the organizers for each Team Drive
# $ python3 GetTeamDriveOrganizers.py TeamDriveACLs.csv TeamDrives.csv TeamDriveOrganizers.csv
# 5: Get ACLs for all team drive files; you can use permission matching to narrow the number of files listed; add to the end of the command line
# DESIRED_ALLOWFILEDISCOVERY = 'Any' - pm type domain em
# DESIRED_ALLOWFILEDISCOVERY = 'True' - pm type domain allowfilediscovery true em
# DESIRED_ALLOWFILEDISCOVERY = 'False' - pm type domain allowfilediscovery false em
# $ gam redirect csv ./filelistperms.csv multiprocess csv TeamDriveOrganizers.csv gam user ~organizers print filelist select teamdriveid ~id fields teamdriveid,id,title,permissions pm type domain em
# 6: Go to step 11
# Selected Team Drives
# 7: If want Team Drives for a specific set of organizers, replace <UserTypeEntity> with your user selection in the command below
# $ gam redirect csv ./AllTeamDrives.csv <UserTypeEntity> print teamdrives role organizer fields id,name
# 8: Customize DeleteDuplicateRows.py for this task:
# Set ID_FIELD = 'id'
# 9: Delete duplicate Team Drives (some may have multiple organizers).
# $ python3 DeleteDuplicateRows.py ./AllTeamDrives.csv ./TeamDrives.csv
# 10: Get ACLs for all team drive files; you can use permission matching to narrow the number of files listed; add to the end of the command line
# DESIRED_ALLOWFILEDISCOVERY = 'Any' - pm type domain em
# DESIRED_ALLOWFILEDISCOVERY = 'True' - pm type domain allowfilediscovery true em
# DESIRED_ALLOWFILEDISCOVERY = 'False' - pm type domain allowfilediscovery false em
# $ gam redirect csv ./filelistperms.csv multiprocess csv TeamDrives.csv gam user ~User print filelist select teamdriveid ~id fields teamdriveid,id,title,permissions pm type domain em
# Common code
# 11: From that list of ACLs, output a CSV file with headers "Owner,driveFileId,driveFileTitle,permissionId,role,domain,allowFileDiscovery"
# that lists the driveFileIds and permissionIds for all ACLs shared with the selected domains.
# (n.b., driveFileTitle, role, domain and allowFileDiscovery are not used in the next step, they are included for documentation purposes)
# $ python3 GetSharedWithDomainTeamDriveACLs.py filelistperms.csv deleteperms.csv
# 12: Inspect deleteperms.csv, verify that it makes sense and then proceed
# 13: Delete the ACLs
# $ gam csv deleteperms.csv gam user "~Owner" delete drivefileacl "~driveFileId" "~permissionId"
"""
import csv
import re
import sys
# Column that carries the file title; older exports use 'title' instead.
FILE_NAME = 'name'
ALT_FILE_NAME = 'title'
# If you want to limit finding ACLS for a specific list of domains, use the
# list below, e.g., DOMAIN_LIST = ['domain.com',] or
# DOMAIN_LIST = ['domain1.com', 'domain2.com',]; empty matches all domains.
DOMAIN_LIST = []
# Specify desired value of allowFileDiscovery field: True, False, Any (matches True and False)
DESIRED_ALLOWFILEDISCOVERY = 'Any'

QUOTE_CHAR = '"' # Adjust as needed
LINE_TERMINATOR = '\n' # On Windows, you probably want '\r\n'

# Matches flattened permission columns such as "permissions.0.type".
PERMISSIONS_N_TYPE = re.compile(r"permissions.(\d+).type")

# Output CSV: second CLI argument, or stdout when omitted or '-'.
if (len(sys.argv) > 2) and (sys.argv[2] != '-'):
  outputFile = open(sys.argv[2], 'w', encoding='utf-8', newline='')
else:
  outputFile = sys.stdout
outputCSV = csv.DictWriter(outputFile, ['Owner', 'driveFileId', 'driveFileTitle', 'permissionId', 'role', 'domain', 'allowFileDiscovery'], lineterminator=LINE_TERMINATOR, quotechar=QUOTE_CHAR)
outputCSV.writeheader()

# Input CSV: first CLI argument, or stdin when omitted or '-'.
if (len(sys.argv) > 1) and (sys.argv[1] != '-'):
  inputFile = open(sys.argv[1], 'r', encoding='utf-8')
else:
  inputFile = sys.stdin

# Scan every flattened permission of every file; emit one output row per
# domain-type permission that matches the configured filters.
for row in csv.DictReader(inputFile, quotechar=QUOTE_CHAR):
  for k, v in iter(row.items()):
    mg = PERMISSIONS_N_TYPE.match(k)
    if mg and v == 'domain':
      permissions_N = mg.group(1)
      domain = row[f'permissions.{permissions_N}.domain']
      # Fall back to the inverse of the legacy withLink column when
      # allowFileDiscovery is absent (withLink == 'False' -> 'True').
      allowFileDiscovery = row.get(f'permissions.{permissions_N}.allowFileDiscovery', str(row.get(f'permissions.{permissions_N}.withLink') == 'False'))
      if (not DOMAIN_LIST or domain in DOMAIN_LIST) and (DESIRED_ALLOWFILEDISCOVERY in ('Any', allowFileDiscovery)):
        outputCSV.writerow({'Owner': row['Owner'],
                            'driveFileId': row['id'],
                            'driveFileTitle': row.get(FILE_NAME, row.get(ALT_FILE_NAME, 'Unknown')),
                            'permissionId': f'id:{row[f"permissions.{permissions_N}.id"]}',
                            'role': row[f'permissions.{permissions_N}.role'],
                            'domain': domain,
                            'allowFileDiscovery': allowFileDiscovery})

# Close only handles we opened ourselves, never stdin/stdout.
if inputFile != sys.stdin:
  inputFile.close()
if outputFile != sys.stdout:
  outputFile.close()
|
[
"[email protected]"
] | |
dc5f47e41dd896ee44f05aa76d5189db027ffe70
|
d2c4151eff768af64946ababc2e41c13d8973cd3
|
/ABC146/a.py
|
c10b5d53d83522345cefe135c52ff627ef03099c
|
[] |
no_license
|
Intel-out-side/AtCoder
|
2de19b71981247135432aed2d6d9c2a16c3ab7f0
|
0c419d2df15fff02032432cb1b1323612484e16e
|
refs/heads/master
| 2022-06-23T04:21:12.886072 | 2022-06-13T14:39:07 | 2022-06-13T14:39:07 | 235,240,853 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 181 |
py
|
# Days remaining until the next Sunday; a full week when today is Sunday.
days = ["MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"]
today = str(input())
answer = 7 if today == "SUN" else days.index("SUN") - days.index(today)
print(answer)
|
[
"[email protected]"
] | |
cf6a0c4833d16887ee9ee3e5afefb8ed33431c13
|
eacff46eda2c6b509449979a16002b96d4645d8e
|
/Collections-a-installer/community-general-2.4.0/tests/integration/targets/launchd/files/ansible_test_service.py
|
87a23fc47d816bb4b2deacd93a3bcfb45fbf1a9f
|
[
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] |
permissive
|
d-amien-b/simple-getwordpress
|
5e6d4d15d5f87124ab591e46b63fec552998fdc3
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
refs/heads/master
| 2023-04-08T22:13:37.347545 | 2021-04-06T09:25:51 | 2021-04-06T09:25:51 | 351,698,069 | 0 | 0 |
MIT
| 2021-03-31T16:16:45 | 2021-03-26T07:30:00 |
HTML
|
UTF-8
|
Python
| false | false | 594 |
py
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import sys
if __name__ == '__main__':
    # Minimal static-file HTTP server used as a test fixture; serves the
    # current directory on the port given as the first CLI argument.
    if sys.version_info[0] >= 3:
        import http.server
        import socketserver
        PORT = int(sys.argv[1])
        Handler = http.server.SimpleHTTPRequestHandler
        httpd = socketserver.TCPServer(("", PORT), Handler)
        # Blocks forever; the test harness is expected to kill the process.
        httpd.serve_forever()
    else:
        # Python 2 fallback. Register .json so responses get a sensible
        # Content-Type; SimpleHTTPServer.test() serves cwd (it presumably
        # reads the port from sys.argv itself -- confirm on a py2 host).
        import mimetypes
        mimetypes.init()
        mimetypes.add_type('application/json', '.json')
        import SimpleHTTPServer
        SimpleHTTPServer.test()
|
[
"[email protected]"
] | |
4cbf6e3fcafd24fc240850a479e41ddfe6d770ac
|
d5b339d5b71c2d103b186ed98167b0c9488cff09
|
/marvin/cloudstackAPI/deleteCondition.py
|
e6c1d13261e1a92194f4e5a345cf1351557e1bd8
|
[
"Apache-2.0"
] |
permissive
|
maduhu/marvin
|
3e5f9b6f797004bcb8ad1d16c7d9c9e26a5e63cc
|
211205ae1da4e3f18f9a1763f0f8f4a16093ddb0
|
refs/heads/master
| 2020-12-02T17:45:35.685447 | 2017-04-03T11:32:11 | 2017-04-03T11:32:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 705 |
py
|
"""Removes a condition"""
from baseCmd import *
from baseResponse import *
class deleteConditionCmd (baseCmd):
    """Request parameters for the deleteCondition API call (auto-generated)."""
    typeInfo = {}

    def __init__(self):
        # deleteCondition is executed asynchronously by the backend.
        self.isAsync = "true"
        """the ID of the condition."""
        """Required"""
        self.id = None
        # NOTE(review): typeInfo is a class-level dict, so this mutation is
        # shared across instances -- presumably fine for generated one-shot
        # command objects, but worth confirming.
        self.typeInfo['id'] = 'uuid'
        self.required = ["id", ]
class deleteConditionResponse (baseResponse):
    """Response fields returned by the deleteCondition API call (auto-generated)."""
    typeInfo = {}

    def __init__(self):
        """any text associated with the success or failure"""
        self.displaytext = None
        # NOTE(review): mutates the class-level typeInfo dict (shared across
        # instances), mirroring the generated command class above it in style.
        self.typeInfo['displaytext'] = 'string'
        """true if operation is executed successfully"""
        self.success = None
        self.typeInfo['success'] = 'boolean'
|
[
"[email protected]"
] | |
9fe7a328b27380a9afc1f19106fa9edd8aa1033c
|
21208873652ce9a35035801cea488004e337b07b
|
/data_loader/__init__.py
|
784c4dc41287ec0e8680637c3b93983f20eae44f
|
[
"Apache-2.0"
] |
permissive
|
zlszhonglongshen/crnn.pytorch
|
55321a6764a6143be7ab9d2c6b3bcafcdd9470e7
|
bf7a7c62376eee93943ca7c68e88e3d563c09aa8
|
refs/heads/master
| 2022-11-07T22:57:28.983335 | 2020-06-19T03:01:35 | 2020-06-19T03:01:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,398 |
py
|
# -*- coding: utf-8 -*-
# @Time : 18-11-16 下午5:46
# @Author : zhoujun
import copy
from torch.utils.data import DataLoader
from torchvision import transforms
def get_dataset(data_path, module_name, transform, dataset_args):
    """
    Build the training dataset.

    :param data_path: list of dataset files; each file stores samples one per
        line in the format 'path/to/img\tlabel'
    :param module_name: name of the custom dataset class to instantiate;
        currently only data_loaders.ImageDataset is supported
    :param transform: the transforms applied to this dataset
    :param dataset_args: extra keyword arguments passed to the dataset class
    :return: if the data_path list is not empty, the corresponding Dataset
        object, otherwise None
    """
    # Imported lazily to avoid a circular import at package load time.
    from . import dataset
    s_dataset = getattr(dataset, module_name)(transform=transform, data_path=data_path, **dataset_args)
    return s_dataset
def get_transforms(transforms_config):
    """Instantiate each configured torchvision transform and compose them.

    Each config entry is a dict with a 'type' (attribute name on
    torchvision.transforms) and optional 'args' (constructor kwargs).
    """
    pipeline = [
        getattr(transforms, entry['type'])(**entry.get('args', {}))
        for entry in transforms_config
    ]
    return transforms.Compose(pipeline)
def get_dataloader(module_config, num_label):
    """Build a data loader from a config dict.

    :param module_config: dict with 'dataset' (type + args) and 'loader'
        sections; None disables the loader
    :param num_label: number of labels, injected into the dataset args
    :return: a DataLoader (or Batch_Balanced_Dataset when multiple datasets
        with ratios are configured), or None if module_config is None
    """
    if module_config is None:
        return None
    # Deep-copy so the pops below don't mutate the caller's config.
    config = copy.deepcopy(module_config)
    dataset_args = config['dataset']['args']
    dataset_args['num_label'] = num_label
    if 'transforms' in dataset_args:
        img_transforms = get_transforms(dataset_args.pop('transforms'))
    else:
        img_transforms = None
    # One dataset per entry in data_path.
    dataset_name = config['dataset']['type']
    data_path_list = dataset_args.pop('data_path')
    if 'data_ratio' in dataset_args:
        data_ratio = dataset_args.pop('data_ratio')
    else:
        data_ratio = [1.0]

    _dataset_list = []
    for data_path in data_path_list:
        _dataset_list.append(get_dataset(data_path=data_path, module_name=dataset_name, dataset_args=dataset_args, transform=img_transforms))
    # BUG FIX: 'data_ratio' was popped from dataset_args above, so the
    # original dataset_args['data_ratio'] lookup here raised KeyError
    # whenever more than one ratio was configured; use the popped value.
    if len(data_ratio) > 1 and len(data_ratio) == len(_dataset_list):
        from . import dataset
        loader = dataset.Batch_Balanced_Dataset(dataset_list=_dataset_list, ratio_list=data_ratio, loader_args=config['loader'])
    else:
        _dataset = _dataset_list[0]
        loader = DataLoader(dataset=_dataset, **config['loader'])
        loader.dataset_len = len(_dataset)
    return loader
|
[
"[email protected]"
] | |
b8954b6cea35abb939ed06c8276b23e8b81f83d3
|
b2e340f22a7f613dc33ea361ba87a393d65b723c
|
/LogicAnalyzer/config/config.py
|
f19d2b4e3d649ece283274df9b734d2dc8094f99
|
[
"MIT"
] |
permissive
|
CospanDesign/logic-analyzer
|
6369cfc423f3fae050f9ab784a6ae94003422654
|
284ea339c001b4845a46fcb0672511487271c9c3
|
refs/heads/master
| 2021-01-20T18:58:53.477152 | 2016-06-24T02:22:04 | 2016-06-24T02:22:04 | 61,488,220 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,335 |
py
|
import logging
import json
# Keys for trigger-related capabilities a device config may expose.
TRIGGER = "trigger"
TRIGGER_MASK = "trigger_mask"
TRIGGER_EDGE = "trigger_edge"
TRIGGER_BOTH_EDGE = "both_edges"
TRIGGER_REPEAT = "repeat"
TRIGGER_AFTER = "trigger_after"

# All capability keys; Config.__init__ seeds self.caps with these.
CAPABILITY_NAMES = [
    TRIGGER,
    TRIGGER_MASK,
    TRIGGER_EDGE,
    TRIGGER_BOTH_EDGE,
    TRIGGER_REPEAT,
    TRIGGER_AFTER
]

# Names of the callback slots a controller can register on a config.
CALLBACK_START = "start"
CALLBACK_STOP = "stop"
CALLBACK_FORCE = "force"
CALLBACK_UPDATE = "update"
CALLBACK_GET_SIZE = "get_size"
CALLBACK_CLOSE = "close"

# All callback slots; Config.__init__ seeds self.callbacks with these.
CALLBACK_NAMES = [
    CALLBACK_START,
    CALLBACK_STOP,
    CALLBACK_FORCE,
    CALLBACK_UPDATE,
    CALLBACK_GET_SIZE,
    CALLBACK_CLOSE
]
class Config(object):
    """Base class for logic-analyzer device configurations.

    Tracks which capabilities a device supports (self.caps) and which
    callbacks the controller has registered (self.callbacks). Subclasses
    must override get_name(), ready() and captured().
    """

    @staticmethod
    def get_name():
        """Human-readable device name; subclasses must override."""
        return "Invalid Config, make your own!!"

    def __init__(self):
        self.log = logging.getLogger("LAX")
        # Capability name -> value; None means unsupported/unset.
        self.caps = {}
        # Callback name -> callable registered via set_callback().
        self.callbacks = {}
        # NOTE(review): declared as a list even though get_channel_dict's
        # docstring promises a dictionary -- confirm intended type.
        self.channels = []
        for name in CAPABILITY_NAMES:
            self.caps[name] = None
        for name in CALLBACK_NAMES:
            self.callbacks[name] = None

    def get_channel_dict(self):
        """
        Return a dictionary that maps names to channel(s)
        """
        return self.channels

    def get_capabilities(self):
        """
        Return a list of capabilities (strings) that this device supports
        """
        names = []
        for name in self.caps:
            if self.caps[name] is not None:
                names.append(name)
        return names

    def has_capability(self, name):
        """
        Return true if the device has the capability.

        Raises KeyError for names outside CAPABILITY_NAMES.
        """
        return self.caps[name] is not None

    def get_value(self, name):
        """Get the value of a capability; raises AssertionError if unsupported."""
        if not self.has_capability(name):
            raise AssertionError("LAX Does not have capability")
        else:
            return self.caps[name]

    def set_callback(self, name, func):
        """Register func under the given callback slot."""
        self.log.debug("Setting callback for: %s" % name)
        self.callbacks[name] = func

    def ready(self):
        """The controller tells the config interface it's ready"""
        # BUG FIX: the original built this message with sys._getframe(),
        # but the module never imports sys, so calling ready() raised
        # NameError instead of the intended AssertionError.
        raise AssertionError("ready not implemented")

    def captured(self):
        """callback when capture occurs"""
        # Same fix as ready(): avoid the undefined sys reference.
        raise AssertionError("captured not implemented")
|
[
"[email protected]"
] | |
1935bfa537c5f257092b4e5689d56e2394be68bb
|
a09c10c29478fed167c94d83d5dff9371f9a1680
|
/Client.py
|
ec5149235aa6288eed9ea16cd6590f770fc45567
|
[] |
no_license
|
batra98/Distributed-Web-Cache
|
83e208689b18b95724dd0ba657b4ef89e9054d2a
|
7e08dfe4dd6739c779c59da3ab7301f3cb33af6a
|
refs/heads/master
| 2022-11-28T05:21:33.220922 | 2020-08-07T10:15:32 | 2020-08-07T10:15:32 | 285,793,260 | 2 | 0 | null | 2020-08-07T09:41:56 | 2020-08-07T09:41:56 | null |
UTF-8
|
Python
| false | false | 1,157 |
py
|
import socket
import sys
def send(ip, port, message):
    """Send one ascii command to the cache server.

    Returns the reply split into at most two tokens (status word, payload).
    """
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((ip, port))
    try:
        conn.sendall(message.encode('ascii'))
        reply = conn.recv(1024).decode('ascii')
        print("Received: {}".format(reply))
        return reply.split(None, 1)
    finally:
        # Always release the socket, even if send/recv raised.
        conn.close()
def get(ip, port, key):
    """Send the 'get' command for key."""
    return send(ip, port, "get {0}".format(key))

def add(ip, port, key, data):
    """Send the 'add' command storing data under key."""
    return send(ip, port, "add {0} {1}".format(key, data))

def add_node(ip, port, key):
    """Send the 'addnode' command for key."""
    return send(ip, port, "addnode {0}".format(key))

def rm_node(ip, port, key):
    """Send the 'rmnode' command for key."""
    return send(ip, port, "rmnode {0}".format(key))

def stats(ip, port):
    """Request server statistics."""
    return send(ip, port, "stats")

def performance(ip,port):
    """Request server performance figures."""
    return send(ip,port, "performance")

def test_load_balancing(ip,port,num_node,num_data):
    """Send the 'test' command with num_node nodes and num_data items."""
    return send(ip,port, "test {0} {1}".format(num_node,num_data))

def clean(ip,port):
    """Send the 'clean' command."""
    return send(ip,port,"clean")
if __name__ == "__main__":
    # Interactive mode: forward each typed command verbatim to the server
    # at the address given on the command line. Loops until interrupted.
    ip, port = sys.argv[1], int(sys.argv[2])
    while True:
        command = input("> ")
        send(ip, port, command)
|
[
"[email protected]"
] | |
02745dd02ec7954ea531da8ddfb292e43a976771
|
8a102033a266d39128e4b64aa0780cf67055e196
|
/1330.py
|
3fe9a718323d5727aeb4c2c1501dafb25b860ada
|
[] |
no_license
|
yuseungwoo/baekjoon
|
4dec0798b8689b9378121b9d178713c9cf14a53f
|
099031e2c4401e27edcdc05bd6c9e6a558b09bb9
|
refs/heads/master
| 2020-09-03T15:25:40.764723 | 2018-10-08T02:35:27 | 2018-10-08T02:35:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 129 |
py
|
# coding: utf-8
# Compare two integers and print the matching comparison operator.
a, b = map(int, input().split())
if a > b:
    print('>')
elif a < b:
    print('<')
else:
    print('==')
|
[
"[email protected]"
] | |
bf91de8bc79c94d76bf93ea0cc534b567dc2e161
|
4d9bd7874fc5a4f2ec56bb172f4e93a9601c4c83
|
/main.py
|
4864dd4bbecc043b09c96f4fb427a06e03a0c031
|
[] |
no_license
|
liziniu/Model-Uncertainty-in-Neural-Networks
|
ff65009b3c165c4fd82efb9759cb26d41f914a2e
|
67c6042c52dd7e7a918ab42d34764bbb9a88c8a2
|
refs/heads/master
| 2020-05-04T00:26:47.315086 | 2019-04-06T03:19:47 | 2019-04-06T03:19:47 | 178,884,785 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,506 |
py
|
from model1.default import get_config
from model1.model import Model
from utli import load_data, get_session, update_para
import argparse
def arg_parse(argv=None):
    """Parse command-line hyper-parameters.

    Args:
        argv: optional list of argument strings; when None (the default,
            and the previous behavior) argparse reads sys.argv[1:].
            Accepting a list keeps the parser unit-testable.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=int, default=1)
    parser.add_argument("--epochs", type=int, default=100)
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--num_units", type=int, default=100)
    # pi/mu/std pairs: presumably the two-component prior parameters used
    # by the model -- confirm against model1.model.
    parser.add_argument("--pi", type=float, default=0.25)
    parser.add_argument("--mu1", type=float, default=0.0)
    parser.add_argument("--std1", type=float, default=0.5)
    parser.add_argument("--mu2", type=float, default=0.0)
    parser.add_argument("--std2", type=float, default=1.5)
    # --train switches to training mode; default is load-and-test.
    parser.add_argument("--train", action="store_true", default=False)
    parser.add_argument("--load_path", type=str, default="logs/model1/")
    return parser.parse_args(argv)
def main(args):
    """Train or evaluate the model according to the parsed CLI args."""
    sess = get_session()
    default_para = get_config()
    # CLI arguments override the defaults from model1.default.
    para = update_para(default_para, args)
    model = Model(sess, para)
    x_train, x_test, y_train, y_test = load_data()
    # Hold out the last 5000 training examples as a validation split.
    x_train_ = x_train[:-5000]
    y_train_ = y_train[:-5000]
    x_valid = x_train[-5000:]
    y_valid = y_train[-5000:]
    if args.train:
        model.train(x_train_, y_train_, x_valid, y_valid)
    else:
        # Evaluation-only mode: restore weights from the checkpoint dir.
        model.load(args.load_path)
    model.test(x_test, y_test)

if __name__ == "__main__":
    args = arg_parse()
    main(args)
|
[
"[email protected]"
] | |
014a6b6fc7c93c425ce7da5ad70dfce4b7273ee8
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/largestTime_20200903122053.py
|
697fb00998e96926352a6433e5a6da6d088d57dd
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 552 |
py
|
from itertools import permutations
def Time(A):
    """Return the latest valid 24-hour time "HH:MM" formable from the four
    digits in A, or "" when no permutation is a valid time.

    The original draft enumerated permutations but was left unfinished and
    syntactically invalid (``i =<2359``); it also compared whole 4-digit
    numbers against 2359, which would wrongly accept times like 12:99.
    This version validates hours and minutes separately.
    """
    best_minutes = -1
    best = ""
    for p in permutations(A):
        hours = p[0] * 10 + p[1]
        minutes = p[2] * 10 + p[3]
        # Only real clock readings qualify: 00-23 hours, 00-59 minutes.
        if hours < 24 and minutes < 60:
            total = hours * 60 + minutes
            if total > best_minutes:
                best_minutes = total
                best = "{:02d}:{:02d}".format(hours, minutes)
    return best


Time([1, 2, 3, 4])
|
[
"[email protected]"
] | |
47213a723487f5382748a8d76a7546ee674ea1f5
|
a26d91163fe40924c7c4f9d94fcd973989b68983
|
/watchlist_app/migrations/0003_alter_movie_description.py
|
bed775bad8d30539b2b34f84c48a3511902e2b22
|
[] |
no_license
|
rcoffie/Django-Rest-Tut
|
a840ecb838098ed2d525c1b5321f042e0d29c5fb
|
9925bfb11b92a49aa6973e3929b2d05d9528ee27
|
refs/heads/master
| 2023-08-25T06:43:41.019767 | 2021-10-27T15:27:06 | 2021-10-27T15:27:06 | 409,567,488 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 384 |
py
|
# Generated by Django 3.2.5 on 2021-09-23 11:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: redefine Movie.description as a
    plain TextField (no length limit)."""

    dependencies = [
        ('watchlist_app', '0002_rename_movies_movie'),
    ]

    operations = [
        migrations.AlterField(
            model_name='movie',
            name='description',
            field=models.TextField(),
        ),
    ]
|
[
"[email protected]"
] | |
9207f63b377b4990be34f2882127edcd256361e6
|
70e1d7c3e375ecff09df36e5a4ceda5691221968
|
/tmp.py
|
1936e8cb7425e44fc01199bc2937e82f0e03ce0a
|
[
"Apache-2.0"
] |
permissive
|
doublechenching/ship_detection
|
5b91aa4a7fbe6eb5a88389d1a517641a53740890
|
1ba4926e0d28043863df05ae8afc3d5b336b350d
|
refs/heads/master
| 2020-04-06T17:53:23.855070 | 2018-11-15T08:47:02 | 2018-11-15T08:47:02 | 157,676,999 | 8 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,213 |
py
|
Nucleoplasm 12885 0.25
Cytosol 8228 0.16
Plasma membrane 3777 0.07
Nucleoli 3621 0.07
Mitochondria 2965 0.06
Golgi apparatus 2822 0.06
Nuclear bodies 2513 0.05
Nuclear speckles 1858 0.04
Nucleoli fibrillar center 1561 0.03
Centrosome 1482 0.03
Nuclear membrane 1254 0.02
Intermediate filaments 1093 0.02
Microtubules 1066 0.02
Endoplasmic reticulum 1008 0.02
Microtubule organizing center 902 0.02
Cell junctions 802 0.02
Actin filaments 688 0.01
Focal adhesion sites 537 0.01
Cytokinetic bridge 530 0.01
Cytoplasmic bodies 328 0.01
Aggresome 322 0.01
Mitotic spindle 210 0.00
Lipid droplets 172 0.00
Peroxisomes 53 0.00
Endosomes 45 0.00
Lysosomes 28 0.00
Microtubule ends 21 0.00
Rods & rings 11 0.00
|
[
"[email protected]"
] | |
49125a103d0ef8ad23344162256cf34b29c740c5
|
5c0506e42fc7f0325728994223f1b0be4f1187fc
|
/summa_py_textrank.py
|
2fd1d59fa66724ab7ba0f6a9607be02ff57006a6
|
[] |
no_license
|
Trevahok/summarizer
|
602d492385c3130c6c9f11dd82e71177541ede73
|
cfd134e79ec5dfac3530081c6863421ab667207d
|
refs/heads/master
| 2020-03-19T20:36:21.680650 | 2018-06-12T06:54:36 | 2018-06-12T06:54:36 | 136,908,134 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,124 |
py
|
from urllib.request import urlopen
from summa.summarizer import summarize
from sys import argv
from bs4 import BeautifulSoup as bs
import PyPDF2
def from_link():
page=urlopen(argv[1])
soup=bs(page,'lxml')
text=soup.find_all('p')
text='\n'.join([ i.text for i in text])
print(summarize(text,ratio=0.2))
def from_pdf():
pdfdoc = open(argv[1], 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfdoc)
count = pdfReader.numPages
for i in range(count):
page = pdfReader.getPage(i)
print('Page Number: ',i,'\n')
print(summarize(page.extractText(),ratio=0.2))
print('\n\n')
def from_txt():
file=open(argv[1],'r')
text=file.read()
print(summarize(text,ratio=0.2))
if __name__=="__main__":
try:
filetype = argv[2]
if filetype=='url':
from_link()
elif filetype=='pdf':
from_pdf()
else:
from_txt()
except IndexError:
print("\nUsage:\n \tsummarize 'http:// url.to.summarize' url \n or \n \tsummarize 'path/to/file/file.pdf' pdf \n or \n \tsummarize 'path/to/file/file.txt' txt ")
|
[
"[email protected]"
] | |
c9708fe103af2012e13994b656c45ba4a852077c
|
abad82a1f487c5ff2fb6a84059a665aa178275cb
|
/Codewars/8kyu/8kyu-interpreters-hq9-plus/Python/solution1.py
|
bdd53cce40278d9d04a75b8b2e61e0cc09d79511
|
[
"MIT"
] |
permissive
|
RevansChen/online-judge
|
8ae55f136739a54f9c9640a967ec931425379507
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
refs/heads/master
| 2021-01-19T23:02:58.273081 | 2019-07-05T09:42:40 | 2019-07-05T09:42:40 | 88,911,035 | 9 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 487 |
py
|
# Python - 3.6.0
gets = lambda i: 's' if i != 1 else ''
HQ9 = {
'H': 'Hello World!',
'Q': 'Q',
'9': '\n'.join(
f'{i} bottle{gets(i)} of beer on the wall, {i} bottle{gets(i)} of beer.\nTake one down and pass it around, {i - 1 if i > 1 else "no more"} bottle{gets(i - 1)} of beer on the wall.' for i in range(99, 0, -1)
) + '\nNo more bottles of beer on the wall, no more bottles of beer.\nGo to the store and buy some more, 99 bottles of beer on the wall.'
}.get
|
[
"[email protected]"
] | |
703a8e40bd746970ed7d5c2e13f250617fe1a660
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02574/s746022410.py
|
9331bad8a322a0b5502729d4fc4e2aa050191d05
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,135 |
py
|
import math,itertools,fractions,heapq,collections,bisect,sys,queue,copy
sys.setrecursionlimit(10**7)
inf=10**20
mod=10**9+7
dd=[(-1,0),(0,1),(1,0),(0,-1)]
ddn=[(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1)]
def LI(): return [int(x) for x in sys.stdin.readline().split()]
# def LF(): return [float(x) for x in sys.stdin.readline().split()]
def I(): return int(sys.stdin.readline())
def F(): return float(sys.stdin.readline())
def LS(): return sys.stdin.readline().split()
def S(): return input()
def main():
N=I()
A=LI()
g=0
for x in A:
g=math.gcd(g,x)
if g>1:
return 'not coprime'
sosu=[0]*1000100
for x in A:
if x==1:
continue
sosu[x]+=1
if sosu[x]>1:
return 'setwise coprime'
for y in range(2,int(math.sqrt(x))+1):
if x%y!=0:
continue
z=x//y
if y==z:
sosu[y]+=1
if sosu[y]>1:
return 'setwise coprime'
else:
sosu[y]+=1
if sosu[y]>1:
return 'setwise coprime'
sosu[z]+=1
if sosu[z]>1:
return 'setwise coprime'
return 'pairwise coprime'
# main()
print(main())
|
[
"[email protected]"
] | |
b2432c7ce576836fc769e1c9a990bb2a1b00d91c
|
ef243d91a1826b490e935fa3f3e6c29c3cc547d0
|
/cv2/cv2/MergeExposures.py
|
7d68ec4c5e8da4d27c6ad8ddb544c23ea3973a7e
|
[] |
no_license
|
VentiFang/Python_local_module
|
6b3d0b22399e817057dfd15d647a14bb1e41980e
|
c44f55379eca2818b29732c2815480ee755ae3fb
|
refs/heads/master
| 2020-11-29T11:24:54.932967 | 2019-12-25T12:57:14 | 2019-12-25T12:57:14 | 230,101,875 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,989 |
py
|
# encoding: utf-8
# module cv2.cv2
# from F:\Python\Python36\lib\site-packages\cv2\cv2.cp36-win_amd64.pyd
# by generator 1.147
""" Python wrapper for OpenCV. """
# imports
import cv2.cv2 as # F:\Python\Python36\lib\site-packages\cv2\cv2.cp36-win_amd64.pyd
import cv2.Error as Error # <module 'cv2.Error'>
import cv2.cuda as cuda # <module 'cv2.cuda'>
import cv2.detail as detail # <module 'cv2.detail'>
import cv2.dnn as dnn # <module 'cv2.dnn'>
import cv2.fisheye as fisheye # <module 'cv2.fisheye'>
import cv2.flann as flann # <module 'cv2.flann'>
import cv2.instr as instr # <module 'cv2.instr'>
import cv2.ipp as ipp # <module 'cv2.ipp'>
import cv2.ml as ml # <module 'cv2.ml'>
import cv2.ocl as ocl # <module 'cv2.ocl'>
import cv2.ogl as ogl # <module 'cv2.ogl'>
import cv2.samples as samples # <module 'cv2.samples'>
import cv2.utils as utils # <module 'cv2.utils'>
import cv2.videoio_registry as videoio_registry # <module 'cv2.videoio_registry'>
import cv2 as __cv2
class MergeExposures(__cv2.Algorithm):
# no doc
def process(self, src, times, response, dst=None): # real signature unknown; restored from __doc__
"""
process(src, times, response[, dst]) -> dst
. @brief Merges images.
.
. @param src vector of input images
. @param dst result image
. @param times vector of exposure time values for each image
. @param response 256x1 matrix with inverse camera response function for each pixel value, it should
. have the same number of channels as images.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
|
[
"[email protected]"
] | |
d40d4c0886ebeb7c5e6c46de7f421799756c92b7
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_318/ch23_2019_03_27_15_00_44_973644.py
|
e268beaac58b3819e4295d3aa5d048c89b2d4156
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 223 |
py
|
def verifica_idade(x):
if(x>20):
print("Liberado EUA e BRASIL")
return x
if(x<21 and x>17):
print("Liberado BRASIL")
return x
if(x<18):
print("Nao esta liberado")
return x
|
[
"[email protected]"
] | |
3f982e8a36a779567542f4c382cd555febeef961
|
ed10dc841d5b4f6a038e8f24f603750992d9fae9
|
/lldb/test/API/lang/objc/foundation/TestFoundationDisassembly.py
|
bf9a40fc8da9b49c77e740cb835ab78aef313bfc
|
[
"NCSA",
"Apache-2.0",
"LLVM-exception"
] |
permissive
|
WYK15/swift-Ollvm10
|
90c2f0ade099a1cc545183eba5c5a69765320401
|
ea68224ab23470963b68dfcc28b5ac769a070ea3
|
refs/heads/main
| 2023-03-30T20:02:58.305792 | 2021-04-07T02:41:01 | 2021-04-07T02:41:01 | 355,189,226 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,449 |
py
|
"""
Test the lldb disassemble command on foundation framework.
"""
import unittest2
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
@skipUnlessDarwin
class FoundationDisassembleTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
@expectedFailureDarwin('rdar://problem/54977700')
@skipIfAsan
def test_foundation_disasm(self):
"""Do 'disassemble -n func' on each and every 'Code' symbol entry from the Foundation.framework."""
self.build()
# Enable synchronous mode
self.dbg.SetAsync(False)
# Create a target by the debugger.
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.assertTrue(target, VALID_TARGET)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
foundation_framework = None
for module in target.modules:
if module.file.basename == "Foundation":
foundation_framework = module.file.fullpath
break
self.assertTrue(
foundation_framework is not None,
"Foundation.framework path located")
self.runCmd("image dump symtab '%s'" % foundation_framework)
raw_output = self.res.GetOutput()
# Now, grab every 'Code' symbol and feed it into the command:
# 'disassemble -n func'.
#
# The symbol name is on the last column and trails the flag column which
# looks like '0xhhhhhhhh', i.e., 8 hexadecimal digits.
codeRE = re.compile(r"""
\ Code\ {9} # ' Code' followed by 9 SPCs,
.* # the wildcard chars,
0x[0-9a-f]{8} # the flag column, and
\ (.+)$ # finally the function symbol.
""", re.VERBOSE)
for line in raw_output.split(os.linesep):
match = codeRE.search(line)
if match:
func = match.group(1)
self.runCmd('image lookup -s "%s"' % func)
self.runCmd('disassemble -n "%s"' % func)
@skipIfAsan
def test_simple_disasm(self):
"""Test the lldb 'disassemble' command"""
self.build()
# Create a target by the debugger.
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.assertTrue(target, VALID_TARGET)
# Stop at +[NSString stringWithFormat:].
symbol_name = "+[NSString stringWithFormat:]"
break_results = lldbutil.run_break_set_command(
self, "_regexp-break %s" % (symbol_name))
lldbutil.check_breakpoint_result(
self,
break_results,
symbol_name=symbol_name,
num_locations=1)
# Stop at -[MyString initWithNSString:].
lldbutil.run_break_set_by_symbol(
self,
'-[MyString initWithNSString:]',
num_expected_locations=1,
sym_exact=True)
# Stop at the "description" selector.
lldbutil.run_break_set_by_selector(
self,
'description',
num_expected_locations=1,
module_name='a.out')
# Stop at -[NSAutoreleasePool release].
break_results = lldbutil.run_break_set_command(
self, "_regexp-break -[NSAutoreleasePool release]")
lldbutil.check_breakpoint_result(
self,
break_results,
symbol_name='-[NSAutoreleasePool release]',
num_locations=1)
self.runCmd("run", RUN_SUCCEEDED)
# First stop is +[NSString stringWithFormat:].
self.expect(
"thread backtrace",
"Stop at +[NSString stringWithFormat:]",
substrs=["Foundation`+[NSString stringWithFormat:]"])
# Do the disassemble for the currently stopped function.
self.runCmd("disassemble -f")
self.runCmd("process continue")
# Skip another breakpoint for +[NSString stringWithFormat:].
self.runCmd("process continue")
# Followed by a.out`-[MyString initWithNSString:].
self.expect(
"thread backtrace",
"Stop at a.out`-[MyString initWithNSString:]",
substrs=["a.out`-[MyString initWithNSString:]"])
# Do the disassemble for the currently stopped function.
self.runCmd("disassemble -f")
self.runCmd("process continue")
# Followed by -[MyString description].
self.expect("thread backtrace", "Stop at -[MyString description]",
substrs=["a.out`-[MyString description]"])
# Do the disassemble for the currently stopped function.
self.runCmd("disassemble -f")
self.runCmd("process continue")
# Skip another breakpoint for -[MyString description].
self.runCmd("process continue")
# Followed by -[NSAutoreleasePool release].
self.expect("thread backtrace", "Stop at -[NSAutoreleasePool release]",
substrs=["Foundation`-[NSAutoreleasePool release]"])
# Do the disassemble for the currently stopped function.
self.runCmd("disassemble -f")
|
[
"[email protected]"
] | |
c46b8a2458a636eea1bde0cc9df07da0126d1e1c
|
0c13891448e6c3136e2f651c776d1d11edee2577
|
/src/template_method.py
|
91d30a13953d467cfa30df4a7500ae59f97997f2
|
[
"MIT"
] |
permissive
|
MrRezoo/design-patterns-python
|
31cb7b73ae05c5bd361eb3455df234c20529f465
|
8f8e2501ad8e05f1a75ce5be659d926c0ec99698
|
refs/heads/master
| 2023-08-01T22:01:01.186910 | 2021-10-02T07:57:49 | 2021-10-02T07:57:49 | 349,936,987 | 8 | 1 |
MIT
| 2021-04-07T14:55:10 | 2021-03-21T08:13:44 |
Python
|
UTF-8
|
Python
| false | false | 1,178 |
py
|
"""
Behavioral pattern:
Template method
Example:
when we have static job between several classes use one ABC class
"""
from abc import ABC, abstractmethod
class Top(ABC):
def template_method(self):
self.first_common()
self.second_common()
self.third_require()
self.fourth_require()
self.hook()
def first_common(self):
print('I am first common...')
def second_common(self):
print('I am second common')
@abstractmethod
def third_require(self):
pass
@abstractmethod
def fourth_require(self):
pass
def hook(self):
pass
class One(Top):
def third_require(self):
print('This is Third require from One...')
def fourth_require(self):
print('This is Fourth require from One...')
def hook(self):
print('This is Hook from One')
class Two(Top):
def third_require(self):
print('This is Third require from Two...')
def fourth_require(self):
print('This is Fourth require from Two...')
def client(class_):
class_.template_method()
if __name__ == '__main__':
client(Two())
|
[
"[email protected]"
] | |
c2d75f8bbadf428b1d890435ae40bd179a74edc5
|
1d1a21b37e1591c5b825299de338d18917715fec
|
/ML,DL, RL/Machine Learning/ml/m42_xgb_qpu.py
|
477dafeb04fe720c755d988a9fb2f21ae8325e6c
|
[] |
no_license
|
brunoleej/study_git
|
46279c3521f090ebf63ee0e1852aa0b6bed11b01
|
0c5c9e490140144caf1149e2e1d9fe5f68cf6294
|
refs/heads/main
| 2023-08-19T01:07:42.236110 | 2021-08-29T16:20:59 | 2021-08-29T16:20:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,122 |
py
|
# XGBoost
# tree_method = 'gpu_hist' : cPU 대신, 실행을 시켰을 때 전체 GPU는 활동을 안하는데 CUDA만 활동
# predictor='gpu_predictor' : GPU로 예측 수행
# predictor='cpu_predictor' : CPU로 예측 수행
# gpu_id=0 : GPU 선택하여 처리
from xgboost import XGBClassifier, XGBRegressor
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.metrics import r2_score
datasets = load_boston()
x = datasets.data
y = datasets.target
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state=66)
model = XGBRegressor(n_estimators=100000, learning_rate=0.01,
tree_method = 'gpu_hist',
# predictor='gpu_predictor'
predictor='cpu_predictor',
gpu_id=0
)
model.fit(x_train, y_train, verbose=1, eval_metric=['rmse'],
eval_set =[(x_train, y_train), (x_test, y_test)],
early_stopping_rounds=10000
)
aaa = model.score(x_test, y_test)
print("model.score : ",aaa)
# model.score : 0.9254888275792001
|
[
"[email protected]"
] | |
47b74b1775ebe7c948754a92b962e1cee4c592e8
|
4d327de5447519d3c00e6572f74362380783006f
|
/source/res/scripts/client/gui/Scaleform/daapi/view/lobby/rankedBattles/RankedBattlesCalendarPopover.py
|
1597d3315544c1d1c8513bc4b036cfea883af256
|
[] |
no_license
|
XFreyaX/WorldOfTanks-Decompiled
|
706ac55d919b766aa89f90c97a75672bf2142611
|
5025466edd0dd3e5e50a6c60feb02ae793f6adac
|
refs/heads/master
| 2021-09-21T15:10:32.655452 | 2018-08-28T07:34:00 | 2018-08-28T07:34:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,129 |
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/rankedBattles/RankedBattlesCalendarPopover.py
from datetime import datetime
import BigWorld
from gui.Scaleform.locale.COMMON import COMMON
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS
from gui.Scaleform.managers.UtilsManager import UtilsManager
from gui.ranked_battles.ranked_models import CYCLE_STATUS
from helpers import i18n, dependency
from gui.Scaleform.daapi.view.meta.RankedBattlesCalendarPopoverMeta import RankedBattlesCalendarPopoverMeta
from gui.Scaleform.locale.RANKED_BATTLES import RANKED_BATTLES
from gui.shared.formatters import text_styles
from helpers import time_utils
from skeletons.gui.game_control import IRankedBattlesController
from skeletons.connection_mgr import IConnectionManager
ARROW_LEFT = 3
class RankedBattlesCalendarPopover(RankedBattlesCalendarPopoverMeta):
rankedController = dependency.descriptor(IRankedBattlesController)
connectionMgr = dependency.descriptor(IConnectionManager)
arrowDirection = ARROW_LEFT
def __init__(self, ctx=None):
super(RankedBattlesCalendarPopover, self).__init__()
self.__seasonInfo = self.rankedController.getCurrentSeason()
self.__currentCycle = self.__seasonInfo.getNumber()
self.__selectedDate = time_utils.getCurrentLocalServerTimestamp()
self.__weekDays = self._createUtilsManager().getWeekDayNames(full=True, isLower=False, isUpper=False, useRegionSettings=False)
data = ctx.get('data', None)
if data is not None:
self.arrowDirection = data.arrowDirection
return
def _createUtilsManager(self):
return UtilsManager()
def _populate(self):
super(RankedBattlesCalendarPopover, self)._populate()
self.as_setDataS({'rawDate': self.__selectedDate,
'arrowDirection': self.arrowDirection,
'statusText': self.__getCurrnetCycleString(),
'statusTooltip': TOOLTIPS_CONSTANTS.RANKED_CALENDAR_STEPS_INFO})
self.onDaySelect(time_utils.getCurrentTimestamp())
calendar = self.__getCalendar()
if calendar is not None:
calendar.as_setMinAvailableDateS(self.__seasonInfo.getStartDate())
calendar.as_setMaxAvailableDateS(self.__seasonInfo.getEndDate())
calendar.as_openMonthS(self.__selectedDate)
calendar.as_selectDateS(self.__selectedDate)
calendar.as_setHighlightedDaysS([self.__seasonInfo.getCycleStartDate(), self.__seasonInfo.getCycleEndDate()])
calendar.as_setDayTooltipTypeS(TOOLTIPS_CONSTANTS.RANKED_CALENDAR_DAY_INFO)
return
def onDaySelect(self, date):
formattedDate = datetime.fromtimestamp(date)
selectedDayOfWeek = self.__weekDays[formattedDate.weekday()]
self.as_setDayDataS({'primeTimeGroupData': self.__constructPrimeTimes(date),
'dayText': text_styles.superPromoTitle(formattedDate.day),
'dayNameText': text_styles.middleTitle(selectedDayOfWeek)})
def __getCycleListString(self):
key = RANKED_BATTLES.RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM
cycles = self.__seasonInfo.getAllCycles()
result = []
for cycle in sorted(cycles.values()):
formatter = text_styles.main if cycle.status == CYCLE_STATUS.CURRENT else text_styles.standard
startDate = time_utils.getTimeStructInLocal(cycle.startDate)
endDate = time_utils.getTimeStructInLocal(cycle.endDate)
result.append(formatter(i18n.makeString(key, cycleNumber=self.__currentCycle, day0='{:02d}'.format(startDate.tm_mday), month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.format(endDate.tm_mday), month1='{:02d}'.format(endDate.tm_mon))))
def __constructPrimeTimes(self, selectedTime):
items = []
serversPeriodsMapping = self.rankedController.getPrimeTimesForDay(selectedTime, groupIdentical=True)
frmt = BigWorld.wg_getShortTimeFormat
for serverName in sorted(serversPeriodsMapping.keys()):
periodsStr = []
dayPeriods = serversPeriodsMapping[serverName]
if dayPeriods:
for periodStart, periodEnd in dayPeriods:
periodsStr.append(i18n.makeString(RANKED_BATTLES.CALENDARDAY_TIME, start=frmt(periodStart), end=frmt(periodEnd)))
else:
periodsStr = i18n.makeString(COMMON.COMMON_DASH)
if dayPeriods:
items.append({'serverNameText': text_styles.highlightText(serverName),
'primeTimeText': '\n'.join(periodsStr)})
return items
def __getCurrnetCycleString(self):
key = RANKED_BATTLES.RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM
cycles = self.__seasonInfo.getAllCycles()
for cycle in sorted(cycles.values()):
if cycle.status == CYCLE_STATUS.CURRENT:
formatter = text_styles.main
startDate = time_utils.getTimeStructInLocal(cycle.startDate)
endDate = time_utils.getTimeStructInLocal(cycle.endDate)
return formatter(i18n.makeString(key, cycleNumber=self.__currentCycle, day0='{:02d}'.format(startDate.tm_mday), month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.format(endDate.tm_mday), month1='{:02d}'.format(endDate.tm_mon)))
def __getAttentionText(self):
key = RANKED_BATTLES.RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_ATTENTIONTEXT
cycleNumber = self.__currentCycle
timeDelta = time_utils.getTimeDeltaFromNow(self.__seasonInfo.getCycleEndDate())
endTimeStr = time_utils.getTillTimeString(timeDelta, RANKED_BATTLES.STATUS_TIMELEFT)
if timeDelta <= time_utils.ONE_HOUR:
formatter = text_styles.alert
else:
formatter = text_styles.neutral
return formatter(i18n.makeString(key, cycleNumber=cycleNumber, timeLeft=endTimeStr))
def __getCalendar(self):
return self.components.get(VIEW_ALIAS.CALENDAR)
|
[
"[email protected]"
] | |
f9a929408b32170e178231ad8907c38aa8647599
|
9cef4ef20efd0eec18846242e78be0b9be144c30
|
/teacher_cade/day19/14.greenlet.py
|
e2d537f9ff67e3f0659a5afb381d8128caa9ab71
|
[] |
no_license
|
Vaild/python-learn
|
4e6511a62a40b6104b081e0f8fe30f7d829901f5
|
5d602daf3b4b7e42349b7d9251df1f4dd62c299c
|
refs/heads/master
| 2022-11-19T00:47:48.808384 | 2020-07-20T14:27:49 | 2020-07-20T14:27:49 | 279,044,379 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 360 |
py
|
#!/usr/bin/python3
# coding=utf-8
from greenlet import greenlet
import time
def test1():
while True:
print("---A--")
gr2.switch()
time.sleep(0.5)
def test2():
while True:
print("---B--")
gr1.switch()
time.sleep(0.5)
gr1 = greenlet(test1)
gr2 = greenlet(test2)
# 切换到gr1中运行
gr1.switch()
|
[
"[email protected]"
] | |
5cae4351928b729521bafe551e04ae158fbbd2f3
|
d60acaac9e460c5693efe61449667b3c399c53c8
|
/diffeq/logisticbifurcation.py
|
392cc43dfa415350c9c23054e6d5784488977d9c
|
[] |
no_license
|
HussainAther/mathematics
|
53ea7fb2470c88d674faa924405786ba3b860705
|
6849cc891bbb9ac69cb20dfb13fe6bb5bd77d8c5
|
refs/heads/master
| 2021-07-22T00:07:53.940786 | 2020-05-07T03:11:17 | 2020-05-07T03:11:17 | 157,749,226 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,159 |
py
|
import matplotlib.pyplot as plt
import numpy as np
"""
Logistic map bifurcation
"""
def logistic(r, x):
"""
Logistic map function for nonlinear systems
"""
return r * x * (1 - x)
x = np.linspace(0, 1)
fig, ax = plt.subplots(1, 1)
ax.plot(x, logistic(2, x), "k")
def plotsystem(r, x0, n, ax=None):
"""
Plot the function and the y=x diagonal line.
"""
t = np.linspace(0, 1)
ax.plot(t, logistic(r, t), "k", lw=2)
ax.plot([0, 1], [0, 1], "k", lw=2)
# Recursively apply y=f(x) and plot two lines:
# (x, x) -> (x, y)
# (x, y) -> (y, y)
x = x0
for i in range(n):
y = logistic(r, x)
# Plot the two lines.
ax.plot([x, x], [x, y], "k", lw=1)
ax.plot([x, y], [y, y], "k", lw=1)
# Plot the positions with increasing
# opacity.
ax.plot([x], [y], "ok", ms=10,
alpha=(i + 1) / n)
x = y
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6),
sharey=True)
plotsystem(2.5, .1, 10, ax=ax1)
plotsystem(3.5, .1, 10, ax=ax2)
n = 10000
r = np.linspace(2.5, 4.0, n)
iterations = 1000
last = 100
x = 1e-5 * np.ones(n)
# lyapunov = np.zeros(n)
# fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 9),
# sharex=True)
for i in range(iterations):
x = logistic(r, x)
# We compute the partial sum of the
# Lyapunov exponent.
# lyapunov += np.log(abs(r - 2 * r * x))
# We display the bifurcation diagram.
if i >= (iterations - last):
ax1.plot(r, x, ",k", alpha=.25)
ax1.set_xlim(2.5, 4)
ax1.set_title("Bifurcation diagram")
# Display the Lyapunov exponent.
# Horizontal line.
# ax2.axhline(0, color="k", lw=.5, alpha=.5)
# Negative Lyapunov exponent.
# ax2.plot(r[lyapunov < 0],
# lyapunov[lyapunov < 0] / iterations,
# ".k", alpha=.5, ms=.5)
# Positive Lyapunov exponent.
# ax2.plot(r[lyapunov >= 0],
# lyapunov[lyapunov >= 0] / iterations,
# ".r", alpha=.5, ms=.5)
# ax2.set_xlim(2.5, 4)
# ax2.set_ylim(-2, 1)
# ax2.set_title("Lyapunov exponent")
# plt.tight_layout()
|
[
"[email protected]"
] | |
9bc70906c5a573ba42746d4a2f4efbf81e0e86c1
|
98f730ec6a43d8be4a34b0f2a44a9d35989d2287
|
/pynifi_client/models/tenants_entity.py
|
b4af3df3c70dc03de0e1a0bfb4fb63eb26b9a058
|
[] |
no_license
|
scottwr98/pynifi-client
|
9337a4f322536ee466d419a788b8b5948cdc62d7
|
013ac2ffa591284a0d6cbb9ed552681cc6f91165
|
refs/heads/master
| 2020-04-18T08:47:03.680749 | 2017-11-04T23:59:58 | 2017-11-04T23:59:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,219 |
py
|
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service. # noqa: E501
OpenAPI spec version: 1.4.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from pynifi_client.models.tenant_entity import TenantEntity # noqa: F401,E501
class TenantsEntity(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'users': 'list[TenantEntity]',
'user_groups': 'list[TenantEntity]'
}
attribute_map = {
'users': 'users',
'user_groups': 'userGroups'
}
def __init__(self, users=None, user_groups=None): # noqa: E501
"""TenantsEntity - a model defined in Swagger""" # noqa: E501
self._users = None
self._user_groups = None
self.discriminator = None
if users is not None:
self.users = users
if user_groups is not None:
self.user_groups = user_groups
@property
def users(self):
"""Gets the users of this TenantsEntity. # noqa: E501
:return: The users of this TenantsEntity. # noqa: E501
:rtype: list[TenantEntity]
"""
return self._users
@users.setter
def users(self, users):
"""Sets the users of this TenantsEntity.
:param users: The users of this TenantsEntity. # noqa: E501
:type: list[TenantEntity]
"""
self._users = users
@property
def user_groups(self):
"""Gets the user_groups of this TenantsEntity. # noqa: E501
:return: The user_groups of this TenantsEntity. # noqa: E501
:rtype: list[TenantEntity]
"""
return self._user_groups
@user_groups.setter
def user_groups(self, user_groups):
"""Sets the user_groups of this TenantsEntity.
:param user_groups: The user_groups of this TenantsEntity. # noqa: E501
:type: list[TenantEntity]
"""
self._user_groups = user_groups
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TenantsEntity):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
b0391f312af0eaea6305f39574b9a3f17f511b59
|
b1e7481f8b5bf40c2547c95b1863e25b11b8ef78
|
/Kai/crab/NANOv7_NoveCampaign/2018/crab_cfg_2018_ElMu_A.py
|
6670c230a950ef43e35c8eec67ec84cd44951904
|
[
"Apache-2.0"
] |
permissive
|
NJManganelli/FourTopNAOD
|
3df39fd62c0546cdbb1886b23e35ebdc1d3598ad
|
c86181ae02b1933be59d563c94e76d39b83e0c52
|
refs/heads/master
| 2022-12-22T22:33:58.697162 | 2022-12-17T01:19:36 | 2022-12-17T01:19:36 | 143,607,743 | 1 | 1 |
Apache-2.0
| 2022-06-04T23:11:42 | 2018-08-05T11:40:42 |
Python
|
UTF-8
|
Python
| false | false | 1,524 |
py
|
import os
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromCRIC
config = Configuration()
config.section_("General")
config.General.requestName = '2018_ElMu_A'
config.General.transferOutputs = True
config.General.transferLogs = True
config.section_("JobType")
config.JobType.allowUndistributedCMSSW = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'crab_PSet_2018_ElMu_A.py'
config.JobType.maxMemoryMB = 3000
config.JobType.maxJobRuntimeMin = 1315
config.JobType.numCores = 1
config.JobType.scriptExe = 'crab_script_2018_ElMu_A.sh'
config.JobType.inputFiles = ['crab_script_2018_ElMu_A.py',
os.path.join(os.environ['CMSSW_BASE'],'src/PhysicsTools/NanoAODTools/scripts/haddnano.py'),
]
config.JobType.outputFiles = [] #['hist.root']
config.JobType.sendPythonFolder = True
config.section_("Data")
config.Data.inputDataset = '/MuonEG/Run2018A-02Apr2020-v1/NANOAOD'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
if config.Data.splitting == 'FileBased':
config.Data.unitsPerJob = 1
# config.Data.totalUnits = $TOTAL_UNITS
# config.Data.userInputFiles = []
# config.Data.outLFNDirBase = '/store/user/{user}/NoveCampaign'.format(user=getUsernameFromCRIC())
config.Data.outLFNDirBase = '/store/group/fourtop/NoveCampaign'
config.Data.publication = True
config.Data.outputDatasetTag = 'NoveCampaign'
config.section_("Site")
config.Site.storageSite = 'T2_BE_IIHE'
|
[
"[email protected]"
] | |
3fff1e8913ecade61c264485b8d0d2ab2e8f1eef
|
ff768174490619c119d166273365dcc480e7201c
|
/tuiuiu/tuiuiuimages/migrations/0008_image_created_at_index.py
|
dbcc329edff45d13de512a5f638dee64e8a53c0d
|
[
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
] |
permissive
|
caputomarcos/tuiuiu.io
|
15ea9323be09b69efb6b88490c2bb558ffb4cc55
|
d8fb57cf95487e7fe1454b2130ef18acc916da46
|
refs/heads/master
| 2022-03-02T12:56:43.889894 | 2017-09-23T22:53:51 | 2017-09-23T22:53:51 | 102,543,365 | 3 | 1 |
NOASSERTION
| 2022-02-02T10:46:32 | 2017-09-06T00:30:06 |
Python
|
UTF-8
|
Python
| false | false | 457 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tuiuiuimages', '0007_image_file_size'),
]
operations = [
migrations.AlterField(
model_name='image',
name='created_at',
field=models.DateTimeField(db_index=True, verbose_name='Created at', auto_now_add=True),
),
]
|
[
"[email protected]"
] | |
311abd6f195d36542f90a27ae366b5bbe1325dd5
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02586/s677278751.py
|
be1ac288360753caa4fa9d68ee573cb35be6d292
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 879 |
py
|
import sys
sys.setrecursionlimit(10**7)
readline = sys.stdin.buffer.readline
def readstr():return readline().rstrip().decode()
def readstrs():return list(readline().decode().split())
def readint():return int(readline())
def readints():return list(map(int,readline().split()))
def printrows(x):print('\n'.join(map(str,x)))
def printline(x):print(' '.join(map(str,x)))
r,c,k = readints()
a = [[0]*(c+1) for i in range(r+1)]
for i in range(k):
R,C,V = readints()
a[R][C] = V
dp = [0]*(r+1)*(c+1)*4
for x in range((r+1)*(c+1)*4):
i = x//((c+1)*4)
l = x%((c+1)*4)
j = l//4
l %= 4
if i==0 or j==0:
continue
if l==0:
dp[x] = max(dp[i*(c+1)*4 + (j-1)*4], dp[(i-1)*(c+1)*4 + j*4 + 3])
else:
dp[x] = max(dp[i*(c+1)*4 + (j-1)*4 + l], dp[(i-1)*(c+1)*4 + j*4 + 3]+a[i][j], dp[i*(c+1)*4 + (j-1)*4 + l-1]+a[i][j])
print(dp[-1])
|
[
"[email protected]"
] | |
14c287e40ef9f07fe3dd6944c53d3460a99de7cb
|
85b7487c00cabf70cbcf180c5015ac4886e78fb1
|
/test/support/__init__.py
|
bdbdb8f5f19075bc664a148dbdf532d577c3550c
|
[] |
no_license
|
mkatsimpris/test_jpeg
|
7e686f27ac54db4128f4edbeb42b7cd284db0fa4
|
ee626d87e26a08d5ce80f73a883f00703ff34e70
|
refs/heads/master
| 2020-04-06T04:49:58.952565 | 2016-08-17T21:41:25 | 2016-08-17T21:41:25 | 49,828,665 | 3 | 2 | null | 2016-07-25T16:50:52 | 2016-01-17T17:58:21 |
Verilog
|
UTF-8
|
Python
| false | false | 254 |
py
|
from __future__ import absolute_import
from .jpeg_prep_cosim import prep_cosim
from .jpeg_v1_intf import JPEGEncV1
from .jpeg_v2_intf import JPEGEncV2
from .jpegenc_v1_top import convert as convertv1
from .utils import set_default_args, get_cli_args
|
[
"[email protected]"
] | |
19f97b46e444e83a2f72744a9002611efe7ccf0a
|
69e5676a801c5446ddec5e1cfd8daf527dbb3ab9
|
/stringcheese/wrangling/get_fficutout.py
|
917178ace1351e624d1dfa092c3a7e383136a123
|
[
"MIT"
] |
permissive
|
lgbouma/stringcheese
|
96d8d48aaa8da9da92744401bba5498399758636
|
e7f5919335f18d54f331e67f4df1a48e4904526d
|
refs/heads/master
| 2020-07-07T10:11:29.697659 | 2020-03-29T19:05:52 | 2020-03-29T19:05:52 | 203,321,632 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 632 |
py
|
import requests
from astroquery.mast import Tesscut
def get_fficutout(c_obj, cutoutdir=None, sector=None):
# c_obj (SkyCoord): location of target star
print('beginning download tesscut for {}'.format(repr(c_obj)))
try:
tab = Tesscut.download_cutouts(c_obj, size=20, sector=sector,
path=cutoutdir)
except (requests.exceptions.HTTPError,
requests.exceptions.ConnectionError) as e:
print('got {}, try again'.format(repr(e)))
tab = Tesscut.download_cutouts(c_obj, size=20, sector=sector,
path=cutoutdir)
|
[
"[email protected]"
] | |
0945e2340abb7961a09bf19356b325727714a0a7
|
b92b0e9ba2338ab311312dcbbeefcbb7c912fc2e
|
/build/shogun_lib/examples/undocumented/python_modular/kernel_spherical_modular.py
|
ef002d63c31f4dc1896ca111b2223acffcd201b9
|
[] |
no_license
|
behollis/muViewBranch
|
384f8f97f67723b2a4019294854969d6fc1f53e8
|
1d80914f57e47b3ad565c4696861f7b3213675e0
|
refs/heads/master
| 2021-01-10T13:22:28.580069 | 2015-10-27T21:43:20 | 2015-10-27T21:43:20 | 45,059,082 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 919 |
py
|
from tools.load import LoadMatrix
from numpy import where
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
parameter_list=[[traindat,testdat, 1.0],[traindat,testdat, 5.0]]
def kernel_spherical_modular (fm_train_real=traindat,fm_test_real=testdat, sigma=1.0):
from shogun.Features import RealFeatures
from shogun.Kernel import MultiquadricKernel
from shogun.Distance import EuclidianDistance
feats_train=RealFeatures(fm_train_real)
feats_test=RealFeatures(fm_test_real)
distance=EuclidianDistance(feats_train, feats_train)
kernel=MultiquadricKernel(feats_train, feats_train, sigma, distance)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
if __name__=='__main__':
print('Spherical')
kernel_spherical_modular(*parameter_list[0])
|
[
"prosen@305cdda6-5ce1-45b3-a98d-dfc68c8b3305"
] |
prosen@305cdda6-5ce1-45b3-a98d-dfc68c8b3305
|
ec1e5dbc338ecf43d1bd53ded885b1450fb0c5be
|
da570c2047d335b3553e63c27ac7f60b57b28b7e
|
/images/urls.py
|
6c3df5aaf6b9ca607cd5fbcabe80ae605ee575b6
|
[
"MIT"
] |
permissive
|
mfannick/viewImages
|
8c799fc52566de03f4909d36f5ccc50e7fff9564
|
27e447faff455fba306ef3e677d5f2f63160065e
|
refs/heads/master
| 2021-09-09T11:53:42.786004 | 2019-10-14T09:21:16 | 2019-10-14T09:21:16 | 214,357,014 | 0 | 0 | null | 2021-09-08T01:21:15 | 2019-10-11T06:11:06 |
Python
|
UTF-8
|
Python
| false | false | 425 |
py
|
from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns=[
url('^$' ,views.homePage,name='homePage'),
url(r'^search/', views.searchImageByCategory, name='searchImageByCategory'),
url(r'^description/(\d+)',views.imageDescription,name='imageDescription')
]
urlpatterns+=static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
546d674261f3df935f47d76c22d816e02e5c5599
|
4e0ee2b68398a90b0986975f645350033a624558
|
/tests/onnx_resnet18/test_onnx_resnet18_int8.py
|
5c23ade3f33cd92e177415de06a1d717c36ea894
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
kindsenior/nngen
|
697b80b32cf2b33e7f2c64e4d1a27eb2d739b30c
|
301b19b35e50174d8abb1a757b061ae80cdfe612
|
refs/heads/master
| 2022-09-21T05:53:34.565461 | 2020-05-03T14:58:19 | 2020-05-03T14:58:19 | 269,007,213 | 0 | 0 |
Apache-2.0
| 2020-06-03T06:26:43 | 2020-06-03T06:26:42 | null |
UTF-8
|
Python
| false | false | 2,129 |
py
|
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))))
import nngen as ng
import veriloggen
import onnx_resnet18
act_dtype = ng.int8
weight_dtype = ng.int8
bias_dtype = ng.int16
scale_dtype = ng.int16
with_batchnorm = True
disable_fusion = False
conv2d_par_ich = 1
conv2d_par_och = 1
conv2d_par_col = 1
conv2d_par_row = 1
conv2d_concur_och = None
conv2d_stationary = 'filter'
pool_par = 1
elem_par = 1
chunk_size = 64
axi_datawidth = 32
def test(request, silent=True):
veriloggen.reset()
simtype = request.config.getoption('--sim')
rslt = onnx_resnet18.run(act_dtype, weight_dtype,
bias_dtype, scale_dtype,
with_batchnorm, disable_fusion,
conv2d_par_ich, conv2d_par_och, conv2d_par_col, conv2d_par_row,
conv2d_concur_och, conv2d_stationary,
pool_par, elem_par,
chunk_size,
axi_datawidth, silent,
filename=None, simtype=simtype,
outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')
verify_rslt = rslt.splitlines()[-1]
assert(verify_rslt == '# verify: PASSED')
if __name__ == '__main__':
rslt = onnx_resnet18.run(act_dtype, weight_dtype,
bias_dtype, scale_dtype,
with_batchnorm, disable_fusion,
conv2d_par_ich, conv2d_par_och, conv2d_par_col, conv2d_par_row,
conv2d_concur_och, conv2d_stationary,
pool_par, elem_par,
chunk_size,
axi_datawidth, silent=False,
filename='tmp.v',
outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')
print(rslt)
|
[
"[email protected]"
] | |
0c537c7a76a12d5ad4b319e5ecd8695d74b3b0f6
|
5c5458622ab8413fef8ab11ef5e09dcdcd42ff69
|
/1.py
|
ad47b2fba1e02ef70d242917fbf7c8954a6efe8d
|
[] |
no_license
|
zhenyakeg/Console
|
127fbbfe33cf86a0e4d5eb968c783407168364f5
|
31eea7a22a95701049872d4da2c01307f05e920d
|
refs/heads/master
| 2021-01-13T10:06:17.797796 | 2016-10-27T17:59:03 | 2016-10-27T17:59:03 | 72,119,199 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 218 |
py
|
__author__ = 'student'
# импортируем модуль
import sys
# выводим на экран список всех аргументов
s=0
for arg in sys.argv:
if len(arg) == 3:
s+=1
print (s)
|
[
"[email protected]"
] | |
ed817dc1416c6f9ee3bd63420344cf905981be76
|
f8b77d8b7d90dabfa3b222116d9fe462d890e89b
|
/plans/fixed_ensemble_resnet_linear_4.py
|
53480a497b356b8478b73ceacb2478cfd372a96e
|
[
"BSD-2-Clause"
] |
permissive
|
dbis-uibk/MediaEval2021
|
94e4041d6e82a28ceb95c68994808d0acc725915
|
14d754d9cea36415090aaa115db81f5ace465964
|
refs/heads/master
| 2023-08-27T19:12:17.758042 | 2021-11-03T12:12:57 | 2021-11-03T12:12:57 | 424,210,495 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,196 |
py
|
"""Ensemble plan manually split by type moode/theme."""
import json
from dbispipeline.evaluators import FixedSplitEvaluator
from dbispipeline.evaluators import ModelCallbackWrapper
import numpy as np
from sklearn.pipeline import Pipeline
from mediaeval2021 import common
from mediaeval2021.dataloaders.melspectrograms import MelSpectPickleLoader
from mediaeval2021.models.ensemble import Ensemble
from mediaeval2021.models.wrapper import TorchWrapper
dataloader = MelSpectPickleLoader('data/mediaeval2020/melspect_1366.pickle')
label_splits = [
np.arange(0, 14, 1),
np.arange(14, 28, 1),
np.arange(28, 42, 1),
np.arange(42, 56, 1),
]
pipeline = Pipeline([
('model',
Ensemble(
base_estimator=TorchWrapper(
model_name='ResNet-18',
dataloader=dataloader,
batch_size=64,
early_stopping=True,
),
label_splits=label_splits,
epochs=100,
)),
])
evaluator = ModelCallbackWrapper(
FixedSplitEvaluator(**common.fixed_split_params()),
lambda model: common.store_prediction(model, dataloader),
)
result_handlers = [
lambda results: print(json.dumps(results, indent=4)),
]
|
[
"[email protected]"
] | |
8c55b1b583c89eaaf63961ca00dde5c69b6b67c5
|
5e5799e0ccce7a72d514fbc76dcb0a2108013f18
|
/Textfile2DefDomGeom.py
|
710ab37655fe1cd3158b6347c04304f6a2e29644
|
[] |
no_license
|
sourcery-ai-bot/dash
|
6d68937d225473d06a18ef64079a4b3717b5c12c
|
e1d1c3a601cd397d2508bfd4bb12bdb4e878cd9a
|
refs/heads/master
| 2023-03-07T17:15:39.174964 | 2011-03-01T17:11:21 | 2011-03-01T17:11:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,140 |
py
|
#!/usr/bin/env python
#
# Use doms.txt or nicknames.txt file to create a default-dom-geometry file and
# print the result to sys.stdout
#
# URL: http://icecube.wisc.edu/~testdaq/database_files/nicknames.txt
import sys
from DefaultDomGeometry import DefaultDomGeometryReader, DomsTxtReader, \
NicknameReader
if __name__ == "__main__":
if len(sys.argv) < 2:
raise SystemExit("Please specify a file to load!")
if len(sys.argv) > 2:
raise SystemExit("Too many command-line arguments!")
if sys.argv[1].endswith("nicknames.txt"):
newGeom = NicknameReader.parse(sys.argv[1])
elif sys.argv[1].endswith("doms.txt"):
newGeom = DomsTxtReader.parse(sys.argv[1])
else:
raise SystemExit("File must be 'nicknames.txt' or 'doms.txt'," +
" not '%s'" % sys.argv[1])
oldDomGeom = DefaultDomGeometryReader.parse()
# rewrite the 64-DOM strings to 60 DOM strings plus 32 DOM icetop hubs
newGeom.rewrite(False)
oldDomGeom.rewrite()
oldDomGeom.mergeMissing(newGeom)
# dump the new default-dom-geometry data to sys.stdout
oldDomGeom.dump()
|
[
"[email protected]"
] | |
0365a71e6ddcbf5d739bd768676f3f793715d525
|
1799fe1d9dfcf5f9619a87a11f3fa6170e1864fc
|
/00998/test_maximum_binary_tree_ii.py
|
44cc467d9b38a539320c803e7bc639b674e44c3e
|
[] |
no_license
|
SinCatGit/leetcode
|
5e52b49324d16a96de1ba4804e3d17569377e804
|
399e40e15cd64781a3cea295bf29467d2284d2ae
|
refs/heads/master
| 2021-07-05T18:51:46.018138 | 2020-04-25T04:06:48 | 2020-04-25T04:06:48 | 234,226,791 | 1 | 1 | null | 2021-04-20T19:17:43 | 2020-01-16T03:27:08 |
Python
|
UTF-8
|
Python
| false | false | 1,519 |
py
|
import unittest
from maximum_binary_tree_ii import Solution, TreeNode
class TestSolution(unittest.TestCase):
def test_Calculate_Solution(self):
solution = Solution()
# 6
# / \
# 1 4
# / \
# 3 2
t25 = TreeNode(6)
t21 = TreeNode(1)
t24 = TreeNode(4)
t23 = TreeNode(3)
t26 = TreeNode(2)
t25.left = t21
t25.right = t24
t24.left = t23
t24.right = t26
def dfs(r):
if r.left:
yield from dfs(r.left)
yield r.val
if r.right:
yield from dfs(r.right)
root = solution.insertIntoMaxTree(t25, 7)
self.assertEqual([1, 6, 3, 4, 2, 7], [v for v in dfs(root)])
root = solution.insertIntoMaxTree(t25, 5)
self.assertEqual([1, 6, 3, 4, 2, 5], [v for v in dfs(root)])
# 6
# / \
# 1 4
# / \
# 3 2
t25 = TreeNode(6)
t21 = TreeNode(1)
t24 = TreeNode(4)
t23 = TreeNode(3)
t26 = TreeNode(2)
t25.left = t21
t25.right = t24
t24.left = t23
t24.right = t26
root = solution.insertIntoMaxTreeV01(t25, 7)
self.assertEqual([1, 6, 3, 4, 2, 7], [v for v in dfs(root)])
root = solution.insertIntoMaxTreeV01(t25, 5)
self.assertEqual([1, 6, 3, 4, 2, 5], [v for v in dfs(root)])
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
1230bd8aea0ed2cf0e02c98811fd1bca3bac9353
|
e6d4a87dcf98e93bab92faa03f1b16253b728ac9
|
/algorithms/python/smallestGoodBase/smallestGoodBase.py
|
0fc48f86b09d916c4865349241f9dfd2a7f0a365
|
[] |
no_license
|
MichelleZ/leetcode
|
b5a58e1822e3f6ef8021b29d9bc9aca3fd3d416f
|
a390adeeb71e997b3c1a56c479825d4adda07ef9
|
refs/heads/main
| 2023-03-06T08:16:54.891699 | 2023-02-26T07:17:47 | 2023-02-26T07:17:47 | 326,904,500 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 843 |
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Source: https://leetcode.com/problems/smallest-good-base/
# Author: Miao Zhang
# Date: 2021-02-15
class Solution:
def smallestGoodBase(self, n: str) -> str:
num = int(n)
# n = 1 + k + k^2+....k^(m - 1)
# i:m
for i in range(int(math.log(num + 1, 2)), 1, -1):
# base k
left = 2
right = pow(num, 1 / (i - 1)) + 1
while left < right:
mid = int(left + (right - left) // 2)
sums = 0
for j in range(i):
sums = sums * mid + 1
if sums == num:
return str(mid)
elif sums < num:
left = mid + 1
else:
right = mid
return str(num - 1)
|
[
"[email protected]"
] | |
a819988d7ff2b5dd395cdf6647f534d1e2bd76d9
|
ac227cc22d5f5364e5d029a2cef83816a6954590
|
/applications/physbam/physbam-lib/External_Libraries/Archives/boost/tools/build/v2/test/test2.py
|
cb74b851f46253b5bae80ccdd8ca872abd5ef6de
|
[
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
schinmayee/nimbus
|
597185bc8bac91a2480466cebc8b337f5d96bd2e
|
170cd15e24a7a88243a6ea80aabadc0fc0e6e177
|
refs/heads/master
| 2020-03-11T11:42:39.262834 | 2018-04-18T01:28:23 | 2018-04-18T01:28:23 | 129,976,755 | 0 | 0 |
BSD-3-Clause
| 2018-04-17T23:33:23 | 2018-04-17T23:33:23 | null |
UTF-8
|
Python
| false | false | 471 |
py
|
#!/usr/bin/python
from BoostBuild import Tester, List
from time import sleep
t = Tester()
t.set_tree("test2")
t.run_build_system("-sBOOST_BUILD_PATH=" + t.original_workdir + "/..")
file_list = 'bin/foo/$toolset/debug/runtime-link-dynamic/' * List("foo foo.o")
t.expect_addition(file_list)
t.write("foo.cpp", "int main(int, char**) { return 0; }\n")
t.run_build_system("-d2 -sBOOST_BUILD_PATH=" + t.original_workdir + "/..")
t.expect_touch(file_list)
t.pass_test()
|
[
"[email protected]"
] | |
b454e42a2bc91d38dbdb4c2052da1a603fdaf6db
|
4f026ddcf8f058d884f15259f0e42c2178eb2157
|
/clare/clare/application/factories.py
|
0dadccd763c699f4a366d61970db657bafff708f
|
[
"MIT"
] |
permissive
|
dnguyen0304/roomlistwatcher
|
afd95e5f601f77fc8d7c4cd4307e60f36b53162c
|
7ac4d5172de22dd8906662da521995c8e06c2617
|
refs/heads/master
| 2021-01-20T22:55:04.289589 | 2017-11-16T04:09:49 | 2017-11-16T04:09:49 | 101,829,306 | 0 | 0 | null | 2017-11-16T04:09:49 | 2017-08-30T02:38:56 |
Python
|
UTF-8
|
Python
| false | false | 2,677 |
py
|
# -*- coding: utf-8 -*-
import Queue
import os
import threading
import uuid
from . import applications
from . import download_bot
from . import room_list_watcher
class Application(object):
def __init__(self, infrastructure, properties):
"""
Parameters
----------
infrastructure : clare.infrastructure.infrastructures.ApplicationInfrastructure
properties : collections.Mapping
"""
self._infrastructure = infrastructure
self._properties = properties
def create(self):
"""
Returns
-------
clare.application.applications.Application
"""
queue = Queue.Queue()
# Construct the room list watcher.
room_list_watcher_factory = room_list_watcher.factories.Producer(
infrastructure=self._infrastructure.room_list_watcher,
properties=self._properties['room_list_watcher'])
room_list_watcher_ = room_list_watcher_factory.create()
# Include threading.
kwargs = {
'interval': self._properties['room_list_watcher']['interval']
}
room_list_watcher_ = threading.Thread(name='room_list_watcher',
target=room_list_watcher_.produce,
kwargs=kwargs)
room_list_watcher_.daemon = True
# Construct the download bot.
download_bot_factory = download_bot.factories.Factory(
queue=queue,
properties=self._properties['download_bot'])
directory_path = os.path.join(
self._properties['download_bot']['factory']['root_directory_path'],
str(uuid.uuid4()))
download_bot_ = download_bot_factory.create(
download_directory_path=directory_path)
# Include threading.
kwargs = {
'interval': self._properties['download_bot']['interval'],
'timeout': self._properties['download_bot']['timeout']
}
download_bot_ = threading.Thread(name='download_bot',
target=download_bot_.consume,
kwargs=kwargs)
download_bot_.daemon = True
# Construct the application.
application = applications.Application(
room_list_watcher=room_list_watcher_,
download_bot=download_bot_)
return application
def __repr__(self):
repr_ = '{}(infrastructure={}, properties={})'
return repr_.format(self.__class__.__name__,
self._infrastructure,
self._properties)
|
[
"[email protected]"
] | |
7b11a37746ad28f3e18303de213f4beb2bbb4404
|
315450354c6ddeda9269ffa4c96750783963d629
|
/CMSSW_7_0_4/src/SimTotem/RPTimingDetectorsDigiProducer/python/BeamMisalignmentFinder.py
|
3ff7d944d97f722285c3413832867036093dcd54
|
[] |
no_license
|
elizamelo/CMSTOTEMSim
|
e5928d49edb32cbfeae0aedfcf7bd3131211627e
|
b415e0ff0dad101be5e5de1def59c5894d7ca3e8
|
refs/heads/master
| 2021-05-01T01:31:38.139992 | 2017-09-12T17:07:12 | 2017-09-12T17:07:12 | 76,041,270 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,414 |
py
|
import ROOT
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy import exp
parser = argparse.ArgumentParser(description='Finds beam misaligned between given template and misaligned ntuple')
parser.add_argument('template_file', metavar='template_file', type=str, nargs=1,
help='File containing ntuple used as template to find misaligned')
parser.add_argument('misaligned_file', metavar='misaligned_file', type=str, nargs=1,
help='File containing misaligned ntuple')
args = parser.parse_args()
ROOT.gROOT.ProcessLine('.include ' + os.environ['CMSSW_BASE'] + '/src')
ROOT.gROOT.ProcessLine(
'.L ' + os.environ['CMSSW_BASE'] + '/src/TotemAnalysis/TotemNtuplizer/interface/RPTimingDetectorsNtuplizerHit.h+g')
tree_name = 'TotemNtuple'
hit_source_types = ["reco", "filtered", "tracking_pot_210_far", "tracking_pot_220_far"]
ids = {
"reco": ["%03d" % id for id in [20, 21, 120, 121]]
}
ids["filtered"] = ids["reco"]
ids["tracking_pot_220_far"] = ids["reco"]
ids["tracking_pot_210_far"] = ids["reco"]
tracking_pot_maps = {
"tracking_pot_220_far": {
"020": "24",
"021": "25",
"120": "124",
"121": "125"
},
"tracking_pot_210_far": {
"020": "4",
"021": "5",
"120": "104",
"121": "105"
}
}
def get_branch_name(detector_id, type):
if type == "geant":
return 'rp_timing_detector_%s_hits' % detector_id
elif type == "reco":
return 'rp_timing_detector_%s_reco_hits' % detector_id
elif type == "filtered":
return 'rp_timing_detector_%s_filtered_hits' % detector_id
elif type == "tracking_pot_220_far":
return 'rp_timing_detector_%s_tracking_pot_%03.d' % (detector_id, int(tracking_pot_maps[type][detector_id]))
elif type == "tracking_pot_210_far":
return 'rp_timing_detector_%s_tracking_pot_%03.d' % (detector_id, int(tracking_pot_maps[type][detector_id]))
def load_ntuple(file_path):
tree_path = '/'.join([file_path, tree_name])
tchain = ROOT.TChain('TotemNtuple', '')
tchain.Add(tree_path)
return tchain
def gauss(x, a, x0, sigma):
return a*exp(-(x-x0)**2/(2*sigma**2))
def cut_zero_bins(histogram, bins):
first = histogram.index(next(x for x in histogram if x != 0))
last = (len(histogram) - 1) - histogram[::-1].index(
next(x for x in histogram[::-1] if x != 0))
hist_x = bins[first:last + 1]
hist_y = histogram[first:last + 1]
return hist_x, hist_y
def fit_gauss(x, y):
gauss_params, pcov = curve_fit(gauss, x, y, p0=[1., 0., 1.])
return gauss_params, np.sqrt(np.diag(pcov))
def find_misalignment(template_data, misaligned_data):
sum = 0.0
number = 0.0
template_error = 0.0
misaligned_error = 0.0
for bin_width in list(np.arange(0.1, 1.1, 0.1))[::-1]:
try:
histogram_bins = np.arange(-20, 20, bin_width)
template_histogram = list(np.histogram(template_data, bins=histogram_bins)[0])
misaligned_histogram = list(np.histogram(misaligned_data, bins=histogram_bins)[0])
template_x, template_y = cut_zero_bins(template_histogram, histogram_bins)
misaligned_x, misaligned_y = cut_zero_bins(misaligned_histogram, histogram_bins)
template_gauss_params, template_standard_deviation_error = fit_gauss(template_x, template_y)
misaligned_gauss_params, misaligned_standard_deviation_error = fit_gauss(misaligned_x, misaligned_y)
template_error += template_standard_deviation_error[1]
misaligned_error += misaligned_standard_deviation_error[1]
template_x0 = template_gauss_params[1]
misaligned_x0 = misaligned_gauss_params[1]
# plt.plot(misaligned_x, misaligned_y, 'b+:', label='data')
# plt.plot(misaligned_x, gauss(misaligned_x, *misaligned_gauss_params), 'ro:', label='fit')
# plt.legend()
# plt.savefig("foo.png")
sum += (misaligned_x0 - template_x0)
number += 1
except RuntimeError:
# print "result not found for %.2f bins width" % bin_width
pass
if number > 0:
return sum/number, template_error/number, misaligned_error/number
raise Exception('Cannot find misalignment')
if __name__ == "__main__":
template_file_name = args.template_file[0]
misaligned_file_name = args.misaligned_file[0]
template_ntuple = load_ntuple(template_file_name)
misaligned_ntuple = load_ntuple(misaligned_file_name)
# check sizes
if template_ntuple.GetEntries() != misaligned_ntuple.GetEntries():
print "Error, all sources must have te same number of events"
exit(-1)
sources_ntuples_types = ["template", "misaligned"]
hits_histograms = {}
for ntuple_type in sources_ntuples_types:
hits_histograms[ntuple_type] = {}
for hit_type in hit_source_types:
hits_histograms[ntuple_type][hit_type] = {}
for id in ids[hit_type]:
hits_histograms[ntuple_type][hit_type][id] = []
for source_name, source_ntuple in zip(sources_ntuples_types, [template_ntuple, misaligned_ntuple]):
for event in source_ntuple:
for type in hit_source_types:
for id in ids[type]:
for hits_vector in getattr(event, get_branch_name(id, type)):
if type in ["reco", "filtered"]:
hits_histograms[source_name][type][id].append(hits_vector.position.x)
elif type in ["tracking_pot_210_far", "tracking_pot_220_far"]:
hits_histograms[source_name][type][id].append(hits_vector.x)
sum = 0.0
number = 0.0
print "Calculated misalignment"
for type in hit_source_types:
for id in ids[type]:
result, template_error, misaligned_error = \
find_misalignment(hits_histograms["template"][type][id], hits_histograms["misaligned"][type][id])
sum += result
number += 1
print '%s %.2fmm; standard deviation error: template: %.2f misaligned: %.2f' % (get_branch_name(id, type), result,
template_error, misaligned_error)
print 'Average %.2fmm' % (sum/number)
|
[
"[email protected]"
] | |
04a0ae35c0b49d0518e6a68d481f6e317f214115
|
3a8c2bd3b8df9054ed0c26f48616209859faa719
|
/Challenges/surroundedRegions.py
|
570dcbb6b411e6fd035814e01998a9a4779b635f
|
[] |
no_license
|
AusCommsteam/Algorithm-and-Data-Structures-and-Coding-Challenges
|
684f1ca2f9ee3c49d0b17ecb1e80707efe305c82
|
98fb752c574a6ec5961a274e41a44275b56da194
|
refs/heads/master
| 2023-09-01T23:58:15.514231 | 2021-09-10T12:42:03 | 2021-09-10T12:42:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,718 |
py
|
"""
Surrounded Regions
Given a 2D board containing 'X' and 'O' (the letter O), capture all regions surrounded by 'X'.
A region is captured by flipping all 'O's into 'X's in that surrounded region.
Example:
X X X X
X O O X
X X O X
X O X X
After running your function, the board should be:
X X X X
X X X X
X X X X
X O X X
Explanation:
Surrounded regions shouldn’t be on the border, which means that any 'O' on the border of the board are not flipped to 'X'. Any 'O' that is not on the border and it is not connected to an 'O' on the border will be flipped to 'X'. Two cells are connected if they are adjacent cells connected horizontally or vertically.
"""
"""
BFS
Time: O(MN)
Space: O(N)
"""
class Solution:
def solve(self, board: List[List[str]]) -> None:
"""
Do not return anything, modify board in-place instead.
"""
queue = collections.deque()
for i in range(len(board)):
for j in range(len(board[0])):
if (i == 0 or i == len(board)-1 or j == 0 or j == len(board[0])-1) and board[i][j] == 'O':
queue.append((i, j))
directions = [(1, 0), (-1, 0), (0, -1), (0, 1)]
while queue:
i, j = queue.popleft()
if 0 <= i < len(board) and 0 <= j < len(board[0]) and board[i][j] == 'O':
board[i][j] = 'D'
for di, dj in directions:
queue.append((i + di, j + dj))
for i in range(len(board)):
for j in range(len(board[0])):
if board[i][j] == 'O':
board[i][j] = 'X'
elif board[i][j] == 'D':
board[i][j] = 'O'
|
[
"[email protected]"
] | |
dece0de38d388908615b7dfa117a5a0a64cc883f
|
fe40fb53bdeb3d693174a57fe336503e92fe299b
|
/eheritage/utils.py
|
f8dbc0756aecfe7426966fb4198086045afbacea
|
[
"BSD-2-Clause"
] |
permissive
|
uq-eresearch/eheritage
|
8c8d096d43888e6e41fbbacdf55f2c6808bace27
|
e4a2f01c56d438d8b3f4de63d50d979a8105d652
|
refs/heads/master
| 2022-07-18T19:21:53.224175 | 2016-08-05T02:40:08 | 2016-08-05T02:40:08 | 18,045,275 | 0 | 0 |
BSD-3-Clause
| 2022-07-06T19:49:44 | 2014-03-23T22:33:56 |
HTML
|
UTF-8
|
Python
| false | false | 281 |
py
|
from flask.json import JSONEncoder
class IterableAwareEncoder(JSONEncoder):
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
|
[
"[email protected]"
] | |
a07324a9d67bfb019bf47a4e379d797eab6ed5f3
|
728f639b8d536348e200a6c6b8dfd3e70a781d85
|
/HTMLTestRunner测试报告&unittest/可以复用项目/webTest/comm/common.py
|
967c849ad0793853febfe741f742e28639cee19c
|
[] |
no_license
|
jingshiyue/my_dict_forPython
|
00adad2a1492b7ecff66a3de44793f17682aaea6
|
7a0da28d68eb130e62d196467d0ef0ee3d8ebf95
|
refs/heads/master
| 2023-04-05T18:29:36.707082 | 2023-03-30T10:30:13 | 2023-03-30T10:30:13 | 192,511,669 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,436 |
py
|
# -*- coding:utf-8 -*-
import os
import readConfig as readConfig
from xlrd import open_workbook
from xml.etree import ElementTree as ElementTree
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from comm.webDriver import MyDriver as Driver
import time
import comm.runSet as runSet
localReadConfig = readConfig.ReadConfig()
def open_browser():
"""
open browser by url
:return:
"""
browser = webdriver.Chrome()
# 绐楀彛鏈�ぇ鍖�
browser.maximize_window()
return browser
def close_browser(browser):
"""
close browser
:param browser:
:return:
"""
browser.close()
def open_url(name):
"""
open web page by url
:param name:
:return:
"""
url = localReadConfig.get_webServer(name)
browser = open_browser()
browser.get(url)
return browser
def get_xls(xls_name, sheet_name):
"""
:param xls_name: excel file name
:param sheet_name: sheet name
:return: sheet value
"""
web = runSet.get_web()
site = runSet.get_site()
cls = []
# get excel file path
xls_path = os.path.join(readConfig.proDir, 'file', web, site, xls_name)
print("xls path:"+xls_path)
# open excel file
book = open_workbook(xls_path)
# get sheet by name
sheet = book.sheet_by_name(sheet_name)
# get nrows
nrows = sheet.nrows
for i in range(nrows):
if sheet.row_values(i)[0] != u'case_name':
cls.append(sheet.row_values(i))
# print(sheet.row_values(i))
return cls
activity = {}
def set_xml():
"""
get element
:return:
"""
web = runSet.get_web()
site = runSet.get_site()
if len(activity) == 0:
file_path = os.path.join(readConfig.proDir, 'file', web, site, 'element.xml')
tree = ElementTree.parse(file_path)
for a in tree.findall('activity'):
activity_name = a.get('name')
element = {}
for e in a.getchildren():
element_name = e.get('id')
element_child = {}
for t in e.getchildren():
element_child[t.tag] = t.text
element[element_name] = element_child
activity[activity_name] = element
def get_el_dict(activity_name, element):
"""
According to page, activity and element getting element
:param activity_name: activity name
:param element: element name
:return:
"""
set_xml()
element_dict = activity.get(activity_name).get(element)
print(element_dict)
return element_dict
class Element: #Element("shein", "www", "login", "login_link").is_exist()
def __init__(self, activity_name, element_name):
self.driver1 = Driver.get_browser()
self.driver = self.driver1.get_driver()
self.activity = activity_name
self.element = element_name
element_dict = get_el_dict(self.activity, self.element)
self.pathType = element_dict.get('pathType')
self.pathValue = element_dict.get('pathValue')
def is_exist(self):
"""
Determine element is exist
:return: TRUE OR FALSE
"""
try:
if self.pathType == 'ID':
self.driver.find_element_by_id(self.pathValue)
return True
if self.pathType == 'XPATH':
self.driver.find_elements_by_xpath(self.pathValue)
return True
if self.pathType == 'CLASSNAME':
self.driver.find_element_by_class_name(self.pathValue)
return True
if self.pathType == 'NAME':
self.driver.find_element_by_name(self.pathValue)
return True
except NoSuchElementException:
return False
def wait_element(self, wait_time):
"""
wait element appear in time
:param wait_time: wait time
:return: true or false
"""
time.sleep(wait_time)
if self.is_exist():
return True
else:
return False
def get_element(self):
"""
get element
:return: element
"""
try:
if self.pathType == 'ID':
element = self.driver.find_element_by_id(self.pathValue)
return element
if self.pathType == 'XPATH':
element = self.driver.find_elements_by_xpath(self.pathValue)
return element
if self.pathType == 'CLASSNAME':
element = self.driver.find_element_by_class_name(self.pathValue)
return element
if self.pathType == 'NAME':
element = self.driver.find_element_by_name(self.pathValue)
return element
except NoSuchElementException:
return None
def get_element_by_index(self, index):
"""
get element by index
:param index: index
:return: element
"""
try:
if self.pathType == 'ID':
element = self.driver.find_element_by_id(self.pathValue)
return element[index]
if self.pathType == 'XPATH':
element = self.driver.find_elements_by_xpath(self.pathValue)
return element[index]
if self.pathType == 'CLASSNAME':
element = self.driver.find_element_by_class_name(self.pathValue)
return element[index]
if self.pathType == 'NAME':
element = self.driver.find_element_by_name(self.pathValue)
return element[index]
except NoSuchElementException:
return None
def get_element_list(self):
"""
get element list
:return: element list
"""
try:
if self.pathType == 'ID':
element_list = self.driver.find_element_by_id(self.pathValue)
return element_list
if self.pathType == 'XPATH':
element_list = self.driver.find_elements_by_xpath(self.pathValue)
return element_list
if self.pathType == 'CLASSNAME':
element_list = self.driver.find_element_by_class_name(self.pathValue)
return element_list
if self.pathType == 'NAME':
element_list = self.driver.find_element_by_name(self.pathValue)
return element_list
except NoSuchElementException:
return None
def click(self):
"""
click element
:return:
"""
element = self.get_element()
time.sleep(1)
element.click()
def send_key(self, key):
"""
input key
:param key: input value
:return:
"""
element = self.get_element()
time.sleep(1)
element.clear()
element.send_keys(key)
def input_keys(self, index, key):
"""
By index send key
:param index: index
:param key: key
:return:
"""
element = self.get_element_by_index(index)
time.sleep(1)
element.clear()
element.send_keys(key)
def get_text_value(self):
"""
get attribute
:return:
"""
element = self.get_element()
value = element.get_attribute('text')
return str(value)
|
[
"[email protected]"
] | |
a40004ba548e520cede3f28efbf8c20e012e0185
|
373cd41477438cc8826cd2a2f8689be84f486339
|
/msticpy/config/ce_data_providers.py
|
461dd3a6013e76b92a7875acbbc937f2d5327b61
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-3.0-only",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"ISC",
"LGPL-2.0-or-later",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"LGPL-2.1-only",
"Unlicense",
"Python-2.0",
"LicenseRef-scancode-python-cwi",
"MIT",
"LGPL-2.1-or-later",
"GPL-2.0-or-later",
"HPND",
"ODbL-1.0",
"GPL-1.0-or-later",
"MPL-2.0"
] |
permissive
|
RiskIQ/msticpy
|
cd42d601144299ec43631554076cc52cbb42dc98
|
44b1a390510f9be2772ec62cb95d0fc67dfc234b
|
refs/heads/master
| 2023-08-27T00:11:30.098917 | 2021-06-17T22:54:29 | 2021-06-17T22:54:29 | 374,787,165 | 1 | 0 |
MIT
| 2021-09-16T19:05:43 | 2021-06-07T20:05:09 |
Python
|
UTF-8
|
Python
| false | false | 1,049 |
py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Data Providers Component Edit."""
from .._version import VERSION
from .ce_provider_base import CEProviders, HELP_URIS
__version__ = VERSION
__author__ = "Ian Hellen"
# pylint: disable=too-many-ancestors, duplicate-code
class CEDataProviders(CEProviders):
    """Data providers edit component."""

    # Description text shown for this component in the settings editor UI.
    _DESCRIPTION = "Settings for Data Providers"
    # Path of this component's section within the msticpy settings tree.
    _COMP_PATH = "DataProviders"
    # _HELP_TEXT inherited from base
    # Help links for this component; merged with the URIs shared by all
    # provider components (HELP_URIS).
    _HELP_URI = {
        "Data Providers": (
            "https://msticpy.readthedocs.io/en/latest/" + "DataAcquisition.html"
        ),
        **HELP_URIS,
    }
    # HTML fragment rendered as extra help inside the component.
    _COMPONENT_HELP = """
    <p><b>LocalData provider <i>data_paths</i></b>
    Enter one or more data paths, separated by new lines
    </p>
    """
|
[
"[email protected]"
] | |
ecffd0cb40db3a2541dd08f1f6cbc13ea53320ed
|
ed0dd577f03a804cdc274f6c7558fafaac574dff
|
/python/pyre/weaver/mills/CxxMill.py
|
d5307d0adaae8fcbb9fa32dd74b5c3f627978cec
|
[
"Apache-2.0"
] |
permissive
|
leandromoreira/vmaf
|
fd26e2859136126ecc8e9feeebe38a51d14db3de
|
a4cf599444701ea168f966162194f608b4e68697
|
refs/heads/master
| 2021-01-19T03:43:15.677322 | 2016-10-08T18:02:22 | 2016-10-08T18:02:22 | 70,248,500 | 3 | 0 | null | 2016-10-07T13:21:28 | 2016-10-07T13:21:27 | null |
UTF-8
|
Python
| false | false | 701 |
py
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from pyre.weaver.components.LineMill import LineMill
class CxxMill(LineMill):
    """Line mill that renders C++-style ("//") comments.

    Registered under the weaver names "c++" and "cxx".
    """
    names = ["c++", "cxx"]

    def __init__(self):
        # "//" is the line-comment marker; the second argument is the
        # first line emitted (an Emacs "-*- C++ -*-" mode hint).
        LineMill.__init__(self, "//", "// -*- C++ -*-")
        return
# version
__id__ = "$Id: CxxMill.py,v 1.1.1.1 2006-11-27 00:10:09 aivazis Exp $"
# End of file
|
[
"[email protected]"
] | |
d4adbe198d9a9e8a3154b16d3b046067822802d5
|
2836c3caf8ca332635640a27254a345afd449081
|
/iem/regain_hour_map.py
|
b88680e83d256a083176bb0d735984295da3bb65
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
akrherz/DEV
|
27cf1bac978a0d6bbfba1851b90d2495a3bdcd66
|
3b1ef5841b25365d9b256467e774f35c28866961
|
refs/heads/main
| 2023-08-30T10:02:52.750739 | 2023-08-29T03:08:01 | 2023-08-29T03:08:01 | 65,409,757 | 2 | 0 |
MIT
| 2023-09-12T03:06:07 | 2016-08-10T19:16:28 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,689 |
py
|
"""Plot the scam that is DST"""
import ephem
import mx.DateTime
import tqdm
from pyiem.plot import MapPlot
def compute_sunrise(lat, long):
    """Return how many days after 2018-03-10 local sunrise first comes
    earlier (in wall-clock minutes) than it did on 10 March — i.e. how
    long it takes to "recover" the morning hour lost to the spring DST
    change.

    :param lat: observer latitude as a string (pyephem parses strings)
    :param long: observer longitude as a string
    :return: int number of days until sunrise beats the 10 March baseline

    Fixes over the original: removed the dead locals ``arr``, ``doy`` and
    ``returnD`` and the unreachable ``return doy, arr, returnD`` after the
    infinite loop (its tuple type also contradicted the reachable int
    return).
    """
    sun = ephem.Sun()
    obs = ephem.Observer()
    obs.lat = lat
    obs.long = long
    start = mx.DateTime.DateTime(2018, 3, 10)
    one_day = mx.DateTime.RelativeDateTime(days=1)

    # Baseline: local sunrise time (minutes after midnight) on 10 March.
    now = start
    obs.date = now.strftime("%Y/%m/%d")
    rise = mx.DateTime.strptime(
        str(obs.next_rising(sun)), "%Y/%m/%d %H:%M:%S"
    )
    rise = rise.localtime()
    baseline_minutes = rise.hour * 60 + rise.minute
    now += one_day

    # Walk forward one day at a time until sunrise is earlier than the
    # baseline; the loop always terminates within a year because sunrise
    # drifts earlier toward the summer solstice.
    while True:
        obs.date = now.strftime("%Y/%m/%d")
        rise2 = mx.DateTime.strptime(
            str(obs.next_rising(sun)), "%Y/%m/%d %H:%M:%S"
        )
        rise2 = rise2.localtime()
        if rise2.hour * 60 + rise2.minute < baseline_minutes:
            return (rise2 - rise).days
        now += one_day
def main():
    """Go Main Go."""
    lats = []
    lons = []
    vals = []
    # Sample a coarse CONUS grid: 2-degree longitude steps, 1-degree
    # latitude steps; tqdm shows progress over the longitude sweep.
    for lon in tqdm.tqdm(range(-130, -60, 2)):
        for lat in range(20, 55, 1):
            lats.append(lat)
            lons.append(lon)
            # compute_sunrise expects string coordinates (pyephem style).
            vals.append(compute_sunrise(str(lat), str(lon)))
    m = MapPlot(
        sector="conus",
        title="Days to Recover Morning Hour after Spring Saving Time Change",
        subtitle=(
            "days until local time of sunrise is earlier "
            "than on 10 March, local DST rules ignored for plot"
        ),
    )
    # Contour the recovery-day counts in 3-day bins from 27 to 75.
    m.contourf(lons, lats, vals, range(27, 78, 3), units="days")
    m.postprocess(filename="180313.png")
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
dd2f007d5531fe7a3a72581701ad253e6d6eb614
|
9815041feb5bd2a89e39d86e544ca44c2e17e318
|
/config/settings.py
|
fdd605a6aef832ee03fc0708d15aa8ca4282d1b3
|
[] |
no_license
|
raimbaev223/django-docker-postgres-template
|
5ecb62fdc57bb3af77815c3c4d1f03c98d0fdaf3
|
f97449cf90b87daed374576ba52e545fc1694be0
|
refs/heads/master
| 2023-04-03T05:22:38.668148 | 2021-04-05T10:47:52 | 2021-04-05T10:47:52 | 354,720,373 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,247 |
py
|
"""
Django settings for djangoforprofessionals_ch3 project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j)xol&2u_4%!32uegp@x)y*=hmn8!nlp4_1tfxq#zwu#0et$46'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoforprofessionals_ch3.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangoforprofessionals_ch3.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'db',
'PORT': 5432
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
60d6cb2db006e20e4b18abeedfcd5b7a69a9801b
|
5d48aba44824ff9b9ae7e3616df10aad323c260e
|
/tree/653.two_sum_IV_input_is_a_BST.py
|
4ece9fd24ef54b435eb886456dcb70ac2d4e7d17
|
[] |
no_license
|
eric496/leetcode.py
|
37eab98a68d6d3417780230f4b5a840f6d4bd2a6
|
32a76cf4ced6ed5f89b5fc98af4695b8a81b9f17
|
refs/heads/master
| 2021-07-25T11:08:36.776720 | 2021-07-01T15:49:31 | 2021-07-01T15:49:31 | 139,770,188 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 940 |
py
|
"""
Given a Binary Search Tree and a target number, return true if there exist two elements in the BST such that their sum is equal to the given target.
Example 1:
Input:
5
/ \
3 6
/ \ \
2 4 7
Target = 9
Output: True
Example 2:
Input:
5
/ \
3 6
/ \ \
2 4 7
Target = 28
Output: False
"""
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        """Create a leaf node holding value x."""
        self.val = x  # node value
        self.left = None  # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
# Solution 1:
class Solution:
    def findTarget(self, root: TreeNode, k: int) -> bool:
        """Return True iff two distinct nodes in the tree sum to k."""
        target = set()  # complements (k - seen_value) collected so far
        return self.dfs(root, k, target)

    def dfs(self, root: TreeNode, k: int, target: set) -> bool:
        """DFS the tree, succeeding when a node's value matches a
        previously stored complement.

        (Annotation fixed: `target` is the complement set, not an int.)
        """
        if not root:
            return False
        if root.val in target:
            return True
        else:
            # Remember what the partner of this node's value would be.
            target.add(k - root.val)
        return self.dfs(root.left, k, target) or self.dfs(root.right, k, target)
|
[
"[email protected]"
] | |
6c0c670495008cbd06140f21e047f3da7ee7a9c9
|
6ceea2578be0cbc1543be3649d0ad01dd55072aa
|
/src/examples/elphf/diffusion/mesh1D.py
|
b8a851a3a7a81fb4e593b52c70dcb733f0cf0331
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
regmi/fipy
|
57972add2cc8e6c04fda09ff2faca9a2c45ad19d
|
eb4aacf5a8e35cdb0e41beb0d79a93e7c8aacbad
|
refs/heads/master
| 2020-04-27T13:51:45.095692 | 2010-04-09T07:32:42 | 2010-04-09T07:32:42 | 602,099 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,365 |
py
|
#!/usr/bin/env python
##
# ###################################################################
# FiPy - Python-based finite volume PDE solver
#
# FILE: "mesh1D.py"
#
# Author: Jonathan Guyer <[email protected]>
# Author: Daniel Wheeler <[email protected]>
# Author: James Warren <[email protected]>
# mail: NIST
# www: http://www.ctcms.nist.gov/fipy/
#
# ========================================================================
# This software was developed at the National Institute of Standards
# and Technology by employees of the Federal Government in the course
# of their official duties. Pursuant to title 17 Section 105 of the
# United States Code this software is not subject to copyright
# protection and is in the public domain. FiPy is an experimental
# system. NIST assumes no responsibility whatsoever for its use by
# other parties, and makes no guarantees, expressed or implied, about
# its quality, reliability, or any other characteristic. We would
# appreciate acknowledgement if the software is used.
#
# This software can be redistributed and/or modified freely
# provided that any derivative works bear some notice that they are
# derived from it, and any modified versions bear some notice that
# they have been modified.
# ========================================================================
#
# ###################################################################
##
r"""
A simple 1D example to test the setup of the multi-component diffusion
equations. The diffusion equation for each species in single-phase
multicomponent system can be expressed as
.. math::
\frac{\partial C_j}{\partial t}
= D_{jj}\nabla^2 C_j
+ D_{j}\nabla\cdot
\frac{C_j}{1 - \sum_{\substack{k=2\\ k \neq j}}^{n-1} C_k}
\sum_{\substack{i=2\\ i \neq j}}^{n-1} \nabla C_i
where :math:`C_j` is the concentration of the :math:`j^\text{th}` species,
:math:`t` is time, :math:`D_{jj}` is the self-diffusion coefficient of the
:math:`j^\text{th}` species, and :math:`\sum_{\substack{i=2\\ i \neq j}}^{n-1}`
represents the summation over all substitutional species in the system,
excluding the solvent and the component of interest.
We solve the problem on a 1D mesh
>>> nx = 400
>>> dx = 0.01
>>> L = nx * dx
>>> from fipy import *
>>> mesh = Grid1D(dx = dx, nx = nx)
One component in this ternary system will be designated the "solvent"
>>> class ComponentVariable(CellVariable):
... def __init__(self, mesh, value = 0., name = '',
... standardPotential = 0., barrier = 0.,
... diffusivity = None, valence = 0, equation = None):
... CellVariable.__init__(self, mesh = mesh, value = value,
... name = name)
... self.standardPotential = standardPotential
... self.barrier = barrier
... self.diffusivity = diffusivity
... self.valence = valence
... self.equation = equation
...
... def copy(self):
... return self.__class__(mesh = self.getMesh(),
... value = self.getValue(),
... name = self.getName(),
... standardPotential =
... self.standardPotential,
... barrier = self.barrier,
... diffusivity = self.diffusivity,
... valence = self.valence,
... equation = self.equation)
>>> solvent = ComponentVariable(mesh = mesh, name = 'Cn', value = 1.)
We can create an arbitrary number of components,
simply by providing a :keyword:`tuple` or :keyword:`list` of components
>>> substitutionals = [
... ComponentVariable(mesh = mesh, name = 'C1', diffusivity = 1.,
... standardPotential = 1., barrier = 1.),
... ComponentVariable(mesh = mesh, name = 'C2', diffusivity = 1.,
... standardPotential = 1., barrier = 1.),
... ]
>>> interstitials = []
>>> for component in substitutionals:
... solvent -= component
We separate the solution domain into two different concentration regimes
>>> x = mesh.getCellCenters()[0]
>>> substitutionals[0].setValue(0.3)
>>> substitutionals[0].setValue(0.6, where=x > L / 2)
>>> substitutionals[1].setValue(0.6)
>>> substitutionals[1].setValue(0.3, where=x > L / 2)
We create one diffusion equation for each substitutional component
>>> for Cj in substitutionals:
... CkSum = ComponentVariable(mesh = mesh, value = 0.)
... CkFaceSum = FaceVariable(mesh = mesh, value = 0.)
... for Ck in [Ck for Ck in substitutionals if Ck is not Cj]:
... CkSum += Ck
... CkFaceSum += Ck.getHarmonicFaceValue()
...
... convectionCoeff = CkSum.getFaceGrad() \
... * (Cj.diffusivity / (1. - CkFaceSum))
...
... Cj.equation = (TransientTerm()
... == DiffusionTerm(coeff=Cj.diffusivity)
... + PowerLawConvectionTerm(coeff=convectionCoeff))
If we are running interactively, we create a viewer to see the results
>>> if __name__ == '__main__':
... viewer = Viewer(vars=[solvent] + substitutionals,
... datamin=0, datamax=1)
... viewer.plot()
Now, we iterate the problem to equilibrium, plotting as we go
>>> for i in range(40):
... for Cj in substitutionals:
... Cj.updateOld()
... for Cj in substitutionals:
... Cj.equation.solve(var = Cj,
... dt = 10000.)
... if __name__ == '__main__':
... viewer.plot()
Since there is nothing to maintain the concentration separation in this problem,
we verify that the concentrations have become uniform
>>> substitutionals[0].allclose(0.45, rtol = 1e-7, atol = 1e-7).getValue()
1
>>> substitutionals[1].allclose(0.45, rtol = 1e-7, atol = 1e-7).getValue()
1
"""
__docformat__ = 'restructuredtext'
if __name__ == '__main__':
    ## from fipy.tools.profiler.profiler import Profiler
    ## from fipy.tools.profiler.profiler import calibrate_profiler
    # fudge = calibrate_profiler(10000)
    # profile = Profiler('profile', fudge=fudge)
    import fipy.tests.doctestPlus
    # Execute the doctests embedded in this module's docstring via
    # fipy's doctest harness.
    exec(fipy.tests.doctestPlus._getScript())
    # profile.stop()
    # NOTE: raw_input is Python 2 only; this script predates Python 3.
    raw_input("finished")
|
[
"[email protected]"
] | |
f1bfe5de0e26671054c332bdfc93d2f0d9d4265e
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_070/ch74_2020_04_06_14_56_45_688915.py
|
fbd6fc22f27f23767e365de5db1099f9b9558694
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 197 |
py
|
def conta_bigramas(x):
    """Count the occurrences of each adjacent pair (bigram) in x.

    Returns a dict mapping (x[i], x[i+1]) tuples to their frequency.
    Works on any sliceable sequence (string, list, ...).
    """
    contagem = {}
    for par in zip(x, x[1:]):
        contagem[par] = contagem.get(par, 0) + 1
    return contagem
|
[
"[email protected]"
] | |
9c71981567308ad84f8cdd6d9663bb32cd4dd6f4
|
bca9c2fa3c4c3d06dd612280ce39090a9dfab9bd
|
/neekanee/neekanee_solr/solr_query_builder.py
|
bb3792600dc6a9c0af8eefbc4cd05bff2fbb4fb6
|
[] |
no_license
|
thayton/neekanee
|
0890dd5e5cf5bf855d4867ae02de6554291dc349
|
f2b2a13e584469d982f7cc20b49a9b19fed8942d
|
refs/heads/master
| 2021-03-27T11:10:07.633264 | 2018-07-13T14:19:30 | 2018-07-13T14:19:30 | 11,584,212 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,806 |
py
|
KM_PER_MILE = 1.61  # SOLR spatial filters take km; UI radius arrives in miles

class SOLRQueryBuilder():
    """
    Build a SOLR query given a GET QueryDict for a job search.

    Request parameters are translated into SOLR filter queries (fq)
    via the `refine_search_fq` mapping; the finished request is
    returned as a plain dict of SOLR parameters.
    """

    def __init__(self):
        self.qdict = {}
        #
        # Mapping of refine search query parameter names to SOLR doc
        # field names. All refine search query parameters are implemented
        # as filter queries. For each param in GET from the left column
        # below, we add a new filter query using the field name in the
        # right column and value GET[param].
        #
        self.refine_search_fq = {
            #
            # param SOLR field name
            # ----- ---------------
            'tld': 'tld',
            'title': 'title',
            'company': 'company_name',
            'size': 'company_size',
            'tags': 'company_tags',
            'ltags': 'company_location_tags',
            'awards': 'company_awards',
            'vacation': 'vacation_year_1',
            'country': 'country',
            'state': 'state',
            'city': 'city',
        }

    def add_fq(self, filt, val):
        """Append one filter query; range filters (vacation) stay unquoted."""
        if filt != 'vacation_year_1':
            new_fq = '%s:"%s"' % (filt, val)
        else:
            new_fq = '%s:%s' % (filt, val)
        # setdefault replaces the Python-2-only dict.has_key() branch.
        self.qdict.setdefault('fq', []).append(new_fq)

    def build_query(self, GET):
        """
        GET : QueryDict object for an HTTP GET request

        Populates and returns self.qdict with the base query, optional
        bounding-box geo filter, and one fq per refine parameter.
        """
        self.qdict['q'] = '{!q.op=AND}' + GET.get('q', '*:*')
        self.qdict['wt'] = 'json'
        # BUG FIX: the original condition was
        #   if 'lat' in GET and 'lng' and GET:
        # which never actually checked for 'lng' (the string 'lng' is
        # always truthy) and then crashed on GET['lng'] when only 'lat'
        # was supplied.
        if 'lat' in GET and 'lng' in GET:
            self.qdict['fq'] = ['{!bbox}']
            self.qdict['sfield'] = 'latlng'
            self.qdict['pt'] = '%.2f,%.2f' % (float(GET['lat']), float(GET['lng']))
            self.qdict['d'] = '%.2f' % (float(GET['radius']) * KM_PER_MILE)
        for parm, filt in self.refine_search_fq.items():
            val = GET.get(parm, None)
            if val is None:
                continue
            if parm in ('tags', 'ltags', 'awards'):  # multivalued fields
                for v in val.split():
                    self.add_fq(filt, v)
            elif parm == 'vacation':
                # 5-year bucket starting at the requested minimum.
                self.add_fq(filt, '[%d TO %d]' % (int(val), int(val) + 4))
            else:
                self.add_fq(filt, val)
        return self.qdict
class SOLRJobSearchQueryBuilder(SOLRQueryBuilder):
    """Job-search query: result field list, sidebar facets, highlighting,
    and pagination on top of the base builder."""

    def __init__(self, items_per_page):
        SOLRQueryBuilder.__init__(self)
        self.items_per_page = items_per_page  # rows per result page
        #
        # Params specific to job search query with faceting for the
        # sidebar. The state facet field is set to 51 so that all of
        # the states will show up in the map (and not just 10 of them).
        #
        params = {
            'fl': 'id,title,url,url_data,company_id,company_name,company_ats,company_jobs_page_url,city,state,country',
            'facet': 'true',
            'facet.field': ['country', 'state', 'city', 'tld', 'company_size', 'company_name', 'company_tags', 'company_location_tags', 'company_awards'],
            'facet.mincount': '1',
            'facet.limit': '10',
            'f.company_tags.facet.limit': '32',
            'f.country.facet.limit': '200',
            'f.state.facet.limit': '51',
            'facet.range': 'vacation_year_1',
            'facet.range.start': '10',
            'facet.range.end': '50',
            'facet.range.gap': '5',
            'hl': 'true',
            'hl.fl': 'desc',
            'hl.snippets': 2,
            'hl.alternateField': 'desc',
            'hl.maxAlternateFieldLength': '210',
            'rows': '%d' % self.items_per_page
        }
        self.qdict.update(params)

    def build_query(self, GET):
        # Translate the 1-based ?page= parameter into SOLR's 0-based
        # 'start' row offset.
        page_number = int(GET.get('page', '1'))
        self.qdict.update({'start': '%d' % (self.items_per_page * (page_number - 1))})
        return SOLRQueryBuilder.build_query(self, GET)
class SOLRCompanyFacetQueryBuilder(SOLRQueryBuilder):
    """Facet-only query keyed by company: returns ids plus facet counts
    (company_id facet is unlimited so every company is counted)."""

    def __init__(self):
        SOLRQueryBuilder.__init__(self)
        params = {
            'fl': 'id',
            'facet': 'true',
            'facet.field': ['country', 'state', 'city', 'tld', 'company_size', 'company_tags', 'company_location_tags', 'company_awards', 'company_id'],
            'facet.mincount': '1',
            'facet.limit': '10',
            'facet.range': 'vacation_year_1',
            'facet.range.start': '10',
            'facet.range.end': '50',
            'facet.range.gap': '5',
            'f.company_tags.facet.limit': '32',
            'f.country.facet.limit': '200',
            'f.state.facet.limit': '51',
            'f.company_id.facet.limit': '-1'  # -1 = no limit in SOLR faceting
        }
        self.qdict.update(params)

    def build_query(self, GET):
        return SOLRQueryBuilder.build_query(self, GET)
class SOLRLocationFacetQueryBuilder(SOLRQueryBuilder):
    """Facet-only query keyed by location: city facet is unlimited so
    every city appears; state facet limit covers all states+extras."""

    def __init__(self):
        SOLRQueryBuilder.__init__(self)
        params = {
            'fl': 'id',
            'facet': 'true',
            'facet.field': ['country', 'state', 'city', 'tld', 'company_size', 'company_name', 'company_tags', 'company_location_tags', 'company_awards'],
            'facet.mincount': '1',
            'facet.limit': '10',
            'facet.range': 'vacation_year_1',
            'facet.range.start': '10',
            'facet.range.end': '50',
            'facet.range.gap': '5',
            'f.company_tags.facet.limit': '32',
            'f.country.facet.limit': '200',
            'f.state.facet.limit': '60',
            'f.city.facet.limit': '-1'  # -1 = no limit in SOLR faceting
        }
        self.qdict.update(params)

    def build_query(self, GET):
        return SOLRQueryBuilder.build_query(self, GET)
class SOLRJobTitleFacetQueryBuilder(SOLRQueryBuilder):
    # Placeholder subclass: currently identical to the base builder.
    pass
|
[
"[email protected]"
] | |
00ae17b2c630ccf0d4036a300ee15ed0a9356121
|
4e3c976773526fd610d64ffb83589bccfaee5e68
|
/sponge-app/sponge-app-demo-service/sponge/sponge_demo_depending.py
|
ff40131a936612f5da6d6cd33534a1a8234f44d8
|
[
"Apache-2.0"
] |
permissive
|
softelnet/sponge
|
2313d2328953fcff49a002e727bb803757870627
|
7190f23ae888bbef49d0fbb85157444d6ea48bcd
|
refs/heads/master
| 2022-10-28T16:19:55.619882 | 2021-09-16T19:50:08 | 2021-09-16T19:50:08 | 95,256,030 | 10 | 2 |
Apache-2.0
| 2022-10-04T23:55:09 | 2017-06-23T20:58:49 |
Java
|
UTF-8
|
Python
| false | false | 2,884 |
py
|
"""
Sponge Knowledge Base
Demo
"""
class DependingArgumentsAction(Action):
    # Demo action showing "provided" arguments whose value sets depend on
    # previously chosen arguments (continent -> country -> city / river).

    def onConfigure(self):
        """Declare label, the dependent argument chain, result and UI features."""
        self.withLabel("Depending arguments")
        self.withArgs([
            StringType("continent").withLabel("Continent").withProvided(ProvidedMeta().withValueSet()),
            StringType("country").withLabel("Country").withProvided(ProvidedMeta().withValueSet().withDependency("continent")),
            StringType("city").withLabel("City").withProvided(ProvidedMeta().withValueSet().withDependency("country")),
            StringType("river").withLabel("River").withProvided(ProvidedMeta().withValueSet().withDependency("continent")),
            StringType("weather").withLabel("Weather").withProvided(ProvidedMeta().withValueSet())
        ]).withResult(StringType().withLabel("Sentences"))
        self.withFeatures({"icon":"flag", "showClear":True, "showCancel":True})

    def onCall(self, continent, country, city, river, weather):
        """Format the chosen values into a single descriptive sentence."""
        return "There is a city {} in {} in {}. The river {} flows in {}. It's {}.".format(city, country, continent, river, continent, weather.lower())

    def onInit(self):
        """Build the static lookup tables used to provide value sets."""
        # Countries available per continent.
        self.countries = {
            "Africa":["Nigeria", "Ethiopia", "Egypt"],
            "Asia":["China", "India", "Indonesia"],
            "Europe":["Russia", "Germany", "Turkey"]
        }
        # Cities available per country.
        self.cities = {
            "Nigeria":["Lagos", "Kano", "Ibadan"],
            "Ethiopia":["Addis Ababa", "Gondar", "Mek'ele"],
            "Egypt":["Cairo", "Alexandria", "Giza"],
            "China":["Guangzhou", "Shanghai", "Chongqing"],
            "India":["Mumbai", "Delhi", "Bangalore"],
            "Indonesia":["Jakarta", "Surabaya", "Medan"],
            "Russia":["Moscow", "Saint Petersburg", "Novosibirsk"],
            "Germany":["Berlin", "Hamburg", "Munich"],
            "Turkey":["Istanbul", "Ankara", "Izmir"]
        }
        # Rivers available per continent.
        self.rivers = {
            "Africa":["Nile", "Chambeshi", "Niger"],
            "Asia":["Yangtze", "Yellow River", "Mekong"],
            "Europe":["Volga", "Danube", "Dnepr"]
        }

    def onProvideArgs(self, context):
        """Supply value sets; dependent ones key off context.current values."""
        if "continent" in context.provide:
            context.provided["continent"] = ProvidedValue().withValueSet(["Africa", "Asia", "Europe"])
        if "country" in context.provide:
            context.provided["country"] = ProvidedValue().withValueSet(self.countries.get(context.current["continent"], []))
        if "city" in context.provide:
            context.provided["city"] = ProvidedValue().withValueSet(self.cities.get(context.current["country"], []))
        if "river" in context.provide:
            context.provided["river"] = ProvidedValue().withValueSet(self.rivers.get(context.current["continent"], []))
        if "weather" in context.provide:
            context.provided["weather"] = ProvidedValue().withValueSet(["Sunny", "Cloudy", "Raining", "Snowing"])
|
[
"[email protected]"
] | |
b55fd799bada92e8f1cd6d17a26da62618bdf02a
|
f6a8d93c0b764f84b9e90eaf4415ab09d8060ec8
|
/Lists Advanced/the_office.py
|
de39a3b8b66d23417344eae1ded709f3c883b3b7
|
[] |
no_license
|
DimoDimchev/SoftUni-Python-Fundamentals
|
90c92f6e8128b62954c4f9c32b01ff4fbb405a02
|
970360dd6ffd54b852946a37d81b5b16248871ec
|
refs/heads/main
| 2023-03-18T17:44:11.856197 | 2021-03-06T12:00:32 | 2021-03-06T12:00:32 | 329,729,960 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 656 |
py
|
employees_list = [int(x) for x in (input().split(" "))]
HIF = int(input()) # happiness improvement factor
happy_count = 0
increased_happiness_list = list(map(lambda employee: employee * HIF, employees_list))
average_happiness = sum(increased_happiness_list) / len(increased_happiness_list)
happy_list = list(filter(lambda employee: employee >= average_happiness, increased_happiness_list))
for i in range(len(happy_list)):
happy_count += 1
if happy_count >= len(employees_list)/2:
print(f"Score: {happy_count}/{len(employees_list)}. Employees are happy!")
else:
print(f"Score: {happy_count}/{len(employees_list)}. Employees are not happy!")
|
[
"[email protected]"
] | |
8bc823c166c4a65c4048e30e2d7438e795a32306
|
018d804d6b53cc544e0adf8c38656bf27152706c
|
/ucsd_catalog_order.py
|
ed744750f3c2affb71f43eccdfbf1a19bb0c13f8
|
[] |
no_license
|
luisroco/cisco_cloud
|
c664520eb1021c7b36577a08d23dbf1b8dd7bd75
|
6bbf7c4f0c0af47860170835cfebc924f1b4c867
|
refs/heads/master
| 2021-01-09T20:11:19.048918 | 2017-02-07T19:06:58 | 2017-02-07T19:06:58 | 81,242,442 | 0 | 0 | null | 2017-02-07T18:53:53 | 2017-02-07T18:53:53 | null |
UTF-8
|
Python
| false | false | 3,208 |
py
|
#! /usr/bin/env python
'''
Command Line Utility to order a Catalog option
'''
import requests
import json
from ucsd_library import catalog_order
if __name__ == '__main__':
    import sys
    from pprint import pprint
    from argparse import ArgumentParser, FileType

    # Parse the CLI arguments describing the catalog order, then submit
    # it through ucsd_library.catalog_order and pretty-print the reply.
    p = ArgumentParser()
    p.add_argument('catalog',  # Name stored in namespace
                   metavar='UCSD Catalog',  # Arguement name displayed to user
                   help='The UCSD Catalog to order',
                   type=str
                   )
    p.add_argument('-v', '--vdc',  # Name stored in namespace
                   metavar='UCSD VDC',  # Arguement name displayed to user
                   help='The UCSD VDC to place the cVM in',
                   type=str
                   )
    p.add_argument('-c', '--comment',  # Name stored in namespace
                   metavar='UCSD Comment',  # Arguement name displayed to user
                   help='The comment to record - default blank',
                   type=str, default=""
                   )
    p.add_argument('-g', '--group',  # Name stored in namespace
                   metavar='UCSD Group',  # Arguement name displayed to user
                   help='The group to order on behalf of',
                   type=str, default=""
                   )
    p.add_argument('-n', '--vmname',  # Name stored in namespace
                   metavar='UCSD VMname',  # Arguement name displayed to user
                   help='The VM Name or prefix',
                   type=str, default=""
                   )
    p.add_argument('--vcpus',  # Name stored in namespace
                   metavar='vCPU Count',  # Arguement name displayed to user
                   help='The number of vCPUs. Only used if vDC allows',
                   type=str, default="0"
                   )
    p.add_argument('--vram',  # Name stored in namespace
                   metavar='vRAM Count',  # Arguement name displayed to user
                   help='The amount of vRAM. Only used if vDC allows',
                   type=str, default="0"
                   )
    p.add_argument('--datastores',  # Name stored in namespace
                   metavar='Datastore details',  # Arguement name displayed to user
                   help='The datastore details. Only used if vDC allows.',
                   type=str, default=""
                   )
    p.add_argument('--vnics',  # Name stored in namespace
                   metavar='vNIC Details',  # Arguement name displayed to user
                   help='The details for vNICS. Only used if vDC allows',
                   type=str, default=""
                   )
    ns = p.parse_args()

    # Submit the order with the parsed values; catalog_order returns the
    # API response, which is dumped for the operator to inspect.
    result = catalog_order(ns.catalog, ns.vdc, ns.group, ns.comment, ns.vmname, ns.vcpus, ns.vram, ns.datastores, ns.vnics)
    pprint(result)
|
[
"[email protected]"
] | |
362cdc331020a5268fd371e1eac03259c7a14bba
|
f3d01659c2a4465cdf7a5903d18058da008f1aac
|
/src/sentry/models/groupbookmark.py
|
f6cee4369c180e59d520ca7fe8093daee2869739
|
[
"BSD-2-Clause"
] |
permissive
|
Mattlk13/sentry-1
|
f81a1e5dc5d02a07e5c6bbcdb5e1ce53f24f53c1
|
19b0870916b80250f3cb69277641bfdd03320415
|
refs/heads/master
| 2023-08-30T21:49:49.319791 | 2019-07-30T19:23:07 | 2019-07-30T19:23:07 | 81,418,058 | 0 | 1 |
BSD-3-Clause
| 2023-04-04T00:22:49 | 2017-02-09T06:36:41 |
Python
|
UTF-8
|
Python
| false | false | 1,064 |
py
|
from __future__ import absolute_import
from django.conf import settings
from django.db import models
from django.utils import timezone
from sentry.db.models import FlexibleForeignKey, Model, BaseManager, sane_repr
class GroupBookmark(Model):
    """
    Identifies a bookmark relationship between a user and an
    aggregated event (Group).
    """
    # Not part of the "core" model set (see sentry's __core__ convention).
    __core__ = False

    project = FlexibleForeignKey('sentry.Project', related_name="bookmark_set")
    group = FlexibleForeignKey('sentry.Group', related_name="bookmark_set")
    # namespace related_name on User since we don't own the model
    user = FlexibleForeignKey(settings.AUTH_USER_MODEL, related_name="sentry_bookmark_set")
    date_added = models.DateTimeField(default=timezone.now, null=True)

    objects = BaseManager()

    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_groupbookmark'
        # composite index includes project for efficient queries
        unique_together = (('project', 'user', 'group'), )

    __repr__ = sane_repr('project_id', 'group_id', 'user_id')
|
[
"[email protected]"
] | |
7c25ff18b341cd872a8a25f0dcfbf1023a780010
|
48ca6f9f041a1e9f563500c8a7fa04dbb18fa949
|
/pygears/typing/qround.py
|
ea79fe0998313278f899a4b014df440c38f3cbb8
|
[
"MIT"
] |
permissive
|
bogdanvuk/pygears
|
71404e53d4689ec9cdd9db546bfc0f229a7e02da
|
705b11ab6de79868b25753fa9d0ce7128791b346
|
refs/heads/master
| 2023-07-08T11:38:54.625172 | 2022-03-07T12:29:00 | 2022-03-07T12:29:00 | 124,890,922 | 146 | 16 |
MIT
| 2022-08-15T07:57:08 | 2018-03-12T13:10:06 |
Python
|
UTF-8
|
Python
| false | false | 1,286 |
py
|
from .cast import value_cast, type_cast
from .fixp import Fixp, Ufixp
from .uint import Uint, Bool, Int, code
def get_out_type(val_type, fract):
    """Result type of qround()/qround_even() applied to `val_type`.

    The result keeps one extra integer bit (to absorb the rounding carry)
    plus `fract` fractional bits; with fract == 0 it collapses to a plain
    (U)int of integer + 1 bits.

    Raises:
        TypeError: when `val_type` has <= `fract` fractional bits, i.e.
            rounding would have to *add* fractional precision.
    """
    if get_cut_bits(val_type, fract) <= 0:
        raise TypeError(
            f'Cannot qround type "{val_type}" with "{val_type.fract}" '
            f'fractional bits, to produce the type with more fractional '
            f'bits "fract={fract}"'
        )

    if fract != 0:
        return val_type.base[val_type.integer + 1, val_type.integer + fract + 1]
    else:
        return (Int if val_type.signed else Uint)[val_type.integer + 1]
def get_cut_bits(val_type, fract):
    """Number of fractional bits removed when rounding `val_type` down
    to `fract` fractional bits (negative/zero means nothing to cut)."""
    available_fract = val_type.fract
    return available_fract - fract
def qround(val, fract=0):
    """Round the fixed-point `val` to `fract` fractional bits by adding
    half an output LSB and truncating (round-half-up behavior).

    The result type has one extra integer bit to absorb the carry
    (see get_out_type).
    """
    cut_bits = get_cut_bits(type(val), fract)
    out_type = get_out_type(type(val), fract)
    # Work on the raw encoding; view signed values as Int so arithmetic
    # is two's-complement correct.
    val_coded = code(val, Int) if type(val).signed else code(val)

    res = val_coded + (Bool(1) << (cut_bits - 1))  # + 0.5 output LSB
    return out_type.decode(res[cut_bits:])  # drop the cut fraction bits
def qround_even(val, fract=0):
    """Round the fixed-point `val` to `fract` fractional bits using
    convergent (round-half-to-even) rounding, as the name implies.

    The correction added is `round_bit` followed by (cut_bits - 1)
    copies of its complement, which biases exact halves toward the even
    neighbor instead of always up.
    """
    cut_bits = get_cut_bits(type(val), fract)
    out_type = get_out_type(type(val), fract)
    # Raw two's-complement encoding (signed values viewed as Int).
    val_coded = code(val, Int) if type(val).signed else code(val)

    round_bit = val_coded[cut_bits]  # LSB that survives the cut
    res = val_coded + Uint([round_bit] + [~round_bit] * (cut_bits - 1))
    return out_type.decode(res[cut_bits:])
|
[
"[email protected]"
] | |
673ab9861bcae85a1a55c3ed742550710ec90195
|
99d7a6448a15e7770e3b6f3859da043300097136
|
/src/hardware/core/i_core_device.py
|
653c0e71ab0666d2da9b754da7fe944a400daac1
|
[] |
no_license
|
softtrainee/arlab
|
125c5943f83b37bc7431ae985ac7b936e08a8fe4
|
b691b6be8214dcb56921c55daed4d009b0b62027
|
refs/heads/master
| 2020-12-31T07:54:48.447800 | 2013-05-06T02:49:12 | 2013-05-06T02:49:12 | 53,566,313 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,211 |
py
|
#===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
#============= enthought library imports =======================
from traits.api import Interface
#============= standard library imports ========================
#============= local library imports ==========================
class ICoreDevice(Interface):
    """Traits interface contract for core devices: implementations must
    expose a readable value (get) and a writable value (set)."""

    def get(self):
        '''
        Read and return the device's current value.
        '''

    def set(self, *args, **kw):
        '''
        Write/update the device's value.
        '''
#============= views ===================================
#============= EOF ====================================
|
[
"jirhiker@localhost"
] |
jirhiker@localhost
|
1e8c67d8c6ce32961276b4ea876788f030175bf7
|
d9b2805a8b39f147bd77e35c8e96e0cbd5eaa726
|
/flask공부/flaskTest/bin/pip
|
7eb65fc06f5c5c461cfe88d74e5a3c61d6549aab
|
[] |
no_license
|
LeeInHaeng/Study
|
ca8e3e2d4111dc3f742eefea541a67739d729e75
|
96bdb1d224702cebb8a6de6bbd596b075ee33f7b
|
refs/heads/master
| 2020-03-28T11:03:03.848316 | 2019-04-20T08:33:26 | 2019-04-20T08:33:26 | 148,172,460 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 241 |
#!/home/lih/pythonTest/flaskTest/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | ||
2f1f274dd1ad0310608a42872e14fff7fbf05b1f
|
c65dfb808e23263b8f3f703a4f31ea7e153b4efd
|
/tockstats.py
|
62fa54d729eb6e180e8686f52ea5921fa2030dd9
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
18F/quick-stats
|
68fcd3bc9fde390f1a74a370f232dd8086865b07
|
400b48bcebea242ac574dd30f870ed1687c3b863
|
refs/heads/master
| 2021-01-17T06:08:00.304550 | 2016-07-27T16:09:22 | 2016-07-27T16:09:22 | 64,323,703 | 0 | 2 | null | 2016-08-08T15:25:00 | 2016-07-27T16:15:43 |
Python
|
UTF-8
|
Python
| false | false | 1,030 |
py
|
"""Hours statistics from Tock exports"""
from collections import Counter
from csv import DictReader
import sys
def file_to_counter(filename):
"""Read CSV, convert it to a counter of hours by project"""
counter = Counter()
with open(filename) as csvfile:
reader = DictReader(csvfile)
for row in reader:
counter[row['Project']] += float(row['Number of Hours'])
return counter
def merge_counters(counters):
totals = Counter()
for counter in counters:
for key, value in counter.items():
totals[key] += value
return totals
def print_totals(totals):
total = sum(totals.values())
for project, amount in totals.most_common(20):
print("{}: {}/{} = {}".format(project, amount, total, amount/total))
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python tockstats.py FILE.csv [FILE2.csv ...]")
else:
counters = [file_to_counter(f) for f in sys.argv[1:]]
print_totals(merge_counters(counters))
|
[
"[email protected]"
] | |
ba92d4f9f437fcf74daf2e0b5f28089408f310c4
|
aaa06c63f0fba6c5aad5121d83715d0be828ce4e
|
/OpenStreetMap/models.py
|
6746038957e195d82202ad40ba008a0f5667564b
|
[] |
no_license
|
scotm/Comrade
|
b023b338f0daf5d083ae37e2e3a73d3d424f8a7c
|
c7186f00cd20916a78cc2282ea201f440102ebb7
|
refs/heads/master
| 2020-05-18T06:49:01.411310 | 2014-07-25T08:13:10 | 2014-07-25T08:13:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,658 |
py
|
from django.contrib.gis.db import models
class BaseOsmModel(models.Model):
access = models.TextField(blank=True)
addr_housename = models.TextField(db_column='addr:housename', blank=True)
addr_housenumber = models.TextField(db_column='addr:housenumber', blank=True)
addr_interpolation = models.TextField(db_column='addr:interpolation', blank=True)
admin_level = models.TextField(blank=True)
aerialway = models.TextField(blank=True)
aeroway = models.TextField(blank=True)
amenity = models.TextField(blank=True)
area = models.TextField(blank=True)
barrier = models.TextField(blank=True)
bicycle = models.TextField(blank=True)
boundary = models.TextField(blank=True)
brand = models.TextField(blank=True)
bridge = models.TextField(blank=True)
building = models.TextField(blank=True)
construction = models.TextField(blank=True)
covered = models.TextField(blank=True)
culvert = models.TextField(blank=True)
cutting = models.TextField(blank=True)
denomination = models.TextField(blank=True)
disused = models.TextField(blank=True)
embankment = models.TextField(blank=True)
foot = models.TextField(blank=True)
generator_source = models.TextField(db_column='generator:source', blank=True)
harbour = models.TextField(blank=True)
highway = models.TextField(blank=True)
historic = models.TextField(blank=True)
horse = models.TextField(blank=True)
intermittent = models.TextField(blank=True)
junction = models.TextField(blank=True)
landuse = models.TextField(blank=True)
layer = models.TextField(blank=True)
leisure = models.TextField(blank=True)
lock = models.TextField(blank=True)
man_made = models.TextField(blank=True)
military = models.TextField(blank=True)
motorcar = models.TextField(blank=True)
name = models.TextField(blank=True)
natural = models.TextField(blank=True)
office = models.TextField(blank=True)
oneway = models.TextField(blank=True)
operator = models.TextField(blank=True)
place = models.TextField(blank=True)
population = models.TextField(blank=True)
power = models.TextField(blank=True)
power_source = models.TextField(blank=True)
public_transport = models.TextField(blank=True)
railway = models.TextField(blank=True)
ref = models.TextField(blank=True)
religion = models.TextField(blank=True)
route = models.TextField(blank=True)
service = models.TextField(blank=True)
shop = models.TextField(blank=True)
sport = models.TextField(blank=True)
surface = models.TextField(blank=True)
toll = models.TextField(blank=True)
tourism = models.TextField(blank=True)
tower_type = models.TextField(db_column='tower:type', blank=True)
tunnel = models.TextField(blank=True)
water = models.TextField(blank=True)
waterway = models.TextField(blank=True)
wetland = models.TextField(blank=True)
width = models.TextField(blank=True)
wood = models.TextField(blank=True)
z_order = models.IntegerField(blank=True, null=True)
class Meta:
abstract = True
# Create your models here.
class PlanetOsmLine(BaseOsmModel):
osm_id = models.BigIntegerField(blank=True, primary_key=True)
way_area = models.FloatField(blank=True, null=True)
way = models.LineStringField(srid=900913, blank=True, null=True)
objects = models.GeoManager()
class Meta:
managed = False
db_table = 'planet_osm_line'
class PlanetOsmPoint(BaseOsmModel):
osm_id = models.BigIntegerField(blank=True, primary_key=True)
capital = models.TextField(blank=True)
ele = models.TextField(blank=True)
poi = models.TextField(blank=True)
way = models.PointField(srid=900913, blank=True, null=True)
objects = models.GeoManager()
class Meta:
managed = False
db_table = 'planet_osm_point'
class PlanetOsmPolygon(BaseOsmModel):
osm_id = models.BigIntegerField(blank=True, primary_key=True)
tracktype = models.TextField(blank=True)
way_area = models.FloatField(blank=True, null=True)
way = models.GeometryField(srid=900913, blank=True, null=True)
objects = models.GeoManager()
class Meta:
managed = False
db_table = 'planet_osm_polygon'
class PlanetOsmRoads(BaseOsmModel):
osm_id = models.BigIntegerField(blank=True, primary_key=True)
tracktype = models.TextField(blank=True)
way_area = models.FloatField(blank=True, null=True)
way = models.LineStringField(srid=900913, blank=True, null=True)
objects = models.GeoManager()
class Meta:
managed = False
db_table = 'planet_osm_roads'
|
[
"[email protected]"
] | |
97fe4b22a0d5bd7822f3f5e943e4fad93fc6b66d
|
9de9e636cf845c681fdbf1c6c058cc69d5d05da5
|
/IO/Writer.py
|
5ed136de1753e4ebcc60d562cf59aef0e316b217
|
[] |
no_license
|
dxcv/Portfolio-Management-1
|
4278eebb5c91a3a02ea76398b681ef9dc5beeb1f
|
9f188aeab3177d0a13bae32e3a318a4f18642a3c
|
refs/heads/master
| 2020-12-05T21:48:37.690004 | 2019-01-03T01:34:41 | 2019-01-03T01:34:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,408 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 15 12:51:44 2018
Write to excel function
@author: ACM05
"""
import pandas as pd
import IO.IO_Tools_Func as IO_TF
class Writer():
def __init__( self,
f_name ):
""" Writer object for
defined format
"""
self.f_name = f_name
self.writer = pd.ExcelWriter( f_name,
engine='xlsxwriter',
options={'nan_inf_to_errors': True})
self.book = self.writer.book
""" Loading all format settings
"""
self.header_format = self.book.add_format(IO_TF.get_format())
self.ticker_format = self.book.add_format(IO_TF.get_ticker_format())
self.thousand_format = self.book.add_format(IO_TF.get_num_k_format())
self.bold_format = self.book.add_format(IO_TF.get_format_bold())
self.pct_format = self.book.add_format(IO_TF.get_num_pct_format())
self.BPS_format = self.book.add_format(IO_TF.get_num_BPS_format())
def add_sheet( self,
s_name ):
""" Add sheets into this workbook
Please pre define all worksheet names
"""
workbook = self.writer.book
worksheet = workbook.add_worksheet( s_name )
self.writer.sheets[s_name] = worksheet
def write_ticker( self,
s_name,
i_row,
i_col,
i_string ):
""" Write tickers with defined formatting
"""
worksheet = self.writer.sheets[s_name]
worksheet.write( i_row, i_col,
i_string, self.ticker_format )
def write_raw( self,
s_name,
i_row,
i_col,
i_string ):
""" Write string into given file with sheet name
raw data without design
"""
worksheet = self.writer.sheets[s_name]
worksheet.write( i_row, i_col,
i_string, self.bold_format )
def write_df( self,
i_row,
i_col,
df,
s_name ):
""" Write to excel given
file name and sheet name
"""
""" Step one load formatting
"""
worksheet = self.writer.sheets[s_name]
""" Step two write df into this work sheet
"""
df = df.reset_index()
df = IO_TF.Add_Sum_Row_df(df, "ALL")
df.to_excel( self.writer,
s_name,
startrow = i_row,
startcol = i_col,
index = False )
for col, value in enumerate(df.columns.values):
worksheet.write( i_row, col+i_col,
value, self.header_format )
for col, value in enumerate(df.iloc[-1]):
if value == value:
worksheet.write( i_row+df.shape[0], col+i_col,
value, self.bold_format )
else:
worksheet.write( i_row+df.shape[0], col+i_col,
"", self.bold_format )
def close( self ):
self.writer.save()
|
[
"[email protected]"
] | |
d0fd9ae97dd8894464641a2387bc5db31a6ea3a3
|
04bd3387ed96a9856c14f76e3022171305203a72
|
/GetPopuler.py
|
348fc46c31c5691ec2af8fdeaedfdaec2f02e79d
|
[] |
no_license
|
Yashwant94308/ATG-Selenium
|
bb3fff41b642951db3b5ab605d524ddcee4794f1
|
39424bee93e49f752105dd35311c2569e1a2de43
|
refs/heads/master
| 2023-05-26T04:36:58.998935 | 2021-05-29T08:34:26 | 2021-05-29T08:34:26 | 371,921,460 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 249 |
py
|
import requests, json
response = requests.get(
'https://www.flickr.com/services/rest/?method=flickr.photos.getPopular&api_key=22a1377a56b4c384b61b723a80a73492'
'&user_id=193065083%40N04&format=json&nojsoncallback=1')
print(response.json())
|
[
"[email protected]"
] | |
5ddbda28127ab2fb18249701f06df9c1649219a4
|
8fe781f8ac5b1c1d5214ac5a87c5ad855f791a6d
|
/src/clean_data.py
|
90720e0134fea7776aa816fbd08598bb52e51b1b
|
[] |
no_license
|
ternaus/kaggle_ultrasound
|
fabf45b89f5ab0888bb22e9b5205d90b14ce8f06
|
2d688d0cea8e2b1651980e972b1d6400b797c70b
|
refs/heads/master
| 2021-01-11T15:48:19.835115 | 2016-08-20T01:47:37 | 2016-08-20T01:47:37 | 64,818,757 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,349 |
py
|
from __future__ import division
"""
This script makes train data less noisy in a way:
Finds similar images assigns to these clusters of images max mask
"""
import networkx as nx
import os
import pandas as pd
from tqdm import tqdm
from PIL import Image
import glob
import pandas as pd
import cv2
import os
import numpy as np
from pylab import *
from tqdm import tqdm
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.spatial.distance import pdist, squareform
image_rows = 420
image_cols = 580
data_path = '../data'
train_data_path = os.path.join(data_path, 'train')
images = os.listdir(train_data_path)
total = len(images) / 2
imgs = np.ndarray((total, 1, image_rows, image_cols), dtype=np.uint8)
imgs_mask = np.ndarray((total, 1, image_rows, image_cols), dtype=np.uint8)
i = 0
print('-'*30)
print('Creating training images...')
print('-'*30)
for image_name in tqdm(images):
if 'mask' in image_name:
continue
image_mask_name = image_name.split('.')[0] + '_mask.tif'
img = cv2.imread(os.path.join(train_data_path, image_name), cv2.IMREAD_GRAYSCALE)
img_mask = cv2.imread(os.path.join(train_data_path, image_mask_name), cv2.IMREAD_GRAYSCALE)
img = np.array([img])
img_mask = np.array([img_mask])
imgs[i] = img
imgs_mask[i] = img_mask
i += 1
print('Loading done.')
train_ids = [x for x in images if 'mask' not in x]
train = pd.DataFrame()
train['subject'] = map(lambda x: int(x.split('_')[0]), train_ids)
train['filename'] = train_ids
train['image_num'] = map(lambda x: int(x.split('.')[0].split('_')[1]), train_ids)
imgs_flat = np.reshape(imgs, (5635, 420*580))
for subject in train['subject'].unique():
a = imgs_flat[(train['subject'] == subject).astype(int).values == 1]
b = squareform(pdist(a))
graph = []
for i in range(1, 2000):
for j in range(i + 1, 120):
if (b < 5000)[(i, j)]:
graph += [(i, j)]
G = nx.Graph()
G.add_edges_from(graph)
connected_components = list(map(list, nx.connected_component_subgraphs(G)))
clusters = pd.DataFrame(zip(range(len(connected_components), connected_components)),
columns=['cluster_name', 'components'])
temp = pd.DataFrame()
temp['image_num'] = train.loc[(train['subject'] == subject), 'image_num']
temp['subject'] = subject
|
[
"[email protected]"
] | |
ac8bb2b49f625d413a32f8fef679bc03ce802ab6
|
ade22d64b99e7306eaeaf06684cc9c4f2d539881
|
/oscquintette/tests/v1/test_plugin.py
|
36de987851507a942e89237853e783acf38e25f1
|
[
"Apache-2.0"
] |
permissive
|
dtroyer/osc-quintette
|
59204e4ad2e25be237fb3ec13cbb5087518197d6
|
e37585936b1db9e87ab52e11e714afaf167a0039
|
refs/heads/master
| 2020-04-04T22:57:54.745055 | 2015-01-15T06:42:16 | 2015-01-15T06:42:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,266 |
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from oscquintette.tests import base
from oscquintette.tests import fakes
from oscquintette.v1 import plugin
# Load the plugin init module for the plugin list and show commands
import oscquintette.plugin
plugin_name = 'oscquintette'
plugin_client = 'oscquintette.plugin'
class FakePluginV1Client(object):
def __init__(self, **kwargs):
#self.servers = mock.Mock()
#self.servers.resource_class = fakes.FakeResource(None, {})
self.auth_token = kwargs['token']
self.management_url = kwargs['endpoint']
class TestPluginV1(base.TestCommand):
def setUp(self):
super(TestPluginV1, self).setUp()
self.app.client_manager.oscquintette = FakePluginV1Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
# Get a shortcut to the Service Catalog Mock
#self.catalog_mock = self.app.client_manager.identity.service_catalog
#self.catalog_mock.reset_mock()
class TestPluginList(TestPluginV1):
def setUp(self):
super(TestPluginList, self).setUp()
self.app.ext_modules = [
sys.modules[plugin_client],
]
# Get the command object to test
self.cmd = plugin.ListPlugin(self.app, None)
def test_plugin_list(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
collist = ('Name', 'Versions', 'Module')
self.assertEqual(columns, collist)
datalist = ((
plugin_name,
oscquintette.plugin.API_VERSIONS.keys(),
plugin_client,
), )
self.assertEqual(tuple(data), datalist)
class TestPluginShow(TestPluginV1):
def setUp(self):
super(TestPluginShow, self).setUp()
self.app.ext_modules = [
sys.modules[plugin_client],
]
# Get the command object to test
self.cmd = plugin.ShowPlugin(self.app, None)
def test_plugin_show(self):
arglist = [
plugin_name,
]
verifylist = [
('name', plugin_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
collist = ('1', 'module', 'name')
self.assertEqual(columns, collist)
datalist = (
oscquintette.plugin.API_VERSIONS['1'],
plugin_client,
plugin_name,
)
self.assertEqual(data, datalist)
|
[
"[email protected]"
] | |
dd84a0764d1cd38b85cddd32caf67859a5427497
|
4ac77337083c7fdb28a901831003cfd0e0ef7bf1
|
/any_urlfield/models/fields.py
|
2d6a67b84cb2f887d661bd1a22600a432304957f
|
[
"Apache-2.0"
] |
permissive
|
borgstrom/django-any-urlfield
|
deb6a10b87c26f53bb3ca5085d486238ab6c2a6c
|
3f97bfd628a5770268b715ee8f796aaab89cf841
|
refs/heads/master
| 2020-12-11T02:13:14.725873 | 2013-12-12T21:55:12 | 2013-12-12T21:55:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,655 |
py
|
"""
Custom model fields to link to CMS content.
"""
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.db import models
from any_urlfield.models.values import AnyUrlValue
from any_urlfield.registry import UrlTypeRegistry
class AnyUrlField(models.CharField):
"""
A CharField that can either refer to a CMS page ID, or external URL.
.. figure:: /images/anyurlfield1.*
:width: 363px
:height: 74px
:alt: AnyUrlField, with external URL input.
.. figure:: /images/anyurlfield2.*
:width: 290px
:height: 76px
:alt: AnyUrlField, with internal page input.
By default, the ``AnyUrlField`` only supports linking to external pages.
To add support for your own models (e.g. an ``Article`` model),
include the following code in :file:`models.py`:
.. code-block:: python
from any_urlfield.models import AnyUrlField
AnyUrlField.register_model(Article)
Now, the ``AnyUrlField`` offers users a dropdown field to directly select an article.
By default, it uses a :class:`django.forms.ModelChoiceField` field with a :class:`django.forms.Select` widget
to render the field. This can be customized using the ``form_field`` and ``widget`` parameters:
.. code-block:: python
from any_urlfield.models import AnyUrlField
from any_urlfield.forms import SimpleRawIdWidget
AnyUrlField.register_model(Article, widget=SimpleRawIdWidget(Article))
Now, the ``Article`` model will be displayed as raw input field with a browse button.
"""
__metaclass__ = models.SubfieldBase
_static_registry = UrlTypeRegistry() # Also accessed by AnyUrlValue as internal field.
def __init__(self, *args, **kwargs):
if not kwargs.has_key('max_length'):
kwargs['max_length'] = 300
super(AnyUrlField, self).__init__(*args, **kwargs)
@classmethod
def register_model(cls, ModelClass, form_field=None, widget=None, title=None, prefix=None):
"""
Register a model to use in the URL field.
This function needs to be called once for every model
that should be selectable in the URL field.
:param ModelClass: The model to register.
:param form_field: The form field class used to render the field.
:param widget: The widget class, can be used instead of the form field.
:param title: The title of the model, by default it uses the models ``verbose_name``.
:param prefix: A custom prefix for the model in the serialized database format. By default it uses "appname.modelname".
"""
cls._static_registry.register(ModelClass, form_field, widget, title, prefix)
def formfield(self, **kwargs):
# Associate formfield.
# Import locally to avoid circular references.
from any_urlfield.forms.fields import AnyUrlField as AnyUrlFormField
kwargs['form_class'] = AnyUrlFormField
kwargs['url_type_registry'] = self._static_registry
if kwargs.has_key('widget'):
del kwargs['widget']
return super(AnyUrlField, self).formfield(**kwargs)
def to_python(self, value):
if isinstance(value, AnyUrlValue):
return value
# Convert the string value
if value is None:
return None
return AnyUrlValue.from_db_value(value, self._static_registry)
def get_prep_value(self, value):
if isinstance(value, basestring):
# Happens with south migration
return value
elif value is None:
return None if self.null else ''
else:
# Convert back to string
return value.to_db_value()
def value_to_string(self, obj):
# For dumpdata
value = self._get_val_from_obj(obj)
return self.get_prep_value(value)
def validate(self, value, model_instance):
# Final validation of the field, before storing in the DB.
super(AnyUrlField, self).validate(value, model_instance)
if value:
if value.type_prefix == 'http':
validate_url = URLValidator()
validate_url(value.type_value)
elif value.type_value:
if not value.exists():
raise ValidationError(self.error_messages['invalid_choice'] % value.type_value)
# Tell South how to create custom fields
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], [
"^" + __name__.replace(".", "\.") + "\.AnyUrlField",
])
except ImportError:
pass
|
[
"[email protected]"
] | |
40a3d067d1e3b7a8dc8e422b14866b6111bd77a8
|
3e9ac661325657664f3f7fa26ff2edf5310a8341
|
/python/demo100/15.py
|
82e514ce0d7a6957012d7aafb52d784906df006e
|
[] |
no_license
|
JollenWang/study
|
47d1c22a6e15cb82d0ecfc6f43e32e3c61fbad36
|
660a47fd60dd1415f71da362232d710b322b932f
|
refs/heads/master
| 2020-06-15T23:53:37.625988 | 2017-04-21T11:18:20 | 2017-04-21T11:18:20 | 75,257,807 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 490 |
py
|
#!/usr/bin/python
#-*- coding:utf-8 -*-
#author : Jollen Wang
#date : 2016/05/10
#version: 1.0
'''
题目:利用条件运算符的嵌套来完成此题:学习成绩>=90分的同学用A表示,60-89分之间的用B表示,60分以下的用C表示。
'''
def main():
score = int(raw_input("$>Enter the score:"))
print "grade=",
if score >= 90:
print "A"
elif score >= 60:
print "B"
else:
print "C"
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
f502a1ab4fbd3fb3c402deb9bcb0a91171c04ca9
|
3ea75e35408de10bba250f52120b5424bd50fdd9
|
/py/plotSigzFunc.py
|
21812552da7cc7dfcdf7898e0e091f012d051cf2
|
[] |
no_license
|
jobovy/segue-maps
|
9848fe59ee24a11a751df4f8855c40f2480aef23
|
ed20b1058a98618700a20da5aa9b5ebd2ea7719b
|
refs/heads/main
| 2022-11-30T15:27:08.079999 | 2016-12-20T04:28:26 | 2016-12-20T04:28:26 | 40,663,061 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,199 |
py
|
import os, os.path
import math
import numpy
import cPickle as pickle
from matplotlib import pyplot
from optparse import OptionParser
from scipy import optimize, special
from galpy.util import bovy_coords, bovy_plot
def plotSigzFunc(parser):
(options,args)= parser.parse_args()
if len(args) == 0:
parser.print_help()
return
if os.path.exists(args[0]):#Load savefile
savefile= open(args[0],'rb')
params1= pickle.load(savefile)
samples1= pickle.load(savefile)
savefile.close()
else:
print "Need to give filename ..."
if os.path.exists(args[1]):#Load savefile
savefile= open(args[1],'rb')
params1= pickle.load(savefile)
samples2= pickle.load(savefile)
savefile.close()
else:
print "Need to give filename ..."
#First one
zs= numpy.linspace(0.3,1.2,1001)
xrange= [0.,1.3]
yrange= [0.,60.]
#Now plot the mean and std-dev from the posterior
zmean= numpy.zeros(len(zs))
nsigs= 3
zsigs= numpy.zeros((len(zs),2*nsigs))
fs= numpy.zeros((len(zs),len(samples1)))
ds= zs-0.5
for ii in range(len(samples1)):
thisparams= samples1[ii]
fs[:,ii]= math.exp(thisparams[1])+thisparams[2]*ds+thisparams[3]*ds**2.
#Record mean and std-devs
zmean[:]= numpy.mean(fs,axis=1)
bovy_plot.bovy_print()
xlabel=r'$|z|\ [\mathrm{kpc}]$'
ylabel=r'$\sigma_z\ [\mathrm{km\ s}^{-1}]$'
bovy_plot.bovy_plot(zs,zmean,'k-',xrange=xrange,yrange=yrange,
xlabel=xlabel,
ylabel=ylabel)
for ii in range(nsigs):
for jj in range(len(zs)):
thisf= sorted(fs[jj,:])
thiscut= 0.5*special.erfc((ii+1.)/math.sqrt(2.))
zsigs[jj,2*ii]= thisf[int(math.floor(thiscut*len(samples1)))]
thiscut= 1.-thiscut
zsigs[jj,2*ii+1]= thisf[int(math.floor(thiscut*len(samples1)))]
colord, cc= (1.-0.75)/nsigs, 1
nsigma= nsigs
pyplot.fill_between(zs,zsigs[:,0],zsigs[:,1],color='0.75')
while nsigma > 1:
pyplot.fill_between(zs,zsigs[:,cc+1],zsigs[:,cc-1],
color='%f' % (.75+colord*cc))
pyplot.fill_between(zs,zsigs[:,cc],zsigs[:,cc+2],
color='%f' % (.75+colord*cc))
cc+= 1.
nsigma-= 1
bovy_plot.bovy_plot(zs,zmean,'k-',overplot=True)
#Second one
zmean= numpy.zeros(len(zs))
zsigs= numpy.zeros((len(zs),2*nsigs))
fs= numpy.zeros((len(zs),len(samples2)))
for ii in range(len(samples2)):
thisparams= samples2[ii]
fs[:,ii]= math.exp(thisparams[1])+thisparams[2]*ds+thisparams[3]*ds**2.
#Record mean and std-devs
zmean[:]= numpy.mean(fs,axis=1)
for ii in range(nsigs):
for jj in range(len(zs)):
thisf= sorted(fs[jj,:])
thiscut= 0.5*special.erfc((ii+1.)/math.sqrt(2.))
zsigs[jj,2*ii]= thisf[int(math.ceil(thiscut*len(samples2)))]
thiscut= 1.-thiscut
zsigs[jj,2*ii+1]= thisf[int(math.ceil(thiscut*len(samples2)))]
colord, cc= (1.-0.75)/nsigs, 1
nsigma= nsigs
pyplot.fill_between(zs,zsigs[:,0],zsigs[:,1],color='0.75')
while nsigma > 1:
pyplot.fill_between(zs,zsigs[:,cc+1],zsigs[:,cc-1],
color='%f' % (.75+colord*cc))
pyplot.fill_between(zs,zsigs[:,cc],zsigs[:,cc+2],
color='%f' % (.75+colord*cc))
cc+= 1.
nsigma-= 1
bovy_plot.bovy_plot(zs,zmean,'k-',overplot=True)
bovy_plot.bovy_text(r'$-0.4 < [\mathrm{Fe/H}] < 0.5\,, \ \ -0.25 < [\alpha/\mathrm{Fe}] < 0.2$',bottom_right=True)
bovy_plot.bovy_text(r'$-1.5 < [\mathrm{Fe/H}] < -0.5\,, \ \ 0.25 < [\alpha/\mathrm{Fe}] < 0.5$',top_left=True)
bovy_plot.bovy_end_print(options.plotfile)
return None
def get_options():
usage = "usage: %prog [options] <savefilename>\n\nsavefilename= name of the file that the fit/samples will be saved to"
parser = OptionParser(usage=usage)
parser.add_option("-o",dest='plotfile',
help="Name of file for plot")
return parser
if __name__ == '__main__':
plotSigzFunc(get_options())
|
[
"[email protected]"
] | |
28bf8e32b2fc71691571cc473c7d4d6c7cefcf3a
|
fe98f7502a5724be0ec7ec3ae73ff4703d299d6e
|
/Neural Tree/data.py
|
1b85e274b45f66319d308125e39f23e90bf4375f
|
[] |
no_license
|
SoumitraAgarwal/BTP
|
92ab095aacf3dd374148f40b9e777bb49c4253f1
|
07df960ad7e8680680a9d3494c8a860b394867d1
|
refs/heads/master
| 2020-03-16T12:39:13.548988 | 2018-05-09T06:09:11 | 2018-05-09T06:09:11 | 132,671,601 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,513 |
py
|
import random
import math
import pandas as pd
import numpy as np
from sklearn import preprocessing
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d, Axes3D
plt.switch_backend('agg')
random.seed(311)
def generate(radius, centre):
alpha = 2 * math.pi * random.random()
r = radius*random.random()
x = r*math.cos(alpha) + centre[0]
y = r*math.sin(alpha) + centre[1]
return [x,y]
k = 10
n = 600
ranger = 500
C = []
X = []
Y = []
for j in range(k):
T = [random.uniform(0, ranger), random.uniform(0, ranger)]
temp = []
C.append([[j*ranger + random.uniform(0, ranger), ranger*random.uniform(0, k)], 400*random.uniform(0, 1)])
for i in range(n):
temp.append(generate(C[j][1], C[j][0]))
temp = np.asarray(temp)
Y.append(np.matmul(temp,T))
X.append(temp)
X = np.asarray(X)
Y = np.asarray(Y)
fig = plt.figure()
ax1 = fig.add_subplot(111)
colors = cm.rainbow(np.linspace(0, 1, len(Y)))
for i in range(k):
x1, y1 = X[i].T
ax1.scatter( x1,y1, s = 3, marker="o", label='target', color=colors[i])
plt.savefig('Data.png')
X1 = []
X2 = []
for i in range(k):
x1,x2 = X[i].T
X1.append(x1)
X2.append(x2)
X1 = np.asarray(X1)
X2 = np.asarray(X2)
Y = Y.ravel()
X1 = X1.ravel()
X2 = X2.ravel()
X1 = preprocessing.scale(X1)
X2 = preprocessing.scale(X2)
Y = preprocessing.scale(Y)
data = pd.DataFrame(data = {
'X1':X1,
'X2':X2,
'Y' :Y
})
data = data.sample(frac=1).reset_index(drop=True)
data.to_csv('data.csv', index = False)
|
[
"[email protected]"
] | |
ef3d990361a736c2c8243ef71653066e995e9f04
|
a1c7b21d96d6326790831b2b3115fcd2563655a4
|
/pylidc/__init__.py
|
95c187456f43a5b9aafdc6d2673def316432c058
|
[
"MIT"
] |
permissive
|
jovsa/pylidc
|
3837b17fbe02bc60817081a349681612f24b2f81
|
bd378a60a4b0e6dfb569afb25c3dfcbbcd169550
|
refs/heads/master
| 2021-06-13T02:45:41.359793 | 2017-03-10T23:14:57 | 2017-03-10T23:14:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,068 |
py
|
"""
--------------------------------------------------------
Author: Matt Hancock, [email protected]
--------------------------------------------------------
This python module implements an (ORM) object relational mapping
to an sqlite database containing the annotation information from
the XML files provided by the LIDC dataset. The purpose of this
module is to make for easier data querying and to include
functional aspects of the data models in addition to pure
attribute information, e.g., computing nodule centroids from
contour attribtues.
The ORM is implemented using sqlalchemy. There are three data models:
Scan, Annotation, and Contour
The relationships are "one to many" for each model going left
to right, i.e., scans have many annotations and annotations
have many contours.
For more information, see the model classes themselves.
"""
from __future__ import print_function as _pf
__version__ = '0.1.2'
# Hidden stuff.
import os as _os
import pkg_resources as _pr
from sqlalchemy import create_engine as _create_engine
from sqlalchemy.orm import sessionmaker as _sessionmaker
_dbpath = _pr.resource_filename('pylidc', 'pylidc.sqlite')
_engine = _create_engine('sqlite:///'+_dbpath)
_session = _sessionmaker(bind=_engine)()
# Public stuff.
from .Scan import Scan
from .Scan import dicompath
from .Annotation import Annotation
from .Contour import Contour
def query(*args):
"""
Wraps the sqlalchemy session object. Some example usage:
>>> import pylidc as pl
>>> qu = pl.query(pl.Scan).filter(pl.Scan.slice_thickness <= 1.)
>>> print qu.count()
>>> # => 97
>>> scan = qu.first()
>>> print len(scan.annotations)
>>> # => 11
>>> qu = pl.query(pl.Annotation).filter((pl.Annotation.malignancy > 3), (pl.Annotation.spiculation < 3))
>>> print qu.count()
>>> # => 1083
>>> annotation = qu.first()
>>> print annotation.estimate_volume()
>>> # => 5230.33874999
"""
return _session.query(*args)
|
[
"[email protected]"
] | |
d476c12d19016fedb10bf55bbe245feb207b93ac
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_Lag1Trend_NoCycle_NoAR.py
|
ff7ef11eee723d83fe871324617d9665f298f2bc
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 |
BSD-3-Clause
| 2023-03-08T21:45:40 | 2016-10-13T09:30:30 |
Python
|
UTF-8
|
Python
| false | false | 154 |
py
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['Lag1Trend'] , ['NoCycle'] , ['NoAR'] );
|
[
"[email protected]"
] | |
5be51dbb88aa58f10058062d78de161544e789e6
|
cc2fcc1a0c5ea9789f98ec97614d7b25b03ba101
|
/st2common/tests/unit/test_configs_registrar.py
|
e23dfe74262ed4c55b95ba299c1a0f50fbeb08c9
|
[
"Apache-2.0"
] |
permissive
|
Junsheng-Wu/st2
|
6451808da7de84798641882ca202c3d1688f8ba8
|
c3cdf657f7008095f3c68b4132b9fe76d2f52d81
|
refs/heads/master
| 2022-04-30T21:32:44.039258 | 2020-03-03T07:03:57 | 2020-03-03T07:03:57 | 244,301,363 | 0 | 0 |
Apache-2.0
| 2022-03-29T22:04:26 | 2020-03-02T06:53:58 |
Python
|
UTF-8
|
Python
| false | false | 4,577 |
py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
from st2common.content import utils as content_utils
from st2common.bootstrap.configsregistrar import ConfigsRegistrar
from st2common.persistence.pack import Pack
from st2common.persistence.pack import Config
from st2tests.api import SUPER_SECRET_PARAMETER
from st2tests.base import CleanDbTestCase
from st2tests import fixturesloader
__all__ = [
'ConfigsRegistrarTestCase'
]
PACK_1_PATH = os.path.join(fixturesloader.get_fixtures_packs_base_path(), 'dummy_pack_1')
PACK_6_PATH = os.path.join(fixturesloader.get_fixtures_packs_base_path(), 'dummy_pack_6')
class ConfigsRegistrarTestCase(CleanDbTestCase):
def test_register_configs_for_all_packs(self):
# Verify DB is empty
pack_dbs = Pack.get_all()
config_dbs = Config.get_all()
self.assertEqual(len(pack_dbs), 0)
self.assertEqual(len(config_dbs), 0)
registrar = ConfigsRegistrar(use_pack_cache=False)
registrar._pack_loader.get_packs = mock.Mock()
registrar._pack_loader.get_packs.return_value = {'dummy_pack_1': PACK_1_PATH}
packs_base_paths = content_utils.get_packs_base_paths()
registrar.register_from_packs(base_dirs=packs_base_paths)
# Verify pack and schema have been registered
pack_dbs = Pack.get_all()
config_dbs = Config.get_all()
self.assertEqual(len(pack_dbs), 1)
self.assertEqual(len(config_dbs), 1)
config_db = config_dbs[0]
self.assertEqual(config_db.values['api_key'], '{{st2kv.user.api_key}}')
self.assertEqual(config_db.values['api_secret'], SUPER_SECRET_PARAMETER)
self.assertEqual(config_db.values['region'], 'us-west-1')
def test_register_all_configs_invalid_config_no_config_schema(self):
# verify_ configs is on, but ConfigSchema for the pack doesn't exist so
# validation should proceed normally
# Verify DB is empty
pack_dbs = Pack.get_all()
config_dbs = Config.get_all()
self.assertEqual(len(pack_dbs), 0)
self.assertEqual(len(config_dbs), 0)
registrar = ConfigsRegistrar(use_pack_cache=False, validate_configs=False)
registrar._pack_loader.get_packs = mock.Mock()
registrar._pack_loader.get_packs.return_value = {'dummy_pack_6': PACK_6_PATH}
packs_base_paths = content_utils.get_packs_base_paths()
registrar.register_from_packs(base_dirs=packs_base_paths)
# Verify pack and schema have been registered
pack_dbs = Pack.get_all()
config_dbs = Config.get_all()
self.assertEqual(len(pack_dbs), 1)
self.assertEqual(len(config_dbs), 1)
def test_register_all_configs_with_config_schema_validation_validation_failure(self):
# Verify DB is empty
pack_dbs = Pack.get_all()
config_dbs = Config.get_all()
self.assertEqual(len(pack_dbs), 0)
self.assertEqual(len(config_dbs), 0)
registrar = ConfigsRegistrar(use_pack_cache=False, fail_on_failure=True,
validate_configs=True)
registrar._pack_loader.get_packs = mock.Mock()
registrar._pack_loader.get_packs.return_value = {'dummy_pack_6': PACK_6_PATH}
# Register ConfigSchema for pack
registrar._register_pack_db = mock.Mock()
registrar._register_pack(pack_name='dummy_pack_5', pack_dir=PACK_6_PATH)
packs_base_paths = content_utils.get_packs_base_paths()
expected_msg = ('Failed validating attribute "regions" in config for pack "dummy_pack_6" '
'(.*?): 1000 is not of type u\'array\'')
self.assertRaisesRegexp(ValueError, expected_msg,
registrar.register_from_packs,
base_dirs=packs_base_paths)
|
[
"[email protected]"
] | |
196964f8812712d14c761353096cc995312f630d
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/containsDuplicate_20200907093833.py
|
a13711fe822495f778880bcdac9e84cd2d398e7d
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 133 |
py
|
def duplicate(nums,k,t):
number = None
for i in range(len(nums)):
for j in range(i+1,len(nums)):
if nu
|
[
"[email protected]"
] | |
286901b3a6a7ec15eaad0c29b53851f0e00a3e81
|
599db5e2e3c4d6c296de25a8ef8e95a862df032b
|
/OpenAI Gym/(clear)breakout-ramdeterministic-v4/model6/14000epi/modelPlay.py
|
7e5537269ecb2dd11378115b992616635625fad7
|
[] |
no_license
|
wantyouring/ML-practice-code
|
bb7577e99f22587c7ca016c1c4d067175e5ce9d9
|
a3efbb4d252bacc831c5d7a01daf6476e7a755e4
|
refs/heads/master
| 2020-05-14T17:45:17.735081 | 2019-06-30T14:43:25 | 2019-06-30T14:43:25 | 181,898,074 | 0 | 1 | null | 2019-06-15T05:52:44 | 2019-04-17T13:31:24 |
Python
|
UTF-8
|
Python
| false | false | 3,233 |
py
|
# 학습모델 play. random action과 학습model 비교.
import gym
import pylab
import numpy as np
import gym.wrappers as wrappers
from doubleDQN2 import DoubleDQNAgent
EPISODES = 1 # 처음은 random으로 수행, 나중에는 학습model로 수행
global_step = 0
def change_action(action):
if action == 0:
return 0
elif action == 1:
return 2
elif action == 2:
return 3
elif action == 3:
return 3
if __name__ == "__main__":
env = gym.make('Breakout-ramDeterministic-v4')
env = wrappers.Monitor(env,"./results",force = True)
state_size = 128
action_size = 3
agent = DoubleDQNAgent(state_size, action_size)
agent.load_model()
agent.epsilon = -1 # Q value에 의해서만 움직이게끔.
agent.render = True
scores, episodes = [], []
random_success_cnt = 0
model_success_cnt = 0
# 랜덤액션 진행시
for e in range(EPISODES):
done = False
score = 0
life = 5
env.reset()
for i in range(5):
env.step(1) # 시작 action.
while not done:
action = env.action_space.sample()
_, reward, done, info = env.step(change_action(action))
score += reward
if done:
if score > 0:
random_success_cnt += 1
print("episode:", e, " score:", score)
scores.append(score)
episodes.append(e)
break
if life != info['ale.lives']:
life = info['ale.lives']
for i in range(5):
state, _, _, _ = env.step(1)
state = np.reshape(state, [1, 128])
# 학습모델 play
for e in range(EPISODES,EPISODES*2):
done = False
life = 5
score = 0
state = env.reset()
for i in range(5):
state, _, _, _ = env.step(1) # 시작 action.
state = np.reshape(state,[1,128])
while not done:
global_step += 1
if agent.render:
env.render()
# 현재 s에서 a취해 s`, r, done 정보 얻기.
action = agent.get_action(state)
next_state, reward, done, info = env.step(change_action(action))
score += reward
state = next_state
state = np.reshape(state, [1, 128])
if done:
if score > 0 :
model_success_cnt += 1
print("episode:", e, " score:", score)
scores.append(score)
episodes.append(e)
if e % 5 == 0:
pylab.plot(episodes, scores)
pylab.savefig("./play_score.png")
break
if life != info['ale.lives']:
life = info['ale.lives']
for i in range(5):
state, _, _, _ = env.step(1)
state = np.reshape(state, [1, 128])
env.close()
print("random : {}/{} success. rate : {}".format(random_success_cnt,EPISODES,random_success_cnt/EPISODES))
print("model : {}/{} success. rate : {}".format(model_success_cnt,EPISODES,model_success_cnt/EPISODES))
|
[
"[email protected]"
] | |
4a18ef0719c0058c463c0200d66e76acbe62ccfd
|
e49b654d3db99773390c5b9686df9c99fbf92b2a
|
/linked_lists/is_palindrome.py
|
703a4960dc6cfbe7e741efde1dd056a7ede1b2cc
|
[] |
no_license
|
hao89/diary_of_programming_puzzles
|
467e8264d0ad38768ba5ac3cfb45301293d79943
|
0e05d3716f28075f99bbd7b433d16a383209e57c
|
refs/heads/master
| 2021-01-16T00:49:38.956102 | 2015-08-25T13:44:53 | 2015-08-25T13:44:53 | 41,692,587 | 1 | 0 | null | 2015-08-31T18:20:38 | 2015-08-31T18:20:36 |
Python
|
UTF-8
|
Python
| false | false | 831 |
py
|
"""
Implement a function to check if a linked list is a palindrome
"""
import random
from linked_list import LinkedListNode
def is_palindrome1(linked_list):
# reverse and compare
pass
def build_palindrome_list():
root = LinkedListNode(5)
previous_node = root
for i in range(0, 2):
new_node = LinkedListNode(random.randint(0, 9))
previous_node.next = new_node
previous_node = new_node
stack = []
current_node = root
while current_node.next != None: # all but the last one
stack.append(current_node.data)
current_node = current_node.next
while len(stack) != 0:
data = stack.pop()
new_node = LinkedListNode(data)
previous_node.next = new_node
previous_node = new_node
return root
def build_random_list():
pass
|
[
"[email protected]"
] | |
07f54965bf19a638d7de2870978fd0fccb3c3b59
|
635670997e25d7fd578701995fe0422dd5671528
|
/src/models_VAE/best_models/vae/encoder.py
|
48db109dad68d468093e78e6d9e4cbd35e10fc19
|
[] |
no_license
|
QuangNamVu/thesis
|
5126c0281d93e7a5c2c3a5784363d7f6c6baadfd
|
01a404de2dfb70f13f3e61a9a8f3b73c88d93502
|
refs/heads/master
| 2022-12-24T10:08:33.472729 | 2019-12-21T16:27:07 | 2019-12-21T16:27:07 | 174,741,015 | 0 | 3 | null | 2022-12-14T06:56:36 | 2019-03-09T20:09:03 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,892 |
py
|
import tensorflow as tf
from tensorpack import *
from tf_utils.ar_layers import *
from tf_utils.common import *
def encoder(self, x):
is_training = get_current_tower_context().is_training
# [M, T, D] => [M, T, f0]
fc_l1 = gaussian_dense(name='encode_l1', inputs=x, out_C=self.hps.f[0])
activate_l1 = tf.nn.elu(fc_l1)
out_l1 = tf.layers.dropout(inputs=activate_l1, rate=self.hps.dropout_rate, training=is_training)
# [M, T, f0] => [M, T, f1]
fc_l2 = gaussian_dense(name='encode_l2', inputs=out_l1, out_C=self.hps.f[0])
activate_l2 = tf.nn.tanh(fc_l2)
out_l2 = tf.layers.dropout(inputs=activate_l2, rate=self.hps.dropout_rate, training=is_training)
cell = tf.nn.rnn_cell.LSTMCell(num_units=self.hps.lstm_units, state_is_tuple=True)
# z: [M, T, o]
# h: [M, o]
# c: [M, o]
# [M, T, f1] => [M, T, o]
outputs, state = tf.nn.dynamic_rnn(cell, out_l2, sequence_length=[self.hps.T] * self.hps.M, dtype=tf.float32,
parallel_iterations=64)
# [M, T, o] => [M, T * o] => [M, n_z]
next_seq = tf.reshape(outputs, shape=[-1, self.hps.T * self.hps.lstm_units])
state_c = state.c
if self.hps.is_VDE:
# z_lst = tf.contrib.layers.fully_connected(inputs=next_seq, out_C=2 * self.hps.n_z)
z_lst = gaussian_dense(name='encode_fc1', inputs=next_seq, out_C=2 * self.hps.n_z)
else:
rs_l3 = tf.reshape(out_l2, [-1, self.hps.T * self.hps.f[1]])
z_lst = gaussian_dense(name='encode_fc2', inputs=rs_l3, out_C=2 * self.hps.n_z)
z_mu, z_std1 = split(z_lst, split_dim=1, split_sizes=[self.hps.n_z, self.hps.n_z])
z_std = 1e-10 + tf.nn.softplus(z_std1)
if self.hps.is_VAE:
noise = tf.random_normal(shape=tf.shape(z_mu), mean=0.0, stddev=1.0)
z = z_mu + noise * z_std
else:
z = z_mu
return z_mu, z_std, z, state_c
|
[
"[email protected]"
] | |
a96bac8257857719d4e612c36e2dc88f720a5690
|
ad212b92beac17c4d061848c1dcd443d02a168c8
|
/monthly_challenge/202008/19_goat_latin.py
|
e866084a3ba4569f5efdb64fd6aa23d3416e864d
|
[] |
no_license
|
21eleven/leetcode-solutions
|
5ec97e4391c8ebaa77f4404a1155f3ef464953b3
|
35c91e6f5f5ed348186b8641e6fc49c825322d32
|
refs/heads/master
| 2023-03-03T10:22:41.726612 | 2021-02-13T21:02:13 | 2021-02-13T21:02:13 | 260,374,085 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,420 |
py
|
"""
A sentence S is given, composed of words separated by spaces. Each word consists of lowercase and uppercase letters only.
We would like to convert the sentence to "Goat Latin" (a made-up language similar to Pig Latin.)
The rules of Goat Latin are as follows:
If a word begins with a vowel (a, e, i, o, or u), append "ma" to the end of the word.
For example, the word 'apple' becomes 'applema'.
If a word begins with a consonant (i.e. not a vowel), remove the first letter and append it to the end, then add "ma".
For example, the word "goat" becomes "oatgma".
Add one letter 'a' to the end of each word per its word index in the sentence, starting with 1.
For example, the first word gets "a" added to the end, the second word gets "aa" added to the end and so on.
Return the final sentence representing the conversion from S to Goat Latin.
Example 1:
Input: "I speak Goat Latin"
Output: "Imaa peaksmaaa oatGmaaaa atinLmaaaaa"
"""
class Solution:
def toGoatLatin(self, S: str) -> str:
words = S.split()
vowels = set(["a", "e", "i", "o", "u"])
goat = []
idx = 2
a = 'a'
for w in words:
if w[0].lower() in vowels:
goat.append(f"{w}m{a*idx}")
else:
goat.append(f"{w[1:]}{w[0]}m{a*idx}")
idx += 1
return ' '.join(goat)
|
[
"[email protected]"
] | |
2caa36497292851a2824c6d22461f476df9e29db
|
8d113f0a487dab55c733ff63da5bba9e20f69b69
|
/config/settings.py
|
bca76446204a4d8a3e3373d62517eb9c85a8dc70
|
[
"MIT"
] |
permissive
|
AktanKasymaliev/django-video-hosting
|
c33d341a7709a21869c44a15eb6a3b6e9a783f54
|
b201ed3421025da22b43405452bde617ea26a90f
|
refs/heads/main
| 2023-07-18T08:10:00.289537 | 2021-09-02T20:15:41 | 2021-09-02T20:15:41 | 387,730,116 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,039 |
py
|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("DJANGO_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = eval(os.environ.get("DJANGO_DEBUG"))
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
"videoApp",
"channels",
"django_cleanup",
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
ASGI_APPLICATION = 'config.asgi.application'
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
'hosts': [('127.0.0.1', 6379)],
}
}
}
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get("DATABASE_NAME"),
'USER': os.environ.get("DATABASE_USER"),
'PASSWORD': os.environ.get("DATABASE_PASSW"),
'HOST': os.environ.get("DATABASE_HOST"),
'PORT': os.environ.get("DATABASE_PORT"),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_DIR = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [STATIC_DIR]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"[email protected]"
] | |
b5915b18fbbba281d99d4d188ad3de150336d99e
|
dbaad22aa8aa6f0ebdeacfbe9588b281e4e2a106
|
/0423 Pandas/1-複習-pandas/Pandas1_csv_-plot1.py
|
58fb6438f52cd86d1347b1d57e7b87de2c826879
|
[
"MIT"
] |
permissive
|
ccrain78990s/Python-Exercise
|
b4ecec6a653afd90de855a64fbf587032705fa8f
|
a9d09d5f3484efc2b9d9a53b71307257a51be160
|
refs/heads/main
| 2023-07-18T08:31:39.557299 | 2021-09-06T15:26:19 | 2021-09-06T15:26:19 | 357,761,471 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,289 |
py
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
__author__ = "Chen"
# 0423 練習
"""
資料出處:
公開發行公司股票發行概況統計表_New
https://data.gov.tw/dataset/103533
主要欄位說明:
年月、上市家數、上市資本額_十億元、上市成長率、上市面值_十億元、上市市值_十億元、
上櫃家數、上櫃資本額_十億元、上櫃成長率、上櫃面值_十億元、上櫃市值_十億元、
未上市上櫃家數、未上市上櫃資本額_十億元、公告日期
"""
import pandas as pd
df = pd.read_csv('每月_公開發行公司股票發行概況統計表.csv')
print(df.head())
print(type(df))
# 查看自資料訊息
print("=====資料訊息=====")
print(df.info())
# 資料大小
print("=====資料大小=====")
print(df.shape)
print("=====欄位名稱=====")
print(df.columns)
print("=====索引指數=====")
print(df.index)
print("=====統計描述=====")
print(df.describe())
print("**小練習***************************************************************")
print("========1.把 [年月,上市家數,上市資本額_十億元] 印出來========")
print(df[['年月','上市家數','上市資本額_十億元']])
#df2=df[['年月','上市家數','上市資本額_十億元']]
print("========2.找出 2019 年的資料========")
print(df[(df['年月']<=201999) & (df['年月']>=201900) ])
print("========3.找出 上市成長率 最高的年月========")
#print(df.上市成長率.max())
#print(df.上市成長率.idxmax())
max1=df.上市成長率.idxmax()
print(df[max1:max1+1])
print("========4.找出 2019 年的[上市成長率] 最高的月份========")
df2=df[(df['年月']<=201999) & (df['年月']>=201900) ]
max2=df2.上市成長率.idxmax()
print(df[max2:max2+1])
print("========5.找出 2018 年的資料========")
print(df[(df['年月']<=201899) & (df['年月']>=201800) ])
"""
未做完 可以參考老師解答
print("========6.比較 2017 和 2018 年的[上市資本額_十億元] 情況 (差異)========")
df3=df[(df['年月']<=201799) & (df['年月']>=201700) ]
df4=df[(df['年月']<=201899) & (df['年月']>=201800) ]
df5=df3[['年月','上市資本額_十億元']]
df6=df4[['年月','上市資本額_十億元']]
#df7=pd.concat([df5, df6], ignore_index=True)
df7=pd.merge(df5, df6,how='')
print(df7)
"""
|
[
"[email protected]"
] | |
473edc044398c5b3eca2579faca5a7c518d2a277
|
10e5b1b2e42a2ff6ec998ed900071e8b5da2e74e
|
/array/0509_fibonacci_number/0509_fibonacci_number.py
|
5d815145881d946e9ff8a001d2a66e9ff2dcd44e
|
[] |
no_license
|
zdyxry/LeetCode
|
1f71092d687316de1901156b74fbc03588f0b0a5
|
b149d1e8a83b0dfc724bd9dc129a1cad407dd91f
|
refs/heads/master
| 2023-01-29T11:59:14.162531 | 2023-01-26T03:20:23 | 2023-01-26T03:20:23 | 178,754,208 | 6 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 806 |
py
|
# -*- coding: utf-8 -*-
class Solution(object):
def fib(self, N):
if N <= 1:
return N
return self.fib(N - 1) + self.fib(N - 2)
def fib2(self, N):
array = [i for i in xrange(N)]
for i in xrange(2, N+1):
array[i] = array[i-1] + array[i-2]
return array[-1]
def fib3(self, N):
if N <=1:
return N
left = 0
right =1
for i in range(2,N+1):
left, right = right, left + right
return right
def fib4(self, N):
array =[i for i in range(N+1)]
return self.fibola(array, N)
def fibola(self, array, N):
if N <= 1:
return N
array[N] = self.fibola(array, N-1) + array[N-2]
return array[N]
print(Solution().fib4(6))
|
[
"[email protected]"
] | |
0a6f99febf158ce23215714249263dc107358160
|
2724412db1fc69b67b74a7d1c4ca4731962908d3
|
/tests/test_streams.py
|
fd7c66a0bdc95eac88148387db0573a5c90b4496
|
[
"BSD-3-Clause"
] |
permissive
|
Tijani-Dia/websockets
|
a981267685e681df822307bce4ec7eb781e9927d
|
ed9a7b446c7147f6f88dbeb1d86546ad754e435e
|
refs/heads/main
| 2023-08-23T13:10:16.030126 | 2021-10-08T20:18:24 | 2021-10-28T20:17:30 | 425,114,573 | 1 | 0 |
BSD-3-Clause
| 2021-11-05T23:56:39 | 2021-11-05T23:56:39 | null |
UTF-8
|
Python
| false | false | 6,055 |
py
|
from websockets.streams import StreamReader
from .utils import GeneratorTestCase
class StreamReaderTests(GeneratorTestCase):
def setUp(self):
self.reader = StreamReader()
def test_read_line(self):
self.reader.feed_data(b"spam\neggs\n")
gen = self.reader.read_line(32)
line = self.assertGeneratorReturns(gen)
self.assertEqual(line, b"spam\n")
gen = self.reader.read_line(32)
line = self.assertGeneratorReturns(gen)
self.assertEqual(line, b"eggs\n")
def test_read_line_need_more_data(self):
self.reader.feed_data(b"spa")
gen = self.reader.read_line(32)
self.assertGeneratorRunning(gen)
self.reader.feed_data(b"m\neg")
line = self.assertGeneratorReturns(gen)
self.assertEqual(line, b"spam\n")
gen = self.reader.read_line(32)
self.assertGeneratorRunning(gen)
self.reader.feed_data(b"gs\n")
line = self.assertGeneratorReturns(gen)
self.assertEqual(line, b"eggs\n")
def test_read_line_not_enough_data(self):
self.reader.feed_data(b"spa")
self.reader.feed_eof()
gen = self.reader.read_line(32)
with self.assertRaises(EOFError) as raised:
next(gen)
self.assertEqual(
str(raised.exception),
"stream ends after 3 bytes, before end of line",
)
def test_read_line_too_long(self):
self.reader.feed_data(b"spam\neggs\n")
gen = self.reader.read_line(2)
with self.assertRaises(RuntimeError) as raised:
next(gen)
self.assertEqual(
str(raised.exception),
"read 5 bytes, expected no more than 2 bytes",
)
def test_read_line_too_long_need_more_data(self):
self.reader.feed_data(b"spa")
gen = self.reader.read_line(2)
with self.assertRaises(RuntimeError) as raised:
next(gen)
self.assertEqual(
str(raised.exception),
"read 3 bytes, expected no more than 2 bytes",
)
def test_read_exact(self):
self.reader.feed_data(b"spameggs")
gen = self.reader.read_exact(4)
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"spam")
gen = self.reader.read_exact(4)
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"eggs")
def test_read_exact_need_more_data(self):
self.reader.feed_data(b"spa")
gen = self.reader.read_exact(4)
self.assertGeneratorRunning(gen)
self.reader.feed_data(b"meg")
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"spam")
gen = self.reader.read_exact(4)
self.assertGeneratorRunning(gen)
self.reader.feed_data(b"gs")
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"eggs")
def test_read_exact_not_enough_data(self):
self.reader.feed_data(b"spa")
self.reader.feed_eof()
gen = self.reader.read_exact(4)
with self.assertRaises(EOFError) as raised:
next(gen)
self.assertEqual(
str(raised.exception),
"stream ends after 3 bytes, expected 4 bytes",
)
def test_read_to_eof(self):
gen = self.reader.read_to_eof(32)
self.reader.feed_data(b"spam")
self.assertGeneratorRunning(gen)
self.reader.feed_eof()
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"spam")
def test_read_to_eof_at_eof(self):
self.reader.feed_eof()
gen = self.reader.read_to_eof(32)
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"")
def test_read_to_eof_too_long(self):
gen = self.reader.read_to_eof(2)
self.reader.feed_data(b"spam")
with self.assertRaises(RuntimeError) as raised:
next(gen)
self.assertEqual(
str(raised.exception),
"read 4 bytes, expected no more than 2 bytes",
)
def test_at_eof_after_feed_data(self):
gen = self.reader.at_eof()
self.assertGeneratorRunning(gen)
self.reader.feed_data(b"spam")
eof = self.assertGeneratorReturns(gen)
self.assertFalse(eof)
def test_at_eof_after_feed_eof(self):
gen = self.reader.at_eof()
self.assertGeneratorRunning(gen)
self.reader.feed_eof()
eof = self.assertGeneratorReturns(gen)
self.assertTrue(eof)
def test_feed_data_after_feed_data(self):
self.reader.feed_data(b"spam")
self.reader.feed_data(b"eggs")
gen = self.reader.read_exact(8)
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"spameggs")
gen = self.reader.at_eof()
self.assertGeneratorRunning(gen)
def test_feed_eof_after_feed_data(self):
self.reader.feed_data(b"spam")
self.reader.feed_eof()
gen = self.reader.read_exact(4)
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"spam")
gen = self.reader.at_eof()
eof = self.assertGeneratorReturns(gen)
self.assertTrue(eof)
def test_feed_data_after_feed_eof(self):
self.reader.feed_eof()
with self.assertRaises(EOFError) as raised:
self.reader.feed_data(b"spam")
self.assertEqual(
str(raised.exception),
"stream ended",
)
def test_feed_eof_after_feed_eof(self):
self.reader.feed_eof()
with self.assertRaises(EOFError) as raised:
self.reader.feed_eof()
self.assertEqual(
str(raised.exception),
"stream ended",
)
def test_discard(self):
gen = self.reader.read_to_eof(32)
self.reader.feed_data(b"spam")
self.reader.discard()
self.assertGeneratorRunning(gen)
self.reader.feed_eof()
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"")
|
[
"[email protected]"
] | |
4b14a84a25716004baaf55a0e43796fab1a29293
|
a137466dbaa5d704cd5a15ab9dfd17907b24be04
|
/algo2/mrdqn/agent.py
|
21a44e7aca50bb7bc677d14406d87263a932f502
|
[
"Apache-2.0"
] |
permissive
|
xlnwel/g2rl
|
92c15b8b9d0cd75b6d2dc8df20e6717e1a621ff6
|
e1261fdd2ce70724a99ddd174616cf013917b241
|
refs/heads/master
| 2023-08-30T10:29:44.169523 | 2021-11-08T07:50:43 | 2021-11-08T07:50:43 | 422,582,891 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,158 |
py
|
import tensorflow as tf
from utility.tf_utils import softmax, log_softmax, explained_variance
from utility.rl_utils import *
from utility.rl_loss import retrace
from core.decorator import override
from algo.mrdqn.base import RDQNBase, get_data_format, collect
class Agent(RDQNBase):
""" MRDQN methods """
@tf.function
def _learn(self, obs, action, reward, discount, mu, mask,
IS_ratio=1, state=None, prev_action=None, prev_reward=None):
obs, action, mu, mask, target, state, add_inp, terms = \
self._compute_target_and_process_data(
obs, action, reward, discount, mu, mask,
state, prev_action, prev_reward)
with tf.GradientTape() as tape:
x, _ = self._compute_embed(obs, mask, state, add_inp)
qs = self.q(x)
q = tf.reduce_sum(qs * action, -1)
error = target - q
value_loss = tf.reduce_mean(.5 * error**2, axis=-1)
value_loss = tf.reduce_mean(IS_ratio * value_loss)
terms['value_loss'] = value_loss
tf.debugging.assert_shapes([
[q, (None, self._sample_size)],
[target, (None, self._sample_size)],
[error, (None, self._sample_size)],
[IS_ratio, (None,)],
[value_loss, ()]
])
terms['value_norm'] = self._value_opt(tape, value_loss)
if 'actor' in self.model:
with tf.GradientTape() as tape:
pi, logpi = self.actor.train_step(x)
pi_a = tf.reduce_sum(pi * action, -1)
reinforce = tf.minimum(1. / mu, self._loo_c) * error * pi_a
v = tf.reduce_sum(qs * pi, axis=-1)
regularization = -tf.reduce_sum(pi * logpi, axis=-1)
loo_loss = -(self._v_pi_coef * v + self._reinforce_coef * reinforce)
tf.debugging.assert_shapes([
[pi, (None, self._sample_size, self._action_dim)],
[qs, (None, self._sample_size, self._action_dim)],
[v, (None, self._sample_size)],
[reinforce, (None, self._sample_size)],
[regularization, (None, self._sample_size)],
])
loo_loss = tf.reduce_mean(loo_loss, axis=-1)
regularization = tf.reduce_mean(regularization, axis=-1)
actor_loss = loo_loss - self._tau * regularization
actor_loss = tf.reduce_mean(IS_ratio * actor_loss)
terms.update(dict(
reinforce=reinforce,
v=v,
loo_loss=loo_loss,
regularization=regularization,
actor_loss=actor_loss,
ratio=tf.reduce_mean(pi_a / mu),
pi_min=tf.reduce_min(pi),
pi_std=tf.math.reduce_std(pi)
))
terms['actor_norm'] = self._actor_opt(tape, actor_loss)
if self._is_per:
priority = self._compute_priority(tf.abs(error))
terms['priority'] = priority
terms.update(dict(
q=q,
q_std=tf.math.reduce_std(q),
error=error,
error_std=tf.math.reduce_std(error),
mu_min=tf.reduce_min(mu),
mu=mu,
mu_inv=tf.reduce_mean(1/mu),
mu_std=tf.math.reduce_std(mu),
target=target,
explained_variance_q=explained_variance(target, q)
))
return terms
@override(RDQNBase)
def _compute_target(self, obs, action, reward, discount,
mu, mask, state, add_inp):
terms = {}
x, _ = self._compute_embed(obs, mask, state, add_inp, online=False)
if self._burn_in_size:
bis = self._burn_in_size
ss = self._sample_size
_, reward = tf.split(reward, [bis, ss], 1)
_, discount = tf.split(discount, [bis, ss], 1)
_, next_mu_a = tf.split(mu, [bis+1, ss], 1)
_, next_x = tf.split(x, [bis+1, ss], 1)
_, next_action = tf.split(action, [bis+1, ss], 1)
else:
_, next_mu_a = tf.split(mu, [1, self._sample_size], 1)
_, next_x = tf.split(x, [1, self._sample_size], 1)
_, next_action = tf.split(action, [1, self._sample_size], 1)
next_qs = self.target_q(next_x)
regularization = None
if 'actor' in self.model:
next_pi, next_logpi = self.target_actor.train_step(next_x)
if self._probabilistic_regularization == 'entropy':
regularization = tf.reduce_sum(
self._tau * next_pi * next_logpi, axis=-1)
else:
if self._probabilistic_regularization is None:
if self._double: # don't suggest to use double Q here, but implement it anyway
online_x, _ = self._compute_embed(obs, mask, state, add_inp)
next_online_x = tf.split(online_x, [bis+1, ss-1], 1)
next_online_qs = self.q(next_online_x)
next_pi = self.q.compute_greedy_action(next_online_qs, one_hot=True)
else:
next_pi = self.target_q.compute_greedy_action(next_qs, one_hot=True)
elif self._probabilistic_regularization == 'prob':
next_pi = softmax(next_qs, self._tau)
elif self._probabilistic_regularization == 'entropy':
next_pi = softmax(next_qs, self._tau)
next_logpi = log_softmax(next_qs, self._tau)
regularization = tf.reduce_sum(next_pi * next_logpi, axis=-1)
terms['next_entropy'] = - regularization / self._tau
else:
raise ValueError(self._probabilistic_regularization)
discount = discount * self._gamma
target = retrace(
reward, next_qs, next_action,
next_pi, next_mu_a, discount,
lambda_=self._lambda,
axis=1, tbo=self._tbo,
regularization=regularization)
return target, terms
|
[
"[email protected]"
] | |
b77ad5adbfe3bdc3c5a57d4185371cc854289ac2
|
a07124716edd86159dff277010132ba9c5cd0f75
|
/Text-Based Browser/task/tests.py
|
3ca883bce21f71b1767281f280b941e8d1d999d1
|
[
"MIT"
] |
permissive
|
drtierney/hyperskill-TextBasedBrowser-Python
|
27a15fa0bd44a927a9552d4815a0b4ab69375710
|
a4f2ac60643559e580b75a02078a679e5f1f0a2c
|
refs/heads/main
| 2023-08-28T04:24:51.693648 | 2021-10-25T17:34:58 | 2021-10-25T17:34:58 | 415,304,838 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,637 |
py
|
from hstest.stage_test import StageTest
from hstest.test_case import TestCase
from hstest.check_result import CheckResult
import os
import shutil
import sys
if sys.platform.startswith("win"):
import _locale
# pylint: disable=protected-access
_locale._getdefaultlocale = (lambda *args: ['en_US', 'utf8'])
CheckResult.correct = lambda: CheckResult(True, '')
CheckResult.wrong = lambda feedback: CheckResult(False, feedback)
class TextBasedBrowserTest(StageTest):
    """Grader for the text-based browser stage.

    Feeds scripted stdin sessions to the student's program and validates
    both the console output and the pages saved under the ``tb_tabs``
    directory.
    """

    def generate(self):
        """Return the scripted sessions and their expected outcomes.

        ``attach`` encodes the expectation: a 3-tuple is
        (right_word, wrong_word, saved_file_name); a 4-tuple prefixed with
        None additionally requires an "error" message first; a dict maps a
        headline to (expected_print_count, site_name); no attach at all
        means the URL is invalid and only an error is expected.
        """
        return [
            TestCase(
                stdin='bloomberg.com\nbloomberg\nexit',
                attach=('Bloomberg', 'New York Times', 'bloomberg'),
                args=['tb_tabs']
            ),
            TestCase(
                stdin='nytimes.com\nnytimes\nexit',
                attach=('New York Times', 'Bloomberg', 'nytimes'),
                args=['tb_tabs']
            ),
            TestCase(
                stdin='nytimescom\nexit',
                args=['tb_tabs']
            ),
            TestCase(
                stdin='blooomberg.com\nexit',
                args=['tb_tabs']
            ),
            TestCase(
                stdin='blooomberg.com\nnytimes.com\nexit',
                attach=(None, 'New York Times', 'Bloomberg', 'nytimes'),
                args=['tb_tabs']
            ),
            TestCase(
                stdin='nytimescom\nbloomberg.com\nexit',
                attach=(None, 'Bloomberg', 'New York Times', 'bloomberg'),
                args=['tb_tabs']
            ),
            TestCase(
                stdin='bloomberg.com\nnytimes.com\nback\nexit',
                attach={
                    'This New Liquid Is Magnetic, and Mesmerizing': (1, 'New York Times'),
                    'The Space Race: From Apollo 11 to Elon Musk': (2, 'Bloomberg')
                },
                args=['tb_tabs']
            ),
            TestCase(
                stdin='nytimes.com\nbloomberg.com\nback\nexit',
                attach={
                    'This New Liquid Is Magnetic, and Mesmerizing': (2, 'New York Times'),
                    'The Space Race: From Apollo 11 to Elon Musk': (1, 'Bloomberg')
                },
                args=['tb_tabs']
            ),
        ]

    def _check_files(self, path_for_tabs: str, right_word: str) -> int:
        """
        Helper which checks that browser saves visited url in files and
        provides access to them.
        :param path_for_tabs: directory which must contain saved tabs
        :param right_word: Word-marker which must be in right tab
        :return: 1 if right_word is found in a saved tab, 0 if it is not,
            -1 if a saved tab could not be decoded as text
        """
        for path, dirs, files in os.walk(path_for_tabs):
            for file in files:
                with open(os.path.join(path_for_tabs, file), 'r') as tab:
                    try:
                        content = tab.read()
                    except UnicodeDecodeError:
                        return -1
                if right_word in content:
                    return 1
            # Only the top level of the directory tree is inspected.
            break
        return 0

    def check(self, reply, attach):
        """Validate one session's console output (and saved tabs) against
        the expectation encoded in ``attach`` (see generate())."""
        # Incorrect URL
        if attach is None:
            if 'error' in reply.lower():
                return CheckResult.correct()
            else:
                return CheckResult.wrong('There was no "error" word, but should be.')
        # Correct URL
        if isinstance(attach, tuple):
            if len(attach) == 4:
                # Leading None marker: an error must appear before success.
                _, *attach = attach
                if 'error' not in reply.lower():
                    return CheckResult.wrong('There was no "error" word, but should be.')
            right_word, wrong_word, correct_file_name = attach
            path_for_tabs = 'tb_tabs'
            if not os.path.isdir(path_for_tabs):
                return CheckResult.wrong(
                    "Can't find a directory \"" + path_for_tabs + "\" "
                    "in which you should save your web pages.")
            check_files_result = self._check_files(path_for_tabs, right_word)
            if not check_files_result:
                return CheckResult.wrong(
                    "Seems like you did\'n save the web page "
                    "\"" + right_word + "\" into the "
                    "directory \"" + path_for_tabs + "\". "
                    "This file with page should be named \"" + correct_file_name + "\"")
            elif check_files_result == -1:
                return CheckResult.wrong('An error occurred while reading your saved tab. '
                                         'Perhaps you used the wrong encoding?')
            # Clean up saved tabs so the next test case starts fresh.
            try:
                shutil.rmtree(path_for_tabs)
            except PermissionError:
                return CheckResult.wrong("Impossible to remove the directory for tabs. Perhaps you haven't closed some file?")
            if wrong_word in reply:
                return CheckResult.wrong('It seems like you printed wrong variable')
            if right_word in reply:
                return CheckResult.correct()
            return CheckResult.wrong('You printed neither bloomberg_com nor nytimes_com')
        if isinstance(attach, dict):
            # Headline -> (expected occurrence count, site name).
            for key, value in attach.items():
                count, site = value
                real_count = reply.count(key)
                if reply.count(key) != count:
                    return CheckResult.wrong(
                        f'The site "{site}" should be displayed {count} time(s).\n'
                        f'Actually displayed: {real_count} time(s).'
                    )
            return CheckResult.correct()


TextBasedBrowserTest().run_tests()
|
[
"[email protected]"
] | |
4918810498af75369329a2204c7cccbe0e40efb1
|
40dd8330e5f78c4348bbddc2c5acfd59d793dd51
|
/tools/model_converters/twins2mmseg.py
|
647d41784aa07468be4b3f2e183064ad55266ad1
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmsegmentation
|
0d12092312e2c465ede1fd7dd9847b6f2b37049c
|
30a3f94f3e2916e27fa38c67cc3b8c69c1893fe8
|
refs/heads/main
| 2023-09-04T10:54:52.299711 | 2023-07-24T07:28:21 | 2023-07-24T07:28:21 | 272,133,018 | 6,534 | 2,375 |
Apache-2.0
| 2023-09-14T01:22:32 | 2020-06-14T04:32:33 |
Python
|
UTF-8
|
Python
| false | false | 2,764 |
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from collections import OrderedDict
import mmengine
import torch
from mmengine.runner import CheckpointLoader
def convert_twins(args, ckpt):
    """Remap a timm Twins (pcpvt/svt) state dict to MMSegmentation naming.

    Args:
        args: parsed CLI namespace; ``args.model`` selects 'pcpvt' or 'svt'.
        ckpt (dict): source state dict mapping parameter name -> tensor.

    Returns:
        OrderedDict: converted state dict. Classification-head keys are
        dropped, and each attention ``q`` weight is fused with its matching
        ``kv`` tensor into a single ``attn.in_proj_`` entry (the standalone
        kv keys are skipped at the end of the loop).
    """
    new_ckpt = OrderedDict()
    for k, v in list(ckpt.items()):
        new_v = v
        if k.startswith('head'):
            # Classification head is unused for segmentation backbones.
            continue
        elif k.startswith('patch_embeds'):
            if 'proj.' in k:
                new_k = k.replace('proj.', 'projection.')
            else:
                new_k = k
        elif k.startswith('blocks'):
            # Union
            if 'attn.q.' in k:
                # Fuse q with the corresponding kv tensor along dim 0.
                new_k = k.replace('q.', 'attn.in_proj_')
                new_v = torch.cat([v, ckpt[k.replace('attn.q.', 'attn.kv.')]],
                                  dim=0)
            elif 'mlp.fc1' in k:
                new_k = k.replace('mlp.fc1', 'ffn.layers.0.0')
            elif 'mlp.fc2' in k:
                new_k = k.replace('mlp.fc2', 'ffn.layers.1')
            # Only pcpvt
            elif args.model == 'pcpvt':
                if 'attn.proj.' in k:
                    new_k = k.replace('proj.', 'attn.out_proj.')
                else:
                    new_k = k
            # Only svt
            else:
                if 'attn.proj.' in k:
                    k_lst = k.split('.')
                    # NOTE(review): only odd-indexed blocks get the
                    # out_proj renaming — presumably the globally-attending
                    # layers in svt; confirm against the model definition.
                    if int(k_lst[2]) % 2 == 1:
                        new_k = k.replace('proj.', 'attn.out_proj.')
                    else:
                        new_k = k
                else:
                    new_k = k
            new_k = new_k.replace('blocks.', 'layers.')
        elif k.startswith('pos_block'):
            new_k = k.replace('pos_block', 'position_encodings')
            if 'proj.0.' in new_k:
                new_k = new_k.replace('proj.0.', 'proj.')
        else:
            new_k = k
        if 'attn.kv.' not in k:
            new_ckpt[new_k] = new_v
    return new_ckpt
def main():
    """CLI entry point: load a timm Twins checkpoint from ``src``, convert
    its parameter names via convert_twins, and save the result to ``dst``."""
    parser = argparse.ArgumentParser(
        description='Convert keys in timm pretrained vit models to '
        'MMSegmentation style.')
    parser.add_argument('src', help='src model path or url')
    # The dst path must be a full path of the new checkpoint.
    parser.add_argument('dst', help='save path')
    parser.add_argument('model', help='model: pcpvt or svt')
    args = parser.parse_args()
    checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')
    if 'state_dict' in checkpoint:
        # timm checkpoint
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint
    weight = convert_twins(args, state_dict)
    # Make sure the destination directory exists before saving.
    mmengine.mkdir_or_exist(osp.dirname(args.dst))
    torch.save(weight, args.dst)


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
c436a852bf1b29fdd43c22fec676f7de2348174a
|
da7a165522daea7c346693c5f32850017c482967
|
/abc51-100/abc066/c.py
|
09ed9795009df321637516a4eee2dcfb604ef0b8
|
[] |
no_license
|
SShayashi/ABC
|
19f8750919208c5ff8935638dbaab941c255f914
|
3cbfee0c5251c1bb0df6306166d8d4b33bf7bb2c
|
refs/heads/master
| 2021-05-04T21:06:10.720367 | 2020-07-11T13:59:16 | 2020-07-11T13:59:29 | 119,886,572 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 317 |
py
|
def main():
    """Reconstruct the original card order for AtCoder ABC066 C.

    Reads n and the dealt sequence from stdin; returns the reconstructed
    order as a space-separated string.
    """
    n = int(input())
    values = list(map(int, input().split()))
    front_half = values[::2]    # elements dealt at odd positions (1st, 3rd, ...)
    back_half = values[1::2]    # elements dealt at even positions (2nd, 4th, ...)
    # Whichever half ends up in front must be reversed before joining.
    if n % 2 == 0:
        ordered = back_half[::-1] + front_half
    else:
        ordered = front_half[::-1] + back_half
    return " ".join(map(str, ordered))


print(main())
|
[
"[email protected]"
] | |
5a1800a557704e33d4f51badeae781b4ae00bcca
|
c3a01f8bcece48f94a347b92694f90227708f507
|
/pyvisa/testsuite/test_constants.py
|
8c5add8034b3b0c9c0686b60af1742adea537ea8
|
[
"MIT"
] |
permissive
|
panlun/pyvisa
|
e16a6cdaae47bc69d932538f14c62015d17be7ab
|
124c46bd2ad89e49031339d6181255c2808fecbc
|
refs/heads/master
| 2022-11-21T13:07:29.280849 | 2020-06-24T22:23:27 | 2020-06-24T22:23:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 649 |
py
|
# -*- coding: utf-8 -*-
"""Test objects from constants.
This file is part of PyVISA.
:copyright: 2019-2020 by PyVISA Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from pyvisa.constants import DataWidth
from . import BaseTestCase
class TestDataWidth(BaseTestCase):
    """Tests for the DataWidth enum's literal-conversion helper."""

    def test_conversion_from_literal(self):
        """Each supported bit count maps to its enum member; 0 is rejected."""
        expected = {
            8: DataWidth.bit_8,
            16: DataWidth.bit_16,
            32: DataWidth.bit_32,
            64: DataWidth.bit_64,
        }
        for literal, member in expected.items():
            self.assertEqual(DataWidth.from_literal(literal), member)
        # An unsupported width must raise rather than silently default.
        with self.assertRaises(ValueError):
            DataWidth.from_literal(0)
|
[
"[email protected]"
] | |
ccc0c33067aa23f9329f7727f8ce57f7f5cf29b1
|
fff24c6c6123e5e90ac2fae26536150449140c6d
|
/setup.py
|
0b74facb61fe0ac8600893175528a1d17392e7ab
|
[
"ISC"
] |
permissive
|
binaryf/demosys-py
|
83da9f9ddd8d1672413f89153012ab6bb7fae6ab
|
f11b09cb6502adfaa437c8cbe780039c49b72524
|
refs/heads/master
| 2020-03-22T16:30:16.767030 | 2018-07-24T11:19:22 | 2018-07-24T11:19:22 | 140,331,208 | 1 | 0 | null | 2018-07-09T19:12:49 | 2018-07-09T19:12:48 | null |
UTF-8
|
Python
| false | false | 1,375 |
py
|
from setuptools import setup
# Package metadata and dependency pins for the demosys-py distribution.
# NOTE(review): dependencies are pinned to exact versions — presumably the
# versions this release was tested against; confirm before loosening.
setup(
    name="demosys-py",
    version="1.0.4",
    description="Modern OpenGL 3.3+ Framework inspired by Django",
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
    url="https://github.com/Contraz/demosys-py",
    author="Einar Forselv",
    author_email="[email protected]",
    maintainer="Einar Forselv",
    maintainer_email="[email protected]",
    packages=['demosys'],
    include_package_data=True,
    keywords = ['opengl', 'framework', 'demoscene'],
    classifiers=[
        'Programming Language :: Python',
        'Environment :: MacOS X',
        'Environment :: X11 Applications',
        'Intended Audience :: Developers',
        'Topic :: Multimedia :: Graphics',
        'License :: OSI Approved :: ISC License (ISCL)',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
    ],
    install_requires=[
        'PyOpenGL==3.1.0',
        'glfw==1.6.0',
        'moderngl==5.3.0',
        'pyrr==0.9.2',
        'Pillow==5.1.0',
        'pyrocket==0.2.7',
        'PyWavefront==0.3.2',
        # 'pygame==1.9.3',
    ],
    # Console command for project administration (Django-style).
    entry_points={'console_scripts': [
        'demosys-admin = demosys.core.management:execute_from_command_line',
    ]},
)
|
[
"[email protected]"
] | |
dfc119e744be40778ca5cd17e33454a7d7001076
|
fd18ce27b66746f932a65488aad04494202e2e0d
|
/day34/farms/categories/categories/categories/pipelines.py
|
dba029c921f259cfcbba84dba0b24d192b7fa697
|
[] |
no_license
|
daofeng123/ClassCodes
|
1acbd843836e550c9cebf67ef21dfca9f6b9fc87
|
fbcd1f24d79b8bb56ad0669b07ad118064609612
|
refs/heads/master
| 2020-06-24T12:34:28.148197 | 2019-08-15T03:56:40 | 2019-08-15T03:56:40 | 198,963,469 | 3 | 0 | null | 2019-07-26T06:53:45 | 2019-07-26T06:53:44 | null |
UTF-8
|
Python
| false | false | 558 |
py
|
# -*- coding: utf-8 -*-
import json
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from categories.dbs.redismq import RedisMQ
class CategoriesPipeline(object):
    """Scrapy pipeline that pushes each scraped item onto a Redis task queue."""

    # One shared queue client for every pipeline instance.
    redis_mq = RedisMQ()

    def process_item(self, item, spider):
        # Serialize the item as JSON, keeping non-ASCII text readable.
        payload = json.dumps(dict(item), ensure_ascii=False)
        # Enqueue the crawl task for downstream consumers.
        self.redis_mq.push_task(payload)
        # Hand the unchanged item to the next pipeline stage.
        return item
|
[
"[email protected]"
] | |
f7b22c64ab658985f221cf7076cee8fc91505b98
|
a360a22af5e0b385db438b1324564ef317ff2f38
|
/idex_module/views.py
|
a846edfb5592c73af23acdf636aeb55d68b6c4af
|
[] |
no_license
|
ogglin/exchange_comparison
|
3eb2d849e731f94e67509e4ce9130e33bb37bbaf
|
f3feae64aff26b574f7ecd24e6f7aff7bb95ec65
|
refs/heads/master
| 2023-04-26T07:45:06.229584 | 2021-05-31T18:52:29 | 2021-05-31T18:52:29 | 287,036,194 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 532 |
py
|
from rest_framework.response import Response
from rest_framework.views import APIView
from .functions import idex_profits
# Create your views here.
class idex(APIView):
    """Read-only endpoint returning profit candidates computed from IDEX."""

    def get(self, request):
        # Collect every entry produced by the IDEX profit scan and return
        # them as a single JSON response.
        payload = list(idex_profits())
        return Response(payload)
|
[
"[email protected]"
] | |
33f88b3804973bb17c410c2bdf24456d89324c34
|
9bcfbdf23c9ac156e0cdf5b5b5e06f18a1ad6fae
|
/pre_code/stock/xueqiuPawer.py
|
1a9ff34bb4d43402fbd96230452d2828ff831e48
|
[] |
no_license
|
haoson888/vnpy_future
|
a7576513b7ecf50c36d730c647263c6d1e44f3a6
|
89df2d5079a2e6d3782531369675248e38b2ff00
|
refs/heads/origin
| 2020-09-04T15:04:55.368725 | 2017-12-10T10:37:47 | 2017-12-10T10:37:47 | 219,762,816 | 0 | 1 | null | 2019-11-05T15:12:32 | 2019-11-05T14:18:18 | null |
UTF-8
|
Python
| false | false | 6,873 |
py
|
#coding: utf-8
from selenium import webdriver
import re
from urllib import request as urllib2
import sys
import os
from datetime import datetime
from datetime import timedelta
from datetime import date
import xlrd
from xlrd import open_workbook
from xlutils.copy import copy
#import nltk
import time
# Sequence number used to name the corpus/*.txt dumps written by start().
description_id = 1
#windows
#browser = webdriver.Chrome(executable_path='F:\chromedriver_win32\chromedriver.exe')
#mac
# NOTE(review): launching Chrome at import time is a heavy module-level side
# effect — importing this module opens a browser window.
browser = webdriver.Chrome(executable_path='/Users/daitechang/Documents/stock/chromedriver')
def start(url, d, today, vstock):
    """Scrape one page of a Xueqiu user's timeline and tally stock mentions.

    Extracts the embedded "statuses" JSON from the rendered page; for posts
    created on *today*, increments ``d[i]`` for every stock in ``vstock``
    whose name appears in the post text, and dumps matching posts under
    corpus/.  Returns 1 to continue paging, 0 when parsing fails or the
    posts are older than the target day.

    NOTE(review): contains Python 2 leftovers (``dict.has_key``) and writes
    bytes to a text-mode file — cannot run under Python 3 as written.
    """
    # try:
    global description_id
    global browser
    url = url
    try:
        browser.get(url)
        t = browser.page_source
        # Pull the raw "statuses" JSON array out of the page source.
        pn = re.compile(r'(.*)"statuses":(.*?)}]', re.S)
        match = pn.match(t)
        if not match:
            # browser.close()
            # browser.quit()
            return 0
        result = match.group(2)
        result = result + '}]'
        decode = json.loads(result)
        startDetect = time.time()
        # Millisecond epoch strings bounding [today, today + 1 day).
        st = int(time.mktime(datetime.strptime(datetime.strftime(today, "%Y-%m-%d"), "%Y-%m-%d").timetuple()))
        ed = int(time.mktime(datetime.strptime(datetime.strftime(today + timedelta(days = 1), "%Y-%m-%d"), "%Y-%m-%d").timetuple()))
        st = str(st) + '000'
        print(st)
        ed = str(ed) + '000'
        print(ed)
        s_today = datetime.strftime(today, "%Y-%m-%d")
        for i in range(len(vstock)):
            for item in decode:
                if item['mark'] == 1:
                    continue
                #print item['created_at'], st, ed
                #print item['description'].encode('utf-8'), vstock[i]._name
                # NOTE(review): timestamps compared as strings — only valid
                # while both sides have the same number of digits.
                if str(item['created_at']) > st and str(item['created_at']) < ed:
                    if item['text'].encode('utf-8').find(vstock[i]._name) != -1:
                        ff = open('corpus/' + s_today + '_' + str(description_id) + '.txt', 'w')
                        ff.write(item['text'].encode('utf-8'))
                        ff.close()
                        description_id += 1
                        #print vstock[i]._name, item['description'].encode('utf-8')
                        # NOTE(review): has_key was removed in Python 3
                        # (equivalent: ``if i in d``).
                        if d.has_key(i):
                            d[i] = d[i] + 1
                        else:
                            d[i] = 1
                elif str(item['created_at']) < st and i == len(vstock) -1:
                    #print 1
                    # browser.close()
                    # browser.quit()
                    #if i == len(vstock) -1:
                    return 0
        #print array[0], array[1]
        # print decode[0]['description'].encode('utf-8')
        # browser.close()
        # browser.quit()
        return 1
    except Exception as e:
        print(e)
        # browser.close()
        # browser.quit()
        return 0
import json
# Fetch the list of recommended ("hot") users.
def get_id():
    """Download Xueqiu's recommended-user list and write "id name" pairs
    to id.txt, one user per line, across 25 industry pages.

    NOTE(review): Python 2 leftover — concatenating ``bytes`` (from
    ``.encode('utf-8')``) with ``str`` and writing to a text-mode file
    raises TypeError on Python 3; needs porting before use.
    """
    f = open('id.txt', 'w')
    for i in range(25):
        url = 'http://xueqiu.com/recommend/user/industry.json?detail=1&index=' + str(i)
        #browser.get(url)
        #t = browser.page_source
        print(url)
        # print t.encode('utf-8')
        # NOTE(review): hard-coded session cookie — expired long ago; the
        # request will not authenticate without a fresh one.
        cookie = '''s=10ht15dh2y; xq_a_token=5e47e2777e3b08d99725fe0f9f78815eb1cb8374; xqat=5e47e2777e3b08d99725fe0f9f78815eb1cb8374; xq_r_token=c38fedb2680c6b923eb4c87f16ebf19f574c3eca; xq_is_login=1; u=6585534947; xq_token_expire=Sun%20Nov%2015%202015%2009%3A14%3A02%20GMT%2B0800%20(CST); bid=73fe343eeb79fd513ae47464f938acf9_ig040t46; snbim_minify=true; __utmt=1; __utma=1.2082135748.1445390046.1445497172.1445504051.8; __utmb=1.14.10.1445504051; __utmc=1; __utmz=1.1445390046.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); Hm_lvt_1db88642e346389874251b5a1eded6e3=1445390044; Hm_lpvt_1db88642e346389874251b5a1eded6e3=1445506132'''
        headers = {"User-Agent":"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6","Cookie":cookie }
        req = urllib2.Request( url, headers = headers)
        try:
            content = urllib2.urlopen(req).read()
        except Exception as e:
            print(e)
            #return
        js = json.loads(content)
        #print js
        industries = js['industries']
        #print industries
        if industries:
            for industry in industries:
                for user in industry['users']:
                    print(user['id'], user['screen_name'].encode('utf-8'))
                    f.write((str(user['id'])) + ' ' + (user['screen_name']).encode('utf-8') + '\n')
                    #f.write(p[0].get('value').encode('utf-8') + ' ' + p[1].get('value').encode('utf-8') + '\n')
class stock:
    """Lightweight record for one listed company (code, name, industry)."""

    # Class-level fallbacks, kept for parity with the original definition.
    _id = ''
    _name = ''
    _industry = ''

    def __init__(self, id, name, industry):
        # Per-instance values shadow the class-level defaults above.
        self._id, self._name, self._industry = id, name, industry
def pawner(day, t2):
    """Crawl every user's timeline for t2 consecutive days, starting ``day``
    days ago, tally per-stock mention counts, and write score/industry text
    files plus a new column in stock.xls for each day.

    NOTE(review): contains several unported Python 2 constructs (a bare
    ``print`` statement, ``dict.has_key``, cmp-style ``sorted``) — this
    function cannot run under Python 3 as written.
    """
    today = date.today()
    delta = -1
    os.mkdir('corpus')
    while 1:
        f = open('id.txt', 'r')
        delta += 1
        if delta >= t2:
            break
        yesterday1 = today - timedelta(days = day - delta)
        yesterday = datetime.strftime(yesterday1, "%Y-%m-%d")
        score_file = 'score' + yesterday + '.txt'
        industry_file = 'industry' + yesterday + '.txt'
        #ff = open('score' + yesterday + '.txt', 'r')
        # Mention counter: stock index -> count for this day.
        d = {}
        # NOTE(review): Python 2 print statement — SyntaxError on Python 3.
        print score_file
        vstock = []
        #ff = open('stock.txt', 'r')
        # Load the stock universe from the spreadsheet (rows 0-1 are headers).
        wb = xlrd.open_workbook('stock.xls')
        sh = wb.sheet_by_name('stock')
        for rownum in range(sh.nrows):
            if rownum < 2:
                continue
            s = stock(str(sh.cell(rownum, 0).value), sh.cell(rownum, 1).value.encode('utf-8'), sh.cell(rownum, 2).value.encode('utf-8'))
            vstock.append(s)
        print(len(vstock))
        print(repr(vstock[0]._name))
        # Page through every user's timeline until posts predate the target day.
        while 1:
            try:
                line = f.readline()
                # user = str(i)
                if not line:
                    break
                array = line[:-1].split(' ')
                user = array[0]
                print(array[0], array[1])
                #user = "1676206424"
                page = 1
                while 1:
                    url = "http://xueqiu.com/" + user + "?page=" + str(page)
                    ret = start(url, d, yesterday1, vstock)
                    if ret == 0:
                        #print i
                        break
                    page = page + 1
                    time.sleep(2)
            except Exception as e:
                print(e)
                continue
            #break
            #i = i + 1
            #if i >=9999999999:
            # break
        f.close()
        ff = open(score_file, 'w')
        industry_p = open(industry_file, 'w')
        # Append today's counts as a new column of stock.xls.
        rb = open_workbook('stock.xls')
        rs = rb.sheet_by_name('stock')
        wb = copy(rb)
        ws = wb.get_sheet(0)
        ncol = rs.ncols
        ws.write(1, ncol, yesterday)
        industry_d = {}
        # NOTE(review): Python 2 cmp-comparator; Python 3 requires key=...
        t = sorted(d.items(), lambda x, y: cmp(x[1], y[1]), reverse=True)
        for key in t:
            print(str(vstock[key[0]]._name) + '%' + str(vstock[key[0]]._industry) + '%'+ str(key[1]) + '\n')
            ff.write(str(vstock[key[0]]._name) + '%' + str(vstock[key[0]]._industry) + '%'+ str(key[1]) + '\n')
            # NOTE(review): has_key was removed in Python 3.
            if industry_d.has_key(vstock[key[0]]._industry):
                industry_d[vstock[key[0]]._industry] += 1
            else:
                industry_d[vstock[key[0]]._industry] = 1
            ws.write(key[0] + 2, ncol, key[1])
        t = sorted(industry_d.items(), lambda x, y: cmp(x[1], y[1]), reverse=True)
        for key in t:
            print(str(key[0]) + '%' + str(key[1]) + '\n')
            industry_p.write(str(key[0]) + '%' + str(key[1]) + '\n')
        print(industry_d)
        wb.save('stock.xls')
    browser.close()
    browser.quit()
    # timer = threading.Timer(7200, pawner)
    # timer.start()
if __name__ == "__main__":
#nltk.download()
#negids = movie_reviews.fileids('neg')
#posids = movie_reviews.fileids('pos')
#print 1
## timer = threading.Timer(7200, pawner)
# timer.start()
t = int(sys.argv[1])
t2 = int(sys.argv[2])
#get_id()
pawner(t, t2)
|
[
"[email protected]"
] | |
291f1107e0a99ce49de7bd1a42bab6e7fa9b9073
|
ffae55f50f9eb0ae028d9f46cebea565f3700585
|
/18/VAJets/PKUTreeMaker/test/CrabJobsSrc/MC/crab3_analysisWZ_v1.py
|
7d3a2cd976ff0802af00a8aafe4ae252256a8d2a
|
[] |
no_license
|
JINGFFF/test
|
57a92eb2c3143bcfa5776fc87d3ff16ff7cdc04b
|
d48c2be6387dfaff3eb37e28ff116c91c3eaf67e
|
refs/heads/master
| 2021-02-06T21:00:52.184508 | 2020-04-26T04:35:04 | 2020-04-26T04:35:04 | 243,942,543 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,197 |
py
|
from WMCore.Configuration import Configuration
config = Configuration()
config.section_("General")
config.General.requestName = 'WZ_v1_2'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.maxMemoryMB = 3000
config.JobType.inputFiles = ['Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFPuppi.txt']
config.JobType.psetName = 'analysis_mc.py'
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
##config.Data.outputPrimaryDataset = 'VBS_WGAMMA_94X'
config.Data.inputDataset = '/WZ_TuneCUETP8M1_13TeV-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3_ext1-v2/MINIAODSIM'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 2
config.Data.totalUnits = -1
config.Data.publication = False
config.Data.outputDatasetTag = 'WZ_v1_2'
config.section_("Site")
config.Site.storageSite = 'T2_CN_Beijing'
|
[
"[email protected]"
] | |
5396d59485edcffb1060921d5fc348209d891fe0
|
b13a326c8aac68f72c71169187a4aa8d4fe1438f
|
/environment/envs/icra.py
|
eaa3aafeecc909022ff8d9a459423e63e37e2866
|
[] |
no_license
|
zy10zm/Pulsar
|
9f1d9abdf90d94e80c6dba2a02630bfe4b4e2115
|
714ee2d78577e59077af7c0f890e639879490eb8
|
refs/heads/master
| 2023-02-22T20:26:42.995175 | 2021-01-23T04:35:38 | 2021-01-23T04:35:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,882 |
py
|
import os, sys
import numpy as np
import logging
from copy import deepcopy
from mujoco_worldgen import Floor
from mujoco_worldgen import ObjFromXML
from mujoco_worldgen.util.sim_funcs import qpos_idxs_from_joint_prefix, qvel_idxs_from_joint_prefix
from environment.worldgen.battlefield import Battlefield
from environment.worldgen.builder import WorldBuilder
from environment.worldgen.core import WorldParams
from environment.worldgen.env import Env
from environment.module.agents import Agents
from environment.wrappers.util_w import DiscardMujocoExceptionEpisodes, DiscretizeActionWrapper, AddConstantObservationsWrapper, ConcatenateObsWrapper
from environment.wrappers.lidar import Lidar
from environment.wrappers.multi_agent import SplitMultiAgentActions, SplitObservations, SelectKeysWrapper
from environment.wrappers.line_of_sight import AgentAgentObsMask2D
from environment.wrappers.buff import BuffWrapper
from environment.wrappers.collision import CollisionWrapper
from environment.wrappers.health import HealthWrapper
from environment.wrappers.prep import PrepWrapper
from environment.wrappers.projectile import ProjectileWrapper
from environment.wrappers.outcome import OutcomeWrapper
from environment.wrappers.no_enter_zone import NoEnterZoneWrapper
from environment.objects.lidarsites import LidarSites
class IcraBase(Env):
    '''
    Icra base environment.
    Args:
        horizon (int): Number of steps agent gets to act
        n_substeps (int): Number of mujoco simulation steps per outer environment time-step
        n_agents (int): number of agents in the environment
        mjco_ts (float): seconds for one mujoco simulation step
        action_lims (float tuple): lower and upper limit of mujoco actions
        deterministic_mode (bool): if True, seeds are incremented rather than randomly sampled.
        meshdir (string): directory for meshes
        texturedir (string): directory for textures
        set_action (function): function for setting actions
        env_no (int): number for environment file
    '''
    def __init__(self, horizon=250, n_substeps=3, n_agents=2, mjco_ts=0.002,
                 action_lims=(-200.0, 200.0), deterministic_mode=False,
                 meshdir="assets/stls", texturedir="assets/texture",
                 set_action=None,
                 env_no=1, **kwargs):
        super().__init__(get_sim=self._get_sim,
                         get_obs=self._get_obs,
                         action_space=tuple(action_lims),
                         horizon=horizon,
                         set_action=set_action,
                         deterministic_mode=deterministic_mode)
        self.env_no = env_no
        self.mjco_ts = mjco_ts
        self.n_agents = n_agents
        self.metadata['n_actors'] = n_agents
        self.horizon = horizon
        self.n_substeps = n_substeps
        self.kwargs = kwargs
        # Pluggable modules; each contributes world-building, sim-modification
        # and observation steps (see _get_sim/_get_obs below).
        self.modules = []
        self.meshdir = meshdir
        self.texturedir = texturedir
        # Battlefield footprint used for placement — presumably millimetres
        # (8080 x 4480 matches the RoboMaster ICRA arena); TODO confirm units.
        self.placement_size = (8080, 4480)
    def add_module(self, module):
        """Register a module participating in build/modify/observe steps."""
        self.modules.append(module)
    def _get_obs(self, sim):
        '''
        Loops through modules, calls their observation_step functions, and
        adds the result to the observation dictionary.
        '''
        obs = {}
        for module in self.modules:
            obs.update(module.observation_step(self, self.sim))
        return obs
    def _get_sim(self, seed):
        '''
        Calls build_world_step and then modify_sim_step for each module. If
        a build_world_step failed, then restarts.
        '''
        world_params = WorldParams(size=(self.placement_size[0], self.placement_size[1], 100),
                                   num_substeps=self.n_substeps)
        successful_placement = False
        failures = 0
        # Retry world construction until every module places successfully.
        while not successful_placement:
            if (failures + 1) % 10 == 0:
                logging.warning(f"Failed {failures} times in creating environment")
            builder = WorldBuilder(world_params, self.meshdir, self.texturedir, seed, env_no=self.env_no)
            battlefield = Battlefield()
            builder.append(battlefield)
            # Occupancy grid shared by modules during placement.
            self.placement_grid = np.zeros((self.placement_size[0], self.placement_size[1]))
            successful_placement = np.all([module.build_world_step(self, battlefield, self.placement_size)
                                           for module in self.modules])
            failures += 1
        sim = builder.get_sim()
        for module in self.modules:
            module.modify_sim_step(self, sim)
        return sim
    def get_ts(self):
        """Current outer-environment timestep."""
        return self.t
    def get_horizon(self):
        """Episode length in outer-environment steps."""
        return self.horizon
    def secs_to_steps(self, secs):
        """Convert wall-clock seconds of simulated time to outer steps."""
        return int(secs / (self.mjco_ts * self.n_substeps))
def make_env(deterministic_mode=False, n_agents=4, env_no=1, add_bullets_visual=False):
    '''
    Build the fully wrapped ICRA battlefield environment.

    Response time = 0.02 seconds
    Game time = 180 seconds
    Decisions = 180 / 0.02 = 9000
    Total steps = 9000
    Seconds per simulated step = 0.002 seconds
    Seconds for each run = 9000 * 0.002 = 18 seconds
    '''
    mjco_ts = 0.002
    n_substeps = 1
    # NOTE(review): horizon is 90000 although the docstring derives 9000
    # total decisions — confirm which is intended.
    horizon = 90000
    # Setup action functions
    motor_trans_max, motor_forw_max, motor_z_max = 2000.0, 3000.0, 47123.9
    action_scale = (motor_trans_max, motor_forw_max, motor_z_max)
    action_lims = (-1.0, 1.0)
    def icra_ctrl_set_action(sim, action):
        """
        For velocity actuators it copies the action into mujoco ctrl field.
        """
        if sim.model.nmocap > 0:
            _, action = np.split(action, (sim.model.nmocap * 7, ))
        if sim.data.ctrl is not None:
            # Scale each agent's 3 normalized actions by the motor limits.
            for a_idx in range(n_agents):
                for as_idx in range(3):
                    sim.data.ctrl[a_idx*3 + as_idx] = action[a_idx*3 + as_idx] * action_scale[as_idx]
    # Create base environment for battlefield
    env = IcraBase(n_agents=n_agents,
                   n_substeps=n_substeps,
                   horizon=horizon,
                   mjco_ts=mjco_ts,
                   action_lims=action_lims,
                   deterministic_mode=deterministic_mode,
                   env_no=env_no,
                   set_action=icra_ctrl_set_action,
                   meshdir=os.path.join(os.getcwd(), "environment", "assets", "stls"),
                   texturedir=os.path.join(os.getcwd(), "environment", "assets", "textures"))
    # Add bullets just for visualization
    nbullets = 25
    env.add_module(Agents(n_agents, action_scale=action_scale, add_bullets_visual=add_bullets_visual, nbullets=nbullets))
    env.reset()
    # Wrapper order matters: PrepWrapper must always be on-top
    env = PrepWrapper(env)
    env = BuffWrapper(env)
    env = CollisionWrapper(env)
    env = ProjectileWrapper(env, add_bullets_visual, nbullets)
    env = NoEnterZoneWrapper(env)
    # OutcomeWrapper must always be lowest, after HealthWrapper
    env = HealthWrapper(env)
    env = OutcomeWrapper(env)
    keys_self = ['agent_qpos_qvel']
    global_obs = ['F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'Agent:buff', 'colli_dmg',
                  'proj_dmg', 'nprojectiles', 'agents_health', 'agent_teams',
                  'agent_local_qvel']
    keys_external = deepcopy(global_obs)
    keys_copy = deepcopy(global_obs)
    keys_mask_self = []
    keys_mask_external = []
    env = SplitMultiAgentActions(env)
    #env = DiscretizeActionWrapper(env, 'action_movement')
    env = SplitObservations(env, keys_self + keys_mask_self, keys_copy=keys_copy)
    env = DiscardMujocoExceptionEpisodes(env)
    env = SelectKeysWrapper(env, keys_self=keys_self,
                            keys_external=keys_external,
                            keys_mask=keys_mask_self + keys_mask_external,
                            flatten=False)
    return env
|
[
"[email protected]"
] | |
38cce542df0415d2d792a37b8355ec7ce0f789d3
|
9e2d467de2d665f41dc94799f0acb98479571922
|
/_error.py
|
cf399a395d6e832d683a0de18251cbd067d4a2f6
|
[] |
no_license
|
pytsite/plugin-geo_ip
|
c63ecd12c95004c05fdae76b20a9343b52fb923f
|
db71e67651eb57b6ca76136d0014eaadf2cb6ffb
|
refs/heads/master
| 2021-10-23T09:29:32.580289 | 2019-03-16T22:04:28 | 2019-03-16T22:04:28 | 112,030,619 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 154 |
py
|
"""PytSite GeoIP Errors.
"""
__author__ = 'Oleksandr Shepetko'
__email__ = '[email protected]'
__license__ = 'MIT'
class ResolveError(Exception):
    """Raised when a GeoIP lookup cannot resolve the requested address."""
|
[
"[email protected]"
] | |
eb3a8d5c498c7474673b63e103c93f49315218fa
|
3ff9821b1984417a83a75c7d186da9228e13ead9
|
/No_0122_Best Time to Buy and Sell Stock II/by_dynamic_programming.py
|
5874db8b00a7a87dcea7b16d8be839baf34edc99
|
[
"MIT"
] |
permissive
|
brianchiang-tw/leetcode
|
fd4df1917daef403c48cb5a3f5834579526ad0c2
|
6978acfb8cb767002cb953d02be68999845425f3
|
refs/heads/master
| 2023-06-11T00:44:01.423772 | 2023-06-01T03:52:00 | 2023-06-01T03:52:00 | 222,939,709 | 41 | 12 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,646 |
py
|
'''
Description:
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete as many transactions as you like (i.e., buy one and sell one share of the stock multiple times).
Note: You may not engage in multiple transactions at the same time (i.e., you must sell the stock before you buy again).
Example 1:
Input: [7,1,5,3,6,4]
Output: 7
Explanation: Buy on day 2 (price = 1) and sell on day 3 (price = 5), profit = 5-1 = 4.
Then buy on day 4 (price = 3) and sell on day 5 (price = 6), profit = 6-3 = 3.
Example 2:
Input: [1,2,3,4,5]
Output: 4
Explanation: Buy on day 1 (price = 1) and sell on day 5 (price = 5), profit = 5-1 = 4.
Note that you cannot buy on day 1, buy on day 2 and sell them later, as you are
engaging multiple transactions at the same time. You must sell before buying again.
Example 3:
Input: [7,6,4,3,1]
Output: 0
Explanation: In this case, no transaction is done, i.e. max profit = 0.
'''
from typing import List
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Maximum profit with unlimited, non-overlapping buy/sell trades.

        Classic state-machine DP: track the best cash balance while holding
        a share and while flat, updating both simultaneously per day.
        Runs in O(n) time and O(1) space.
        """
        if not prices:
            return 0
        hold = -float('inf')  # best balance while holding a share (can't hold on day 0 pre-buy)
        flat = 0              # best balance while not holding
        for price in prices:
            # Simultaneous update: both branches read the previous day's values.
            hold, flat = max(hold, flat - price), max(flat, hold + price)
        # The answer is always in the flat state (no reason to end holding).
        return flat
# n : the length of input list, prices.
## Time Complexity: O( n )
#
# The overhead in time is the cost of for loop, which is of O( n )
## Space Complexity: O( 1 )
#
# The overhead in space is the storage for loop index and temporary vairable, which is of O( 1 )
from collections import namedtuple
TestEntry = namedtuple('TestEntry', 'stock_sequence')
def test_bench():
test_data = [
TestEntry( stock_sequence = [7,1,5,3,6,4] ),
TestEntry( stock_sequence = [1,2,3,4,5] ),
TestEntry( stock_sequence = [7,6,4,3,1] ),
]
# expected output:
'''
7
4
0
'''
for t in test_data:
print( Solution().maxProfit( prices = t.stock_sequence) )
return
if __name__ == '__main__':
test_bench()
|
[
"[email protected]"
] | |
155e6f8d2612353259928900fac73b905ca32da0
|
e5d8b15cbd899283d6ead4742334e997db06d6e0
|
/web/config/settings/base.py
|
37124bc82aab5552b2646ceca937c109e33f6676
|
[] |
no_license
|
Maliaotw/dashboard-django
|
628d777d88b61dad7c3c551b72979b38c2065e15
|
cabbc3e6e8156510dd4ba91ffe1066c9cb040eac
|
refs/heads/main
| 2023-02-16T02:52:02.169754 | 2021-01-12T03:13:55 | 2021-01-12T03:13:55 | 289,612,737 | 0 | 0 | null | 2021-01-12T03:13:56 | 2020-08-23T04:05:36 |
JavaScript
|
UTF-8
|
Python
| false | false | 5,390 |
py
|
"""
Django settings for web project.
Generated by 'django-admin startproject' using Django 3.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from .conf import load_user_config
from pathlib import Path
CONFIG = load_user_config()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent.parent
# vsphere_monitor/
APPS_DIR = ROOT_DIR / "web"
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# PROJECT_DIR = os.path.dirname(BASE_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = CONFIG.DEBUG
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# SECURITY WARNING: keep the secret key used in production secret!
# from django.core.management.utils import get_random_secret_key
# get_random_secret_key()
SECRET_KEY = CONFIG.SECRET_KEY
ALLOWED_HOSTS = ['*']
# Application definition
DJANGO_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
# "django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
]
THIRD_PARTY_APPS = [
'rest_framework',
'django_filters',
'widget_tweaks',
]
LOCAL_APPS = [
'app.apps.AppConfig',
'common.apps.CommonConfig',
'authentication.apps.AuthenticationConfig',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
"DIRS": [str(APPS_DIR / "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
"NAME": str(APPS_DIR / "db.sqlite3"),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
str(APPS_DIR / "static"),
)
STATIC_ROOT = str(APPS_DIR / "data" / "static")
# Media files (File, ImageField) will be save these
MEDIA_URL = '/media/'
# MEDIA_ROOT = os.path.join(BASE_DIR, "data", 'media')
MEDIA_ROOT = str(APPS_DIR / "data" / "media")
LOGIN_URL = "/login/"
# SESSION
SESSION_COOKIE_AGE = 60 * 60 # 設置session過期時間為60分鐘
SESSION_EXPIRE_AT_BROWSER_CLOSE = True # 當瀏覽器被關閉的時候將session失效,但是不能刪除數據庫的session數據
SESSION_SAVE_EVERY_REQUEST = True # 每次請求都要保存一下session
# LOGGING
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'normal': {
'format': '[%(levelname)s] %(asctime)s | %(name)s:%(lineno)d | %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler', # Default logs to stderr
'formatter': 'normal', # use the above "normal" formatter
}
},
'loggers': {
'': { # means "root logger"
'handlers': ['console'], # use the above "console" handler
'level': 'DEBUG', # logging level
},
},
}
|
[
"[email protected]"
] | |
eecdc90449ea3bbc47e90548ca8004f0872498f7
|
ac03d9f3a8c2e6209940ae30900e9b2e32084dce
|
/main.py
|
9ef8df5eafeff0357882459573d9ee1b460c71e4
|
[
"Apache-2.0"
] |
permissive
|
cls1991/github-projects-remover
|
29f28e0a23b596a7e07b0c07b65092626b42de05
|
d924100fedccbb0fd6e20365d4f4df98bf04b292
|
refs/heads/master
| 2022-12-11T12:31:59.498180 | 2019-10-23T14:22:14 | 2019-10-23T14:22:14 | 84,054,255 | 0 | 0 |
Apache-2.0
| 2019-10-23T14:22:16 | 2017-03-06T09:25:51 |
Python
|
UTF-8
|
Python
| false | false | 1,042 |
py
|
# coding: utf8
import os
# 切换工作目录到项目根目录
project = os.path.split(os.path.realpath(__file__))[0]
os.chdir(project)
from core.github import GithubSample
if __name__ == '__main__':
gs = GithubSample('8709c9b9d01ec8e7388378c3992eff61aa7df813')
# pretty_print(gs.query_api_info())
# pretty_print(gs.query_user_info('cls1991'))
# pretty_print(gs.query_user_repos('cls1991'))
# print(gs.star_repo('torvalds', 'linux'))
"""
star all forked repos, then remove all, for personal use!
"""
user_repos = gs.query_user_repos('cls1991', page=1, per_page=50)
# pretty_print(user_repos)
for repo in user_repos:
if repo['fork']:
repo_info = gs.query_repo_info('cls1991', repo['name'])
if 'source' not in repo_info:
continue
status_code = gs.star_repo(repo_info['source']['owner']['login'], repo['name'])
print(status_code)
if status_code == 204:
gs.remove_repo('cls1991', repo['name'])
|
[
"[email protected]"
] | |
f9d8898f58752cd3781b1c1101eefbc33a20667c
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/perf/CascadeMaskRCNN_iflytek_for_PyTorch/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py
|
853289e67b4a5019eddfc1bbefb0b44e53dd49e2
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 |
Apache-2.0
| 2022-10-15T09:29:12 | 2022-04-20T04:11:18 |
Python
|
UTF-8
|
Python
| false | false | 3,714 |
py
|
_base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
pretrained='torchvision://resnet101',
backbone=dict(depth=101),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(dataset=dict(pipeline=train_pipeline)))
evaluation = dict(interval=24, metric=['bbox', 'segm'])
|
[
"[email protected]"
] | |
3d825b0e036a2c4f6a56c755ea8fe0225bc2d1f8
|
6610ebe9141f00678851a6f068ec1e5458bf050c
|
/code/graph_keyboard.py
|
19a6ffbf2f3e96351320d674a186a385b8d5dedc
|
[
"MIT"
] |
permissive
|
iamrajee/bio_medical
|
e9cec1d9e12c04d87b893d0c12c92d3a1b8fb963
|
8d91cd3838f46685faa057f93f5d22f8e6c4187b
|
refs/heads/master
| 2020-05-04T23:47:45.595827 | 2019-09-17T17:14:19 | 2019-09-17T17:14:19 | 179,555,562 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,147 |
py
|
import matplotlib.pyplot as plt
import networkx as nx
G=nx.Graph()
# G.add_nodes_from([1,2,3,4,5,6,7,8,9,0],key="A")
# # G.add_edges_from([(1,2),(2,3),(3,4),(5,8),(9,1),(2,3),(4,6),(8,2),(7,3)])
# G.add_weighted_edges_from([(1,2,1),(2,3,2),(3,4,3),(5,8,4),(9,1,5),(2,3,6),(4,6,7),(8,2,8),(7,3,9)])
# keyboard_config = [
# ('1','2','3','4','5','6','7','8','9','0'),
# ('q','w','e','r','t','y','u','i','o','p'),
# ('a','s','d','f','g','h','j','k','l'),
# ('z','x','c','v','b','n','m'),
# ('\t\tspace\t\t','backspace','enter','save')
# ]
keyboard_config = [
('1','2','3'),
('q','w','e'),
('a','s','d'),
]
for t_ in range(len(keyboard_config)):
G.add_nodes_from(list(t))
for i in range(0,len(t)):
e=[(t[i],t[i+1],1) for i in range(0,len(t)-1)]
e.append((t[0],t[len(t)-1],1))
G.add_weighted_edges_from(e)
for i in range(0,len(t)):
print(G.nodes(data=True))
nx.draw(G)
plt.show()
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.