Dataset schema, reconstructed from the flattened viewer header (one row per column, with the dtype and value statistics reported by the viewer; "nullable" marks columns that contain ⌀ / null values):

| Column | Dtype | Stats |
|---|---|---|
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 3 to 616 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | sequence | lengths 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 115 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 to 10.2M |
| authors | sequence | lengths 1 to 1 |
| author_id | string | lengths 1 to 132 |
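The records below are sample rows shown in the viewer's flattened format. As a rough sketch of how a dataset with this schema could be consumed (the dataset id `user/python-code-dataset` is a placeholder, not the real Hub path), the `datasets` library can stream rows and filter on the metadata columns:

```python
from datasets import load_dataset

# Hypothetical dataset id; substitute the actual Hub path for this dataset.
ds = load_dataset("user/python-code-dataset", split="train", streaming=True)

# Keep only permissively licensed, human-written files under 100 kB.
for row in ds:
    if (row["license_type"] == "permissive"
            and not row["is_vendor"]
            and not row["is_generated"]
            and row["length_bytes"] < 100_000):
        print(row["repo_name"], row["path"], row["length_bytes"])
        break
```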
7b881d3fd3b5a0506e690102189326ec26164212 | 46bd3e3ba590785cbffed5f044e69f1f9bafbce5 | /env/lib/python3.8/site-packages/pip/_internal/operations/freeze.py | 4173161009fc9748ecc149d7cd925afb5657aad9 | [] | no_license | adamkluk/casper-getstarted | a6a6263f1547354de0e49ba2f1d57049a5fdec2b | 01e846621b33f54ed3ec9b369e9de3872a97780d | refs/heads/master | 2023-08-13T11:04:05.778228 | 2021-09-19T22:56:59 | 2021-09-19T22:56:59 | 408,036,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:4f22ef5d3e19aa9222f31f17fd34ec80127be881b9e0289d265c4620705b9813
size 10556
| [
"[email protected]"
] | |
1fe809787c4d22f427ef3c91f31a6d7b5004f8a3 | 3ca30ff28e4233ff815ebc2525ba6409dbf7ade6 | /changelogs/custom/pypi/django.py | a0b30a1bc3ab3f9b485cb6575052bc4a75a497b5 | [
"MIT"
] | permissive | hackebrot/changelogs | e29b7ce1b6b799fc1a5f1871e29d7d7ac787ad48 | c5bf363a5b7efd2640ba404b217a37661ef220c1 | refs/heads/master | 2021-01-20T02:07:27.028697 | 2017-04-25T15:05:19 | 2017-04-25T15:05:19 | 89,375,742 | 1 | 0 | null | 2017-04-25T15:18:14 | 2017-04-25T15:18:14 | null | UTF-8 | Python | false | false | 392 | py | def get_head(line, releases, **kwargs):
    for release in releases:
        if "Django {} release notes".format(release) in line:
            return release
    return False


def get_urls(releases, **kwargs):
    urls = []
    for release in releases:
        urls.append("https://raw.githubusercontent.com/django/django/master/docs/releases/{v}.txt".format(v=release))
    return urls, []
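# A hypothetical usage sketch (the release numbers are illustrative):
# urls, _ = get_urls(releases=["2.0", "2.1"])
# release = get_head("Django 2.0 release notes", releases=["2.0", "2.1"])  # returns "2.0"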
| [
"[email protected]"
] | |
fc7a978cdca9b14608013239456e0f1fea702e7b | f57529f95a0fd10676f46063fdcd273fb5a81427 | /boj/03001-04000/3181.py | 0cf1f7d1f5d5af6ca60a5dd002df336fe82466ba | [] | no_license | hoyasmh/PS | a9b83b0044e483586590c9b7c6bf8a77236b67e7 | 6bbaa0ce77b2726f6af782af049d73720820f761 | refs/heads/master | 2023-04-23T10:43:27.349785 | 2021-05-17T13:43:53 | 2021-05-17T13:43:53 | 311,239,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | l=['i','pa','te','ni','niti','a','ali','nego','no','ili']
s = input().split()
# BOJ 3181: print the acronym of the sentence, skipping the listed Croatian
# conjunctions unless one of them is the first word.
for i in range(len(s)):
    if i == 0 or s[i] not in l:
        print(s[i][0].upper(), end='')
| [
"[email protected]"
] | |
b6e50f0069a0ad20140650a86266d623d882464d | 167eb71c690e43e06b943a04a031f9e662ac7521 | /acq4/devices/Scanner/scan_program/step.py | 78e088f79c49f0e555d3506b82810b3513d8e648 | [
"MIT"
] | permissive | histed/acq4 | 8e0a5dedc74c2ea063477e4b0027fbade3a72e61 | ea0242d49245b81ab218d8d3e0187138b136ded5 | refs/heads/develop | 2021-01-19T23:46:54.999081 | 2017-03-24T22:48:52 | 2017-03-24T22:48:52 | 89,023,143 | 0 | 6 | null | 2017-04-21T20:58:33 | 2017-04-21T20:58:32 | null | UTF-8 | Python | false | false | 589 | py | import numpy as np
import acq4.pyqtgraph as pg
from .component import ScanProgramComponent
#class StepScanComponent(ScanProgramComponent):
#"""
#Steps the laser once to a specific position.
#"""
#name = 'step'
#def generateVoltageArray(self, arr, startInd, stopInd):
#pos = cmd['pos']
#if pos == None:
#pos = self.dev.getOffVoltage()
#else:
#pos = self.mapToScanner(pos[0], pos[1])
#lastPos = pos
#arr[0, startInd] = pos[0]
#arr[1, startInd] = pos[1]
#return startInd
| [
"[email protected]"
] | |
e564a35ab740baf58a12135b49025e9f1ac47b4b | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/desktopvirtualization/v20201102preview/_inputs.py | 91b02e54c08da8927105e2cb9d80bf145a1f5552 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,215 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'MsixPackageApplicationsArgs',
'MsixPackageDependenciesArgs',
'RegistrationInfoArgs',
]
@pulumi.input_type
class MsixPackageApplicationsArgs:
def __init__(__self__, *,
app_id: Optional[pulumi.Input[str]] = None,
app_user_model_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
icon_image_name: Optional[pulumi.Input[str]] = None,
raw_icon: Optional[pulumi.Input[str]] = None,
raw_png: Optional[pulumi.Input[str]] = None):
"""
Schema for MSIX Package Application properties.
:param pulumi.Input[str] app_id: Package Application Id, found in appxmanifest.xml.
:param pulumi.Input[str] app_user_model_id: Used to activate Package Application. Consists of Package Name and ApplicationID. Found in appxmanifest.xml.
:param pulumi.Input[str] description: Description of Package Application.
:param pulumi.Input[str] friendly_name: User friendly name.
:param pulumi.Input[str] icon_image_name: User friendly name.
:param pulumi.Input[str] raw_icon: the icon a 64 bit string as a byte array.
:param pulumi.Input[str] raw_png: the icon a 64 bit string as a byte array.
"""
if app_id is not None:
pulumi.set(__self__, "app_id", app_id)
if app_user_model_id is not None:
pulumi.set(__self__, "app_user_model_id", app_user_model_id)
if description is not None:
pulumi.set(__self__, "description", description)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if icon_image_name is not None:
pulumi.set(__self__, "icon_image_name", icon_image_name)
if raw_icon is not None:
pulumi.set(__self__, "raw_icon", raw_icon)
if raw_png is not None:
pulumi.set(__self__, "raw_png", raw_png)
@property
@pulumi.getter(name="appId")
def app_id(self) -> Optional[pulumi.Input[str]]:
"""
Package Application Id, found in appxmanifest.xml.
"""
return pulumi.get(self, "app_id")
@app_id.setter
def app_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_id", value)
@property
@pulumi.getter(name="appUserModelID")
def app_user_model_id(self) -> Optional[pulumi.Input[str]]:
"""
Used to activate Package Application. Consists of Package Name and ApplicationID. Found in appxmanifest.xml.
"""
return pulumi.get(self, "app_user_model_id")
@app_user_model_id.setter
def app_user_model_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_user_model_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of Package Application.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
User friendly name.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter(name="iconImageName")
def icon_image_name(self) -> Optional[pulumi.Input[str]]:
"""
User friendly name.
"""
return pulumi.get(self, "icon_image_name")
@icon_image_name.setter
def icon_image_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "icon_image_name", value)
@property
@pulumi.getter(name="rawIcon")
def raw_icon(self) -> Optional[pulumi.Input[str]]:
"""
the icon a 64 bit string as a byte array.
"""
return pulumi.get(self, "raw_icon")
@raw_icon.setter
def raw_icon(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "raw_icon", value)
@property
@pulumi.getter(name="rawPng")
def raw_png(self) -> Optional[pulumi.Input[str]]:
"""
the icon a 64 bit string as a byte array.
"""
return pulumi.get(self, "raw_png")
@raw_png.setter
def raw_png(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "raw_png", value)
@pulumi.input_type
class MsixPackageDependenciesArgs:
def __init__(__self__, *,
dependency_name: Optional[pulumi.Input[str]] = None,
min_version: Optional[pulumi.Input[str]] = None,
publisher: Optional[pulumi.Input[str]] = None):
"""
Schema for MSIX Package Dependencies properties.
:param pulumi.Input[str] dependency_name: Name of package dependency.
:param pulumi.Input[str] min_version: Dependency version required.
:param pulumi.Input[str] publisher: Name of dependency publisher.
"""
if dependency_name is not None:
pulumi.set(__self__, "dependency_name", dependency_name)
if min_version is not None:
pulumi.set(__self__, "min_version", min_version)
if publisher is not None:
pulumi.set(__self__, "publisher", publisher)
@property
@pulumi.getter(name="dependencyName")
def dependency_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of package dependency.
"""
return pulumi.get(self, "dependency_name")
@dependency_name.setter
def dependency_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dependency_name", value)
@property
@pulumi.getter(name="minVersion")
def min_version(self) -> Optional[pulumi.Input[str]]:
"""
Dependency version required.
"""
return pulumi.get(self, "min_version")
@min_version.setter
def min_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "min_version", value)
@property
@pulumi.getter
def publisher(self) -> Optional[pulumi.Input[str]]:
"""
Name of dependency publisher.
"""
return pulumi.get(self, "publisher")
@publisher.setter
def publisher(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "publisher", value)
@pulumi.input_type
class RegistrationInfoArgs:
def __init__(__self__, *,
expiration_time: Optional[pulumi.Input[str]] = None,
registration_token_operation: Optional[pulumi.Input[Union[str, 'RegistrationTokenOperation']]] = None,
token: Optional[pulumi.Input[str]] = None):
"""
Represents a RegistrationInfo definition.
:param pulumi.Input[str] expiration_time: Expiration time of registration token.
:param pulumi.Input[Union[str, 'RegistrationTokenOperation']] registration_token_operation: The type of resetting the token.
:param pulumi.Input[str] token: The registration token base64 encoded string.
"""
if expiration_time is not None:
pulumi.set(__self__, "expiration_time", expiration_time)
if registration_token_operation is not None:
pulumi.set(__self__, "registration_token_operation", registration_token_operation)
if token is not None:
pulumi.set(__self__, "token", token)
@property
@pulumi.getter(name="expirationTime")
def expiration_time(self) -> Optional[pulumi.Input[str]]:
"""
Expiration time of registration token.
"""
return pulumi.get(self, "expiration_time")
@expiration_time.setter
def expiration_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "expiration_time", value)
@property
@pulumi.getter(name="registrationTokenOperation")
def registration_token_operation(self) -> Optional[pulumi.Input[Union[str, 'RegistrationTokenOperation']]]:
"""
The type of resetting the token.
"""
return pulumi.get(self, "registration_token_operation")
@registration_token_operation.setter
def registration_token_operation(self, value: Optional[pulumi.Input[Union[str, 'RegistrationTokenOperation']]]):
pulumi.set(self, "registration_token_operation", value)
@property
@pulumi.getter
def token(self) -> Optional[pulumi.Input[str]]:
"""
The registration token base64 encoded string.
"""
return pulumi.get(self, "token")
@token.setter
def token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "token", value)
| [
"[email protected]"
] | |
eda7057bb4a064142794dc4168db8c4c3d7075ef | 18825807a4cf373f00419e46ac70566d17115e9e | /problems/algo/convert_a_number_to_hexadecimal.py | b8d4588cc5bd2a0833c3299792178830a61a9c56 | [] | no_license | StefanRankovic/leetcode | 51154d7297b4674c62e481c6c13016097207b4d0 | bbed81b50acaef025186648c61110dbf65e5f6cb | refs/heads/master | 2023-02-20T06:16:02.913457 | 2021-01-24T09:42:50 | 2021-01-24T09:42:50 | 266,200,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | class Solution:
    def toHex(self, num: int) -> str:
        # Negative numbers are represented as 32-bit two's complement.
        if num < 0:
            num += 2 ** 32
        elif num == 0:
            return '0'
        mapping = { 0:'0', 1:'1', 2:'2', 3:'3', 4:'4', 5:'5', 6:'6', 7:'7', 8:'8', 9:'9', 10:'a', 11:'b', 12:'c', 13:'d', 14:'e', 15:'f' }
        res = []
        # Peel off one hex digit (4 bits) at a time, least significant first.
        while num > 0:
            res.append(mapping[num & 0xF])
            num >>= 4
        return ''.join(res[::-1])
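# Quick sanity checks (standard LeetCode examples):
# Solution().toHex(26)  # -> '1a'
# Solution().toHex(-1)  # -> 'ffffffff'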
| [
"[email protected]"
] | |
599d2ccb94c5166eb0e397169f084f8ac3d1816c | 0f234d1029e89309994331a68a999e2359bb08b0 | /tslearn/neural_network/__init__.py | 1eed30502447a5b9d10b18cf9d50d8c3bec683cf | [
"BSD-2-Clause"
] | permissive | tslearn-team/tslearn | 8282698361bfb42183466eaaa4c6da1d107e9513 | e9b3ecca5f56bc8ffab5a0106e2d41f17ae89109 | refs/heads/main | 2023-09-01T02:03:19.814166 | 2023-08-21T13:22:42 | 2023-08-21T13:22:42 | 90,264,407 | 1,687 | 198 | BSD-2-Clause | 2023-09-13T20:39:47 | 2017-05-04T13:08:13 | Python | UTF-8 | Python | false | false | 415 | py | """
The :mod:`tslearn.neural_network` module contains multi-layer perceptron
models for time series classification and regression.
These are straight-forward adaptations of scikit-learn models.
"""
from .neural_network import TimeSeriesMLPClassifier, TimeSeriesMLPRegressor
__author__ = 'Romain Tavenard romain.tavenard[at]univ-rennes2.fr'
__all__ = [
"TimeSeriesMLPClassifier", "TimeSeriesMLPRegressor"
]
| [
"[email protected]"
] | |
e3b7eb1b3827edcba78060e3051438f29d691b58 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02404/s960425293.py | 0de2e292bdd90651c2ea9948ed854c3590feb3e8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | while 1:
    (H, W) = [int(i) for i in input().split()]
    if H == W == 0:
        break
    # Draw an H x W frame: solid top and bottom rows, hollow middle rows.
    print('#'*W)
    for i in range(H-2):
        print('#'+'.'*(W-2)+'#')
    print('#'*W)
    print('')
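# Example: for the input lines "3 4" and "0 0", this prints a 3x4 frame:
# ####
# #..#
# ####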
| [
"[email protected]"
] | |
eda67a4d9a32dddcbfdbcf77327db29c4d47aaab | e2c120b55ab149557679e554c1b0c55126e70593 | /python/imagej/IsoView-GCaMP/lib/nuclei.py | 4aa3f3f0ade0c4510dc82814f55affa08fc27364 | [] | no_license | acardona/scripts | 30e4ca2ac87b9463e594beaecd6da74a791f2c22 | 72a18b70f9a25619b2dbf33699a7dc1421ad22c6 | refs/heads/master | 2023-07-27T14:07:37.457914 | 2023-07-07T23:13:40 | 2023-07-07T23:14:00 | 120,363,431 | 4 | 5 | null | 2023-05-02T11:20:49 | 2018-02-05T21:21:13 | Python | UTF-8 | Python | false | false | 10,474 | py | import os
from lib.dogpeaks import createDoG
from lib.synthetic import virtualPointsRAI
from lib.ui import showStack
from lib.util import newFixedThreadPool, Task
from lib.io import writeZip, ImageJLoader
from lib.converter import makeCompositeToRealConverter
from net.imglib2 import KDTree, FinalInterval
from net.imglib2.neighborsearch import RadiusNeighborSearchOnKDTree
from net.imglib2.view import Views
from net.imglib2.img.array import ArrayImgs
from net.imglib2.img import ImgView
from net.imglib2.converter import Converters
from net.imglib2.util import ImgUtil, Intervals
from net.imglib2.algorithm.math.ImgMath import compute, add, sub, maximum
from java.lang import Math
def doGPeaks(img, params):
# Calibration is 1,1,1, so returned peaks in pixel space coincide with calibrated space,
# no need for any adjustment of the peaks' positions.
dog = createDoG(img, params["calibration"], params["sigmaSmaller"], params["sigmaLarger"], params["minPeakValue"])
return dog.getSubpixelPeaks() # as RealPoint
def __makeMerge(params):
radius = params["searchRadius"]
def merge(nuclei, peaks2):
"""
nuclei: a dictionary of RealPoint, representing the average position,
vs the number of points averaged.
peaks: a list of RealPoint
Returns the updated nuclei, with possibly new nuclei, and the existing ones
having their coordinates (and counts of points averaged) updated.
"""
peaks1 = nuclei.keys()
search = RadiusNeighborSearchOnKDTree(KDTree(peaks1, peaks1))
for peak2 in peaks2:
search.search(peak2, radius, False)
n = search.numNeighbors()
if 0 == n:
# New nuclei not ever seen before
nuclei[peak2] = 1
else:
# Merge peak with nearest found nuclei, which should only be one given the small radius
peak1 = search.getSampler(0).get()
count = float(nuclei[peak1])
new_count = count + 1
fraction = count / new_count
for d in xrange(3):
peak1.setPosition(peak1.getDoublePosition(d) * fraction + peak2.getDoublePosition(d) / new_count, d)
nuclei[peak1] = new_count
# Check for more
if n > 1:
print "Ignoring %i additional closeby nuclei" % (n - 1)
# Return nuclei to enable a reduce operation over many sets of peaks
return nuclei
return merge
def findPeaks(img4D, params):
"""
img4D: a 4D RandomAccessibleInterval
params["frames"]: the number of consecutive time points to average
towards detecting peaks with difference of Gaussian.
Returns a list of lists of peaks found, one list per time point.
"""
frames = params["frames"]
# Work image: the current sum
sum3D = ArrayImgs.unsignedLongs([img4D.dimension(d) for d in [0, 1, 2]])
peaks = []
# Sum of the first set of frames
compute(add([Views.hyperSlice(img4D, 3, i) for i in xrange(frames)])).into(sum3D)
# Extract nuclei from first sum3D
peaks.append(doGPeaks(sum3D, params))
# Running sums: subtract the first and add the last
for i in xrange(frames, img4D.dimension(3), 1):
compute(add(sub(sum3D,
Views.hyperSlice(img4D, 3, i - frames)),
Views.hyperSlice(img4D, 3, i))) \
.into(sum3D)
# Extract nuclei from the updated sum3D
peaks.append(doGPeaks(sum3D, params))
return peaks
def mergePeaks(peaks, params):
# Cluster nearby nuclei detections:
# Make a KDTree from points
# For every point, measure distance to nearby points up to e.g. half a soma diameter
# and vote on neighboring points, weighed by distance.
# Points with more than X votes remain.
merged = reduce(__makeMerge(params), peaks[1:], {peak: 1 for peak in peaks[0]})
return merged
def filterNuclei(mergedPeaks, params):
"""
mergedPeaks: a dictionary of RealPoint vs count of DoG peaks averaged to make it.
params["min_count"]: the minimum number of detections to consider a mergedPeak valid.
Returns the list of accepted mergedPeaks.
"""
min_count = params["min_count"]
return [mergedPeak for mergedPeak, count in mergedPeaks.iteritems() if count > min_count]
def findNucleiOverTime(img4D, params, show=True):
"""
params["frames"]: number of time frames to average
params["calibration"]: e.g. [1.0, 1.0, 1.0]
params["somaDiameter"]: width of a soma, in pixels
params["minPeakValue"]: determine it by hand with e.g. difference of Gaussians sigma=somaDiameter/4 minus sigma=somaDiameter/2
params["sigmaSmaller"]: for difference of Gaussian to detect somas. Recommended somaDiameter / 4.0 -- in pixels
params["sigmaLarger"]: for difference of Gaussian to detect somas. Recommended somaDiameter / 2.0 -- in pixels
params["searchRadius"]: for finding nearby DoG peaks which are actually the same soma. Recommended somaDiameter / 3.0 -- in pixels
params["min_count"]: to consider only somas detected in at least min_count time points, i.e. their coordinates are the average
of at least min_count independent detections.
"""
peaks = findPeaks(img4D, params)
mergedPeaks = mergePeaks(peaks, params)
nuclei = filterNuclei(mergedPeaks, params)
# Show as a 3D volume with spheres
if show:
spheresRAI = virtualPointsRAI(nuclei, params["somaDiameter"] / 2.0, Views.hyperSlice(img4D, 3, 1))
imp = showStack(spheresRAI, title="nuclei (min_count=%i)" % params["min_count"])
return peaks, mergedPeaks, nuclei, spheresRAI, imp
return peaks, mergedPeaks, nuclei
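# A minimal usage sketch for findNucleiOverTime. The values below are illustrative
# assumptions (e.g. a soma diameter of 8 pixels), not settings from this repository;
# "minPeakValue" in particular has to be determined by hand as the docstring says:
# somaDiameter = 8.0  # hypothetical, in pixels
# params = {"frames": 5,
#           "calibration": [1.0, 1.0, 1.0],
#           "somaDiameter": somaDiameter,
#           "minPeakValue": 30,  # hypothetical threshold
#           "sigmaSmaller": somaDiameter / 4.0,
#           "sigmaLarger": somaDiameter / 2.0,
#           "searchRadius": somaDiameter / 3.0,
#           "min_count": 10}
# peaks, mergedPeaks, nuclei, spheresRAI, imp = findNucleiOverTime(img4D, params)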
def maxProjectLastDimension(img, strategy="1by1", chunk_size=0):
last_dimension = img.numDimensions() -1
if "1by1" == strategy:
exe = newFixedThreadPool()
try:
n_threads = exe.getCorePoolSize()
imgTs = [ArrayImgs.unsignedShorts(list(Intervals.dimensionsAsLongArray(img))[:-1]) for i in xrange(n_threads)]
def mergeMax(img1, img2, imgT):
return compute(maximum(img1, img2)).into(imgT)
def hyperSlice(index):
return Views.hyperSlice(img, last_dimension, index)
# The first n_threads mergeMax:
n = img.dimension(last_dimension)
futures = [exe.submit(Task(mergeMax, hyperSlice(i*2), hyperSlice(i*2 +1), imgTs[i]))
for i in xrange(min(n_threads, (n if 0 == n % 2 else n-1) -1 ))]
# As soon as one finishes, merge it with the next available hyperSlice
next = n_threads
while len(futures) > 0: # i.e. not empty
print len(futures)
imgT = futures.pop(0).get()
if next < img.dimension(last_dimension):
futures.append(exe.submit(Task(mergeMax, imgT, hyperSlice(next), imgT)))
next += 1
else:
# Run out of hyperSlices to merge
if 0 == len(futures):
return imgT # done
# Merge imgT to each other until none remain
futures.append(exe.submit(Task(mergeMax, imgT, futures.pop(0).get(), imgT)))
finally:
exe.shutdownNow()
else:
# By chunks
imglibtype = img.randomAccess().get().getClass()
# The Converter class
reduce_max = makeCompositeToRealConverter(reducer_class=Math,
reducer_method="max",
reducer_method_signature="(DD)D")
if chunk_size > 0:
# map reduce approach
exe = newFixedThreadPool()
try:
def projectMax(img, minC, maxC, reduce_max):
# Crop the chunk, collapse its last dimension, and reduce each pixel's vector with max
imgC = Views.interval(img, minC, maxC)
imgA = ArrayImgs.unsignedShorts([imgC.dimension(d) for d in xrange(imgC.numDimensions() - 1)])
ImgUtil.copy(ImgView.wrap(Converters.convert(Views.collapseReal(imgC), reduce_max.newInstance(), imglibtype), img.factory()), imgA)
return imgA
# The min and max coordinates of all dimensions except the last one
minCS = [0 for d in xrange(last_dimension)]
maxCS = [img.dimension(d) -1 for d in xrange(last_dimension)]
# Process every chunk in parallel
futures = [exe.submit(Task(projectMax, img, minCS + [offset], maxCS + [min(offset + chunk_size, img.dimension(last_dimension)) -1], reduce_max))
for offset in xrange(0, img.dimension(last_dimension), chunk_size)]
return reduce(lambda f1, f2: compute(maximum(f1.get(), f2.get())).into(f1.get()), futures).get()
finally:
exe.shutdownNow()
else:
# One chunk: all at once
# Each sample of img3DC is a virtual vector over all time frames at that 3D coordinate
# Reduce each vector to a single scalar, using a Converter
img3DC = Converters.convert(Views.collapseReal(img), reduce_max.newInstance(), imglibtype)
imgA = ArrayImgs.unsignedShorts([img.dimension(d) for d in xrange(last_dimension)])
ImgUtil.copy(ImgView.wrap(img3DC, img.factory()), imgA)
return imgA
def findNucleiByMaxProjection(img4D, params, img3D_filepath, projection_strategy="1by1", mask=None, show=True):
"""
img4D: the 4D series to max-project and then detect nuclei in.
params: for difference of Gaussian to detect somas.
img3D: optional, provide a ready-made max projection.
projection_strategy: defaults to "1by1". See maxProjectLastDimension.
mask: defaults to None, can be a 3D image (a RandomAccesibleInterval of 3 dimensions) used to
filter nuclei detections by whether their coordinates have a non-zero value.
show: defaults to True, and if so opens a 3D volume showing the nuclei as white spheres.
"""
if not os.path.exists(img3D_filepath):
print "Will max project 4D to 3D"
img3D = maxProjectLastDimension(img4D, strategy=projection_strategy)
writeZip(img3D, img3D_filepath, title=os.path.basename(img3D_filepath))
else:
print "Loading max projection"
img3D = ImageJLoader().get(img3D_filepath)
peaks = doGPeaks(img3D, params)
if mask:
ra = mask.randomAccess()
def isNonZero(peak):
ra.setPosition(peak)
return 0 != ra.get().get()
peaks = filter(isNonZero, peaks)
if show:
spheresRAI = virtualPointsRAI(peaks, params["somaDiameter"] / 2.0, img3D)
imp = showStack(spheresRAI, title="nuclei by max projection")
return img3D, peaks, spheresRAI, imp
else:
return img3D, peaks
def boundsOf(nuclei):
x0, y0, z0 = nuclei[0]
x1, y1, z1 = nuclei[0]
for x, y, z in nuclei:
if x < x0: x0 = x
if y < y0: y0 = y
if z < z0: z0 = z
if x > x1: x1 = x
if y > y1: y1 = y
if z > z1: z1 = z
return [x0, y0, z0], \
[x1, y1, z1]
def dimensionsOf(bounds):
return bounds[1][0] - bounds[0][0], \
bounds[1][1] - bounds[0][1], \
bounds[1][2] - bounds[0][2] | [
"[email protected]"
] | |
849c162af41131a106cdda454b6af428f8cac483 | 11aac6edab131293027add959b697127bf3042a4 | /isToeplitzMatrix.py | 39560dbb5a486627bf54e486fb6cac79f90fe046 | [] | no_license | jdanray/leetcode | a76b3436002b31865967b757b73c85992636383b | fd736af3e79899b86dac89d4d925d5bd985944ad | refs/heads/master | 2023-08-15T01:20:05.110565 | 2023-08-14T00:25:58 | 2023-08-14T00:25:58 | 148,686,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | # https://leetcode.com/problems/toeplitz-matrix/description/
class Solution:
    def isToeplitzMatrix(self, matrix):
        # Every element must equal the element diagonally down and to the right.
        for i in range(len(matrix) - 1):
            for j in range(len(matrix[i]) - 1):
                if matrix[i][j] != matrix[i + 1][j + 1]:
                    return False
        return True
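# Quick sanity check (standard LeetCode example):
# Solution().isToeplitzMatrix([[1, 2, 3, 4], [5, 1, 2, 3], [9, 5, 1, 2]])  # -> True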
| [
"[email protected]"
] | |
e25bebd643efbb70ced24082356761148e85ce8c | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/resolve/InstanceAttrOtherMethod.py | f13ebe6f3c2cee60520647fea746506037441e69 | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 115 | py | class C:
    def f(self):
        return self.foo
                    # <ref>
    def g(self):
        self.foo = 1
| [
"[email protected]"
] | |
da4f9f021fd019d5ef18dbd2e821d201de06d002 | 544cfadc742536618168fc80a5bd81a35a5f2c99 | /tools/test/connectivity/acts/framework/acts/controllers/chameleon_controller.py | b9965cf69fdfc8d09502e15d02627a0fdb1751c4 | [] | no_license | ZYHGOD-1/Aosp11 | 0400619993b559bf4380db2da0addfa9cccd698d | 78a61ca023cbf1a0cecfef8b97df2b274ac3a988 | refs/heads/main | 2023-04-21T20:13:54.629813 | 2021-05-22T05:28:21 | 2021-05-22T05:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,175 | py | #!/usr/bin/env python3
#
# Copyright 2017 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import xmlrpc.client
from subprocess import call
from acts import signals
MOBLY_CONTROLLER_CONFIG_NAME = "ChameleonDevice"
ACTS_CONTROLLER_REFERENCE_NAME = "chameleon_devices"
CHAMELEON_DEVICE_EMPTY_CONFIG_MSG = "Configuration is empty, abort!"
CHAMELEON_DEVICE_NOT_LIST_CONFIG_MSG = "Configuration should be a list, abort!"
audio_bus_endpoints = {
'CROS_HEADPHONE': 'Cros device headphone',
'CROS_EXTERNAL_MICROPHONE': 'Cros device external microphone',
'PERIPHERAL_MICROPHONE': 'Peripheral microphone',
'PERIPHERAL_SPEAKER': 'Peripheral speaker',
'FPGA_LINEOUT': 'Chameleon FPGA line-out',
'FPGA_LINEIN': 'Chameleon FPGA line-in',
'BLUETOOTH_OUTPUT': 'Bluetooth module output',
'BLUETOOTH_INPUT': 'Bluetooth module input'
}
class ChameleonDeviceError(signals.ControllerError):
pass
def create(configs):
if not configs:
raise ChameleonDeviceError(CHAMELEON_DEVICE_EMPTY_CONFIG_MSG)
elif not isinstance(configs, list):
raise ChameleonDeviceError(CHAMELEON_DEVICE_NOT_LIST_CONFIG_MSG)
elif isinstance(configs[0], str):
# Configs is a list of IP addresses
chameleons = get_instances(configs)
return chameleons
def destroy(chameleons):
for chameleon in chameleons:
del chameleon
def get_info(chameleons):
"""Get information on a list of ChameleonDevice objects.
Args:
chameleons: A list of ChameleonDevice objects.
Returns:
A list of dict, each representing info for ChameleonDevice objects.
"""
device_info = []
for chameleon in chameleons:
info = {"address": chameleon.address, "port": chameleon.port}
device_info.append(info)
return device_info
def get_instances(ips):
"""Create ChameleonDevice instances from a list of IPs.
Args:
ips: A list of Chameleon IPs.
Returns:
A list of ChameleonDevice objects.
"""
return [ChameleonDevice(ip) for ip in ips]
class ChameleonDevice:
"""Class representing a Chameleon device.
Each object of this class represents one Chameleon device in ACTS.
Attributes:
address: The full address to contact the Chameleon device at
client: The ServiceProxy of the XMLRPC client.
log: A logger object.
port: The TCP port number of the Chameleon device.
"""
def __init__(self, ip="", port=9992):
self.ip = ip
self.log = logging.getLogger()
self.port = port
self.address = "http://{}:{}".format(ip, self.port)
try:
self.client = xmlrpc.client.ServerProxy(
self.address, allow_none=True, verbose=False)
except ConnectionRefusedError as err:
self.log.exception(
"Failed to connect to Chameleon Device at: {}".format(
self.address))
self.client.Reset()
def pull_file(self, chameleon_location, destination):
"""Pulls a file from the Chameleon device. Usually the raw audio file.
Args:
chameleon_location: The path to the file on the Chameleon device
destination: The destination to where to pull it locally.
"""
# TODO: (tturney) implement
self.log.error("Definition not yet implemented")
def start_capturing_audio(self, port_id, has_file=True):
"""Starts capturing audio.
Args:
port_id: The ID of the audio input port.
has_file: True for saving audio data to file. False otherwise.
"""
self.client.StartCapturingAudio(port_id, has_file)
def stop_capturing_audio(self, port_id):
"""Stops capturing audio.
Args:
port_id: The ID of the audio input port.
Returns:
List contain the location of the recorded audio and a dictionary
of values relating to the raw audio including: file_type, channel,
sample_format, and rate.
"""
return self.client.StopCapturingAudio(port_id)
def audio_board_connect(self, bus_number, endpoint):
"""Connects an endpoint to an audio bus.
Args:
bus_number: 1 or 2 for audio bus 1 or bus 2.
endpoint: An endpoint defined in audio_bus_endpoints.
"""
self.client.AudioBoardConnect(bus_number, endpoint)
def audio_board_disconnect(self, bus_number, endpoint):
"""Connects an endpoint to an audio bus.
Args:
bus_number: 1 or 2 for audio bus 1 or bus 2.
endpoint: An endpoint defined in audio_bus_endpoints.
"""
self.client.AudioBoardDisconnect(bus_number, endpoint)
def audio_board_disable_bluetooth(self):
"""Disables Bluetooth module on audio board."""
self.client.AudioBoardDisableBluetooth()
def audio_board_clear_routes(self, bus_number):
"""Clears routes on an audio bus.
Args:
bus_number: 1 or 2 for audio bus 1 or bus 2.
"""
self.client.AudioBoardClearRoutes(bus_number)
def scp(self, source, destination):
"""Copies files from the Chameleon device to the host machine.
Args:
source: The file path on the Chameleon board.
dest: The file path on the host machine.
"""
cmd = "scp root@{}:/{} {}".format(self.ip, source, destination)
try:
call(cmd.split(" "))
except FileNotFoundError as err:
self.log.exception("File not found {}".format(source))
| [
"[email protected]"
] | |
20b035cb4df2c7e31ca09b0df3a8484d28292617 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/5020/180005020.py | 3ebd718b508dadfdbe85804be5bfc4ff1cde6abc | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 2,675 | py | from bots.botsconfig import *
from records005020 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'AN',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGN', MIN: 1, MAX: 1},
{ID: 'RDR', MIN: 0, MAX: 1},
{ID: 'PRF', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 10},
{ID: 'N9', MIN: 0, MAX: 10},
{ID: 'PER', MIN: 0, MAX: 2},
{ID: 'SAC', MIN: 0, MAX: 10},
{ID: 'G38', MIN: 0, MAX: 1},
{ID: 'PKG', MIN: 0, MAX: 5},
{ID: 'TD1', MIN: 0, MAX: 10},
{ID: 'TD5', MIN: 0, MAX: 10},
{ID: 'NTE', MIN: 0, MAX: 5},
{ID: 'N1', MIN: 0, MAX: 200, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 5},
]},
{ID: 'LM', MIN: 0, MAX: 10, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 100},
]},
{ID: 'BLI', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N9', MIN: 0, MAX: 20},
{ID: 'PID', MIN: 0, MAX: 5},
{ID: 'RDR', MIN: 0, MAX: 1},
{ID: 'SAC', MIN: 0, MAX: 10},
{ID: 'AMT', MIN: 0, MAX: 99999},
{ID: 'MEA', MIN: 0, MAX: 99999},
{ID: 'CRC', MIN: 0, MAX: 99999},
{ID: 'NTE', MIN: 0, MAX: 99999},
{ID: 'PRF', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 15},
{ID: 'DD', MIN: 0, MAX: 100},
{ID: 'GF', MIN: 0, MAX: 1},
{ID: 'TD5', MIN: 0, MAX: 5},
{ID: 'SDQ', MIN: 0, MAX: 100},
{ID: 'LM', MIN: 0, MAX: 10, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 100},
]},
{ID: 'N1', MIN: 0, MAX: 200, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 5},
]},
{ID: 'QTY', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'AMT', MIN: 0, MAX: 5},
{ID: 'DTM', MIN: 0, MAX: 10},
{ID: 'N1', MIN: 0, MAX: 1},
{ID: 'LM', MIN: 0, MAX: 10, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 100},
]},
{ID: 'LX', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N9', MIN: 0, MAX: 99999},
{ID: 'DTM', MIN: 0, MAX: 10},
{ID: 'N1', MIN: 0, MAX: 1},
{ID: 'LM', MIN: 0, MAX: 10, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 100},
]},
]},
]},
{ID: 'FA1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'FA2', MIN: 1, MAX: 99999},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"[email protected]"
] | |
a452d8c8be8214679e4821b0ad93f0e586261b5e | c9fe9f52d70ad5308d19664e82081233f1bc6d9a | /app/views.py | 4f04a92d6305eafbcd88315cf2b9d14c4a415af4 | [] | no_license | arifbd2221/ResumeParser | 9f48f97528588cde6fa7b5507d8ac3364a6c016b | 4508465e21e9a362018c84ac0370dcd35df98a7f | refs/heads/master | 2022-12-10T21:06:50.429742 | 2020-03-18T18:21:07 | 2020-03-18T18:21:07 | 248,309,886 | 0 | 0 | null | 2022-12-08T03:50:02 | 2020-03-18T18:19:53 | Python | UTF-8 | Python | false | false | 1,578 | py | from django.shortcuts import render
from .models import Resume, Candidate
from django.core.files.storage import default_storage
import os
from pyresparser import ResumeParser
def home(request):
    candidates = list(Candidate.objects.all())
    candidates.sort(key=lambda c: c.experience, reverse=True)
    return render(request, "app/home.html", {'candidates': candidates})


def handleResume(request):
    if request.method == 'POST':
        BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        resume = request.FILES.get('resume', None)
        if resume:
            saving = Resume(resume=resume)
            saving.save()
            media_path = os.path.join(BASE_DIR, 'resumes')
            lpart = str(saving.resume).split('/')
            full_path = os.path.join(media_path, lpart[1])
            # Parse the uploaded resume and store the extracted fields.
            data = ResumeParser(str(full_path)).get_extracted_data()
            candidate = Candidate(name=data.get('name'), email=data.get('email'),
                                  phone=data.get('mobile_number'),
                                  experience=float(data.get('total_experience')),
                                  total_skills=len(data.get('skills')),
                                  designation=data.get('designation'),
                                  company="N/A" if data.get('company_names') is None else data.get('company_names'))
            candidate.save()
        return render(request, "app/home.html", {})
    return render(request, "app/cvform.html", {})
| [
"[email protected]"
] | |
3fc99cb24ddecebaf07b6bdc249560f5cc586b4c | b9e99a828952ffeab9767e625c0061cb3ea5b670 | /Python编程从入门到实践/learning_log/learning_log_2.1_让用户能够输入数据/learning_log/urls.py | 1eb20c51f860bd491ba4e3b501449aa4cf335e2c | [] | no_license | ZGA101421/Python3_Project | 95d95e23858ef92f6825f018605089c105303ad3 | fa30f876fd13890743bc81d1521534c340575132 | refs/heads/master | 2022-04-03T07:03:46.369710 | 2019-12-30T15:22:21 | 2019-12-30T15:22:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,514 | py | """learning_log URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),  # this module defines all the URLs that can be requested on the admin site
path('', include('learning_logs.urls', namespace='learning_logs')),
# the namespace argument lets us distinguish the URLs of learning_logs from other URLs in the project
]
'''
The Django version has been updated, so the book's code needs corresponding changes.
Original code from the book:
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'', include('learning_logs.urls', namespace='learning_logs')),
]
Should be changed to:
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('learning_logs.urls', namespace='learning_logs')),
]
''' | [
"[email protected]"
] | |
7b5282cb4880ed7331be6c50b7b9bde16fe209cb | b9b06d86d43e738b62ab9289fc13aae4c2b2670b | /nsd1807/devops/day04/smail2.py | 51a4e856461309c72f18ac3c1d64e75aafe8f38f | [] | no_license | MrZhangzhg/nsd_2018 | 31a7a8d54e2cb3ff4f4eb5c736fbd76601718356 | 458a1fef40c5e15ba7689fcb3a00baf893ac0218 | refs/heads/master | 2020-04-08T19:08:48.237646 | 2019-09-08T04:31:07 | 2019-09-08T04:31:07 | 159,642,127 | 5 | 7 | null | 2019-01-04T05:33:40 | 2018-11-29T09:37:27 | Python | UTF-8 | Python | false | false | 904 | py | from email.mime.text import MIMEText
from email.header import Header
from smtplib import SMTP
import getpass
def send_mail(text, subject, sender, receivers, server, user, passwd, port=25):
    message = MIMEText(text, 'plain', 'utf8')
    message['From'] = Header(sender, 'utf8')
    message['To'] = Header(receivers[0], 'utf8')
    message['Subject'] = Header(subject, 'utf8')
    smtp = SMTP()
    smtp.connect(server, port)
    # smtp.starttls()  # uncomment this line if the server uses a TLS certificate
    smtp.login(user, passwd)
    smtp.sendmail(sender, receivers, message.as_bytes())


if __name__ == '__main__':
    text = 'python邮件测试\r\n'
    subject = 'smtp test'
    sender = '[email protected]'
    passwd = getpass.getpass()
    server = 'mail.tedu.cn'
    receivers = ['[email protected]', '[email protected]']
    send_mail(text, subject, sender, receivers, server, sender, passwd)
| [
"[email protected]"
] | |
261e0eb698524a65c64f509f16fc005825678a85 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5709773144064000_1/Python/DayBit/probB.py | 89a1ba67f2f5762b9bdf723758fef3336e9985fe | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | '''
Created on 12/04/2014
@author: david
'''
fIn = open("B-large.in")
T = int(fIn.readline())
P = []
for i in range(T):
    c, f, x = [float(x) for x in fIn.readline().strip().split()]
    P.append((c, f, x))

fRes = open("res.txt", "w")
case = 0
for c, f, x in P:
    case += 1
    cps = 2.0
    timetobuy = c / cps
    bestTime = x / cps
    acc = 0
    while True:
        cps += f
        acc += timetobuy
        if bestTime < acc + x / cps:
            print("Case #{0}: {1:0.7f}".format(case, bestTime))
            fRes.write("Case #{0}: {1:0.7f}\n".format(case, bestTime))
            break
        timetobuy = c / cps
        bestTime = acc + x / cps
fRes.close()
| [
"[email protected]"
] | |
27f71acc6181f6b40de7037f107d718970c210e8 | 24532cc3eb0e489415a08457b454c454abf66525 | /object-maker/copy-dataset-files.py | 295761788c3e67586d04717102ac11cacb0d8a08 | [] | no_license | glygener/glygen-backend-integration | 7a4c8e45dd9af6b0424946fcc7e11e9aef39d9a6 | 526775496f860680df2dbfdfc42b3ba35c69cfea | refs/heads/master | 2022-09-22T03:56:35.116497 | 2022-09-09T16:56:59 | 2022-09-09T16:56:59 | 151,144,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,122 | py | #!/usr/bin/python
import os,sys
import string
import csv
import json
import glob
import requests
import subprocess
import pymongo
from optparse import OptionParser
import libgly
from Bio import SeqIO
__version__="1.0"
__status__ = "Dev"
def get_master_file_list():
    file_name_list = []
    ds_obj_list = json.loads(open(wrk_dir + "/generated/misc/dataset-masterlist.json", "r").read())
    for obj in ds_obj_list:
        ds_name = obj["name"]
        ds_format = obj["format"]
        mol = obj["categories"]["molecule"]
        if ds_name in ["homolog_alignments", "isoform_alignments"]:
            continue
        if obj["categories"]["species"] == []:
            file_name_list.append("%s_%s.%s" % (mol, ds_name, ds_format))
        else:
            sp_list_one = sorted(obj["categories"]["species"])
            for species in sp_list_one:
                if species not in obj["integration_status"]["excludelist"]:
                    file_name_list.append("%s_%s_%s.%s" % (species, mol, ds_name, ds_format))
    return file_name_list


def main():
    global wrk_dir
    global field_dict
    global io_dict

    generated_dir = "/data/projects/glygen/generated/"
    wrk_dir = "/home/rykahsay/glygen-backend-integration/object-maker"
    reviewed_dir = wrk_dir + "/reviewed/"
    unreviewed_dir = wrk_dir + "/unreviewed/"

    file_list = get_master_file_list()
    path_list = []
    missing_files = []
    for out_file_name in file_list:
        path = unreviewed_dir + out_file_name
        if os.path.isfile(path) == False:
            missing_files.append(path)
        else:
            path_list.append(path)

    if missing_files != []:
        for path in missing_files:
            print (path, "is missing")
    else:
        cmd = "rm -f " + reviewed_dir + "/*"
        x, y = subprocess.getstatusoutput(cmd)
        for path in path_list:
            cmd = "cp " + path + " " + reviewed_dir
            x, y = subprocess.getstatusoutput(cmd)
        cmd = "chmod -R 755 " + reviewed_dir
        x, y = subprocess.getstatusoutput(cmd)


if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
ec95968a7b2df86a07137f7e6e672b71302ae50a | 7834e55df20cd3b0fb629a137dd2671cf53f484f | /tests/test_encoding.py | 8fb4b5e646be000a21d9ac4265648f9e54c2c5af | [
"MIT"
] | permissive | mapbox/mapbox-sdk-py | ca23c0f5cbbadd654b53683ff3f8918f504e0ff6 | 0329ccb17e7d3f4123da1534417bd21aa31bc2eb | refs/heads/master | 2023-06-05T11:46:38.434644 | 2022-08-01T22:31:37 | 2022-08-01T22:31:37 | 39,404,445 | 335 | 150 | MIT | 2020-02-04T16:25:14 | 2015-07-20T19:34:47 | Python | UTF-8 | Python | false | false | 4,221 | py | import pytest
import copy
import json
from mapbox.encoding import (read_points,
encode_waypoints,
encode_polyline,
encode_coordinates_json)
gj_point_features = [{
"type": "Feature",
"properties": {},
"geometry": {
"type": "Point",
"coordinates": [
-87.33787536621092,
36.539156961321574]}}, {
"type": "Feature",
"properties": {},
"geometry": {
"type": "Point",
"coordinates": [
-88.2476806640625,
36.92217534275667]}}]
gj_multipoint_features = [{
"type": "Feature",
"properties": {},
"geometry": {
"type": "MultiPoint",
"coordinates": [
[-87.33787536621092,
36.539156961321574],
[-88.2476806640625,
36.92217534275667]]}}]
gj_line_features = [{
"type": "Feature",
"properties": {},
"geometry": {
"type": "LineString",
"coordinates": [
[-87.33787536621092,
36.539156961321574],
[-88.2476806640625,
36.92217534275667]]}}]
class GeoThing(object):
__geo_interface__ = None
def __init__(self, thing):
self.__geo_interface__ = thing
def test_read_geojson_features():
expected = [(-87.33787536621092, 36.539156961321574),
(-88.2476806640625, 36.92217534275667)]
assert expected == list(read_points(gj_point_features))
assert expected == list(read_points(gj_multipoint_features))
assert expected == list(read_points(gj_line_features))
def test_geo_interface():
expected = [(-87.33787536621092, 36.539156961321574),
(-88.2476806640625, 36.92217534275667)]
features = [GeoThing(gj_point_features[0]),
GeoThing(gj_point_features[1])]
assert expected == list(read_points(features))
geoms = [GeoThing(gj_point_features[0]['geometry']),
GeoThing(gj_point_features[1]['geometry'])]
assert expected == list(read_points(geoms))
def test_encode_waypoints():
expected = "-87.337875,36.539157;-88.247681,36.922175"
assert expected == encode_waypoints(gj_point_features)
assert expected == encode_waypoints(gj_multipoint_features)
assert expected == encode_waypoints(gj_line_features)
def test_encode_limits():
expected = "-87.337875,36.539157;-88.247681,36.922175"
assert expected == encode_waypoints(gj_point_features)
with pytest.raises(ValueError) as exc:
encode_waypoints(gj_point_features, min_limit=3)
assert 'at least' in str(exc.value)
with pytest.raises(ValueError) as exc:
encode_waypoints(gj_point_features, max_limit=1)
assert 'at most' in str(exc.value)
def test_unsupported_geometry():
unsupported = copy.deepcopy(gj_point_features)
unsupported[0]['geometry']['type'] = "MultiPolygonnnnnn"
with pytest.raises(ValueError) as exc:
list(read_points(unsupported))
assert 'Unsupported geometry' in str(exc.value)
def test_unknown_object():
unknown = ["foo", "bar"]
with pytest.raises(ValueError) as exc:
list(read_points(unknown))
assert 'Unknown object' in str(exc.value)
def test_encode_polyline():
expected = "wp_~EvdatO{xiAfupD"
assert expected == encode_polyline(gj_point_features)
assert expected == encode_polyline(gj_multipoint_features)
assert expected == encode_polyline(gj_line_features)
def test_encode_coordinates_json():
expected = {
'coordinates': [
[-87.33787536621092, 36.539156961321574],
[-88.2476806640625, 36.92217534275667]]}
assert expected == json.loads(encode_coordinates_json(gj_point_features))
assert expected == json.loads(encode_coordinates_json(gj_multipoint_features))
assert expected == json.loads(encode_coordinates_json(gj_line_features))
def test_encode_waypoints_rounding():
expected = "1.0,0.0"
int_coord_features = [{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [1, 0]
},
"properties": {}}]
assert expected == encode_waypoints(int_coord_features)
| [
"[email protected]"
] | |
94a7c9bb205a5af9de7c09c8beb9796010b2cc71 | b059c2cf1e19932abb179ca3de74ced2759f6754 | /S20/day04/03作业.py | 89601d4d3d9c179bb55fbfb29fee50ae4e4ba7d1 | [] | no_license | Lwk1071373366/zdh | a16e9cad478a64c36227419d324454dfb9c43fd9 | d41032b0edd7d96e147573a26d0e70f3d209dd84 | refs/heads/master | 2020-06-18T02:11:22.740239 | 2019-07-10T08:55:14 | 2019-07-10T08:55:14 | 196,130,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,864 | py | # li = ["alex", "WuSir", "ritian", "barry", "wenzhou"].
# a.计算列表的⻓度并输出
# b. 列表中追加元素"seven",并输出添加后的列表
# c. 请在列表的第1个位置插⼊元素"Tony",并输出添加后的列表
# d. 请修改列表第2个位置的元素为"Kelly",并输出修改后的列表
# e. 请将列表l2=[1,"a",3,4,"heart"]的每⼀个元素添加到列表li中,⼀⾏代码实现,不
# 允许循环添加。
# f. 请将字符串s = "qwert"的每⼀个元素添加到列表li中,⼀⾏代码实现,不允许循
# 环添加。
# g. 请删除列表中的元素"ritian",并输出添加后的列表
# h. 请删除列表中的第2个元素,并输出删除的元素和删除元素后的列表
# i. 请删除列表中的第2⾄4个元素,并输出删除元素后的列表
# j. 请将列表所有得元素反转,并输出反转后的列表
# k. 请计算出"alex"元素在列表li中出现的次数,并输出该次数
# li = ["alex", "WuSir", "ritian", "barry", "wenzhou"]
# print(len(li))
# li.append('seven')
# print(li)
# li.insert(0,'Tony')
# print(li)
# li.insert(1,'Kelly')
# print(li)
# l2=[1,"a",3,4,"heart"]
# li.extend(l2)
# print(li)
# s = "qwert"
# li.extend(s)
# print(li)
# del li[2]
# print(li)
# li.pop(1)
# print(li.pop(1))
# print(li)
# del li[1:4]
# print(li)
# li.reverse()
# print(li)
# print(li.count('alex'))
# Write code: given the following list, use slicing to implement each task
# li = [1, 3, 2, "a", 4, "b", 5,"c"]
# a. Slice the list li to form a new list l1, l1 = [1,3,2]
# b. Slice the list li to form a new list l2, l2 = ["a",4,"b"]
# c. Slice the list li to form a new list l3, l3 = [1,2,4,5]
# d. Slice the list li to form a new list l4, l4 = [3,"a","b"]
# e. Slice the list li to form a new list l5, l5 = ["c"]
# f. Slice the list li to form a new list l6, l6 = ["b","a",3]
# li = [1, 3, 2, "a", 4, "b", 5,"c"]
# # del li[3::1]
# # print(li)
# # del li[1::2]
# # print(li)
# # del li[:6:2]
# # print(li)
# # del li[:7:1]
# # print(li)
# print(li[-3::-2])
# lis = [2, 3, "k", ["qwe", 20, ["k1", ["tt", 3, "1"]], 89], "ab", "adv"]
# a. Change the "tt" in the list lis to uppercase (in two ways).
# b. Change the number 3 in the list to the string "100" (in two ways).
# c. Change the string "1" in the list to the number 101 (in two ways).
# lis = [2, 3, "k", ["qwe", 20, ["k1", ["tt", 3, "1"]], 89], "ab", "adv"]
# lis[3][2][1][0]=lis[3][2][1][0].upper()
# print(lis)
# lis[1]=100
# lis[3][2][1][1]=100  # another way
# print(lis)
# lis[3][2][1][2]=101
# print(lis)
# li = ["alex", "wusir", "taibai"]
# Use underscores to join every element of the list into the string "alex_wusir_taibai"
# li = ["alex", "wusir", "taibai"]
# l1='_'.join(li)
# print(l1)
#
# Use a for loop and range to print the indexes of the list below.
# li = ["alex", "WuSir", "ritian", "barry", "wenzhou"]
# for i in range(len(li)):
# print(i)
# for i in range(len(li)):
# print(i)
# for i in range(len(li)):
# print(i)
# Use a for loop and range to find all the even numbers below 100 and append them to a new list
# list=[]
# for i in range(100):
# if i % 2 == 0:
# list.append(i)
# print(list)
# for i in range (2,100,2):
# list.append(i)
# print(list)
# Use a for loop and range to print from 100 down to 1 in reverse order
# for i in range(100,1,-1):
# print(i)
#-----------------------------------------------------------------------
# Use a for loop and range to add all the even numbers from 100 down to 10, in reverse order, to a
# # new list, then filter the list's elements, keeping only the numbers divisible by 4.
# list = []  # first define an empty list; use a for loop to iterate the even numbers from 100 to 10
# # since they are even, this can be solved with a step argument to range
# # then take the multiples of 4 among them and append the qualifying ones to the list
# # list1=[]
# for i in range(100,10,-2):
# if i % 4 == 0:
# list.append(i)
# print(list)
# # --------------------------------------------------------------
# Use a for loop and range to add the numbers 1-30 to a list, then loop over this list and change
# the numbers divisible by 3 to *
# list = []
# list1 = []
# for i in range(1,31,1):
# list.append(i)
# if i % 3 != 0:
# list1.append(i)
# else:i = '*'
# list1.append(i)
# print(list1)
#------------------------------------------------------------
# li=[]
# index=0
# First define an empty list and an index. Iterate over the numbers up to 30,
# appending each value to the empty list. Then, for each value, if the value
# mod 3 equals 0 it is a multiple of 3, so replace the element at position
# index with an asterisk, and increment index by one on every iteration.
# for i in range(1,31,1):
# li.append(i)
# for i in li:
# if i % 3==0:
# li[index]='*'
# index=index+1
# print(li)
# ----------------------------------------------------------
# lst = []
# for x in range(1,31):
# lst.append(x)
#
# index = 0
# while index < len(lst):  # while-loop approach
# if lst[index] % 3 == 0:
# lst[index] = '*'
# index += 1
#
# print(lst)
# -----------------------------------------------------------
# Search the elements of the list li, strip the whitespace from each element, find all the elements
# that start with "A" or "a" and end with "c", add them to a new list, and finally loop over the new
# list and print it.
# li = ["TaiBai ", "alexC", "AbC ", "egon", " riTiAn", "WuSir", " aqc"]  # first use for...in to take out each element
# li = ["TaiBai ", "alexC", "AbC ", "egon", " riTiAn", "WuSir", " aqc",]
# # lst = []
# # for x in li:
# # x = x.strip()
# # if (x.startswith('A') or x.startswith('a')) and x.endswith('c'):
# # lst.append(x)
# # for x in lst:
# # print(x,end=' ')
# #
# # lst=[]
# # for i in li:
# # i=i.strip()
# # if (i.startswith('A')or i.startswith('a')) and i.endswith('c'):
# # lst.append(i)
# # for i in lst:
# # print(i)
#
# lst = []
# for i in li:
# i=i.strip()
# if (i.startswith('A') or i.startswith('a') ) and i.endswith('c'):
# lst.append(i)
# if i in lst:
# print(i)
# First define an empty list in the variable lst; use a for loop, and for each i in the list li:
# the problem asks for the elements' whitespace to be removed, so reassign the stripped i back to i.
# At this point i has its whitespace removed; use an if test to add the elements that start with A
# or a and end with c to a new list; if the i being iterated is in that list, just print it.
# list = []  # create a new list
# for i in li :  # assign each extracted element to a variable 'j' and strip its whitespace
# j = i.strip()  # condition: we want all the elements ending with 'c', so that test is True
# if j.endswith('c')and j.startswith('A')or j.startswith('a') :  # so 'c' comes first ....
# # print(j)
# list.append(j)
# print(list)
# li = ["TaiBai ", "alexC", "AbC ", "egon", " riTiAn", "WuSir", " aqc"]
# list=[]
# for i in li:
# j = i.strip()
# if j.endswith('c')and j.startswith('A')or j.startswith('a'):
# list.append(j)
# print(list)
# list=[]
# for i in li:
# j = i.strip()
# if j.endswith('c')and j.startswith('A')or j.startswith('a'):
# list.append(j)
# print(j)
# Develop a sensitive-word filter: prompt the user to enter a comment, and if the user's input
# contains special words:
# sensitive-word list li = ["苍老师", "东京热", "武藤兰", "波多野结衣"]
# then replace the sensitive words in the user's input with the same number of * characters
# ("苍老师" is replaced by ***), and append the result to a list; if the user's input has no
# sensitive words, append it to that list directly.
# li = ["苍老师", "东京热", "武藤兰", "波多野结⾐"]
# comment_list=[]
# comment=input('请输入你的评论:')
# for name in li:
# if name in comment:
# comment=comment.replace(name,len(name)*'*')
# comment_list.append(comment)
# print(comment_list)
# li = ["苍老师", "东京热", "武藤兰", "波多野结⾐"]
# comment_list=[]
# comment=input('请输入你的评论:')
# for name in li:
# if name in comment:
# comment=comment.replace(name,len(name)*'*')
# comment_list.append(comment)
# print(comment_list)
#
#
# li= ["苍老师", "东京热", "武藤兰", "波多野结衣"]
# l1=[]
# comment = input('请输入评论:')
# for i in li:
# if i in comment:
# comment=comment.replace(i,len(i))
# l1.append(comment)
# print(li)
# Use underscores to join every element of the list into the string "alex_wusir_taibai"
# li = ["alex", "wusir", "taibai"]
# l1='_'.join(li)
# print(l1)
#
| [
"[email protected]"
] | |
9d75f4f664eb0f368c443272ef6b096804e26e20 | 18aee5d93a63eab684fe69e3aa0abd1372dd5d08 | /test/legacy_test/test_numel_op.py | 5c8c477877c3261829732e495a4f3679b18d2316 | [
"Apache-2.0"
] | permissive | Shixiaowei02/Paddle | 8d049f4f29e281de2fb1ffcd143997c88078eadb | 3d4d995f26c48f7792b325806ec3d110fc59f6fc | refs/heads/develop | 2023-06-26T06:25:48.074273 | 2023-06-14T06:40:21 | 2023-06-14T06:40:21 | 174,320,213 | 2 | 1 | Apache-2.0 | 2022-12-28T05:14:30 | 2019-03-07T10:09:34 | C++ | UTF-8 | Python | false | false | 4,760 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16
import paddle
from paddle import fluid
from paddle.fluid import core
class TestNumelOp(OpTest):
def setUp(self):
self.op_type = "size"
self.python_api = paddle.numel
self.init()
x = np.random.random(self.shape).astype(self.dtype)
self.inputs = {
'Input': x,
}
self.outputs = {'Out': np.array(np.size(x))}
def test_check_output(self):
self.check_output()
def init(self):
self.shape = (6, 56, 8, 55)
self.dtype = np.float64
class TestNumelOp1(TestNumelOp):
def init(self):
self.shape = (11, 66)
self.dtype = np.float64
class TestNumelOp2(TestNumelOp):
def init(self):
self.shape = (0,)
self.dtype = np.float64
class TestNumelOpFP16(TestNumelOp):
def init(self):
self.dtype = np.float16
self.shape = (6, 56, 8, 55)
class TestNumelOp1FP16(TestNumelOp):
def init(self):
self.dtype = np.float16
self.shape = (11, 66)
class TestNumelOp2FP16(TestNumelOp):
def init(self):
self.dtype = np.float16
self.shape = (0,)
@unittest.skipIf(
not core.is_compiled_with_cuda()
or not core.is_bfloat16_supported(core.CUDAPlace(0)),
"core is not compiled with CUDA and do not support bfloat16",
)
class TestNumelOpBF16(OpTest):
def setUp(self):
self.op_type = "size"
self.python_api = paddle.numel
self.dtype = np.uint16
self.init()
x = np.random.random(self.shape).astype(np.float32)
self.inputs = {'Input': convert_float_to_uint16(x)}
self.outputs = {'Out': np.array(np.size(x))}
def test_check_output(self):
place = paddle.CUDAPlace(0)
self.check_output_with_place(place)
def init(self):
self.shape = (6, 56, 8, 55)
class TestNumelOp1BF16(TestNumelOpBF16):
def init(self):
self.shape = (11, 66)
class TestNumelAPI(unittest.TestCase):
def test_numel_static(self):
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
shape1 = [2, 1, 4, 5]
shape2 = [1, 4, 5]
x_1 = paddle.static.data(shape=shape1, dtype='int32', name='x_1')
x_2 = paddle.static.data(shape=shape2, dtype='int32', name='x_2')
input_1 = np.random.random(shape1).astype("int32")
input_2 = np.random.random(shape2).astype("int32")
out_1 = paddle.numel(x_1)
out_2 = paddle.numel(x_2)
exe = paddle.static.Executor(place=paddle.CPUPlace())
res_1, res_2 = exe.run(
feed={
"x_1": input_1,
"x_2": input_2,
},
fetch_list=[out_1, out_2],
)
assert np.array_equal(
res_1, np.array(np.size(input_1)).astype("int64")
)
assert np.array_equal(
res_2, np.array(np.size(input_2)).astype("int64")
)
def test_numel_imperative(self):
paddle.disable_static(paddle.CPUPlace())
input_1 = np.random.random([2, 1, 4, 5]).astype("int32")
input_2 = np.random.random([1, 4, 5]).astype("int32")
x_1 = paddle.to_tensor(input_1)
x_2 = paddle.to_tensor(input_2)
out_1 = paddle.numel(x_1)
out_2 = paddle.numel(x_2)
assert np.array_equal(out_1.numpy().item(0), np.size(input_1))
assert np.array_equal(out_2.numpy().item(0), np.size(input_2))
paddle.enable_static()
def test_error(self):
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
def test_x_type():
shape = [1, 4, 5]
input_1 = np.random.random(shape).astype("int32")
out_1 = paddle.numel(input_1)
self.assertRaises(TypeError, test_x_type)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
96c96b1cbf1f744df9693030c13335e0783a3353 | 1af6958461af6257264ace2a6d13385b47104606 | /pyscf/semiempirical/umindo3_grad.py | f9223d6afa0b11f546adc5c837174e722f4b0638 | [
"Apache-2.0"
] | permissive | tmash/pyscf | ac9a86c078170044b52be71e5d00fa5f680f55af | 89c101c1c963e8247808635c61cd165bffab42d6 | refs/heads/master | 2020-12-04T04:41:23.456744 | 2020-01-02T18:05:16 | 2020-01-02T18:05:16 | 231,615,690 | 1 | 0 | Apache-2.0 | 2020-01-03T15:33:33 | 2020-01-03T15:33:32 | null | UTF-8 | Python | false | false | 2,099 | py | #!/usr/bin/env python
import copy
import numpy
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf.grad import uhf as uhf_grad
from pyscf.data.elements import _symbol
from pyscf.semiempirical import mopac_param
from pyscf.semiempirical import mindo3
from pyscf.semiempirical import rmindo3_grad
class Gradients(uhf_grad.Gradients):
get_hcore = None
hcore_generator = rmindo3_grad.hcore_generator
def get_ovlp(self, mol=None):
nao = self.base._mindo_mol.nao
return numpy.zeros((3,nao,nao))
def get_jk(self, mol=None, dm=None, hermi=0):
if dm is None: dm = self.base.make_rdm1()
vj, vk = rmindo3_grad.get_jk(self.base._mindo_mol, dm)
return vj, vk
def grad_nuc(self, mol=None, atmlst=None):
mol = self.base._mindo_mol
return rmindo3_grad.grad_nuc(mol, atmlst)
def grad_elec(self, mo_energy=None, mo_coeff=None, mo_occ=None, atmlst=None):
with lib.temporary_env(self, mol=self.base._mindo_mol):
return uhf_grad.grad_elec(self, mo_energy, mo_coeff, mo_occ,
atmlst)
Grad = Gradients
if __name__ == '__main__':
from pyscf.data.nist import HARTREE2EV
mol = gto.Mole()
mol.atom = [
['O' , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
mol.spin = 2
mol.verbose = 0
mol.build()
mfs = mindo3.UMINDO3(mol).set(conv_tol=1e-8).as_scanner()
mfs(mol)
print(mfs.e_tot - -336.25080977434175/HARTREE2EV)
mol1 = mol.copy()
mol1.set_geom_([['O' , (0. , 0. , 0.0001)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]])
    mindo_mol1 = mindo3._make_mindo_mol(mol1)  # built for inspection; not used below
    mol2 = mol.copy()
    mol2.set_geom_([['O' , (0. , 0. ,-0.0001)],
                    [1   , (0. , -0.757 , 0.587)],
                    [1   , (0. ,  0.757 , 0.587)]])
    mindo_mol2 = mindo3._make_mindo_mol(mol2)  # built for inspection; not used below
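    # finite-difference check: central difference of the energy over +/-0.0001
    # along the O atom's z coordinate, compared with the analytic gradient below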
g1 = mfs.nuc_grad_method().kernel()
e1 = mfs(mol1)
e2 = mfs(mol2)
print(abs((e1-e2)/0.0002 - g1[0,2]))
| [
"[email protected]"
] | |
510ae73b72ae1dfb9680491782b111449bfd44ff | d7b9b490c954c7a9160b69f8ce2c907ef4681ecb | /sponsors/migrations/0006_auto_20201016_1517.py | ff9d137547f788af7cd2e185b1028bb98733f640 | [
"Apache-2.0"
] | permissive | python/pythondotorg | 00db93a4b1789a4d438806d106d9cee3349ad78c | c4ee749942227ca75c8e670546afe67232d647b2 | refs/heads/main | 2023-08-28T20:04:24.735314 | 2023-08-03T19:12:29 | 2023-08-03T19:12:29 | 6,127,047 | 1,131 | 646 | Apache-2.0 | 2023-08-24T15:57:04 | 2012-10-08T16:00:15 | Python | UTF-8 | Python | false | false | 2,459 | py | # Generated by Django 2.0.13 on 2020-10-16 15:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("sponsors", "0005_auto_20201015_0908"),
]
operations = [
migrations.RenameModel(
old_name="SponsorshipLevel",
new_name="SponsorshipPackage",
),
migrations.RemoveField(
model_name="sponsorshipbenefit",
name="levels",
),
migrations.RemoveField(
model_name="sponsorshipbenefit",
name="minimum_level",
),
migrations.AddField(
model_name="sponsorshipbenefit",
name="new",
field=models.BooleanField(
default=False,
help_text='If selected, display a "New This Year" badge along side the benefit.',
verbose_name="New Benefit",
),
),
migrations.AddField(
model_name="sponsorshipbenefit",
name="package_only",
field=models.BooleanField(
default=False,
help_text="If a benefit is only available via a sponsorship package, select this option.",
verbose_name="Package Only Benefit",
),
),
migrations.AddField(
model_name="sponsorshipbenefit",
name="packages",
field=models.ManyToManyField(
help_text="What sponsorship packages this benefit is included in.",
related_name="benefits",
to="sponsors.SponsorshipPackage",
verbose_name="Sponsorship Packages",
),
),
migrations.AddField(
model_name="sponsorshipbenefit",
name="soft_capacity",
field=models.BooleanField(
default=False,
help_text="If a benefit's capacity is flexible, select this option.",
verbose_name="Soft Capacity",
),
),
migrations.AlterField(
model_name="sponsorshipbenefit",
name="internal_value",
field=models.PositiveIntegerField(
blank=True,
help_text="Value used internally to calculate sponsorship value when applicants construct their own sponsorship packages.",
null=True,
verbose_name="Internal Value",
),
),
]
| [
"[email protected]"
] | |
0510146ef6fac9025dff91e4eeac1220c8281527 | bca9c2fa3c4c3d06dd612280ce39090a9dfab9bd | /neekanee/job_scrapers/plugins/com/link/scotiabank.py | cb1391598e9dac9c05f05ebfd3b07cae11ae06a6 | [] | no_license | thayton/neekanee | 0890dd5e5cf5bf855d4867ae02de6554291dc349 | f2b2a13e584469d982f7cc20b49a9b19fed8942d | refs/heads/master | 2021-03-27T11:10:07.633264 | 2018-07-13T14:19:30 | 2018-07-13T14:19:30 | 11,584,212 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,977 | py | import re, urlparse, mechanize
from neekanee.jobscrapers.jobscraper import JobScraper
from neekanee.htmlparse.soupify import soupify, get_all_text
from neekanee_solr.models import *
COMPANY = {
'name': 'Scotiabank',
'hq': 'Toronto, Canada',
'home_page_url': 'http://www.scotiabank.com',
'jobs_page_url': 'http://jobs.scotiabank.com/careers/',
'empcnt': [10001]
}
class ScotiabankJobScraper(JobScraper):
def __init__(self):
super(ScotiabankJobScraper, self).__init__(COMPANY)
def scrape_job_links(self, url):
jobs = []
self.br.open(url)
while True:
s = soupify(self.br.response().read())
x = {'class': 'jobTitle'}
for td in s.findAll('td', attrs=x):
tr = td.findParent('tr')
l = tr.find('td', attrs={'class': 'location'})
l = self.parse_location(l.text)
if not l:
continue
job = Job(company=self.company)
job.title = td.text
job.url = urlparse.urljoin(self.br.geturl(), td.a['href'])
job.location = l
jobs.append(job)
try:
self.br.follow_link(self.br.find_link(text='Next page'))
except mechanize.LinkNotFoundError:
break
return jobs
def scrape_jobs(self):
job_list = self.scrape_job_links(self.company.jobs_page_url)
self.prune_unlisted_jobs(job_list)
new_jobs = self.new_job_listings(job_list)
for job in new_jobs:
self.br.open(job.url)
s = soupify(self.br.response().read())
x = {'class': 'job-details'}
            d = s.find('div', attrs=x)  # restrict to the job-details container
job.desc = get_all_text(d)
job.save()
def get_scraper():
return ScotiabankJobScraper()
if __name__ == '__main__':
job_scraper = get_scraper()
job_scraper.scrape_jobs()
| [
"[email protected]"
] | |
21f276b882cd9006b94371de31160164730f6994 | 99f851bc034bdedd61ff673b4ca1d294e9451d04 | /iprPy/records/LAMMPS-potential.py | bbade6130a0a6bfafae083165ae1d8893d518f71 | [] | no_license | njisrawi/iprPy | c583ba92b2537ce449c3fb6a832a06036dc1918f | 5ce6c14b1cc889069495a2f29db19d5d78e29ede | refs/heads/master | 2021-01-20T09:00:24.709510 | 2017-01-25T20:28:54 | 2017-01-25T20:28:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | from DataModelDict import DataModelDict as DM
import os
import atomman as am
import atomman.unitconvert as uc
import numpy as np
def schema():
    pkg_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(pkg_dir, 'record-LAMMPS-potential.xsd')
def todict(record):
model = DM(record)
pot = model['LAMMPS-potential']
params = {}
params['pot_key'] = pot['potential']['key']
params['pot_id'] = pot['potential']['id']
params['units'] = pot['units']
params['atom_style'] = pot['atom_style']
params['pair_style'] = pot['pair_style']['type']
params['elements'] = []
params['masses'] = []
params['symbols'] = []
params['charge'] = []
for atom in pot.iteraslist('atom'):
params['elements'].append(atom.get('element', np.nan))
params['masses'].append(atom.get('mass', np.nan))
params['symbols'].append(atom.get('symbol', np.nan))
params['charge'].append(atom.get('charge', np.nan))
return params | [
"[email protected]"
] | |
eab0f37861eb12d0b3543cafdd7136150516b581 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2285/60598/260635.py | 6ae600bba2a2f7ddba8115e36439a621e275c9fc | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | times = int(input())
for i in range(times):
length = int(input())
nums = list(map(int, input().split(" ")))
j = 0
finish = False
result = []
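    # scan for maximal strictly increasing runs: buy at each run's start, sell at its end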
while j < length-1:
start = j
while j < length-1 and nums[j] < nums[j+1]:
j += 1
if start != j:
result.append("("+str(start) +" " +str(j)+")")
finish = True
j += 1
if finish:
for k in range(len(result)-1):
print(result[k], "", end="")
print(result[-1])
else:
print("没有利润")
| [
"[email protected]"
] | |
d3f26c05d3402fa44b20bfa369d5f437432ac93a | ce4a7ef82cf2146647714c7887c581bc0971f83e | /account/migrations/0001_create_sites.py | ceafd6dc51e23aae38c15ae11ea569fd78f6ee07 | [] | no_license | fbenke/BeamRemit | d15d8467c17ca15a1afc10c6bc23d756e3b13f75 | 2b894f56e3b1711334115085b6cd9379bd5bf1aa | refs/heads/master | 2021-01-10T12:12:16.289891 | 2014-12-05T11:36:45 | 2014-12-05T11:36:45 | 52,040,642 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | from south.v2 import DataMigration
from django.conf import settings
class Migration(DataMigration):
def forwards(self, orm):
orm['sites.site'].objects.all().delete()
site = orm['sites.site'].objects.create(
id=0,
domain=settings.ENV_SITE_MAPPING[settings.ENV][settings.SITE_USER],
name='Beam'
)
site.save()
def backwards(self, orm):
orm['sites.site'].objects.all().delete()
site = orm['sites.site'].objects.create(
id=0,
domain='example.com',
name='example.com'
)
site.save()
models = {
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['sites']
symmetrical = True
| [
"vagrant@precise64.(none)"
] | vagrant@precise64.(none) |
8d48d5283041505a2efe6dd502d7cefd20c39f93 | 6baf192a289f602407044e3b2100aeffc60e3897 | /microblog.py | 3d8c0872b222e8d7023fe352aaa3889b9d5f61fd | [] | no_license | HaoREN211/hao_read | 798adcb0c6bdd2372b050112e76b858e3a212276 | ed126ffb424f4e128be02cbc06807f1e5c863a69 | refs/heads/master | 2023-05-12T18:20:20.315328 | 2020-02-03T14:23:43 | 2020-02-03T14:23:43 | 236,145,154 | 0 | 0 | null | 2023-05-01T21:20:55 | 2020-01-25T08:38:31 | JavaScript | UTF-8 | Python | false | false | 1,255 | py | # 作者:hao.ren3
# Date: 2019/11/5 14:34
# IDE: PyCharm
from flask import send_from_directory
from app import create_app, db
from app.models.User import User
from app.models.Post import Post
from os.path import join
app = create_app()
# Register a favicon for the site
def favicon():
return send_from_directory(join(app.root_path, 'static'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
app.add_url_rule('/favicon.ico',view_func=favicon)
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'User': User, 'Post': Post}
@app.template_filter('md')
def markdown_html(txt):
from markdown import markdown
post_content_html = markdown(txt, extensions=[
'markdown.extensions.extra',
'markdown.extensions.fenced_code',
'markdown.extensions.admonition',
'markdown.extensions.codehilite',
'markdown.extensions.meta',
'markdown.extensions.nl2br',
'markdown.extensions.sane_lists',
'markdown.extensions.smarty',
'markdown.extensions.toc',
'markdown.extensions.wikilinks',
'markdown.extensions.tables'
])
return post_content_html
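# In a Jinja template the 'md' filter registered above can be applied as, e.g.:
#     {{ post.body | md | safe }}
# ("post.body" is an illustrative variable, not defined in this file; "safe"
# keeps Jinja from escaping the generated HTML.)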
if __name__ == '__main__':
app.run(host="0.0.0.0", port=3000) | [
"[email protected]"
] | |
4f36cb2a52bc750cf728f622b4d832bbc4cfdf9b | 70cc96b55c202245691463ee59e42e9801cde858 | /python/rtypes/types/subset.py | 5638f2520dcf59172857df76e81b5dcdca8f347a | [] | no_license | rezafuru/spacetime | ad1da33fbcf9c358cf1b379507f0178155354f92 | 3b4b58775d41c75f103278c5e1553e5b36542d72 | refs/heads/master | 2023-03-08T09:01:48.286203 | 2019-10-14T18:07:15 | 2019-10-14T18:07:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | from rtypes.attributes import PredicateFunction
from rtypes.metadata import SubsetMetadata
from rtypes.utils.enums import Rtype
def set_metadata(cls, parent):
cls.__r_table__ = parent.__r_table__
pred_func = None
for attr in dir(cls):
if isinstance(getattr(cls, attr), PredicateFunction):
pred_func = getattr(cls, attr)
meta = SubsetMetadata(Rtype.SUBSET, cls, parent, pred_func)
if hasattr(cls, "__r_meta__"):
TypeError("How am I here?")
else:
cls.__r_meta__ = meta
class subset(object):
def __init__(self, parent_cls):
self.parent = parent_cls
def __call__(self, cls):
set_metadata(cls, self.parent)
return cls
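# Illustrative usage sketch (names invented; assumes some class attribute is a
# PredicateFunction instance, which set_metadata() discovers via dir(cls)):
#
#     @subset(ParentType)
#     class ActiveItems(ParentType):
#         is_active = PredicateFunction(...)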
| [
"[email protected]"
] | |
6e8292163311f9d2d6a1c5cb60d88ddcffd2cf58 | 015383d460fa4321391d964c4f65c4d0c044dcc1 | /.venv/lib/python3.7/site-packages/faker/providers/person/dk_DK/__init__.py | 2b739525bda88b354c41c2ee0642b1e1f9a3a170 | [
"Unlicense"
] | permissive | kobbyrythm/temperature_stories_django | 8f400c8d3c8190b0e83f7bcfece930d696c4afe9 | 552d39f1f6f3fc1f0a2f7308a7da61bf1b9b3de3 | refs/heads/main | 2023-07-03T21:28:46.020709 | 2021-07-20T09:44:29 | 2021-07-20T09:44:29 | 468,728,039 | 3 | 0 | Unlicense | 2022-03-11T11:41:47 | 2022-03-11T11:41:46 | null | UTF-8 | Python | false | false | 7,565 | py | from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats = (
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}-{{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}-{{last_name}}',
'{{prefix_male}} {{first_name_male}} {{last_name}}',
'{{prefix_female}} {{first_name_female}} {{last_name}}',
'{{prefix_male}} {{first_name_male}} {{last_name}}',
'{{prefix_female}} {{first_name_female}} {{last_name}}',
)
first_names_male = (
'Adam', 'Albert', 'Aksel', 'Alex', 'Alexander', 'Alf', 'Allan',
'Alvin', 'Anders', 'André', 'Andreas', 'Anton', 'Arne', 'Asger',
        'August', 'Benjamin', 'Benny', 'Bent', 'Bertil', 'Bertram', 'Birger',
'Bjarne', 'Bo', 'Bob', 'Bobby', 'Boe', 'Boris', 'Borris',
'Brian', 'Bruno', 'Bøje', 'Børge', 'Carl', 'Carlo', 'Carsten',
'Casper', 'Christian', 'Christoffer', 'Christopher', 'Claus', 'Clavs', 'Curt',
'Dan', 'Daniel', 'Danny', 'David', 'Dennis', 'Ebbe', 'Einar',
'Einer', 'Elias', 'Emil', 'Eric', 'Erik', 'Erling', 'Ernst',
'Esben', 'Finn', 'Flemming', 'Frank', 'Frans', 'Freddy', 'Frede',
'Frederik', 'Frode', 'Georg', 'George', 'Gert', 'Gorm', 'Gunnar',
'Gunner', 'Gustav', 'Hans', 'Helge', 'Henrik', 'Henry', 'Herbert',
'Herman', 'Hjalte', 'Holger', 'Hugo', 'Ib', 'Ivan', 'Iver',
'Jack', 'Jacob', 'Jakob', 'James', 'Jan', 'Jano', 'Jarl',
'Jean', 'Jens', 'Jeppe', 'Jesper', 'Jim', 'Jimmy', 'Joachim',
'Joakim', 'Johan', 'Johannes', 'John', 'Johnnie', 'Johnny', 'Jon',
'Jonas', 'Jonathan', 'Julius', 'Jørgen', 'Karl', 'Karlo', 'Karsten',
'Kaspar', 'Kasper', 'Keld', 'Ken', 'Kenn', 'Kenneth', 'Kenny',
'Kent', 'Kim', 'Kjeld', 'Klaus', 'Klavs', 'Kristian', 'Kurt',
'Kåre', 'Lars', 'Lasse', 'Laurits', 'Laus', 'Laust', 'Leif',
'Lennarth', 'Lucas', 'Ludvig', 'Mads', 'Magnus', 'Malthe', 'Marcus',
'Marius', 'Mark', 'Martin', 'Mathias', 'Matthias', 'Michael', 'Mik',
'Mikael', 'Mike', 'Mikkel', 'Mogens', 'Morten', 'Nick', 'Nicklas',
'Nicolai', 'Nicolaj', 'Niels', 'Nikolai', 'Nikolaj', 'Nils', 'Noah',
'Ole', 'Olfert', 'Oliver', 'Oscar', 'Oskar', 'Osvald', 'Otto',
'Ove', 'Palle', 'Patrick', 'Paw', 'Peder', 'Per', 'Pete',
'Peter', 'Paul', 'Philip', 'Poul', 'Preben', 'Ragnar', 'Ragner',
'Rasmus', 'René', 'Richard', 'Richardt', 'Robert', 'Robin', 'Rolf',
'Ron', 'Ronni', 'Ronnie', 'Ronny', 'Ruben', 'Rune', 'Sam',
'Sebastian', 'Silas', 'Simon', 'Simon', 'Sonny', 'Steen', 'Stefan',
'Sten', 'Stephan', 'Steve', 'Steven', 'Stig', 'Svenning', 'Søren',
'Tage', 'Tejs', 'Thomas', 'Tim', 'Timmy', 'Tobias', 'Tom',
'Tommy', 'Tonny', 'Torben', 'Troels', 'Uffe', 'Ulf', 'Ulrik',
'Vagn', 'Valdemar', 'Verner', 'Victor', 'Villads', 'Werner', 'William',
'Yan', 'Yannick', 'Yngve', 'Zacharias', 'Ziggy', 'Øivind', 'Øjvind',
'Ørni', 'Øvli', 'Øystein', 'Øyvind', 'Åbjørn', 'Aage', 'Åge',
)
first_names_female = (
'Abelone', 'Agnes', 'Agnete', 'Alberte', 'Alma', 'Amalie', 'Amanda',
'Andrea', 'Ane', 'Anette', 'Anna', 'Anne', 'Annemette', 'Annette',
'Asta', 'Astrid', 'Benedicte', 'Benedikte', 'Bente', 'Benthe', 'Berit',
'Berta', 'Beth', 'Bettina', 'Birgit', 'Birgitte', 'Birte', 'Birthe',
'Bitten', 'Bodil', 'Britt', 'Britta', 'Camilla', 'Carina', 'Carla',
'Caroline', 'Cathrine', 'Catrine', 'Cecilie', 'Charlotte', 'Christina', 'Christine',
'Cirkeline', 'Clara', 'Connie', 'Conny', 'Dagmar', 'Dagny', 'Daniella',
'Dina', 'Ditte', 'Doris', 'Dorte', 'Dorthe', 'Edith', 'Elin',
'Elisabeth', 'Ella', 'Ellen', 'Elna', 'Else', 'Elsebeth', 'Emilie',
'Emily', 'Emma', 'Erna', 'Esmarelda', 'Ester', 'Filippa', 'Frederikke',
'Freja', 'Frida', 'Gerda', 'Gertrud', 'Gitte', 'Grete', 'Grethe',
'Gundhild', 'Gunhild', 'Gurli', 'Gyda', 'Hannah', 'Hanne', 'Heidi',
'Helen', 'Helle', 'Henriette', 'Herdis', 'Iben', 'Ida', 'Inga',
'Inge', 'Ingelise', 'Inger', 'Ingrid', 'Irma', 'Isabella', 'Jacobine',
'Jacqueline', 'Janne', 'Janni', 'Jannie', 'Jasmin', 'Jean', 'Jenny',
'Joan', 'Johanne', 'Jonna', 'Josefine', 'Josephine', 'Julie', 'Justina',
'Jytte', 'Karen', 'Karin', 'Karina', 'Karla', 'Karoline', 'Katcha',
'Katja', 'Katrine', 'Kirsten', 'Kirstin', 'Kirstine', 'Klara', 'Kristina',
'Kristine', 'Laura', 'Lea', 'Lena', 'Lene', 'Leonora', 'Line',
'Liva', 'Lona', 'Lone', 'Lotte', 'Louise', 'Lærke', 'Maiken',
'Maja', 'Majken', 'Malene', 'Malou', 'Maren', 'Margit', 'Margrethe',
'Maria', 'Marianne', 'Marie', 'Marlene', 'Mathilde', 'Maya', 'Merete',
'Merethe', 'Mette', 'Mia', 'Michala', 'Michelle', 'Mie', 'Mille',
'Mimi', 'Minna', 'Nadia', 'Naja', 'Nana', 'Nanna', 'Nanni',
'Natasha', 'Natasja', 'Nete', 'Nicoline', 'Nina', 'Nora', 'Oda',
'Odeline', 'Odette', 'Ofelia', 'Olga', 'Olivia', 'Patricia', 'Paula',
'Paulina', 'Pernille', 'Pia', 'Ragna', 'Ragnhild', 'Randi', 'Rebecca',
'Regitse', 'Regitze', 'Rikke', 'Rita', 'Ritt', 'Ronja', 'Rosa',
'Ruth', 'Sabine', 'Sandra', 'Sanne', 'Sara', 'Sarah', 'Selma',
'Signe', 'Sigrid', 'Silje', 'Sille', 'Simone', 'Sine', 'Sofia',
'Sofie', 'Solveig', 'Solvej', 'Sonja', 'Sophie', 'Stina', 'Stine',
'Susanne', 'Sussanne', 'Sussie', 'Sys', 'Sørine', 'Søs', 'Tammy',
'Tanja', 'Thea', 'Tilde', 'Tina', 'Tine', 'Tove', 'Trine',
'Ulla', 'Ulrike', 'Ursula', 'Vera', 'Victoria', 'Viola', 'Vivian',
'Weena', 'Winni', 'Winnie', 'Xenia', 'Yasmin', 'Yda', 'Yrsa',
'Yvonne', 'Zahra', 'Zara', 'Zehnia', 'Zelma', 'Zenia', 'Åse',
)
first_names = first_names_male + first_names_female
last_names = (
'Jensen', 'Nielsen', 'Hansen', 'Pedersen', 'Andersen', 'Christensen', 'Larsen',
'Sørensen', 'Rasmussen', 'Petersen', 'Jørgensen', 'Madsen', 'Kristensen', 'Olsen',
'Christiansen', 'Thomsen', 'Poulsen', 'Johansen', 'Knudsen', 'Mortensen', 'Møller',
'Jacobsen', 'Jakobsen', 'Olesen', 'Frederiksen', 'Mikkelsen', 'Henriksen', 'Laursen',
'Lund', 'Schmidt', 'Eriksen', 'Holm', 'Kristiansen', 'Clausen', 'Simonsen',
'Svendsen', 'Andreasen', 'Iversen', 'Jeppesen', 'Mogensen', 'Jespersen', 'Nissen',
'Lauridsen', 'Frandsen', 'Østergaard', 'Jepsen', 'Kjær', 'Carlsen', 'Vestergaard',
'Jessen', 'Nørgaard', 'Dahl', 'Christoffersen', 'Skov', 'Søndergaard', 'Bertelsen',
'Bruun', 'Lassen', 'Bach', 'Gregersen', 'Friis', 'Johnsen', 'Steffensen',
'Kjeldsen', 'Bech', 'Krogh', 'Lauritsen', 'Danielsen', 'Mathiesen', 'Andresen',
'Brandt', 'Winther', 'Toft', 'Ravn', 'Mathiasen', 'Dam', 'Holst',
'Nilsson', 'Lind', 'Berg', 'Schou', 'Overgaard', 'Kristoffersen', 'Schultz',
'Klausen', 'Karlsen', 'Paulsen', 'Hermansen', 'Thorsen', 'Koch', 'Thygesen',
)
prefixes_male = (
'Hr', 'Dr.', 'Prof.', 'Univ.Prof.',
)
prefixes_female = (
'Fru', 'Dr.', 'Prof.', 'Univ.Prof.',
)
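    # Example usage sketch (standard Faker entry point; outputs vary by seed):
    #     from faker import Faker
    #     fake = Faker('dk_DK')
    #     fake.name()               # built from the formats above
    #     fake.first_name_female()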
| [
"[email protected]"
] | |
f8482f84e76853cc7b2a70e7460d1e2cd3e290db | 6ac77834909c485686638d27c0bf41e6d1765cf7 | /src/database/module_user.py | b042699456da0a2849bcae55811c5f87ed51da84 | [] | no_license | YangXinNewlife/gears | 4144e451861efb0f3ae1d738eb5fcd6cec46a833 | 486b1ce5a7b8d8682bb1394be8f5dd6ae0fca837 | refs/heads/master | 2021-01-20T01:41:30.074696 | 2017-05-26T08:17:45 | 2017-05-26T08:17:45 | 89,316,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,005 | py | #-*- coding:utf-8 -*-
__author__ = 'yx'
from module_base import *
class ModuleUser(ModuleBase):
def __init__(self, table="t_user"):
self.schema = "ehc"
self.table = table
self.table_name = "\"%s\".\"%s\"" % (self.schema, self.table)
# def __init__(self, table_name="\"ehc\".\"t_user\""):
# self.table = table_name
# def get(self, user_id=None, user_name=None):
# client = PostgresClient()
# row = client.fetch_data(self.table, "WHERE \"autoKey\" = %s" % user_id)
# client.close()
# return row if not row else row[0]
#
# def get_by_partner_id(self, partner_userid):
# #sql = "SELECT * FROM %s WHERE autoKey = %s" % (self.table, env_id)
# client = PostgresClient()
# row = client.fetch_data(self.table, "WHERE partner_user_id = '%s'" % partner_userid)
# client.close()
# return row
# def add(self, name, partnerRawdata, partner_user_id, email="", phone=""):
# sql = "INSERT INTO %s (name, email, phone, partnerRawdata, partner_user_id) VALUES ('%s', '%s', '%s', '%s', '%s') returning *;" \
# % (self.table, name, email, phone, partnerRawdata, partner_user_id)
# print sql
# client = PostgresClient()
# ret = client.insert_sql(sql)
# client.close()
# return ret
def update_access_info(self, access, user_id):
sql = "UPDATE %s SET access_info = '%s' WHERE \"autoKey\" = %s" % (self.table, access, user_id)
client = PostgresClient()
client.execute_sql(sql)
client.close()
def update_status(self, status, user_id):
sql = "UPDATE %s SET status = '%s' WHERE \"autoKey\" = %s" % (self.table, status, user_id)
client = PostgresClient()
client.execute_sql(sql)
client.close()
def get_all(self):
client = PostgresClient()
rows = client.fetch_data(self.table)
client.close()
return rows
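# Illustrative usage (assumes module_base provides a configured PostgresClient):
#     users = ModuleUser()
#     users.update_status('active', user_id=1)
#     print(users.get_all())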
| [
"[email protected]"
] | |
62a3e940766f49a1361fa806c29ca65246258810 | a6da9040a6dad7db109cc163b76acd3e6c8be56f | /hafta01/ders06.py | 8dfdb173da15c242146905782db437de1c0a0502 | [] | no_license | sinanurun/Python_8181 | f04677ada7f1f6daadddaaf49211b20e8197ad7f | 710b2bc4573d22988376fd3680c5be0dc011f5bc | refs/heads/master | 2020-04-27T13:14:14.544839 | 2019-04-15T04:50:33 | 2019-04-15T04:50:33 | 174,362,253 | 21 | 10 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | # ad = input("adınız nedir")
#
# if ad == "fatih" or ad == "serhat":
# print("bilişmcisin")
# else:
# print("farklı branştasın")
cinsiyet = input("cinsiyet")
meslek = input("mesleğiniz")
if cinsiyet =="kadın" and meslek =="bilisim":
print("8 Mart dünya kadınlar gününüz kutlu olsun")
else:
print("her gününüz de kutlu olsun") | [
"[email protected]"
] | |
19842f088f7b8feaf1c34853366f4f1851fb997b | 9303910239ca531d512460553e291960f3b0bd1c | /setup.py | 866ecd51fea15b427bb5e38717130887e781a264 | [
"BSD-3-Clause"
] | permissive | basnijholt/pyfeast | 2c81e67b221a2450f514b7da853342d91eae54bd | b6d8832b3a101900ed8b50127c1884ef74b34750 | refs/heads/master | 2020-03-18T08:06:20.356311 | 2018-05-23T18:08:54 | 2018-05-23T18:33:48 | 134,490,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,208 | py | #!/usr/bin/env python3
import configparser
import sys
import os.path
import numpy
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
from create_cython_files import create_feast_pxd, create_feast_pyx
def guess_libraries():
"""Return the configuration for FEAST if it is available in a known way.
This is known to work with the FEAST binaries in the conda-forge channel."""
import ctypes.util
common_libs = ['mkl_rt', 'gfortran', 'iomp5']
for lib in ['blas', 'openblas']:
if ctypes.util.find_library(lib):
return common_libs + [lib]
else:
print('Cannot find MKL or openBLAS!')
sys.exit(1)
def guess_libraries_dirs():
return [os.path.join(sys.exec_prefix, 'lib')]
def guess_include_dirs():
return [os.path.join(sys.exec_prefix, 'include')]
def guess(key):
if key == 'library_dirs':
return guess_libraries_dirs()
elif key == 'include_dirs':
return guess_include_dirs()
elif key == 'libraries':
return guess_libraries()
def get_config(config_file='build.conf'):
# Read build configuration file.
configs = configparser.ConfigParser()
try:
with open(config_file) as f:
configs.read_file(f)
config = dict(configs['feast'])
except IOError:
        print('No user-configured build config found.')
config = {}
except KeyError:
print('User-configured build config, '
'but no `feast` section.')
config = {}
keys = ['include_dirs', 'library_dirs', 'libraries']
for k in keys:
if k in config:
config[k] = config[k].split()
else:
print('Auto configuring `{}` (best guess)'.format(k))
config[k] = guess(k)
config['include_dirs'].append(numpy.get_include())
return config
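# An example build.conf (paths purely illustrative); each value is
# whitespace-split into a list by get_config() above:
#
#     [feast]
#     include_dirs = /opt/feast/include
#     library_dirs = /opt/feast/lib
#     libraries = feast mkl_rt gfortran iomp5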
if __name__ == '__main__':
ext_params = get_config()
create_feast_pxd()
create_feast_pyx()
ext_modules=[
Extension("feast",
sources=["feast.pyx"],
**ext_params,
)
]
setup(
name="pyfeast",
ext_modules=cythonize(ext_modules),
)
| [
"[email protected]"
] | |
710bafb6e75878a0ed6c139caf4c0f43bac256d9 | 66f4c011237e9fcad12d5f5508589b01a66a2a91 | /neural_sp/models/modules/attention.py | 23eead75b60255abae0090078e2902d3e3121e7e | [
"Apache-2.0"
] | permissive | nikhil-garg/neural_sp | 5004624ed1c23ff1ce5fefba1538e25eabab1e8c | 0df9107bf2515f6fba6d2a5910c6878daa06193f | refs/heads/master | 2023-01-22T14:44:17.338339 | 2020-10-31T13:49:50 | 2020-10-31T13:49:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,022 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Single-head attention layer."""
import numpy as np
import torch
import torch.nn as nn
class AttentionMechanism(nn.Module):
"""Single-head attention layer.
Args:
kdim (int): dimension of key
qdim (int): dimension of query
atype (str): type of attention mechanisms
        adim (int): dimension of attention space
sharpening_factor (float): sharpening factor in the softmax layer
for attention weights
sigmoid_smoothing (bool): replace the softmax layer for attention weights
with the sigmoid function
        conv_out_channels (int): number of channels of conv outputs.
            This is used for location-based attention.
        conv_kernel_size (int): size of kernel.
            This must be an odd number.
dropout (float): dropout probability for attention weights
lookahead (int): lookahead frames for triggered attention
"""
def __init__(self, kdim, qdim, adim, atype,
sharpening_factor=1, sigmoid_smoothing=False,
conv_out_channels=10, conv_kernel_size=201, dropout=0.,
lookahead=2):
super().__init__()
assert conv_kernel_size % 2 == 1, "Kernel size should be odd for 'same' conv."
self.atype = atype
self.adim = adim
self.sharpening_factor = sharpening_factor
self.sigmoid_smoothing = sigmoid_smoothing
self.n_heads = 1
self.lookahead = lookahead
self.reset()
# attention dropout applied after the softmax layer
self.dropout = nn.Dropout(p=dropout)
if atype == 'no':
raise NotImplementedError
            # NOTE: sequence-to-sequence without attention (use the last state as a context vector)
elif atype in ['add', 'triggered_attention']:
self.w_key = nn.Linear(kdim, adim)
self.w_query = nn.Linear(qdim, adim, bias=False)
self.v = nn.Linear(adim, 1, bias=False)
elif atype == 'location':
self.w_key = nn.Linear(kdim, adim)
self.w_query = nn.Linear(qdim, adim, bias=False)
self.w_conv = nn.Linear(conv_out_channels, adim, bias=False)
self.conv = nn.Conv2d(in_channels=1,
out_channels=conv_out_channels,
kernel_size=(1, conv_kernel_size),
stride=1,
padding=(0, (conv_kernel_size - 1) // 2),
bias=False)
self.v = nn.Linear(adim, 1, bias=False)
elif atype == 'dot':
self.w_key = nn.Linear(kdim, adim, bias=False)
self.w_query = nn.Linear(qdim, adim, bias=False)
elif atype == 'luong_dot':
assert kdim == qdim
# NOTE: no additional parameters
elif atype == 'luong_general':
self.w_key = nn.Linear(kdim, qdim, bias=False)
elif atype == 'luong_concat':
self.w = nn.Linear(kdim + qdim, adim, bias=False)
self.v = nn.Linear(adim, 1, bias=False)
else:
raise ValueError(atype)
def reset(self):
self.key = None
self.mask = None
def forward(self, key, value, query, mask=None, aw_prev=None,
cache=False, mode='', trigger_points=None):
"""Forward pass.
Args:
key (FloatTensor): `[B, klen, kdim]`
value (FloatTensor): `[B, klen, vdim]`
query (FloatTensor): `[B, 1, qdim]`
mask (ByteTensor): `[B, qlen, klen]`
aw_prev (FloatTensor): `[B, 1 (H), 1 (qlen), klen]`
cache (bool): cache key and mask
mode: dummy interface for MoChA/MMA
trigger_points (IntTensor): `[B]`
Returns:
cv (FloatTensor): `[B, 1, vdim]`
aw (FloatTensor): `[B, 1 (H), 1 (qlen), klen]`
beta: dummy interface for MoChA/MMA
p_choose_i: dummy interface for MoChA/MMA
"""
bs, klen = key.size()[:2]
qlen = query.size(1)
if aw_prev is None:
aw_prev = key.new_zeros(bs, 1, klen)
else:
aw_prev = aw_prev.squeeze(1) # remove head dimension
# Pre-computation of encoder-side features for computing scores
if self.key is None or not cache:
            if self.atype in ['add', 'triggered_attention',
'location', 'dot', 'luong_general']:
self.key = self.w_key(key)
else:
self.key = key
self.mask = mask
if mask is not None:
assert self.mask.size() == (bs, 1, klen), (self.mask.size(), (bs, 1, klen))
# for batch beam search decoding
if self.key.size(0) != query.size(0):
self.key = self.key[0: 1, :, :].repeat([query.size(0), 1, 1])
if self.atype == 'no':
raise NotImplementedError
elif self.atype in ['add', 'triggered_attention']:
tmp = self.key.unsqueeze(1) + self.w_query(query).unsqueeze(2)
e = self.v(torch.tanh(tmp)).squeeze(3)
elif self.atype == 'location':
conv_feat = self.conv(aw_prev.unsqueeze(1)).squeeze(2) # `[B, ch, klen]`
conv_feat = conv_feat.transpose(2, 1).contiguous().unsqueeze(1) # `[B, 1, klen, ch]`
tmp = self.key.unsqueeze(1) + self.w_query(query).unsqueeze(2)
e = self.v(torch.tanh(tmp + self.w_conv(conv_feat))).squeeze(3)
elif self.atype == 'dot':
e = torch.bmm(self.w_query(query), self.key.transpose(2, 1))
elif self.atype in ['luong_dot', 'luong_general']:
e = torch.bmm(query, self.key.transpose(2, 1))
elif self.atype == 'luong_concat':
query = query.repeat([1, klen, 1])
e = self.v(torch.tanh(self.w(torch.cat([self.key, query], dim=-1)))).transpose(2, 1)
assert e.size() == (bs, qlen, klen), (e.size(), (bs, qlen, klen))
NEG_INF = float(np.finfo(torch.tensor(0, dtype=e.dtype).numpy().dtype).min)
# Mask the right part from the trigger point
if self.atype == 'triggered_attention':
assert trigger_points is not None
for b in range(bs):
e[b, :, trigger_points[b] + self.lookahead + 1:] = NEG_INF
# Compute attention weights, context vector
if self.mask is not None:
e = e.masked_fill_(self.mask == 0, NEG_INF)
if self.sigmoid_smoothing:
aw = torch.sigmoid(e) / torch.sigmoid(e).sum(-1).unsqueeze(-1)
else:
aw = torch.softmax(e * self.sharpening_factor, dim=-1)
aw = self.dropout(aw)
cv = torch.bmm(aw, value)
return cv, aw.unsqueeze(1), None, None
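# A minimal smoke test, not part of the original module: exercises the additive
# ('add') scorer on random tensors; every size below is an arbitrary assumption.
if __name__ == '__main__':
    bs, klen, kdim, qdim, adim = 2, 50, 320, 256, 128
    attn = AttentionMechanism(kdim, qdim, adim, atype='add')
    key = torch.randn(bs, klen, kdim)
    value = torch.randn(bs, klen, kdim)  # vdim == kdim here for simplicity
    query = torch.randn(bs, 1, qdim)
    cv, aw, _, _ = attn(key, value, query)
    assert cv.size() == (bs, 1, kdim)     # context vector `[B, 1, vdim]`
    assert aw.size() == (bs, 1, 1, klen)  # attention weights `[B, H, qlen, klen]`
    print('context:', cv.size(), 'weights:', aw.size())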
| [
"[email protected]"
] | |
bc728071d8ce05a274be6f3ab7e50341062153cc | a127d0feb3bcf4f2581f385bb24f2b789c771c9c | /2syo/17.py | 81b2851a04ffe18296b8930111e36f4c0080e7e3 | [] | no_license | NgoVanDau/nlp100knock | 01383e4cc5a1470508744668103b9ea1a238b892 | 3ef63c0d2dfb55c0e6a31aced645f284325a98a5 | refs/heads/master | 2023-03-22T13:19:23.932429 | 2018-08-05T05:27:11 | 2018-08-05T05:27:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | '''indexが入ってしまうので少し意図とずれますが,とりあえず先に進みます'''
import pandas as pd
# f = open('hightemp.txt', 'r')
# lines = f.readlines()
# f.close()
# hightemp = pd.read_table('input/hightemp.txt')
# print(hightemp)
cols = ['prefecture','city','degree','date']
hightemp = pd.read_table('input/hightemp.txt', header=None)
hightemp.columns = cols
# print(hightemp)
for col in cols:
print(hightemp[col].value_counts())
# print(type(hightemp[col].value_counts()))
exit()
| [
"[email protected]"
] | |
5555d92655b3c29e89c2358b93be0e313d1b0343 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03103/s900243702.py | 79c2efdabaa3a89657cec5473f6e626b9bfafc0b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py |
n,m = map(int,input().split(" "))
li = []
for i in range(n):
a,b = map(int,input().split(" "))
li.append((a,b))
li.sort()
result = 0
count = 0
flag = False
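# stores are sorted by unit price, so buy greedily from the cheapest first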
for i in range(m):
for j in range(li[i][1]):
result += li[i][0]
count += 1
if count == m:
flag = True
break
if flag:
break
print(result) | [
"[email protected]"
] | |
9d762fd0e7c422218e547c5cb69f8b1a0f8bdd98 | ca1151bb86a2445d74b24e6ec27c353a36cc511e | /setup.py | d932b3a8d81634b19ad4eeaced1b1a66de5c8646 | [
"MIT"
] | permissive | williamfzc/stagesep2 | 6739c8f32a5fb81f4907e92aa953b2b2d437cbdd | 20c00187c86e8b807bbb3373f58918a575c2ccc9 | refs/heads/master | 2020-04-06T22:06:21.683906 | 2019-08-17T10:07:31 | 2019-08-17T10:07:31 | 157,824,967 | 22 | 3 | MIT | 2019-03-13T07:55:29 | 2018-11-16T06:49:13 | Python | UTF-8 | Python | false | false | 865 | py | from setuptools import setup, find_packages
setup(
name='stagesep2',
version='0.2.6',
description='Analyse, and convert video into useful data.',
author='williamfzc',
author_email='[email protected]',
url='https://github.com/williamfzc/stagesep2',
packages=find_packages(),
license='MIT',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
python_requires=">=3.6",
install_requires=[
'opencv-python',
'structlog',
'numpy',
'jieba',
'scikit-image',
'pyecharts==0.5.11',
'pyecharts_snapshot',
'findit',
'tesserocr',
'Pillow',
]
)
| [
"[email protected]"
] | |
31d37ea5367ad2ba461b4028cc146828551bad82 | 131ccf66fb787e9b1f0773a25fa518d1f2a3c5d0 | /gui_programming/menu_demo.py | 4621009871473d50b9017a2adab00a3842a40797 | [] | no_license | jocogum10/learning-python-programming | a0ba62abde49fd79762bcb7ba4a94bf8126afa77 | 035858bd332e3970d95db8bce7b1175e450802db | refs/heads/master | 2020-07-07T17:08:00.743196 | 2019-12-13T05:32:47 | 2019-12-13T05:32:47 | 203,416,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,770 | py | #!/usr/local/bin/python
"""
Tk8.0 style main window menus
menu/tool bars packed before middle, fill=X (pack first=clip last);
adds photo menu entries; see also: add_checkbutton, add_radiobutton
"""
from tkinter import *
from tkinter.messagebox import *
class NewMenuDemo(Frame):
def __init__(self, parent=None):
Frame.__init__(self, parent)
self.pack(expand=YES, fill=BOTH)
self.createWidgets()
self.master.title("Toolbars and Menus")
self.master.iconname("tkpython")
def createWidgets(self):
self.makeMenuBar()
self.makeToolBar()
L = Label(self, text="Menu and Toolbar Demo")
L.config(relief=SUNKEN, width=40, height=10, bg="white")
L.pack(expand=YES, fill=BOTH)
def makeToolBar(self):
toolbar = Frame(self, cursor='hand2', relief=SUNKEN, bd=2)
toolbar.pack(side=BOTTOM, fill=X)
Button(toolbar, text='Quit', command=self.quit).pack(side=RIGHT)
Button(toolbar, text='Hello', command=self.greeting).pack(side=LEFT)
def makeMenuBar(self):
self.menubar = Menu(self.master)
self.master.config(menu=self.menubar) #master=top-level window
self.fileMenu()
self.editMenu()
self.imageMenu()
def fileMenu(self):
pulldown = Menu(self.menubar)
pulldown.add_command(label="Open...", command=self.notdone)
pulldown.add_command(label="Quit...", command=self.quit)
self.menubar.add_cascade(label='File', underline=0, menu=pulldown)
def editMenu(self):
pulldown = Menu(self.menubar)
pulldown.add_command(label='Paste', command=self.notdone)
pulldown.add_command(label='Spam', command=self.greeting)
pulldown.add_separator()
pulldown.add_command(label='Delete', command=self.greeting)
        pulldown.entryconfig(4, state=DISABLED)
self.menubar.add_cascade(label='Edit', underline=0, menu=pulldown)
def imageMenu(self):
photoFiles = ('1.png', '2.png', '3.png')
pulldown = Menu(self.menubar)
self.photoObjs = []
for file in photoFiles:
img = PhotoImage(file='./images/' + file)
pulldown.add_command(image=img, command=self.notdone)
self.photoObjs.append(img) #keep a reference
self.menubar.add_cascade(label='Image', underline=0, menu=pulldown)
def greeting(self):
showinfo('greeting', 'Greetings')
def notdone(self):
showerror('Not implemented', 'Not yet available')
def quit(self):
if askyesno('Verify quit', 'Are you sure you want to quit?'):
Frame.quit(self)
if __name__ == '__main__':
NewMenuDemo().mainloop() | [
"[email protected]"
] | |
4ba22a2cd9579de26bad0952e3b979925435e5ce | 1763b41a702b8e8b15e3767676fb201de927cca6 | /Yelp_CF.py | 09913bdba4902eb3c7adf26662e19e3115f1eda4 | [] | no_license | chixujohnny/Yelp_project | fe81701d642729cf850c1a4adf5734cc052561d3 | 844da018885b107246c39c1942cec0575e051b59 | refs/heads/master | 2021-01-12T06:23:22.318169 | 2017-05-11T10:42:10 | 2017-05-11T10:42:10 | 77,352,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | # coding: utf-8
##########################################
# Collaborative-filtering recommendation #
##########################################
import json
def CF_Data_Preprocess(review_path):
    # Build a dict of each user's ratings:
    # {'user_id': [['business_id', 'stars', 'date'], ...]}
User_Rate_Dict = {}
lines = open(review_path)
for line in lines:
line_json = json.loads(line)
uid = line_json['user_id']
bid = line_json['business_id']
stars = line_json['stars']
        date = line_json['date']
        User_Rate_Dict.setdefault(uid, []).append([bid, stars, date])
    lines.close()
    return User_Rate_Dict
| [
"[email protected]"
] | |
420b7dde5c7f879259a4d295a2879d5337315c01 | 38433574de70ccc0472daeabb614b491ac8526c0 | /sqlalchemy-stubs/dialects/firebird/fdb.pyi | d4d08d972401a15e71addb27f7142773657389cf | [
"MIT"
] | permissive | Parnassius/sqlalchemy2-stubs | 28fd7611a50b60415062fdb6d367da14c9a69462 | 68f8417888456588714fcced1c6799f3eb00ff2d | refs/heads/main | 2023-09-01T18:03:58.568139 | 2021-10-14T17:54:15 | 2021-10-14T17:54:15 | 363,415,451 | 0 | 0 | MIT | 2021-10-14T20:29:03 | 2021-05-01T13:22:32 | Python | UTF-8 | Python | false | false | 402 | pyi | from typing import Any
from .kinterbasdb import FBDialect_kinterbasdb as FBDialect_kinterbasdb
from ... import util as util
class FBDialect_fdb(FBDialect_kinterbasdb):
def __init__(
self, enable_rowcount: bool = ..., retaining: bool = ..., **kwargs: Any
) -> None: ...
@classmethod
def dbapi(cls): ...
def create_connect_args(self, url: Any): ...
dialect = FBDialect_fdb
| [
"[email protected]"
] | |
e5f65f77df2260dd3efa5fb59971d518f10a7410 | 481641e0179b5d416a7c48481455874767ae2575 | /Course Schedule.py | c2898ffbfd7fa05912babe048fa6ac2bbf524b85 | [] | no_license | nixonpj/leetcode | 776dad03a9de43a8c046b1ea1bbb3dd5e9f256ca | de756337b11e578e25f6d0cc0c70a22ae0b8fdc5 | refs/heads/main | 2023-05-01T11:38:44.530120 | 2021-05-17T15:13:16 | 2021-05-17T15:13:16 | 304,484,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,949 | py | """
There are a total of numCourses courses you have to take, labeled from
0 to numCourses - 1. You are given an array prerequisites where
prerequisites[i] = [ai, bi] indicates that you must take course bi first
if you want to take course ai.
For example, the pair [0, 1] indicates that to take course 0 you
have to first take course 1.
Return true if you can finish all courses. Otherwise, return false.
"""
from typing import List
class Solution:
def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
self.prereqs = {i: [] for i in range(numCourses)}
for course, prereq in prerequisites:
self.prereqs[course].append(prereq)
print(self.prereqs)
if not prerequisites:
return True
self.taken = {i: "Unvisited" for i in range(numCourses)}
for course in range(numCourses):
if self.taken[course] == "Unvisited":
if not self.can_take_course(course):
return False
self.taken[course] = "Visited"
print("---")
return True
def can_take_course(self, course):
print(course, self.taken)
self.taken[course] = "Visiting"
can_take = True
for prereq in self.prereqs[course]:
if self.taken[prereq] == "Unvisited":
if not self.can_take_course(prereq):
return False
# return can_take and self.can_take_course(prereq)
elif self.taken[prereq] == "Visiting":
print("cycle", course, prereq, self.taken)
return False
self.taken[course] = "Visited"
return True
s = Solution()
# print(s.canFinish(numCourses=2, prerequisites=[[1,0], [0,1]]))
# print(s.canFinish(numCourses=5, prerequisites=[[1,4],[2,4],[3,1],[3,2]]))
print(s.canFinish(numCourses=5, prerequisites=[[0,1],[1,2],[0,3],[4,0], [3,1], [4,1], [2,4]]))
| [
"[email protected]"
] | |
0b53643e6c106365bda081bdb298e397550c27bd | c40d1eb90464fa61c7c290ccd4f4a6416d7ed2ff | /1094. Car Pooling.py | 3cdb8a616a16184f9f22f3d0baaab05d4e69970d | [] | no_license | mh-rahman/Programming-Practice | 2bebdd5c68490882efefa9e262d2a90bb0da51fa | e4ceb275a6c9a56999289751f13e74548d9cd185 | refs/heads/master | 2021-07-26T12:48:43.158183 | 2020-09-19T22:34:25 | 2020-09-19T22:34:25 | 219,614,371 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | import heapq
from typing import List
class Solution:
def carPooling(self, trips: List[List[int]], capacity: int) -> bool:
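        # Sort trips by pickup (then drop-off) point and sweep left to right,
        # keeping a min-heap of (drop-off location, passenger count) for
        # passengers currently on board.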
trips.sort(key = lambda x: (x[1],x[2]))
passengers, dropHeap = 0, []
heapq.heapify(dropHeap)
for nPassengers, startLoc, endLoc in trips:
#Drop passengers
while dropHeap and dropHeap[0][0] <= startLoc:
_, drop = heapq.heappop(dropHeap)
passengers -= drop
#Check capacity and return false
if nPassengers + passengers > capacity:
return False
#Add to heap
heapq.heappush(dropHeap,(endLoc, nPassengers))
passengers += nPassengers
return True | [
"[email protected]"
] | |
397ef26f523edeb63c9bc75e90425a4be6ca01c7 | 149e9e52304a970ffb256f290fce5f614c9e20c4 | /Python Programming language/DataCampPractice/Intermediate Python for DS/DictionariesAndPandas/DeP2.py | 2c6e6d638adfb8dd2b65d73c8fa29af218c8603c | [] | no_license | Pasquale-Silv/Improving_Python | 7451e0c423d73a91fa572d44d3e4133b0b4f5c98 | 96b605879810a9ab6c6459913bd366b936e603e4 | refs/heads/master | 2023-06-03T15:00:21.554783 | 2021-06-22T15:26:28 | 2021-06-22T15:26:28 | 351,806,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | # Definition of dictionary
europe = {'spain':'madrid', 'france':'paris', 'germany':'berlin', 'norway':'oslo' }
# Add italy to europe
europe["italy"] = "rome"
# Print out italy in europe
print(europe["italy"])
print("italy" in europe)
# Add poland to europe
europe["poland"] = "warsaw"
# Print europe
print(europe) | [
"[email protected]"
] | |
8bb252afb4067b58069f6b9377b10913461f63c5 | c0ec8563efa462cbcdbfba55552fd50f994f6cb0 | /gitee/models/code_forks_history.py | 250e04dc60ae647d80ecb7af6ccc93de3971694f | [
"MIT"
] | permissive | kingreatwill/pygitee | 191487e208f42a42efb3f10d69fe0e40229de768 | 7622314a4dbb08cf2f729b6cdd0a2887b96e394e | refs/heads/master | 2022-12-18T08:10:06.079301 | 2020-09-10T09:05:34 | 2020-09-10T09:05:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,982 | py | # coding: utf-8
import pprint
import re # noqa: F401
import six
class CodeForksHistory(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'url': 'str',
'forks_url': 'str',
'commits_url': 'str',
'id': 'str',
'description': 'str',
'public': 'str',
'owner': 'str',
'user': 'str',
'files': 'str',
'truncated': 'str',
'html_url': 'str',
'comments': 'str',
'comments_url': 'str',
'git_pull_url': 'str',
'git_push_url': 'str',
'created_at': 'str',
'updated_at': 'str',
'forks': 'str',
'history': 'str'
}
attribute_map = {
'url': 'url',
'forks_url': 'forks_url',
'commits_url': 'commits_url',
'id': 'id',
'description': 'description',
'public': 'public',
'owner': 'owner',
'user': 'user',
'files': 'files',
'truncated': 'truncated',
'html_url': 'html_url',
'comments': 'comments',
'comments_url': 'comments_url',
'git_pull_url': 'git_pull_url',
'git_push_url': 'git_push_url',
'created_at': 'created_at',
'updated_at': 'updated_at',
'forks': 'forks',
'history': 'history'
}
def __init__(self, url=None, forks_url=None, commits_url=None, id=None, description=None, public=None, owner=None,
user=None, files=None, truncated=None, html_url=None, comments=None, comments_url=None,
git_pull_url=None, git_push_url=None, created_at=None, updated_at=None, forks=None,
history=None): # noqa: E501
"""CodeForksHistory - a model defined in Swagger""" # noqa: E501
self._url = None
self._forks_url = None
self._commits_url = None
self._id = None
self._description = None
self._public = None
self._owner = None
self._user = None
self._files = None
self._truncated = None
self._html_url = None
self._comments = None
self._comments_url = None
self._git_pull_url = None
self._git_push_url = None
self._created_at = None
self._updated_at = None
self._forks = None
self._history = None
self.discriminator = None
if url is not None:
self.url = url
if forks_url is not None:
self.forks_url = forks_url
if commits_url is not None:
self.commits_url = commits_url
if id is not None:
self.id = id
if description is not None:
self.description = description
if public is not None:
self.public = public
if owner is not None:
self.owner = owner
if user is not None:
self.user = user
if files is not None:
self.files = files
if truncated is not None:
self.truncated = truncated
if html_url is not None:
self.html_url = html_url
if comments is not None:
self.comments = comments
if comments_url is not None:
self.comments_url = comments_url
if git_pull_url is not None:
self.git_pull_url = git_pull_url
if git_push_url is not None:
self.git_push_url = git_push_url
if created_at is not None:
self.created_at = created_at
if updated_at is not None:
self.updated_at = updated_at
if forks is not None:
self.forks = forks
if history is not None:
self.history = history
@property
def url(self):
"""Gets the url of this CodeForksHistory. # noqa: E501
:return: The url of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this CodeForksHistory.
:param url: The url of this CodeForksHistory. # noqa: E501
:type: str
"""
self._url = url
@property
def forks_url(self):
"""Gets the forks_url of this CodeForksHistory. # noqa: E501
:return: The forks_url of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._forks_url
@forks_url.setter
def forks_url(self, forks_url):
"""Sets the forks_url of this CodeForksHistory.
:param forks_url: The forks_url of this CodeForksHistory. # noqa: E501
:type: str
"""
self._forks_url = forks_url
@property
def commits_url(self):
"""Gets the commits_url of this CodeForksHistory. # noqa: E501
:return: The commits_url of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._commits_url
@commits_url.setter
def commits_url(self, commits_url):
"""Sets the commits_url of this CodeForksHistory.
:param commits_url: The commits_url of this CodeForksHistory. # noqa: E501
:type: str
"""
self._commits_url = commits_url
@property
def id(self):
"""Gets the id of this CodeForksHistory. # noqa: E501
:return: The id of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this CodeForksHistory.
:param id: The id of this CodeForksHistory. # noqa: E501
:type: str
"""
self._id = id
@property
def description(self):
"""Gets the description of this CodeForksHistory. # noqa: E501
:return: The description of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this CodeForksHistory.
:param description: The description of this CodeForksHistory. # noqa: E501
:type: str
"""
self._description = description
@property
def public(self):
"""Gets the public of this CodeForksHistory. # noqa: E501
:return: The public of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._public
@public.setter
def public(self, public):
"""Sets the public of this CodeForksHistory.
:param public: The public of this CodeForksHistory. # noqa: E501
:type: str
"""
self._public = public
@property
def owner(self):
"""Gets the owner of this CodeForksHistory. # noqa: E501
:return: The owner of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._owner
@owner.setter
def owner(self, owner):
"""Sets the owner of this CodeForksHistory.
:param owner: The owner of this CodeForksHistory. # noqa: E501
:type: str
"""
self._owner = owner
@property
def user(self):
"""Gets the user of this CodeForksHistory. # noqa: E501
:return: The user of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this CodeForksHistory.
:param user: The user of this CodeForksHistory. # noqa: E501
:type: str
"""
self._user = user
@property
def files(self):
"""Gets the files of this CodeForksHistory. # noqa: E501
:return: The files of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._files
@files.setter
def files(self, files):
"""Sets the files of this CodeForksHistory.
:param files: The files of this CodeForksHistory. # noqa: E501
:type: str
"""
self._files = files
@property
def truncated(self):
"""Gets the truncated of this CodeForksHistory. # noqa: E501
:return: The truncated of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._truncated
@truncated.setter
def truncated(self, truncated):
"""Sets the truncated of this CodeForksHistory.
:param truncated: The truncated of this CodeForksHistory. # noqa: E501
:type: str
"""
self._truncated = truncated
@property
def html_url(self):
"""Gets the html_url of this CodeForksHistory. # noqa: E501
:return: The html_url of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._html_url
@html_url.setter
def html_url(self, html_url):
"""Sets the html_url of this CodeForksHistory.
:param html_url: The html_url of this CodeForksHistory. # noqa: E501
:type: str
"""
self._html_url = html_url
@property
def comments(self):
"""Gets the comments of this CodeForksHistory. # noqa: E501
:return: The comments of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._comments
@comments.setter
def comments(self, comments):
"""Sets the comments of this CodeForksHistory.
:param comments: The comments of this CodeForksHistory. # noqa: E501
:type: str
"""
self._comments = comments
@property
def comments_url(self):
"""Gets the comments_url of this CodeForksHistory. # noqa: E501
:return: The comments_url of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._comments_url
@comments_url.setter
def comments_url(self, comments_url):
"""Sets the comments_url of this CodeForksHistory.
:param comments_url: The comments_url of this CodeForksHistory. # noqa: E501
:type: str
"""
self._comments_url = comments_url
@property
def git_pull_url(self):
"""Gets the git_pull_url of this CodeForksHistory. # noqa: E501
:return: The git_pull_url of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._git_pull_url
@git_pull_url.setter
def git_pull_url(self, git_pull_url):
"""Sets the git_pull_url of this CodeForksHistory.
:param git_pull_url: The git_pull_url of this CodeForksHistory. # noqa: E501
:type: str
"""
self._git_pull_url = git_pull_url
@property
def git_push_url(self):
"""Gets the git_push_url of this CodeForksHistory. # noqa: E501
:return: The git_push_url of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._git_push_url
@git_push_url.setter
def git_push_url(self, git_push_url):
"""Sets the git_push_url of this CodeForksHistory.
:param git_push_url: The git_push_url of this CodeForksHistory. # noqa: E501
:type: str
"""
self._git_push_url = git_push_url
@property
def created_at(self):
"""Gets the created_at of this CodeForksHistory. # noqa: E501
:return: The created_at of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this CodeForksHistory.
:param created_at: The created_at of this CodeForksHistory. # noqa: E501
:type: str
"""
self._created_at = created_at
@property
def updated_at(self):
"""Gets the updated_at of this CodeForksHistory. # noqa: E501
:return: The updated_at of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this CodeForksHistory.
:param updated_at: The updated_at of this CodeForksHistory. # noqa: E501
:type: str
"""
self._updated_at = updated_at
@property
def forks(self):
"""Gets the forks of this CodeForksHistory. # noqa: E501
:return: The forks of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._forks
@forks.setter
def forks(self, forks):
"""Sets the forks of this CodeForksHistory.
:param forks: The forks of this CodeForksHistory. # noqa: E501
:type: str
"""
self._forks = forks
@property
def history(self):
"""Gets the history of this CodeForksHistory. # noqa: E501
:return: The history of this CodeForksHistory. # noqa: E501
:rtype: str
"""
return self._history
@history.setter
def history(self, history):
"""Sets the history of this CodeForksHistory.
:param history: The history of this CodeForksHistory. # noqa: E501
:type: str
"""
self._history = history
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CodeForksHistory, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CodeForksHistory):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
864cfe6ca7cb9e8a0d840fa7b0fd3f5bb4df3542 | c7d4b0a7278df76c65108a637be0ee144f3b0edd | /graphite_api/finders/__init__.py | 4b4f76a120412c53cbd1f6dfeaef5925ff3e0f34 | [
"Apache-2.0"
] | permissive | dkulikovsky/graphite-api | e21f0441319e9a58a6f06bfee8369e8d0f13cab8 | f42d14eb2bd8112f12212318f8ca5b6f859b12f0 | refs/heads/master | 2021-01-23T00:25:39.428369 | 2015-07-08T23:27:09 | 2015-07-08T23:27:09 | 38,269,511 | 2 | 1 | null | 2015-06-29T20:26:27 | 2015-06-29T20:26:27 | null | UTF-8 | Python | false | false | 1,546 | py | import fnmatch
import os.path
def get_real_metric_path(absolute_path, metric_path):
# Support symbolic links (real_metric_path ensures proper cache queries)
if os.path.islink(absolute_path):
real_fs_path = os.path.realpath(absolute_path)
relative_fs_path = metric_path.replace('.', os.sep)
base_fs_path = absolute_path[:-len(relative_fs_path)]
relative_real_fs_path = real_fs_path[len(base_fs_path):]
return fs_to_metric(relative_real_fs_path)
return metric_path
def fs_to_metric(path):
dirpath = os.path.dirname(path)
filename = os.path.basename(path)
return os.path.join(dirpath, filename.split('.')[0]).replace(os.sep, '.')
def _deduplicate(entries):
yielded = set()
for entry in entries:
if entry not in yielded:
yielded.add(entry)
yield entry
def match_entries(entries, pattern):
"""A drop-in replacement for fnmatch.filter that supports pattern
variants (ie. {foo,bar}baz = foobaz or barbaz)."""
v1, v2 = pattern.find('{'), pattern.find('}')
if v1 > -1 and v2 > v1:
variations = pattern[v1+1:v2].split(',')
variants = [pattern[:v1] + v + pattern[v2+1:] for v in variations]
matching = []
for variant in variants:
matching.extend(fnmatch.filter(entries, variant))
# remove dupes without changing order
return list(_deduplicate(matching))
else:
matching = fnmatch.filter(entries, pattern)
matching.sort()
return matching
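# Example (illustrative):
#   match_entries(['foobaz', 'barbaz', 'bazbaz'], '{foo,bar}baz')
#   # -> ['foobaz', 'barbaz']  (variant order preserved, duplicates removed)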
| [
"[email protected]"
] | |
ea400d3b5f0ad457cae4af10cb257e224a88cc0a | 9d41f4df737dc2e6fd3fcf4c6f50028fd483cdd0 | /python_Django/fc_community/board/migrations/0002_auto_20200430_1106.py | 66d148799c461106ca4773eddece35b4ca8a41f6 | [] | no_license | Ha-Young/byte_degree_python | 33a730f4c1f4a99fea03fb923ad73edee2dd1d48 | 7fcbfed832dec3d7cb8503b86d9457e1f2ae0ccf | refs/heads/master | 2022-11-16T16:54:52.978443 | 2020-07-04T14:32:16 | 2020-07-04T14:32:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | # Generated by Django 3.0.5 on 2020-04-30 11:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('board', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='board',
name='registered_dttm',
            field=models.DateTimeField(auto_now_add=True, verbose_name='등록일자'),  # '등록일자' means "registration date"
),
]
| [
"[email protected]"
] | |
2fa05b1d216249b5cab111a5a004c50d74328970 | cc43149992c5f79718279ee47e5db4617b1b42e9 | /pytorch_toolkit/nncf/examples/common/models/classification/squeezenet.py | 8bac5c962959fb4ff1fec14ff7328bba41855a48 | [
"Apache-2.0"
] | permissive | tongni1975/openvino_training_extensions | 174be009bb2fedf6bc774426f340960a90635600 | 3ff9796a2fc413564726916d5c11b42738bb40ef | refs/heads/develop | 2020-06-07T19:06:46.120350 | 2020-04-17T11:27:08 | 2020-04-17T11:27:08 | 193,077,317 | 0 | 0 | Apache-2.0 | 2020-04-17T11:47:00 | 2019-06-21T10:12:20 | Python | UTF-8 | Python | false | false | 5,531 | py | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# SqueezeNet implementation from:
# torchvision/models/squeezenet.py
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.utils.model_zoo as model_zoo
model_urls = {
'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
}
class Fire(nn.Module):
def __init__(self, inplanes, squeeze_planes,
expand1x1_planes, expand3x3_planes):
super(Fire, self).__init__()
self.inplanes = inplanes
self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
self.squeeze_activation = nn.ReLU(inplace=True)
self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,
kernel_size=1)
self.expand1x1_activation = nn.ReLU(inplace=True)
self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,
kernel_size=3, padding=1)
self.expand3x3_activation = nn.ReLU(inplace=True)
def forward(self, x):
x = self.squeeze_activation(self.squeeze(x))
return torch.cat([
self.expand1x1_activation(self.expand1x1(x)),
self.expand3x3_activation(self.expand3x3(x))
], 1)
class SqueezeNet(nn.Module):
def __init__(self, version=1.0, num_classes=1000):
super(SqueezeNet, self).__init__()
if version not in [1.0, 1.1]:
raise ValueError("Unsupported SqueezeNet version {version}:"
"1.0 or 1.1 expected".format(version=version))
self.num_classes = num_classes
if version == 1.0:
self.features = nn.Sequential(
nn.Conv2d(3, 96, kernel_size=7, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
Fire(96, 16, 64, 64),
Fire(128, 16, 64, 64),
Fire(128, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
Fire(256, 32, 128, 128),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
Fire(512, 64, 256, 256),
)
else:
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
Fire(64, 16, 64, 64),
Fire(128, 16, 64, 64),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
Fire(128, 32, 128, 128),
Fire(256, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
Fire(512, 64, 256, 256),
)
        # Final convolution is initialized differently from the rest
final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
self.classifier = nn.Sequential(
nn.Dropout(p=0.5),
final_conv,
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d((1, 1))
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m is final_conv:
init.normal_(m.weight, mean=0.0, std=0.01)
else:
init.kaiming_uniform_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x):
x = self.features(x)
x = self.classifier(x)
return x.view(x.size(0), self.num_classes)
def squeezenet1_0_custom(pretrained=False, **kwargs):
r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
accuracy with 50x fewer parameters and <0.5MB model size"
<https://arxiv.org/abs/1602.07360>`_ paper.
Args:
pretrained (bool): If True, returns a model pretrained on ImageNet
"""
model = SqueezeNet(version=1.0, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_0']))
return model
def squeezenet1_1_custom(pretrained=False, **kwargs):
r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
Args:
pretrained (bool): If True, returns a model pretrained on ImageNet
"""
model = SqueezeNet(version=1.1, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_1']))
return model
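# Example (illustrative): building the 1.1 variant without pretrained weights
#   net = squeezenet1_1_custom(num_classes=10)
#   logits = net(torch.randn(1, 3, 224, 224))  # -> shape (1, 10)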
| [
"[email protected]"
] | |
e295f4546f92d704e963f88e2f69e970dcdbf3df | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_gainsay.py | 0939ed1baa146c43e3236b450df317cc19978b75 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py |
#calss header
class _GAINSAY():
def __init__(self,):
self.name = "GAINSAY"
self.definitions = [u'to refuse to accept something as the truth: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
217e3f356c285d0b8b0da2c8c12ff234b8ca8f10 | 1064db5dfd154c4bc600e0e03841b0f73f0eefbc | /home/api/v1/urls.py | 68d0f35d9043e634ceb843ff272945032f9cd6ae | [] | no_license | crowdbotics-apps/web-29-dev-5196 | 3303921a0e5c8794e8e67f55c9841f3ec7610c16 | 7beda8f7d57ce9b9858a46f7e3940d6eed4b5725 | refs/heads/master | 2023-05-26T23:00:23.271209 | 2020-05-29T12:47:07 | 2020-05-29T12:47:07 | 267,768,914 | 0 | 0 | null | 2021-06-13T04:08:30 | 2020-05-29T04:59:18 | Python | UTF-8 | Python | false | false | 614 | py | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import CustomTextViewSet, GgfhgfhViewSet, HomePageViewSet
from home.api.v1.viewsets import (
SignupViewSet,
LoginViewSet,
HomePageViewSet,
CustomTextViewSet,
)
router = DefaultRouter()
router.register("signup", SignupViewSet, basename="signup")
router.register("login", LoginViewSet, basename="login")
router.register("customtext", CustomTextViewSet)
router.register("homepage", HomePageViewSet)
router.register("ggfhgfh", GgfhgfhViewSet)
urlpatterns = [
path("", include(router.urls)),
]
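# The router generates the usual viewset routes, e.g.:
#   signup/, login/, customtext/, homepage/, ggfhgfh/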
| [
"[email protected]"
] | |
98661a7646afcda1edc0dedb83f90e8b72a50dc0 | 650b516b1214c4d44fd6f04941e87e28e9049cde | /addons/plugin.video.fanfilm/resources/lib/libraries/cleandate.py | 85151cc3c8bb936f60ac45d4dfad364b8b419655 | [] | no_license | MultiWu/build | b85cc45a33b871f4ade58de8457fcd094761f385 | f50a64f674b6499668e0a5758fe0879b016f5c38 | refs/heads/master | 2022-10-31T20:35:53.382826 | 2019-12-20T22:50:16 | 2019-12-20T22:50:16 | 228,462,984 | 0 | 3 | null | 2022-10-07T08:47:18 | 2019-12-16T19:46:39 | Python | UTF-8 | Python | false | false | 1,884 | py | # -*- coding: utf-8 -*-
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import datetime
import time
def iso_2_utc(iso_ts):
if not iso_ts or iso_ts is None: return 0
    delim = -1
    if not iso_ts.endswith('Z'):
        delim = iso_ts.rfind('+')
        # a '-' only marks a timezone offset when it appears after the 'T';
        # earlier '-' characters belong to the date part
        if delim == -1 and iso_ts.rfind('-') > iso_ts.find('T'):
            delim = iso_ts.rfind('-')
    if delim > -1:
        ts = iso_ts[:delim]
        sign = iso_ts[delim]
        tz = iso_ts[delim + 1:]
    else:
        # strip the trailing UTC designator so strptime can parse the stamp
        ts = iso_ts.rstrip('Z')
        tz = None
if ts.find('.') > -1:
ts = ts[:ts.find('.')]
try:
d = datetime.datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S')
except TypeError:
d = datetime.datetime(*(time.strptime(ts, '%Y-%m-%dT%H:%M:%S')[0:6]))
dif = datetime.timedelta()
if tz:
hours, minutes = tz.split(':')
hours = int(hours)
minutes = int(minutes)
if sign == '-':
hours = -hours
minutes = -minutes
dif = datetime.timedelta(minutes=minutes, hours=hours)
utc_dt = d - dif
epoch = datetime.datetime.utcfromtimestamp(0)
delta = utc_dt - epoch
try:
seconds = delta.total_seconds() # works only on 2.7
except:
seconds = delta.seconds + delta.days * 24 * 3600 # close enough
return seconds
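# Example (illustrative):
#   iso_2_utc('2015-03-01T12:00:00.000Z')    # seconds since the Unix epoch
#   iso_2_utc('2015-03-01T12:00:00+02:00')   # the +02:00 offset is folded in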
| [
"[email protected]"
] | |
7000364dbe6d566f301d1972a2bebd9c1f7666da | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/others/DeepFM_for_PyTorch/deepctr_torch/layers/interaction.py | d6e7ba6d8917cface0acbf225497622269efc13f | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 30,854 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at#
#
# http://www.apache.org/licenses/LICENSE-2.0#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..layers.activation import activation_layer
from ..layers.core import Conv2dSame
from ..layers.sequence import KMaxPooling
class FM(nn.Module):
"""Factorization Machine models pairwise (order-2) feature interactions
without linear term and bias.
Input shape
- 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 2D tensor with shape: ``(batch_size, 1)``.
References
- [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf)
"""
def __init__(self):
super(FM, self).__init__()
def forward(self, inputs):
fm_input = inputs
square_of_sum = torch.pow(torch.sum(fm_input, dim=1, keepdim=True), 2)
sum_of_square = torch.sum(fm_input * fm_input, dim=1, keepdim=True)
cross_term = square_of_sum - sum_of_square
cross_term = 0.5 * torch.sum(cross_term, dim=2, keepdim=False)
return cross_term
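# Example (illustrative): batch of 32 samples, 10 fields, embedding size 4
#   fm = FM()
#   fm(torch.randn(32, 10, 4)).shape   # -> torch.Size([32, 1])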
class BiInteractionPooling(nn.Module):
"""Bi-Interaction Layer used in Neural FM,compress the
pairwise element-wise product of features into one single vector.
Input shape
- A 3D tensor with shape:``(batch_size,field_size,embedding_size)``.
Output shape
- 3D tensor with shape: ``(batch_size,1,embedding_size)``.
References
- [He X, Chua T S. Neural factorization machines for sparse predictive analytics[C]//Proceedings of the 40th International ACM SIGIR conference on Research and Development in Information Retrieval. ACM, 2017: 355-364.](http://arxiv.org/abs/1708.05027)
"""
def __init__(self):
super(BiInteractionPooling, self).__init__()
def forward(self, inputs):
concated_embeds_value = inputs
square_of_sum = torch.pow(
torch.sum(concated_embeds_value, dim=1, keepdim=True), 2)
sum_of_square = torch.sum(
concated_embeds_value * concated_embeds_value, dim=1, keepdim=True)
cross_term = 0.5 * (square_of_sum - sum_of_square)
return cross_term
class SENETLayer(nn.Module):
"""SENETLayer used in FiBiNET.
Input shape
- A list of 3D tensor with shape: ``(batch_size,filed_size,embedding_size)``.
Output shape
- A list of 3D tensor with shape: ``(batch_size,filed_size,embedding_size)``.
Arguments
- **filed_size** : Positive integer, number of feature groups.
        - **reduction_ratio** : Positive integer, factor by which the field
          dimension is reduced in the squeeze step
          (reduction_size = max(1, filed_size // reduction_ratio)).
- **seed** : A Python integer to use as random seed.
References
- [FiBiNET: Combining Feature Importance and Bilinear feature Interaction for Click-Through Rate Prediction
Tongwen](https://arxiv.org/pdf/1905.09433.pdf)
"""
def __init__(self, filed_size, reduction_ratio=3, seed=1024, device='cpu'):
super(SENETLayer, self).__init__()
self.seed = seed
self.filed_size = filed_size
self.reduction_size = max(1, filed_size // reduction_ratio)
self.excitation = nn.Sequential(
nn.Linear(self.filed_size, self.reduction_size, bias=False),
nn.ReLU(),
nn.Linear(self.reduction_size, self.filed_size, bias=False),
nn.ReLU()
)
self.to(device)
def forward(self, inputs):
if len(inputs.shape) != 3:
raise ValueError(
"Unexpected inputs dimensions %d, expect to be 3 dimensions" % (len(inputs.shape)))
Z = torch.mean(inputs, dim=-1, out=None)
A = self.excitation(Z)
V = torch.mul(inputs, torch.unsqueeze(A, dim=2))
return V
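# Example (illustrative): per-field reweighting keeps the input shape
#   se = SENETLayer(filed_size=10)
#   se(torch.randn(32, 10, 4)).shape   # -> torch.Size([32, 10, 4])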
class BilinearInteraction(nn.Module):
"""BilinearInteraction Layer used in FiBiNET.
Input shape
- A list of 3D tensor with shape: ``(batch_size,filed_size, embedding_size)``.
Output shape
- 3D tensor with shape: ``(batch_size,filed_size*(filed_size-1)/2, embedding_size)``.
Arguments
- **filed_size** : Positive integer, number of feature groups.
- **embedding_size** : Positive integer, embedding size of sparse features.
- **bilinear_type** : String, types of bilinear functions used in this layer.
- **seed** : A Python integer to use as random seed.
References
- [FiBiNET: Combining Feature Importance and Bilinear feature Interaction for Click-Through Rate Prediction
Tongwen](https://arxiv.org/pdf/1905.09433.pdf)
"""
def __init__(self, filed_size, embedding_size, bilinear_type="interaction", seed=1024, device='cpu'):
super(BilinearInteraction, self).__init__()
self.bilinear_type = bilinear_type
self.seed = seed
self.bilinear = nn.ModuleList()
if self.bilinear_type == "all":
self.bilinear = nn.Linear(
embedding_size, embedding_size, bias=False)
elif self.bilinear_type == "each":
for _ in range(filed_size):
self.bilinear.append(
nn.Linear(embedding_size, embedding_size, bias=False))
elif self.bilinear_type == "interaction":
for i, j in itertools.combinations(range(filed_size), 2):
self.bilinear.append(
nn.Linear(embedding_size, embedding_size, bias=False))
else:
raise NotImplementedError
self.to(device)
def forward(self, inputs):
if len(inputs.shape) != 3:
raise ValueError(
"Unexpected inputs dimensions %d, expect to be 3 dimensions" % (len(inputs.shape)))
inputs = torch.split(inputs, 1, dim=1)
if self.bilinear_type == "all":
p = [torch.mul(self.bilinear(v_i), v_j)
for v_i, v_j in itertools.combinations(inputs, 2)]
elif self.bilinear_type == "each":
p = [torch.mul(self.bilinear[i](inputs[i]), inputs[j])
for i, j in itertools.combinations(range(len(inputs)), 2)]
elif self.bilinear_type == "interaction":
p = [torch.mul(bilinear(v[0]), v[1])
for v, bilinear in zip(itertools.combinations(inputs, 2), self.bilinear)]
else:
raise NotImplementedError
return torch.cat(p, dim=1)
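# Example (illustrative): 10 fields yield 10 * 9 / 2 = 45 interaction rows
#   bi = BilinearInteraction(filed_size=10, embedding_size=4)
#   bi(torch.randn(32, 10, 4)).shape   # -> torch.Size([32, 45, 4])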
class CIN(nn.Module):
"""Compressed Interaction Network used in xDeepFM.
Input shape
- 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 2D tensor with shape: ``(batch_size, featuremap_num)`` ``featuremap_num = sum(self.layer_size[:-1]) // 2 + self.layer_size[-1]`` if ``split_half=True``,else ``sum(layer_size)`` .
Arguments
- **filed_size** : Positive integer, number of feature groups.
- **layer_size** : list of int.Feature maps in each layer.
- **activation** : activation function name used on feature maps.
        - **split_half** : bool. If set to True, only half of the feature maps in each hidden layer connect to the output unit (the rest feed the next layer).
- **seed** : A Python integer to use as random seed.
References
- [Lian J, Zhou X, Zhang F, et al. xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems[J]. arXiv preprint arXiv:1803.05170, 2018.] (https://arxiv.org/pdf/1803.05170.pdf)
"""
def __init__(self, field_size, layer_size=(128, 128), activation='relu', split_half=True, l2_reg=1e-5, seed=1024,
device='cpu'):
super(CIN, self).__init__()
if len(layer_size) == 0:
raise ValueError(
"layer_size must be a list(tuple) of length greater than 1")
self.layer_size = layer_size
self.field_nums = [field_size]
self.split_half = split_half
self.activation = activation_layer(activation)
self.l2_reg = l2_reg
self.seed = seed
self.conv1ds = nn.ModuleList()
for i, size in enumerate(self.layer_size):
self.conv1ds.append(
nn.Conv1d(self.field_nums[-1] * self.field_nums[0], size, 1))
if self.split_half:
if i != len(self.layer_size) - 1 and size % 2 > 0:
raise ValueError(
"layer_size must be even number except for the last layer when split_half=True")
self.field_nums.append(size // 2)
else:
self.field_nums.append(size)
# for tensor in self.conv1ds:
# nn.init.normal_(tensor.weight, mean=0, std=init_std)
self.to(device)
def forward(self, inputs):
if len(inputs.shape) != 3:
raise ValueError(
"Unexpected inputs dimensions %d, expect to be 3 dimensions" % (len(inputs.shape)))
batch_size = inputs.shape[0]
dim = inputs.shape[-1]
hidden_nn_layers = [inputs]
final_result = []
for i, size in enumerate(self.layer_size):
# x^(k-1) * x^0
x = torch.einsum(
'bhd,bmd->bhmd', hidden_nn_layers[-1], hidden_nn_layers[0])
# x.shape = (batch_size , hi * m, dim)
x = x.reshape(
batch_size, hidden_nn_layers[-1].shape[1] * hidden_nn_layers[0].shape[1], dim)
# x.shape = (batch_size , hi, dim)
x = self.conv1ds[i](x)
if self.activation is None or self.activation == 'linear':
curr_out = x
else:
curr_out = self.activation(x)
if self.split_half:
if i != len(self.layer_size) - 1:
next_hidden, direct_connect = torch.split(
curr_out, 2 * [size // 2], 1)
else:
direct_connect = curr_out
next_hidden = 0
else:
direct_connect = curr_out
next_hidden = curr_out
final_result.append(direct_connect)
hidden_nn_layers.append(next_hidden)
result = torch.cat(final_result, dim=1)
result = torch.sum(result, -1)
return result
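# Example (illustrative): with the default layer_size=(128, 128) and
# split_half=True, featuremap_num = 128 // 2 + 128 = 192
#   cin = CIN(field_size=10)
#   cin(torch.randn(32, 10, 4)).shape   # -> torch.Size([32, 192])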
class AFMLayer(nn.Module):
"""Attentonal Factorization Machine models pairwise (order-2) feature
interactions without linear term and bias.
Input shape
- A list of 3D tensor with shape: ``(batch_size,1,embedding_size)``.
Output shape
- 2D tensor with shape: ``(batch_size, 1)``.
Arguments
- **in_features** : Positive integer, dimensionality of input features.
- **attention_factor** : Positive integer, dimensionality of the
attention network output space.
- **l2_reg_w** : float between 0 and 1. L2 regularizer strength
applied to attention network.
- **dropout_rate** : float between in [0,1). Fraction of the attention net output units to dropout.
- **seed** : A Python integer to use as random seed.
References
- [Attentional Factorization Machines : Learning the Weight of Feature
Interactions via Attention Networks](https://arxiv.org/pdf/1708.04617.pdf)
"""
def __init__(self, in_features, attention_factor=4, l2_reg_w=0, dropout_rate=0, seed=1024, device='cpu'):
super(AFMLayer, self).__init__()
self.attention_factor = attention_factor
self.l2_reg_w = l2_reg_w
self.dropout_rate = dropout_rate
self.seed = seed
embedding_size = in_features
self.attention_W = nn.Parameter(torch.Tensor(
embedding_size, self.attention_factor))
self.attention_b = nn.Parameter(torch.Tensor(self.attention_factor))
self.projection_h = nn.Parameter(
torch.Tensor(self.attention_factor, 1))
self.projection_p = nn.Parameter(torch.Tensor(embedding_size, 1))
for tensor in [self.attention_W, self.projection_h, self.projection_p]:
nn.init.xavier_normal_(tensor, )
for tensor in [self.attention_b]:
nn.init.zeros_(tensor, )
self.dropout = nn.Dropout(dropout_rate)
self.to(device)
def forward(self, inputs):
embeds_vec_list = inputs
row = []
col = []
for r, c in itertools.combinations(embeds_vec_list, 2):
row.append(r)
col.append(c)
p = torch.cat(row, dim=1)
q = torch.cat(col, dim=1)
inner_product = p * q
bi_interaction = inner_product
attention_temp = F.relu(torch.tensordot(
bi_interaction, self.attention_W, dims=([-1], [0])) + self.attention_b)
self.normalized_att_score = F.softmax(torch.tensordot(
attention_temp, self.projection_h, dims=([-1], [0])), dim=1)
attention_output = torch.sum(
self.normalized_att_score * bi_interaction, dim=1)
attention_output = self.dropout(attention_output) # training
afm_out = torch.tensordot(
attention_output, self.projection_p, dims=([-1], [0]))
return afm_out
class InteractingLayer(nn.Module):
"""A Layer used in AutoInt that model the correlations between different feature fields by multi-head self-attention mechanism.
Input shape
- A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 3D tensor with shape:``(batch_size,field_size,att_embedding_size * head_num)``.
Arguments
- **in_features** : Positive integer, dimensionality of input features.
- **att_embedding_size**: int.The embedding size in multi-head self-attention network.
- **head_num**: int.The head number in multi-head self-attention network.
- **use_res**: bool.Whether or not use standard residual connections before output.
- **seed**: A Python integer to use as random seed.
References
- [Song W, Shi C, Xiao Z, et al. AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks[J]. arXiv preprint arXiv:1810.11921, 2018.](https://arxiv.org/abs/1810.11921)
"""
def __init__(self, in_features, att_embedding_size=8, head_num=2, use_res=True,
scaling=False, seed=1024, device='cpu'):
super(InteractingLayer, self).__init__()
if head_num <= 0:
raise ValueError('head_num must be a int > 0')
self.att_embedding_size = att_embedding_size
self.head_num = head_num
self.use_res = use_res
self.scaling = scaling
self.seed = seed
embedding_size = in_features
self.W_Query = nn.Parameter(torch.Tensor(
embedding_size, self.att_embedding_size * self.head_num))
self.W_key = nn.Parameter(torch.Tensor(
embedding_size, self.att_embedding_size * self.head_num))
self.W_Value = nn.Parameter(torch.Tensor(
embedding_size, self.att_embedding_size * self.head_num))
if self.use_res:
self.W_Res = nn.Parameter(torch.Tensor(
embedding_size, self.att_embedding_size * self.head_num))
for tensor in self.parameters():
nn.init.normal_(tensor, mean=0.0, std=0.05)
self.to(device)
def forward(self, inputs):
if len(inputs.shape) != 3:
raise ValueError(
"Unexpected inputs dimensions %d, expect to be 3 dimensions" % (len(inputs.shape)))
querys = torch.tensordot(inputs, self.W_Query,
dims=([-1], [0])) # None F D*head_num
keys = torch.tensordot(inputs, self.W_key, dims=([-1], [0]))
values = torch.tensordot(inputs, self.W_Value, dims=([-1], [0]))
# head_num None F D
querys = torch.stack(torch.split(
querys, self.att_embedding_size, dim=2))
keys = torch.stack(torch.split(keys, self.att_embedding_size, dim=2))
values = torch.stack(torch.split(
values, self.att_embedding_size, dim=2))
inner_product = torch.einsum(
'bnik,bnjk->bnij', querys, keys) # head_num None F F
if self.scaling:
inner_product /= self.att_embedding_size ** 0.5
self.normalized_att_scores = F.softmax(
inner_product, dim=-1) # head_num None F F
result = torch.matmul(self.normalized_att_scores,
values) # head_num None F D
result = torch.cat(torch.split(result, 1, ), dim=-1)
result = torch.squeeze(result, dim=0) # None F D*head_num
if self.use_res:
result += torch.tensordot(inputs, self.W_Res, dims=([-1], [0]))
result = F.relu(result)
return result
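# Example (illustrative): output width is att_embedding_size * head_num = 16
#   att = InteractingLayer(in_features=16)
#   att(torch.randn(32, 10, 16)).shape   # -> torch.Size([32, 10, 16])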
class CrossNet(nn.Module):
"""The Cross Network part of Deep&Cross Network model,
    which learns both low- and high-degree cross features.
Input shape
- 2D tensor with shape: ``(batch_size, units)``.
Output shape
- 2D tensor with shape: ``(batch_size, units)``.
Arguments
- **in_features** : Positive integer, dimensionality of input features.
- **input_feature_num**: Positive integer, shape(Input tensor)[-1]
- **layer_num**: Positive integer, the cross layer number
- **parameterization**: string, ``"vector"`` or ``"matrix"`` , way to parameterize the cross network.
- **l2_reg**: float between 0 and 1. L2 regularizer strength applied to the kernel weights matrix
- **seed**: A Python integer to use as random seed.
References
- [Wang R, Fu B, Fu G, et al. Deep & cross network for ad click predictions[C]//Proceedings of the ADKDD'17. ACM, 2017: 12.](https://arxiv.org/abs/1708.05123)
- [Wang R, Shivanna R, Cheng D Z, et al. DCN-M: Improved Deep & Cross Network for Feature Cross Learning in Web-scale Learning to Rank Systems[J]. 2020.](https://arxiv.org/abs/2008.13535)
"""
def __init__(self, in_features, layer_num=2, parameterization='vector', seed=1024, device='cpu'):
super(CrossNet, self).__init__()
self.layer_num = layer_num
self.parameterization = parameterization
if self.parameterization == 'vector':
# weight in DCN. (in_features, 1)
self.kernels = nn.Parameter(torch.Tensor(self.layer_num, in_features, 1))
elif self.parameterization == 'matrix':
# weight matrix in DCN-M. (in_features, in_features)
self.kernels = nn.Parameter(torch.Tensor(self.layer_num, in_features, in_features))
else: # error
raise ValueError("parameterization should be 'vector' or 'matrix'")
self.bias = nn.Parameter(torch.Tensor(self.layer_num, in_features, 1))
for i in range(self.kernels.shape[0]):
nn.init.xavier_normal_(self.kernels[i])
for i in range(self.bias.shape[0]):
nn.init.zeros_(self.bias[i])
self.to(device)
def forward(self, inputs):
x_0 = inputs.unsqueeze(2)
x_l = x_0
for i in range(self.layer_num):
if self.parameterization == 'vector':
xl_w = torch.tensordot(x_l, self.kernels[i], dims=([1], [0]))
dot_ = torch.matmul(x_0, xl_w)
x_l = dot_ + self.bias[i] + x_l
elif self.parameterization == 'matrix':
xl_w = torch.matmul(self.kernels[i], x_l) # W * xi (bs, in_features, 1)
dot_ = xl_w + self.bias[i] # W * xi + b
x_l = x_0 * dot_ + x_l # x0 · (W * xi + b) +xl Hadamard-product
else: # error
raise ValueError("parameterization should be 'vector' or 'matrix'")
x_l = torch.squeeze(x_l, dim=2)
return x_l
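# Example (illustrative): the cross network preserves the feature width
#   cross = CrossNet(in_features=64)
#   cross(torch.randn(32, 64)).shape   # -> torch.Size([32, 64])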
class CrossNetMix(nn.Module):
"""The Cross Network part of DCN-Mix model, which improves DCN-M by:
1 add MOE to learn feature interactions in different subspaces
2 add nonlinear transformations in low-dimensional space
Input shape
- 2D tensor with shape: ``(batch_size, units)``.
Output shape
- 2D tensor with shape: ``(batch_size, units)``.
Arguments
- **in_features** : Positive integer, dimensionality of input features.
        - **low_rank** : Positive integer, dimensionality of the low-rank space.
- **num_experts** : Positive integer, number of experts.
- **layer_num**: Positive integer, the cross layer number
- **device**: str, e.g. ``"cpu"`` or ``"cuda:0"``
References
- [Wang R, Shivanna R, Cheng D Z, et al. DCN-M: Improved Deep & Cross Network for Feature Cross Learning in Web-scale Learning to Rank Systems[J]. 2020.](https://arxiv.org/abs/2008.13535)
"""
def __init__(self, in_features, low_rank=32, num_experts=4, layer_num=2, device='cpu'):
super(CrossNetMix, self).__init__()
self.layer_num = layer_num
self.num_experts = num_experts
# U: (in_features, low_rank)
self.U_list = nn.Parameter(torch.Tensor(self.layer_num, num_experts, in_features, low_rank))
# V: (in_features, low_rank)
self.V_list = nn.Parameter(torch.Tensor(self.layer_num, num_experts, in_features, low_rank))
# C: (low_rank, low_rank)
self.C_list = nn.Parameter(torch.Tensor(self.layer_num, num_experts, low_rank, low_rank))
self.gating = nn.ModuleList([nn.Linear(in_features, 1, bias=False) for i in range(self.num_experts)])
self.bias = nn.Parameter(torch.Tensor(self.layer_num, in_features, 1))
init_para_list = [self.U_list, self.V_list, self.C_list]
for i in range(len(init_para_list)):
for j in range(self.layer_num):
nn.init.xavier_normal_(init_para_list[i][j])
for i in range(len(self.bias)):
nn.init.zeros_(self.bias[i])
self.to(device)
def forward(self, inputs):
x_0 = inputs.unsqueeze(2) # (bs, in_features, 1)
x_l = x_0
for i in range(self.layer_num):
output_of_experts = []
gating_score_of_experts = []
for expert_id in range(self.num_experts):
# (1) G(x_l)
# compute the gating score by x_l
gating_score_of_experts.append(self.gating[expert_id](x_l.squeeze(2)))
# (2) E(x_l)
# project the input x_l to $\mathbb{R}^{r}$
v_x = torch.matmul(self.V_list[i][expert_id].t(), x_l) # (bs, low_rank, 1)
# nonlinear activation in low rank space
v_x = torch.tanh(v_x)
v_x = torch.matmul(self.C_list[i][expert_id], v_x)
v_x = torch.tanh(v_x)
# project back to $\mathbb{R}^{d}$
uv_x = torch.matmul(self.U_list[i][expert_id], v_x) # (bs, in_features, 1)
dot_ = uv_x + self.bias[i]
dot_ = x_0 * dot_ # Hadamard-product
output_of_experts.append(dot_.squeeze(2))
# (3) mixture of low-rank experts
output_of_experts = torch.stack(output_of_experts, 2) # (bs, in_features, num_experts)
gating_score_of_experts = torch.stack(gating_score_of_experts, 1) # (bs, num_experts, 1)
moe_out = torch.matmul(output_of_experts, gating_score_of_experts.softmax(1))
x_l = moe_out + x_l # (bs, in_features, 1)
x_l = x_l.squeeze() # (bs, in_features)
return x_l
class InnerProductLayer(nn.Module):
"""InnerProduct Layer used in PNN that compute the element-wise
product or inner product between feature vectors.
Input shape
- a list of 3D tensor with shape: ``(batch_size,1,embedding_size)``.
Output shape
- 3D tensor with shape: ``(batch_size, N*(N-1)/2 ,1)`` if use reduce_sum. or 3D tensor with shape:
``(batch_size, N*(N-1)/2, embedding_size )`` if not use reduce_sum.
Arguments
- **reduce_sum**: bool. Whether return inner product or element-wise product
References
- [Qu Y, Cai H, Ren K, et al. Product-based neural networks for user response prediction[C]//
Data Mining (ICDM), 2016 IEEE 16th International Conference on. IEEE, 2016: 1149-1154.]
(https://arxiv.org/pdf/1611.00144.pdf)"""
def __init__(self, reduce_sum=True, device='cpu'):
super(InnerProductLayer, self).__init__()
self.reduce_sum = reduce_sum
self.to(device)
def forward(self, inputs):
embed_list = inputs
row = []
col = []
num_inputs = len(embed_list)
for i in range(num_inputs - 1):
for j in range(i + 1, num_inputs):
row.append(i)
col.append(j)
p = torch.cat([embed_list[idx]
for idx in row], dim=1) # batch num_pairs k
q = torch.cat([embed_list[idx]
for idx in col], dim=1)
inner_product = p * q
if self.reduce_sum:
inner_product = torch.sum(
inner_product, dim=2, keepdim=True)
return inner_product
class OutterProductLayer(nn.Module):
"""OutterProduct Layer used in PNN.This implemention is
adapted from code that the author of the paper published on https://github.com/Atomu2014/product-nets.
Input shape
- A list of N 3D tensor with shape: ``(batch_size,1,embedding_size)``.
Output shape
- 2D tensor with shape:``(batch_size,N*(N-1)/2 )``.
Arguments
- **filed_size** : Positive integer, number of feature groups.
- **kernel_type**: str. The kernel weight matrix type to use,can be mat,vec or num
- **seed**: A Python integer to use as random seed.
References
- [Qu Y, Cai H, Ren K, et al. Product-based neural networks for user response prediction[C]//Data Mining (ICDM), 2016 IEEE 16th International Conference on. IEEE, 2016: 1149-1154.](https://arxiv.org/pdf/1611.00144.pdf)
"""
def __init__(self, field_size, embedding_size, kernel_type='mat', seed=1024, device='cpu'):
super(OutterProductLayer, self).__init__()
self.kernel_type = kernel_type
num_inputs = field_size
num_pairs = int(num_inputs * (num_inputs - 1) / 2)
embed_size = embedding_size
if self.kernel_type == 'mat':
self.kernel = nn.Parameter(torch.Tensor(
embed_size, num_pairs, embed_size))
elif self.kernel_type == 'vec':
self.kernel = nn.Parameter(torch.Tensor(num_pairs, embed_size))
elif self.kernel_type == 'num':
self.kernel = nn.Parameter(torch.Tensor(num_pairs, 1))
nn.init.xavier_uniform_(self.kernel)
self.to(device)
def forward(self, inputs):
embed_list = inputs
row = []
col = []
num_inputs = len(embed_list)
for i in range(num_inputs - 1):
for j in range(i + 1, num_inputs):
row.append(i)
col.append(j)
p = torch.cat([embed_list[idx]
for idx in row], dim=1) # batch num_pairs k
q = torch.cat([embed_list[idx] for idx in col], dim=1)
# -------------------------
if self.kernel_type == 'mat':
p.unsqueeze_(dim=1)
# k k* pair* k
# batch * pair
kp = torch.sum(
# batch * pair * k
torch.mul(
# batch * pair * k
torch.transpose(
# batch * k * pair
torch.sum(
# batch * k * pair * k
torch.mul(
p, self.kernel),
dim=-1),
2, 1),
q),
dim=-1)
else:
# 1 * pair * (k or 1)
k = torch.unsqueeze(self.kernel, 0)
# batch * pair
kp = torch.sum(p * q * k, dim=-1)
# p q # b * p * k
return kp
class ConvLayer(nn.Module):
"""Conv Layer used in CCPM.
Input shape
- A list of N 3D tensor with shape: ``(batch_size,1,filed_size,embedding_size)``.
Output shape
- A list of N 3D tensor with shape: ``(batch_size,last_filters,pooling_size,embedding_size)``.
Arguments
- **filed_size** : Positive integer, number of feature groups.
- **conv_kernel_width**: list. list of positive integer or empty list,the width of filter in each conv layer.
- **conv_filters**: list. list of positive integer or empty list,the number of filters in each conv layer.
Reference:
- Liu Q, Yu F, Wu S, et al. A convolutional click prediction model[C]//Proceedings of the 24th ACM International on Conference on Information and Knowledge Management. ACM, 2015: 1743-1746.(http://ir.ia.ac.cn/bitstream/173211/12337/1/A%20Convolutional%20Click%20Prediction%20Model.pdf)
"""
def __init__(self, field_size, conv_kernel_width, conv_filters, device='cpu'):
super(ConvLayer, self).__init__()
self.device = device
module_list = []
n = int(field_size)
l = len(conv_filters)
filed_shape = n
for i in range(1, l + 1):
if i == 1:
in_channels = 1
else:
in_channels = conv_filters[i - 2]
out_channels = conv_filters[i - 1]
width = conv_kernel_width[i - 1]
k = max(1, int((1 - pow(i / l, l - i)) * n)) if i < l else 3
module_list.append(Conv2dSame(in_channels=in_channels, out_channels=out_channels, kernel_size=(width, 1),
stride=1).to(self.device))
module_list.append(torch.nn.Tanh().to(self.device))
# KMaxPooling, extract top_k, returns tensors values
module_list.append(KMaxPooling(k=min(k, filed_shape), axis=2, device=self.device).to(self.device))
filed_shape = min(k, filed_shape)
self.conv_layer = nn.Sequential(*module_list)
self.to(device)
self.filed_shape = filed_shape
def forward(self, inputs):
return self.conv_layer(inputs)
| [
"[email protected]"
] | |
8decbdca81e3e4b91373b7e04f95168420879c90 | 98d9305b1717642bcfb842eecd84d63b6eeaf759 | /Funtions/Favorite Book.py | e2892efd0cc0ff10b3dc5face5cc9e96c6f9880e | [] | no_license | er-aditi/Learning-Python | 5ceb020f4df8db9e34df78edfaecca3e1854c8a9 | 297eda435ee2e1cee643f94ea4c5de6a82e3c8a7 | refs/heads/master | 2020-03-24T17:22:22.129081 | 2019-06-19T05:47:26 | 2019-06-19T05:47:26 | 142,856,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | #Parameter
def favorite_book(title):
print("One of my favorite books is Alice in " + title)
# "Wonderland" is the argument passed for 'title'
favorite_book("Wonderland")
| [
"[email protected]"
] | |
416be490276271568c133407975891c44a56e873 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/countingE_20200723125242.py | ee2eed06a056b5d20bc8221422b9c32e3af0b259 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | def counting(arr):
# keep count of the occurrences
m = max(arr)+1
    counts = [0 for i in range(m)]
    # one output slot per input element (not per counter bucket)
    outputs = [0 for i in range(len(arr))]
# we keep the record of the occurences of the various numbers
for i in range(len(arr)):
counts[arr[i]] +=1
# now to get the running sum
total = 0
for i in range(len(counts)):
total += counts[i]
counts[i] = total
# next step is to now map the numbers to there proper positions starting from the end of the arr
for k in range(len(arr)-1,-1,-1):
position = counts[arr[k]]- 1
outputs[position] = arr[k]
counts[arr[k]] -=1
print('out',outputs)
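# Example (illustrative): counting([1, 4, 1, 2, 7, 5, 4]) prints
#   out [1, 1, 2, 4, 4, 5, 7]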
def swap(A, B):
    n = len(A)
    sum_a = sum(A)
    sum_b = sum(B)  # the snapshot cut off at "sum_"; sum(B) is the assumed continuation
# 22
# 24
swap([1,4,1,2,7,5,4],[2,4,5,6,2,2,3]) | [
"[email protected]"
] | |
04640dde560910e0832261ea2972e720f222af3c | c0b6f77fce4a35001ac75d9375eac682780c72cd | /experiments/heli/plotting/plot_trajectories.py | 5e11cf21d79a94fe0198595dcd8167631e0f9ca0 | [
"MIT"
] | permissive | sisl/CEEM | aef7854211887939f582fef9ce4fa9ac23a30567 | 6154587fe3cdb92e8b7f70eedb1262caa1553cc8 | refs/heads/master | 2023-07-19T17:56:31.222617 | 2021-03-16T15:59:43 | 2021-03-16T15:59:43 | 266,781,728 | 6 | 1 | MIT | 2023-07-06T21:36:26 | 2020-05-25T13:07:42 | Python | UTF-8 | Python | false | false | 2,885 | py | import matplotlib.pyplot as plt
import torch
import numpy as np
from ceem.data_utils import *
from ceem.smoother import EKF
import pandas as pd
import click
import matplotlib
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}", r"\usepackage{siunitx}"]
ttl = [
'$a_x$ \n $(\si{\meter\per\second\squared})$', '$a_y$ \n $(\si{\meter\per\second\squared})$', '$a_z$ \n $(\si{\meter\per\second\squared})$',
    '$\dot{\omega}_x$ \n $(\si{\radian\per\second\squared})$', '$\dot{\omega}_y$ \n $(\si{\radian\per\second\squared})$',
    '$\dot{\omega}_z$ \n $(\si{\radian\per\second\squared})$'
]
figsizes = {'large': (10, 4), 'small': (6.4, 4.8)}
@click.command()
@click.option('-b', '--trajectory', type=int, default=9)
@click.option('--datadir', type=click.Path(), default='./datasets/split_normalized')
@click.option('--modelfile', type=click.Path(), default='./experiments/heli/trajectories')
@click.option('-m', '--moments', is_flag=True)
@click.option('-s', '--savename', type=str, default=None)
@click.option('--figsize', type=str, default='large')
def main(trajectory, datadir, modelfile, moments, savename, figsize):
# load test data
test_u, test_y, demos = load_helidata(datadir, 'test', return_files=True)
y_mean, y_std, u_mean, u_std = load_statistics(datadir)
test_u = test_u * u_std + u_mean
test_y = test_y * y_std + y_mean
dt = 0.01
T = torch.arange(test_y.shape[1], dtype=torch.float32) * dt
# load predictions
naivepred = torch.load(f'{modelfile}/naivepred')
h25pred = torch.load(f'{modelfile}/h25pred')
sidpred = torch.load(f'{modelfile}/sidpred')
nlpred = torch.load(f'{modelfile}/nlpred')
# create plot
f, ax = plt.subplots(3, 1, figsize=figsizes[figsize])
b = trajectory
i = 0
lines = []
c = 3 if moments else 0
for j in range(3):
lines.append(ax[i].plot(T, test_y[b, :, j + c], alpha=0.8)[0])
lines.append(ax[i].plot(T[25:], h25pred[b, 1:, j + c], '--', alpha=0.8)[0])
lines.append(ax[i].plot(T[25:], nlpred[b, 25:, j + c], '--', alpha=0.8)[0])
lines.append(ax[i].plot(T[25:], sidpred[b, 25:, j + c], '--', alpha=0.8)[0])
ax[i].set_ylabel(ttl[j + c], rotation=0, ha='center', fontweight='bold', labelpad=20)
ax[i].grid(True)
i += 1
ax[i - 1].set_xlabel('time (s)', fontweight='bold', labelpad=-5)
lgd = plt.figlegend(handles=lines[:4], labels=['dataset', 'H25', 'NL (ours)', 'SID'],
loc='upper center', shadow=True, ncol=4)
f.subplots_adjust(bottom=0.1)
plt.tight_layout(rect=[0, 0., 1., .935])
if savename is None:
plt.show()
else:
plt.savefig(f'./experiments/heli/plotting/{savename}.pdf', bbox_extra_artists=(lgd,),
bbox_inches='tight', dpi=400)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
1096ceab5c58b730c11c204555f2b606334dfd5b | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /hzQ6dEJ2GfscAZzND_23.py | 6d6b618bba5d6f707d882f094ff6b102bf50e42f | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | """
Create a function that takes a number as its parameter and returns another
function.The returned function must take a list of numbers as its parameter,
and return a list of the numbers divided by the number that was passed into
the first function.
### Examples
first = factory(15)
// returns a function first.
lst = [30, 45, 60]
// 30 / 15 = 2, 45 / 15 = 3, 60 / 15 = 4
first(lst) ➞ [2, 3, 4]
second = factory(2)
// returns a function second.
lst = [2, 4, 6]
// 2 / 2 = 1, 4 / 2 = 2, 6 / 2 = 3
second(lst) ➞ [1, 2, 3]
### Notes
Rounding not required.
"""
def factory(n):
def newFunc(l):
return [x/n for x in l]
return newFunc
| [
"[email protected]"
] | |
7791443a22dfd1c3fc5ec4d1ed8346c1da591f42 | d6183f3762b0ecc4b580642fac8db9707a94679a | /cluster/server/app/conftest.py | e7f1ec17c423166f5cbe35ce1f1346e698b6f69c | [] | no_license | luke-zhu/blueno | 4c5fd8b66df5c75e2d28f0cc9e32b45c75386beb | 09fbb603468a4de8567e0fe4debd575da81672b2 | refs/heads/master | 2022-12-02T10:16:23.693589 | 2019-05-31T21:16:12 | 2019-05-31T22:20:12 | 184,935,571 | 2 | 0 | null | 2022-11-22T03:33:43 | 2019-05-04T19:20:07 | Python | UTF-8 | Python | false | false | 1,789 | py | import datetime
import random
import string
from typing import Tuple
import psycopg2
import pytest
import testing.postgresql
from werkzeug import security
from app import env, db
@pytest.fixture(scope='session')
def test_user() -> Tuple[str, str]:
created_at = datetime.datetime.now(datetime.timezone.utc)
test_email = f"test-{created_at.utcnow()}"
test_password = ''.join(random.choice(string.ascii_letters)
for _ in range(24))
pwd_hash = security.generate_password_hash(test_password)
# Initialize a testing database if env vars not defined
if not env.POSTGRES_CONFIG:
postgresql = testing.postgresql.Postgresql()
env.POSTGRES_CONFIG = postgresql.dsn()
db.init_db()
conn = psycopg2.connect(**env.POSTGRES_CONFIG)
with conn.cursor() as cur:
cur.execute(
"""
INSERT INTO users (email, pwhash, created_at)
VALUES (%s, %s, %s)
ON CONFLICT DO NOTHING;
""",
(test_email, pwd_hash, created_at)
)
conn.commit()
yield test_email, test_password
# Clean up the database
with conn.cursor() as cur:
cur.execute(
"""
DELETE FROM samples
WHERE dataset_id IN (
SELECT datasets.id
FROM datasets
WHERE datasets.name ILIKE %s);
""",
('test%',)
)
cur.execute(
"""
DELETE FROM datasets
WHERE datasets.name ILIKE %s;
""",
('test%',)
)
cur.execute(
"""
DELETE FROM users
WHERE email = %s;
""",
(test_email,)
)
    conn.commit()
    conn.close()
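# Example (illustrative): a test consuming this session-scoped fixture
#   def test_login(test_user):
#       email, password = test_user
#       ...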
| [
"[email protected]"
] | |
68516eb8465dd0d6a43d17922aeacc2e62549fc3 | b3ab2979dd8638b244abdb2dcf8da26d45d7b730 | /test/test_update_request_permission_set_request_model.py | 1cd01805888beee5d37b87cb5335ebce540773ae | [] | no_license | CU-CommunityApps/ct-cloudcheckr-cmx-client | 4b3d9b82c5dfdaf24f8f443526868e971d8d1b15 | 18ac9fd4d6c4ae799c0d21745eaecd783da68c0c | refs/heads/main | 2023-03-03T19:53:57.685925 | 2021-02-09T13:05:07 | 2021-02-09T13:05:07 | 329,308,757 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | # coding: utf-8
"""
CloudCheckr API
CloudCheckr API # noqa: E501
OpenAPI spec version: v1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudcheckr_cmx_client
from cloudcheckr_cmx_client.models.update_request_permission_set_request_model import UpdateRequestPermissionSetRequestModel # noqa: E501
from cloudcheckr_cmx_client.rest import ApiException
class TestUpdateRequestPermissionSetRequestModel(unittest.TestCase):
"""UpdateRequestPermissionSetRequestModel unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUpdateRequestPermissionSetRequestModel(self):
"""Test UpdateRequestPermissionSetRequestModel"""
# FIXME: construct object with mandatory attributes with example values
# model = cloudcheckr_cmx_client.models.update_request_permission_set_request_model.UpdateRequestPermissionSetRequestModel() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
7c0c761e0eb874e87225b67b92c6c871ca3ea0aa | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/str_cat-207.py | 53a36c0c4f2221975566cc4362fa0f79bd194121 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | a:str = "Hello"
b:str = "World"
c:str = "ChocoPy"
def cat2(a:str, b:str) -> str:
return a + b
def cat3(a:str, b:str, c:str) -> str:
return a + b + c
print(cat2(a, b))
print(cat2("", c))
print(cat3(a, " ", c))
print(len(a))
print(len(cat2(a,a)))
print(len(cat2($Exp,"")))
| [
"[email protected]"
] | |
44ec356d33a6eeb229f8a08e5c38a02ca2b32098 | 75a179e8ddba54442697de87a3846f1711a30bae | /custompermission/api/views.py | a22c47ef90caba6c8771d00567decb8b3d0a670d | [] | no_license | amanlalwani007/drftutorial | 2b5a5338b3146b1feb88c4d815fbf996dd49cb9d | 4f5c651f4dee98a359b7a6e34d0ae9a8f8630e68 | refs/heads/master | 2023-07-09T01:28:04.921042 | 2021-08-21T10:59:06 | 2021-08-21T10:59:06 | 392,457,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | from .models import Student
from .serializers import StudentSerializer
from rest_framework import viewsets
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated, AllowAny, IsAdminUser, IsAuthenticatedOrReadOnly, \
DjangoModelPermissions
from .custompermissions import Mypermission
class StudentModelViewSet(viewsets.ModelViewSet):
queryset = Student.objects.all()
serializer_class = StudentSerializer
authentication_classes = [SessionAuthentication]
permission_classes = [Mypermission]
| [
"[email protected]"
] | |
567854f29d38416103ec3318189d55778dbeb556 | 41ede4fd3bfba1bff0166bca7aee80dcf21434c6 | /suvari/gtk2chain/gtk2deps/libXinerama/actions.py | c2c1791cced00ef75b6b069b187e330f85971b39 | [] | no_license | pisilinux/playground | a7db4b42559a21cc72fd4c8649e0231ab6a3eb3c | e4e12fff8a847ba210befc8db7e2af8556c3adf7 | refs/heads/master | 2022-08-12T23:03:27.609506 | 2022-08-11T18:28:19 | 2022-08-11T18:28:19 | 8,429,459 | 16 | 22 | null | 2022-08-11T18:28:20 | 2013-02-26T09:37:11 | Python | UTF-8 | Python | false | false | 501 | py | # -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import get
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def setup():
autotools.autoreconf("-vif")
autotools.configure("--disable-static")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("ChangeLog", "COPYING", "README")
| [
"[email protected]"
] | |
b5862c54903294ec07c26c54d450861018205faf | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_blitzes.py | 605e73b62180965fa4fde07a03eb8b38914f2909 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
from xai.brain.wordbase.nouns._blitz import _BLITZ
#calss header
class _BLITZES(_BLITZ, ):
def __init__(self,):
_BLITZ.__init__(self)
self.name = "BLITZES"
self.specie = 'nouns'
self.basic = "blitz"
self.jsondata = {}
| [
"[email protected]"
] | |
e05939b877ab8b45dd5f02bdaecaea04224c8ed5 | 0b842bcb3bf20e1ce628d39bf7e11abd7699baf9 | /oscar/a/sys/platform_basic/platform_basic.py | c0e0d961147716015348a902c79c19714d02ab8e | [] | no_license | afeset/miner2-tools | 75cc8cdee06222e0d81e39a34f621399e1ceadee | 81bcc74fe7c0ca036ec483f634d7be0bab19a6d0 | refs/heads/master | 2016-09-05T12:50:58.228698 | 2013-08-27T21:09:56 | 2013-08-27T21:09:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,862 | py | #
# Copyright Qwilt, 2013
#
# The code contained in this file may not be used by any other entities without explicit written permission from Qwilt.
#
# Author: Shmulika
#
import os
import json
import a.infra.format.json
import platform_base
class PlatformBasic(platform_base.PlatformBase):
""" TODO(shmulika): doc this
"""
#initialization fields
INIT_PARAM_DATA_PLATFORM_BASIC_DIR = "platform-basic-dir"
INIT_PARAM_DATA_PLATFORM_TYPE = "platform-type"
_INIT_PARAM_FILE_NAME = "platform-basic-init-params.json"
def __init__ (self, log):
platform_base.PlatformBase.__init__ (self)
self._log = log # TODO(shmulika): createSameModule
self._platformType = None
self._platformBasicDir = None
#########################################################################################
# INITIALIZATION METHODS
#########################################################################################
def init (self, platformBasicDir, platformType):
""" Initializes the platform_basic directory from which all the platform data is loaded, and the platform type
Source dir should be the directory into which the platform basic package was released
Raises: OS Error if directory/file of platform_basic paths do not exist, or there's an error reading from the files.
"""
self._platformBasicDir = platformBasicDir
self._platformType = platformType
platformFilename = os.path.join(self._platformBasicDir, self.PLATFORM_BASIC_DATA_FILES_PREFIX, self.s_getDataFileBaseName(platformType))
self._log("init").debug1("PlatformBasic initialized (platformBasicDir=%s, platformType=%s), loading platform data from file=%s:", platformBasicDir, platformType, platformFilename)
self._platformDictionary = self._loadFromJson(platformFilename)
self._log("init").debug1("platform data from file=%s was loaded. Data=%s", platformFilename, self._platformDictionary)
###########################################
# CAPTAIN CLIENT INITIALIZATION INTERFACE
##########################################
def initCaptain (self, captain):
""" set the captain object used by the class
"""
self._captain = captain
def initFromDictionary (self, data):
""" Initializes the platform_basic directory using a dictionary. see "init" for more details
"""
return self.init(platformBasicDir = data[self.INIT_PARAM_DATA_PLATFORM_BASIC_DIR],
platformType = data[self.INIT_PARAM_DATA_PLATFORM_TYPE])
def captainClient_initFromParamFile (self):
""" Initializes the platform_basic directory from which all the platform data is loaded, and the platform type
Fatal in case of failure
"""
initParamFilesDirName = self._captain.getInitParamFilesDirName()
initParamFileName = os.path.join(initParamFilesDirName, self._INIT_PARAM_FILE_NAME)
try:
if os.path.exists(initParamFileName):
self._log("read-init-file").debug2("reading init file %s", initParamFileName)
data = a.infra.format.json.readFromFile(self._log, initParamFileName)
else:
a.infra.process.processFatal("Failed to init platform data. File %s does not exists", initParamFileName)
except Exception as exception:
a.infra.process.processFatal("Failed to read platform data init file: %s", exception)
self._log("init-values").debug2("Init values: '%s'", data)
try:
self.initFromDictionary(data)
except Exception as exception:
a.infra.process.processFatal("Failed to init platform data: %s", exception)
#########################################################################################
# CREATORS
#########################################################################################
def createPlatformBasicForPlatformType (self, platformType):
""" Creates and returns a new platform basic for a specified platform type
For usages that require information on other platform types
"""
newPlatformBasic = PlatformBasic(self._log)
newPlatformBasic.init(self._platformBasicDir, platformType)
return newPlatformBasic
#########################################################################################
# DATA GETTER METHODS
#########################################################################################
def getPlatformType (self):
""" Returns the platform type (a string) of this platform.
"""
return self._platformDictionary[self.FIELD_PLATFORM]
def getDiskProperty (self, diskName, field, dictionary = None):
""" Returns a property of a disk.
Arguments:
diskName - Constant (one of PlatformBasic.DISK_NAME_*) which is the name of the disk
field - Constant (one of PlatformBasic.DISK_FIELD_*) which is the name of the property (field)
dictionary - If None, the dictionary of the initialized platform is used, o.w. should be a platform dictionary gotten
"""
if dictionary is None:
dictionary = self._platformDictionary
return dictionary[self.FIELD_DISKS][diskName][field]
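    # Example (illustrative; the concrete DISK_NAME_* / DISK_FIELD_* constant
    # names are hypothetical and are defined in platform_base):
    #   platform.getDiskProperty(PlatformBasic.DISK_NAME_SSD, PlatformBasic.DISK_FIELD_SIZE)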
def getPartitionsUnderDisk (self, diskName, dictionary = None):
""" Returns a list of all the partitions listed under the given disk.
The list is ordered by the indices of the partitions.
Arguments:
            dictionary - a platform_data dictionary of a certain platform (result of getQmDictionary(), getQvmDictionary(), and so on)
diskName - string, name of the disk of which partitions should be returned
Returns: list of the disk-names of the partitions
Empty list, if `diskName` has no partition
None, if a disk named `diskName` does not exist in the dictionary.
"""
return self._getDisksUnderDisk(diskName, diskTypeFilter = [self.TYPE_PARTITON], dictionary = dictionary)
def getLogicalVolumesUnderDisk (self, diskName, dictionary = None):
""" Returns a list of all the logical volume listed under the given disk (should usually be a volume group disk).
The list is ordered by the indices of the volumes.
Arguments:
            dictionary - a platform_data dictionary of a certain platform (result of getQmDictionary(), getQvmDictionary(), and so on)
diskName - string, name of the disk of which volumes should be returned
Returns: list of the disk-names of the volumes
Empty list, if `diskName` has no volumes
None, if a disk named `diskName` does not exist in the dictionary.
"""
return self._getDisksUnderDisk(diskName, diskTypeFilter = [self.TYPE_LV], dictionary = dictionary)
def getVolumeGroupsUnderDisk (self, diskName, dictionary = None):
""" Returns a list of all the volume groups listed under the given disk.
The list is ordered by the indices of the groups.
Arguments:
                dictionary - a platform_data dictionary of a certain platform (result of getQmDictionary(), getQvmDictionary(), and so on)
diskName - string, name of the disk of which volumes should be returned
Returns: list of the disk-names of the groups
Empty list, if `diskName` has no groups
None, if a disk named `diskName` does not exist in the dictionary.
"""
return self._getDisksUnderDisk(diskName, diskFormatFilter = [self.FORMAT_VG], dictionary = dictionary)
def getRaidProperty (self, field, dictionary = None):
""" Returns a property of the raid.
Arguments:
field - Constant (one of PlatformBasic.DISK_FIELD_*) which is the name of the property (field)
                dictionary - If None, the dictionary of the initialized platform is used; otherwise it should be a platform dictionary obtained from one of the dictionary getter methods
"""
if dictionary is None:
dictionary = self._platformDictionary
return dictionary[self.FIELD_RAID][field]
def getBiosProperty (self, field, dictionary = None):
""" Returns a property of the bios.
Arguments:
field - Constant (one of PlatformBasic.DISK_FIELD_*) which is the name of the property (field)
                dictionary - If None, the dictionary of the initialized platform is used; otherwise it should be a platform dictionary obtained from one of the dictionary getter methods
"""
if dictionary is None:
dictionary = self._platformDictionary
return dictionary[self.FIELD_BIOS][field]
#########################################################################################
# STATIC METHODS
#########################################################################################
@classmethod
def s_createInitParamFile (cls, dbgLog, initParamFilesDirName, dictionary):
a.infra.format.json.writeToFile(dbgLog, dictionary, os.path.join(initParamFilesDirName, cls._INIT_PARAM_FILE_NAME), indent=4)
#########################################################################################
# LOGIC PRIVATE
#########################################################################################
def _getDisksUnderDisk (self, diskName, diskTypeFilter = None, diskFormatFilter = None, dictionary = None):
""" Returns a list of all the partitions listed under the given disk.
The list is ordered by the indices of the partitions.
Arguments:
                dictionary - a platform_data dictionary of a certain platform (result of getQmDictionary(), getQvmDictionary(), and so on)
diskName - string, name of the disk of which partitions should be returned
diskTypeFilter - a list of disk types, only these types of disks will be returned (if None - not used)
diskFormatFilter - a list of disk formats, only these types of disks will be returned (if None - not used)
Returns: list of the disk-names of the partitions
Empty list, if `diskName` has no partition
None, if a disk named `diskName` does not exist in the dictionary.
"""
if dictionary is None:
dictionary = self._platformDictionary
if diskName not in dictionary[self.FIELD_DISKS]:
return None
disksAndDictionaryUnderDisk = []
        # find disks whose parent is the given disk and which pass the requested type/format filters
        for disk, diskDictionary in dictionary[self.FIELD_DISKS].items():
            if diskDictionary[self.DISK_FIELD_PARENT] != diskName:
                continue
            if (diskTypeFilter is not None) and (diskDictionary[self.DISK_FIELD_TYPE] not in diskTypeFilter):
                continue
            if (diskFormatFilter is not None) and (diskDictionary[self.DISK_FIELD_FORMAT] not in diskFormatFilter):
                continue
            disksAndDictionaryUnderDisk.append((disk, diskDictionary))
        disksAndDictionaryUnderDisk = sorted(disksAndDictionaryUnderDisk, key = lambda pair: pair[1][self.DISK_FIELD_INDEX])
disksUnderDisk = [disk for disk, diskDictionary in disksAndDictionaryUnderDisk]
return disksUnderDisk
#########################################################################################
# UTILITIES PRIVATE
#########################################################################################
def _loadFromJson (self, filename):
with open(filename, 'r') as fileInput:
return json.load(fileInput)
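    #########################################################################################
    # USAGE SKETCH (illustrative only, not part of the class API)
    #########################################################################################
    # Assuming an initialized instance, the getters above combine as follows. The
    # DISK_NAME_* / DISK_FIELD_* constants are referenced by the docstrings but defined
    # outside this excerpt, so the exact names used below are assumptions:
    #
    #   platform = PlatformBasic(log)
    #   platform.init(platformBasicDir, platformType)
    #   for partition in platform.getPartitionsUnderDisk(PlatformBasic.DISK_NAME_SYSTEM) or []:
    #       fmt = platform.getDiskProperty(partition, PlatformBasic.DISK_FIELD_FORMAT)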
| [
"[email protected]"
] | |
8c5fc38935a674117b20cbd3f6a3038786d0fc2a | 76769ef4394477046f646cd4195f5ffe30091d10 | /Problems/Counting unique/task.py | ea694370acdd2174bdfc3c1cf580d4ac52838058 | [] | no_license | Hemie143/Hangman | 10b01c18fa6cbfc275d3758774b1455f2229f84e | cb82a82bb937b917e515b93ad5fb7b634eaca6aa | refs/heads/master | 2022-04-25T00:54:51.078640 | 2020-04-28T20:34:33 | 2020-04-28T20:34:33 | 259,745,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | # please, work with the variables 'Belov', 'Smith', and 'Sarada' | [
"[email protected]"
] | |
61478897225f0fdf30d0bea7d3d211beeb1d1c41 | 2d38b91d42de3d209bf5a011193d3b76d24476c7 | /ExpensesTracker/ExpensesTracker/wsgi.py | 128c7a208b401edafccfa6c8175046aede000df3 | [] | no_license | dhariskov/python-web | 88143fe2c733a7f8149cd014e9a99d630875bd46 | 8de6ca0eb05576dfcd6723aa2d7ce0933074612c | refs/heads/master | 2023-01-23T07:08:36.349211 | 2020-11-29T12:44:26 | 2020-11-29T12:44:26 | 316,788,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for ExpensesTracker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ExpensesTracker.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
7bbe58a925f858deabcb88cfe329c58ff4da3f6d | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/N/narphorium/canadian_charities.py | b127dd024b64b13a4a719a865d73f8ac0e00bf51 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,780 | py | import scraperwiki
from string import Template
import re
from math import ceil
from BeautifulSoup import BeautifulSoup
start_page = scraperwiki.sqlite.get_var("current_page", 1)
page = start_page
num_pages = 1
max_pages = 500
for p in range(1, max_pages):
page = start_page + p
if page > num_pages:
page -= num_pages
scraperwiki.sqlite.save_var("current_page", page)
page_url = Template("http://www.cra-arc.gc.ca/ebci/haip/srch/basicsearchresult-eng.action?s=+&k=&b=true&f=25&p=$page").substitute(page=page)
html = scraperwiki.scrape(page_url)
soup = BeautifulSoup(html)
for result in soup.find('div', {'class':'center'}).findAll('div', {'class':'alignLeft'}, recursive=False):
record = {}
for entry in result.findAll('div'):
entry_content = str(entry)
entry_content = entry_content.replace('<div>','')
entry_content = entry_content.replace('</div>','')
entry_content = entry_content.replace(' ',' ')
for sub_entry in entry_content.split('<b>'):
parts = sub_entry.split(':</b>')
if len(parts) > 1:
key = parts[0].strip()
value = parts[1].strip()
m = re.search('<a[^>]+>([^<]+)<\/a>', key)
if m:
key = m.group(1).strip()
m = re.search('<a[^>]+>([^<]+)<\/a>', value)
if m:
value = m.group(1).strip()
if key == "Charity Name":
m = re.search('(.+)\s+\/\s+([A-Z,\d]+)', value)
if m:
name = m.group(1).strip()
id = m.group(2).strip()
record['ID'] = id
record['Name'] = name
else:
                        key = key.replace('/',' ')
                        key = re.sub(r'\s+', '_', key)
record[key] = value
        if 'ID' in record:
#print record
# save records to the datastore
scraperwiki.sqlite.save(["ID"], record)
m = re.search('<b>([\d,]+) matches found\.<\/b>', html)
if m:
num_results = int(m.group(1).replace(',',''))
num_pages = ceil(num_results / 25.0)
| [
"[email protected]"
] | |
eb8e4a117c13531cce84262fa88ef50c79dff1be | ee3d8e233370d5a890ba61b00b768f743c979e67 | /baekjoon/1904.py | 3e6a0c99876563e3fe760c5e26fcfe350281dd62 | [
"MIT"
] | permissive | alinghi/PracticeAlgorithm | 50384fd12a29964e5aa704784a8867046693eff2 | dea49e17337d9d7711e694059e27ceefb4b9d5d5 | refs/heads/master | 2023-01-23T06:03:40.314459 | 2020-12-09T14:20:09 | 2020-12-09T14:20:09 | 285,253,698 | 0 | 0 | null | 2020-08-13T05:25:44 | 2020-08-05T10:19:42 | Python | UTF-8 | Python | false | false | 129 | py | N=int(input())
#111 001 100
#001+1 00+00 100+1 11+00 111+1
a,b=0,1
for i in range(N):
a,b=b%15746,(a+b)%15746
print(b%15746)
| [
"[email protected]"
] | |
322d29fe0931fd88622b497e37f99ea0d0d93cf8 | 13ce3959fca0e51d5d17b4bf5b99d55b4a3d8ee0 | /setup.py | e84b1d4bdd1371f6a1b0b7bdfbced243bda9212f | [
"MIT"
] | permissive | guadagn0/flavio | 2b4d4bb450dfcbd21a92f55c5dfdbf6488cbf331 | 5a9b64e38b828fcd7907fd6fe7eb79b1dcefd4d3 | refs/heads/master | 2020-04-05T12:29:25.207810 | 2019-03-12T22:32:29 | 2019-03-12T22:32:29 | 156,872,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,528 | py | from setuptools import setup, find_packages
with open('flavio/_version.py', encoding='utf-8') as f:
exec(f.read())
with open('README.md', encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
setup(name='flavio',
version=__version__,
author='David M. Straub',
author_email='[email protected]',
url='https://flav-io.github.io',
description='A Python package for flavour physics phenomenology in the Standard Model and beyond',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
license='MIT',
packages=find_packages(),
package_data={
'flavio':['data/*.yml',
'data/test/*',
'physics/data/arXiv-0810-4077v3/*',
'physics/data/arXiv-1503-05534v1/*',
'physics/data/arXiv-1503-05534v2/*',
'physics/data/arXiv-1501-00367v2/*',
'physics/data/arXiv-1602-01399v1/*',
'physics/data/arXiv-1602-01399v1/*',
'physics/data/arXiv-1811-00983v1/*',
'physics/data/pdg/*',
'physics/data/qcdf_interpolate/*',
'physics/data/wcsm/*',
]
},
install_requires=['numpy', 'scipy', 'setuptools>=3.3', 'pyyaml',
'ckmutil', 'wilson>=1.6', ],
extras_require={
'testing': ['nose'],
'plotting': ['matplotlib>=1.4'],
'sampling': ['pypmc>=1.1', 'emcee', 'iminuit',],
},
)
| [
"[email protected]"
] | |
413bc5ab7c9a5eeea98c1310fcf21c955a3b899d | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /qwDPeZeufrHo2ejAY_5.py | dd2776cc8868b371d82d76aca6a7ce2578b301f3 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | """
Given a _string_ containing an _algebraic equation_ , calculate and **return
the value of x**. You'll only be given equations for simple **addition** and
**subtraction**.
### Examples
eval_algebra("2 + x = 19") ➞ 17
eval_algebra("4 - x = 1") ➞ 3
eval_algebra("23 + 1 = x") ➞ 24
### Notes
* There are spaces between every number and symbol in the string.
* x may be a negative number.
"""
def eval_algebra(eq):
eq='-'.join(eq.split('='))
if '- x' in eq:return eval(eq.replace('x','0'))
else:return -eval(eq.replace('x','0'))
| [
"[email protected]"
] | |
ca6a14711f5fbc121792986501439f1f313ec357 | a4186aadd9de84de34cde0d9bf443f802117260d | /scrapenews/spiders/news24.py | 20561989efbb09f476923702ccff8a4ccb499508 | [
"MIT"
] | permissive | public-people/scrape-news | bf1bd414d0c94d591aed0c515befaa7a76069093 | b057f100e4db567dbadba26e18728d4ff5cd5fb3 | refs/heads/master | 2023-05-29T13:41:14.181360 | 2020-04-18T20:52:02 | 2020-04-18T20:52:02 | 123,028,777 | 10 | 20 | MIT | 2023-05-22T22:28:35 | 2018-02-26T20:50:29 | Python | UTF-8 | Python | false | false | 2,256 | py | # -*- coding: utf-8 -*-
from .sitemap import SitemapSpider
from scrapenews.items import ScrapenewsItem
from datetime import datetime
import pytz
SAST = pytz.timezone('Africa/Johannesburg')
class News24Spider(SitemapSpider):
name = 'news24'
allowed_domains = ['www.news24.com']
sitemap_urls = ['https://www.news24.com/robots.txt']
sitemap_rules = [
('www.news24.com/SouthAfrica/News', 'parse'),
('www.news24.com/Columnists', 'parse'),
('www.news24.com/Green/News', 'parse'),
('www.news24.com/Obituaries', 'parse'),
('www.news24.com/PressReleases', 'parse'),
]
publication_name = 'News24'
def parse(self, response):
if '/News/' not in response.url:
self.logger.info("Ignoring %s", response.url)
return
title = response.xpath('//div[contains(@class, "article_details")]/h1/text()').extract_first()
self.logger.info('%s %s', response.url, title)
article_body = response.xpath('//article[@id="article-body"]')
if article_body:
body_html = article_body.extract_first()
byline = response.xpath('//div[contains(@class, "ByLineWidth")]/p/text()').extract_first()
publication_date_str = response.xpath('//span[@id="spnDate"]/text()').extract_first()
accreditation = response.xpath('//div[contains(@class, "ByLineWidth")]/div[contains(@class, "accreditation")]/a/@href').extract_first()
publication_date = datetime.strptime(publication_date_str, '%Y-%m-%d %H:%M')
publication_date = SAST.localize(publication_date)
item = ScrapenewsItem()
item['body_html'] = body_html
item['title'] = title
item['byline'] = byline
item['published_at'] = publication_date.isoformat()
item['retrieved_at'] = datetime.utcnow().isoformat()
item['url'] = response.url
item['file_name'] = response.url.split('/')[-1]
item['spider_name'] = self.name
item['publication_name'] = self.publication_name
if accreditation:
item['publication_name'] += " with " + accreditation[1:]
yield item
self.logger.info("")
| [
"[email protected]"
] | |
050c6d64a7e34ab1e92d1ca8a82c006208da4b2e | 32e0dcfe03fc8a54fd218bfb5fe8741a5ea8fc39 | /UserCode/jzhang/sbc_run6_mergeall.py | 013b5ffbce8389c2ccc782914a093247c85589c1 | [
"MIT"
] | permissive | RunzZhang/SBCcode | e480ab85d165b42de060b1778a2e2af38b2f511e | e75b8e751cec5fb2c28950edef0c82f005caedcb | refs/heads/master | 2021-09-08T03:41:56.222249 | 2019-06-17T19:52:32 | 2019-06-17T19:52:32 | 192,990,370 | 0 | 0 | MIT | 2019-06-20T21:36:26 | 2019-06-20T21:36:26 | null | UTF-8 | Python | false | false | 7,163 | py | import numpy as np
import SBCcode as sbc
import os
import re
from SBCcode.DataHandling.WriteBinary import WriteBinaryNtupleFile as wb
# import ipdb
modules = [
'AcousticAnalysis_',
'DytranAnalysis_',
'EventAnalysis_',
'HistoryAnalysis_',
'ImageAnalysis_',
'TimingAnalysis_',
'PMTfastDAQalignment_']
# modules = ['PMTpulseAnalysis_']
# modules = ['ImageAnalysis_']
# modules = ['AcousticAnalysis_']
# modules = ['TimingAnalysis_']
# recondir = '/bluearc/storage/recon/devel/SBC-17/output'
recondir = '/pnfs/coupp/persistent/grid_output/SBC-17/output'
merge_dir = '/bluearc/storage/recon/devel/SBC-17/output'
runlist = os.listdir(recondir)
runlist = filter(lambda fn: (not re.search('^\d+_\d+$', fn) is None)
and os.path.isdir(os.path.join(recondir, fn))
and (len(os.listdir(os.path.join(recondir, fn))) > 0),
runlist)
# runlist = ['20170706_6']
# runlist = ['20170621_7','20170625_2']
print(runlist)
# one_piezo_list = [
# '20170619_3',
# '20170621_0',
# '20170621_2',
# '20170621_3',
# '20170621_4',
# '20170621_5',
# '20170621_6',
# '20170621_7',
# '20170621_8',
# '20170622_0',
# '20170622_1',
# '20170622_2',
# '20170622_3',
# '20170622_5',
# '20170622_6',
# '20170622_7',
# '20170622_8',
# '20170622_9',
# '20170623_0',
# '20170623_1',
# '20170623_2']
# merge out by category to save memory
for module in modules:
# bad_list = [
# '20170624_2',
# '20170624_4',
# '20170625_0',
# '20170625_1',
# '20170625_2',
# '20170704_3',
# '20170704_4',
# '20170705_0',
# '20170705_1',
# '20170705_2',
# '20170706_5',
# '20170713_3',
# '20170713_4',
# '20170713_5',
# '20170714_0',
# '20170714_1',
# '20170714_2',
# '20170715_0',
# '20170715_1',
# '20170715_2',
# '20170715_4',
# '20170716_0',
# '20170716_1',
# '20170716_2',
# '20170716_3',
# '20170716_5',
# '20170716_6',
# '20170716_7',
# '20170717_0']
# if key == 'AcousticAnalysis_':
# bad_list += [
# '20170621_1', '20170622_4', '20170624_3', '20170711_13', '20170706_6', '20170708_2', '20170719_11']
# bad_list = []
# if key == 'ImageAnalysis_':
# bad_list = ['20170626_9', '20170703_3', '20170707_4']
# elif key == 'DytranAnalysis_':
# bad_list = [
# '20170622_9',
# '20170624_4',
# '20170625_0',
# '20170625_1',
# '20170704_3',
# '20170704_4',
# '20170705_0',
# '20170705_1',
# '20170705_2',
# '20170706_5']
# elif key == 'EventAnalysis_':
# bad_list = ['20170621_1' '20170622_4' '20170624_3']
# elif key == 'PMTfastDAQalignment_':
# bad_list = ['20170621_1' '20170622_4' '20170624_3']
bad_list = []
print("Loading " + module)
merge_out = []
shapes0 = []
for runname in runlist:
if runname in set(bad_list):
print(runname + ' is in bad_list')
continue
runid_str = runname.split('_')
runid = np.int32(runid_str)
runsn = runid[0] * 1000 + runid[1]
if (runsn >= 20170619003) and (runsn < 20170901000):
fpath = os.path.join(recondir, runname, module + runname + '.bin')
if os.path.exists(fpath):
if os.stat(fpath).st_size > 0:
data = sbc.read_bin(fpath)
# # check array sizes
# shapes = [data[x].shape for x in data.keys()]
# if len(shapes0) < 1:
# shapes0 = shapes
# print(runname + "\t" + str(shapes))
# Pad 0's to fields without Piezo2
if module == 'AcousticAnalysis_' and len(data['piezo_list'].shape) == 1:
size = [data['piezo_list'].shape[0], 2]
tmp = data['piezo_list']
data['piezo_list'] = np.zeros(size, dtype=np.int32)
data['piezo_list'][:, 0] = tmp
tmp = data['bubble_t0']
data['bubble_t0'] = np.zeros(size, dtype=np.float64)
data['bubble_t0'][:, 0] = tmp
tmp = data['peak_t0']
data['peak_t0'] = np.zeros(size, dtype=np.float64)
data['peak_t0'][:, 0] = tmp
size = list(data['piezoE'].shape)
size[1] += 1
tmp = data['piezoE']
data['piezoE'] = np.zeros(size, dtype=np.float64)
# ipdb.set_trace()
data['piezoE'][:, 0, :, :] = tmp[:, 0, :, :]
if module == 'TimingAnalysis_' and len(data['PMTmatch_t0'].shape) == 1:
var_names = ['CAMstate', 'PMTmatch_area', 'PMTmatch_area_nobs', 'PMTmatch_baseline', 'PMTmatch_baserms', 'PMTmatch_coinc', 'PMTmatch_ix', 'PMTmatch_lag', 'PMTmatch_max', 'PMTmatch_min', 'PMTmatch_pulse_area', 'PMTmatch_pulse_height', 'PMTmatch_pulse_t10', 'PMTmatch_pulse_t90', 'PMTmatch_pulse_tend', 'PMTmatch_pulse_tpeak', 'PMTmatch_pulse_tstart', 'PMTmatch_t0', 'nPMThits_fastdaq', 'nVetohits_fastdaq', 't_nearestPMThit', 't_nearestVetohit']
for var_name in var_names:
if len(data[var_name].shape) == 1:
data[var_name] = np.stack((data[var_name],
np.zeros(data[var_name].shape, data[var_name].dtype)), axis=1)
elif len(data[var_name].shape) > 1:
data[var_name] = np.concatenate((data[var_name],
np.zeros(data[var_name].shape, data[var_name].dtype)),
axis=1)
if module == 'TimingAnalysis_': # fix int32/int64 problem
var_name = 'PMTmatch_ix'
data[var_name] = np.int64(data[var_name])
shapes = [(x, data[x].dtype, data[x].shape) for x in data.keys()]
if len(shapes0) < 1:
shapes0 = shapes
print(runname + "\t" + str(shapes))
# ipdb.set_trace()
merge_out.append(data)
else:
print("zero size file: " + fpath)
else:
print("nonexis file: " + fpath)
merge_name = 'all'
rowdef = 1
if module in set(['PMTpulseAnalysis_', 'PMTpheAnalysis_']):
rowdef = 7
if module in set(['HumanGetBub_']):
rowdef = 8
print("Writing " + module)
wb(os.path.join(merge_dir, module + merge_name + '.bin'), merge_out,
rowdef=rowdef, initialkeys=['runid', 'ev'], drop_first_dim=True)
| [
"[email protected]"
] | |
730ad7c52a87d68667cc6d4ef736c84998330595 | e17ab8f50b8a1f13b52aa770269eb469c87161b8 | /apps/contact/forms.py | 61cca7b816d64ca24e3016b278acd325dd385140 | [] | no_license | masterfung/bond | 6baa84a4322801aeb3f466d8f83e7e18c1d91731 | bc283ec1bd7a52f77b6fc788d5e818bd7233fc1d | refs/heads/master | 2022-12-09T17:45:47.243471 | 2015-03-08T19:29:21 | 2015-03-08T19:29:21 | 22,904,060 | 0 | 0 | null | 2022-12-07T23:22:59 | 2014-08-13T05:42:10 | Python | UTF-8 | Python | false | false | 598 | py | __author__ = '@masterfung'
from captcha.fields import ReCaptchaField # Only import different from yesterday
import floppyforms as forms
class ContactForm(forms.Form):
def __init__(self, *args, **kwargs):
super(ContactForm, self).__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control'
name = forms.CharField(required=True)
email = forms.EmailField(required=True)
subject = forms.CharField(required=True)
message = forms.CharField(widget=forms.Textarea)
captcha = ReCaptchaField() | [
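# Usage sketch (assumption: a Django-style view, since floppyforms mirrors the
# django.forms API):
#
#   form = ContactForm(request.POST or None)
#   if form.is_valid():
#       ...  # e.g. send the message; the handling here is hypothetical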
"[email protected]"
] | |
2b80e991998881a815ef8d991d0d1747dd9a3be1 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/apimanagement/azure-mgmt-apimanagement/generated_samples/api_management_list_diagnostics.py | 9c7e8d5fde159226518502a2d34c5ee855e982f4 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,587 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.apimanagement import ApiManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-apimanagement
# USAGE
python api_management_list_diagnostics.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = ApiManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.diagnostic.list_by_service(
resource_group_name="rg1",
service_name="apimService1",
)
for item in response:
print(item)
# x-ms-original-file: specification/apimanagement/resource-manager/Microsoft.ApiManagement/stable/2022-08-01/examples/ApiManagementListDiagnostics.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
63cdacc5b1a7bf38a597105b8ec6af5aa9c3117b | bc441bb06b8948288f110af63feda4e798f30225 | /resource_package_tools_sdk/model/ops_automation/job_details_pb2.pyi | 3a118c77a835875581e4960eaa7aa1f7fb4251e6 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,971 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from resource_package_tools_sdk.model.ops_automation.bind_resource_pb2 import (
BindResource as resource_package_tools_sdk___model___ops_automation___bind_resource_pb2___BindResource,
)
from resource_package_tools_sdk.model.ops_automation.mail_info_pb2 import (
MailInfo as resource_package_tools_sdk___model___ops_automation___mail_info_pb2___MailInfo,
)
from typing import (
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class JobDetails(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
class Scheduler(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
isBound = ... # type: builtin___bool
isActive = ... # type: builtin___bool
def __init__(self,
*,
isBound : typing___Optional[builtin___bool] = None,
isActive : typing___Optional[builtin___bool] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> JobDetails.Scheduler: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> JobDetails.Scheduler: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"isActive",b"isActive",u"isBound",b"isBound"]) -> None: ...
version = ... # type: builtin___int
createTime = ... # type: typing___Text
updateTime = ... # type: typing___Text
creator = ... # type: typing___Text
org = ... # type: builtin___int
name = ... # type: typing___Text
category = ... # type: typing___Text
menuId = ... # type: typing___Text
desc = ... # type: typing___Text
allowModify = ... # type: builtin___bool
id = ... # type: typing___Text
@property
def scheduler(self) -> JobDetails.Scheduler: ...
@property
def bindResource(self) -> resource_package_tools_sdk___model___ops_automation___bind_resource_pb2___BindResource: ...
@property
def mail(self) -> resource_package_tools_sdk___model___ops_automation___mail_info_pb2___MailInfo: ...
def __init__(self,
*,
version : typing___Optional[builtin___int] = None,
createTime : typing___Optional[typing___Text] = None,
updateTime : typing___Optional[typing___Text] = None,
creator : typing___Optional[typing___Text] = None,
org : typing___Optional[builtin___int] = None,
scheduler : typing___Optional[JobDetails.Scheduler] = None,
name : typing___Optional[typing___Text] = None,
category : typing___Optional[typing___Text] = None,
menuId : typing___Optional[typing___Text] = None,
bindResource : typing___Optional[resource_package_tools_sdk___model___ops_automation___bind_resource_pb2___BindResource] = None,
desc : typing___Optional[typing___Text] = None,
allowModify : typing___Optional[builtin___bool] = None,
mail : typing___Optional[resource_package_tools_sdk___model___ops_automation___mail_info_pb2___MailInfo] = None,
id : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> JobDetails: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> JobDetails: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"bindResource",b"bindResource",u"mail",b"mail",u"scheduler",b"scheduler"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"allowModify",b"allowModify",u"bindResource",b"bindResource",u"category",b"category",u"createTime",b"createTime",u"creator",b"creator",u"desc",b"desc",u"id",b"id",u"mail",b"mail",u"menuId",b"menuId",u"name",b"name",u"org",b"org",u"scheduler",b"scheduler",u"updateTime",b"updateTime",u"version",b"version"]) -> None: ...
| [
"[email protected]"
] | |
a04fcffa8e327209c8e04feb7c9e4aec49f1d73b | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /fRB5QRYn5WC8jMGTe_10.py | ad1e01971694e69b65bc1bfc1f4ba58bef2fe277 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py |
import datetime as dt
def time_difference(city_a, timestamp, city_b):
gmt = {"Los Angeles": {"h": -8,"m":0},
"New York": {"h":-5,"m":0},
"Caracas": {"h":-4,"m":-30},
"Buenos Aires": {"h":-3,"m":0},
"London": {"h":0,"m":0},
"Rome": {"h":1,"m":0},
"Moscow": {"h":3,"m":0},
"Tehran": {"h":3,"m":30},
"New Delhi": {"h":5,"m":30},
"Beijing": {"h":8,"m":0},
"Canberra": {"h":10,"m":0}
}
t = dt.datetime.strptime(timestamp, "%B %d, %Y %H:%M")
ot = (t - dt.timedelta(hours=gmt[city_a]["h"], minutes=gmt[city_a]["m"]) +
dt.timedelta(hours=gmt[city_b]["h"], minutes=gmt[city_b]["m"]))
return "{}-{}-{} {:02d}:{:02d}".format(ot.year, ot.month, ot.day,
ot.hour, ot.minute)
| [
"[email protected]"
] | |
7d7d9e3f4ee38535b9909b2b3168a029ffb5622e | e63c1e59b2d1bfb5c03d7bf9178cf3b8302ce551 | /uri/uri_python/matematica/p1198.py | 26ea0741bc55d2e35e7fe6e038e30ff4b41afddf | [] | no_license | GabrielEstevam/icpc_contest_training | b8d97184ace8a0e13e1c0bf442baa36c853a6837 | 012796c2ceb901cf7aa25d44a93614696a7d9c58 | refs/heads/master | 2020-04-24T06:15:16.826669 | 2019-10-08T23:13:15 | 2019-10-08T23:13:15 | 171,758,893 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | import math
while True:
try:
entrada = input().split(" ")
A = int(entrada[0])
B = int(entrada[1])
print(int(math.fabs(A-B)))
except EOFError:
break | [
"[email protected]"
] | |
60925646feb8473a3fff7eec5ed67860e4efff65 | aea8fea216234fd48269e4a1830b345c52d85de2 | /fhir/resources/STU3/tests/test_episodeofcare.py | 2a6f7c5f3886e9275ff0a301dc3bf923e2cac14a | [
"BSD-3-Clause"
] | permissive | mmabey/fhir.resources | 67fce95c6b35bfdc3cbbc8036e02c962a6a7340c | cc73718e9762c04726cd7de240c8f2dd5313cbe1 | refs/heads/master | 2023-04-12T15:50:30.104992 | 2020-04-11T17:21:36 | 2020-04-11T17:21:36 | 269,712,884 | 0 | 0 | NOASSERTION | 2020-06-05T17:03:04 | 2020-06-05T17:03:04 | null | UTF-8 | Python | false | false | 4,568 | py | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/EpisodeOfCare
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
import io
import json
import os
import unittest
import pytest
from .. import episodeofcare
from ..fhirdate import FHIRDate
from .fixtures import force_bytes
@pytest.mark.usefixtures("base_settings")
class EpisodeOfCareTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get("FHIR_UNITTEST_DATADIR") or ""
with io.open(os.path.join(datadir, filename), "r", encoding="utf-8") as handle:
js = json.load(handle)
self.assertEqual("EpisodeOfCare", js["resourceType"])
return episodeofcare.EpisodeOfCare(js)
def testEpisodeOfCare1(self):
inst = self.instantiate_from("episodeofcare-example.json")
self.assertIsNotNone(inst, "Must have instantiated a EpisodeOfCare instance")
self.implEpisodeOfCare1(inst)
js = inst.as_json()
self.assertEqual("EpisodeOfCare", js["resourceType"])
inst2 = episodeofcare.EpisodeOfCare(js)
self.implEpisodeOfCare1(inst2)
def implEpisodeOfCare1(self, inst):
self.assertEqual(inst.diagnosis[0].rank, 1)
self.assertEqual(
force_bytes(inst.diagnosis[0].role.coding[0].code), force_bytes("CC")
)
self.assertEqual(
force_bytes(inst.diagnosis[0].role.coding[0].display),
force_bytes("Chief complaint"),
)
self.assertEqual(
force_bytes(inst.diagnosis[0].role.coding[0].system),
force_bytes("http://hl7.org/fhir/diagnosis-role"),
)
self.assertEqual(force_bytes(inst.id), force_bytes("example"))
self.assertEqual(
force_bytes(inst.identifier[0].system),
force_bytes("http://example.org/sampleepisodeofcare-identifier"),
)
self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("123"))
self.assertEqual(inst.period.start.date, FHIRDate("2014-09-01").date)
self.assertEqual(inst.period.start.as_json(), "2014-09-01")
self.assertEqual(force_bytes(inst.status), force_bytes("active"))
self.assertEqual(
inst.statusHistory[0].period.end.date, FHIRDate("2014-09-14").date
)
self.assertEqual(inst.statusHistory[0].period.end.as_json(), "2014-09-14")
self.assertEqual(
inst.statusHistory[0].period.start.date, FHIRDate("2014-09-01").date
)
self.assertEqual(inst.statusHistory[0].period.start.as_json(), "2014-09-01")
self.assertEqual(
force_bytes(inst.statusHistory[0].status), force_bytes("planned")
)
self.assertEqual(
inst.statusHistory[1].period.end.date, FHIRDate("2014-09-21").date
)
self.assertEqual(inst.statusHistory[1].period.end.as_json(), "2014-09-21")
self.assertEqual(
inst.statusHistory[1].period.start.date, FHIRDate("2014-09-15").date
)
self.assertEqual(inst.statusHistory[1].period.start.as_json(), "2014-09-15")
self.assertEqual(
force_bytes(inst.statusHistory[1].status), force_bytes("active")
)
self.assertEqual(
inst.statusHistory[2].period.end.date, FHIRDate("2014-09-24").date
)
self.assertEqual(inst.statusHistory[2].period.end.as_json(), "2014-09-24")
self.assertEqual(
inst.statusHistory[2].period.start.date, FHIRDate("2014-09-22").date
)
self.assertEqual(inst.statusHistory[2].period.start.as_json(), "2014-09-22")
self.assertEqual(
force_bytes(inst.statusHistory[2].status), force_bytes("onhold")
)
self.assertEqual(
inst.statusHistory[3].period.start.date, FHIRDate("2014-09-25").date
)
self.assertEqual(inst.statusHistory[3].period.start.as_json(), "2014-09-25")
self.assertEqual(
force_bytes(inst.statusHistory[3].status), force_bytes("active")
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(force_bytes(inst.type[0].coding[0].code), force_bytes("hacc"))
self.assertEqual(
force_bytes(inst.type[0].coding[0].display),
force_bytes("Home and Community Care"),
)
self.assertEqual(
force_bytes(inst.type[0].coding[0].system),
force_bytes("http://hl7.org/fhir/episodeofcare-type"),
)
| [
"[email protected]"
] | |
f383d554c135fc392f72f27540010b2c2a96e753 | 743da4642ac376e5c4e1a3b63c079533a5e56587 | /build/lib.win-amd64-3.6/fairseq/modules/adaptive_softmax.py | 1c60d09568cbafd7a449a66bea1936644528f85f | [
"MIT"
] | permissive | tmtmaj/Exploiting-PrLM-for-NLG-tasks | cdae1b6e451b594b11d8ecef3c1cd4e12fe51c9b | e8752593d3ee881cf9c0fb5ed26d26fcb02e6dd5 | refs/heads/main | 2023-06-16T08:26:32.560746 | 2021-07-14T17:50:19 | 2021-07-14T17:50:19 | 371,899,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,028 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import operator
import functools
import torch
import torch.nn.functional as F
from fairseq.modules.quant_noise import quant_noise
from torch import nn
class TiedLinear(nn.Module):
def __init__(self, weight, transpose):
super().__init__()
self.weight = weight
self.transpose = transpose
def forward(self, input):
return F.linear(input, self.weight.t() if self.transpose else self.weight)
class TiedHeadModule(nn.Module):
def __init__(self, weights, input_dim, num_classes, q_noise, qn_block_size):
super().__init__()
tied_emb, _ = weights
self.num_words, emb_dim = tied_emb.size()
self.word_proj = quant_noise(TiedLinear(tied_emb, transpose=False), q_noise, qn_block_size)
if input_dim != emb_dim:
self.word_proj = nn.Sequential(
quant_noise(nn.Linear(input_dim, emb_dim, bias=False), q_noise, qn_block_size),
self.word_proj,
)
self.class_proj = quant_noise(nn.Linear(input_dim, num_classes, bias=False), q_noise, qn_block_size)
self.out_dim = self.num_words + num_classes
self.register_buffer('_float_tensor', torch.FloatTensor(1))
def forward(self, input):
inp_sz = functools.reduce(operator.mul, input.shape[:-1], 1)
out = self._float_tensor.new(inp_sz, self.out_dim)
out[:, :self.num_words] = self.word_proj(input.view(inp_sz, -1))
out[:, self.num_words:] = self.class_proj(input.view(inp_sz, -1))
return out
class AdaptiveSoftmax(nn.Module):
"""
This is an implementation of the efficient softmax approximation for
graphical processing units (GPU), described in the paper "Efficient softmax
approximation for GPUs" (http://arxiv.org/abs/1609.04309).
"""
def __init__(self, vocab_size, input_dim, cutoff, dropout, factor=4., adaptive_inputs=None, tie_proj=False, q_noise=0, qn_block_size=8):
super().__init__()
if vocab_size > cutoff[-1]:
cutoff = cutoff + [vocab_size]
else:
assert vocab_size == cutoff[
-1], 'cannot specify cutoff larger than vocab size'
output_dim = cutoff[0] + len(cutoff) - 1
self.vocab_size = vocab_size
self.cutoff = cutoff
self.dropout = dropout
self.input_dim = input_dim
self.factor = factor
self.q_noise = q_noise
self.qn_block_size = qn_block_size
self.lsm = nn.LogSoftmax(dim=1)
if adaptive_inputs is not None:
self.head = TiedHeadModule(adaptive_inputs.weights_for_band(0), input_dim, len(cutoff) - 1, self.q_noise, self.qn_block_size)
else:
self.head = quant_noise(nn.Linear(input_dim, output_dim, bias=False), self.q_noise, self.qn_block_size)
self._make_tail(adaptive_inputs, tie_proj)
def init_weights(m):
if hasattr(m, 'weight') and not isinstance(m, TiedLinear) and not isinstance(m, TiedHeadModule):
nn.init.xavier_uniform_(m.weight)
self.apply(init_weights)
self.register_buffer('version', torch.LongTensor([1]))
def _make_tail(self, adaptive_inputs=None, tie_proj=False):
self.tail = nn.ModuleList()
for i in range(len(self.cutoff) - 1):
dim = int(self.input_dim // self.factor ** (i + 1))
tied_emb, tied_proj = adaptive_inputs.weights_for_band(i + 1) \
if adaptive_inputs is not None else (None, None)
if tied_proj is not None:
if tie_proj:
proj = quant_noise(TiedLinear(tied_proj, transpose=True), self.q_noise, self.qn_block_size)
else:
proj = quant_noise(nn.Linear(tied_proj.size(0), tied_proj.size(1), bias=False), self.q_noise, self.qn_block_size)
else:
proj = quant_noise(nn.Linear(self.input_dim, dim, bias=False), self.q_noise, self.qn_block_size)
if tied_emb is None:
out_proj = nn.Linear(dim, self.cutoff[i + 1] - self.cutoff[i], bias=False)
else:
out_proj = TiedLinear(tied_emb, transpose=False)
m = nn.Sequential(
proj,
nn.Dropout(self.dropout),
quant_noise(out_proj, self.q_noise, self.qn_block_size),
)
self.tail.append(m)
def upgrade_state_dict_named(self, state_dict, name):
version_name = name + '.version'
if version_name not in state_dict:
raise Exception('This version of the model is no longer supported')
def adapt_target(self, target):
"""
In order to be efficient, the AdaptiveSoftMax does not compute the
scores for all the word of the vocabulary for all the examples. It is
thus necessary to call the method adapt_target of the AdaptiveSoftMax
layer inside each forward pass.
"""
target = target.view(-1)
new_target = [target.clone()]
target_idxs = []
for i in range(len(self.cutoff) - 1):
mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))
new_target[0][mask] = self.cutoff[0] + i
if mask.any():
target_idxs.append(mask.nonzero().squeeze(1))
new_target.append(target[mask].add(-self.cutoff[i]))
else:
target_idxs.append(None)
new_target.append(None)
return new_target, target_idxs
def forward(self, input, target):
"""
Args:
input: (b x t x d)
target: (b x t)
Returns:
2 lists: output for each cutoff section and new targets by cut off
"""
input = input.contiguous().view(-1, input.size(-1))
input = F.dropout(input, p=self.dropout, training=self.training)
new_target, target_idxs = self.adapt_target(target)
output = [self.head(input)]
for i in range(len(target_idxs)):
if target_idxs[i] is not None:
output.append(self.tail[i](input.index_select(0, target_idxs[i])))
else:
output.append(None)
return output, new_target
def get_log_prob(self, input, target):
"""
Computes the log probabilities for all the words of the vocabulary,
given a 2D tensor of hidden vectors.
"""
bsz, length, dim = input.size()
input = input.contiguous().view(-1, dim)
if target is not None:
_, target_idxs = self.adapt_target(target)
else:
target_idxs = None
head_y = self.head(input)
log_probs = head_y.new_zeros(input.size(0), self.vocab_size)
head_sz = self.cutoff[0] + len(self.tail)
log_probs[:, :head_sz] = self.lsm(head_y)
tail_priors = log_probs[:, self.cutoff[0]: head_sz].clone()
for i in range(len(self.tail)):
start = self.cutoff[i]
end = self.cutoff[i + 1]
if target_idxs is None:
tail_out = log_probs[:, start:end]
tail_out.copy_(self.tail[i](input))
log_probs[:, start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None])
elif target_idxs[i] is not None:
idxs = target_idxs[i]
tail_out = log_probs[idxs, start:end]
tail_out.copy_(self.tail[i](input[idxs]))
log_probs[idxs, start:end] = self.lsm(tail_out).add_(tail_priors[idxs, i, None])
log_probs = log_probs.view(bsz, length, -1)
return log_probs
| [
"[email protected]"
] | |
d17cf53c623fa6d7bd0d5d74da87667c85fca93f | f730a1fc0fe7021d68cec973125d605c10ac7a64 | /code/camera.py | b9bfb55be94621c6619c04db7f15b9de8a045fcd | [] | no_license | wwxFromTju/TJU_AR_alpha0.1 | 47a248b6861dfcdc47a9eefd86250d616a4d71f8 | e435424943846a7812e22afb7ca66a5065d70aec | refs/heads/master | 2021-04-12T11:29:44.434154 | 2016-07-27T10:20:59 | 2016-07-27T10:20:59 | 64,299,053 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | #!/usr/bin/env python
# encoding=utf-8
import numpy as np
class Camera(object):
"""
    Camera class.
"""
def __init__(self, P):
"""
        Initialize the camera with projection matrix P.
"""
self.P = P
        # calibration matrix
self.K = None
        # rotation matrix
self.R = None
        # translation vector
self.t = None
        # camera center
self.c = None
def project(self, X):
"""
        :param X: points of shape (4, n) in homogeneous coordinates
        :return: the projected points P X, normalized so the last coordinate is 1
"""
        x = np.dot(self.P, X)
for i in range(3):
x[i] /= x[2]
return x | [
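# Usage sketch (illustrative, with a toy projection matrix P = [I | 0]):
if __name__ == '__main__':
    P = np.hstack((np.eye(3), np.zeros((3, 1))))
    cam = Camera(P)
    X = np.vstack((np.random.rand(3, 5), np.ones((1, 5))))  # (4, 5) homogeneous points
    print(cam.project(X))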
"[email protected]"
] | |
9eaa19c9d5828a8c9d3014e6f598ade1b040dc26 | 8be39cae865fa2163c131a34051c4867ad0350a0 | /examples/quickhowto2/app/views.py | 965acaaf1bbf79ecf7beb5b956b8ac0d380fcf32 | [
"BSD-3-Clause"
] | permissive | ben-github/Flask-AppBuilder | fd13f694457ef4fbc8c73f8b0b90083dc5b978bc | e52947f3e4494a84017bf101b19823df91a41448 | refs/heads/master | 2021-01-17T17:52:19.125926 | 2015-01-09T18:13:30 | 2015-01-09T18:13:30 | 25,661,891 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,050 | py | import calendar
from flask import redirect
from flask_appbuilder import ModelView, GroupByChartView, aggregate_count, action
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.models.generic.interface import GenericInterface
from flask_appbuilder.widgets import FormVerticalWidget, FormInlineWidget, FormHorizontalWidget, ShowBlockWidget
from flask_appbuilder.widgets import ListThumbnail
from flask_appbuilder.models.generic import PSSession
from flask_appbuilder.models.generic import PSModel
from flask_appbuilder.models.sqla.filters import FilterStartsWith, FilterEqualFunction as FA
from app import db, appbuilder
from .models import ContactGroup, Gender, Contact, FloatModel, Product, ProductManufacturer, ProductModel
def fill_gender():
try:
db.session.add(Gender(name='Male'))
db.session.add(Gender(name='Female'))
db.session.commit()
except:
db.session.rollback()
sess = PSSession()
class PSView(ModelView):
datamodel = GenericInterface(PSModel, sess)
base_permissions = ['can_list', 'can_show']
list_columns = ['UID', 'C', 'CMD', 'TIME']
search_columns = ['UID', 'C', 'CMD']
class ProductManufacturerView(ModelView):
datamodel = SQLAInterface(ProductManufacturer)
class ProductModelView(ModelView):
datamodel = SQLAInterface(ProductModel)
class ProductView(ModelView):
datamodel = SQLAInterface(Product)
list_columns = ['name','product_manufacturer', 'product_model']
add_columns = ['name','product_manufacturer', 'product_model']
edit_columns = ['name','product_manufacturer', 'product_model']
add_widget = FormVerticalWidget
class ContactModelView2(ModelView):
datamodel = SQLAInterface(Contact)
list_columns = ['name', 'personal_celphone', 'birthday', 'contact_group.name']
add_form_query_rel_fields = {'contact_group':[['name',FilterStartsWith,'p']],
'gender':[['name',FilterStartsWith,'F']]}
class ContactModelView(ModelView):
datamodel = SQLAInterface(Contact)
add_widget = FormVerticalWidget
show_widget = ShowBlockWidget
list_columns = ['name', 'personal_celphone', 'birthday', 'contact_group.name']
list_template = 'list_contacts.html'
list_widget = ListThumbnail
show_template = 'show_contacts.html'
extra_args = {'extra_arg_obj1': 'Extra argument 1 injected'}
base_order = ('name', 'asc')
show_fieldsets = [
('Summary', {'fields': ['name', 'gender', 'contact_group']}),
(
'Personal Info',
{'fields': ['address', 'birthday', 'personal_phone', 'personal_celphone'], 'expanded': False}),
]
add_fieldsets = [
('Summary', {'fields': ['name', 'gender', 'contact_group']}),
(
'Personal Info',
{'fields': ['address', 'birthday', 'personal_phone', 'personal_celphone'], 'expanded': False}),
]
edit_fieldsets = [
('Summary', {'fields': ['name', 'gender', 'contact_group']}),
(
'Personal Info',
{'fields': ['address', 'birthday', 'personal_phone', 'personal_celphone'], 'expanded': False}),
]
@action("muldelete", "Delete", "Delete all Really?", "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class GroupModelView(ModelView):
datamodel = SQLAInterface(ContactGroup)
related_views = [ContactModelView]
show_template = 'appbuilder/general/model/show_cascade.html'
list_columns = ['name', 'extra_col']
class FloatModelView(ModelView):
datamodel = SQLAInterface(FloatModel)
class ContactChartView(GroupByChartView):
datamodel = SQLAInterface(Contact)
chart_title = 'Grouped contacts'
label_columns = ContactModelView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group': 'contact_group.name',
'series': [(aggregate_count, 'contact_group')]
},
{
'group': 'gender',
'series': [(aggregate_count, 'gender')]
}
]
def pretty_month_year(value):
return calendar.month_name[value.month] + ' ' + str(value.year)
def pretty_year(value):
return str(value.year)
class ContactTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Contact)
chart_title = 'Grouped Birth contacts'
chart_type = 'AreaChart'
label_columns = ContactModelView.label_columns
definitions = [
{
'group': 'month_year',
'formatter': pretty_month_year,
'series': [(aggregate_count, 'contact_group')]
},
{
'group': 'year',
'formatter': pretty_year,
'series': [(aggregate_count, 'contact_group')]
}
]
db.create_all()
fill_gender()
appbuilder.add_view(PSView, "List PS", icon="fa-folder-open-o", category="Contacts", category_icon='fa-envelope')
appbuilder.add_view(GroupModelView, "List Groups", icon="fa-folder-open-o", category="Contacts",
category_icon='fa-envelope')
appbuilder.add_view(ContactModelView, "List Contacts", icon="fa-envelope", category="Contacts")
appbuilder.add_view(ContactModelView2, "List Contacts 2", icon="fa-envelope", category="Contacts")
appbuilder.add_view(FloatModelView, "List Float Model", icon="fa-envelope", category="Contacts")
appbuilder.add_separator("Contacts")
appbuilder.add_view(ContactChartView, "Contacts Chart", icon="fa-dashboard", category="Contacts")
appbuilder.add_view(ContactTimeChartView, "Contacts Birth Chart", icon="fa-dashboard", category="Contacts")
appbuilder.add_view(ProductManufacturerView, "List Manufacturer", icon="fa-folder-open-o", category="Products",
category_icon='fa-envelope')
appbuilder.add_view(ProductModelView, "List Models", icon="fa-envelope", category="Products")
appbuilder.add_view(ProductView, "List Products", icon="fa-envelope", category="Products")
appbuilder.security_cleanup()
| [
"[email protected]"
] | |
6e820d1d5f5954963c01bd964aa9c66f883d00d7 | 61dcd9b485bc5e6d07c4adf14f138eabaa9a23b5 | /evennumberedexercise/Exercise6_24.py | 2b58b016281f39f12c87f0eed9c9473c43981ad8 | [] | no_license | bong1915016/Introduction-to-Programming-Using-Python | d442d2252d13b731f6cd9c6356032e8b90aba9a1 | f23e19963183aba83d96d9d8a9af5690771b62c2 | refs/heads/master | 2020-09-25T03:09:34.384693 | 2019-11-28T17:33:28 | 2019-11-28T17:33:28 | 225,904,132 | 1 | 0 | null | 2019-12-04T15:56:55 | 2019-12-04T15:56:54 | null | UTF-8 | Python | false | false | 946 | py | def main():
count = 1
i = 2
while count <= 100:
# Display each number in five positions
if isPrime(i) and isPalindrome(i):
print(i, end = " ")
if count % 10 == 0:
print()
count += 1 # Increase count
i += 1
def isPrime(number):
divisor = 2
while divisor <= number / 2:
if number % divisor == 0:
# If true, number is not prime
return False # number is not a prime
divisor += 1
return True # number is prime
# Return the reversal of an integer, i.e. reverse(456) returns 654
def isPalindrome(number):
return number == reverse(number)
# Return the reversal of an integer, i.e. reverse(456) returns 654
def reverse(number):
result = 0
while number != 0:
remainder = number % 10
result = result * 10 + remainder
number = number // 10
return result
main() | [
"[email protected]"
] | |
25bc3be33edf11b325941a166313b77fcd34b28a | cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc | /Python Books/Machine-Learning-Python/9781118961742_all code files/06/simpleBagging.py | 72c83810c832b413e58d7f6b9fbb92e5a85022e9 | [] | no_license | theGreenJedi/Path | df24fca355590efef0c6cb5c52e7216c6b5d2464 | b5ed2805dbb046480929e49e550bfd8af5bb4d6f | refs/heads/master | 2023-07-27T14:23:37.694546 | 2021-07-16T01:38:55 | 2021-07-16T01:38:55 | 87,686,563 | 8 | 2 | null | 2023-07-11T22:49:03 | 2017-04-09T05:57:30 | Jupyter Notebook | UTF-8 | Python | false | false | 2,973 | py | __author__ = 'mike-bowles'
import numpy
import matplotlib.pyplot as plot
from sklearn import tree
from sklearn.tree import DecisionTreeRegressor
from math import floor
import random
#Build a simple data set with y = x + random
nPoints = 1000
#x values for plotting
xPlot = [(float(i)/float(nPoints) - 0.5) for i in range(nPoints + 1)]
#x needs to be list of lists.
x = [[s] for s in xPlot]
#y (labels) has random noise added to x-value
#set seed
random.seed(1)
y = [s + numpy.random.normal(scale=0.1) for s in xPlot]
#take fixed test set 30% of sample
nSample = int(nPoints * 0.30)
idxTest = random.sample(range(nPoints), nSample)
idxTest.sort()
idxTrain = [idx for idx in range(nPoints) if not(idx in idxTest)]
#Define test and training attribute and label sets
xTrain = [x[r] for r in idxTrain]
xTest = [x[r] for r in idxTest]
yTrain = [y[r] for r in idxTrain]
yTest = [y[r] for r in idxTest]
#train a series of models on random subsets of the training data
#collect the models in a list and check error of composite as list grows
#maximum number of models to generate
numTreesMax = 20
#tree depth - typically at the high end
treeDepth = 1
#initialize a list to hold models
modelList = []
predList = []
#number of samples to draw for stochastic bagging
nBagSamples = int(len(xTrain) * 0.5)
for iTrees in range(numTreesMax):
idxBag = []
for i in range(nBagSamples):
idxBag.append(random.choice(range(len(xTrain))))
xTrainBag = [xTrain[i] for i in idxBag]
yTrainBag = [yTrain[i] for i in idxBag]
modelList.append(DecisionTreeRegressor(max_depth=treeDepth))
modelList[-1].fit(xTrainBag, yTrainBag)
#make prediction with latest model and add to list of predictions
latestPrediction = modelList[-1].predict(xTest)
predList.append(list(latestPrediction))
#build cumulative prediction from first "n" models
mse = []
allPredictions = []
for iModels in range(len(modelList)):
#average first "iModels" of the predictions
prediction = []
for iPred in range(len(xTest)):
prediction.append(sum([predList[i][iPred] for i in range(iModels + 1)])/(iModels + 1))
allPredictions.append(prediction)
errors = [(yTest[i] - prediction[i]) for i in range(len(yTest))]
mse.append(sum([e * e for e in errors]) / len(yTest))
nModels = [i + 1 for i in range(len(modelList))]
plot.plot(nModels,mse)
plot.axis('tight')
plot.xlabel('Number of Models in Ensemble')
plot.ylabel('Mean Squared Error')
plot.ylim((0.0, max(mse)))
plot.show()
plotList = [0, 9, 19]
for iPlot in plotList:
plot.plot(xTest, allPredictions[iPlot])
plot.plot(xTest, yTest, linestyle="--")
plot.axis('tight')
plot.xlabel('x value')
plot.ylabel('Predictions')
plot.show()
print('Minimum MSE')
print(min(mse))
#With treeDepth = 1
#Minimum MSE
#0.0242960117899
#With treeDepth = 5
#Minimum MSE
#0.0118893503384 | [
"[email protected]"
] | |
b4524a2c6c4dec9afdd81e0de0712e0042927eb8 | 3950cb348a4a3ff6627d502dbdf4e576575df2fb | /.venv/Lib/site-packages/numba/np/ufunc/sigparse.py | a54df0e25537c1d62b56201d92da6306fa0fa4ba | [] | no_license | Bdye15/Sample_Programs | a90d288c8f5434f46e1d266f005d01159d8f7927 | 08218b697db91e55e8e0c49664a0b0cb44b4ab93 | refs/heads/main | 2023-03-02T04:40:57.737097 | 2021-01-31T03:03:59 | 2021-01-31T03:03:59 | 328,053,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,909 | py | import tokenize
import string
def parse_signature(sig):
'''Parse generalized ufunc signature.
    NOTE: ',' (COMMA) is a delimiter, not a separator.
          This means a trailing comma is legal.
'''
def stripws(s):
return ''.join(c for c in s if c not in string.whitespace)
def tokenizer(src):
def readline():
yield src
gen = readline()
return tokenize.generate_tokens(lambda: next(gen))
def parse(src):
tokgen = tokenizer(src)
while True:
tok = next(tokgen)
if tok[1] == '(':
symbols = []
while True:
tok = next(tokgen)
if tok[1] == ')':
break
elif tok[0] == tokenize.NAME:
symbols.append(tok[1])
elif tok[1] == ',':
continue
else:
raise ValueError('bad token in signature "%s"' % tok[1])
yield tuple(symbols)
tok = next(tokgen)
if tok[1] == ',':
continue
elif tokenize.ISEOF(tok[0]):
break
elif tokenize.ISEOF(tok[0]):
break
else:
raise ValueError('bad token in signature "%s"' % tok[1])
ins, _, outs = stripws(sig).partition('->')
inputs = list(parse(ins))
outputs = list(parse(outs))
# check that all output symbols are defined in the inputs
isym = set()
osym = set()
for grp in inputs:
isym |= set(grp)
for grp in outputs:
osym |= set(grp)
diff = osym.difference(isym)
if diff:
raise NameError('undefined output symbols: %s' % ','.join(sorted(diff)))
return inputs, outputs
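# Usage sketch (illustrative): a matmul-style gufunc signature parses into
# per-argument symbol tuples.
if __name__ == '__main__':
    ins, outs = parse_signature('(m,n),(n,p)->(m,p)')
    print(ins)   # [('m', 'n'), ('n', 'p')]
    print(outs)  # [('m', 'p')]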
| [
"[email protected]"
] | |
bc0ad0f7ec39d42a50304cbfb1480cfe527a4b4f | d4df738d2066c5222080e043a95a9b230673af81 | /course_512/3.6_API/problem_3.6.4.py | fd758a474fa3c86d4e73a0aa1cafbcef08e81973 | [] | no_license | kazamari/Stepik | c2277f86db74b285e742854f1072897f371e87f5 | bf0224a4c4e9322e481263f42451cd263b10724c | refs/heads/master | 2021-05-04T19:06:02.110827 | 2018-03-26T09:06:09 | 2018-03-26T09:06:09 | 105,513,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,756 | py | '''
In this problem you will use the API of the artsy.net site.
The Artsy project API provides information about a number of artists, their works, and exhibitions.
For this problem you will need the information about the artists themselves.
You are given identifiers of artists in the Artsy database.
For each identifier, retrieve the artist's name and year of birth.
Print the artists' names in non-decreasing order of year of birth. If several artists share the same
year of birth, print their names in lexicographic order.
Working with the Artsy API
Very few projects provide a completely open and free API. In most cases, to get access to an API
you have to register with the project, create your own application, and obtain a unique key (or token);
all further API requests are then made with this key.
To start working with the Artsy API, go to the API documentation start page
https://developers.artsy.net/start and complete the required steps, namely register, create an application,
and obtain a Client Id / Client Secret pair of identifiers. Do not publish these identifiers.
After that you need to obtain an API access token. The documentation start page has examples of how
to make the request and what the server response looks like. Here is an example request in Python.
import requests
import json
client_id = '...'
client_secret = '...'
# initiate a request to obtain a token
r = requests.post("https://api.artsy.net/api/tokens/xapp_token",
                  data={
                      "client_id": client_id,
                      "client_secret": client_secret
                  })
# parse the server response
j = json.loads(r.text)
# extract the token
token = j["token"]
Now everything is ready to retrieve information about artists. The documentation start page shows how
such a request is made and what the server response looks like. Example request in Python.
# create a header containing our token
headers = {"X-Xapp-Token" : token}
# initiate a request with the header
r = requests.get("https://api.artsy.net/api/artists/4d8b92b34eb68a1b2c0003f4", headers=headers)
# parse the server response
j = json.loads(r.text)
Note:
Use the sortable_name field in UTF-8 encoding as the artist's name.
Sample input:
4d8b92b34eb68a1b2c0003f4
537def3c139b21353f0006a6
4e2ed576477cc70001006f99
Sample output:
Abbott Mary
Warhol Andy
Abbas Hamra
Note for Windows users
When a file is opened for writing on Windows, the CP1251 encoding is used by default, while the names
on the site are stored in UTF-8, which may lead to an error when writing a name with unusual characters to a file.
You can use print, or the encoding argument of the open function.
'''
import requests
import json
client_id = '8e3ae03a8bf8050b30c9'
client_secret = 'd3a41eb062e10a397dbcab18b31b317f'
# initiate the token request
r = requests.post("https://api.artsy.net/api/tokens/xapp_token",
data={
"client_id": client_id,
"client_secret": client_secret
}, verify=False)
# parse the server response
j = json.loads(r.text)
# extract the token
token = j["token"]
# build a header containing our token
headers = {"X-Xapp-Token": token}
artists = []
with open('dataset_24476_4.txt', 'r') as f:
for line in f:
        # issue the request with the header
res = requests.get("https://api.artsy.net/api/artists/{}".format(line.strip()), headers=headers, verify=False)
res.encoding = 'utf-8'
j = res.json()
artists.append((j['birthday'], j['sortable_name']))
with open('test_24476_4.txt', 'w', encoding="utf-8") as file:
for bd, name in sorted(artists):
file.write(name + '\n') | [
"[email protected]"
] | |
c2471403aa320202deac3015c37cb0a0ac6e08a3 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/storage/v20190601/get_private_endpoint_connection.py | 198ad11aa90ab756d8d0907c319a4050996605d9 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,983 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
'get_private_endpoint_connection_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
"""
The Private Endpoint Connection resource.
"""
def __init__(__self__, id=None, name=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if private_endpoint and not isinstance(private_endpoint, dict):
raise TypeError("Expected argument 'private_endpoint' to be a dict")
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
"""
The resource of private end point.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the private endpoint connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateEndpointConnectionResult(
id=self.id,
name=self.name,
private_endpoint=self.private_endpoint,
private_link_service_connection_state=self.private_link_service_connection_state,
provisioning_state=self.provisioning_state,
type=self.type)
def get_private_endpoint_connection(account_name: Optional[str] = None,
private_endpoint_connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
"""
The Private Endpoint Connection resource.
:param str account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
:param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
:param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:storage/v20190601:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
return AwaitableGetPrivateEndpointConnectionResult(
id=__ret__.id,
name=__ret__.name,
private_endpoint=__ret__.private_endpoint,
private_link_service_connection_state=__ret__.private_link_service_connection_state,
provisioning_state=__ret__.provisioning_state,
type=__ret__.type)
@_utilities.lift_output_func(get_private_endpoint_connection)
def get_private_endpoint_connection_output(account_name: Optional[pulumi.Input[str]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]:
"""
The Private Endpoint Connection resource.
:param str account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
:param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
:param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
"""
...
| [
"[email protected]"
] | |
b3cbcb1d5bbbf22e60bf51058c034822d2297c4c | f8da830331428a8e1bbeadf23345f79f1750bd98 | /msgraph-cli-extensions/beta/search_beta/azext_search_beta/vendored_sdks/search/_configuration.py | 76b296b982dacd86747b02dc4fa3d3ca51ea1334 | [
"MIT"
] | permissive | ezkemboi/msgraph-cli | e023e1b7589461a738e42cbad691d9a0216b0779 | 2ceeb27acabf7cfa219c8a20238d8c7411b9e782 | refs/heads/main | 2023-02-12T13:45:03.402672 | 2021-01-07T11:33:54 | 2021-01-07T11:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,605 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
VERSION = "unknown"
class SearchConfiguration(Configuration):
"""Configuration for Search.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param top: Show only the first n items.
:type top: int
:param skip: Skip the first n items.
:type skip: int
:param search: Search items by search phrases.
:type search: str
:param filter: Filter items by property values.
:type filter: str
:param count: Include count of items.
:type count: bool
"""
def __init__(
self,
credential, # type: "TokenCredential"
top=None, # type: Optional[int]
skip=None, # type: Optional[int]
search=None, # type: Optional[str]
filter=None, # type: Optional[str]
count=None, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
super(SearchConfiguration, self).__init__(**kwargs)
self.credential = credential
self.top = top
self.skip = skip
self.search = search
self.filter = filter
self.count = count
self.credential_scopes = ['https://management.azure.com/.default']
self.credential_scopes.extend(kwargs.pop('credential_scopes', []))
kwargs.setdefault('sdk_moniker', 'search/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| [
"[email protected]"
] | |
4b2fa6673d63d5e719510a8281c35d5055a55f66 | b3d552675b36cb88a1388fcfc531e497ad7cbee9 | /qfpython/apps/news/templatetags/news_filters.py | 3a666825994e57a123163079c2f8ecd8013170d7 | [
"LicenseRef-scancode-mulanpsl-1.0-en",
"MulanPSL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | gaohj/1902_django | 3cea1f0935fd983f25c6fd832b103ac5165a2e30 | 822af7b42120c6edc699bf97c800887ff84f5621 | refs/heads/master | 2022-12-11T10:02:50.233398 | 2019-11-26T08:33:38 | 2019-11-26T08:33:38 | 209,241,390 | 2 | 0 | null | 2022-12-08T07:28:24 | 2019-09-18T07:05:48 | Python | UTF-8 | Python | false | false | 957 | py | from datetime import datetime
from django import template
from django.utils.timezone import now as now_func,localtime
register = template.Library()
@register.filter
def time_since(value):
if not isinstance(value,datetime):
return value
now = now_func()
timestamp = (now-value).total_seconds()
if timestamp < 60:
return '刚刚'
elif timestamp >=60 and timestamp < 60*60:
        minutes = int(timestamp/60)
        return '%s分钟前'% minutes
elif timestamp >=60*60 and timestamp < 60*60*24:
hours = int(timestamp/3600)
return '%s小时前'% hours
elif timestamp >=60*60*24 and timestamp < 60*60*24*30:
        days = int(timestamp/(3600*24))
return '%s天前'% days
else:
return value.strftime('%Y/%m/%d %H:%M')
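
# Template usage sketch (assumed example, not from the original app):
#   {{ news.update_time|time_since }}  ->  "刚刚" / "5分钟前" / "3小时前"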
@register.filter
def time_format(value):
if not isinstance(value,datetime):
return value
return localtime(value).strftime('%Y/%m/%d %H:%M:%S') | [
"[email protected]"
] | |
6b30fe3caac3fffcc402a8552c72efc350f09b96 | ccdeae68e468ad399a89181c37bba4490bcdc259 | /scripts/bestExpressions_L_TOP26_WM_LASSO_1.py | a7b9e93f86d34a509b96ab60a1ac2df818dabe1d | [] | no_license | jameshughes89/NonlinearModelsFMRI-2 | 19262d4494aa6adc0e9bd9592069ad6b757dda6b | a507a41d0a0a728d02616023aea0e66fafc1c387 | refs/heads/master | 2021-09-06T17:05:38.086733 | 2018-02-07T15:19:23 | 2018-02-07T15:19:23 | 109,417,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,587 | py | from math import *
def funcL_WM_100307(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -3.09574729849e-13 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0897641196145 * v4 + 0.0 * v5 + 0.0 * v7 + -0.0 * v8 + 0.0 * v9 + 0.0 * v11 + 0.0 * v12 + 0.0961547221197 * v13 + 0.0 * v14 + 0.196939244764 * v15 + 0.0769394752556 * v16 + 0.344392610866 * v17 + 0.0 * v18 + 0.0814563743731 * v19 + 0.0 * v20 + 0.0 * v21 + 0.0735098800637 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v27 + 0.0 * v28
def funcL_WM_100408(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -6.27662838751e-14 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v4 + 0.170481233495 * v5 + 0.121231367064 * v7 + 0.0 * v8 + 0.0 * v9 + 0.0 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0 * v15 + 0.000870619700537 * v16 + 0.226194422979 * v17 + 0.0 * v18 + 0.0 * v19 + 0.080978384483 * v20 + 0.146662515218 * v21 + 0.113010043781 * v22 + 0.0 * v23 + 0.0997859210423 * v24 + 0.0316586494501 * v25 + 0.0 * v27 + 0.0706717429605 * v28
def funcL_WM_101006(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 1.17470316183e-13 * 1 + -0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.186185804365 * v5 + -0.0 * v8 + 0.0625300451781 * v9 + 0.0 * v10 + -0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.1529647217 * v15 + 0.224851281639 * v16 + 0.0 * v17 + 0.0 * v18 + 0.222459750568 * v19 + 0.0 * v20 + 0.0 * v21 + 0.0 * v22 + 0.0 * v24 + 0.000214344441237 * v25 + 0.0 * v26 + 0.0 * v28
def funcL_WM_101107(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 9.43327671106e-14 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0206707862075 * v4 + -0.0 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + -0.0 * v9 + 0.0 * v10 + -0.0 * v11 + 0.0 * v13 + 0.0 * v14 + 0.0 * v15 + 0.0 * v16 + 0.249551371124 * v17 + 0.0934527718085 * v18 + 0.165709120823 * v20 + 0.0 * v21 + 0.363189982138 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0 * v26 + 0.0 * v27 + -0.0 * v28
def funcL_WM_101309(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -2.26781198095e-13 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0523427442996 * v4 + 0.0960075086689 * v5 + 0.00889677468049 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0 * v9 + 0.0 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0 * v17 + 0.0 * v18 + 0.145064432903 * v20 + 0.118383233007 * v21 + 0.0 * v22 + 0.0 * v24 + 0.253351212958 * v25 + 0.0 * v26 + 0.239639776793 * v27 + 0.0191803001548 * v28
def funcL_WM_101410(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 3.00238111698e-14 * 1 + 0.0 * v0 + 0.00745145383058 * v1 + -0.0 * v2 + 0.0 * v4 + 0.146560337568 * v5 + 0.0 * v7 + -0.0 * v8 + 0.0 * v9 + 0.125629017072 * v10 + 0.0 * v11 + -0.0 * v12 + 0.0 * v13 + 0.0658179570303 * v15 + 0.0 * v16 + 0.243234636022 * v17 + 0.0305085552523 * v18 + 0.0 * v19 + 0.0 * v20 + 0.0 * v21 + 0.0785959483455 * v22 + 0.246164864309 * v23 + -0.0 * v24 + 0.00777364636323 * v25 + 0.0 * v27 + 0.0 * v28
def funcL_WM_101915(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -1.6535109487e-13 * 1 + 0.0 * v0 + 0.181249062103 * v1 + 0.0 * v2 + 0.0 * v4 + 0.067232487182 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0547543886838 * v15 + 0.0 * v17 + 0.0 * v18 + 0.15007548187 * v20 + 0.30736940405 * v21 + 0.157690721709 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0 * v26 + 0.0 * v27 + 0.00642298489153 * v28
def funcL_WM_102008(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -6.90771430695e-14 * 1 + 0.0 * v0 + 0.0420846960343 * v1 + 0.429353415755 * v2 + 0.0 * v3 + 0.0 * v5 + 0.0 * v6 + 0.0423139619633 * v7 + -0.0 * v8 + 0.0 * v10 + 0.0 * v11 + -0.0 * v12 + 0.0 * v13 + 0.0 * v15 + 0.0 * v16 + 0.0141188113612 * v17 + 0.0 * v18 + 0.0 * v19 + 0.287172076954 * v20 + 0.112493872227 * v21 + 0.0 * v22 + 0.0 * v23 + 0.0 * v25 + -0.0 * v26 + 0.0 * v27 + 0.0 * v28
def funcL_WM_102311(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 1.23705311249e-13 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.21646178955 * v5 + 0.0 * v7 + 0.0 * v8 + 0.0783034733505 * v9 + 0.0 * v10 + 0.0859870374143 * v11 + 0.0 * v12 + 0.0 * v13 + 0.155469912559 * v15 + 0.0 * v16 + 0.0769217791098 * v17 + 0.0 * v18 + 0.0487138153117 * v20 + 0.20481346756 * v21 + 0.0762311375244 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0 * v27 + 0.0 * v28
def funcL_WM_102816(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -1.27640540216e-13 * 1 + 0.0 * v0 + 0.00217164824841 * v1 + 0.0 * v2 + 0.0 * v3 + 0.221921091481 * v4 + 0.0 * v5 + 0.0 * v6 + 0.0736713034579 * v7 + 0.0413899649829 * v8 + 0.0 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v15 + 0.0141698068682 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0468814411257 * v20 + 0.325253219436 * v21 + 0.168722747997 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0402709493746 * v27 + 0.0 * v28
def funcL_WM_103111(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 6.39600296536e-14 * 1 + 0.0282317035808 * v0 + 0.0914005296067 * v1 + 0.0527335660881 * v2 + 0.0 * v3 + 0.0 * v4 + 0.146392178976 * v5 + 0.0 * v7 + 0.0 * v8 + 0.0 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v14 + 0.0 * v15 + 0.0699834737897 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0440351738491 * v19 + 0.0 * v20 + 0.230447449872 * v21 + 0.226321914682 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0 * v27 + 0.0379824849654 * v28
def funcL_WM_103414(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 2.23439031746e-13 * 1 + 0.13338270754 * v1 + 0.0135930226624 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0179463714468 * v5 + 0.0 * v6 + 0.080344455294 * v7 + 0.0 * v8 + 0.0 * v9 + 0.0 * v10 + 0.0907503219549 * v11 + 0.0 * v12 + 0.0 * v14 + 0.0233692891605 * v15 + 0.0 * v16 + 0.0365782808089 * v17 + 0.0 * v18 + 0.0855375365364 * v19 + 0.184270293584 * v20 + 0.132730321028 * v21 + 0.0739064512502 * v22 + 0.0581208178043 * v23 + 0.0651312823592 * v25 + 0.0 * v27 + 0.0 * v28
def funcL_WM_103515(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 1.16479046243e-14 * 1 + -0.0 * v0 + -0.0 * v1 + 0.0 * v2 + 0.249670977437 * v3 + 0.0 * v4 + 0.0 * v5 + -0.0 * v7 + 0.0 * v9 + 0.0243305758584 * v10 + 0.0 * v11 + -0.244962276674 * v12 + -0.0 * v13 + -0.0 * v14 + -0.0 * v15 + -0.0 * v16 + 0.547896859324 * v17 + 0.0 * v19 + 0.172197659282 * v20 + -0.0 * v21 + 0.0 * v22 + -0.0 * v23 + 0.0 * v24 + 0.0 * v25 + -0.0 * v26 + -0.0 * v28
def funcL_WM_103818(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 2.1976762151e-14 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + -0.0 * v3 + 0.00764386428837 * v4 + 0.332648997162 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0 * v9 + -0.0 * v11 + 0.0 * v12 + -0.0 * v14 + 0.28853360203 * v15 + -0.0 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0 * v19 + 0.135841202246 * v20 + 0.0393043158909 * v21 + 0.0530095356938 * v22 + 0.0 * v24 + 0.106735713624 * v25 + 0.0 * v26 + 0.0 * v27
def funcL_WM_104012(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 2.76313110393e-14 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0536716379656 * v5 + 0.0 * v7 + 0.0 * v8 + 0.180056775785 * v9 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.458004837835 * v15 + 0.0 * v16 + 0.0615969946761 * v17 + 0.0 * v18 + 0.0 * v19 + 0.0 * v20 + 0.00551170290585 * v21 + 0.0 * v22 + 0.0 * v23 + 0.0 * v24 + 0.115441787104 * v25 + 0.0 * v27 + 0.0 * v28
def funcL_WM_104820(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -4.59518146726e-13 * 1 + 0.0974344271507 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0 * v5 + 0.0 * v6 + 0.103758415396 * v7 + 0.0 * v8 + 0.0693871347721 * v9 + 0.0947608986232 * v10 + 0.0385364104584 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0 * v16 + 0.0 * v19 + 0.0493851991676 * v20 + 0.105536728482 * v21 + 0.165747690084 * v22 + 0.0409265492022 * v23 + 0.0454752403263 * v24 + 0.183402491219 * v25 + 0.0 * v26 + 0.0 * v27 + 0.049632895862 * v28
def funcL_WM_105014(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -1.97312315687e-14 * 1 + 0.0 * v0 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0 * v5 + -0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0 * v9 + 0.0 * v10 + 0.0 * v11 + -0.0 * v12 + -0.0 * v13 + 0.0932171550171 * v15 + 0.305861386466 * v16 + 0.0 * v17 + 0.0348896144543 * v19 + 0.275714784198 * v21 + 0.179513357404 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + -0.0 * v26 + -0.0 * v27 + 0.12303530295 * v28
def funcL_WM_105115(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 1.35543073911e-13 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0402271361033 * v5 + 0.0 * v6 + 0.108326620231 * v7 + 0.0 * v8 + 0.275859786861 * v9 + 0.0 * v10 + 0.0 * v11 + 0.0282262417893 * v12 + 0.0 * v13 + 0.119795238089 * v15 + 0.0 * v16 + 0.00629639184716 * v17 + 0.0 * v18 + 0.213426057168 * v21 + 0.0 * v22 + 0.0637131560992 * v23 + 0.0347157608695 * v24 + 0.0639936158033 * v25 + 0.0 * v27 + 0.0 * v28
def funcL_WM_105216(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -7.75903703346e-14 * 1 + 0.0 * v0 + 0.075535310574 * v1 + -0.0 * v2 + 0.0 * v4 + 0.145946072197 * v5 + 0.164246679434 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0 * v9 + 0.183599394721 * v11 + -0.0 * v12 + -0.0 * v13 + 0.0 * v14 + 0.0 * v16 + -0.0 * v17 + 0.0 * v18 + 0.0 * v19 + 0.0 * v20 + 0.147876721668 * v21 + 0.0 * v22 + 0.195368587692 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0 * v27 + 0.00821036955314 * v28
def funcL_WM_105923(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -7.33605602247e-14 * 1 + 0.0 * v0 + 0.0349669645688 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0752104590769 * v5 + 0.0 * v7 + 0.0 * v8 + 0.110557487059 * v9 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0 * v15 + 0.0 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0 * v19 + 0.0795082348141 * v20 + 0.365235181142 * v21 + 0.120697280052 * v22 + 0.0 * v23 + 0.131754346553 * v25 + 0.0 * v27 + 0.0169544656609 * v28
def funcL_WM_106016(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -3.18366392262e-14 * 1 + 0.0 * v0 + 0.0663111226123 * v2 + 0.0 * v3 + 0.0 * v4 + 0.10278247806 * v5 + 0.0 * v7 + 0.0 * v8 + 0.0256708621639 * v9 + 0.0 * v10 + 0.0877778898898 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0 * v15 + 0.0 * v16 + 0.169356972353 * v17 + 0.0 * v18 + 0.0 * v19 + 0.130182732374 * v20 + 0.0121056730249 * v21 + 0.0511597292502 * v22 + 0.0 * v23 + 0.0130261780452 * v24 + 0.0417676040925 * v25 + 0.300229383962 * v28
def funcL_WM_106319(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -6.49133469297e-14 * 1 + 0.122953375484 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0838423798382 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0917216107252 * v9 + 0.0 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.148504078333 * v15 + 0.0 * v17 + 0.0 * v18 + 0.137835578391 * v19 + 0.288345925862 * v20 + 0.0549643056839 * v21 + 0.0 * v22 + 0.0 * v23 + 0.0 * v25 + 0.0 * v26 + 0.0 * v27 + 0.0 * v28
def funcL_WM_106521(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 1.2585820309e-13 * 1 + 0.0590449116979 * v1 + 0.0 * v2 + 0.10406216207 * v4 + 0.0961311936793 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0437762360771 * v9 + 0.0 * v10 + 0.189289804632 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v15 + 0.0 * v16 + 0.0 * v17 + 0.0 * v18 + 0.16614709374 * v20 + 0.170037598777 * v21 + 0.150424556547 * v22 + 0.0106102829209 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0 * v26 + 0.0 * v27 + 0.0 * v28
def funcL_WM_107321(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -3.21755927994e-15 * 1 + 0.0 * v0 + 0.0 * v1 + -0.0 * v2 + -0.0 * v3 + 0.0 * v4 + 0.0 * v5 + -0.0 * v6 + -0.0 * v7 + 0.0632582949122 * v8 + -0.0 * v9 + 0.0 * v10 + 0.0189756233606 * v11 + -0.0 * v12 + -0.0 * v13 + 0.0 * v15 + 0.0 * v16 + 0.253214365267 * v17 + -0.0 * v18 + 0.0 * v19 + 0.0228953021471 * v20 + 0.0 * v21 + 0.562931125094 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v28
def funcL_WM_107422(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -5.59947611145e-14 * 1 + 0.0 * v0 + 0.21993107236 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0189483723719 * v4 + 0.0 * v5 + 0.0 * v7 + 0.0 * v8 + 0.0 * v9 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0325708423151 * v15 + 0.0 * v16 + 0.226888461711 * v17 + 0.0 * v18 + 0.0 * v19 + 0.00946862836848 * v20 + 0.0184402799475 * v21 + 0.105470112372 * v22 + 0.21369921147 * v23 + 0.0 * v24 + 0.0 * v27 + 0.0435220234836 * v28
def funcL_WM_108121(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 7.23923939812e-14 * 1 + 0.0 * v0 + 0.0316091560521 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0379299395791 * v4 + 0.284128068061 * v5 + 0.199192575007 * v6 + 0.0 * v7 + 0.0 * v8 + -0.0 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v15 + 0.0 * v16 + 0.0 * v17 + 0.0 * v18 + 0.126017053707 * v19 + 0.0964234849031 * v20 + 0.15624966013 * v21 + 0.0 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0236640411651 * v25 + 0.0 * v27 + 0.0467761797744 * v28
def funcL_WM_108323(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 2.49512451667e-13 * 1 + 0.0 * v0 + 0.0330147521331 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0101640395469 * v4 + 0.0 * v5 + 0.0 * v7 + 0.0 * v8 + 0.253213549329 * v9 + 0.0 * v10 + 0.0489321947874 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0253797309493 * v15 + 0.0384743177634 * v16 + 0.0508230363631 * v17 + 0.0 * v18 + 0.0 * v19 + 0.0 * v20 + 0.221295607782 * v21 + 0.0408801259459 * v22 + 0.0386342284653 * v23 + 0.0 * v25 + 0.0 * v27 + 0.269571091096 * v28
def funcL_WM_108525(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -3.85691746603e-14 * 1 + 0.0 * v0 + 0.0329591645677 * v1 + 0.0 * v2 + 0.0 * v3 + 0.00197283453879 * v4 + 0.247594000944 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0 * v9 + 0.0 * v12 + 0.0 * v14 + 0.0 * v15 + 0.0 * v17 + 0.0 * v18 + 0.0 * v19 + 0.130095933808 * v20 + 0.237188777869 * v21 + 0.0 * v22 + 0.0 * v23 + 0.0 * v24 + 0.185542857473 * v25 + 0.0 * v26 + 0.0 * v27 + 0.0961776603019 * v28
def funcL_WM_108828(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -2.38826340961e-13 * 1 + 0.122514517531 * v1 + 0.0 * v2 + 0.0 * v3 + 0.122985891352 * v4 + 0.147732440831 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.113647211708 * v9 + 0.0 * v11 + 0.0 * v12 + 0.0321437842397 * v13 + 0.0 * v15 + 0.028222161484 * v16 + 0.00578554086157 * v17 + 0.0 * v18 + 0.0 * v19 + 0.0 * v20 + 0.263110243492 * v21 + 0.0752460504744 * v22 + 0.0 * v23 + 0.0 * v25 + 0.0 * v26 + 0.0524828073302 * v27 + 0.0 * v28
def funcL_WM_109123(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 7.11389851012e-14 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0 * v5 + 0.0 * v7 + 0.0 * v8 + 0.0259507242811 * v9 + 0.0 * v10 + 0.243535691374 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0882816680672 * v15 + 0.0 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0204199331955 * v19 + 0.0 * v20 + 0.235175718291 * v21 + 0.172827941001 * v22 + 0.0 * v23 + 0.0 * v25 + 0.141557993669 * v28
def funcL_WM_109325(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 4.38242998377e-13 * 1 + 0.0342800192939 * v0 + 0.0 * v1 + 0.0 * v3 + 0.0982808833235 * v4 + 0.0 * v5 + 0.0 * v7 + 0.0 * v8 + 0.0541006444817 * v9 + 0.0 * v10 + 0.00589742221588 * v11 + 0.0 * v12 + 0.0226716549101 * v13 + 0.0 * v15 + 0.00914969288889 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0 * v19 + 0.03600852689 * v20 + 0.443192235401 * v21 + 0.15416747145 * v22 + 0.110331624343 * v24 + 0.0 * v25 + 0.0 * v26 + 0.0 * v27 + 0.0 * v28
def funcL_WM_110411(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -1.39819077349e-13 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0495735582553 * v2 + 0.0 * v3 + 0.0428023892802 * v4 + 0.0 * v5 + 0.256885780849 * v7 + 0.0 * v8 + 0.0 * v9 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0162470146724 * v14 + 0.0 * v15 + 0.0 * v17 + 0.0 * v18 + 0.0 * v19 + 0.105637286003 * v20 + 0.311100247341 * v21 + 0.150403368082 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0 * v27 + 0.0 * v28
def funcL_WM_111312(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 2.68040950573e-14 * 1 + 0.0 * v0 + 0.0600805449007 * v1 + 0.0194090243591 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0 * v5 + 0.0 * v8 + 0.214081894394 * v9 + 0.0 * v10 + 0.0351554554672 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0 * v15 + 0.026362785539 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0 * v19 + 0.0 * v20 + 0.237131722238 * v21 + 0.226118181816 * v22 + 0.0 * v24 + 0.136073746448 * v25 + 0.0 * v27 + 0.0 * v28
def funcL_WM_111413(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 7.16639937389e-14 * 1 + -0.0 * v0 + 0.0 * v1 + -0.0 * v2 + -0.0 * v3 + 0.0 * v4 + 0.0 * v5 + -0.262889530611 * v6 + 0.0 * v7 + -0.0 * v8 + 0.0 * v9 + -0.0 * v10 + -0.0 * v13 + 0.0 * v14 + 0.0200643214971 * v15 + -0.0895040126474 * v16 + 0.0 * v17 + -0.0 * v18 + 0.247299878599 * v20 + 0.0595791181758 * v21 + 0.300951491234 * v22 + -0.0 * v23 + -0.0 * v24 + 0.0 * v26 + 0.0 * v27 + 0.0 * v28
def funcL_WM_111514(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 6.05989125703e-14 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v4 + 0.237648008034 * v5 + 0.0 * v6 + 0.0919937336656 * v7 + 0.120190657794 * v9 + 0.0 * v10 + 0.0 * v11 + 0.0 * v13 + 0.0 * v14 + 0.0 * v15 + 0.0 * v17 + 0.0 * v18 + 0.0112772072631 * v19 + 0.158742275228 * v20 + 0.0407088181441 * v21 + 0.291770031132 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0 * v26 + 0.0 * v27 + 0.0 * v28
def funcL_WM_111716(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 1.01524013079e-13 * 1 + 0.305023135846 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + -0.0 * v5 + 0.299307045886 * v6 + 0.0 * v8 + 0.0 * v9 + 0.0 * v10 + -0.0 * v11 + -0.0 * v14 + -0.173495746744 * v15 + 0.0 * v16 + 0.24742679182 * v17 + -0.0 * v18 + 0.0 * v19 + -0.0 * v20 + 0.0 * v21 + 0.185805008936 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + -0.0 * v27 + 0.146258574159 * v28
def funcL_WM_113215(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -8.73717812898e-14 * 1 + 0.0303828681139 * v0 + 0.0136229365316 * v1 + 0.0 * v2 + 0.112813822255 * v3 + 0.0489868522717 * v4 + 0.0 * v5 + 0.0 * v7 + 0.0 * v8 + 0.0 * v9 + 0.0240474669251 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v15 + 0.0542592598896 * v16 + 0.0 * v17 + 0.0 * v18 + 0.16409794668 * v20 + 0.377026593003 * v21 + 0.0 * v22 + 0.0 * v23 + 0.0 * v24 + 0.025711725253 * v25 + 0.0 * v27 + 0.170556218897 * v28
def funcL_WM_113619(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 4.72188905638e-14 * 1 + 0.132091733118 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.29991000266 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0354096067876 * v9 + 0.0 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0 * v15 + 0.0433511569709 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0932961724683 * v19 + 0.0 * v20 + 0.0549734630224 * v21 + 0.208817044814 * v22 + 0.0189850330395 * v25 + 0.0306566332134 * v27 + 0.0505106243963 * v28
def funcL_WM_113922(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -2.12748381379e-13 * 1 + 0.00575011322871 * v0 + 0.129489825793 * v1 + 0.0 * v2 + 0.0 * v4 + 0.0 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.181731657864 * v9 + 0.00621074590425 * v11 + -0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.246445837984 * v15 + 0.0 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0 * v19 + 0.0593708402951 * v20 + 0.219860367134 * v21 + 0.0 * v24 + 0.0 * v25 + 0.0 * v26 + 0.0 * v27 + 0.06680548719 * v28
def funcL_WM_114419(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 1.25244153069e-13 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.234645054449 * v4 + 0.0 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.15936042648 * v9 + 0.0 * v11 + 0.0 * v13 + 0.0 * v14 + 0.0 * v15 + 0.00369367704254 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0537063490266 * v19 + 0.0 * v20 + 0.287635247731 * v21 + 0.121291245414 * v22 + 0.0 * v25 + 0.0886786936407 * v26 + 0.0 * v27 + 0.0451721400509 * v28
funcs = [funcL_WM_100307,funcL_WM_100408,funcL_WM_101006,funcL_WM_101107,funcL_WM_101309,funcL_WM_101410,funcL_WM_101915,funcL_WM_102008,funcL_WM_102311,funcL_WM_102816,funcL_WM_103111,funcL_WM_103414,funcL_WM_103515,funcL_WM_103818,funcL_WM_104012,funcL_WM_104820,funcL_WM_105014,funcL_WM_105115,funcL_WM_105216,funcL_WM_105923,funcL_WM_106016,funcL_WM_106319,funcL_WM_106521,funcL_WM_107321,funcL_WM_107422,funcL_WM_108121,funcL_WM_108323,funcL_WM_108525,funcL_WM_108828,funcL_WM_109123,funcL_WM_109325,funcL_WM_110411,funcL_WM_111312,funcL_WM_111413,funcL_WM_111514,funcL_WM_111716,funcL_WM_113215,funcL_WM_113619,funcL_WM_113922,funcL_WM_114419,]
def getFuncs(): return funcs
| [
"[email protected]"
] | |
156130cd7d52ce78d3ffe0cfb0f1316f7548cdbf | d125c002a6447c3f14022b786b07712a7f5b4974 | /tests/functional/intfunc/math/test_ceil_01.py | 97611af9f2cd0d2d47f57bfbf85d1844845159dc | [
"MIT"
] | permissive | FirebirdSQL/firebird-qa | 89d5b0035071f9f69d1c869997afff60c005fca9 | cae18186f8c31511a7f68248b20f03be2f0b97c6 | refs/heads/master | 2023-08-03T02:14:36.302876 | 2023-07-31T23:02:56 | 2023-07-31T23:02:56 | 295,681,819 | 3 | 2 | MIT | 2023-06-16T10:05:55 | 2020-09-15T09:41:22 | Python | UTF-8 | Python | false | false | 707 | py | #coding:utf-8
"""
ID: intfunc.math.ceil
TITLE: CEIL( <number>)
DESCRIPTION:
Returns a value representing the smallest integer that is greater than or equal to the input argument.
FBTEST: functional.intfunc.math.ceil_01
"""
import pytest
from firebird.qa import *
db = db_factory()
test_script = """select CEIL( 2.1) from rdb$database;
select CEIL( -2.1) from rdb$database;
"""
act = isql_act('db', test_script)
expected_stdout = """
CEIL
=====================
3
CEIL
=====================
-2
"""
@pytest.mark.version('>=3')
def test_1(act: Action):
act.expected_stdout = expected_stdout
act.execute()
assert act.clean_stdout == act.clean_expected_stdout
| [
"[email protected]"
] | |
4496d5f07d39a193ef3fbfd8710da46756d19ecc | c62dbc5715fe80e106a666a8f7a6aeb051d0b40e | /analytical_solution.py | 016425a97e584c740f05ad933c74f8b757d5a4e2 | [] | no_license | mishaukr7/MM_LAB_5 | 14ebb2c8553cfb1f1b13293e6160294fb2684a9c | 610a623d1a63ddf0c231575c2b78c4fc1bb4a454 | refs/heads/master | 2021-08-23T15:16:34.096484 | 2017-12-05T09:03:46 | 2017-12-05T09:03:46 | 113,076,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | import math
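
# The two functions below evaluate closed-form population models (comments
# added for clarity): free growth N(t) = N0*exp(r*(t - t0)) and logistic
# growth N(t) = k*N0*exp(r*(t - t0)) / (k + N0*(exp(r*(t - t0)) - 1)).
# Note that the code hard-codes t0 = 20 inside exp() even though t0 is a
# parameter; that behavior is kept exactly as in the source.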
def analytical_method_find_solution_free(t0, N0, r, T):
N = []
time = []
for t in range(t0, T+1):
N_new = N0*math.exp(r*(t-20))
N.append(N_new)
time.append(t)
return time, N
def analytical_method_find_solution_limited(t0, N0, r, k, T):
N = []
time = []
for t in range(t0, T):
N_new = (k * N0 * math.exp(r * (t - 20)))/(k + N0 * (math.exp(r * (t - 20)) - 1))
N.append(N_new)
time.append(t)
return time, N
| [
"[email protected]"
] | |
e92090672df6dbc77947cca8dd3f20b98894a501 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_2/rffada002/question2.py | 5ad1e0412877bdf376192722edcf2c9130f0adb5 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,664 | py | print ("Welcome to the 30 Second Rule Expert")
print ("------------------------------------")
print ("Answer the following questions by selecting from among the options.")
seen=input("Did anyone see you? (yes/no)\n")
if (seen == 'no'):
sticky=input("Was it sticky? (yes/no)\n")
if (sticky == 'no'):
emausaurus=input("Is it an Emausaurus? (yes/no)\n")
if (emausaurus == 'no'):
cat=input("Did the cat lick it? (yes/no)\n")
if (cat == 'no'):
print ("Decision: Eat it.")
elif (cat == 'yes'):
healthy=input("Is your cat healthy? (yes/no)\n")
if (healthy == 'yes'):
print ("Decision: Eat it.")
elif (healthy == 'no'):
print ("Decision: Your call.")
elif (emausaurus == 'yes'):
megalosaurus=input("Are you a Megalosaurus? (yes/no)\n")
if (megalosaurus == 'yes'):
print ("Decision: Eat it.")
elif (megalosaurus == 'no'):
print ("Decision: Don't eat it.")
elif (sticky == 'yes'):
steak=input("Is it a raw steak? (yes/no)\n")
if (steak == 'no'):
cat=input("Did the cat lick it? (yes/no)\n")
if (cat == 'no'):
print ("Decision: Eat it.")
elif (cat == 'yes'):
healthy=input("Is your cat healthy? (yes/no)\n")
if (healthy == 'yes'):
print ("Decision: Eat it.")
elif (healthy == 'no'):
print ("Decision: Your call.")
elif (steak == 'yes'):
puma=input("Are you a puma? (yes/no)\n")
if (puma == 'yes'):
print ("Decision: Eat it.")
elif (puma == 'no'):
print ("Decision: Don't eat it.")
elif (seen == 'yes'):
friend=input("Was it a boss/lover/parent? (yes/no)\n")
if (friend == 'no'):
print ("Decision: Eat it.")
elif (friend == 'yes'):
price=input("Was it expensive? (yes/no)\n")
if (price == 'no'):
chocolate=input("Is it chocolate? (yes/no)\n")
if (chocolate == 'no'):
print ("Decision: Don't eat it.")
elif (chocolate == 'yes'):
print ("Decision: Eat it.")
elif (price == 'yes'):
cut=input("Can you cut off the part that touched the floor? (yes/no)\n")
if (cut == 'yes'):
print ("Decision: Eat it.")
elif (cut == 'no'):
print ("Decision: Your call.") | [
"[email protected]"
] | |
6d164cfc391db5ee4400cf4280c951a39b8e146a | 443585e4fc146308b18bc2f9234d0947da38d3e5 | /practice/yj/csv/Quiz2.py | cc4f15f0435d1e5ad3b650c79dc1a5fe19b07be9 | [] | no_license | ggyudongggyu/20201208commit | b524c4a7fb241cacaacffa5882c55d1d0ccba11f | fbb58a8ed06f454a2a79a9b8c75deabaec62b317 | refs/heads/master | 2023-02-02T21:59:51.518218 | 2020-12-24T14:32:21 | 2020-12-24T14:32:21 | 319,578,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | from matplotlib.pyplot import *
title('plot graph')
plot([1, 2, 3, 4], [10, 20, 30, 40], marker='.', color= 'green', label = '1st')
plot([1, 2, 3, 4], [30, 15, 25, 10], marker= '^' ,color = 'pink', label = '2nd')
# plot([1, 2, 3, 4], [15, 25, 15, 25], linestyle= '-.' ,color = 'red', label = '3rd')
# plot([1, 2, 3, 4], [20, 10, 30, 5], linestyle= '-' ,color = 'blue', label = '4th')
legend()
show()
| [
"[email protected]"
] | |
dd1baa59268b60d7d8e6c9a30dd4be4fd8fe01c2 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/infra/rtclusterpol.py | 1b736c601de6b4c7027d78510986ca0e568afc10 | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 4,589 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtClusterPol(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = TargetRelationMeta("cobra.model.infra.RtClusterPol", "cobra.model.vns.CtrlrMgmtPol")
meta.moClassName = "infraRtClusterPol"
meta.rnFormat = "rtvnsClusterPol-[%(tDn)s]"
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "Management Policy"
meta.writeAccessMask = 0x40000000000001
meta.readAccessMask = 0x4040000000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.infra.ClusterPol")
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.pol.NFromRef")
meta.rnPrefixes = [
('rtvnsClusterPol-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 20603, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 4934
prop.defaultValueStr = "vnsCtrlrMgmtPol"
prop._addConstant("unspecified", "unspecified", 0)
prop._addConstant("vnsCtrlrMgmtPol", None, 4934)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 20602, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("tDn", prop)
meta.namingProps.append(getattr(meta.props, "tDn"))
getattr(meta.props, "tDn").needDelimiter = True
def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
namingVals = [tDn]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
daaf7110d0464d08291fb7f7a191cb8182631fa6 | 27040f0d537c1898c9f1fce4db68b24588840987 | /7. Reverse Integer.py | 834d39db9bf66caba7c2392e1009abf6fb37a850 | [] | no_license | xtanmaygarg/LeetCodeSolutions | 0197474e92d4ef14676342d00933e764f8b29581 | 5fd06d2f0da222977c1ae6e4d219a682b3596341 | refs/heads/master | 2021-06-14T09:39:37.795785 | 2020-12-04T10:44:07 | 2020-12-04T10:44:07 | 254,488,075 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | class Solution:
def reverse(self, x: int) -> int:
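        # Reverse the digits via string slicing (handling a negative sign
        # separately), then clamp to the signed 32-bit range the problem
        # requires, returning 0 on overflow.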
if x >= 0:
res = int(str(x)[::-1])
else:
res = -int(str(x)[1:][::-1])
if -2**31 <= res <= (2**31-1):
return res
return 0
| [
"[email protected]"
] | |
7cd5bf667dfd5853848da023118f67243641925b | e1adcd0173cf849867144a511c029b8f5529b711 | /ros_ws/Archive/ProductFiles20180213/positionControlPackage.py | c6e35e7f5eaadfa197321d29d10af5ea39366fea | [] | no_license | adubredu/cartbot_arm_subsystem | 20a6e0c7bacc28dc0486160c6e25fede49f013f2 | 3e451272ddaf720bc7bd24da2ad5201b27248f1c | refs/heads/master | 2022-01-04T23:01:25.061143 | 2019-05-14T16:45:02 | 2019-05-14T16:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,466 | py | import argparse
import sys
import struct
import time
import json
import rospy
from math import *
from std_msgs.msg import (
UInt16,
)
from StringIO import StringIO
import baxter_interface as baxter
import speech_recognition as SR
from geometry_msgs.msg import (
PoseStamped,
Pose,
Point,
Quaternion,
)
from std_msgs.msg import Header
from baxter_core_msgs.srv import (
SolvePositionIK,
SolvePositionIKRequest,
)
def xyzToAngles(limbs, x, y, z, xr, yr, zr, wr):
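    # Queries Baxter's inverse-kinematics service to convert a Cartesian pose
    # (position plus quaternion orientation) into joint angles for the given
    # limb; returns the string "invalid" when no joint solution exists.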
ns = "ExternalTools/" + limbs + "/PositionKinematicsNode/IKService"
iksvc = rospy.ServiceProxy(ns, SolvePositionIK)
ikreq = SolvePositionIKRequest()
hdr = Header(stamp=rospy.Time.now(), frame_id='base')
pose = PoseStamped(
header=hdr,
pose=Pose(
position=Point(
x=x,
y=y,
z=z,
),
orientation=Quaternion(
x=xr,
y=yr,
z=zr,
w=wr,
),
),
)
ikreq.pose_stamp.append(pose)
try:
rospy.wait_for_service(ns, 5.0)
resp = iksvc(ikreq)
except (rospy.ServiceException, rospy.ROSException), e:
rospy.logerr("Service call failed: %s" % (e,))
exit()
resp_seeds = struct.unpack('<%dB' % len(resp.result_type),
resp.result_type)
if (resp_seeds[0] != resp.RESULT_INVALID):
seed_str = {
ikreq.SEED_USER: 'User Provided Seed',
ikreq.SEED_CURRENT: 'Current Joint Angles',
ikreq.SEED_NS_MAP: 'Nullspace Setpoints',
}.get(resp_seeds[0], 'None')
# Format solution into Limb API-compatible dictionary
limb_joints = dict(zip(resp.joints[0].name, resp.joints[0].position))
return limb_joints
else:
print("INVALID POSE - No Valid Joint Solution Found.")
return "invalid"
def euler2Quat(xr, yr, zr):
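    # Converts Euler angles given in degrees into a quaternion dict. The
    # composition below matches the common heading/attitude/bank formula;
    # the original file leaves the rotation-order convention undocumented,
    # so treat that reading as an assumption.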
toRet = {'qw': 0, 'qx': 0, 'qy': 0, 'qz': 0}
xr = radians(xr)
yr = radians(yr)
zr = radians(zr)
c1 = cos(yr/2)
c2 = cos(zr/2)
c3 = cos(xr/2)
s1 = sin(yr/2)
s2 = sin(zr/2)
s3 = sin(xr/2)
toRet['qw'] = c1*c2*c3 - s1*s2*s3
toRet['qx'] = s1*s2*c3 + c1*c2*s3
toRet['qy'] = s1*c2*c3 + c1*s2*s3
toRet['qz'] = c1*s2*c3 - s1*c2*s3
return toRet
def moveOnAxis(limb, axis, dist, speed):
## Moves arm on x, y, or z axis keeping orientation constant
# speed is in m/s
# dist in m
# limb is a handle to a limb object
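    # Interpolates linearly along the chosen axis at roughly 20 Hz
    # (secPframe = .05), solving IK for every intermediate waypoint so the
    # end-effector orientation stays constant while the position changes.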
if 'left' in limb.joint_names()[0]: limbName = 'left'
else: limbName = 'right'
print(limbName)
position = {'x':0, 'y':1, 'z':2}
pose = limb.endpoint_pose()
position['x'] = pose['position'][0]
position['y'] = pose['position'][1]
position['z'] = pose['position'][2]
orient = pose['orientation']
secPframe = .05
frames = int(abs(dist)*(1/float(speed))*(1/secPframe))
if frames == 0: return limb.endpoint_pose()
distPframe = float(dist)/float(frames)
limb.set_joint_position_speed(1)
rate = rospy.Rate(1/secPframe)
for i in range(0, frames):
position[axis] += distPframe
jointPos = xyzToAngles(limbName, position['x'], position['y'], position['z'], orient[0], orient[1], orient[2], orient[3])
if jointPos != "invalid":
# Check if it is minor move. if it is not, use smoother movement function
minorMove = True
actualJointPos = limb.joint_angles()
for joint, angle in jointPos.iteritems():
if abs(angle-actualJointPos[joint]) > .8: minorMove = False
if minorMove:
limb.set_joint_positions(jointPos)
else:
print('bigmove')
limb.move_to_joint_positions(jointPos, timeout=3, threshold=.02)
else:
print("Can't Move Here")
return limb.endpoint_pose()
rate.sleep()
return limb.endpoint_pose()
def playPositionFile(fPath, lLimb, rLimb):
# Moves limb to specified joint positions
# fPath: string indentifying path to file
# lLimb handle to the left limb 'Limb' object
# rLimb hanld to the right limb 'Limb' object
with open(fPath, 'r') as f:
fText = f.read()
fText = fText.replace("'", '"')
wpArray = json.loads(fText)
lLimb.set_joint_position_speed(.5)
rLimb.set_joint_position_speed(.5)
rate = rospy.Rate(1000)
for wp in wpArray:
lPos = wp['left']
rPos = wp['right']
# move left
if lPos != '':
lLimb.move_to_joint_positions(lPos)
if rPos != '':
rLimb.move_to_joint_positions(rPos)
    return (lLimb.endpoint_pose(), rLimb.endpoint_pose()) | [
"[email protected]"
] | |
efd021c0316156776876ce0abeeb3c3283a39a3d | eea3f04dc73d4536083c74cac4478835a31c4a94 | /chinese_song_generation/data_utils.py | 75424c8a5cc4c880bf635faf9ab900953138832f | [] | no_license | yscoder-github/news-generate | 15d5f9acecc92add201fb3c53aa211c0aa474e1f | 6b8a98375db984dea9edb4abff72191477bdb406 | refs/heads/master | 2023-05-26T19:58:00.797573 | 2019-07-18T01:30:36 | 2019-07-18T01:30:36 | 187,489,859 | 4 | 4 | null | 2023-05-22T22:14:54 | 2019-05-19T14:50:32 | Python | UTF-8 | Python | false | false | 5,906 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for downloading data from WMT, tokenizing, vocabularies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import jieba
from six.moves import urllib
from tensorflow.python.platform import gfile
# Special vocabulary symbols - we always put them at the start.
_PAD = b"_PAD"
_GO = b"_GO"
_EOS = b"_EOS"
_UNK = b"_UNK"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
# Regular expressions used to tokenize.
_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")
_DIGIT_RE = re.compile(br"\d")
def basic_tokenizer(sentence):
"""Very basic tokenizer: split the sentence into a list of tokens."""
words = []
#print(sentence)
for space_separated_fragment in jieba.cut(sentence.strip()):
if isinstance(space_separated_fragment, str):
word = str.encode(space_separated_fragment)
else:
word = space_separated_fragment
words.append(word)
return words
def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,
tokenizer=None, normalize_digits=False):
if not gfile.Exists(vocabulary_path):
print("Creating vocabulary %s from %s" % (vocabulary_path, data_path))
vocab = {}
with gfile.GFile(data_path, mode="rb") as f:
counter = 0
for line in f:
counter += 1
if counter % 100 == 0:
print(" processing line %d" % counter)
tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)
for w in tokens:
word = re.sub(_DIGIT_RE, b"0", w) if normalize_digits else w
if word in vocab:
vocab[word] += 1
else:
vocab[word] = 1
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
print('>> Full Vocabulary Size :',len(vocab_list))
if len(vocab_list) > max_vocabulary_size:
vocab_list = vocab_list[:max_vocabulary_size]
with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
for w in vocab_list:
vocab_file.write(w + b"\n")
def initialize_vocabulary(vocabulary_path):
if gfile.Exists(vocabulary_path):
rev_vocab = []
with gfile.GFile(vocabulary_path, mode="rb") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [line.strip() for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
#ct = 0
#for kk in vocab.keys():
# print(kk)
# ct += 1
# if ct == 5:
# break
return vocab, rev_vocab
else:
raise ValueError("Vocabulary file %s not found.", vocabulary_path)
def sentence_to_token_ids(sentence, vocabulary, tokenizer=None, normalize_digits=False):
    """Convert a sentence to a list of integer token ids using the vocabulary."""
    if tokenizer:
        words = tokenizer(sentence)
    else:
        words = basic_tokenizer(sentence)
    if not normalize_digits:
        # Vocabulary keys are bytes (the files are read in "rb" mode), so look
        # tokens up as bytes; decoding them to str would always return UNK_ID.
        return [vocabulary.get(w, UNK_ID) for w in words]
    # Normalize digits by 0 before looking words up in the vocabulary.
    return [vocabulary.get(re.sub(_DIGIT_RE, b"0", w), UNK_ID) for w in words]
def data_to_token_ids(data_path, target_path, vocabulary_path,
tokenizer=None, normalize_digits=False):
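    # Convert data_path to token ids line by line using the stored vocabulary
    # and write the space-joined id strings to target_path.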
if not gfile.Exists(target_path):
print("Tokenizing data in %s" % data_path)
vocab, _ = initialize_vocabulary(vocabulary_path)
with gfile.GFile(data_path, mode="rb") as data_file:
with gfile.GFile(target_path, mode="w") as tokens_file:
counter = 0
for line in data_file:
counter += 1
if counter % 100000 == 0:
print(" tokenizing line %d" % counter)
token_ids = sentence_to_token_ids(line, vocab, tokenizer,
normalize_digits)
tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n")
def prepare_custom_data(working_directory, train_enc, train_dec, test_enc, test_dec, enc_vocabulary_size, dec_vocabulary_size, tokenizer=None):
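    # End-to-end preprocessing: build encoder/decoder vocabularies, then write
    # token-id versions of the train and test files, returning all output paths.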
# Create vocabularies of the appropriate sizes.
enc_vocab_path = os.path.join(working_directory, "vocab%d.enc" % enc_vocabulary_size)
dec_vocab_path = os.path.join(working_directory, "vocab%d.dec" % dec_vocabulary_size)
create_vocabulary(enc_vocab_path, train_enc, enc_vocabulary_size, tokenizer)
create_vocabulary(dec_vocab_path, train_dec, dec_vocabulary_size, tokenizer)
# Create token ids for the training data.
enc_train_ids_path = train_enc + (".ids%d" % enc_vocabulary_size)
dec_train_ids_path = train_dec + (".ids%d" % dec_vocabulary_size)
data_to_token_ids(train_enc, enc_train_ids_path, enc_vocab_path, tokenizer)
data_to_token_ids(train_dec, dec_train_ids_path, dec_vocab_path, tokenizer)
# Create token ids for the development data.
enc_dev_ids_path = test_enc + (".ids%d" % enc_vocabulary_size)
dec_dev_ids_path = test_dec + (".ids%d" % dec_vocabulary_size)
data_to_token_ids(test_enc, enc_dev_ids_path, enc_vocab_path, tokenizer)
data_to_token_ids(test_dec, dec_dev_ids_path, dec_vocab_path, tokenizer)
return (enc_train_ids_path, dec_train_ids_path, enc_dev_ids_path, dec_dev_ids_path, enc_vocab_path, dec_vocab_path)
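# Hedged usage sketch (not part of the original module): the directory and
# file names below are illustrative assumptions, not project defaults.
if __name__ == "__main__":
    paths = prepare_custom_data(
        working_directory="working_dir",        # assumed output directory
        train_enc="train.enc", train_dec="train.dec",
        test_enc="test.enc", test_dec="test.dec",
        enc_vocabulary_size=40000, dec_vocabulary_size=40000)
    print("Generated files:", paths)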
| [
"[email protected]"
] | |
2d7098cb8174e3779d78a54cffcff3d299651034 | 5174346f6bd374cc8873a41ed336b7545756d753 | /examples/prompts/toolbar-prompt.py | ff31c5f2951a01c99352e655915d09e1f94ff7bc | [
"BSD-3-Clause"
] | permissive | calebstewart/python-prompt-toolkit | f06dd911399b75e9d4985b485a3e9897c04bf1d6 | 3f9f9a927b2d1a208e59af73e574825df2901e69 | refs/heads/master | 2022-07-02T16:23:24.682709 | 2020-05-14T22:45:14 | 2020-05-14T22:45:14 | 263,998,820 | 1 | 0 | null | 2020-05-14T18:51:02 | 2020-05-14T18:51:01 | null | UTF-8 | Python | false | false | 260 | py | #!/usr/bin/env python
"""
Prompt for user input as a toolbar which disappears after submission.
"""
from prompt_toolkit import prompt
if __name__ == "__main__":
answer = prompt(message="prompt$ ", prompt_in_toolbar=True)
print(f"You said: {answer}")
| [
"[email protected]"
] | |
a44c312b288d21db66156e2ee38ac70257256d20 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02918/s539072224.py | 2dbe24e4788b05696bc3160ba49b9b37d37af922 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | import sys
import numpy as np
input = lambda: sys.stdin.readline().rstrip()
INF = 10**9 + 1
def solve():
N, K = map(int, input().split())
S = np.array(list(input()), dtype='str')
if N == 1:
print(0)
exit()
ri = INF
kc = 0
fs = S[0]
if fs == 'R':
nfs = 'L'
else:
nfs = 'R'
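    # fs is the first person's direction, nfs the opposite one; ri marks the
    # start of the current run of nfs and kc counts flips used. Greedily flip
    # interior runs of nfs to match fs, using at most K flips.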
for i in range(N):
if S[i] == nfs:
ri = min(ri, i)
elif S[i] == fs and ri != INF:
S[ri:i] = fs
ri = INF
kc += 1
if kc == K:
break
else:
if ri != INF and S[-1] == nfs:
S[ri:N] = fs
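    # Count adjacent pairs facing the same direction (the answer).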
happy = 0
for i in range(N - 1):
if S[i] == S[i + 1]:
happy += 1
print(happy)
if __name__ == '__main__':
solve()
| [
"[email protected]"
] | |
fb2dac07af82c220e6e4a2f95942ed4fa435a178 | 6ffa236a008d1cd1dc70f2c8ea0544d20ec350ee | /aries_cloudagent/messaging/issue_credential/v1_0/messages/credential_stored.py | 59973aa3320b3ad20a261d63f724ad3d305ab2b3 | [
"Apache-2.0"
] | permissive | blockpass-identity-lab/aries-fl-demo | 99e849f782dd80e729e3fe24c3af2881c5c49dca | 310b748c1ac2e814ec6f97c46ddbb9985584e0fc | refs/heads/master | 2022-07-06T18:37:16.007582 | 2020-04-23T15:48:33 | 2020-04-23T15:48:33 | 221,698,330 | 5 | 0 | Apache-2.0 | 2021-02-26T02:40:03 | 2019-11-14T12:58:58 | Python | UTF-8 | Python | false | false | 911 | py | """A credential stored message."""
from ....agent_message import AgentMessage, AgentMessageSchema
from ..message_types import CREDENTIAL_STORED
HANDLER_CLASS = (
"aries_cloudagent.messaging.issue_credential.v1_0.handlers."
"credential_stored_handler.CredentialStoredHandler"
)
class CredentialStored(AgentMessage):
"""Class representing a credential stored message."""
class Meta:
"""Credential metadata."""
handler_class = HANDLER_CLASS
schema_class = "CredentialStoredSchema"
message_type = CREDENTIAL_STORED
def __init__(self, **kwargs):
"""Initialize credential object."""
super(CredentialStored, self).__init__(**kwargs)
class CredentialStoredSchema(AgentMessageSchema):
"""Credential stored schema."""
class Meta:
"""Schema metadata."""
model_class = CredentialStored
| [
"[email protected]"
] | |
49239cd741a705842914498b8d8adcf755414d87 | 462e53caefc202f1e48f7a3891b27dad6d4032f1 | /src/networkcloud/azext_networkcloud/aaz/latest/networkcloud/clustermanager/_create.py | 17de61aba3e388ebf787cfed6f192b7718b26b70 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | bragi92/azure-cli-extensions | a910f14a0613f5611b08ed34ce8d67c1ad05013e | e9aebbcbd3df15fd874a32babc40ae1a0ba23c1f | refs/heads/k8s-extension/public | 2023-08-04T13:22:05.747918 | 2023-07-28T15:45:27 | 2023-07-28T15:45:27 | 205,455,084 | 0 | 0 | MIT | 2019-08-30T20:50:25 | 2019-08-30T20:50:25 | null | UTF-8 | Python | false | false | 16,035 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"networkcloud clustermanager create",
is_experimental=True,
)
class Create(AAZCommand):
"""Create a new cluster manager or update properties of the cluster manager if it exists.
:example: Create or update cluster manager
az networkcloud clustermanager create --name "clusterManagerName" --location "location" --analytics-workspace-id "/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/microsoft.operationalInsights/workspaces/logAnalyticsWorkspaceName" --fabric-controller-id "/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.ManagedNetworkFabric/networkFabricControllers/fabricControllerName" --managed-resource-group-configuration name="my-managed-rg" --tags key1="myvalue1" key2="myvalue2" --resource-group "resourceGroupName"
"""
_aaz_info = {
"version": "2022-12-12-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.networkcloud/clustermanagers/{}", "2022-12-12-preview"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.cluster_manager_name = AAZStrArg(
options=["-n", "--name", "--cluster-manager-name"],
help="The name of the cluster manager.",
required=True,
fmt=AAZStrArgFormat(
pattern="^([a-zA-Z0-9][a-zA-Z0-9-_]{0,28}[a-zA-Z0-9])$",
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
# define Arg Group "ClusterManagerParameters"
_args_schema = cls._args_schema
_args_schema.location = AAZResourceLocationArg(
arg_group="ClusterManagerParameters",
help="The geo-location where the resource lives",
required=True,
fmt=AAZResourceLocationArgFormat(
resource_group_arg="resource_group",
),
)
_args_schema.tags = AAZDictArg(
options=["--tags"],
arg_group="ClusterManagerParameters",
help="Resource tags.",
)
tags = cls._args_schema.tags
tags.Element = AAZStrArg()
# define Arg Group "Properties"
_args_schema = cls._args_schema
_args_schema.analytics_workspace_id = AAZStrArg(
options=["--analytics-workspace-id"],
arg_group="Properties",
help="The resource ID of the Log Analytics workspace that is used for the logs collection.",
)
_args_schema.availability_zones = AAZListArg(
options=["--availability-zones"],
arg_group="Properties",
help="Field deprecated, this value will no longer influence the cluster manager allocation process and will be removed in a future version. The Azure availability zones within the region that will be used to support the cluster manager resource.",
)
_args_schema.fabric_controller_id = AAZStrArg(
options=["--fabric-controller-id"],
arg_group="Properties",
help="The resource ID of the fabric controller that has one to one mapping with the cluster manager.",
required=True,
)
_args_schema.managed_resource_group_configuration = AAZObjectArg(
options=["--managed-resource-group-configuration"],
arg_group="Properties",
help="The configuration of the managed resource group associated with the resource.",
)
_args_schema.vm_size = AAZStrArg(
options=["--vm-size"],
arg_group="Properties",
help="Field deprecated, this value will no longer influence the cluster manager allocation process and will be removed in a future version. The size of the Azure virtual machines to use for hosting the cluster manager resource.",
)
availability_zones = cls._args_schema.availability_zones
availability_zones.Element = AAZStrArg()
managed_resource_group_configuration = cls._args_schema.managed_resource_group_configuration
managed_resource_group_configuration.location = AAZStrArg(
options=["location"],
help="The location of the managed resource group. If not specified, the location of the parent resource is chosen.",
)
managed_resource_group_configuration.name = AAZStrArg(
options=["name"],
help="The name for the managed resource group. If not specified, the unique name is automatically generated.",
fmt=AAZStrArgFormat(
max_length=75,
),
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.ClusterManagersCreateOrUpdate(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class ClusterManagersCreateOrUpdate(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200_201,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200, 201]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200_201,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/clusterManagers/{clusterManagerName}",
**self.url_parameters
)
@property
def method(self):
return "PUT"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"clusterManagerName", self.ctx.args.cluster_manager_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-12-12-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Content-Type", "application/json",
),
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
@property
def content(self):
_content_value, _builder = self.new_content_builder(
self.ctx.args,
typ=AAZObjectType,
typ_kwargs={"flags": {"required": True, "client_flatten": True}}
)
_builder.set_prop("location", AAZStrType, ".location", typ_kwargs={"flags": {"required": True}})
_builder.set_prop("properties", AAZObjectType, ".", typ_kwargs={"flags": {"required": True, "client_flatten": True}})
_builder.set_prop("tags", AAZDictType, ".tags")
properties = _builder.get(".properties")
if properties is not None:
properties.set_prop("analyticsWorkspaceId", AAZStrType, ".analytics_workspace_id")
properties.set_prop("availabilityZones", AAZListType, ".availability_zones")
properties.set_prop("fabricControllerId", AAZStrType, ".fabric_controller_id", typ_kwargs={"flags": {"required": True}})
properties.set_prop("managedResourceGroupConfiguration", AAZObjectType, ".managed_resource_group_configuration")
properties.set_prop("vmSize", AAZStrType, ".vm_size")
availability_zones = _builder.get(".properties.availabilityZones")
if availability_zones is not None:
availability_zones.set_elements(AAZStrType, ".")
managed_resource_group_configuration = _builder.get(".properties.managedResourceGroupConfiguration")
if managed_resource_group_configuration is not None:
managed_resource_group_configuration.set_prop("location", AAZStrType, ".location")
managed_resource_group_configuration.set_prop("name", AAZStrType, ".name")
tags = _builder.get(".tags")
if tags is not None:
tags.set_elements(AAZStrType, ".")
return self.serialize_content(_content_value)
def on_200_201(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200_201
)
_schema_on_200_201 = None
@classmethod
def _build_schema_on_200_201(cls):
if cls._schema_on_200_201 is not None:
return cls._schema_on_200_201
cls._schema_on_200_201 = AAZObjectType()
_schema_on_200_201 = cls._schema_on_200_201
_schema_on_200_201.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200_201.location = AAZStrType(
flags={"required": True},
)
_schema_on_200_201.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200_201.properties = AAZObjectType(
flags={"required": True, "client_flatten": True},
)
_schema_on_200_201.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_schema_on_200_201.tags = AAZDictType()
_schema_on_200_201.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200_201.properties
properties.analytics_workspace_id = AAZStrType(
serialized_name="analyticsWorkspaceId",
)
properties.availability_zones = AAZListType(
serialized_name="availabilityZones",
)
properties.cluster_versions = AAZListType(
serialized_name="clusterVersions",
flags={"read_only": True},
)
properties.detailed_status = AAZStrType(
serialized_name="detailedStatus",
flags={"read_only": True},
)
properties.detailed_status_message = AAZStrType(
serialized_name="detailedStatusMessage",
flags={"read_only": True},
)
properties.fabric_controller_id = AAZStrType(
serialized_name="fabricControllerId",
flags={"required": True},
)
properties.managed_resource_group_configuration = AAZObjectType(
serialized_name="managedResourceGroupConfiguration",
)
properties.manager_extended_location = AAZObjectType(
serialized_name="managerExtendedLocation",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.vm_size = AAZStrType(
serialized_name="vmSize",
)
availability_zones = cls._schema_on_200_201.properties.availability_zones
availability_zones.Element = AAZStrType()
cluster_versions = cls._schema_on_200_201.properties.cluster_versions
cluster_versions.Element = AAZObjectType()
_element = cls._schema_on_200_201.properties.cluster_versions.Element
_element.support_expiry_date = AAZStrType(
serialized_name="supportExpiryDate",
flags={"read_only": True},
)
_element.target_cluster_version = AAZStrType(
serialized_name="targetClusterVersion",
flags={"read_only": True},
)
managed_resource_group_configuration = cls._schema_on_200_201.properties.managed_resource_group_configuration
managed_resource_group_configuration.location = AAZStrType()
managed_resource_group_configuration.name = AAZStrType()
manager_extended_location = cls._schema_on_200_201.properties.manager_extended_location
manager_extended_location.name = AAZStrType(
flags={"required": True},
)
manager_extended_location.type = AAZStrType(
flags={"required": True},
)
system_data = cls._schema_on_200_201.system_data
system_data.created_at = AAZStrType(
serialized_name="createdAt",
)
system_data.created_by = AAZStrType(
serialized_name="createdBy",
)
system_data.created_by_type = AAZStrType(
serialized_name="createdByType",
)
system_data.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
)
system_data.last_modified_by = AAZStrType(
serialized_name="lastModifiedBy",
)
system_data.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
)
tags = cls._schema_on_200_201.tags
tags.Element = AAZStrType()
return cls._schema_on_200_201
class _CreateHelper:
"""Helper class for Create"""
__all__ = ["Create"]
| [
"[email protected]"
] | |
8b3a97ebe43ae145f472de830429cf5e306e5269 | 5c902cfea2856b5b591a9e4de4ecf7d66d01c3a0 | /백준/기초1/수학1/나머지.py | 36861f45235b2a9988962ca407e259b38e24cc23 | [] | no_license | VIXXPARK/pythonAlgorithm | 9cbedf1e9dc387756bed1793081be90e77daf9e8 | 8675fc0e078d90620ecf9dae95c1ccd6bcd36d37 | refs/heads/main | 2023-05-29T10:41:51.900075 | 2021-06-17T23:28:51 | 2021-06-17T23:28:51 | 316,072,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | import sys
a, b, c = map(int, sys.stdin.readline().rstrip().split())
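# The pairs of prints below confirm the modular identities
# (a+b)%c == ((a%c)+(b%c))%c and (a*b)%c == ((a%c)*(b%c))%c.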
print((a+b)%c)
print(((a%c)+(b%c))%c)
print((a*b)%c)
print(((a%c)*(b%c))%c)
| [
"[email protected]"
] |