blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 3–616) | content_id (stringlengths 40–40) | detected_licenses (listlengths 0–112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5–115) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringclasses, 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64, 4.92k–681M, nullable ⌀) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable ⌀) | gha_language (stringclasses, 149 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3–10.2M) | extension (stringclasses, 188 values) | content (stringlengths 3–10.2M) | authors (listlengths 1–1) | author_id (stringlengths 1–132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cb02c0864c123cdf0d4f5cb9a32740ba3ceb4d35 | a3926c09872e1f74b57431fbb3e711918a11dc0a | /python/array/1403_minimum_subsequence_in_non_increasing_order.py | 569a2d60dcf2044789138831a3d5a708757a6cfa | [
"MIT"
]
| permissive | linshaoyong/leetcode | e64297dc6afcebcee0614a153a566323bf223779 | 57080da5fbe5d62cbc0b8a34e362a8b0978d5b59 | refs/heads/main | 2022-09-15T00:05:36.476268 | 2022-08-16T14:09:11 | 2022-08-16T14:09:11 | 196,914,051 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | class Solution(object):
def minSubsequence(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
half = sum(nums) // 2
snums = sorted(nums, reverse=True)
s, k = 0, 0
for i, n in enumerate(snums):
s += n
k = i
if s > half:
break
return snums[:k + 1]
def test_min_subsequence():
s = Solution()
assert [10, 9] == s.minSubsequence([4, 3, 10, 9, 8])
assert [7, 7, 6] == s.minSubsequence([4, 4, 7, 6, 7])
assert [6] == s.minSubsequence([6])
| [
"[email protected]"
]
| |
84e3b4bc849e6265d6bf69e8105cdf2fc4a7b2fc | 35b0a18a89516ec84806ccb4662246673f109311 | /homework/utils/utils_hw4.py | 8ad12f3734ea88def8ee88e8468fd773013a1718 | []
| no_license | yqdch/MLDS-Note | 5db53236411c9f1f730f02734edc42437dd868d9 | d2800930ce501b4e12a439ac8cd6a75f0217c46d | refs/heads/master | 2021-09-22T01:20:03.243647 | 2018-09-04T15:32:14 | 2018-09-04T15:32:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | from torchvision import transforms
transform = transforms.Compose([transforms.ToPILImage(mode='RGB'),
transforms.Grayscale(1),
transforms.Resize((84, 84)),
transforms.ToTensor()])
# process the image..
def pre_process(x):
return transform(x)
| [
"[email protected]"
]
| |
3d806887d12bfa660bd7e99f1caf1eb68fedf828 | 847f40e0e8dc155ccec271073cbf7eb705d10f9f | /prob22.py | 14c35d156262eda9bbf8a1b6ce9a6fce9e25147f | []
| no_license | joemeens/guvi-1 | 6730e763d6555c91e352e0a45bf804485a155b2a | 5058c9319b36df87394e857cca8d8bc1e998bcc3 | refs/heads/master | 2020-06-25T15:11:12.296939 | 2019-07-25T15:41:43 | 2019-07-25T15:41:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | a=int(input(" "))
v=map(int,input(" ").split(" "))
print(max(v))
| [
"[email protected]"
]
| |
74fe39e241501dac412c523f88a7df4820066301 | 50d10939f20e140a116ef222671bc2abb56fa5fd | /daemon/test.py | 8673809400aada1d9fa3662d43dad8d965d3ea67 | [
"CC-BY-4.0"
]
| permissive | mattvenn/energy-wristband | 27d82c6c30bc8156aea42705b4ae2b17d9c1f55d | d5afa1f3b86fd9e1f323f82f7af7fb2adb28d1c6 | refs/heads/master | 2020-04-08T07:40:58.606406 | 2017-03-23T10:31:52 | 2017-03-23T10:31:52 | 26,497,101 | 3 | 3 | null | 2017-03-23T10:31:53 | 2014-11-11T17:59:39 | Python | UTF-8 | Python | false | false | 2,822 | py | import unittest
import time
from diff import diff_energy
import os
sens=50
max_energy=3000
max_time=30
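# Note: get_last_valid(e) evidently rejects sudden large jumps, returning the
# previous reading until max_time elapses (behavior inferred from the tests below).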
class Test_Diff(unittest.TestCase):
def setUp(self):
import logging
logging.basicConfig(level=logging.INFO)
self.d = diff_energy(logging, max_energy=max_energy, sens=sens, max_time=max_time)
def test_convert(self):
self.assertEqual(self.d.energy_to_div(max_energy),4)
self.assertEqual(self.d.energy_to_div(max_energy/5),1)
def test_convert_limits(self):
self.assertEqual(self.d.energy_to_div(-2*max_energy),1)
self.assertEqual(self.d.energy_to_div(2*max_energy),4)
def test_no_history(self):
self.assertEqual(self.d.get_last_valid(1),1)
def test_time_too_long(self):
now = time.time()
self.d.hist = {"t": now - max_time - 1, "e": 100}
self.assertEqual(self.d.get_last_valid(1),1)
def test_large_change_up(self):
now = time.time()
self.d.hist = {"t": now - max_time / 2, "e": 100}
self.assertEqual(self.d.get_last_valid(1500),100)
def test_large_change_down(self):
now = time.time()
self.d.hist = {"t": now - max_time / 2, "e": 2000}
self.assertEqual(self.d.get_last_valid(100),2000)
def test_ignore_large_slow_change(self):
e = 200
self.assertEqual(self.d.get_last_valid(e),e)
time.sleep(1.0)
e += sens
self.assertEqual(self.d.get_last_valid(e),e)
time.sleep(1.0)
e += sens
self.assertEqual(self.d.get_last_valid(e),e)
time.sleep(1.0)
e += sens
self.assertEqual(self.d.get_last_valid(e),e)
time.sleep(1.0)
e += sens
self.assertEqual(self.d.get_last_valid(e),e)
def test_ignore_small_change(self):
e = 200
self.assertEqual(self.d.get_last_valid(e),e)
time.sleep(0.5)
e += sens / 4
self.assertEqual(self.d.get_last_valid(e),e)
time.sleep(0.5)
e += sens / 4
self.assertEqual(self.d.get_last_valid(e),e)
time.sleep(0.5)
e += sens / 4
self.assertEqual(self.d.get_last_valid(e),e)
def test_small_change_over_boundary(self):
boundary = self.d.energy_per_div
now = time.time()
self.d.hist = {"t": now - 10, "e": boundary - 10}
time.sleep(1)
self.assertEqual(self.d.get_last_valid(boundary + 10),boundary + 10)
def test_no_repetition(self):
self.assertEqual(self.d.get_last_valid(200),200)
time.sleep(1)
self.assertEqual(self.d.get_last_valid(250),250)
time.sleep(1)
self.assertEqual(self.d.get_last_valid(3000),250)
time.sleep(1)
self.assertEqual(self.d.get_last_valid(2800),3000)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
cbde5fdd7190d1955e4101bdcf828544eb7389e6 | 145129d75d8255e609cdaf1bc9aa66424e4de0d1 | /pattern10.py | a3b75c166d4c647c813af35e6a4366f65cae4fe9 | []
| no_license | Neeraj-kaushik/coding_ninja | 7ca2d004b149ff0193400569864c48749e331aca | 23eb74bb8d4d80032b58c1408ac445aa87037a49 | refs/heads/master | 2022-11-07T19:43:05.869574 | 2020-06-19T17:31:34 | 2020-06-19T17:31:34 | 267,366,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | n=int(input())
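# Prints a shrinking number triangle; for n=3 the output is "123", "12", "1".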
i=1
while i<=n:
p=1
j=n
while j>=i:
print(p,end="")
j=j-1
p=p+1
print()
i=i+1 | [
"[email protected]"
]
| |
bb2496b1a827a445a662dae77a03705dc4c98659 | 852003f278d597a4a5e4cddfc12a480d563fb7db | /tests/settings.py | df4231c5aa2515f0bfd25444b2a2eafece753338 | [
"MIT"
]
| permissive | City-of-Helsinki/helsinki-profile-gdpr-api | 376faef774b673eaea543e92ca82eefb0b2c1a4a | 808dcd30a745f6d18cdf36ccaf07b0cd25844ab0 | refs/heads/main | 2023-04-29T12:20:32.747297 | 2023-04-18T12:18:34 | 2023-04-18T12:26:44 | 346,269,690 | 4 | 0 | MIT | 2023-04-19T05:28:03 | 2021-03-10T07:29:58 | Python | UTF-8 | Python | false | false | 1,244 | py | SECRET_KEY = "secret"
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}}
INSTALLED_APPS = (
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"helusers.apps.HelusersConfig",
"helusers.apps.HelusersAdminConfig",
"helsinki_gdpr",
"tests",
)
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "tests.urls"
AUTH_USER_MODEL = "tests.User"
GDPR_API_MODEL = "tests.Profile"
GDPR_API_QUERY_SCOPE = "testprefix.gdprquery"
GDPR_API_DELETE_SCOPE = "testprefix.gdprdelete"
DEBUG = True
USE_TZ = True
OIDC_API_TOKEN_AUTH = {
"AUDIENCE": "test_audience",
"ISSUER": "https://test_issuer_1",
"REQUIRE_API_SCOPE_FOR_AUTHENTICATION": False,
"API_AUTHORIZATION_FIELD": "",
"API_SCOPE_PREFIX": "",
}
| [
"[email protected]"
]
| |
755da0bc4ea001022621c3901a91605255fac548 | f75609812d20d46a9f94ee0cfdb91c321d26b63d | /flask/flask_fundamentals/Dojo_Survey/server.py | 019a971b2dca48d2d6f92ada60c9697c8990bd46 | []
| no_license | IanAranha/Python2021 | eff47a20451f61b144b17f48321a7b06308aadca | d9769b8b387b77753b77f6efe3a9a270a1f158d3 | refs/heads/main | 2023-04-02T08:20:24.382913 | 2021-04-10T22:27:10 | 2021-04-10T22:27:10 | 345,918,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | from flask import Flask, render_template, redirect, request
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/users", methods={"POST"})
def results():
return render_template("result.html")
@app.route("/back", methods=["post"])
def back():
return redirect("/")
@app.route("/danger")
def danger():
print('A user tried to visit /danger. We have redirected the user to /')
return redirect("/")
if __name__ == "__main__":
app.run(debug=True) | [
"[email protected]"
]
| |
9dd52a5dbaf126ed8780d8d3fabfc6508c0af245 | 32226e72c8cbaa734b2bdee081c2a2d4d0322702 | /visualization/grill/pusher_reward_type_ablation.py | 745f1dc8766a01ca39a8d6d7bf1d0f8fa7e1bc90 | [
"MIT"
]
| permissive | Asap7772/rail-rl-franka-eval | 2b1cbad7adae958b3b53930a837df8a31ab885dc | 4bf99072376828193d05b53cf83c7e8f4efbd3ba | refs/heads/master | 2022-11-15T07:08:33.416025 | 2020-07-12T22:05:32 | 2020-07-12T22:05:32 | 279,155,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | import matplotlib
from visualization.grill.config import (
output_dir,
ashvin_base_dir,
format_func,
configure_matplotlib,
)
import matplotlib.pyplot as plt
from railrl.visualization import plot_util as plot
configure_matplotlib(matplotlib)
f = plot.filter_by_flat_params(
{'replay_kwargs.fraction_goals_are_env_goals': 0.5})
exps = plot.load_exps([
ashvin_base_dir + 's3doodad/share/steven/pushing-multipushing/pusher-reward-variants'],
f, suppress_output=True)
plot.tag_exps(exps, "name", "dsae")
plot.comparison(exps,
["Final puck_distance Mean", "Final hand_distance Mean"],
figsize=(6, 4),
vary=["vae_wrapped_env_kwargs.reward_params.type"],
default_vary={"reward_params.type": "unknown"},
smooth=plot.padded_ma_filter(10),
xlim=(0, 250000), ylim=(0.15, 0.22), method_order=None)
plt.gca().xaxis.set_major_formatter(plt.FuncFormatter(format_func))
plt.xlabel("Timesteps")
plt.ylabel("")
plt.title("Visual Pusher")
plt.legend([])
plt.tight_layout()
plt.savefig(output_dir + "pusher_reward_type_ablation.pdf")
| [
"[email protected]"
]
| |
164292f20f8de66cf509569b2cdaffd15af4baee | b9a6440766ac6d09cbe5bcb0dd9ec035e79b68de | /0x0F-python-object_relational_mapping/1-filter_states.py | 6f2411d250a3d57fbab1662f954bca9cb3995269 | []
| no_license | zulsb/holbertonschool-higher_level_programming | aa684ce2bad9f583dd54224e7cb1d60d2189b229 | 0a23d2ffc4ec5810213b6fcd82732f221c97a553 | refs/heads/master | 2021-06-25T15:16:48.849508 | 2021-05-23T00:07:13 | 2021-05-23T00:07:13 | 226,905,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | #!/usr/bin/python3
"""
Script that lists states with a name starting with N (upper N)
from the database hbtn_0e_0_usa.
"""
import MySQLdb
from sys import argv
if __name__ == "__main__":
"""Connect to database."""
conec_db = MySQLdb.connect(
host="localhost",
port=3306,
user=argv[1],
passwd=argv[2],
db=argv[3])
"""Create cursor to exec queries using SQL."""
cursor = conec_db.cursor()
cursor.execute("""SELECT * FROM states
WHERE name LIKE BINARY 'N%' ORDER BY id ASC""")
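    # LIKE BINARY makes the comparison case-sensitive, so only names
    # starting with a capital N are returned.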
for row in cursor.fetchall():
print(row)
cursor.close()
conec_db.close()
| [
"[email protected]"
]
| |
d7024372dfb48700aef449f128ccb72330a581de | 77d6ae92c38d56f2aa7a57fd24dd97bec6fa6cc4 | /blog/admin.py | a94a9be0c5919ee1f095e4b3bb1ae4e28bfb1c13 | []
| no_license | Gentility01/my-project | aaab030abad105094aa1c0206995a95a756448be | 9cd0805ffc7a02b811f6481ad79bda8e4b14a786 | refs/heads/master | 2023-06-17T13:39:01.353408 | 2021-07-07T01:20:59 | 2021-07-07T01:20:59 | 383,476,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | from django.contrib import admin
from .models import Post,Like,Comment
# Register your models here.
admin.site.register(Post)
admin.site.register(Like)
admin.site.register(Comment)
# admin.site.register(Post_pictures) | [
"[email protected]"
]
| |
5d1e60ce7008a10f3d47c22c09d40c60ab591b0f | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/surface/looker/instances/export.py | 5b33b981ed98210dc6b0a5035626ffad0122cc93 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 2,190 | py | # -*- coding: utf-8 -*- #
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export a Looker instance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.looker import instances
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.looker import flags
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.GA)
class Export(base.Command):
"""Export a Looker instance.
This command can fail for the following reasons:
* The instance specified does not exist.
* The active account does not have permission to access the given
instance.
* The Google Cloud Storage bucket does not exist.
"""
detailed_help = {'EXAMPLES': """\
To export an instance with the name `my-looker-instance` in the default
region, run:
$ {command} my-looker-instance --target-gcs-uri='gs://bucketName/folderName'
--kms-key='projects/my-project/locations/us-central1/keyRings/my-key-ring/cryptoKeys/my-key'
Note that the kms-key flag should be the full name of the kms key.
"""}
@staticmethod
def Args(parser):
"""Register flags for this command."""
flags.AddExportInstanceArgs(parser)
def Run(self, args):
instance_ref = args.CONCEPTS.instance.Parse()
op = instances.ExportInstance(instance_ref, args, self.ReleaseTrack())
log.status.Print(
'Export request issued for: [{}]\n'
'Check operation [{}] for status.'.format(args.instance, op.name)
)
return op
| [
"[email protected]"
]
| |
e6d17a0f40ceb577e98f6ae04e3eae4ea842f9db | be51250bcf59e1b47ed417c45e203c50aa233aae | /dojo-python-flask-mysql/pr1/servererr.py | fb918ee40f7bd663d312ead3bde46ed6a7560cc1 | []
| no_license | shoredata/dojo-python | 4f064c76632bf94a385bb9f552562eb5640398b2 | e7b7a542fa086088252ce92257f37c4b5eedd0c4 | refs/heads/master | 2020-05-02T22:07:31.547469 | 2019-03-28T17:44:06 | 2019-03-28T17:44:06 | 178,242,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | from flask import Flask
# import the function connectToMySQL from the file mysqlconnection.py
from mysqlconnerr import connectToMySQL
app = Flask(__name__)
# invoke the connectToMySQL function and pass it the name of the database we're using
# connectToMySQL returns an instance of MySQLConnection, which we will store in the variable 'mysql'
mysql = connectToMySQL('mydb')
# now, we may invoke the query_db method
query = "SELECT * FROM users;"
sqldata = mysql.query_db(query)
# print("all the users", sqldata)
print("mysql>>> mydb::users::"+query)
for i in sqldata:
print(i)
# for k in i:
# print(i,k,i[k])
if __name__ == "__main__":
app.run(debug=True) | [
"[email protected]"
]
| |
d623fe68c6326fee1aba92885d989524184c5f2d | 87d5b21265c381104de8f45aa67842a4adc880eb | /486. Predict the Winner.py | 447e9a702208ffb5145c18af27a90a00db128a70 | []
| no_license | MYMSSENDOG/leetcodes | ac047fe0d951e0946740cb75103fc94aae967166 | 8a52a417a903a0742034161471a084bc1e494d68 | refs/heads/master | 2020-09-23T16:55:08.579319 | 2020-09-03T19:44:26 | 2020-09-03T19:44:26 | 225,543,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | class Solution:
def PredictTheWinner(self, nums):
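        # dp[l][r] is the best score margin for the player to move when l
        # numbers have been taken from the left and r from the right of nums.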
n = len(nums)
dp = [[0] * (n+1) for _ in range(n+1)]
for i in range(n-1,-1,-1):
for j in range(i+1):
l = j
r = i-j
dp[l][r] = max(nums[l] - dp[l+1][r], nums[-r-1] - dp[l][r + 1])
return dp[0][0] >= 0
sol = Solution()
nums = [1,5,233,7]
print(sol.PredictTheWinner(nums)) | [
"[email protected]"
]
| |
46d2e6cb4678a7bb86f2a93e3287a37554d642b4 | 0466559817d3a1be9409da2c83db99c4db3bacfe | /hubcheck/pageobjects/widgets/tags_view_form.py | 9dfbebcaaa5bac24ee44bdb846f38977e4b932c8 | [
"MIT"
]
| permissive | ken2190/hubcheck | 955cf9b75a1ee77e28256dfd3a780cfbc17de961 | 2ff506eb56ba00f035300862f8848e4168452a17 | refs/heads/master | 2023-03-20T15:17:12.949715 | 2015-09-29T16:11:18 | 2015-09-29T16:11:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,682 | py | from hubcheck.pageobjects.basepagewidget import BasePageWidget
import re
class TagsViewForm(BasePageWidget):
def __init__(self, owner, locatordict={}):
super(TagsViewForm,self).__init__(owner,locatordict)
# load hub's classes
TagsViewForm_Locators = self.load_class('TagsViewForm_Locators')
TagSearchBox = self.load_class('TagSearchBox')
SortOrderOptions = self.load_class('SortOrderOptions')
SearchResults = self.load_class('SearchResults')
TagsViewResultsRow = self.load_class('TagsViewResultsRow')
ListPageNav = self.load_class('ListPageNav')
# update this object's locator
self.locators.update(TagsViewForm_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.searchbox = TagSearchBox(self,
{
'base' : 'searchbox',
'tags' : 'tags',
'tagsac' : 'tagsac',
'tagsacchoices' : 'tagsacchoices',
'tagsactoken' : 'tagsactoken',
'tagsacdelete' : 'tagsacdelete',
'submit' : 'submit',
})
self.sortoptions = SortOrderOptions(self,
{
'base' : 'sortoptions',
'date' : 'sortbydate',
'title' : 'sortbytitle',
})
self.footer = ListPageNav(self,{'base':'footer'})
self.search_results = SearchResults(self,
{
'base' : 'searchresults',
'counts' : 'sr_counts',
'row' : 'sr_row',
'substrow' : 'sr_substrow',
}, TagsViewResultsRow,
{
'src_title' : 'title',
'src_text' : 'text',
'src_href' : 'href',
})
# update the component's locators with this objects overrides
self._updateLocators()
def search_for(self,terms):
return self.searchbox.search_for(terms)
def goto_page_number(self,pagenumber):
return self.footer.goto_page_number(pagenumber)
def goto_page_relative(self,relation):
return self.footer.goto_page_relative(relation)
def get_caption_counts(self):
return self.search_results.header_counts()
def get_pagination_counts(self):
return self.footer.get_pagination_counts()
def get_current_page_number(self):
return self.footer.get_current_page_number()
def get_link_page_numbers(self):
return self.footer.get_link_page_numbers()
def search_result_rows(self):
return iter(self.search_results)
class TagsViewForm_Locators_Base(object):
"""locators for TagsViewForm object"""
locators = {
'base' : "css=#main form",
'searchbox' : "css=.data-entry",
'tags' : "css=#actags",
'tagsac' : "css=#token-input-actags",
'tagsacchoices' : "css=.token-input-dropdown-act",
'tagsactoken' : "css=.token-input-token-act",
'tagsacdelete' : "css=.token-input-delete-token-act",
'submit' : "css=.entry-search-submit",
'sortoptions' : "css=.entries-menu",
'sortbytitle' : "css=.entries-menu a[title='Sort by title']",
'sortbydate' : "css=.entries-menu a[title='Sort by newest to oldest']",
'footer' : "css=.list-footer",
# 'searchresults' : "css=#search .results",
'searchresults' : "css=.container-block",
'sr_substrow' : "css=#search .results li:nth-of-type({row_num})",
'sr_row' : "css=#search .results li",
'sr_counts' : "css=#rel-search span",
'src_title' : "css=#search .results li:nth-of-type({row_num}) .title",
'src_text' : "css=#search .results li:nth-of-type({row_num}) p:nth-of-type(2)",
'src_href' : "css=#search .results li:nth-of-type({row_num}) .href",
}
| [
"[email protected]"
]
| |
ef45fa15c61956b4c8f11b18890b9b8cf99f1ac7 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_tremble.py | c57447f66e5255d1408b3edd615cc3538b9fba11 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py |
#calss header
class _TREMBLE():
def __init__(self,):
self.name = "TREMBLE"
self.definitions = [u'to shake slightly, usually because you are cold, frightened, or very emotional: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
]
| |
7aabaa691f488bd199bfc9e87b7c5d6d5fc3cd09 | 5456502f97627278cbd6e16d002d50f1de3da7bb | /components/cronet/android/DEPS | b2ea5aa4bb5e57257706ea726a0e307f850883f7 | [
"BSD-3-Clause"
]
| permissive | TrellixVulnTeam/Chromium_7C66 | 72d108a413909eb3bd36c73a6c2f98de1573b6e5 | c8649ab2a0f5a747369ed50351209a42f59672ee | refs/heads/master | 2023-03-16T12:51:40.231959 | 2017-12-20T10:38:26 | 2017-12-20T10:38:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | include_rules = [
"+components/data_reduction_proxy",
"+components/metrics",
"+crypto",
"+jni",
]
| [
"[email protected]"
]
| ||
f22f021684ba14c9aea722e7b575fa83cc9e8627 | 907eecc07842e12cfef8d7dcc367052c528a7e45 | /python/test_builder_of_things.py | c3c788e8bf65bc9f1c47bb67bc6c1db7aadd6d01 | []
| no_license | desertSniper87/codewars | 0126a6962a7e22f9fd2e649c8a4abe2649d1988d | 03f918e818a1c5a8b68ff23e24ad09698c32346b | refs/heads/master | 2021-05-05T04:41:20.424603 | 2021-04-24T16:31:52 | 2021-04-24T16:31:52 | 118,615,045 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,648 | py | import unittest
from builder_of_things import *
class TestClass(unittest.TestCase):
def testName(self):
jane = Thing('Jane')
self.assertEqual(jane.name, 'Jane')
self.assertEqual(jane.is_a_woman, True)
self.assertEqual(jane.is_a_man, False)
def testArms(self):
jane = Thing('Jane')
self.assertEqual(isinstance(jane.arms, tuple), True)
self.assertEqual(len(jane.arms), 2)
self.assertEqual(all(isinstance(v, Thing) for v in jane.arms), True)
self.assertEqual(all(v.name=="arm" for v in jane.arms), True)
self.assertEqual(all(v.is_arm for v in jane.arms), True)
self.assertEqual(len(jane.arms), 2)
self.assertEqual(all(isinstance(v, Thing) for v in jane.arms), True)
def testHead(self):
jane = Thing('Jane')
self.assertEqual(isinstance(jane.head, Thing), True)
self.assertEqual(jane.head.name, "head")
def testEyes(self):
jane = Thing('Jane')
self.assertEqual(len(jane.head.eyes), 2)
self.assertEqual(all(isinstance(v, Thing) for v in jane.head.eyes), True)
self.assertEqual(all(v.name=='eye' for v in jane.head.eyes), True)
def testFingers(self):
jane = Thing('Jane')
self.assertEqual(all(len(v.fingers)==5 for v in jane.arms), True)
def testParent(self):
jane = Thing('Jane')
self.assertEqual(jane.parent_of, "joe")
def testEyeColor(self):
jane = Thing('Jane')
self.assertEqual(all(v.color=='blue' for v in jane.head.eyes), True)
def testEyeShape(self):
jane = Thing('Jane')
self.assertEqual(all(v.color=='blue' for v in jane.eyes), True)
self.assertEqual(all(v.shape=='round' for v in jane.eyes), True)
def testEyesColor(self):
jane = Thing('Jane')
self.assertEqual(all(v.color=='green' for v in jane.eyes), True)
self.assertEqual(all(v.pupil.color=='black' for v in jane.eyes), True)
# def testSpeech(self):
# jane = Thing('Jane')
# def fnc(phrase):
# return "%s says: %s" % (name, phrase)
# jane.can.speak(fnc)
# self.assertEqual(jane.speak('hi'), "Jane says: hi")
# def testSpeech2(self):
# jane = Thing('Jane')
# fnc = lambda phrase: "%s says: %s" % (name, phrase)
# jane.can.speak(fnc, 'spoke')
# jane.speak('hi')
# self.assertEqual(jane.spoke, ["Jane says: hi"])
# jane.speak('goodbye')
# self.assertEqual(jane.spoke, ["Jane says: hi", "Jane says: goodbye"])
def main():
unittest.main()
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
b1041408c8d0b9d51353afa02be2a9407d84e133 | edb9dce04a0e1c7cae6a4fe54c8dc89cef69d8f0 | /Chapter-05/pandas_handlemissingdata.py | 8c1dc0ad66881643d36a5514b833e4bfacaa3926 | []
| no_license | lovejing0306/PythonforDataAnalysis | a4a38dbc0d24e69a5469c662be7e5cfdeba057cb | be8d4862acc7538b52379cec36047b1c5bd48b3e | refs/heads/master | 2021-06-09T20:40:04.157547 | 2016-11-20T02:23:07 | 2016-11-20T02:23:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,801 | py | # coding=utf-8
from pandas import DataFrame, Series
import pandas as pd
import numpy as np
# Test
if __name__ == '__main__':
string_data = Series(['aardvark', 'artichoke', np.nan, 'avocado'])
print(string_data)
print(string_data.isnull())
    string_data[0] = None  # Python's built-in None is also treated as NaN
print(string_data.isnull())
print(string_data.notnull())
    ## Filter out missing data
# Series
data = Series([1, np.nan, 3.5, np.nan, 7])
print(data)
print(data.dropna())
    print(data[data.notnull()])  # boolean indexing achieves the same effect
# DataFrame
data = DataFrame([[1., 6.5, 3.],
[1., np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 6.5, 3.]])
print(data)
    print(data.dropna())  # by default, drop rows containing any NaN
    print(data.dropna(how='all'))  # drop only rows that are entirely NaN
data[4] = np.nan
print(data)
    print(data.dropna(axis=1))  # drop columns instead of rows
print(data.dropna(axis=1, how='all'))
df = DataFrame(np.random.randn(7,3))
df.ix[:4, 1] = np.nan
df.ix[:2, 2] = np.nan
print(df)
print(df.dropna())
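    # thresh=3 keeps only rows that have at least 3 non-NaN values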
print(df.dropna(thresh=3))
    ## Fill in missing data
print(df.fillna(0))
    print(df.fillna({1:0.5, 2:-1}))  # a dict fills different columns with different values
dfInplace = df.fillna(0, inplace=True)
    print(dfInplace)  # fillna returns a new object by default, but it can also modify the existing object in place
print(df)
df = DataFrame(np.random.randn(6,3))
print(df)
df.ix[2:, 1] = np.nan
df.ix[3:, 2] = np.nan
print(df)
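    # method='ffill' propagates the last valid value forward; limit=2 caps each fill run at two rows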
print(df.fillna(method='ffill'))
print(df.fillna(method='ffill', limit=2))
data = Series([1., np.nan, 3.5, np.nan, 7])
print(data.fillna(data.mean())) | [
"[email protected]"
]
| |
c8ef1d94e1d70ae6da235286cc8081214dd6866f | e71b6d14fbdbc57c7234ca45a47329d7d02fc6f7 | /flask_api/venv/lib/python3.7/site-packages/vsts/release/v4_1/models/release_definition_gate.py | 45dbf84e28f1c28054605140573590d08557e663 | []
| no_license | u-blavins/secret_sasquatch_society | c36993c738ab29a6a4879bfbeb78a5803f4f2a57 | 0214eadcdfa9b40254e331a6617c50b422212f4c | refs/heads/master | 2020-08-14T00:39:52.948272 | 2020-01-22T13:54:58 | 2020-01-22T13:54:58 | 215,058,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ReleaseDefinitionGate(Model):
"""ReleaseDefinitionGate.
:param tasks:
:type tasks: list of :class:`WorkflowTask <release.v4_1.models.WorkflowTask>`
"""
_attribute_map = {
'tasks': {'key': 'tasks', 'type': '[WorkflowTask]'}
}
def __init__(self, tasks=None):
super(ReleaseDefinitionGate, self).__init__()
self.tasks = tasks
| [
"[email protected]"
]
| |
ac54da57e3eea33615811d6513ec2872ea1f784b | 72af42076bac692f9a42e0a914913e031738cc55 | /01, 특강_210705_0706/02, source/CookData(2021.01.15)/Code14-01.py | ba3157038d2415dc09aa47adb6edb5f1ec20f74f | []
| no_license | goareum93/Algorithm | f0ab0ee7926f89802d851c2a80f98cba08116f6c | ec68f2526b1ea2904891b929a7bbc74139a6402e | refs/heads/master | 2023-07-01T07:17:16.987779 | 2021-08-05T14:52:51 | 2021-08-05T14:52:51 | 376,908,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | ## Function declarations ##
def knapsack():
    print('## Memoization array ##')
    array = [[0 for _ in range(maxWeight+1)] for _ in range(rowCount+1)] # create an empty array, all zeros
    for row in range(1, rowCount+1) : # jewels 1~4 (4 iterations)
        print(row, 'item(s) -->', end = ' ')
        for col in range(1, maxWeight+1) : # capacities 1kg ~ 7kg
            if weight[row] > col : # if the item's weight exceeds the column == the item doesn't fit in the bag
                array[row][col] = array[row-1][col]
            else : # if the item's weight is at most the capacity col
                value1 = money[row] + array[row-1][col-weight[row]] # case 1-1 in each figure
                value2 = array[row-1][col] # case 1-2 in each figure
array[row][col] = max(value1, value2)
print('%2d' % array[row][col], end = ' ')
print()
return array[rowCount][maxWeight]
## Global variable declarations ##
maxWeight = 7 # maximum knapsack weight
rowCount = 4 # number of jewels
weight = [0, 6, 4, 3, 5] # jewel weights (0, gold bar, crystal, ruby, pearl)
money = [0, 13, 8, 6, 12] # jewel prices (0, gold bar, crystal, ruby, pearl)
## Main code ##
maxValue = knapsack()
print()
print('Maximum value of jewels the knapsack can hold -->', maxValue, 'hundred million won')
| [
"[email protected]"
]
| |
381c39adca87bdc4c70516ba84929b4bbb345c7f | 73cacd0f22036bec4aa147f7c26961b4b991af22 | /castero/datafile.py | 99b25f7030014950516c1844bb0c4d828d3fd6fa | [
"MIT"
]
| permissive | kyokley/castero | cb15fe45fc84547ad1e6dcb1afb2181bdae86146 | 6998e3cbdd722efe53fdc23bb4bb46750dad2d8d | refs/heads/master | 2022-11-06T01:48:06.925790 | 2020-06-11T01:53:50 | 2020-06-11T01:53:50 | 272,463,362 | 0 | 0 | MIT | 2020-06-15T14:39:10 | 2020-06-15T14:39:09 | null | UTF-8 | Python | false | false | 4,245 | py | import collections
import os
import requests
from shutil import copyfile
import castero
from castero.net import Net
class DataFile:
"""Extendable class for objects with filesystem data.
Used when handling files with data that can reasonably be stored in a
dictionary. Particularly used in the Config class and the Feeds class.
Extended by classes which are based on a data file.
"""
PACKAGE = os.path.dirname(__file__)
HOME = os.path.expanduser('~')
XDG_CONFIG_HOME = os.getenv('XDG_CONFIG_HOME',
os.path.join(HOME, '.config'))
XDG_DATA_HOME = os.getenv('XDG_DATA_HOME',
os.path.join(HOME, '.local', 'share'))
CONFIG_DIR = os.path.join(XDG_CONFIG_HOME, castero.__title__)
DATA_DIR = os.path.join(XDG_DATA_HOME, castero.__title__)
DEFAULT_DOWNLOADED_DIR = os.path.join(DATA_DIR, "downloaded")
def __init__(self, path, default_path) -> None:
"""
Args:
path: the path to the data file
default_path: the path to the default data file
"""
assert os.path.exists(default_path)
self.data = collections.OrderedDict()
self._path = path
self._default_path = default_path
# if path doesn't exit, create it based on default_path
if not os.path.exists(self._path):
DataFile.ensure_path(self._path)
copyfile(self._default_path, self._path)
def __iter__(self) -> iter:
"""Iterator for the keys of self.data
In order to iterate over data values, you should use something like:
for key in file_instance:
value = file_instance[key]
"""
return self.data.__iter__()
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, item):
if item in self.data:
return self.data[item]
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
self.data.pop(key, None)
@staticmethod
def ensure_path(filename):
"""Ensure that the path to the filename exists, creating it if needed.
"""
path = os.path.dirname(filename)
if not os.path.exists(path):
os.makedirs(path)
@staticmethod
def download_to_file(url, file, name, download_queue, display=None):
"""Downloads a URL to a local file.
Args:
url: the source url
file: the destination path
name: the user-friendly name of the content
download_queue: the download_queue overseeing this download
display: (optional) the display to write status updates to
"""
chunk_size = 1024
chuck_size_label = "KB"
try:
response = Net.Get(url, stream=True)
except requests.exceptions.RequestException as e:
if display is not None:
display.change_status("RequestException: %s" % str(e))
download_queue.next()
return
else:
handle = open(file, "wb")
downloaded = 0
for chunk in response.iter_content(chunk_size=chunk_size):
if display is not None:
status_str = "Downloading \"%s\": %d%s" % (
name, downloaded / chunk_size, chuck_size_label
)
if download_queue.length > 1:
status_str += " (+%d downloads in queue)" % \
(download_queue.length - 1)
display.change_status(status_str)
if chunk:
handle.write(chunk)
downloaded += len(chunk)
if display is not None:
display.change_status("Episode successfully downloaded.")
display.menus_valid = False
download_queue.next()
def load(self) -> None:
"""Loads the data file.
Should be implemented by classes which extend this class.
"""
pass
def write(self) -> None:
"""Writes to the data file.
Should be implemented by classes which extend this class.
"""
pass
| [
"[email protected]"
]
| |
bab08a4477751c2f1fc761d6c0504c5f4dfaba39 | 1e9c67785cd2a07fbd12b63bd93a2eba2272f237 | /image_task_kg/make_KG.py | 22d5463b6fa171d09e5cc20e81cda93da2ed656a | []
| no_license | monisha-jega/mmd | 2975d0f77bce4db38795fa201f515f35498f0eb3 | d4f9d2c94409c2877ff5a5a2242e7e7ed2f87921 | refs/heads/master | 2022-07-20T17:01:39.043859 | 2020-05-16T23:31:35 | 2020-05-16T23:31:35 | 264,543,426 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,850 | py | from __future__ import print_function
import json, os, pickle
from parameters import *
def convert_json(the_json):
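    # normalize all JSON keys to lowercase so feature lookups below are case-insensitive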
new_json = {}
for key, val in the_json.items():
new_json[key.lower()] = val
return new_json
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
domain_features = [
['price'],
#['style'],
['type'],
['fit'],
#['brand'],
['gender'],
#['neck'],
['material', 'fabric'],
#['length'],
#['sleeves'],
#['model_worn'],
['currency'],
['color']
]
num_features = len(domain_features)
feature_existence_count = [0 for i in range(num_features)]
root_dir = "../../raw_catalog/"
dirlist = [root_dir + name for name in ['public_jsons', 'public_jsons (2)', 'public_jsons (3)', 'public_jsons (4)']]
count = 0
KG = {}
for diri in dirlist[:]:
print(len(os.listdir(diri)))
for json_file in os.listdir(diri):
#print("ok")
the_json = convert_json(json.load(open(diri +"/" + json_file)))
feature_vec = ["" for i in range(num_features)]
for l in range(num_features):
feature_names = domain_features[l]
for feature_name in feature_names:
if feature_name in the_json:
if the_json[feature_name] == "" or (l == 0 and (not is_int(the_json[feature_name]) or int(the_json[feature_name]) == 0)):
pass
else:
feature_vec[l] = the_json[feature_name]
feature_existence_count[l] += 1
KG[the_json["image_filename"]] = feature_vec
for orientation, links in the_json['image_filename_all'].items():
for link in links:
KG[link] = feature_vec
count += 1
if count%20000 == 0:
print(count, end="")
print(" ")
print()
print(feature_existence_count)
json.dump(KG, open(data_dump_dir+"image_kg.json", "wb"))
pickle.dump(KG, open(data_dump_dir+"image_kg.pkl", "wb"))
| [
"[email protected]"
]
| |
da7b653eafb429de4d61b697f9d80f9c7895357e | 075ccb25d98e2e55adbbaf0723b99158747bf172 | /nickles_and_dimes.py | cc7764bfda7d891fcabc1e2ed4c5b18154d36da3 | []
| no_license | Th3Lourde/Mathematical_Modeling | 6863694d7a1526cfb665ecf32189522da1d2ee83 | f890273305263a90ac1b18c9fc12ad1aa70b23cf | refs/heads/master | 2020-04-24T01:23:26.447512 | 2019-04-08T02:38:56 | 2019-04-08T02:38:56 | 171,593,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
'''
11 coins
70 cents
nickles and dimes
'''
total = 70
coin_key = []
n = 5
d = 10
# Start with one dime and all nickles
# Hypothesis: 3 dimes, 8 nickles
# Done
# Equation: 5x+10y = 70
# Equation: x + y = 11
# .
# .
# .
| [
"[email protected]"
]
| |
47fb4ad7eb88aa972c8ada25f83b2c9c0ba6d233 | eebacbc58a1c99fb6e32f8cd56cac6e18947d3e7 | /3.advanced_features/1.advanced_features.py | 3dd7fc0bc7d1f5260a24d7e40a67c53d99fe69b8 | []
| no_license | fzingithub/LearnPythonFromLiao | ad7f959d7e667a464f2b9a6b1cedfd0f08baaf8e | fcb0f2e7f905aca253b3986c4a1ceab6b82b7cae | refs/heads/master | 2020-03-29T19:37:32.831341 | 2018-09-27T10:39:11 | 2018-09-27T10:39:11 | 150,273,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 21 18:43:50 2018
@author: FZ
"""
#make a list of odd numbers 1...99
L = list(range(1,100,2))
print (L)
#高级特性,1行代码能实现的功能,决不写5行代码。请始终牢记,代码越少,开发效率越高。
| [
"[email protected]"
]
| |
3fc3f033f79febb3ec41acc993e537e72912483a | 15373eaa353e8aece47a26741b7fb27795268bf6 | /medium/833_find_and_replace_in_string.py | 7ef69a2ccd6f43a06373517a1eba3a6f382b014a | []
| no_license | esddse/leetcode | e1a9bacf04c68a8d642a1e53c90e6c2dda2c1980 | 0ceccdb262149f7916cb30fa5f3dae93aef9e9cd | refs/heads/master | 2021-06-08T19:15:14.346584 | 2020-01-09T01:41:23 | 2020-01-09T01:41:23 | 109,675,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | class Solution:
def findReplaceString(self, S: str, indexes: List[int], sources: List[str], targets: List[str]) -> str:
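        # Walk the replacements in index order, copying the untouched text
        # between them and substituting only where the source actually matches.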
replacements = sorted(zip(indexes, sources, targets), key=lambda item:item[0])
length = len(S)
start = 0
new_S = ""
for idx, source, target in replacements:
if idx >= length:
break
new_S += S[start:idx]
sl = len(source)
if S[idx:idx+sl] == source:
new_S += target
start = idx+sl
else:
start = idx
new_S += S[start:]
return new_S | [
"[email protected]"
]
| |
48cad96ba6767e03054cc6e5d3e7016bfe571a01 | 8ce721977eedb413cec325af9746c3aa778fe1cd | /vbclient/tests/common/test_manager.py | 6f3a10438cf52934342791a7d918c041808f14cb | [
"Apache-2.0"
]
| permissive | Huawei/OpenStackClient_VBS | a1618b5d5e0134affbb062da55b7aae79eead376 | 98834c9425af3e8bff6e2db4602c3f7bd72829bd | refs/heads/master | 2021-01-11T23:30:57.494128 | 2017-04-17T09:50:18 | 2017-04-17T09:50:18 | 78,592,078 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,774 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
import six
from vbclient.common import httpclient
from vbclient.common import resource
from vbclient.tests import base
from vbclient.tests import fakes
class TestManager(base.BaseTestCase):
_headers = {"Accept": "application/json"}
def __init__(self, *args, **kwargs):
super(TestManager, self).__init__(*args, **kwargs)
self.instance = None
self.resource = None
self.manager = None
def setUp(self):
super(TestManager, self).setUp()
http_client = httpclient.OpenStackHttpClient(mock.MagicMock())
self.manager = fakes.FakeManager(http_client)
self.instance = dict(uuid=fakes.FAKE_RESOURCE_ID,
name=fakes.FAKE_RESOURCE_NAME)
self.resource = fakes.FakeResource(None, self.instance)
@mock.patch("keystoneauth1.adapter.Adapter.request")
def test_manager_get(self, mocked):
mocked.return_value = fakes.create_response(self.instance)
result = self.manager.get(self.resource)
self.assertTrue(result.has_attached())
self.assertIsInstance(result, resource.Resource)
expect_url = (fakes.FAKE_RESOURCE_ITEM_URL % fakes.FAKE_RESOURCE_ID)
mocked.assert_called_once_with(expect_url,
"GET",
params={},
headers=self._headers)
result = self.manager.get(self.resource, raw=True)
self.assertIsInstance(result, resource.DictWithMeta)
self.assertEqual(result, self.instance)
# return None
resp = fakes.create_response()
mocked.return_value = resp
result = self.manager.get(self.resource)
self.assertEqual(result, resp.text)
self.assertIsInstance(result, resource.StrWithMeta)
@mock.patch("keystoneauth1.adapter.Adapter.request")
def test_manager_list(self, mocked):
json = {"resources": [self.instance]}
mocked.return_value = fakes.create_response(json)
result = self.manager.list()
self.assertEqual(1, len(result))
self.assertEqual([self.resource], result)
self.assertIsInstance(result, resource.ListWithMeta)
self.assertIsInstance(result[0], fakes.FakeResource)
expect_url = fakes.FAKE_RESOURCE_COLLECTION_URL
mocked.assert_called_once_with(expect_url,
"GET",
headers=self._headers,
params={})
@mock.patch("keystoneauth1.adapter.Adapter.request")
def test_manager_list_return_string(self, mocked):
json = {"resources": ["a", "b", "c"]}
mocked.return_value = fakes.create_response(json)
result = self.manager.list()
self.assertEqual(3, len(result))
self.assertEqual(json['resources'], result)
self.assertIsInstance(result, resource.ListWithMeta)
@mock.patch("keystoneauth1.adapter.Adapter.request")
def test_manager_patch(self, mocked):
mocked.return_value = fakes.create_response(self.instance)
result = self.manager.update(self.resource)
self.assertFalse(result.has_attached())
self.assertIsInstance(result, fakes.FakeResource)
expect_url = (fakes.FAKE_RESOURCE_ITEM_URL % fakes.FAKE_RESOURCE_ID)
mocked.assert_called_once_with(expect_url,
"PATCH",
json=self.resource,
headers=self._headers)
# return raw
result = self.manager.update(self.resource, raw=True)
self.assertIsInstance(result, resource.DictWithMeta)
self.assertEqual(result, self.instance)
# return None
resp = fakes.create_response()
mocked.return_value = resp
result = self.manager.update(self.resource)
self.assertEqual(result, resp.text)
self.assertIsInstance(result, resource.StrWithMeta)
@mock.patch("keystoneauth1.adapter.Adapter.request")
def test_manager_put(self, mocked):
mocked.return_value = fakes.create_response(self.instance)
result = self.manager.update_all(self.resource)
self.assertFalse(result.has_attached())
self.assertIsInstance(result, fakes.FakeResource)
expect_url = (fakes.FAKE_RESOURCE_ITEM_URL % fakes.FAKE_RESOURCE_ID)
mocked.assert_called_once_with(expect_url,
"PUT",
json=self.resource,
headers=self._headers)
# return raw
result = self.manager.update_all(self.resource, raw=True)
self.assertIsInstance(result, resource.DictWithMeta)
self.assertEqual(result, self.instance)
# return None
resp = fakes.create_response()
mocked.return_value = resp
result = self.manager.update_all(self.resource)
self.assertEqual(result, resp.text)
self.assertIsInstance(result, resource.StrWithMeta)
@mock.patch("keystoneauth1.adapter.Adapter.request")
def test_manager_delete(self, mocked):
mocked.return_value = fakes.create_response()
result = self.manager.delete(self.resource)
self.assertEqual(tuple(), result)
self.assertIsInstance(result, resource.TupleWithMeta)
expect_url = (fakes.FAKE_RESOURCE_ITEM_URL % fakes.FAKE_RESOURCE_ID)
mocked.assert_called_once_with(expect_url,
"DELETE",
headers=self._headers)
@mock.patch("keystoneauth1.adapter.Adapter.request")
def test_manager_create(self, mocked):
mocked.return_value = fakes.create_response(self.instance)
result = self.manager.create(self.resource)
self.assertIsInstance(result, fakes.FakeResource)
self.assertEqual(result.original, self.instance)
self.assertFalse(result.has_attached())
expect_url = fakes.FAKE_RESOURCE_COLLECTION_URL
mocked.assert_called_once_with(expect_url,
"POST",
json=self.resource,
headers=self._headers)
result = self.manager.create()
mocked.assert_called_with(expect_url, "POST", headers=self._headers)
@mock.patch("keystoneauth1.adapter.Adapter.request")
def test_manager_create_return_none(self, mocked):
mocked.return_value = fakes.create_response()
result = self.manager.create(self.resource)
self.assertIsInstance(result, resource.TupleWithMeta)
self.assertEqual(result, ())
expect_url = fakes.FAKE_RESOURCE_COLLECTION_URL
mocked.assert_called_once_with(expect_url,
"POST",
json=self.resource,
headers=self._headers)
def test_mixin_meta(self):
resp = fakes.create_response()
text = self.manager.mixin_meta('text', resp)
self.assertEqual('text', text)
self.assertIsInstance(text, resource.StrWithMeta)
text = self.manager.mixin_meta(u'text', resp)
self.assertEqual('text', text)
self.assertIsInstance(text, resource.UnicodeWithMeta)
list_item = ['a', 'b', 'c']
list_mixin = self.manager.mixin_meta(list_item, resp)
self.assertEqual(list_item, list_mixin)
self.assertIsInstance(list_mixin, resource.ListWithMeta)
tuple_item = ('a', 'b', 'c')
tuple_mixin = self.manager.mixin_meta(tuple_item, resp)
self.assertEqual(tuple_item, tuple_mixin)
self.assertIsInstance(tuple_mixin, resource.TupleWithMeta)
        byte_item = six.b('abc')
byte_mixin = self.manager.mixin_meta(byte_item, resp)
self.assertEqual(byte_item, byte_mixin)
if six.PY2:
self.assertIsInstance(byte_mixin, resource.StrWithMeta)
elif six.PY3:
self.assertIsInstance(byte_mixin, resource.BytesWithMeta)
| [
"[email protected]"
]
| |
018d690f2b09a7839e2233446c5334d07ba7e40a | 6c5ce1e621e0bd140d127527bf13be2093f4a016 | /ex073/venv/Scripts/easy_install-3.7-script.py | 04b8ee8d192cd98ecbf9446220d1339ffc006211 | [
"MIT"
]
| permissive | ArthurAlesi/Python-Exercicios-CursoEmVideo | 124e2ee82c3476a5a49baafed657788591a232c1 | ed0f0086ddbc0092df9d16ec2d8fdbabcb480cdd | refs/heads/master | 2022-12-31T13:21:30.001538 | 2020-09-24T02:09:23 | 2020-09-24T02:09:23 | 268,917,509 | 0 | 0 | null | null | null | null | ISO-8859-2 | Python | false | false | 508 | py | #!C:\Users\User\Documents\github-MeusRepositórios\Python-Exercicios-CursoEmVideo\ex073\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"[email protected]"
]
| |
2706a914a8a9b70a04385ebad0180e6781fbffb5 | 07af05141f371ad1c2ab11634d4f5fad20ede2e0 | /python/src/nnabla/backward_function/bc_add2.py | b95e0477b113afd762dead0d62af1338523c465d | [
"Apache-2.0"
]
| permissive | chunxiaosz/nnabla | a9c9b30140def0bdf91dea24d70cfa9400258d66 | 9f4249313129d0fd23d304453830157fee96a2e5 | refs/heads/master | 2020-12-03T05:11:24.724125 | 2019-09-04T06:39:41 | 2019-09-04T06:39:41 | 95,741,841 | 0 | 0 | Apache-2.0 | 2019-09-04T06:39:42 | 2017-06-29T05:29:11 | C++ | UTF-8 | Python | false | false | 1,839 | py | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nnabla as nn
import nnabla.functions as F
from .backward_function import BackwardFunction
class BcAdd2Backward(BackwardFunction):
def name(self):
return 'BcAdd2Backward'
def _create_forward_inputs_and_outputs(self, inputs, outputs):
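        # Rebuild forward-graph variables that alias this backward function's
        # buffers: forward inputs share data, and their grads point at the
        # backward outputs' data.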
# Inputs on the forward graph
inputs_fwd = []
for i in range(self._num_inputs_fwd):
need_grad = self.forward_func.inputs[i].need_grad
v = nn.Variable(inputs[i].shape, need_grad=need_grad)
v.data = inputs[i].data
v.grad = outputs[i].data
inputs_fwd += [v]
# Outputs on the forward graph
outputs_fwd = []
for i in range(self._num_outputs_fwd):
inp = inputs[self._num_inputs_fwd + i]
v = nn.Variable(inp.shape)
v.grad = inp.data
outputs_fwd += [v]
return inputs_fwd, outputs_fwd
def backward_impl(self, inputs, outputs, prop_down, accum):
# inputs: [inputs_fwd_graph] + [inputs_bwd_graph] or
# [inputs_fwd_graph] + [outputs_fwd_graph] + [inputs_bwd_graph]
raise NotImplementedError(
"The backward method of BcAdd2Backward class is not implemented.")
| [
"[email protected]"
]
| |
1fd62a6bbd372e6a1a9e6651808cdfff227543ad | 4feaf520374804d6f3feebe3700fb448692a44ba | /pullenti/ner/address/StreetReferent.py | 1d48a1baaff7425540277b31fcda5562b60b8735 | []
| no_license | MihaJjDa/APCLtask | f7be3fb6b0f31801196bf779f6a7e62ce245493b | 4745b45e199887d433ab256bb2e2ebf5dbe3f7cd | refs/heads/master | 2020-04-16T17:15:10.846647 | 2020-02-24T16:06:43 | 2020-02-24T16:06:43 | 165,769,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,253 | py | # Copyright (c) 2013, Pullenti. All rights reserved. Non-Commercial Freeware.
# This class is generated using the converter UniSharping (www.unisharping.ru) from Pullenti C#.NET project (www.pullenti.ru).
# See www.pullenti.ru/downloadpage.aspx.
import typing
import io
from pullenti.unisharp.Utils import Utils
from pullenti.ner.address.StreetKind import StreetKind
from pullenti.ner.Referent import Referent
from pullenti.ner.core.Termin import Termin
from pullenti.ner.core.IntOntologyItem import IntOntologyItem
from pullenti.ner.ReferentClass import ReferentClass
from pullenti.ner.address.internal.MetaStreet import MetaStreet
from pullenti.ner.core.MiscHelper import MiscHelper
from pullenti.ner.geo.GeoReferent import GeoReferent
class StreetReferent(Referent):
""" Улица, проспект, площадь, шоссе и т.п. """
def __init__(self) -> None:
super().__init__(StreetReferent.OBJ_TYPENAME)
self.instance_of = MetaStreet._global_meta
OBJ_TYPENAME = "STREET"
ATTR_TYP = "TYP"
ATTR_NAME = "NAME"
ATTR_NUMBER = "NUMBER"
ATTR_SECNUMBER = "SECNUMBER"
ATTR_GEO = "GEO"
ATTR_FIAS = "FIAS"
ATTR_BTI = "BTI"
ATTR_OKM = "OKM"
@property
def typs(self) -> typing.List[str]:
""" Тип(ы) """
res = list()
for s in self.slots:
if (s.type_name == StreetReferent.ATTR_TYP):
res.append(s.value)
return res
@property
def names(self) -> typing.List[str]:
""" Наименования """
res = list()
for s in self.slots:
if (s.type_name == StreetReferent.ATTR_NAME):
res.append(s.value)
return res
@property
def number(self) -> str:
""" Номер улицы (16-я Парковая) """
return self.getStringValue(StreetReferent.ATTR_NUMBER)
@number.setter
def number(self, value) -> str:
self.addSlot(StreetReferent.ATTR_NUMBER, value, True, 0)
return value
@property
def sec_number(self) -> str:
""" Дополнительный номер (3-я 1 Мая) """
return self.getStringValue(StreetReferent.ATTR_SECNUMBER)
@sec_number.setter
def sec_number(self, value) -> str:
self.addSlot(StreetReferent.ATTR_SECNUMBER, value, True, 0)
return value
@property
def geos(self) -> typing.List['GeoReferent']:
""" Ссылка на географические объекты """
res = list()
for a in self.slots:
if (a.type_name == StreetReferent.ATTR_GEO and (isinstance(a.value, GeoReferent))):
res.append(Utils.asObjectOrNull(a.value, GeoReferent))
return res
@property
def city(self) -> 'GeoReferent':
""" Город """
for g in self.geos:
if (g.is_city):
return g
elif (g.higher is not None and g.higher.is_city):
return g.higher
return None
@property
def parent_referent(self) -> 'Referent':
return Utils.asObjectOrNull(self.getSlotValue(StreetReferent.ATTR_GEO), GeoReferent)
def toString(self, short_variant : bool, lang : 'MorphLang'=None, lev : int=0) -> str:
tmp = io.StringIO()
nam = self.getStringValue(StreetReferent.ATTR_NAME)
typs_ = self.typs
if (len(typs_) > 0):
i = 0
first_pass2748 = True
while True:
if first_pass2748: first_pass2748 = False
else: i += 1
if (not (i < len(typs_))): break
if (nam is not None and typs_[i].upper() in nam):
continue
if (tmp.tell() > 0):
print('/', end="", file=tmp)
print(typs_[i], end="", file=tmp)
else:
print(("вулиця" if lang.is_ua else "улица"), end="", file=tmp)
if (self.number is not None):
print(" {0}".format(self.number), end="", file=tmp, flush=True)
if (self.sec_number is not None):
print(" {0}".format(self.sec_number), end="", file=tmp, flush=True)
if (nam is not None):
print(" {0}".format(MiscHelper.convertFirstCharUpperAndOtherLower(nam)), end="", file=tmp, flush=True)
if (not short_variant):
kladr = self.getSlotValue(StreetReferent.ATTR_FIAS)
if (isinstance(kladr, Referent)):
print(" (ФИАС: {0}".format(Utils.ifNotNull((kladr).getStringValue("GUID"), "?")), end="", file=tmp, flush=True)
for s in self.slots:
if (s.type_name == StreetReferent.ATTR_FIAS and (isinstance(s.value, Referent)) and s.value != kladr):
print(", {0}".format(Utils.ifNotNull((s.value).getStringValue("GUID"), "?")), end="", file=tmp, flush=True)
print(')', end="", file=tmp)
bti = self.getStringValue(StreetReferent.ATTR_BTI)
if (bti is not None):
print(" (БТИ {0})".format(bti), end="", file=tmp, flush=True)
okm = self.getStringValue(StreetReferent.ATTR_OKM)
if (okm is not None):
print(" (ОКМ УМ {0})".format(okm), end="", file=tmp, flush=True)
if (not short_variant and self.city is not None):
print("; {0}".format(self.city.toString(True, lang, lev + 1)), end="", file=tmp, flush=True)
return Utils.toStringStringIO(tmp)
@property
def kind(self) -> 'StreetKind':
""" Классификатор """
for t in self.typs:
if ("дорога" in t):
return StreetKind.ROAD
elif ("метро" in t):
return StreetKind.METRO
return StreetKind.UNDEFINED
def canBeEquals(self, obj : 'Referent', typ : 'EqualType'=Referent.EqualType.WITHINONETEXT) -> bool:
return self.__canBeEquals(obj, typ, False)
def __canBeEquals(self, obj : 'Referent', typ : 'EqualType', ignore_geo : bool) -> bool:
stri = Utils.asObjectOrNull(obj, StreetReferent)
if (stri is None):
return False
if (self.kind != stri.kind):
return False
typs1 = self.typs
typs2 = stri.typs
ok = False
if (len(typs1) > 0 and len(typs2) > 0):
for t in typs1:
if (t in typs2):
ok = True
break
if (not ok):
return False
num = self.number
num1 = stri.number
if (num is not None or num1 is not None):
if (num is None or num1 is None):
return False
sec = self.sec_number
sec1 = stri.sec_number
if (sec is None and sec1 is None):
if (num != num1):
return False
elif (num == num1):
if (sec != sec1):
return False
elif (sec == num1 and sec1 == num):
pass
else:
return False
names1 = self.names
names2 = stri.names
if (len(names1) > 0 or len(names2) > 0):
ok = False
for n in names1:
if (n in names2):
ok = True
break
if (not ok):
return False
if (ignore_geo):
return True
geos1 = self.geos
geos2 = stri.geos
if (len(geos1) > 0 and len(geos2) > 0):
ok = False
for g1 in geos1:
for g2 in geos2:
if (g1.canBeEquals(g2, typ)):
ok = True
break
if (not ok):
if (self.city is not None and stri.city is not None):
ok = self.city.canBeEquals(stri.city, typ)
if (not ok):
return False
return True
def addSlot(self, attr_name : str, attr_value : object, clear_old_value : bool, stat_count : int=0) -> 'Slot':
if (attr_name == StreetReferent.ATTR_NAME and (isinstance(attr_value, str))):
str0_ = Utils.asObjectOrNull(attr_value, str)
if (str0_.find('.') > 0):
i = 1
while i < (len(str0_) - 1):
if (str0_[i] == '.' and str0_[i + 1] != ' '):
str0_ = (str0_[0:0+i + 1] + " " + str0_[i + 1:])
i += 1
attr_value = (str0_)
return super().addSlot(attr_name, attr_value, clear_old_value, stat_count)
def mergeSlots(self, obj : 'Referent', merge_statistic : bool=True) -> None:
super().mergeSlots(obj, merge_statistic)
def canBeGeneralFor(self, obj : 'Referent') -> bool:
if (not self.__canBeEquals(obj, Referent.EqualType.WITHINONETEXT, True)):
return False
geos1 = self.geos
geos2 = (obj).geos
if (len(geos2) == 0 or len(geos1) > 0):
return False
return True
def createOntologyItem(self) -> 'IntOntologyItem':
oi = IntOntologyItem(self)
names_ = self.names
for n in names_:
oi.termins.append(Termin(n))
return oi
def _correct(self) -> None:
names_ = self.names
for i in range(len(names_) - 1, -1, -1):
ss = names_[i]
jj = ss.find(' ')
if (jj < 0):
continue
if (ss.rfind(' ') != jj):
continue
pp = Utils.splitString(ss, ' ', False)
if (len(pp) == 2):
ss2 = "{0} {1}".format(pp[1], pp[0])
if (not ss2 in names_):
self.addSlot(StreetReferent.ATTR_NAME, ss2, False, 0) | [
"[email protected]"
]
| |
2dffbb4034ba6be7a0ed58d07cc0b2c0128ce028 | eeb10d934e1981ba6784573e7de5a269c9ce865f | /RecoStudy/Draw_Sys_ttbar.py | 878b5f1418dde1eb8293db7ed4622234873b3331 | []
| no_license | bisnupriyasahu/DM2018 | c3e954bf0b14362a0b13ba4a46907aa8e9e8f5c1 | 74b5834deb9ae4489c55c8ed226270fa579a6a8c | refs/heads/master | 2022-02-17T22:43:37.325398 | 2019-08-20T22:03:31 | 2019-08-20T22:03:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,809 | py | import os
import ROOT
from ROOT import *
import math
#OutF=TFile('Out1200.root','RECREATE')
#outHistSys=TH1F('MeanSys','',20,0,2)
#
#File=TFile('test1200.root','R')
#sum=0
#for isys in range(0,100):
# HistSys=File.Get('___Sys_%s'%str(isys))
# mean=HistSys.GetMean()
# print mean
# OutF.cd()
# sum +=(mean*mean)
# outHistSys.Fill(mean)
#
#outHistSys.Write()
#OutF.Write()
#OutF.Close()
#print "sum=",sum
#
def add_CMS():
lowX=0.51
lowY=0.70
lumi = ROOT.TPaveText(lowX, lowY+0.06, lowX+0.15, lowY+0.16, "NDC")
lumi.SetTextFont(61)
lumi.SetTextSize(0.08)
lumi.SetBorderSize( 0 )
lumi.SetFillStyle( 0 )
lumi.SetTextAlign( 12 )
lumi.SetTextColor( 1 )
lumi.AddText("CMS")
return lumi
def add_Preliminary():
lowX=0.51
lowY=0.63
lumi = ROOT.TPaveText(lowX, lowY+0.06, lowX+0.15, lowY+0.16, "NDC")
lumi.SetTextFont(52)
lumi.SetTextSize(0.06)
lumi.SetBorderSize( 0 )
lumi.SetFillStyle( 0 )
lumi.SetTextAlign( 12 )
lumi.SetTextColor( 1 )
lumi.AddText("Simulation")
return lumi
def _Get_PDF_Unc(InputRootFile,Name,MTCut):
ROOT.gStyle.SetFrameLineWidth(3)
ROOT.gStyle.SetLineWidth(3)
ROOT.gStyle.SetOptStat(0)
OutF=TFile('out.root','RECREATE')
PlusPDF=TH1F('pdfUp','',20,0,2000)
MinusPDF=TH1F('pdfDown','',20,0,2000)
PlusScale=TH1F('qcdScaleUp','',20,0,2000)
MinusScale=TH1F('qcdScaleDown','',20,0,2000)
File=TFile(InputRootFile,'R')
c=ROOT.TCanvas("canvas","",0,0,600,600)
c.cd()
pad1 = ROOT.TPad("pad1","pad1",0,0.35,1,1)
#pad1.SetLogy()
pad1.Draw()
pad1.cd()
pad1.SetFillColor(0)
pad1.SetBorderMode(0)
pad1.SetBorderSize(10)
pad1.SetTickx(1)
pad1.SetTicky(1)
pad1.SetLeftMargin(0.18)
pad1.SetRightMargin(0.05)
pad1.SetTopMargin(0.122)
pad1.SetBottomMargin(0.026)
pad1.SetFrameFillStyle(0)
pad1.SetFrameLineStyle(0)
pad1.SetFrameLineWidth(3)
pad1.SetFrameBorderMode(0)
pad1.SetFrameBorderSize(10)
HistCentral=File.Get('MuJet_LQMass_Scale0_MT%s_MET100_Iso'%MTCut)
HistCentral.Draw('pe')
l2=add_CMS()
l2.Draw("same")
l3=add_Preliminary()
l3.Draw("same")
#HistCentral.GetXaxis().SetTitle("M_{#muj}")
HistCentral.GetXaxis().SetTitleSize(0)
HistCentral.GetXaxis().SetLabelSize(0)
HistCentral.GetXaxis().SetNdivisions(505)
HistCentral.GetYaxis().SetLabelFont(42)
HistCentral.GetYaxis().SetLabelOffset(0.01)
HistCentral.GetYaxis().SetLabelSize(0.06)
HistCentral.GetYaxis().SetTitleSize(0.075)
HistCentral.GetYaxis().SetTitleOffset(1.04)
HistCentral.SetTitle("")
# HistCentral.Setlabel("")
HistCentral.GetYaxis().SetTitle("Events")
HistCentral.SetLineColor(38)
HistCentral.SetLineWidth(2)
HistCentral.SetMarkerColor(38)
HistCentral.SetMarkerStyle(20)
pad1.RedrawAxis()
categ = ROOT.TPaveText(0.2, 0.3+0.013, 0.39, 0.4+0.1, "NDC")
categ.SetBorderSize( 0 )
categ.SetFillStyle( 0 )
categ.SetTextAlign( 12 )
categ.SetTextSize ( 0.05 )
categ.SetTextColor( 1 )
categ.AddText(Name.replace('.root',''))
categ.Draw()
c.cd()
pad2 = ROOT.TPad("pad2","pad2",0,0,1,0.35);
pad2.SetTopMargin(0.05);
pad2.SetBottomMargin(0.3);
pad2.SetLeftMargin(0.18);
pad2.SetRightMargin(0.05);
#pad2.SetTickx(1)
#pad2.SetTicky(1)
pad2.SetFrameLineWidth(3)
pad2.SetGridx()
pad2.SetGridy()
pad2.Draw()
pad2.cd()
for ibin in range(0,20):
###########################################################################################
## PDF Uncertainty
###########################################################################################
# sumP=0
# numP=0
#
# sumN=0
# numN=0
# for isys in range(0,100):
# HistCentral=File.Get('MuJet_LQMass_MT500_MET100_Iso')
# HistSys=File.Get('MuJet_LQMass_PDF%s_MT500_MET100_Iso'%str(isys))
#
# # HistCentral.Rebin(10)
# # HistSys.Rebin(10)
#
#
#
# # meanCental=HistCentral.Integral()
# # meanSys=HistSys.Integral()
# meanCental=HistCentral.GetBinContent(ibin+1)
# meanSys=HistSys.GetBinContent(ibin+1)
#
# if meanCental==0: continue
#
#
# if meanSys > meanCental:
# sumP +=pow( (meanSys-meanCental) ,2)
# numP +=1
#
# if meanSys < meanCental:
# sumN +=pow( (meanCental-meanSys) ,2)
# numN +=1
#
#
#
#
# print ibin+1, 'numP ', numP, 'sumP= ', sumP, ' Final Number plus is=', math.sqrt( 1./(numP -1) * sumP ), ' unc= ', math.sqrt( 1./(numP -1) * sumP )/ meanCental
#
# BinValuPlus=math.sqrt( 1./(numP -1) * sumP )/ meanCental
# PlusPDF.SetBinContent(ibin+1,1+BinValuPlus )
#
# print ibin+1, 'numN ', numN, 'sumN= ', sumN, ' Final Number minus is=', math.sqrt( 1./(numN -1) * sumN ), ' unc= ', math.sqrt( 1./(numN -1) * sumN )/ meanCental
#
# BinValuMinus=math.sqrt( 1./(numN -1) * sumN )/ meanCental
# MinusPDF.SetBinContent(ibin+1,1 - BinValuMinus )
##########################################################################################
# Scale Uncertainty
##########################################################################################
        # Scale-uncertainty envelope: "Minimum" tracks the largest upward
        # ratio to the nominal and "Maximum" the smallest downward ratio.
        Minimum = 0
        Maximum = 100
        # Fallback in case no variation updates the envelope below.
        largestDeviationUp = largestDeviationDown = 1
for isys in range(1,9):
if isys==5 or isys==7: continue # these 2 are non-physical related to the (2,0.5) or (0.5,2)
HistCentral=File.Get('MuJet_LQMass_Scale0_MT%s_MET100_Iso'%MTCut)
HistSys=File.Get('MuJet_LQMass_Scale%s_MT%s_MET100_Iso'%(str(isys),MTCut))
# HistCentral.Rebin(10)
# HistSys.Rebin(10)
# meanCental=HistCentral.Integral()
# meanSys=HistSys.Integral()
meanCental=HistCentral.GetBinContent(ibin+1)
meanSys=HistSys.GetBinContent(ibin+1)
if meanCental==0:
largestDeviationUp=largestDeviationDown=1
else:
if meanSys/meanCental > Minimum:
largestDeviationUp=meanSys/meanCental
Minimum=largestDeviationUp
if meanSys/meanCental < Maximum:
largestDeviationDown=meanSys/meanCental
Maximum=largestDeviationDown
print ibin, largestDeviationUp, largestDeviationDown
PlusScale.SetBinContent(ibin+1,largestDeviationUp )
MinusScale.SetBinContent(ibin+1,largestDeviationDown )
##########################################################################################
PlusScale.GetXaxis().SetTitle("")
PlusScale.GetXaxis().SetLabelSize(0.06)
PlusScale.GetYaxis().SetLabelSize(0.05)
PlusScale.GetYaxis().SetTitle("QCD Scale Unc")
PlusScale.GetXaxis().SetNdivisions(505)
PlusScale.GetYaxis().SetNdivisions(10)
PlusScale.GetXaxis().SetTitleSize(0.1)
PlusScale.GetYaxis().SetTitleSize(0.1)
PlusScale.GetYaxis().SetTitleOffset(0.5)
PlusScale.GetXaxis().SetTitleOffset(1.04)
PlusScale.GetXaxis().SetLabelSize(0.08)
PlusScale.GetYaxis().SetLabelSize(0.08)
PlusScale.GetXaxis().SetTitleFont(42)
PlusScale.GetYaxis().SetTitleFont(42)
PlusScale.GetXaxis().SetTitle('M_{#muj} (GeV)')
PlusScale.SetMinimum(0.5)
PlusScale.SetMaximum(1.5)
# PlusPDF.Draw('PL')
# PlusPDF.SetMinimum(0.5)
# PlusPDF.SetMaximum(1.5)
# PlusPDF.SetMarkerStyle(20)
# PlusPDF.SetMarkerColor(38)
# PlusPDF.SetLineColor(38)
# PlusPDF.SetLineWidth(2)
# PlusPDF.SetMarkerColor(38)
# PlusPDF.SetMarkerStyle(20)
#
#
# MinusPDF.Draw('PLsame')
# MinusPDF.SetLineColor(38)
# MinusPDF.SetLineWidth(2)
# MinusPDF.SetMarkerColor(38)
# MinusPDF.SetMarkerStyle(20)
PlusScale.Draw('PL')
PlusScale.SetLineColor(8)
PlusScale.SetLineWidth(2)
PlusScale.SetMarkerColor(8)
PlusScale.SetMarkerStyle(21)
MinusScale.Draw('PLsame')
MinusScale.SetLineColor(8)
MinusScale.SetLineWidth(2)
MinusScale.SetMarkerColor(8)
MinusScale.SetMarkerStyle(21)
c.cd()
pad1.Draw()
ROOT.gPad.RedrawAxis()
c.Modified()
c.SaveAs('FINALSYS__%s.pdf'%(Name.replace('.root','')))
fileOut=TFile('QCDScale_%s.root'%Name,'RECREATE')
fileOut.cd()
PlusScale.Write()
MinusScale.Write()
fileOut.Close()
_Get_PDF_Unc('testQCDScalettbar.root','TTbar','500')
#_Get_PDF_Unc('OutFiles_FullSelection/WJetsToLNu.root','W','500')
| [
"[email protected]"
]
| |
b6eebc3348f8b8c588194824b12e99245fc3b3e3 | 72ea510ceaa5a4aa1918ea0cf2bb699439d2587b | /Python/problem0082.py | 66f7113ebe2a267d3c64be861a1d3e5db40e7244 | [
"MIT"
]
| permissive | 1050669722/LeetCode-Answers | f18680e0fe74199a630fff214977e91fe428c550 | c8f4d1ccaac09cda63b60d75144335347b06dc81 | refs/heads/master | 2023-03-08T01:25:19.720931 | 2021-02-22T00:34:43 | 2021-02-22T00:34:43 | 270,304,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | # Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
    def deleteDuplicates(self, head: ListNode) -> ListNode:
        if not (head and head.next):
            return head
        # First pass: count how many times each value occurs.
        d = {}
        tmp = head
        while tmp:
            if tmp.val in d:
                d[tmp.val] += 1
            else:
                d[tmp.val] = 1
            tmp = tmp.next
        tmp = head
        # Second pass: relink the list, skipping every value that occurs
        # more than once; a dummy head simplifies removals at the front.
        head = ListNode(None)
        head.next = tmp
        pre, cur = head, head.next
while cur:
if d[cur.val] == 1:
cur = cur.next
pre = pre.next
else:
for _ in range(d[cur.val]):
cur = cur.next
pre.next = cur
        return head.next
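# A minimal sketch exercising the solution (the list helpers are ad hoc):
if __name__ == "__main__":
    def build(vals):
        dummy = ListNode(None)
        cur = dummy
        for v in vals:
            cur.next = ListNode(v)
            cur = cur.next
        return dummy.next
    node = Solution().deleteDuplicates(build([1, 2, 3, 3, 4, 4, 5]))
    out = []
    while node:
        out.append(node.val)
        node = node.next
    print(out)  # expected: [1, 2, 5]
| [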
"[email protected]"
]
| |
8f04c0f007f92b6b025b75fbb32b9204d00f39d6 | 67d95b72da34dcfb3bf0224e66c3f3d345c7c5be | /src/spaceone/inventory/connector/aws_secrets_manager_connector/schema/service_type.py | d671de6dd8ac7a3aae846cdd457597fc2dd8e8f5 | [
"Apache-2.0"
]
| permissive | khl6235/plugin-aws-cloudservices | 10971267902a000f3965bbf606283ab71c15823a | e13554df78ff97daefa7011559c00adc44fa10ea | refs/heads/master | 2022-12-12T03:12:10.744727 | 2020-09-11T06:15:42 | 2020-09-11T06:15:42 | 288,607,364 | 0 | 0 | Apache-2.0 | 2020-08-19T01:54:13 | 2020-08-19T01:54:12 | null | UTF-8 | Python | false | false | 1,535 | py | from spaceone.inventory.libs.schema.dynamic_field import TextDyField, DateTimeDyField, SearchField
from spaceone.inventory.libs.schema.resource import CloudServiceTypeResource, CloudServiceTypeResponse, \
CloudServiceTypeMeta
cst_secret = CloudServiceTypeResource()
cst_secret.name = 'Secret'
cst_secret.provider = 'aws'
cst_secret.group = 'SecretsManager'
cst_secret.labels = ['Security']
cst_secret.tags = {
'spaceone:icon': 'https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/aws/AWS-Secrets-Manager.svg',
'spaceone:is_major': 'true',
}
cst_secret._metadata = CloudServiceTypeMeta.set_meta(
fields=[
TextDyField.data_source('Name', 'data.name'),
TextDyField.data_source('Description', 'data.description'),
DateTimeDyField.data_source('Last Retrieved', 'data.last_accessed_date'),
],
search=[
SearchField.set(name='Name', key='data.name'),
SearchField.set(name='ARN', key='data.arn'),
SearchField.set(name='Last Changed Time', key='data.last_changed_date', data_type='datetime'),
SearchField.set(name='Last Accessed Time', key='data.last_accessed_date', data_type='datetime'),
SearchField.set(name='Rotation Enabled', key='data.rotation_enabled', data_type='boolean'),
SearchField.set(name='Region', key='data.region_name'),
SearchField.set(name='AWS Account ID', key='data.account_id'),
]
)
CLOUD_SERVICE_TYPES = [
CloudServiceTypeResponse({'resource': cst_secret}),
]
| [
"[email protected]"
]
| |
82aef273e8b579360dbe49588d59fbe2f8e5646f | cab812e505f6ffd0f54436beb053bff07d58a90a | /20_Day_Python_package_manager/mypackage/greetings.py | f256663fd6dd428349cc16a9548cd04497e0bfaa | [
"Apache-2.0"
]
| permissive | diegofregolente/30-Days-Of-Python | 778a3f97a03010a528afb91cde22332da81f4af3 | e0cad31f6d5ab1384ad6fa5a5d24a84771d6c267 | refs/heads/master | 2023-03-24T10:25:20.708317 | 2021-03-19T12:30:02 | 2021-03-19T12:30:02 | 348,480,748 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | def greet_person(first_name, second_name):
return f'{first_name} {second_name}, welcome to 30daysofpython challenge'
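# Example usage (the names below are placeholders):
# >>> greet_person('Asabeneh', 'Yetayeh')
# 'Asabeneh Yetayeh, welcome to 30daysofpython challenge'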
| [
"[email protected]"
]
| |
c51f6f32ab60667f3221123353d8004395f8d50b | 7585c77f49d4a3643e4740b2ceae081c20fc4183 | /example06-06-01.py | 06ef28969f2a708ee44808db2e680530b77f4953 | []
| no_license | Minari766/study_python | c4243df47f23e8fda5bcdf16b65f3b3af97f888c | b1e48e750126f377a15f8be8b3c2547687416ec4 | refs/heads/master | 2022-12-15T10:29:05.306690 | 2020-09-03T13:05:21 | 2020-09-03T13:05:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,668 | py | # coding:utf-8
import tkinter as tk
import tkinter.messagebox as tmsg
import random
#ボタンがクリックされたときの処理
def ButtonClick():
b = editbox1.get() #テキスト入力欄に入力された文字列を取得。変数.getメソッドを使用
#Lesson 5-4のプログラム。4桁の数字かどうかを判定
#4桁の数字かどうかを判断する
isok = False
if len(b)!= 4:
tmsg.showerror("エラー","4桁の数字を入れてください")
else:
kazuok = True
for i in range(4):
if (b[i] < "0") or (b[i] > "9"):
tmsg.showerror("エラー", "数字ではありません")
kazuok = False
break
if kazuok:
isok = True
if isok:
#4桁の数字だった場合。ヒットを判定
hit = 0
for i in range(4):
if a[i] == int(b[i]):
hit = hit +1
#ブローを判定
blow = 0
for j in range(4):
for i in range(4):
if (int(b[j]) == a[i]) and (a[i] != int(b[i])) and (a[j] != int(b[j])):
blow = blow + 1
break
#ヒットが4なら正解となり、終了
if hit == 4:
tmsg.showinfo("正解!", "おめでとうございます!")
#終了
root.destroy()
else: #ヒット数とブロー数を表示。
rirekibox.insert(tk.END, b + " /H:" + str(hit) + " B:" + str(blow) + "\n")#ウィンドウ向けに新たに調整した部分
#メインのプログラム
#予めランダムな4つの数字を設定
a = [random.randint(0, 9),
random.randint(0, 9),
random.randint(0, 9),
random.randint(0, 9)]
#ウィンドウを作る
root = tk.Tk()
root.geometry("600x400")
root.title("数当てゲーム") #ウィンドウを作る
#履歴表示のテキストボックスを作る
rirekibox = tk.Text(root, font = ("Meiryo UI", 14))
rirekibox.place(x=400, y=0, width=200, height=400)
#ラベルを作る
label1 = tk.Label(root, text="数を入力してください", font=("Meiryo UI", 14)) #文章を入れる
label1.place(x = 20, y = 20) #変数の入力欄を動かす
#テキストボックス(プレイヤーの入力欄)を作る
editbox1 = tk.Entry(width =4, font = ("Meiryo UI", 28)) #テキスト入力欄を作る
editbox1.place(x = 160, y = 20)
#ボタンを作る
button1 = tk.Button(root, text = "チェック", font = ("Meiryo UI", 14), command = ButtonClick)
button1.place(x = 240, y = 20)
#ウィンドウを表示する
root.mainloop() #ウィンドウを表示する
| [
"[email protected]"
]
| |
bddda615f934da0f8d51fc2c329bb1c89f27a5b8 | ecc0f628f9684cb4a0b18a8ff8c6e7a7fce9253d | /gears/color.py | 0e734838d1b432823e17410d764a2f335ab5983c | [
"Apache-2.0",
"CC-BY-4.0"
]
| permissive | ii0/gearhead-caramel | fb8f570046df01f000b15ba7073be82fcf2b791a | 8bf4572aefb5f3a1bafd20ad04dfa0b2f44be8b1 | refs/heads/master | 2022-02-28T15:43:46.261543 | 2019-07-12T13:30:30 | 2019-07-12T13:30:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,579 | py | from pbge.image import Gradient
import random
CLOTHING,SKIN,HAIR,MECHA,DETAILS,METAL = range(6)
# Color Families- the eleven basic color words in English.
PINK,RED,ORANGE,YELLOW,GREEN,BLUE,PURPLE,BROWN,GREY,BLACK,WHITE = range(11)
class ChannelRed( Gradient ):
NAME = 'Channel Red'
COLOR_RANGE = (255,0,0,0,0,0)
SETS = ()
FAMILY = -1
class ChannelYellow( Gradient ):
NAME = 'Channel Yellow'
COLOR_RANGE = (255,255,0,0,0,0)
SETS = ()
FAMILY = -1
class ChannelGreen( Gradient ):
NAME = 'Channel Green'
COLOR_RANGE = (0,255,0,0,0,0)
SETS = ()
FAMILY = -1
class ChannelCyan( Gradient ):
NAME = 'Channel Cyan'
COLOR_RANGE = (0,255,255,0,0,0)
SETS = ()
FAMILY = -1
class ChannelMagenta( Gradient ):
NAME = 'Channel Magenta'
COLOR_RANGE = (255,0,255,0,0,0)
SETS = ()
FAMILY = -1
class RoyalPink( Gradient ):
NAME = 'Royal Pink'
COLOR_RANGE = (255,135,241,80,43,54)
SETS = (CLOTHING,MECHA,DETAILS)
FAMILY = PINK
class Pink( Gradient ):
NAME = 'Pink'
COLOR_RANGE = (255,230,208,149,16,100)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = PINK
class HotPink( Gradient ):
NAME = 'Hot Pink'
COLOR_RANGE = (255,92,133,120,14,98)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = PINK
class Magenta( Gradient ):
NAME = 'Magenta'
COLOR_RANGE = (255,0,215,45,0,95)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = PINK
class AegisCrimson( Gradient ):
NAME = 'Aegis Crimson'
COLOR_RANGE = (255,45,109,41,3,33)
SETS = (CLOTHING,MECHA,DETAILS)
FAMILY = PINK
class Maroon( Gradient ):
NAME = 'Maroon'
COLOR_RANGE = (240,61,134,25,6,31)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = PINK
class CardinalRed( Gradient ):
NAME = 'Cardinal Red'
COLOR_RANGE = (240,80,72,32,8,12)
SETS = (CLOTHING,MECHA)
FAMILY = RED
class BrightRed( Gradient ):
NAME = 'Bright Red'
COLOR_RANGE = (255,57,56,112,4,12)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = RED
class GunRed( Gradient ):
NAME = 'Gun Red'
COLOR_RANGE = (248,20,20,69,5,26)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = RED
class PirateSunrise( Gradient ):
NAME = 'Pirate Sunrise'
COLOR_RANGE = (235,57,13,47,0,22)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = RED
class AceScarlet( Gradient ):
NAME = 'Ace Scarlet'
COLOR_RANGE = (255,96,72,60,19,27)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = RED
class BlackRose( Gradient ):
NAME = 'Black Rose'
COLOR_RANGE = (172,20,54,37,0,20)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = RED
class CometRed( Gradient ):
NAME = 'Comet Red'
COLOR_RANGE = (209,76,82,58,27,33)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = RED
class OrangeRed( Gradient ):
NAME = 'Orange Red'
COLOR_RANGE = (255,100,0,45,9,9)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = RED
class Persimmon( Gradient ):
NAME = 'Persimmon'
COLOR_RANGE = (255,159,90,77,16,0)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = ORANGE
class HunterOrange( Gradient ):
NAME = 'Hunter Orange'
COLOR_RANGE = (255,145,0,32,0,48)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = ORANGE
class Orange( Gradient ):
NAME = 'Orange'
COLOR_RANGE = (255,187,0,109,0,32)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = ORANGE
class Saffron( Gradient ):
NAME = 'Saffron'
COLOR_RANGE = (255,255,142,157,0,0)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = ORANGE
class DesertYellow( Gradient ):
NAME = 'Desert Yellow'
COLOR_RANGE = (229,234,163,26,32,15)
SETS = (CLOTHING,MECHA)
FAMILY = YELLOW
class Khaki( Gradient ):
NAME = 'Khaki'
COLOR_RANGE = (252,240,147,32,64,39)
SETS = (CLOTHING,MECHA)
FAMILY = YELLOW
class LemonYellow( Gradient ):
NAME = 'Lemon Yellow'
COLOR_RANGE = (255,255,77,74,80,56)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = YELLOW
class Gold( Gradient ):
NAME = 'Gold'
COLOR_RANGE = (0xDB,0xF8,0x96,0xDD,0x7C,0x00)
SETS = (CLOTHING,HAIR,MECHA,DETAILS,METAL)
FAMILY = YELLOW
class ElectricYellow( Gradient ):
NAME = 'Electric Yellow'
COLOR_RANGE = (255,224,0,120,69,80)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = YELLOW
class NobleGold( Gradient ):
NAME = 'NobleGold'
COLOR_RANGE = (255,249,128,69,38,23)
SETS = (CLOTHING,HAIR,MECHA,DETAILS,METAL)
FAMILY = YELLOW
class CharredBlonde( Gradient ):
NAME = 'Charred Blonde'
COLOR_RANGE = (255,255,208,111,80,56)
SETS = (HAIR,MECHA,DETAILS)
FAMILY = YELLOW
class Mustard( Gradient ):
NAME = 'Mustard'
COLOR_RANGE = (179,139,19,41,36,4)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = YELLOW
class GreenYellow( Gradient ):
NAME = 'Green Yellow'
COLOR_RANGE = (239,255,60,16,89,55)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = GREEN
class Celadon( Gradient ):
NAME = 'Celadon'
COLOR_RANGE = (232,255,190,19,60,46)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = GREEN
class MountainDew( Gradient ):
# This color's name is supposed to be rhyming slang for Zaku II
NAME = 'Mountain Dew'
COLOR_RANGE = (194,243,227,51,64,62)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = GREEN
class Avocado( Gradient ):
NAME = 'Avocado'
COLOR_RANGE = (183,224,82,31,34,36)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = GREEN
class ArmyDrab( Gradient ):
NAME = 'Army Drab'
COLOR_RANGE = (127,201,150,21,32,42)
SETS = (CLOTHING,MECHA)
FAMILY = GREEN
class GrassGreen( Gradient ):
NAME = 'Grass Green'
COLOR_RANGE = (138,232,93,3,47,70)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = GREEN
class Cactus( Gradient ):
NAME = 'Cactus'
COLOR_RANGE = (118,184,94,2,51,49)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = GREEN
class GriffinGreen( Gradient ):
NAME = 'Griffin Green'
COLOR_RANGE = (60,135,70,2,24,10)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = GREEN
class Olive( Gradient ):
NAME = 'Olive'
COLOR_RANGE = (126,153,72,13,18,8)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = GREEN
class DarkGreen( Gradient ):
NAME = 'Dark Green'
COLOR_RANGE = (43,140,0,0,36,26)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = GREEN
class MassiveGreen( Gradient ):
NAME = 'Massive Green'
COLOR_RANGE = (78,161,107,0,9,43)
SETS = (CLOTHING,MECHA)
FAMILY = GREEN
class ForestGreen( Gradient ):
NAME = 'ForestGreen'
COLOR_RANGE = (78,204,52,12,50,19)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = GREEN
class Malachite( Gradient ):
NAME = 'Malachite'
COLOR_RANGE = (0,255,94,12,78,35)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = GREEN
class SeaGreen( Gradient ):
NAME = 'SeaGreen'
COLOR_RANGE = (89,169,153,0,32,29)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = GREEN
class Jade( Gradient ):
NAME = 'Jade'
COLOR_RANGE = (115,255,223,17,49,87)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = GREEN
class Viridian( Gradient ):
NAME = 'Viridian'
COLOR_RANGE = (104,213,169,7,40,90)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = GREEN
class DoctorGreen( Gradient ):
NAME = 'Doctor Green'
COLOR_RANGE = (85,236,193,24,66,54)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = GREEN
class FlourescentGreen( Gradient ):
NAME = 'Flourescent Green'
COLOR_RANGE = (222,255,0,0,121,106)
SETS = (HAIR,DETAILS)
FAMILY = GREEN
class AeroBlue( Gradient ):
NAME = 'Aero Blue'
COLOR_RANGE = (240,252,255,42,66,93)
SETS = (CLOTHING,MECHA,DETAILS,METAL)
FAMILY = BLUE
class Aquamarine( Gradient ):
NAME = 'Aquamarine'
COLOR_RANGE = (171,255,240,50,0,103)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = BLUE
class SkyBlue( Gradient ):
NAME = 'Sky Blue'
COLOR_RANGE = (96,255,255,30,88,118)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = BLUE
class Cyan( Gradient ):
NAME = 'Cyan'
COLOR_RANGE = (0,255,234,0,79,86)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = BLUE
class Turquoise( Gradient ):
NAME = 'Turquoise'
COLOR_RANGE = (50,250,222,60,0,90)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = BLUE
class FadedDenim( Gradient ):
NAME = 'Faded Denim'
COLOR_RANGE = (222,233,249,0,7,97)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = BLUE
class SteelBlue( Gradient ):
NAME = 'Steel Blue'
COLOR_RANGE = (117,183,230,38,17,50)
SETS = (CLOTHING,HAIR,MECHA,METAL)
FAMILY = BLUE
class FreedomBlue( Gradient ):
NAME = 'Freedom Blue'
COLOR_RANGE = (21,177,255,12,3,36)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = BLUE
class PlasmaBlue( Gradient ):
NAME = 'Plasma Blue'
COLOR_RANGE = (247,255,232,0,128,171)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = BLUE
class Azure( Gradient ):
NAME = 'Azure'
COLOR_RANGE = (47,151,198,26,0,79)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = BLUE
class BugBlue( Gradient ):
NAME = 'Bug Blue'
COLOR_RANGE = (49,85,153,46,3,43)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = BLUE
class Cobalt( Gradient ):
NAME = 'Cobalt'
COLOR_RANGE = (8,79,179,17,3,64)
SETS = (CLOTHING,HAIR,MECHA,METAL)
FAMILY = BLUE
class PrussianBlue( Gradient ):
NAME = 'Prussian Blue'
COLOR_RANGE = (0,136,217,10,10,18)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = BLUE
class MidnightBlue( Gradient ):
NAME = 'Midnight Blue'
COLOR_RANGE = (37,60,163,10,0,16)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = BLUE
class DeepSeaBlue( Gradient ):
NAME = 'Deep Sea Blue'
COLOR_RANGE = (99,136,172,25,5,41)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = BLUE
class StarViolet( Gradient ):
NAME = 'Star Violet'
COLOR_RANGE = (236,163,231,48,24,82)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = PURPLE
class Fuschia( Gradient ):
NAME = 'Fuschia'
COLOR_RANGE = (191,112,247,35,31,69)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = PURPLE
class Twilight( Gradient ):
NAME = 'Twilight'
COLOR_RANGE = (255,170,255,0,69,82)
SETS = (HAIR,DETAILS)
FAMILY = PURPLE
class HeavyPurple( Gradient ):
NAME = 'Heavy Purple'
COLOR_RANGE = (142, 96, 176,16,7,71)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = PURPLE
class KettelPurple( Gradient ):
NAME = 'Kettel Purple'
COLOR_RANGE = (170,68,204,27,16,64)
SETS = (CLOTHING,MECHA)
FAMILY = PURPLE
class Wine( Gradient ):
NAME = 'Wine'
COLOR_RANGE = (210,62,105,44,16,92)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = PURPLE
class Eggplant( Gradient ):
NAME = 'Eggplant'
COLOR_RANGE = (209,95,217,60,9,98)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = PURPLE
class Grape( Gradient ):
NAME = 'Grape'
COLOR_RANGE = (120,20,204,30,14,43)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = PURPLE
class ShiningViolet( Gradient ):
NAME = 'Shining Violet'
COLOR_RANGE = (255,0,240,64,40,156)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = PURPLE
class Straw( Gradient ):
NAME = 'Straw'
COLOR_RANGE = (236,230,140,96,35,84)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = BROWN
class Beige( Gradient ):
NAME = 'Beige'
COLOR_RANGE = (235,185,171,45,31,60)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = BROWN
class RosyBrown( Gradient ):
NAME = 'Rosy Brown'
COLOR_RANGE = (245,192,192,101,12,51)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = BROWN
class Sandstone( Gradient ):
NAME = 'Sandstone'
COLOR_RANGE = (192,141,88,77,16,21)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = BROWN
class DarkBrown( Gradient ):
NAME = 'Dark Brown'
COLOR_RANGE = (166,115,49,51,0,23)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = BROWN
class Cinnamon( Gradient ):
NAME = 'Cinnamon'
COLOR_RANGE = (207,123,0,51,10,44)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = BROWN
class Terracotta( Gradient ):
NAME = 'Terracotta'
COLOR_RANGE = (237,67,45,89,31,91)
SETS = (CLOTHING,HAIR,MECHA)
FAMILY = BROWN
class GothSkin( Gradient ):
NAME = 'Goth Skin'
COLOR_RANGE = (255,232,248,47,100,70)
SETS = (SKIN,METAL)
FAMILY = BROWN
class Alabaster( Gradient ):
NAME = 'Alabaster'
COLOR_RANGE = (255,251,242,169,115,96)
SETS = (CLOTHING,SKIN,MECHA)
FAMILY = WHITE
class Maize( Gradient ):
NAME = 'Maize'
COLOR_RANGE = (251,236,93,88,76,57)
SETS = (CLOTHING,HAIR,MECHA,DETAILS)
FAMILY = BROWN
class Burlywood( Gradient ):
NAME = 'Burlywood'
COLOR_RANGE = (255,233,170,124,48,32)
SETS = (SKIN,MECHA,DETAILS)
FAMILY = BROWN
class LightSkin( Gradient ):
NAME = 'Light Skin'
COLOR_RANGE = (255,237,189,135,94,75)
SETS = (SKIN,)
FAMILY = BROWN
class SandyBrown( Gradient ):
NAME = 'Sandy Brown'
COLOR_RANGE = (255,214,135,131,82,51)
SETS = (CLOTHING,SKIN,HAIR,MECHA,DETAILS)
FAMILY = BROWN
class TannedSkin( Gradient ):
NAME = 'Tanned Skin'
COLOR_RANGE = (242,180,119,99,58,38)
SETS = (SKIN,)
FAMILY = BROWN
class MediumSkin( Gradient ):
NAME = 'Medium Skin'
COLOR_RANGE = (236,181,147,30,20,16)
SETS = (SKIN,)
FAMILY = BROWN
class Leather( Gradient ):
NAME = 'Leather'
COLOR_RANGE = (204,159,120,54,38,32)
SETS = (CLOTHING,SKIN,HAIR,MECHA)
FAMILY = BROWN
class Chocolate( Gradient ):
NAME = 'Chocolate'
COLOR_RANGE = (181,91,49,51,19,14)
SETS = (CLOTHING,SKIN,HAIR,MECHA)
FAMILY = BROWN
class DarkSkin( Gradient ):
NAME = 'Dark Skin'
COLOR_RANGE = (122,78,42,17,8,5)
SETS = (SKIN,)
FAMILY = BROWN
class Black( Gradient ):
NAME = 'Black'
COLOR_RANGE = (64,64,64,10,10,10)
SETS = (CLOTHING,HAIR,MECHA,DETAILS,METAL)
FAMILY = BLACK
class GhostGrey( Gradient ):
NAME = 'Ghost Grey'
COLOR_RANGE = (181,243,203,9,12,116)
SETS = (CLOTHING,MECHA,METAL)
FAMILY = GREY
class DeepGrey( Gradient ):
NAME = 'Deep Grey'
COLOR_RANGE = (102,102,120,42,42,42)
SETS = (CLOTHING,HAIR,MECHA,METAL)
FAMILY = GREY
class FieldGrey( Gradient ):
NAME = 'Field Grey'
COLOR_RANGE = (98,118,103,41,50,46)
SETS = (CLOTHING,MECHA,METAL)
FAMILY = GREY
class DimGrey( Gradient ):
NAME = 'Dim Grey'
COLOR_RANGE = (140,140,140,56,56,64)
SETS = (CLOTHING,HAIR,MECHA,METAL)
FAMILY = GREY
class WarmGrey( Gradient ):
NAME = 'Warm Grey'
COLOR_RANGE = (184,169,136,64,57,48)
SETS = (CLOTHING,MECHA,METAL)
FAMILY = GREY
class BattleshipGrey( Gradient ):
NAME = 'Battleship Grey'
COLOR_RANGE = (169,183,145,69,77,61)
SETS = (CLOTHING,HAIR,MECHA,METAL)
FAMILY = GREY
class LunarGrey( Gradient ):
NAME = 'Lunar Grey'
COLOR_RANGE = (146,166,164,65,70,70)
SETS = (CLOTHING,MECHA,METAL)
FAMILY = GREY
class SlateGrey( Gradient ):
NAME = 'Slate Grey'
COLOR_RANGE = (143,173,196,63,69,77)
SETS = (CLOTHING,HAIR,MECHA,METAL)
FAMILY = GREY
class GullGrey( Gradient ):
NAME = 'Gull Grey'
COLOR_RANGE = (200,220,234,80,86,98)
SETS = (CLOTHING,MECHA,METAL)
FAMILY = GREY
class CeramicColor( Gradient ):
NAME = 'Ceramic'
COLOR_RANGE = (255,255,255,90,115,124)
SETS = (CLOTHING,MECHA,DETAILS,METAL)
FAMILY = WHITE
class Cream( Gradient ):
NAME = 'Cream'
COLOR_RANGE = (255,253,219,135,123,97)
SETS = (CLOTHING,MECHA,DETAILS)
FAMILY = WHITE
class White( Gradient ):
NAME = 'White'
COLOR_RANGE = (255,255,255,106,95,108)
SETS = (CLOTHING,MECHA)
FAMILY = WHITE
class ShiningWhite( Gradient ):
NAME = 'Shining White'
COLOR_RANGE = (255,255,255,0,110,120)
SETS = (CLOTHING,MECHA,METAL)
FAMILY = WHITE
ALL_COLORS = list()
CLOTHING_COLORS = list()
SKIN_COLORS = list()
HAIR_COLORS = list()
MECHA_COLORS = list()
DETAIL_COLORS = list()
METAL_COLORS = list()
COLOR_LISTS = {
CLOTHING: CLOTHING_COLORS,
SKIN: SKIN_COLORS,
HAIR: HAIR_COLORS,
MECHA: MECHA_COLORS,
DETAILS: DETAIL_COLORS,
METAL: METAL_COLORS
}
CHARACTER_COLOR_LISTS = (CLOTHING_COLORS,SKIN_COLORS,HAIR_COLORS,DETAIL_COLORS,CLOTHING_COLORS)
CHARACTER_COLOR_CHANNELS = (CLOTHING,SKIN,HAIR,DETAILS,CLOTHING)
MECHA_COLOR_CHANNELS = (MECHA,MECHA,DETAILS,METAL,MECHA)
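# Populate the palette lists by scanning this module for Gradient subclasses.
# This registration step is a sketch under the assumption that each subclass
# advertises its usable channels via SETS; without some such step the
# random_* helpers below would be choosing from empty lists.
for _color in list(globals().values()):
    if isinstance(_color, type) and issubclass(_color, Gradient) and _color is not Gradient:
        ALL_COLORS.append(_color)
        for _set in _color.SETS:
            COLOR_LISTS[_set].append(_color)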
def random_character_colors():
return [random.choice(CLOTHING_COLORS),random.choice(SKIN_COLORS),random.choice(HAIR_COLORS),random.choice(DETAIL_COLORS),random.choice(CLOTHING_COLORS)]
def random_mecha_colors():
return [random.choice(MECHA_COLORS),random.choice(MECHA_COLORS),random.choice(DETAIL_COLORS),random.choice(METAL_COLORS),random.choice(MECHA_COLORS)]
| [
"[email protected]"
]
| |
9c3a7ff70a130962100e8013422cbac5532e9c68 | 1f006f0c7871fcde10986c4f5cec916f545afc9f | /apps/utils/xml_util.py | 88436818198431463283c56e400e32b729582e71 | []
| no_license | ptsefton/integrated-content-environment | 248b8cd29b29e8989ec1a154dd373814742a38c1 | c1d6b5a1bea3df4dde10cb582fb0da361dd747bc | refs/heads/master | 2021-01-10T04:46:09.319989 | 2011-05-05T01:42:52 | 2011-05-05T01:42:52 | 36,273,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,197 | py |
# Copyright (C) 2006 Distance and e-Learning Centre,
# University of Southern Queensland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
""" """
import libxml2
import os
import types
import tempfile
class _Node(object):
# Constructor
#
# Properties
#
#
# Methods
# getType()
# isElement()
# getName()
# setName(name)
# getNode(xpath)
# getNodes(xpath)
# getFirstChild()
# getLastChild()
# getNextSibling()
# getPrevSibling()
# getParent()
# getChildren()
# copy()
# getAttributes()
# getAttribute(name)
# setAttribute(name, content)
# removeAttribute(name)
# setRawContent(rawText)
# setContent(text)
# addRawContent(text)
# addContent(text)
# getContent(xpath=None)
# getContents(xpath=None)
# addChild(node)
# addChildren(nodeList)
# addNextSibling(node)
# addPrevSibling(node)
# delete()
# remove()
# replace(nodeOrNodeList)
# serialize(format=True)
# __str__()
#
#
#
def __init__(self, node):
self.__node = node
def __newNode(self, node):
if node==None:
return None
else:
return _Node(node)
def getType(self):
return self.__node.type
def isElement(self):
return self.__node.type=="element"
def getName(self):
return self.__node.name
def setName(self, name):
self.__node.setName(name)
def getNode(self, xpath):
nodes = self.__node.xpathEval(xpath)
if len(nodes)>0:
return self.__newNode(nodes[0])
def getNode2(self, xpath):
nodes = self.__node.xpathEval(xpath)
if len(nodes)>0:
return self.__newNode(nodes[0])
def getNodes(self, xpath):
nodes = []
ns = self.__node.xpathEval(xpath)
for n in ns:
nodes.append(self.__newNode(n))
return nodes
def getFirstChild(self):
return self.__newNode(self.__node.children)
def getLastChild(self):
return self.__newNode(self.__node.last)
def getNextSibling(self):
return self.__newNode(self.__node.next)
def getPrevSibling(self):
return self.__newNode(self.__node.prev)
def getParent(self):
p = self.__node.parent
if p!=None and p.type=="element":
return self.__newNode(self.__node.parent)
else:
return None
def getChildren(self):
""" returns a list of nodes (NodeList) """
nodes = []
n = self.__node.children
while n!=None:
nodes.append(self.__newNode(n))
n = n.next
return nodes
def copy(self):
return self.__newNode(self.__node.copyNode(1))
def getAttributes(self):
atts = {}
att = self.__node.properties
while att!=None:
atts[att.name] = att.content
att = att.next
return atts
def getAttribute(self, name):
att = self.__node.hasProp(name)
if att==None:
return None
else:
return att.content
def setAttribute(self, name, content):
att = self.__node.hasProp(name)
if att==None:
att = self.__node.newProp(name, content)
else:
#Note: this is a work around for a bug when adding 'one & two' type text
t = libxml2.newText(content)
content = t.serialize()
t.freeNode()
att.setContent(content)
def removeAttribute(self, name):
att = self.__node.hasProp(name)
if att!=None:
att.removeProp()
def setRawContent(self, rawText):
self.__node.setContent(rawText)
return self
def setContent(self, text):
#Note: this is a work around for a bug when adding 'one & two' type text
t = libxml2.newText(text)
text = t.serialize()
t.freeNode()
self.__node.setContent(text)
return self
def addRawContent(self, text):
tmpNode = libxml2.newNode("tmp")
try:
tmpNode.setContent(text)
textNode = tmpNode.children
textNode.replaceNode(None)
tmpNode.freeNode()
self.__node.addChild(textNode)
except:
tmpNode.freeNode()
raise Exception("text is not a well-formed raw xml string")
def addContent(self, text):
self.__node.addContent(text)
return self
def getContent(self, xpath=None):
if xpath==None:
return self.__node.content
else:
node = self.getNode(xpath)
if node!=None:
return node.__node.content
else:
return None
content = property(getContent, setContent)
def getContents(self, xpath=None):
if xpath==None:
return [self.__node.content]
else:
contents = []
nodes = self.__node.xpathEval(xpath)
for node in nodes:
contents.append(node.content)
return contents
def addChild(self, node):
if node.__node.parent!=None:
node.__node.replaceNode(None)
self.__node.addChild(node.__node)
return self
def addChildren(self, nodeList):
for node in nodeList:
self.addChild(node)
return self
def addNextSibling(self, node):
self.__node.addNextSibling(node.__node)
return self
def addPrevSibling(self, node):
self.__node.addPrevSibling(node.__node)
return self
def delete(self):
if self.__node.parent!=None:
self.__node.replaceNode(None)
self.__node.freeNode()
self.__node = None
def remove(self):
self.__node.replaceNode(None)
return self
#def __del__(self):
# # must keep a record of it's parent document to do this because the parent may have already been GC!
# if self.__node!=None and self.__node.parent==None:
# print "Warning: node type='%s', name='%s' has no parent and has not been deleted!" % (self.__node.type, self.__node.name)
def replace(self, nodeOrNodeList):
if isinstance(nodeOrNodeList, _Node):
self.__node.replaceNode(nodeOrNodeList.__node)
else:
if self.__node.type=="text" and len(nodeOrNodeList)>0:
node = nodeOrNodeList.pop(0)
self.__node.replaceNode(node.__node)
for n in nodeOrNodeList:
node.__node.addNextSibling(n.__node)
node = n
else:
for node in nodeOrNodeList:
self.__node.addPrevSibling(node.__node)
self.__node.replaceNode(None)
return self
def serialize(self, format=True):
if format:
return self.__node.serialize(None, 1)
else:
return self.__node.serialize()
def __str__(self):
return self.__node.serialize()
class xml(_Node):
# Note: extends the _Node class
# Constructor
# __init__(xmlcontent, nsList=None, parseAsHtml=False, dom=None)
# Properties
# fileName
# isHtml
# nsList
#
# Methods
# close()
# addNamespaceList(nsList)
# getRootNode()
# createElement(elementName, elementContent=None, elementNS=None, **args)
# createText(content)
# createPI(name, content=None)
# createComment(content)
# addComment(content)
# addElement(elementName)
# getNode(xpath) *
# getNodes(xpath) *
# getContents(xpath) *
# xmlStringToElement(xmlString)
# xmlStringToNodeList(xmlString)
# applyXslt(xslt)
# saveFile(fileName=None)
## Plus the following inherited methods
# getType()
# isElement()
# getName()
# setName(name)
# getNode(xpath) - overriden
# getNodes(xpath) - overriden
# getFirstChild()
# getLastChild()
# getNextSibling()
# getPrevSibling()
# getParent()
# getChildren()
# copy()
# getAttributes()
# getAttribute(name)
# setAttribute(name, content)
# removeAttribute(name)
# setRawContent(rawText)
# setContent(text)
# addRawContent(text)
# addContent(text)
# getContent(xpath=None)
# getContents(xpath=None) - overriden
# addChild(node)
# addChildren(nodeList)
# addNextSibling(node)
# addPrevSibling(node)
# delete()
# remove()
# replace(nodeOrNodeList)
# serialize(format=True)
# __str__()
#
    @staticmethod
    def newXml(xmlcontent, nsList=[], parseAsHtml=False, dom=None):
        return xml(xmlcontent, nsList=nsList, parseAsHtml=parseAsHtml, dom=dom)
def __init__(self, xmlcontent, nsList=[], parseAsHtml=False, dom=None):
# Note: the 'dom' argument is only for internal use. Please do not use.
self.fileName = None
self.isHtml = False
self.nsList = []
self.__dom = None
try:
if dom!=None:
self.__dom = dom
elif xmlcontent is None:
raise Exception("xmlcontent is None.")
elif os.path.isfile(xmlcontent):
self.fileName = xmlcontent
try:
if parseAsHtml: raise
self.__dom = libxml2.parseFile(xmlcontent)
except:
if not parseAsHtml:
print "Warning: parsing '%s' as HTML" % self.fileName
self.__dom = libxml2.htmlParseFile(xmlcontent, "UTF-8")
self.isHtml = True
else:
if xmlcontent.startswith("<"):
try:
if parseAsHtml: raise
self.__dom = libxml2.parseDoc(xmlcontent)
except:
if not xmlcontent.startswith("<"):
                        raise Exception("'%s' is not XML" % xmlcontent)
self.__dom = libxml2.htmlParseDoc(xmlcontent, "UTF-8")
self.isHtml = True
else:
raise Exception("No xml content given!")
#self.__dom = libxml2.parseDoc("<root/>")
except Exception, e:
msg = "xml_util.xml.__init__() ERROR - '%s'" % str(e)
print msg
#print "xmlcontent='%s'" % xmlcontent
raise e
self.__context = self.__dom.xpathNewContext()
self.addNamespaceList(nsList)
_Node.__init__(self, self.__dom)
self.__rootNode = _Node(self.__dom.getRootElement())
def __del__(self):
if self.__dom!=None:
print "Warning: xml object was not closed! (fileName='%s')" % self.fileName
print " Closing now."
self.close()
def close(self):
if self.__dom != None:
self.__dom.freeDoc()
self.__dom = None
self.__context = None
def addNamespaceList(self, nsList):
for nsName, nsUrl in nsList:
self.__context.xpathRegisterNs(nsName, nsUrl)
self.nsList += nsList
def getRootNode(self):
return self.__rootNode
def createElement(self, elementName, elementContent=None, elementNS=None, **args):
node = self.__dom.newDocNode(elementNS, elementName, None)
for name, content in args.items():
node.newProp(name, str(content))
node = _Node(node)
if elementContent!=None:
node.setContent(elementContent)
return node
def createText(self, content):
node = self.__dom.newDocText(content)
return _Node(node)
def createPI(self, name, content=None):
node = self.__dom.newDocPI(name, content)
return _Node(node)
def createComment(self, content):
node = self.__dom.newDocComment(content)
return _Node(node)
def addComment(self, content):
node = self.createComment(content)
self.__rootNode.addChild(node)
def addElement(self, elementName):
node = self.createElement(elementName)
self.__rootNode.addChild(node)
return node
def getNode(self, xpath):
node = None
ns = self.__context.xpathEval(xpath)
if len(ns)>0:
node = _Node(ns[0])
return node
def getNodes(self, xpath):
nodes = []
ns = self.__context.xpathEval(xpath)
for n in ns:
nodes.append(_Node(n))
return nodes
def getContents(self, xpath):
nodes = []
ns = self.__context.xpathEval(xpath)
for n in ns:
nodes.append(n.content)
return nodes
def xmlStringToElement(self, xmlString, parseAsHtml=False):
if parseAsHtml:
dom = libxml2.htmlParseDoc(xmlString, "UTF-8")
else:
dom = libxml2.parseDoc(xmlString)
node = dom.getRootElement()
node.replaceNode(None)
# fixup context
self.__dom.addChild(node)
node.replaceNode(None)
node = _Node(node)
dom.free()
return node
def xmlStringToNodeList(self, xmlString):
nodeList = []
dom = libxml2.parseDoc("<r>" + xmlString + "</r>")
rootNode = dom.getRootElement()
node = rootNode.children
while node!=None:
nodeList.append(_Node(node))
nextNode = node.next
node.replaceNode(None)
# fixup context
self.__dom.addChild(node)
node.replaceNode(None)
node = nextNode
dom.free()
return nodeList
def applyXslt(self, xslt):
dom = xslt.transformToDom(self.__dom)
return self.__class__(xmlcontent="", dom=dom)
#===============================
# def walk(self, node=None):
# if node==None:
# node = self.__context.getRootElement()
# for n in node.walk_depth_first():
# if n.type=="element":
# yield n.type, n.name, None
# p = n.properties
# while p!=None:
# yield p.type, p.name, p.content
# p = p.next
# else:
# yield n.type, n.name, n.content
def saveFile(self, fileName=None):
if fileName==None:
fileName = self.fileName
if fileName==None:
return
self.__dom.saveFile(fileName)
#def __str__(self):
# return self.__context.getRootElement().serialize()
# libxml2 Reading
# children - (property) first child
# next - (property) next child
# prev - (property) prev child
# type - (property string) "document_xml", "text", "element", "comment", "pi", "attribut", "framement"
# name - (property) the name of the node
# properties - (property) the first attribute of an element (next for the next proptery)
# content - (property) the content of the node
# getRootElement() - get the rootElement
# get_doc() - get the document DOM for the node
# walk_depth_first() - walk the nodes within a node or document
# libxml2 Writing
# doc.newCDataBlock(content, len(content))
# doc.newDocComment(content)
# doc.newDocFragment()
# doc.newDocPI(name, content)
# doc.newDocText(content)
# doc.newDocNode(namespace, elementName, content) or (None, name, None)
#
# node.newChild(ns, elementName, content) # also adds the element
# node.newProp(attName, content) # and adds
# node.addContent(content) # adds text
# node.addChild(node) # or fragment
# node.addNextSibling(node)
# node.addPrevSibling(node)
# node.addSibling(node)
# node.get_doc()
# free() dom.free()
# freeNode() node.freeNode()
# freeNodeList()
# saveFile(filename)
# libxml2.newNode(name)
# libxml2.newPI(name, content)
# libxml2.newComment(content)
# libxml2.newText(content)
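# A minimal usage sketch (the XML snippet and xpath below are illustrative):
if __name__ == "__main__":
    doc = xml("<root><item id='1'>one</item></root>")
    node = doc.getNode("/root/item")
    print "id=%s content=%s" % (node.getAttribute("id"), node.getContent())
    doc.close()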
| [
"[email protected]@110e3293-9ef9-cb8f-f479-66bdb1942d05"
]
| [email protected]@110e3293-9ef9-cb8f-f479-66bdb1942d05 |
afc4588addf41ced43e5b3c252a496aba4eb81b9 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_116/1400.py | 835aa64a29d9a0037854dec8a6d7fb866cca78a5 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | '''Qualification Round Problem A. Speaking in Tongues'''
import sys
SIZE = 4
def checkLine( line ):
o = 0
x = 0
for cell in line:
if cell == 'O':
o += 1
elif cell == 'X':
x += 1
elif cell == '.':
return None
if o == 0:
return 'X won'
elif x == 0:
return 'O won'
else:
return None
def checkResult( board ):
# horizontal
for line in board:
result = checkLine( line )
if result is not None:
return result
# vertical
for i in range( SIZE ):
line = ( row[ i ] for row in board )
result = checkLine( line )
if result is not None:
return result
# diagonal
line = ( row[ i ] for i, row in enumerate( board ) )
result = checkLine( line )
if result is not None:
return result
line = ( row[ SIZE - i - 1 ] for i, row in enumerate( board ) )
result = checkLine( line )
if result is not None:
return result
# completion-check
for line in board:
if line.find( '.' ) >= 0:
return 'Game has not completed'
return 'Draw'
def main( input ):
count = int( input.readline() )
for index in range( 1, count + 1 ):
board = []
for rowIndex in range( SIZE ):
line = input.readline().strip()
board.append( line )
input.readline()
result = checkResult( board )
print( 'Case #{}: {}'.format( index, result ) )
main( sys.stdin )
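# Input format assumed here (Code Jam "Tic-Tac-Toe-Tomek" data files): the
# first line is the number of test cases; each case is four rows of four
# characters drawn from 'X', 'O', 'T' and '.', followed by one blank line:
#
# 1
# XXXT
# ....
# OO..
# ....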
| [
"[email protected]"
]
| |
65f4ae8993271aeb836d270b7b7b32681944b932 | 46f8860e0e8d4252ad85d2121bb2387c74287a14 | /problem/p_1346_check_if_N_and_its_double_exist/solutions.py | 243895d2c72cb967c256c1796b9de2a4eb65c7cd | []
| no_license | Lee-W/leetcode | 2a11c853cf0a7f8bca39f94f93cc75401e5a1afa | b66e5a6016525bec98e7865d6e31f1dc9f0b4be6 | refs/heads/master | 2023-09-01T15:52:56.570276 | 2022-08-02T07:22:11 | 2022-08-02T07:22:11 | 180,076,811 | 3 | 1 | null | 2023-08-11T19:57:00 | 2019-04-08T05:35:55 | Python | UTF-8 | Python | false | false | 370 | py | from typing import List
class Solution:
def checkIfExist(self, arr: List[int]) -> bool:
for m_i, m in [(num_i, num) for num_i, num in enumerate(arr) if not num % 2]:
for n_i, num in enumerate(arr):
if m_i == n_i:
continue
if num * 2 == m:
return True
        return False
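# Quick sanity checks mirroring the LeetCode examples (run directly):
if __name__ == "__main__":
    s = Solution()
    assert s.checkIfExist([10, 2, 5, 3])      # 5 doubled is 10
    assert not s.checkIfExist([3, 1, 7, 11])  # nothing doubles another value
| [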
| [
"[email protected]"
]
| |
2a243a90b605d80d93b16d8354927b293145343d | 2aa15786d231136f4487ac904ada5719a0605f3d | /testData/typeinspection/duplicateField.py | 6e6cfe0b41ee6aabb07d0d69b67dc91fe82aedf2 | [
"Apache-2.0",
"MIT"
]
| permissive | koxudaxi/pydantic-pycharm-plugin | 7b2f248e45aceccb58e12e67abb34c89e32a53a0 | 61455a7d63c46d567e739ae05f15475b84142a16 | refs/heads/main | 2023-08-23T07:23:40.067425 | 2023-08-07T16:25:52 | 2023-08-07T16:25:52 | 197,027,423 | 362 | 13 | MIT | 2023-09-14T16:39:41 | 2019-07-15T15:41:01 | Kotlin | UTF-8 | Python | false | false | 91 | py |
from pydantic import BaseModel
class A(BaseModel):
a: str
a: int
A(a=int(123))
| [
"[email protected]"
]
| |
29e33034fc5b2b1e405a3d3e676bea6aaa78649f | d007f9482146278d6bf983768bb380fd984d27bb | /manage.py | b4be45e26482dd632dac839736efccafaeec6696 | [
"MIT"
]
| permissive | jacob975/poke_dual_emu | 251c4648c2980be86534fc6140f6db8572ccd660 | a172bf7763144b28b87ce5a493d4ccbff038fbe7 | refs/heads/master | 2022-11-30T07:04:44.491426 | 2020-08-07T13:55:04 | 2020-08-07T13:55:04 | 284,881,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/python3
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'poke_dual_emu.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
aeb28c4f953948dd08fcce328c4fd0340eafeac0 | e811c41caa55559d3b482f26c31fcef02ec66138 | /venv/Lib/site-packages/sqlalchemy/dialects/sqlite/pysqlite.py | c84a73473f195dc98a07dc9310c2b83dc3c77484 | [
"MIT"
]
| permissive | 1SouravGhosh/POC_REPO | 929ea865d60a51597966ffcfc4a7a3a350a00f54 | e486d9a1fe0e1215f24bac3aaf97517cda21a066 | refs/heads/master | 2022-11-01T09:53:56.443500 | 2019-02-17T16:21:48 | 2019-02-17T16:21:48 | 171,133,391 | 0 | 1 | MIT | 2022-10-22T04:17:54 | 2019-02-17T14:45:39 | Python | UTF-8 | Python | false | false | 15,089 | py | # sqlite/pysqlite.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: sqlite+pysqlite
:name: pysqlite
:dbapi: sqlite3
:connectstring: sqlite+pysqlite:///file_path
:url: http://docs.python.org/library/sqlite3.html
Note that ``pysqlite`` is the same driver as the ``sqlite3``
module included with the Python distribution.
Driver
------
When using Python 2.5 and above, the built in ``sqlite3`` driver is
already installed and no additional installation is needed. Otherwise,
the ``pysqlite2`` driver needs to be present. This is the same driver as
``sqlite3``, just with a different name.
The ``pysqlite2`` driver will be loaded first, and if not found, ``sqlite3``
is loaded. This allows an explicitly installed pysqlite driver to take
precedence over the built in one. As with all dialects, a specific
DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control
this explicitly::
from sqlite3 import dbapi2 as sqlite
e = create_engine('sqlite+pysqlite:///file.db', module=sqlite)
Connect Strings
---------------
The file specification for the SQLite database is taken as the "database"
portion of the URL. Note that the format of a SQLAlchemy url is::
driver://user:pass@host/database
This means that the actual filename to be used starts with the characters to
the **right** of the third slash. So connecting to a relative filepath
looks like::
# relative path
e = create_engine('sqlite:///path/to/database.db')
An absolute path, which is denoted by starting with a slash, means you
need **four** slashes::
# absolute path
e = create_engine('sqlite:////path/to/database.db')
To use a Windows path, regular drive specifications and backslashes can be
used. Double backslashes are probably needed::
# absolute path on Windows
e = create_engine('sqlite:///C:\\path\\to\\database.db')
The sqlite ``:memory:`` identifier is the default if no filepath is
present. Specify ``sqlite://`` and nothing else::
# in-memory database
e = create_engine('sqlite://')
Compatibility with sqlite3 "native" date and datetime types
-----------------------------------------------------------
The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
sqlite3.PARSE_COLNAMES options, which have the effect of any column
or expression explicitly cast as "date" or "timestamp" will be converted
to a Python date or datetime object. The date and datetime types provided
with the pysqlite dialect are not currently compatible with these options,
since they render the ISO date/datetime including microseconds, which
pysqlite's driver does not. Additionally, SQLAlchemy does not at
this time automatically render the "cast" syntax required for the
freestanding functions "current_timestamp" and "current_date" to return
datetime/date types natively. Unfortunately, pysqlite
does not provide the standard DBAPI types in ``cursor.description``,
leaving SQLAlchemy with no way to detect these types on the fly
without expensive per-row type checks.
Keeping in mind that pysqlite's parsing option is not recommended,
nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
can be forced if one configures "native_datetime=True" on create_engine()::
engine = create_engine('sqlite://',
connect_args={'detect_types':
sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
native_datetime=True
)
With this flag enabled, the DATE and TIMESTAMP types (but note - not the
DATETIME or TIME types...confused yet ?) will not perform any bind parameter
or result processing. Execution of "func.current_date()" will return a string.
"func.current_timestamp()" is registered as returning a DATETIME type in
SQLAlchemy, so this function still receives SQLAlchemy-level result
processing.
.. _pysqlite_threading_pooling:
Threading/Pooling Behavior
---------------------------
Pysqlite's default behavior is to prohibit the usage of a single connection
in more than one thread. This is originally intended to work with older
versions of SQLite that did not support multithreaded operation under
various circumstances. In particular, older SQLite versions
did not allow a ``:memory:`` database to be used in multiple threads
under any circumstances.
Pysqlite does include a now-undocumented flag known as
``check_same_thread`` which will disable this check, however note that
pysqlite connections are still not safe to use concurrently in multiple
threads. In particular, any statement execution calls would need to be
externally mutexed, as Pysqlite does not provide for thread-safe propagation
of error messages among other things. So while even ``:memory:`` databases
can be shared among threads in modern SQLite, Pysqlite doesn't provide enough
thread-safety to make this usage worth it.
SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
* When a ``:memory:`` SQLite database is specified, the dialect by default
will use :class:`.SingletonThreadPool`. This pool maintains a single
connection per thread, so that all access to the engine within the current
thread use the same ``:memory:`` database - other threads would access a
different ``:memory:`` database.
* When a file-based database is specified, the dialect will use
:class:`.NullPool` as the source of connections. This pool closes and
discards connections which are returned to the pool immediately. SQLite
file-based connections have extremely low overhead, so pooling is not
necessary. The scheme also prevents a connection from being used again in
a different thread and works best with SQLite's coarse-grained file locking.
.. versionchanged:: 0.7
Default selection of :class:`.NullPool` for SQLite file-based databases.
Previous versions select :class:`.SingletonThreadPool` by
default for all SQLite databases.
Using a Memory Database in Multiple Threads
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use a ``:memory:`` database in a multithreaded scenario, the same
connection object must be shared among threads, since the database exists
only within the scope of that connection. The
:class:`.StaticPool` implementation will maintain a single connection
globally, and the ``check_same_thread`` flag can be passed to Pysqlite
as ``False``::
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite://',
connect_args={'check_same_thread':False},
poolclass=StaticPool)
Note that using a ``:memory:`` database in multiple threads requires a recent
version of SQLite.
Using Temporary Tables with SQLite
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Due to the way SQLite deals with temporary tables, if you wish to use a
temporary table in a file-based SQLite database across multiple checkouts
from the connection pool, such as when using an ORM :class:`.Session` where
the temporary table should continue to remain after :meth:`.Session.commit` or
:meth:`.Session.rollback` is called, a pool which maintains a single
connection must be used. Use :class:`.SingletonThreadPool` if the scope is
only needed within the current thread, or :class:`.StaticPool` if scope is
needed within multiple threads for this case::
# maintain the same connection per thread
from sqlalchemy.pool import SingletonThreadPool
engine = create_engine('sqlite:///mydb.db',
poolclass=SingletonThreadPool)
# maintain the same connection across all threads
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite:///mydb.db',
poolclass=StaticPool)
Note that :class:`.SingletonThreadPool` should be configured for the number
of threads that are to be used; beyond that number, connections will be
closed out in a non deterministic way.
Unicode
-------
The pysqlite driver only returns Python ``unicode`` objects in result sets,
never plain strings, and accommodates ``unicode`` objects within bound
parameter values in all cases. Regardless of the SQLAlchemy string type in
use, string-based result values will be Python ``unicode`` in Python 2.
The :class:`.Unicode` type should still be used to indicate those columns that
require unicode, however, so that non-``unicode`` values passed inadvertently
will emit a warning. Pysqlite will emit an error if a non-``unicode`` string
is passed containing non-ASCII characters.
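For example, a minimal round trip through a :class:`.Unicode` column (the
table and column names below are illustrative)::
    from sqlalchemy import MetaData, Table, Column, Integer, Unicode
    metadata = MetaData()
    words = Table('words', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('word', Unicode(50)))
    engine = create_engine('sqlite://')
    metadata.create_all(engine)
    engine.execute(words.insert(), word=u'détente')
    row = engine.execute(words.select()).fetchone()
    # under Python 2 this is a ``unicode`` object, never a plain string
    assert row.word == u'détente'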
.. _pysqlite_serializable:
Serializable isolation / Savepoints / Transactional DDL
-------------------------------------------------------
In the section :ref:`sqlite_concurrency`, we refer to the pysqlite
driver's assortment of issues that prevent several features of SQLite
from working correctly. The pysqlite DBAPI driver has several
long-standing bugs which impact the correctness of its transactional
behavior. In its default mode of operation, SQLite features such as
SERIALIZABLE isolation, transactional DDL, and SAVEPOINT support are
non-functional, and in order to use these features, workarounds must
be taken.
The issue is essentially that the driver attempts to second-guess the user's
intent, failing to start transactions and sometimes ending them prematurely, in
an effort to minimize the SQLite database's file locking behavior, even
though SQLite itself uses "shared" locks for read-only activities.
SQLAlchemy chooses to not alter this behavior by default, as it is the
long-expected behavior of the pysqlite driver; if and when the pysqlite
driver attempts to repair these issues, that will be more of a driver toward
changing SQLAlchemy's defaults.
The good news is that with a few events, we can implement transactional
support fully, by disabling pysqlite's transactional integration entirely
and emitting BEGIN ourselves. This is achieved using two event listeners::
from sqlalchemy import create_engine, event
engine = create_engine("sqlite:///myfile.db")
@event.listens_for(engine, "connect")
def do_connect(dbapi_connection, connection_record):
# disable pysqlite's emitting of the BEGIN statement entirely.
# also stops it from emitting COMMIT before any DDL.
dbapi_connection.isolation_level = None
@event.listens_for(engine, "begin")
def do_begin(conn):
# emit our own BEGIN
conn.execute("BEGIN")
Above, we intercept a new pysqlite connection and disable any transactional
integration. Then, at the point at which SQLAlchemy knows that transaction
scope is to begin, we emit ``"BEGIN"`` ourselves.
When we take control of ``"BEGIN"``, we can also control directly SQLite's
locking modes, introduced at
`BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_,
by adding the desired locking mode to our ``"BEGIN"``::
@event.listens_for(engine, "begin")
def do_begin(conn):
conn.execute("BEGIN EXCLUSIVE")
.. seealso::
`BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_ -
on the SQLite site
`sqlite3 SELECT does not BEGIN a transaction <http://bugs.python.org/issue9924>`_ -
on the Python bug tracker
`sqlite3 module breaks transactions and potentially corrupts data <http://bugs.python.org/issue10740>`_ -
on the Python bug tracker
""" # noqa
import os
from .base import DATE
from .base import DATETIME
from .base import SQLiteDialect
from ... import exc
from ... import pool
from ... import types as sqltypes
from ... import util
class _SQLite_pysqliteTimeStamp(DATETIME):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATETIME.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATETIME.result_processor(self, dialect, coltype)
class _SQLite_pysqliteDate(DATE):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATE.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATE.result_processor(self, dialect, coltype)
class SQLiteDialect_pysqlite(SQLiteDialect):
default_paramstyle = "qmark"
colspecs = util.update_copy(
SQLiteDialect.colspecs,
{
sqltypes.Date: _SQLite_pysqliteDate,
sqltypes.TIMESTAMP: _SQLite_pysqliteTimeStamp,
},
)
if not util.py2k:
description_encoding = None
driver = "pysqlite"
def __init__(self, **kwargs):
SQLiteDialect.__init__(self, **kwargs)
if self.dbapi is not None:
sqlite_ver = self.dbapi.version_info
if sqlite_ver < (2, 1, 3):
util.warn(
(
"The installed version of pysqlite2 (%s) is out-dated "
"and will cause errors in some cases. Version 2.1.3 "
"or greater is recommended."
)
% ".".join([str(subver) for subver in sqlite_ver])
)
@classmethod
def dbapi(cls):
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError:
try:
from sqlite3 import dbapi2 as sqlite # try 2.5+ stdlib name.
except ImportError as e:
raise e
return sqlite
@classmethod
def get_pool_class(cls, url):
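        # A file-based database can open a fresh connection per checkout, so
        # NullPool suffices; a :memory: database exists only within a single
        # connection, which SingletonThreadPool keeps alive.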
if url.database and url.database != ":memory:":
return pool.NullPool
else:
return pool.SingletonThreadPool
def _get_server_version_info(self, connection):
return self.dbapi.sqlite_version_info
def create_connect_args(self, url):
if url.username or url.password or url.host or url.port:
raise exc.ArgumentError(
"Invalid SQLite URL: %s\n"
"Valid SQLite URL forms are:\n"
" sqlite:///:memory: (or, sqlite://)\n"
" sqlite:///relative/path/to/file.db\n"
" sqlite:////absolute/path/to/file.db" % (url,)
)
filename = url.database or ":memory:"
if filename != ":memory:":
filename = os.path.abspath(filename)
opts = url.query.copy()
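        # coerce query-string options (always strings in the URL) into the
        # types that pysqlite's connect() expects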
util.coerce_kw_type(opts, "timeout", float)
util.coerce_kw_type(opts, "isolation_level", str)
util.coerce_kw_type(opts, "detect_types", int)
util.coerce_kw_type(opts, "check_same_thread", bool)
util.coerce_kw_type(opts, "cached_statements", int)
return ([filename], opts)
def is_disconnect(self, e, connection, cursor):
return isinstance(
e, self.dbapi.ProgrammingError
) and "Cannot operate on a closed database." in str(e)
dialect = SQLiteDialect_pysqlite
| [
"[email protected]"
]
| |
976e54ad71a17887bafad148b1c31dc652196bf7 | ac4b9385b7ad2063ea51237fbd8d1b74baffd016 | /.history/s1_3_getHtml_20210209182455.py | 29f07839537a635652c10e242644034e7aa175eb | []
| no_license | preethanpa/ssoemprep | 76297ef21b1d4893f1ac2f307f60ec72fc3e7c6f | ce37127845253c768d01aeae85e5d0d1ade64516 | refs/heads/main | 2023-03-09T00:15:55.130818 | 2021-02-20T06:54:58 | 2021-02-20T06:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,757 | py | # This module is called from 3R Automation Component.
import os
import sys
# pdftotree is available as part of the virtual environment for 3R Python processing
import pdftotree
import json
from pprint import pprint
import pdfminer
import ocr_extract as imgpdf
from utils.ocr.handle_image import *
# pdf_doc = json.loads(sys.argv[1])['doc_name']
# html_path = json.loads(sys.argv[1])['html_path']
# Use the following for testing
pdf_doc = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/pdf/Sri_khyati_CV.pdf'
html_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/html/Sri_khyati_CV.html'
def create_hocr(pdf_doc='', html_path='', model_path='./model/model.pkl'):
return pdftotree.parse(pdf_doc, html_path=html_path, model_type=None, model_path=model_path, visualize=False)
create_hocr_output = None
try:
create_hocr_output = create_hocr(pdf_doc=pdf_doc, html_path=html_path)
except pdfminer.pdfparser.PDFSyntaxError as pdfException:
create_hocr_output = pdfException
# Use the following for testing non PDF files
# print(f'{os.path.basename(pdf_doc).split(".")[0]+".pdf"}')
# print(f'{os.path.abspath(pdf_doc).split(".")[0]+".pdf"}')
# try:
# # imgpdf.convert_image_to_pdf(pdf_doc, os.path(pdf_doc)+os.path.basename(pdf_doc).split('.')[0]+'.pdf')
# imgpdf.convert_image_to_pdf(pdf_doc, os.path.dirname(pdf_doc), os.path.abspath(pdf_doc).split(".")[0]+".pdf")
# except Exception as exc:
# print(exc)
# Output of "print" statement is passed to the calling program
proc_status = "OK" if create_hocr_output == None else "Not a PDF document. Please provide a PDF file for processing."
json_out = {"pdf_doc": pdf_doc, "process_status": proc_status}
print(json_out) | [
"{[email protected]}"
]
| |
583fa6ef03b8b65452f1c039cbd3bf6fb3ea4678 | 8e5ed445e05274dd013f443d9d506695fa08ad9f | /dat/gui/load_variable_dialog.py | 78544e477425d33332d411bf878f0592ef506ab2 | []
| no_license | afcarl/DAT | 5c2237f4b3745d3b47df27a91eab469cfadd9da9 | 1272b630326b05bca27746993fe2d32479a7353c | refs/heads/master | 2020-06-22T05:49:54.135615 | 2015-09-16T14:10:41 | 2015-09-16T14:10:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,593 | py | import re
from PyQt4 import QtCore, QtGui
from dat import DEFAULT_VARIABLE_NAME, variable_format
from dat import data_provenance
from dat.gui import translate
from dat.gui.generic import AdvancedLineEdit
from dat.global_data import GlobalManager
from dat.vistrail_data import VistrailManager
from dat.vistrails_interface import FileVariableLoader, CustomVariableLoader
from vistrails.core.application import get_vistrails_application
_unique_varname_format = re.compile('^(.+)_([0-9]+)$')
def unique_varname(varname, vistraildata):
"""Makes a variable name unique.
    Adds or increments a number suffix to a variable name to make it unique.
>>> vistraildata = VistrailManager()
>>> unique_varname('variable', vistraildata)
'variable_2'
>>> unique_varname('variable_4', vistraildata)
'variable_5'
"""
match = _unique_varname_format.match(varname)
num = 1
if match is not None:
varname = match.group(1)
num = int(match.group(2))
while True:
num += 1
new_varname = '%s_%d' % (varname, num)
if vistraildata.get_variable(new_varname) is None:
return new_varname
_varname_format = re.compile('^' + variable_format + '$')
class VariableNameValidator(object):
"""Validates variable names according to a given VistrailData.
The associated VistrailData object will be used to check for collisions.
"""
def __init__(self, vistraildata):
self._vistraildata = vistraildata
def unique(self, name):
"""Returns True if this name doesn't collide with an existing variable.
"""
return self._vistraildata.get_variable(name) is None
@staticmethod
def format(name):
"""Returns True if this name has an acceptable format.
"""
return bool(name) and bool(_varname_format.match(name))
def __call__(self, name):
"""Returns True if this name can be used for a new variable.
        This checks both the format and the uniqueness of this name. It allows
        a VariableNameValidator object to be passed as the 'validate' argument
        of an AdvancedLineEdit widget.
"""
return self.format(name) and self.unique(name)
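# Illustrative usage of VariableNameValidator (assuming an existing
# VistrailData instance named 'vistraildata'):
#
#     validator = VariableNameValidator(vistraildata)
#     validator('my_var')    # True only if well-formed and not yet taken
#     validator.unique('x')  # True if no variable named 'x' exists yet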
class FileLoaderPanel(QtGui.QWidget):
"""The first tab of the LoadVariableDialog.
Allows the user to select a file. It will then display the
FileVariableLoader's that can accept this time of file in a combobox, and
the parameters for the selected loader underneath.
"""
def __init__(self):
QtGui.QWidget.__init__(self)
_ = translate(LoadVariableDialog)
self._file_loaders = set()
self.default_variable_name_observer = None
main_layout = QtGui.QVBoxLayout()
header_layout = QtGui.QFormLayout()
file_edit = QtGui.QHBoxLayout()
self._file_edit = QtGui.QLineEdit()
self._file_edit.setEnabled(False)
file_edit.addWidget(self._file_edit)
file_button = QtGui.QPushButton(_("Browse..."))
self.connect(file_button, QtCore.SIGNAL('clicked()'),
self.pick_file)
file_edit.addWidget(file_button)
header_layout.addRow(_("File:"), file_edit)
self._loader_list = QtGui.QComboBox()
self.connect(self._loader_list,
QtCore.SIGNAL('currentIndexChanged(int)'),
self.update_widget)
header_layout.addRow(_("Loader:"), self._loader_list)
main_layout.addLayout(header_layout)
self._loader_stack = QtGui.QStackedWidget()
loader_groupbox = QtGui.QGroupBox(_("Loader parameters"))
groupbox_layout = QtGui.QVBoxLayout()
groupbox_layout.addWidget(self._loader_stack)
loader_groupbox.setLayout(groupbox_layout)
main_layout.addWidget(loader_groupbox)
self.setLayout(main_layout)
self.select_file('')
def pick_file(self):
_ = translate(LoadVariableDialog)
# Pick a file
picked = QtGui.QFileDialog.getOpenFileName(
self,
_("Choose a file"))
if not picked:
return
self.select_file(str(picked))
def select_file(self, filename):
"""Change the currently selected file.
The list of available loaders will be updated accordingly.
"""
_ = translate(LoadVariableDialog)
# Update self._file_edit
self._file_edit.setText(filename)
# Update self._loader_list
self._loader_list.clear()
while self._loader_stack.count() > 0:
self._loader_stack.removeWidget(self._loader_stack.widget(0))
if filename != '':
for loader in self._file_loaders:
if loader.can_load(filename):
widget = loader(filename)
widget.default_variable_name_observer = (
self.default_variable_name_changed)
# The order of these lines is important, because adding an
# item to the list emits a signal
self._loader_stack.addWidget(widget)
self._loader_list.addItem(loader.name, widget)
if self._loader_stack.count() == 0:
self._loader_stack.addWidget(
QtGui.QLabel(_("No loader accepts this file")))
else:
self._loader_stack.addWidget(QtGui.QLabel(_("No file selected")))
# Update the widget stack
self.update_widget()
def update_widget(self, index=None):
"""Makes the currently selected loader visible.
"""
if index is None:
index = self._loader_list.currentIndex()
if index == -1:
if self.default_variable_name_observer is not None:
self.default_variable_name_observer(self,
DEFAULT_VARIABLE_NAME)
return
self._loader_stack.setCurrentIndex(index)
self.default_variable_name_observer(
self,
self._loader_stack.widget(index).get_default_variable_name())
def add_file_loader(self, loader):
"""Adds a FileVariableLoader to this panel.
Of course, it will only be available if a file that it accepts is
selected.
"""
if loader not in self._file_loaders:
self._file_loaders.add(loader)
def remove_file_loader(self, loader):
"""Removes a FileVariableLoader from this panel.
"""
if loader in self._file_loaders:
self._file_loaders.remove(loader)
def reset(self):
"""Resets this panel, e.g. doesn't select any file.
"""
self.select_file('')
def default_variable_name_changed(self, loader, new_default_name):
"""Called when the default name for a loader is changed.
If this loader is the one currently selected, we forward this to the
        dialog, which in turn might update the name AdvancedLineEdit.
"""
if self._loader_list.currentIndex() == -1:
return None
current_loader = self._loader_stack.currentWidget()
if (current_loader is loader and
self.default_variable_name_observer is not None):
self.default_variable_name_observer(self, new_default_name)
def get_default_variable_name(self):
"""Returns the default name for the current loader.
"""
if self._loader_list.currentIndex() == -1:
return DEFAULT_VARIABLE_NAME
current_loader = self._loader_stack.currentWidget()
name = current_loader.get_default_variable_name()
return name
def load(self):
"""Loads a variable using the current loader.
"""
if self._loader_list.currentIndex() == -1:
return None
loader = self._loader_stack.currentWidget()
variable = loader.load()
if variable is not None and variable.provenance is None:
variable.provenance = data_provenance.Loader(
loader=loader,
file=str(self._file_edit.text()))
return variable
class LoadVariableDialog(QtGui.QDialog):
"""The variable loading dialog, displayed when clicking 'load variable'.
It shows one tab to load a file, and a tab for each CustomVariableLoader.
"""
def __init__(self, controller, parent=None):
QtGui.QDialog.__init__(self, parent, QtCore.Qt.Dialog)
self._vistraildata = VistrailManager(controller)
self._validator = VariableNameValidator(self._vistraildata)
_ = translate(LoadVariableDialog)
self.setWindowTitle(_("Load variable"))
self._tabs = []
main_layout = QtGui.QVBoxLayout()
self._tab_widget = QtGui.QTabWidget()
self.connect(self._tab_widget, QtCore.SIGNAL('currentChanged(int)'),
self.update_varname)
main_layout.addWidget(self._tab_widget)
varname_layout = QtGui.QHBoxLayout()
varname_layout.addWidget(QtGui.QLabel(_("Variable name:")))
self._varname_edit = AdvancedLineEdit(
DEFAULT_VARIABLE_NAME,
default=DEFAULT_VARIABLE_NAME,
validate=self._validator,
flags=(AdvancedLineEdit.COLOR_VALIDITY |
AdvancedLineEdit.COLOR_DEFAULTVALUE |
AdvancedLineEdit.FOLLOW_DEFAULT_UPDATE))
varname_layout.addWidget(self._varname_edit)
main_layout.addLayout(varname_layout)
buttons_layout = QtGui.QHBoxLayout()
load_cont_button = QtGui.QPushButton(_("Load and close"))
self.connect(load_cont_button, QtCore.SIGNAL('clicked()'),
self.loadclose_clicked)
buttons_layout.addWidget(load_cont_button)
load_button = QtGui.QPushButton(_("Load"))
self.connect(load_button, QtCore.SIGNAL('clicked()'),
self.load_clicked)
buttons_layout.addWidget(load_button)
cancel_button = QtGui.QPushButton(_("Cancel"))
self.connect(cancel_button, QtCore.SIGNAL('clicked()'), self.cancel)
buttons_layout.addWidget(cancel_button)
main_layout.addLayout(buttons_layout)
self.setLayout(main_layout)
self._file_loader = FileLoaderPanel()
self._file_loader.default_variable_name_observer = (
self.default_variable_name_changed)
self._add_tab(self._file_loader, _("File"))
app = get_vistrails_application()
app.register_notification('dat_new_loader', self.loader_added)
app.register_notification('dat_removed_loader', self.loader_removed)
for loader in GlobalManager.variable_loaders:
self.loader_added(loader)
idx = self._tab_widget.currentIndex()
if idx >= 0:
loader = self._tabs[idx]
self._varname_edit.setDefault(loader.get_default_variable_name())
else:
self._varname_edit.setDefault(DEFAULT_VARIABLE_NAME)
self._varname_edit.reset()
def update_varname(self, idx):
"""Updates the 'name' AdvancedLineEdit when the tab is changed.
"""
if idx >= 0:
loader = self._tabs[idx]
self.default_variable_name_changed(
None, loader.get_default_variable_name())
def _add_tab(self, tab, name):
widget = QtGui.QWidget()
lay = QtGui.QVBoxLayout()
lay.addWidget(tab)
lay.addStretch()
widget.setLayout(lay)
# The order of these lines is important, because adding a tab emits a
# signal
self._tabs.append(tab)
self._tab_widget.addTab(widget, name)
def _remove_tabs(self, tabfilter):
idx = 0
while idx < len(self._tabs):
if tabfilter(self._tabs[idx]):
del self._tabs[idx]
self._tab_widget.removeTab(idx)
else:
idx += 1
def loader_added(self, loader):
"""Called when a VariableLoader is added (by loading a package).
It will either be put in the FileLoaderPanel or in a new tab.
"""
if issubclass(loader, FileVariableLoader):
self._file_loader.add_file_loader(loader)
elif issubclass(loader, CustomVariableLoader):
l = loader()
l.default_variable_name_observer = (
self.default_variable_name_changed)
self._add_tab(l, loader.name)
def loader_removed(self, loader):
"""Called when a VariableLoader is removed.
It will remove it from the FileLoaderPanel (or the tabs).
"""
if issubclass(loader, FileVariableLoader):
self._file_loader.remove_file_loader(loader)
elif issubclass(loader, CustomVariableLoader):
self._remove_tabs(lambda tab: isinstance(tab, loader))
def default_variable_name_changed(self, loader, new_default_name):
"""Called by a loader to notify the default name changed.
If it came from the current loader, update the 'name' AdvancedLineEdit.
"""
idx = self._tab_widget.currentIndex()
if idx == -1:
return
current_loader = self._tabs[idx]
if not (loader is None or loader is current_loader):
return
self._default_varname = new_default_name
self._varname_edit.setDefault(self._default_varname)
def load_variable(self):
"""Displays the dialog to load a new variable.
"""
if not self.isVisible():
self.setVisible(True)
for tab in self._tabs:
tab.reset()
def cancel(self):
"""Cancels the loading operation (hides the dialog).
"""
self.setVisible(False)
def loadclose_clicked(self):
"""'Load and close' button.
Loads, then hide the dialog if successful.
"""
if self.load_clicked():
self.setVisible(False)
def load_clicked(self):
"""'Load' button.
Loads a variable using the current loader.
"""
varname = self._varname_edit.text()
varname = str(varname)
if not self._validator.format(varname):
self._varname_edit.setFocus()
return False
if not self._validator.unique(varname):
varname = unique_varname(varname, self._vistraildata)
self._varname_edit.setText(varname)
self._varname_edit.setFocus()
return False
loader = self._tabs[self._tab_widget.currentIndex()]
try:
variable = loader.load()
# The Loader may provide a provenance node (i.e. to record the
# specific parameters it used), else we'll just store that it came
# from this loader
if variable is not None and variable.provenance is None:
variable.provenance = data_provenance.Loader(loader=loader)
except Exception, e:
_ = translate(LoadVariableDialog)
QtGui.QMessageBox.critical(
self,
_("Error"),
"%s\n%s: %s" % (
_("Got an exception from the VisTrails package:"),
e.__class__.__name__,
str(e)))
return False
if variable is None:
# Here we assume the loader displayed the error itself in some way
return False
self._vistraildata.new_variable(varname, variable)
self._varname_edit.setDefault(self._default_varname)
return True
| [
"[email protected]"
]
| |
9b245ffc9a5b5714555bcedb7a015c8e4e6c6d80 | 8ec05f1d5800e0b98afa92367f74bed9f95e0ee9 | /venv/Scripts/rst2man.py | 9ef84bd9b4cc305a5aa6ffa93a6cc0aafa425c88 | []
| no_license | ayanchyaziz123/ecom-final-year-project | 28362922a88c71aba29d22f29c7f34e1cad6189f | d21fdd885b3b768935dc29171c5a6761c4b88e9c | refs/heads/master | 2023-08-12T17:10:23.826744 | 2021-10-06T12:36:17 | 2021-10-06T12:36:17 | 405,435,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | #!f:\proshop_django-master\venv\scripts\python.exe
# Author:
# Contact: [email protected]
# Copyright: This module has been placed in the public domain.
"""
man.py
======
This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
from docutils.writers import manpage
description = ("Generates plain unix manual documents. " + default_description)
publish_cmdline(writer=manpage.Writer(), description=description)
| [
"[email protected]"
]
| |
769ca7862fc492c3162d45ff8ce8222afda2829c | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/hvs/adj.py | fb48fc6714990d8e24798555483c62576a4eb77b | []
| no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 7,640 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Adj(Mo):
"""
The connectivity to an external network.
"""
meta = ClassMeta("cobra.model.hvs.Adj")
meta.moClassName = "hvsAdj"
meta.rnFormat = "adj-[%(nbrKey)s]"
meta.category = MoCategory.REGULAR
meta.label = "Adjacency"
meta.writeAccessMask = 0x5
meta.readAccessMask = 0x5
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Inst")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.hvs.RtNicAdj")
meta.childClasses.add("cobra.model.hvs.RsLsNode")
meta.childNamesAndRnPrefix.append(("cobra.model.hvs.RtNicAdj", "rtcompNicAdj-"))
meta.childNamesAndRnPrefix.append(("cobra.model.hvs.RsLsNode", "rsLsNode"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.parentClasses.add("cobra.model.comp.Hv")
meta.rnPrefixes = [
('adj-', True),
]
prop = PropMeta("str", "addr", "addr", 234, PropCategory.REGULAR)
prop.label = "Neighbor Address"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("addr", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "configIssues", "configIssues", 238, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("missing-mgmtip", "management-address-is-not-configured-on-loosenode-(unmanaged-switch)", 1)
prop._addConstant("none", "none", 0)
meta.props.add("configIssues", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "ifId", "ifId", 236, PropCategory.REGULAR)
prop.label = "Interface Id"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("ifId", prop)
prop = PropMeta("str", "ifName", "ifName", 235, PropCategory.REGULAR)
prop.label = "Neighbor Interface"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("ifName", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 13777, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "name", "name", 229, PropCategory.REGULAR)
prop.label = "Name"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 16)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nbrDesc", "nbrDesc", 232, PropCategory.REGULAR)
prop.label = "Neighbor Description"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("nbrDesc", prop)
prop = PropMeta("str", "nbrId", "nbrId", 231, PropCategory.REGULAR)
prop.label = "Neighbor Identifier"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("nbrId", prop)
prop = PropMeta("str", "nbrKey", "nbrKey", 233, PropCategory.REGULAR)
prop.label = "Neighbor Key"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 512)]
meta.props.add("nbrKey", prop)
prop = PropMeta("str", "nbrName", "nbrName", 230, PropCategory.REGULAR)
prop.label = "Name"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 16)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("nbrName", prop)
prop = PropMeta("str", "nbrType", "nbrType", 237, PropCategory.REGULAR)
prop.label = "Neighbor Type, Leaf or Loosenode"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "leaf"
prop._addConstant("leaf", "leaf", 1)
prop._addConstant("loosenode", "loosenode", 2)
meta.props.add("nbrType", prop)
prop = PropMeta("str", "proto", "proto", 228, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("CDP", "cdp", 2)
prop._addConstant("LLDP", "lldp", 1)
prop._addConstant("none", "none", 0)
meta.props.add("proto", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "nbrKey"))
getattr(meta.props, "nbrKey").needDelimiter = True
def __init__(self, parentMoOrDn, nbrKey, markDirty=True, **creationProps):
namingVals = [nbrKey]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
]
| |
ef356e1edaae73bf881c1937ec46f1ac044ffbe1 | ec56e3a57fb71f3fc4f19b168d3fa34cebb781ab | /tcga_encoder/models/regularizers.py | 7326b08e9ac4b6ace69433dc138071b247dc3932 | [
"MIT"
]
| permissive | tedmeeds/tcga_encoder | 64d60148b0c69092cb499abec22618e740ba8b6c | 805f9a5bcc422a43faea45baa0996c88d346e3b4 | refs/heads/master | 2021-01-13T04:50:42.643743 | 2017-08-25T13:09:38 | 2017-08-25T13:09:38 | 78,621,753 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,136 | py | import tensorflow as tf
from scipy import stats
def Drop( X, keep_rate ):
keep_mask = stats.bernoulli( keep_rate).rvs( X.shape )
Y = X*keep_mask + (1-keep_mask)*0
return Y
def DropColumns( X, cols2drop ):
Y = X.copy()
Y[:,cols2drop] = 0
return Y
class Regularizer(object):
def __init__( self, lam_value ):
self.lam = lam_value
def Apply( self, w ):
raise NotImplemented, "Must derive class"
class L2Regularizer(Regularizer):
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.square(w) )
class L1Regularizer(Regularizer):
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.abs(w) )
class LqRegularizer(Regularizer):
def __init__( self, lam_value, q ):
self.lam = lam_value
self.q = q
self.eps = 1e-6
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.pow( tf.abs(w), self.q ) + self.eps )
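# Illustrative usage (a sketch only; 'weights' stands for a hypothetical
# tf.Variable, and the penalty is added to an existing training loss):
#
#     reg = L2Regularizer(1e-4)
#     loss = data_loss + reg.Apply(weights)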
class SortedL1RegularizerAxis2(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.abs(w[:,:,1:]-w[:,:,:-1]) + self.eps )
class SortedL1RegularizerAxis1(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.abs(w[:,1:,:]-w[:,:-1,:]) + self.eps )
class SortedL1RegularizerAxis0(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.abs(w[1:,:,:]-w[:-1,:,:]) + self.eps )
class SortedAbsL1RegularizerAxis2(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
aw = tf.abs(w)
return self.lam*tf.reduce_sum( tf.abs(aw[:,:,1:]-aw[:,:,:-1]) + self.eps )
class SortedAbsL1RegularizerAxis1(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
aw = tf.abs(w)
return self.lam*tf.reduce_sum( tf.abs(aw[:,1:,:]-aw[:,:-1,:]) + self.eps )
class SortedAbsL1RegularizerAxis0(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
aw = tf.abs(w)
return self.lam*tf.reduce_sum( tf.abs(aw[1:,:,:]-aw[:-1,:,:]) + self.eps )
# ------------------
class SortedL2RegularizerAxis2(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.square(w[:,:,1:]-w[:,:,:-1]) + self.eps )
class SortedL2RegularizerAxis1(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.square(w[:,1:,:]-w[:,:-1,:]) + self.eps )
class SortedL2RegularizerAxis0(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.square(w[1:,:,:]-w[:-1,:,:]) + self.eps )
class SortedAbsL2RegularizerAxis2(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
aw = tf.abs(w)
return self.lam*tf.reduce_sum( tf.square(aw[:,:,1:]-aw[:,:,:-1]) + self.eps )
class SortedAbsL2RegularizerAxis1(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
aw = tf.abs(w)
return self.lam*tf.reduce_sum( tf.square(aw[:,1:,:]-aw[:,:-1,:]) + self.eps )
class SortedAbsL2RegularizerAxis0(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
aw = tf.abs(w)
return self.lam*tf.reduce_sum( tf.square(aw[1:,:,:]-aw[:-1,:,:]) + self.eps ) | [
"[email protected]"
]
| |
ba780aaf42ac35517e278f877cdb859dab20abd9 | 0bf6ecbdebc7424a8946b29127d55c5bc1e7442e | /wetLab/migrations/0017_auto_20161107_1626.py | a4121e0b72612c717750f8d9b822000923594343 | []
| no_license | dekkerlab/cLIMS | 2351a9c81f3e3ba982e073500a4a5cf2fd38ed51 | e76731032a5707027b53746a8f2cc9b01ab7c04e | refs/heads/master | 2021-03-27T06:28:49.718401 | 2017-10-10T19:22:33 | 2017-10-10T19:22:33 | 71,837,345 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-07 16:26
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wetLab', '0016_auto_20161107_1556'),
]
operations = [
migrations.RenameField(
model_name='treatmentrnai',
old_name='treatmentRnai_target_sequence',
new_name='treatmentRnai_targetNucleotide_seq',
),
]
| [
"[email protected]"
]
| |
77bf44bd368a70e3933cfcf5f32ec64eab7ecdd9 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/detail_placement_view_service/transports/base.py | 85ed85c711e8a89a9ba736c1cc3efae58586a2a2 | [
"Apache-2.0"
]
| permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,807 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v7.resources.types import detail_placement_view
from google.ads.googleads.v7.services.types import detail_placement_view_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-ads',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class DetailPlacementViewServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for DetailPlacementViewService."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/adwords',
)
def __init__(
self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_detail_placement_view: gapic_v1.method.wrap_method(
self.get_detail_placement_view,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_detail_placement_view(self) -> typing.Callable[
[detail_placement_view_service.GetDetailPlacementViewRequest],
detail_placement_view.DetailPlacementView]:
raise NotImplementedError
__all__ = (
'DetailPlacementViewServiceTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
]
| bazel-bot-development[bot]@users.noreply.github.com |
220ca96c3e7cbb881449a5efc32e58889f288fbc | 239eafa1bdf684ae8b8663c1f34f8f60df5f523e | /20180305_emeson_rnaediting/findWord.py | 98024d4dce24ad435493433bc73ad27ca88798c9 | [
"Apache-2.0"
]
| permissive | shengqh/vangard | 83069b0e2ff2951a8afe6a0e70ec542bb071d2f0 | 8ee611d7eaab2a8fac37aa756921fee2e195c86a | refs/heads/master | 2021-06-29T00:42:43.953492 | 2019-01-02T17:50:15 | 2019-01-02T17:50:15 | 114,392,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,173 | py | import argparse
import sys
import logging
import os
import gzip
sys.path.insert(0, '/home/shengq2/program/ngsperl/lib/RNAediting')
from WordRoleUtils import WordRole
roles = {}
roles["Turnee_word"] = [
WordRole("TurneeA", "AACCAT", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeB", "AACGTC", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeC", "AACTCA", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeD", "AAGACT", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeE", "ACTATT", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeH", "ATATGA", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeI", "CAATAT", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeJ", "CCTCGG", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeK", "CGCTTC", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeN", "GCAGAA", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeP", "GCGTCC", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeQ", "GGAGTC", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeT", "GTTGCC", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeU", "TACCGG", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeV", "TCAGCC", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeW", "TTCGGC", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeX", "TTGACC", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
]
logger = logging.getLogger('findWord')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
wordCounts = {}
discardCounts = {}
rootFolder = "/scratch/VANGARD/20180305_turnee_rnaediting/"
fileName = rootFolder + "1364-TM-1_S1_R1_001.fastq.gz"
gzipped = fileName.endswith(".gz")
if gzipped:
f = gzip.open(fileName, 'rt')
else:
f = open(fileName, 'r')
def findWord(sequence, roles, wordCounts):
for userName, userRoles in roles.iteritems():
if not userName in wordCounts:
wordCounts[userName] = {}
userCounts = wordCounts[userName]
if not userName in discardCounts:
discardCounts[userName] = {}
userDiscardCounts = discardCounts[userName]
for role in userRoles:
curWord = role.getWord(sequence)
if curWord.discarded:
if role.SampleName in userDiscardCounts:
userDiscardCounts[role.SampleName] = userDiscardCounts[role.SampleName] + 1
else:
userDiscardCounts[role.SampleName] = 1
return(None)
if len(curWord.word) > 0:
if not role.SampleName in userCounts:
userCounts[role.SampleName] = {}
sampleCounts = userCounts[role.SampleName]
if not curWord.word in sampleCounts:
sampleCounts[curWord.word] = 1
else:
sampleCounts[curWord.word] = sampleCounts[curWord.word] + 1
return(role)
return(None)
fastqMap = {}
try:
count = 0
while True:
header = f.readline()
if '' == header:
break
if not header.startswith("@"):
continue
sequence = f.readline().strip()
line3 = f.readline()
line4 = f.readline()
role = findWord(sequence, roles, wordCounts)
if role != None:
if not role.SampleName in fastqMap:
fastqFile = rootFolder + role.SampleName + ".fastq.gz"
fastqMap[role.SampleName] = gzip.open(fastqFile, "wt")
sw = fastqMap[role.SampleName]
sw.write("%s\n" % header.strip())
sw.write("%s\n" % sequence)
sw.write("%s\n" % line3.strip())
sw.write("%s\n" % line4.strip())
count = count+1
if count % 100000 == 0:
logger.info("%d reads processed" % count)
#if count % 20000 == 0:
# break
finally:
f.close()
logger.info("total %d reads processed" % count)
for sw in fastqMap.values():
sw.close()
for userName, userCounts in wordCounts.iteritems():
samples = sorted(userCounts.iterkeys())
totalCounts = {sample:sum(userCounts[sample].values()) for sample in samples}
words = sorted(set( val for dic in userCounts.values() for val in dic.keys()))
with open(rootFolder + userName + ".count.txt", "w") as swCount:
with open(rootFolder + userName + ".perc.txt", "w") as swPerc:
header = "Word\t%s\n" % "\t".join(samples)
swCount.write(header)
swPerc.write(header)
for word in words:
swCount.write(word)
swPerc.write(word)
for sample in samples:
sampleCounts = userCounts[sample]
if word in sampleCounts:
swCount.write("\t%d" % sampleCounts[word])
swPerc.write("\t%.4f" % (sampleCounts[word] * 1.0 / totalCounts[sample]))
else:
swCount.write("\t0")
swPerc.write("\t0")
swCount.write("\n")
swPerc.write("\n")
swCount.write("TotalWord\t%s\n" % "\t".join([str(totalCounts[sample]) for sample in samples]))
swCount.write("DiscardRead\t%s\n" % "\t".join([str(discardCounts[userName][sample]) for sample in samples]))
gPerc = {}
for sample in samples:
gPerc[sample] = {}
sampleCounts = userCounts[sample]
for word in sampleCounts:
maxChrInd = len(word)
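      # assumes every word has the same length; the final value of
      # maxChrInd is reused when writing the per-position table below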
wCount = sampleCounts[word]
for chrInx in range(0, len(word)) :
if chrInx in gPerc[sample]:
chrMap = gPerc[sample][chrInx]
else:
chrMap = {'G':0, 'R':0}
gPerc[sample][chrInx] = chrMap
if word[chrInx] == 'G':
chrMap['G'] = chrMap['G'] + wCount
else:
chrMap['R'] = chrMap['R'] + wCount
with open(rootFolder + userName + ".G.txt", "w") as swG:
swG.write("Word\t%s\n" % "\t".join(samples))
for chrInd in range(0, maxChrInd):
word = 'R' * chrInd + 'G' + 'R' *(maxChrInd - chrInd - 1)
swG.write(word)
for sample in samples:
chrMap = gPerc[sample][chrInd]
perc = chrMap['G'] * 1.0 / (chrMap['G'] + chrMap['R'])
swG.write('\t%.4f' % perc)
swG.write('\n')
logger.info("done")
| [
"[email protected]"
]
| |
7d62f9b9485d0a086b982af2224380bc8381d6ae | 52e2064daa678499d3d48f5704d68c2eeb549156 | /Facturacion/urls.py | f1fdb7159af18a2548897044a26bb76ac5ae4dcd | []
| no_license | matheo97/ERP | 159723cb42fba179561834d89c45af00163173df | 7ff4f452c6c84c85759a32351fc25cc111dd0c1f | refs/heads/master | 2021-11-12T12:00:55.403003 | 2019-03-06T14:58:02 | 2019-03-06T14:58:02 | 174,161,863 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py | from django.conf.urls import url, include
from .views import *
urlpatterns = [
url(r'^generar_cotizacion/$', generarCotizacion.as_view(), name="generar_cotizacion"),
url(r'^generar_factura/$', generarFactura.as_view(), name="generar_factura"),
url(r'^editar_factura/(?P<id_factura>\w+)$', editarFactura.as_view(), name="editar_factura"),
url(r'^generar_factura_cotizacion/(?P<id_cotizacion>\d+)$', generarFacturadeCotizacion.as_view(), name="generar_factura_cotizacion"),
url(r'^generar_remision/$', generarRemision.as_view(), name="generar_remision"),
url(r'^generar_cotizacion/eliminar_presentacion_carrito$', eliminarPresentacionCarrito.as_view(), name="eliminar_presentacion_carrito"),
url(r'^generar_factura/eliminar_presentacion_carrito$', eliminarPresentacionCarrito.as_view(), name="eliminar_presentacion_carrito"),
url(r'^listar_facturacion/(?P<id_cliente>.+)$', listarFacturacion.as_view(), name="listar_facturacion"),
url(r'^listar_facturas/(?P<id_cliente>.+)$', listar_facturas, name="listar_facturas"),
url(r'^listar_cotizaciones/(?P<id_cliente>.+)$', listar_cotizaciones, name="listar_cotizaciones"),
url(r'^listar_remisiones/(?P<id_cliente>.+)$', listar_remisiones, name="listar_remisiones"),
url(r'^generar_cotizacion_pdf/(?P<id_cotizacion>\d+)$', generarCotizacionPDF.as_view(), name="generarCotizacion_pdf"),
url(r'^generar_factura_pdf/(?P<id_factura>\w+)$', generarFacturaPDF.as_view(), name="generarFactura_pdf"),
url(r'^generar_factura/validar_consecutivo/', validar_consecutivo, name="validar_consecutivo"),
url(r'^generar_factura_cotizacion/validar_consecutivo/', validar_consecutivo, name="generar_factura_cotizacion_1"),
url(r'^generar_remision_pdf/(?P<id_remision>\d+)$', generarRemisionPDF.as_view(), name="generarRemision_pdf"),
url(r'^ver_factura/(?P<id_factura>\d+)$', verFactura.as_view(), name="ver_factura"),
url(r'^limpiar_carrito/(?P<documento>\w+)$', limpiar_carrito, name="limpiar_carrito"),
url(r'^eliminar_factura/$', eliminar_factura.as_view(), name="eliminar_factura"),
] | [
"[email protected]"
]
| |
5752a80b9c4d569351b2dea27135216d006cfe5a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_focusing.py | 3bfe426f06c2ae27b809740c8cfc639425e5f31f | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#calss header
class _FOCUSING():
def __init__(self,):
self.name = "FOCUSING"
self.definitions = focus
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['focus']
| [
"[email protected]"
]
| |
81652859f613988bdd9753ef3a50a151f8e9cdf2 | 462682b3b29304b561eaea3833c29e84d1e95c0e | /PythonLoops/03-Patterns.py | 1d0b2cb5c40e17317ac93192942cee55c45f54a2 | []
| no_license | ravi4all/PythonDecMorning | 4452b8340ce0b4ab067bd769725c5a6f831b7f45 | 1e20da3c90d407dbef714770ad54e72f16be0eec | refs/heads/master | 2021-09-02T11:01:28.686860 | 2018-01-02T05:05:06 | 2018-01-02T05:05:06 | 113,133,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | Python 3.6.2 (v3.6.2:5fd33b5, Jul 8 2017, 04:57:36) [MSC v.1900 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> a = 2
>>> a = "*"
>>> print(a*5)
*****
>>> for i in range(1,6):
print('*' * ((i*2)-1)))
SyntaxError: invalid syntax
>>> for i in range(1,6):
print('*' * ((i*2)-1))
*
***
*****
*******
*********
>>> for i in range(1,6):
print('*' * i)
*
**
***
****
*****
>>> for i in range(1,6):
print(' ' * (6-i) + '*' * (2*i + 1))
***
*****
*******
*********
***********
>>> for i in range(1,6):
print(' ' * (6-i) + '*' * (2*i - 1))
*
***
*****
*******
*********
>>> for i in reversed(range(1,6)):
print(' ' * (6-i) + '*' * (2*i - 1))
*********
*******
*****
***
*
>>>
| [
"[email protected]"
]
| |
50c7515de17781b2aa3264982c36d11d47c5e5bd | 4f408d65db60911f56110c351cb3b64835e0c5fb | /caffe2/python/operator_test/video_input_op_test.py | f447305f341c0f4a1633c9d321e6e10d3adb13eb | [
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
]
| permissive | KeyKy/caffe2_SSD | a02c065aef2dbcfd00faae8be0440d7a4ff0fb76 | 7235688ea5e212dbe8609d780dd94c8c7d9fef54 | refs/heads/master | 2021-09-18T14:36:11.247427 | 2018-07-10T09:59:35 | 2018-07-10T09:59:35 | 89,928,918 | 8 | 5 | null | 2018-07-27T02:14:38 | 2017-05-01T14:04:20 | Jupyter Notebook | UTF-8 | Python | false | false | 3,796 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
import shutil
import lmdb
import unittest
import tempfile
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, cnn
import numpy as np
class VideoInputOpTest(unittest.TestCase):
def create_a_list(self, output_file, line, n):
        # create a list that repeats a line n times
# used for creating a list file for simple test input
with open(output_file, 'w') as file:
for _i in range(n):
file.write(line)
def create_video_db(self, list_file, output_file, use_list=False):
# Write to lmdb database...
LMDB_MAP_SIZE = 1 << 40 # MODIFY
env = lmdb.open(output_file, map_size=LMDB_MAP_SIZE)
total_size = 0
file_name = []
start_frame = []
label = []
index = 0
with env.begin(write=True) as txn:
with open(list_file, 'r') as data:
for line in data:
p = line.split()
file_name = p[0]
start_frame = int(p[1])
label = int(p[2])
if not use_list:
with open(file_name, mode='rb') as file:
video_data = file.read()
else:
video_data = file_name
tensor_protos = caffe2_pb2.TensorProtos()
video_tensor = tensor_protos.protos.add()
video_tensor.data_type = 4 # string data
video_tensor.string_data.append(video_data)
label_tensor = tensor_protos.protos.add()
label_tensor.data_type = 2
label_tensor.int32_data.append(label)
start_frame_tensor = tensor_protos.protos.add()
start_frame_tensor.data_type = 2
start_frame_tensor.int32_data.append(start_frame)
txn.put(
'{}'.format(index).encode('ascii'),
tensor_protos.SerializeToString()
)
index = index + 1
total_size = total_size + len(video_data) + sys.getsizeof(int)
return total_size
def test_read_from_db(self):
random_label = np.random.randint(0, 100)
VIDEO = "/mnt/vol/gfsdataswarm-oregon/users/trandu/sample.avi"
temp_list = tempfile.NamedTemporaryFile(delete=False).name
line_str = '{} 0 {}\n'.format(VIDEO, random_label)
self.create_a_list(
temp_list,
line_str,
16)
video_db_dir = tempfile.mkdtemp()
self.create_video_db(temp_list, video_db_dir)
model = cnn.CNNModelHelper(name="Video Loader from LMDB")
reader = model.CreateDB(
"sample",
db=video_db_dir,
db_type="lmdb")
model.VideoInput(
reader,
["data", "label"],
name="data",
batch_size=10,
width=171,
height=128,
crop=112,
length=8,
sampling_rate=2,
mirror=1,
use_local_file=0,
temporal_jitter=1)
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
data = workspace.FetchBlob("data")
label = workspace.FetchBlob("label")
np.testing.assert_equal(label, random_label)
np.testing.assert_equal(data.shape, [10, 3, 8, 112, 112])
os.remove(temp_list)
shutil.rmtree(video_db_dir)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
]
| |
bd7bf40d30fa7f4cbeda69eb2b1844a862c81a84 | 341c7c9a8a8482c02c8427db3560b85427f1a7df | /regexlist.py | 9b7d231832c0ede1093ab8c3878927849cda8f23 | []
| no_license | amitks815/pycode | 7d20df97ce423e0a521658c8d9e1929ed04a0992 | 0d03598cd09821f38284fd48510fae236348dc29 | refs/heads/master | 2020-04-08T19:21:19.193705 | 2018-11-29T10:52:58 | 2018-11-29T10:52:58 | 159,651,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | import re
list1 = []
with open("file.txt") as f:
    # content = f.readlines()
    for line in f.readlines():
        list1.append(line)
print(list1)
# simple user@domain.tld e-mail pattern; the dot before the suffix is escaped
pattern = r'[A-Za-z0-9.]+@[A-Za-z0-9]+\.[A-Za-z]+'
for val in list1:
    matches = re.search(pattern, val)
    if matches:
        print(matches.group(0))
| [
"[email protected]"
]
| |
e3e353b9df75f077853dcb92e5f69f8cec164206 | 13a7859b59e401c83e12fd97ab93db9577f87dee | /forH4G/h4gFittingTools/MyCMSStyle.py | d2afed6baf568e36ea0afd8117ff8d3466d3b9fc | []
| no_license | bmarzocc/Scripts | 4a59527c3a8282d1dce2b76ee642d953743ed16c | 6cfc8c3bf10132a1ee55bfcca823c2cf711d783a | refs/heads/master | 2023-03-22T13:40:43.987502 | 2021-03-09T09:15:50 | 2021-03-09T09:15:50 | 255,868,174 | 0 | 0 | null | 2020-04-15T09:29:38 | 2020-04-15T09:29:37 | null | UTF-8 | Python | false | false | 1,108 | py | from ROOT import *
def SetAxisTextSizes(obj, yoff=0, ysize=1, xoff=0, xsize=1):
obj.GetYaxis().SetTitleOffset(1.1+yoff)
obj.GetYaxis().SetTitleSize(0.0425*ysize)
obj.GetYaxis().SetLabelSize(0.04*ysize)
obj.GetXaxis().SetTitleOffset(1.1+xoff)
obj.GetXaxis().SetTitleSize(0.0425*xsize)
obj.GetXaxis().SetLabelSize(0.04*xsize)
try:
obj.GetZaxis().SetTitleOffset(1.1)
obj.GetZaxis().SetTitleSize(0.0425)
obj.GetZaxis().SetLabelSize(0.04)
except AttributeError:
        pass  # object has no Z axis
def SetGeneralStyle():
gStyle.SetFrameLineWidth(2)
def SetPadStyle(obj):
obj.SetTicky()
obj.SetTickx()
def DrawCMSLabels(obj, lumi=''):
pad = obj.cd()
l = pad.GetLeftMargin()
t = pad.GetTopMargin()
r = pad.GetRightMargin()
b = pad.GetBottomMargin()
lat = TLatex()
lat.SetTextSize(0.045)
lat.SetTextAlign(11)
lat.SetTextFont(42)
cmsTag = "#bf{CMS}"
lumiTag = lumi+' fb^{-1} (13 TeV)'
if lumi == '':
cmsTag = "#bf{CMS} #it{Simulation}"
lumiTag = '(13 TeV)'
lat.DrawLatexNDC(l+0.01, 1-t+0.02, cmsTag)
lat.SetTextAlign(31)
lat.DrawLatexNDC(1-r-0.001, 1-t+0.02, lumiTag)
| [
"[email protected]"
]
| |
b32c05a30d39745a342625d64163bd4819dcf565 | 6a55fc908497a0d4ada6eae74d64a057b609c261 | /model-optimizer/extensions/front/tf/InterpolateTransposes.py | 91616c0508d912fc5ea71d727d2ee4c924e1408c | [
"Apache-2.0"
]
| permissive | anton-potapov/openvino | 9f24be70026a27ea55dafa6e7e2b6b18c6c18e88 | 84119afe9a8c965e0a0cd920fff53aee67b05108 | refs/heads/master | 2023-04-27T16:34:50.724901 | 2020-06-10T11:13:08 | 2020-06-10T11:13:08 | 271,256,329 | 1 | 0 | Apache-2.0 | 2021-04-23T08:22:48 | 2020-06-10T11:16:29 | null | UTF-8 | Python | false | false | 3,105 | py | """
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from mo.front.common.partial_infer.utils import int64_array
from mo.front.tf.replacement import FrontReplacementFromConfigFileGeneral
from mo.graph.graph import Graph, Node
from mo.middle.pattern_match import find_pattern_matches, inverse_dict
class InterpolateTranspose(FrontReplacementFromConfigFileGeneral):
"""
    Delete useless transposes around the ResizeNearestNeighbor op. In TF this op works in NHWC layout,
    while Resample in OpenVINO works in NCHW layout. If the whole graph has NCHW layout, we should delete
    the transposes around Resample: (NCHW->NHWC) -> Resample -> (NHWC->NCHW), so that this op runs in NCHW
    without any layout changes.
"""
enabled = True
replacement_id = 'InterpolateTranspose'
graph_condition = [lambda graph: graph.graph['layout'] == 'NCHW']
pattern_nodes = [
('interpolate', {'kind': 'op', 'op': 'Interpolate'}),
('transpose_1', {'kind': 'op', 'op': 'Transpose'}),
('transpose_1_order', {'kind': 'op', 'op': 'Const',
'value': lambda x: x is not None and np.array_equal(x, int64_array([0, 2, 3, 1]))}),
('transpose_2', {'kind': 'op', 'op': 'Transpose'}),
('transpose_2_order', {'kind': 'op', 'op': 'Const',
'value': lambda x: x is not None and np.array_equal(x, int64_array([0, 3, 1, 2]))}),
]
pattern_edges = [
('transpose_1', 'interpolate', {'in': 0, 'out': 0}),
('transpose_1_order', 'transpose_1', {'in': 1, 'out': 0}),
('interpolate', 'transpose_2', {'in': 0, 'out': 0}),
('transpose_2_order', 'transpose_2', {'in': 1, 'out': 0}),
]
def transform_graph(self, graph: Graph, replacement_descriptions: dict):
matches = find_pattern_matches(graph, self.pattern_nodes, self.pattern_edges)
for match in list(matches):
inverse_match = inverse_dict(match)
interpolate = Node(graph, inverse_match['interpolate'])
transpose_1 = Node(graph, inverse_match['transpose_1'])
transpose_2 = Node(graph, inverse_match['transpose_2'])
# because we remove Transpose layers the ResizeNearestNeighbor should be updated for NCHW layout
interpolate.axes = int64_array([2, 3])
transpose_1.in_port(0).get_connection().set_destination(interpolate.in_port(0))
transpose_2.out_port(0).get_connection().set_source(interpolate.out_port(0))
graph.remove_nodes_from([transpose_1.id, transpose_2.id])
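            # Net effect of the rewrite above (NHWC spatial axes [1, 2] are
            # assumed on the TF side):
            #   before: X -> Transpose(0,2,3,1) -> Interpolate -> Transpose(0,3,1,2) -> Y
            #   after:  X -> Interpolate(axes=[2, 3]) -> Y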
| [
"[email protected]"
]
| |
b650f94d0356557f05a4e13a9e5e678884eeefcd | afa231ff3461a92a67c09ec9311611214a79deca | /pyleecan/Methods/Simulation/StructElmer/process_mesh.py | 8d24ee0ba18e75a04e10b602aec1d2fc335c6b06 | [
"Apache-2.0"
]
| permissive | jgdedamas/pyleecan | 38114d0e428597d90648939f9f35abb937fc243d | 52ca00b36bbf1a1ba24ae722cf72c5e8e8e16395 | refs/heads/master | 2023-05-29T16:32:48.380055 | 2021-03-04T10:57:26 | 2021-03-04T10:57:26 | 335,987,909 | 0 | 0 | Apache-2.0 | 2021-02-04T14:58:21 | 2021-02-04T14:58:21 | null | UTF-8 | Python | false | false | 11,783 | py | # -*- coding: utf-8 -*-
import gmsh
import sys
from os import replace
from os.path import splitext
def _remove_entities(gmsh, labels):
"""
Remove model entities that have one of the given labels in their physical
groups names.
Parameters
----------
gmsh :
gmsh object
labels : list
list of labels
"""
# TODO add test that entities are not in surf or part of other 'keeper'entities
# get all group names
grps = gmsh.model.getPhysicalGroups(-1)
grp_names = [gmsh.model.getPhysicalName(*grp) for grp in grps]
# get entities that will be removed
pt_list = []
line_list = []
surf_list = []
for grp, name in zip(grps, grp_names):
if any([label in name.lower() for label in labels]):
dim = grp[0]
entities = gmsh.model.getEntitiesForPhysicalGroup(dim, grp[1])
if dim == 0:
pt_list.extend(entities.tolist())
elif dim == 1:
line_list.extend(entities.tolist())
elif dim == 2:
surf_list.extend(entities.tolist())
# get lines of surfaces
for surf in surf_list:
lines = gmsh.model.getBoundary([(2, surf)]) # TODO check if holes are included
for line in lines:
line_list.append(line[1])
# get points of lines
for line in line_list:
pts = gmsh.model.getBoundary([(1, line)])
for pt in pts:
pt_list.append(pt[1])
# get unique list of entities to remove
line_list = list(set(line_list))
pt_list = list(set(pt_list))
# delete unused entities
for surf in surf_list:
# gmsh.model.removeEntities((2, surf), recursive=False)
gmsh.model.geo.remove([(2, surf)], recursive=False)
for line in line_list:
# gmsh.model.removeEntities((1, line), recursive=False)
gmsh.model.geo.remove([(1, line)], recursive=False)
for pt in pt_list:
# gmsh.model.removeEntities((0, pt), recursive=False)
gmsh.model.geo.remove([(0, pt)], recursive=False)
# synchronize to apply changes to model
gmsh.model.geo.synchronize()
def _get_names_physical(gmsh, dimtag):
grp_tags = gmsh.model.getPhysicalGroupsForEntity(*dimtag)
names = [gmsh.model.getPhysicalName(1, tag) for tag in grp_tags]
return names
def process_mesh(
self, file_in, file_out, is_get_lam=True, is_get_magnet=False, is_hole_air=True
):
"""Preprocess the GMSH model, i.e. remove unused parts, rename boundaries, ..."""
# TODO utilize 'translation' dict
gmsh.initialize(sys.argv)
gmsh.open(file_in)
gmsh.model.geo.removeAllDuplicates()
# remove unused model parts
_remove_entities(gmsh, labels=["stator", "w_sta"])
# get group names
grps = gmsh.model.getPhysicalGroups(-1)
grp_names = [gmsh.model.getPhysicalName(*grp) for grp in grps]
# get lists of some surfaces by name
magnet_list = []
for grp, name in zip(grps, grp_names):
if "magnet" in name.lower():
entities = gmsh.model.getEntitiesForPhysicalGroup(*grp)
if grp[0] == 2:
magnet_list.extend(entities.tolist())
if True: # is_get_lam:
lam_list = []
for grp, name in zip(grps, grp_names):
if "rotor_lam" in name.lower():
entities = gmsh.model.getEntitiesForPhysicalGroup(*grp)
if grp[0] == 2:
lam_list.extend(entities.tolist())
lam_lines = []
for lam in lam_list:
lam_lines.extend(gmsh.model.getBoundary([(2, lam)], oriented=False))
lam_lines = list(set([lam[1] for lam in lam_lines])) # unique
hole_lines = []
for line in lam_lines:
names = _get_names_physical(
gmsh,
dimtag=[
1,
line,
],
)
if any(["h_rotor" in name.lower() for name in names]):
hole_lines.append(line)
if is_get_lam and not is_get_magnet:
ext = "Master"
else:
ext = "Slave"
# setup dict to store physical groups, key: group name, value: list of tags
groups_dict = {}
# get lines of magnets for processing their physical groups
for id, magnet in enumerate(magnet_list):
lines = gmsh.model.getBoundary([(2, magnet)])
# store new group names in 'groups_dict' to set it later
for line in lines:
names = _get_names_physical(
gmsh,
dimtag=[
1,
abs(line[1]),
],
)
if not names:
print(f"Warning: Found magnet line without label - line {line[1]}")
if is_get_magnet or (is_get_lam and line[1] in lam_lines):
for name in names:
if "Magnet" in name:
if (
line[1] in lam_lines
): # only lines with direct contact for now
# replace number and add 'Slave'
s = name.split("_")
s.append(ext) # add extension
s[2] = str(id) # renumber
key = "_".join(s) # new name
if key not in groups_dict.keys():
groups_dict[key] = []
groups_dict[key].append(line)
# Test if other lines (with 'Hole' phy. group) and same phy. group ext.
# ('Left', ...) share the same points to add them as well
# TODO if not used with direct contact (see above),
# but I will keep it for Contact Simulation
# if is_get_lam:
# # get the name of the magnets line
# s = None
# for name in names:
# if "Magnet" in name:
# s = name.split('_')
# # basic check of magnet line name
# if s is not None and len(s) >= 3:
# for hline in hole_lines:
# if hline != abs(line[1]): # skip if hole line == magnet line
# # test for same extension ('Left', 'Right', ...)
# names = _get_names_physical(gmsh, dimtag=[1, hline])
# if any([s[2] in name for name in names]):
# p1 = [x[1] for x in gmsh.model.getBoundary([line])]
# p2 = [x[1] for x in gmsh.model.getBoundary([(1, hline)])]
# pt = [p for p in p1 if p in p2]
# if pt:
# if len(s) == 3:
# s.append('Master') # add extension
# s[1] = str(id) # renumber
# key = "_".join(s) # new name
# if key not in groups_dict.keys():
# groups_dict[key] = []
# groups_dict[key].append((1, hline))
# if not is_hole_air:
# pass # TODO implement
if is_get_magnet:
# store new magnet body name
s = "Magnet_" + str(id) + "_Body"
groups_dict[s] = [
(2, magnet),
]
if is_get_lam:
# store new lamination body name
for id, lam in enumerate(lam_list):
s = "Lamination_" + str(id) + "_Body"
if s not in groups_dict:
groups_dict[s] = []
groups_dict[s].append((2, lam))
# store hole if not air
if not is_hole_air:
pass # TODO
# add symmetry boundaries to keeper dict 'groups_dict'
if is_get_lam:
keeper_list = [
"MASTER_ROTOR_BOUNDARY",
"SLAVE_ROTOR_BOUNDARY",
"Rotor_Tangential_Bridge",
"Rotor_Radial_Bridge",
"ROTOR_BORE_CURVE",
]
for line in lam_lines:
names = _get_names_physical(gmsh, dimtag=[1, line])
for key in keeper_list:
if any([key in name for name in names]):
if key not in groups_dict:
groups_dict[key] = []
groups_dict[key].append((1, line))
# update group names
grps = gmsh.model.getPhysicalGroups(-1)
grp_names = [gmsh.model.getPhysicalName(*grp) for grp in grps]
# delete unused surfaces
del_list = ["shaft", "h_rotor"]
if not is_get_magnet:
del_list.append("magnet")
if not is_get_lam:
del_list.append("rotor_lam")
for grp, name in zip(grps, grp_names):
if any([n in name.lower() for n in del_list]):
entities = gmsh.model.getEntitiesForPhysicalGroup(*grp).tolist()
for entity in entities:
if grp[0] == 2:
gmsh.model.geo.remove([(2, entity)], recursive=False)
# set new physical group names after removing all 'old' physical groups
gmsh.model.removePhysicalGroups(dimTags=[])
for name in grp_names:
gmsh.model.removePhysicalName(name)
for key, values in groups_dict.items():
# lines
tags = [abs(dimtag[1]) for dimtag in values if dimtag[0] == 1]
tags = list(set(tags))
if tags:
print(f"Add physical group {key} with lines {tags}")
pg = gmsh.model.addPhysicalGroup(1, tags, tag=-1)
gmsh.model.setPhysicalName(1, pg, key)
# surfaces
tags = [abs(dimtag[1]) for dimtag in values if dimtag[0] == 2]
tags = list(set(tags))
if tags:
print(f"Add physical group {key} with surface {tags}")
pg = gmsh.model.addPhysicalGroup(2, tags, tag=-1)
gmsh.model.setPhysicalName(2, pg, key)
# cleanup
# get all entities
gmsh.model.geo.synchronize()
entities_all = gmsh.model.getEntities(dim=-1)
surf_list = gmsh.model.getEntities(dim=2)
    print(surf_list)  # debug output: surfaces that remain after cleanup
# get lines of surfaces
line_list = []
for surf in surf_list:
line_list.extend(gmsh.model.getBoundary([surf]))
line_list = [(line[0], abs(line[1])) for line in line_list]
# remove all lines that are not part of the surfaces
for entity in entities_all:
if (entity[0], abs(entity[1])) not in line_list and entity[0] == 1:
gmsh.model.geo.remove([entity], recursive=False)
# entities_all = gmsh.model.getEntities(dim=-1)
# remove unknown/unused physical groups
# TODO
# save
gmsh.model.geo.synchronize()
# gmsh.model.geo.mesh.setTransfiniteSurface(tag)
for surf in surf_list:
gmsh.model.mesh.setRecombine(2, surf[1])
# gmsh.option.setNumber("Mesh.RecombinationAlgorithm", 1)
gmsh.model.mesh.generate(2)
# gmsh.model.mesh.recombine()
gmsh.model.mesh.refine()
# gmsh.model.mesh.refine()
# gmsh.model.mesh.recombine()
# gmsh.model.mesh.refine()
# save mesh or geo file depending on file extension
filename, file_extension = splitext(file_out)
if file_extension == ".geo":
gmsh.write(filename + ".geo_unrolled")
replace(filename + ".geo_unrolled", filename + file_extension)
else:
gmsh.model.mesh.generate(2)
gmsh.write(file_out)
# gmsh.fltk.run() # Uncomment to launch Gmsh GUI
# update group names once again
grps = gmsh.model.getPhysicalGroups(-1)
grp_names = [gmsh.model.getPhysicalName(*grp) for grp in grps]
gmsh.finalize()
return gmsh, grps, grp_names
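# Hedged usage sketch (file names are placeholders; `struct_elmer` stands for
# the StructElmer instance this method is bound to):
# gmsh_obj, groups, names = struct_elmer.process_mesh(
#     "rotor.msh", "rotor_lam.geo", is_get_lam=True, is_get_magnet=False)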
| [
"[email protected]"
]
| |
ded63a09112cf239b904ab299115127cdb7a7f01 | 11b583df4dd615a1cfa9d433318f1f7896c81e7e | /cride/users/views/__init__.py | cb7eb70b4b946844d7f271d02f70a64f8c304622 | [
"MIT"
]
| permissive | jecs580/django_second_app | 31720513894946d005e0bb37519ccdf46bd5308e | ef04b48342ef560eac8f58540ba684e5eb7d7926 | refs/heads/master | 2022-09-17T04:52:41.419770 | 2020-02-03T23:26:59 | 2020-02-03T23:26:59 | 229,647,968 | 0 | 0 | MIT | 2022-09-13T23:03:32 | 2019-12-23T00:13:33 | Python | UTF-8 | Python | false | false | 55 | py | from .users import *
# Import everything from the users module.
"[email protected]"
]
| |
541914f4535d123610ab11ffa73cc29570302b09 | 3add939686f188d4381ea4cc699fca285ff10a46 | /utils/map_county_position.py | a5afbb71d01fb3f177dddf8f28d8cc81bdec910b | []
| no_license | weixinl/2019MCM | a66fdf5ae85dc0dce1df655d2ccae85ed3b553ea | 5caddf4b01044475d2a22097742861eb5cc698b4 | refs/heads/master | 2022-06-25T09:32:57.042550 | 2019-03-07T06:28:38 | 2019-03-07T06:28:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,487 | py | # to get new file map a place to a position on map
import pandas as pd
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
nflis_path="../MCM_NFLIS_Data.xlsx"
nflis_df=pd.read_excel(nflis_path,sheet_name="Data")
zipcode_path='../zipcode.csv'
zipcode_df=pd.read_csv(zipcode_path)
zipcode_df=zipcode_df.rename(columns={'city':'county','state':'state','latitude':'latitude',\
'longitude':'longitude'})
#further process nflis
nflis_df=nflis_df.rename(columns={'COUNTY':'county','State':'state','FIPS_County':'fips_county',\
'FIPS_State':'fips_state','TotalDrugReportsCounty':'county_report'})
tmp_year=2017
nflis_df=nflis_df.loc[nflis_df['YYYY']==tmp_year]
nflis_df=nflis_df[['county','state','county_report']]
nflis_df=nflis_df.groupby(['county','state']).mean().reset_index()
#convert to upper
zipcode_county=zipcode_df['county']
zipcode_county=zipcode_county.str.upper()
zipcode_df['county']=zipcode_county
# print(zipcode_df)
map_df=pd.merge(nflis_df,zipcode_df,how='left',on=['county','state'])
map_df=map_df.drop_duplicates(subset=['county','state'],keep='first')
map_df=map_df.dropna(axis=0,how='any')
map_df=map_df[['county','state','latitude','longitude','county_report']]
# map_df.to_csv('county2position.csv')
longitude_list=map_df['longitude'].tolist()
longitude_list=np.array(longitude_list)
# print(longitude_list)
max_longitude=longitude_list.max()
min_longitude=longitude_list.min()
horizon_start=0
horizon_end=99
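# Linear map pixel = k * longitude + b, chosen so that min_longitude maps to
# horizon_start and max_longitude maps to horizon_end (same idea for latitude).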
k_longitude=(horizon_end-horizon_start)/(max_longitude-min_longitude)
b_longitude=horizon_start-k_longitude*min_longitude
# print(k_longitude)
latitude_list=map_df['latitude'].tolist()
latitude_list=np.array(latitude_list)
# print(longitude_list)
max_latitude=latitude_list.max()
min_latitude=latitude_list.min()
vertical_start=0
vertical_end=99
k_latitude=(vertical_end-vertical_start)/(max_latitude-min_latitude)
b_latitude=vertical_start-k_latitude*min_latitude
img_arr=np.zeros((100,100))
for row in map_df.iterrows():
row=row[1]
tmp_longitude=row['longitude']
tmp_latitude=row['latitude']
tmp_county_report=row['county_report']
tmp_horizon=k_longitude*tmp_longitude+b_longitude
tmp_vertical=k_latitude*tmp_latitude+b_latitude
tmp_vertical=int(tmp_vertical+0.1)
tmp_horizon=int(tmp_horizon+0.1)
img_arr[tmp_horizon][tmp_vertical]=tmp_county_report
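# Note: report counts above 255 saturate when the float array is converted to
# 8-bit grayscale ('L') below.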
img = Image.fromarray(img_arr)
img=img.convert('L')
img.show()
img.save(str(tmp_year)+'_img.jpg')
# print(map_df)
| [
"[email protected]"
]
| |
14c7ed5c95e103cec52a269326a642e3a9dea1cf | 9c4e02ba5201794a4c5cbff548db1be7c87409c1 | /venv/lib/python3.9/site-packages/pyshacl/constraints/sparql/sparql_based_constraint_components.py | 84736e2e6bb0984e9fc8b781b5de8e985def0fd1 | [
"Apache-2.0",
"MIT"
]
| permissive | ClassWizard/PodLockParser | 4faf4679d404158b3cf2b1ceb4faabca461b0008 | 84f6d3fced521849657d21ae4cb9681f5897b957 | refs/heads/master | 2022-12-23T20:39:48.096729 | 2022-02-08T09:49:01 | 2022-02-08T09:49:01 | 167,668,617 | 2 | 1 | MIT | 2022-12-14T10:01:41 | 2019-01-26T08:50:35 | Python | UTF-8 | Python | false | false | 18,547 | py | # -*- coding: utf-8 -*-
"""
https://www.w3.org/TR/shacl/#sparql-constraint-components
"""
import typing
from typing import Dict, List, Tuple, Type, Union
import rdflib
from pyshacl.constraints.constraint_component import ConstraintComponent, CustomConstraintComponent
from pyshacl.consts import SH, RDF_type, SH_ask, SH_ConstraintComponent, SH_message, SH_select
from pyshacl.errors import ConstraintLoadError, ValidationFailure
from pyshacl.helper import get_query_helper_cls
from pyshacl.pytypes import GraphLike
if typing.TYPE_CHECKING:
from pyshacl.shape import Shape
from pyshacl.shapes_graph import ShapesGraph
SH_SPARQLSelectValidator = SH.SPARQLSelectValidator
SH_SPARQLAskValidator = SH.SPARQLAskValidator
class BoundShapeValidatorComponent(ConstraintComponent):
shacl_constraint_component = SH_ConstraintComponent
def __init__(self, constraint, shape: 'Shape', validator):
"""
Create a new custom constraint, by applying a ConstraintComponent and a Validator to a Shape
:param constraint: The source ConstraintComponent, this is needed to bind the parameters in the query_helper
:type constraint: SPARQLConstraintComponent
:param shape:
:type shape: Shape
:param validator:
:type validator: AskConstraintValidator | SelectConstraintValidator
"""
super(BoundShapeValidatorComponent, self).__init__(shape)
self.constraint = constraint
self.validator = validator
params = constraint.parameters
SPARQLQueryHelper = get_query_helper_cls()
self.query_helper = SPARQLQueryHelper(
self.shape, validator.node, validator.query_text, params, messages=validator.messages
)
# Setting self.shape into QueryHelper automatically applies query_helper.bind_params and bind_messages
self.query_helper.collect_prefixes()
@classmethod
def constraint_parameters(cls):
# TODO:coverage: this is never used for this constraint?
return []
@classmethod
def constraint_name(cls):
return "ConstraintComponent"
def make_generic_messages(self, datagraph: GraphLike, focus_node, value_node) -> List[rdflib.Literal]:
return [rdflib.Literal("Parameterised SHACL Query generated constraint validation reports.")]
def evaluate(self, target_graph: GraphLike, focus_value_nodes: Dict, _evaluation_path: List):
"""
:type focus_value_nodes: dict
:type target_graph: rdflib.Graph
"""
reports = []
non_conformant = False
extra_messages = self.constraint.messages or []
rept_kwargs = {
# TODO, determine if we need sourceConstraint here
# 'source_constraint': self.validator.node,
'constraint_component': self.constraint.node,
'extra_messages': extra_messages,
}
for f, value_nodes in focus_value_nodes.items():
# we don't use value_nodes in the sparql constraint
# All queries are done on the corresponding focus node.
try:
violations = self.validator.validate(f, value_nodes, target_graph, self.query_helper)
except ValidationFailure as e:
raise e
for val, vio in violations:
non_conformant = True
msg_args_map = self.query_helper.param_bind_map.copy()
msg_args_map.update({"this": f, "value": val})
if self.shape.is_property_shape:
msg_args_map['path'] = self.shape.path()
self.query_helper.bind_messages(msg_args_map)
bound_messages = self.query_helper.bound_messages
# The DASH test suite likes _no_ value entry in the report if we're on a Property Shape.
report_val = val if not self.shape.is_property_shape else None
if isinstance(vio, bool):
if vio is False: # ASKValidator Result
new_kwargs = rept_kwargs.copy()
new_kwargs['extra_messages'].extend(bound_messages)
rept = self.make_v_result(target_graph, f, value_node=report_val, **new_kwargs)
else: # SELECTValidator Failure
raise ValidationFailure("Validation Failure generated by SPARQLConstraint.")
elif isinstance(vio, tuple):
t, p, v = vio
new_msg_args_map = msg_args_map.copy()
if v is None:
v = report_val
else:
new_msg_args_map['value'] = v
if p is not None:
new_msg_args_map['path'] = p
if t is not None:
new_msg_args_map['this'] = t
self.query_helper.bind_messages(new_msg_args_map)
new_bound_msgs = self.query_helper.bound_messages
new_kwargs = rept_kwargs.copy()
new_kwargs['extra_messages'].extend(new_bound_msgs)
rept = self.make_v_result(target_graph, t or f, value_node=v, result_path=p, **new_kwargs)
else:
new_kwargs = rept_kwargs.copy()
new_kwargs['extra_messages'].extend(bound_messages)
rept = self.make_v_result(target_graph, f, value_node=report_val, **new_kwargs)
reports.append(rept)
return (not non_conformant), reports
class SPARQLConstraintComponentValidator(object):
validator_cache: Dict[Tuple[int, str], Union['SelectConstraintValidator', 'AskConstraintValidator']] = {}
def __new__(cls, shacl_graph: 'ShapesGraph', node, *args, **kwargs):
cache_key = (id(shacl_graph.graph), str(node))
found_in_cache = cls.validator_cache.get(cache_key, False)
if found_in_cache:
return found_in_cache
sg = shacl_graph.graph
type_vals = set(sg.objects(node, RDF_type))
validator_type: Union[Type[SelectConstraintValidator], Type[AskConstraintValidator], None] = None
if len(type_vals) > 0:
if SH_SPARQLSelectValidator in type_vals:
validator_type = SelectConstraintValidator
elif SH_SPARQLAskValidator in type_vals:
validator_type = AskConstraintValidator
if not validator_type:
sel_nodes = set(sg.objects(node, SH_select))
if len(sel_nodes) > 0:
# TODO:coverage: No test for this case
validator_type = SelectConstraintValidator
if not validator_type:
ask_nodes = set(sg.objects(node, SH_ask))
if len(ask_nodes) > 0:
validator_type = AskConstraintValidator
if not validator_type:
# TODO:coverage: No test for this case
raise ConstraintLoadError(
"Validator must be of type sh:SPARQLSelectValidator or sh:SPARQLAskValidator and must have either a sh:select or a sh:ask predicate.",
"https://www.w3.org/TR/shacl/#ConstraintComponent",
)
validator = validator_type(shacl_graph, node, *args, **kwargs)
cls.validator_cache[cache_key] = validator
return validator
def apply_to_shape_via_constraint(self, constraint, shape, **kwargs) -> BoundShapeValidatorComponent:
"""
Create a new Custom Constraint (BoundShapeValidatorComponent)
:param constraint:
:type constraint: SPARQLConstraintComponent
:param shape:
:type shape: pyshacl.shape.Shape
:param kwargs:
:return:
"""
must_be_ask_val = kwargs.pop('must_be_ask_val', False)
if must_be_ask_val and not (isinstance(self, AskConstraintValidator)):
# TODO:coverage: No test for this case, do we need to test this?
raise ConstraintLoadError(
"Validator not for NodeShape or a PropertyShape must be of type SPARQLAskValidator.",
"https://www.w3.org/TR/shacl/#ConstraintComponent",
)
must_be_select_val = kwargs.pop('must_be_select_val', False)
if must_be_select_val and not (isinstance(self, SelectConstraintValidator)):
# TODO:coverage: No test for this case, do we need to test this?
raise ConstraintLoadError(
"Validator for a NodeShape or a PropertyShape must be of type SPARQLSelectValidator.",
"https://www.w3.org/TR/shacl/#ConstraintComponent",
)
return BoundShapeValidatorComponent(constraint, shape, self)
def __init__(self, shacl_graph: 'ShapesGraph', node, **kwargs):
initialised = getattr(self, 'initialised', False)
if initialised:
return
self.shacl_graph = shacl_graph
self.node = node
sg = shacl_graph.graph
message_nodes = set(sg.objects(node, SH_message))
for m in message_nodes:
if not (isinstance(m, rdflib.Literal) and isinstance(m.value, str)):
                # TODO:coverage: No test for when a SPARQL-based constraint message is an RDF Literal that is not of type xsd:string
raise ConstraintLoadError(
"Validator sh:message must be an RDF Literal of type xsd:string.",
"https://www.w3.org/TR/shacl/#ConstraintComponent",
)
self.messages = message_nodes
self.initialised = True
def make_messages(self, params_map=None):
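        # Substitutes "{$param}" tokens in each message template. Illustrative
        # example (values assumed): "Expected at least {$minCount} values" with
        # params_map {"minCount": Literal(1)} becomes "Expected at least 1 values".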
if params_map is None:
return self.messages
ret_msgs = []
for m in self.messages:
this_m = m.value[:]
for a, v in params_map.items():
replace_me = "{$" + str(a) + "}"
if isinstance(v, rdflib.Literal):
v = v.value
this_m = this_m.replace(replace_me, str(v))
ret_msgs.append(rdflib.Literal(this_m))
return ret_msgs
class AskConstraintValidator(SPARQLConstraintComponentValidator):
def __new__(cls, shacl_graph: 'ShapesGraph', node, *args, **kwargs):
return object.__new__(cls)
def __init__(self, shacl_graph: 'ShapesGraph', node, *args, **kwargs):
super(AskConstraintValidator, self).__init__(shacl_graph, node, **kwargs)
g = shacl_graph.graph
ask_vals = set(g.objects(node, SH_ask))
if len(ask_vals) < 1 or len(ask_vals) > 1:
# TODO:coverage: No test for this case
raise ConstraintLoadError(
"AskValidator must have exactly one value for sh:ask.",
"https://www.w3.org/TR/shacl/#ConstraintComponent",
)
ask_val = next(iter(ask_vals))
if not (isinstance(ask_val, rdflib.Literal) and isinstance(ask_val.value, str)):
# TODO:coverage: No test for this case
raise ConstraintLoadError(
"AskValidator sh:ask must be an RDF Literal of type xsd:string.",
"https://www.w3.org/TR/shacl/#ConstraintComponent",
)
self.query_text = ask_val.value
def validate(self, focus, value_nodes, target_graph, query_helper=None, new_bind_vals=None):
"""
:param focus:
:param value_nodes:
:param query_helper:
:param target_graph:
:type target_graph: rdflib.Graph
:param new_bind_vals:
:return:
"""
param_bind_vals = query_helper.param_bind_map if query_helper else {}
new_bind_vals = new_bind_vals or {}
bind_vals = param_bind_vals.copy()
bind_vals.update(new_bind_vals)
violations = set()
for v in value_nodes:
if query_helper is None:
# TODO:coverage: No test for this case when query_helper is None
init_binds = {}
sparql_text = self.query_text
else:
init_binds, sparql_text = query_helper.pre_bind_variables(
focus, valuenode=v, extravars=bind_vals.keys()
)
sparql_text = query_helper.apply_prefixes(sparql_text)
init_binds.update(bind_vals)
try:
result = target_graph.query(sparql_text, initBindings=init_binds)
answer = result.askAnswer
except (KeyError, AttributeError):
# TODO:coverage: Can this ever actually happen?
raise ValidationFailure("ASK Query did not return an askAnswer.")
if answer is False:
violations.add((v, False))
return violations
class SelectConstraintValidator(SPARQLConstraintComponentValidator):
def __new__(cls, shacl_graph: 'ShapesGraph', node, *args, **kwargs):
return object.__new__(cls)
def __init__(self, shacl_graph: 'ShapesGraph', node, *args, **kwargs):
super(SelectConstraintValidator, self).__init__(shacl_graph, node, **kwargs)
g = shacl_graph.graph
select_vals = set(g.objects(node, SH_select))
if len(select_vals) < 1 or len(select_vals) > 1:
# TODO:coverage: No test for this case, do we need to test this?
raise ConstraintLoadError(
"SelectValidator must have exactly one value for sh:select.",
"https://www.w3.org/TR/shacl/#ConstraintComponent",
)
select_val = next(iter(select_vals))
if not (isinstance(select_val, rdflib.Literal) and isinstance(select_val.value, str)):
# TODO:coverage: No test for the case when sh:select is not a literal of type string
raise ConstraintLoadError(
"SelectValidator sh:select must be an RDF Literal of type xsd:string.",
"https://www.w3.org/TR/shacl/#ConstraintComponent",
)
self.query_text = select_val.value
def validate(self, focus, value_nodes, target_graph, query_helper=None, new_bind_vals=None):
"""
:param focus:
:param value_nodes:
:param query_helper:
:param target_graph:
:type target_graph: rdflib.Graph
:param new_bind_vals:
:return:
"""
param_bind_vals = query_helper.param_bind_map if query_helper else {}
new_bind_vals = new_bind_vals or {}
bind_vals = param_bind_vals.copy()
bind_vals.update(new_bind_vals)
violations = set()
for v in value_nodes:
if query_helper is None:
# TODO:coverage: No test for this case when query_helper is None
init_binds = {}
sparql_text = self.query_text
else:
init_binds, sparql_text = query_helper.pre_bind_variables(
focus, valuenode=v, extravars=bind_vals.keys()
)
sparql_text = query_helper.apply_prefixes(sparql_text)
init_binds.update(bind_vals)
results = target_graph.query(sparql_text, initBindings=init_binds)
if not results or len(results.bindings) < 1:
continue
for r in results:
try:
p = r['path']
except KeyError:
p = None
try:
v2 = r['value']
except KeyError:
v2 = None
try:
t = r['this']
except KeyError:
# TODO:coverage: No test for when result has no 'this' key
t = None
if p or v2 or t:
violations.add((v, (t, p, v2)))
else:
# TODO:coverage: No test for generic failure, when
# 'path' and 'value' and 'this' are not returned.
# here 'failure' must exist
try:
f = r['failure']
if f is True or (isinstance(f, rdflib.Literal) and f.value):
violations.add((v, True))
except KeyError:
pass
return violations
class SPARQLConstraintComponent(CustomConstraintComponent):
"""
    SPARQL-based constraints provide a lot of flexibility but may be hard to understand for some
    people or lead to repetition. This section introduces SPARQL-based constraint components as a
    way to abstract the complexity of SPARQL and to declare high-level reusable components similar
    to the Core constraint components. Such constraint components can be declared using the SHACL
    RDF vocabulary and thus shared and reused.
Link:
https://www.w3.org/TR/shacl/#sparql-constraint-components
"""
__slots__: Tuple = tuple()
def __new__(cls, shacl_graph, node, parameters, validators, node_validators, property_validators):
return super(SPARQLConstraintComponent, cls).__new__(
cls, shacl_graph, node, parameters, validators, node_validators, property_validators
)
@property
def messages(self):
# TODO: allow messages at this SPARQLConstraintComponent level
return []
def make_validator_for_shape(self, shape: 'Shape'):
"""
:param shape:
:type shape: Shape
:return:
"""
val_count = len(self.validators)
node_val_count = len(self.node_validators)
prop_val_count = len(self.property_validators)
must_be_select_val = False
must_be_ask_val = False
if shape.is_property_shape and prop_val_count > 0:
validator_node = next(iter(self.property_validators))
must_be_select_val = True
elif (not shape.is_property_shape) and node_val_count > 0:
validator_node = next(iter(self.node_validators))
must_be_select_val = True
elif val_count > 0:
validator_node = next(iter(self.validators))
must_be_ask_val = True
else:
raise ConstraintLoadError(
"Cannot select a validator to use, according to the rules.",
"https://www.w3.org/TR/shacl/#constraint-components-validators",
)
validator = SPARQLConstraintComponentValidator(self.sg, validator_node)
applied_validator = validator.apply_to_shape_via_constraint(
self, shape, must_be_ask_val=must_be_ask_val, must_be_select_val=must_be_select_val
)
return applied_validator
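# Hedged usage sketch (names are illustrative; only make_validator_for_shape
# and BoundShapeValidatorComponent.evaluate are defined in this module):
# component = SPARQLConstraintComponent(sg, node, parameters, validators,
#                                       node_validators, property_validators)
# bound = component.make_validator_for_shape(shape)
# conforms, reports = bound.evaluate(data_graph, focus_value_nodes, [])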
| [
"[email protected]"
]
| |
e410f2ddda9eea30c3c3608956c7f3fd9f50ca3a | ddd993057174b52a9c4ecffddda655504ccc2366 | /src/main/python/systemds/operator/algorithm/builtin/imputeByMode.py | c6921b1ead323c2ce6c21c69cf1a5179aad7a138 | [
"Apache-2.0"
]
| permissive | atefeh-asayesh/systemds | 68840e3e8005d5bff3e76aeed811c7ab1cb89e8f | 96733360c8f600355d5600f2edb8960ba1d47861 | refs/heads/master | 2023-08-04T18:23:56.076995 | 2021-09-27T08:41:40 | 2021-09-27T08:41:40 | 368,129,199 | 0 | 0 | Apache-2.0 | 2021-06-08T20:22:08 | 2021-05-17T09:29:42 | Java | UTF-8 | Python | false | false | 1,421 | py | # -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/imputeByMode.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix, Frame, List, MultiReturn, Scalar
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def imputeByMode(X: Matrix):
params_dict = {'X': X}
return Matrix(X.sds_context,
'imputeByMode',
named_input_nodes=params_dict)
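# Hedged usage sketch (assumes the systemds Python bindings; names such as
# SystemDSContext.from_numpy are not verified here):
# from systemds.context import SystemDSContext
# with SystemDSContext() as sds:
#     X = sds.from_numpy(arr)  # `arr`: NumPy matrix whose NaNs should be imputed
#     imputed = imputeByMode(X).compute()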
| [
"[email protected]"
]
| |
37909ffcf88ac36105ccbcdb2881775b527b6187 | 9b422078f4ae22fe16610f2ebc54b8c7d905ccad | /xlsxwriter/test/comparison/test_chart_size02.py | bb8624007d0a0803fa1ea7fec99c5b6f4a73cd6e | [
"BSD-2-Clause-Views"
]
| permissive | projectsmahendra/XlsxWriter | 73d8c73ea648a911deea63cb46b9069fb4116b60 | 9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45 | refs/heads/master | 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 | NOASSERTION | 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null | UTF-8 | Python | false | false | 1,360 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_size01.xlsx')
def test_create_file(self):
"""Test XlsxWriter chartarea properties."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [61355904, 61365248]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
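        # The scale factors below resize the default 480x288 chart to roughly
        # 512x320, matching the explicitly sized chart in the chart_size01.xlsx
        # reference file this test compares against.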
chart.set_size({'x_scale': 1.066666666, 'y_scale': 1.11111111})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| [
"[email protected]"
]
| |
a5ed97929c86c1220ab0718c4de35447549ecd42 | fdd9e3131ead660db9485304438993a2a249fb1f | /tests/test_npc_cli/test_describe/test_describe_systems_cmd.py | 7d33b59443410bbcf0d0664b05ce152289b67f07 | [
"MIT"
]
| permissive | aurule/npc | 6807aa0723e765cb33fe5f5b49b0f579a6207153 | 2e1b2e92e2a4908d791846f184ee7e4de2f6682e | refs/heads/develop | 2023-09-02T02:46:47.900892 | 2023-08-30T17:31:00 | 2023-08-30T17:31:00 | 47,045,977 | 14 | 2 | MIT | 2023-08-18T20:49:12 | 2015-11-29T01:40:18 | Python | UTF-8 | Python | false | false | 590 | py | from click.testing import CliRunner
from tests.fixtures import tmp_campaign
from npc_cli import cli
def test_shows_system_name(tmp_path):
runner = CliRunner()
with runner.isolated_filesystem(temp_dir=tmp_path):
result = runner.invoke(cli, ['describe', 'systems'])
assert "New World of Darkness" in result.output
def test_shows_current_campaign_system(tmp_campaign):
runner = CliRunner()
with runner.isolated_filesystem(temp_dir=tmp_campaign.root):
result = runner.invoke(cli, ['describe', 'systems'])
assert "Currently using" in result.output
| [
"[email protected]"
]
| |
bbb794515424fcae3a62640604b057d36412e869 | 2e74cff6c9639f3903ccde662e79359d0724285e | /2019_late/20190920/swea_5188_최소합.py | 73b522689d9b934a03bfafa00236176b182caa0d | []
| no_license | dodonmountain/algorithm | e29988071f651e51ba65e3926302f94a3d4074a5 | ce33e0d74220839aed4b17a47fa0069458a4324e | refs/heads/master | 2022-11-05T05:14:01.527015 | 2022-11-01T04:29:37 | 2022-11-01T04:29:37 | 200,008,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | import sys
sys.stdin = open('5188.txt')
dx, dy = (1,0), (0,1)
def dfs(h,c):
global tmp
if h == (N-1,N-1):
if c < tmp:
tmp = c
return
for i in range(2):
nx, ny = h[0], h[1]
if 0 <= h[0] + dx[i] < N:
nx = h[0] + dx[i]
if 0 <= h[1] + dy[i] < N:
ny = h[1] + dy[i]
if not visit[nx][ny]:
if c+board[nx][ny] <= tmp:
visit[nx][ny] = 1
dfs((nx,ny),c+board[nx][ny])
visit[nx][ny] = 0
for t_case in range(int(input())):
N = int(input())
board,pp,ll = [],0,0
for _ in range(N):
board.append(list(map(int, input().split())))
for i in range(1, N):
pp += board[i][N-1];ll += board[i][0]
    # Seed tmp (the best cost so far) with the cheaper of the two border paths
    # (top row + right column vs. left column + bottom row) so dfs can prune.
    if sum(board[0]) + pp > sum(board[N-1]) + ll:
        tmp = sum(board[N-1]) + ll
    else:
        tmp = sum(board[0]) + pp
visit = [[0] * N for _ in range(N)]
dfs((0,0),board[0][0])
print('#{} {}'.format(t_case+1, tmp)) | [
"[email protected]"
]
| |
12d348348906199a44c75cc418f75704de8a63e2 | 0db97db08743783019efe022190f409d22ff95bd | /aliyun/api/rest/Ram20140214AddUserRequest.py | e2541abe6859593f3da162236c1ccb39f69cba4c | [
"Apache-2.0"
]
| permissive | snowyxx/aliyun-python-demo | 8052e2a165f1b869affe632dda484d6ca203bd9b | ed40887ddff440b85b77f9b2a1fcda11cca55c8b | refs/heads/master | 2021-01-10T03:37:31.657793 | 2016-01-21T02:03:14 | 2016-01-21T02:03:14 | 49,921,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | '''
Created by auto_sdk on 2015.01.27
'''
from aliyun.api.base import RestApi
class Ram20140214AddUserRequest(RestApi):
def __init__(self,domain='ram.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.AccountSpace = None
self.Comments = None
self.UserName = None
def getapiname(self):
return 'ram.aliyuncs.com.AddUser.2014-02-14'
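# Hedged usage sketch (illustrative only; request dispatch depends on the
# RestApi base class and configured credentials):
# req = Ram20140214AddUserRequest()
# req.UserName = 'alice'
# req.Comments = 'created via SDK'
# resp = req.getResponse()  # assumed RestApi helper; verify against the SDK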
| [
"[email protected]"
]
| |
ade36c2e192e4a454bafe0e1e3ff1cc305908cf3 | 5b9d8b8aeee3ec8c29ca3de3e90182d712705690 | /backend/manage.py | 43df8ce678c2c740da4bad02791a4cd854202e3d | []
| no_license | crowdbotics-apps/music-pro-21325 | 97ff337afbec507e9ed7d99ede19e0cfa2d16e81 | c1f71907b192f72a660f1ac98aee3ae348793cfd | refs/heads/master | 2022-12-25T23:06:26.557969 | 2020-10-10T08:14:08 | 2020-10-10T08:14:08 | 302,850,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'music_pro_21325.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
4b27a3e0aa124b223b6e2bb7cf56736f2f5905d6 | e6a8793b1b12d47e57f00485350d122946618245 | /home/migrations/0005_remove_page_is_footer_1.py | 02c0d3ec0eed67be17f85c803e78a91e63b41459 | []
| no_license | Fabricourt/school | 70b2eba2c0b8ff9b9290eb0f68d730698a6d3a63 | dad80c36be34b432dfadef195eb9e867f82cafff | refs/heads/main | 2023-01-01T15:48:43.760288 | 2020-10-26T11:15:32 | 2020-10-26T11:15:32 | 305,829,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | # Generated by Django 3.1.2 on 2020-10-17 13:07
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0004_auto_20201017_1552'),
]
operations = [
migrations.RemoveField(
model_name='page',
name='is_footer_1',
),
]
| [
"[email protected]"
]
| |
097187bfe4e27f5d2ceaa882fa7a70cfd771c020 | 5ee5853eb335fcf575d4344366ef9b4bce03570d | /p847h/shorest_path_length.py | 9777473ab2a7326a108ab1a574ad0307396ed666 | [
"MIT"
]
| permissive | l33tdaima/l33tdaima | 15463fb2f8d61286a4a3a7bacaaee2ab1f7c4f43 | f35305c618b383a79d05074d891cf0f7acabd88f | refs/heads/main | 2023-07-20T21:52:26.330301 | 2023-07-19T02:30:22 | 2023-07-19T02:30:22 | 99,509,451 | 1 | 0 | MIT | 2018-10-31T15:10:49 | 2017-08-06T19:44:29 | JavaScript | UTF-8 | Python | false | false | 939 | py | from cmath import exp
from curses.ascii import SO
class Solution:
    def shortestPathLength(self, graph: list[list[int]]) -> int:
        # BFS over (node, visited-bitmask) states; there are at most n * 2^n
        # states. `final` has one bit set per node, so state == final means
        # every node has been visited at least once.
        memo, final, steps = set(), (1 << len(graph)) - 1, 0
        # The walk may start at any node, so seed the BFS with all of them.
        queue = [(i, 1 << i) for i in range(len(graph))]
        while True:
            new = []
            for node, state in queue:
                if state == final:
                    return steps
                for v in graph[node]:
                    if (v, state | 1 << v) not in memo:
                        new.append((v, state | 1 << v))
                        memo.add((v, state | 1 << v))
            queue = new
            steps += 1
# TESTS
for graph, expected in [
([[1, 2, 3], [0], [0], [0]], 4),
([[1], [0, 2, 4], [1, 3, 4], [2], [1, 2]], 4),
]:
sol = Solution()
actual = sol.shortestPathLength(graph)
print("Shortest path to visit all nodes in", graph, "->", actual)
assert actual == expected
| [
"[email protected]"
]
| |
62f9354ab6fcf808aef2ae3afc2deb20f08226ab | 89d5dbd7672d9e03e7ca5277a9fd942b2444db86 | /Advance python/08_global_local_variable.py | c69f04343d0079f2abeadbbb65295b89401249d5 | []
| no_license | rameshparajuli-github/python-programming | 6ff218d4d1cdde73a578e4f2f3ba6241310d6408 | 5d2638a24e8b50614f2c15f04eb1f00dba0f6175 | refs/heads/master | 2023-06-17T18:40:47.797595 | 2021-07-16T14:02:52 | 2021-07-16T14:02:52 | 386,656,247 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | a=54 # Global Variable
def func1():
global a # This line change Global variable
print(f"This statement:{a}")
a=9 # Local Variable # yadi teo mathi ko global a use nagareko vaya yo line ma 54 naii print hunthyo
print(f"This statement:{a}")
func1()
print(f"This statement:{a}")
| [
"[email protected]"
]
| |
3a40a65282330a85aeb70fd636c76af7cc562d0b | a7a29bc1643e14ae74f95d0b6695de32b6d6cfb5 | /0x0B-python-input_output/1-number_of_lines.py | 148a4fadbe23197c0272795e3bbb6c927d9f17e7 | []
| no_license | ToqYang/holbertonschool-higher_level_programming | 95752a663307534e16d57a73cc4a8b0170f86614 | 862f88652619711eb0d1c7f821467b15d3f9b7cf | refs/heads/master | 2020-07-23T01:41:19.574505 | 2020-02-14T04:46:35 | 2020-02-14T04:46:35 | 207,403,438 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | #!/usr/bin/python3
""" Read a file """
def number_of_lines(filename=""):
""" Received the filename for it can does a syscall for read the file
Return: Counter of the lines"""
count = 0
with open(filename, mode="r", encoding="utf-8") as fil:
for line in fil:
count += 1
return count
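# Hedged usage sketch (assumes a text file named "my_file.txt" exists):
# print(number_of_lines("my_file.txt"))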
| [
"[email protected]"
]
| |
d20d8ce179c8f7e5f77654c2d090e1085d1785fb | 1273cef322e21edd65f5c44e0d3ad592959d27bb | /tensorflow/python/eager/context.py | 13fb0e88a6df9cbe6921d8b81bed8003a6a2ed33 | [
"Apache-2.0"
]
| permissive | sreesms/tensorflow | 6e176b3ad9d68ac397d02f5dc0fcb4340aead4ae | e105c101f1df18a8f2f2d25f96f43e097483d99b | refs/heads/master | 2020-03-27T12:03:31.035937 | 2018-08-29T00:22:26 | 2018-08-29T00:26:35 | 146,522,667 | 1 | 0 | Apache-2.0 | 2018-08-29T00:33:03 | 2018-08-29T00:33:03 | null | UTF-8 | Python | false | false | 26,767 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for TensorFlow's "Eager" mode of execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import random
import threading
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device as pydev
from tensorflow.python.util import compat
from tensorflow.python.util import is_in_graph_mode
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
GRAPH_MODE = 0
EAGER_MODE = 1
# Default execution mode.
_default_mode = GRAPH_MODE
# Cache from (old_device_name, partial_new_device_name) -> (new_device_name,
# new_device_spec).
# Note that we do not protect this with a lock and instead rely on python's GIL
# and the idempotent nature of writes to provide thread safety.
_device_parsing_cache = {}
_MAXINT32 = 2**31 - 1
DEVICE_PLACEMENT_EXPLICIT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_EXPLICIT
DEVICE_PLACEMENT_WARN = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_WARN
DEVICE_PLACEMENT_SILENT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT
DEVICE_PLACEMENT_SILENT_FOR_INT32 = (
pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32)
SYNC = 0
ASYNC = 1
class _TensorCache(object):
"""Simple cache which evicts items based on length in a FIFO manner."""
def __init__(self, max_items=256):
self._data = collections.OrderedDict()
self._max_items = max_items if max_items else 256
def put(self, key, value):
self._data[key] = value
if len(self._data) > self._max_items:
self._data.popitem(last=False)
def get(self, key):
return self._data.get(key, None)
def flush(self):
self._data = {}
# TODO(agarwal): better name ?
class _EagerContext(threading.local):
"""Thread local eager context."""
def __init__(self):
super(_EagerContext, self).__init__()
self.device_spec = pydev.DeviceSpec.from_string("")
self.device_name = self.device_spec.to_string()
self.mode = _default_mode
self.is_eager = _default_mode == EAGER_MODE
self.scope_name = ""
self.recording_summaries = False
self.summary_writer_resource = None
self.scalar_cache = {}
self.ones_rank_cache = _TensorCache()
self.zeros_cache = _TensorCache()
self.execution_mode = None
ContextSwitch = collections.namedtuple(
"ContextSwitch", ["is_building_function", "enter_context_fn"])
# `_ContextSwitchStack` is a `threading.local` to match the semantics of
# `_DefaultGraphStack`, which is also a `threading.local`.
class _ContextSwitchStack(threading.local):
"""A thread-local stack of context switches."""
def __init__(self, eager):
super(_ContextSwitchStack, self).__init__()
self.stack = []
if eager:
# Initialize the stack with a pointer to enter the eager context; this
# ensures that the fact that eager execution was enabled is propagated
# across threads, since (1) `enable_eager_execution` modifies a
# process-level flag (`_default_mode`) and (2) `__init__` is called each
# time a threading.local object is used in a separate thread.
self.push(is_building_function=False, enter_context_fn=eager_mode)
def push(self, is_building_function, enter_context_fn):
"""Push metadata about a context switch onto the stack.
A context switch can take one of two forms: installing a graph as the
default graph, or entering the eager context. For each context switch,
we record whether or not the entered context is building a function.
Args:
is_building_function: (bool.) Whether the context is building a function.
enter_context_fn: (function.) A callable that executes the context switch.
For example, `graph.as_default` or `eager_mode`.
"""
self.stack.append(
ContextSwitch(is_building_function, enter_context_fn))
def pop(self):
"""Pop the stack."""
self.stack.pop()
# TODO(agarwal): rename to EagerContext / EagerRuntime ?
# TODO(agarwal): consider keeping the corresponding Graph here.
class Context(object):
"""Environment in which eager operations execute."""
# TODO(agarwal): create and link in some documentation for `execution_mode`.
# pylint: disable=redefined-outer-name
def __init__(self,
config=None,
device_policy=None,
execution_mode=None,
server_def=None):
"""Creates a new Context.
Args:
config: (Optional.) A `ConfigProto` protocol buffer with configuration
options for the Context. Note that a lot of these options may be
currently unimplemented or irrelevant when eager execution is enabled.
device_policy: (Optional.) What policy to use when trying to run an
operation on a device with inputs which are not on that device.
When set to None, an appropriate value will be picked automatically.
The value picked may change between TensorFlow releases.
Defaults to tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32.
Valid values:
- tfe.DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is
not correct.
- tfe.DEVICE_PLACEMENT_WARN: copies the tensors which are not on the
right device but raises a warning.
- tfe.DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might
hide performance problems.
- tfe.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors,
raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how operations dispatched
are actually executed. When set to None, an appropriate value will be
picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- tf.contrib.eager.SYNC: executes each operation synchronously.
- tf.contrib.eager.ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
server_def: (Optional.) A tensorflow::ServerDef proto.
Enables execution on remote devices. GrpcServers need to be started by
creating an identical server_def to this, and setting the appropriate
task_indexes, so that the servers can communicate. It will then be
possible to execute operations on remote devices.
Raises:
ValueError: If execution_mode is not valid.
"""
self._eager_context = _EagerContext()
self._context_switches = _ContextSwitchStack(self.executing_eagerly())
self._context_handle = None
self._context_devices = None
self._post_execution_callbacks = []
self._config = config
self._seed = None
self._initialize_lock = threading.Lock()
self._device_policy = device_policy
if execution_mode not in (None, SYNC, ASYNC):
raise ValueError(
"execution_mode should be None/SYNC/ASYNC. Got %s" % execution_mode)
if execution_mode is None:
execution_mode = SYNC
self._execution_mode = execution_mode
self._server_def = server_def
# pylint: enable=redefined-outer-name
def _set_global_seed(self, seed):
"""Set a global eager mode seed for random ops."""
self._seed = seed
self._rng = random.Random(self._seed)
# Also clear the kernel cache, to reset any existing seeds
if self._context_handle is not None:
pywrap_tensorflow.TFE_ContextClearCaches(self._context_handle)
def _internal_operation_seed(self):
"""Returns a fake operation seed.
In eager mode, user shouldn't set or depend on operation seed.
Here, we generate a random seed based on global seed to make
operation's randomness different and depend on the global seed.
Returns:
A fake operation seed based on global seed.
"""
return self._rng.randint(0, _MAXINT32)
def _initialize_devices(self):
"""Helper to initialize devices."""
# Store list of devices
self._context_devices = []
device_list = pywrap_tensorflow.TFE_ContextListDevices(
self._context_handle)
try:
self._num_gpus = 0
for i in range(pywrap_tensorflow.TF_DeviceListCount(device_list)):
dev_name = pywrap_tensorflow.TF_DeviceListName(device_list, i)
self._context_devices.append(pydev.canonical_name(dev_name))
dev_type = pywrap_tensorflow.TF_DeviceListType(device_list, i)
if dev_type == "GPU":
self._num_gpus += 1
finally:
pywrap_tensorflow.TF_DeleteDeviceList(device_list)
def _initialize_handle_and_devices(self):
"""Initialize handle and devices."""
with self._initialize_lock:
if self._context_handle is not None:
return
assert self._context_devices is None
opts = pywrap_tensorflow.TFE_NewContextOptions()
try:
if self._config is not None:
config_str = self._config.SerializeToString()
pywrap_tensorflow.TFE_ContextOptionsSetConfig(opts, config_str)
if self._device_policy is not None:
pywrap_tensorflow.TFE_ContextOptionsSetDevicePlacementPolicy(
opts, self._device_policy)
if self._execution_mode == ASYNC:
pywrap_tensorflow.TFE_ContextOptionsSetAsync(opts, True)
self._context_handle = pywrap_tensorflow.TFE_NewContext(opts)
finally:
pywrap_tensorflow.TFE_DeleteContextOptions(opts)
if self._server_def is not None:
server_def_str = self._server_def.SerializeToString()
pywrap_tensorflow.TFE_ContextSetServerDef(self._context_handle, 600,
server_def_str)
self._initialize_devices()
def _clear_caches(self):
self.scalar_cache().clear()
self.ones_rank_cache().flush()
self.zeros_cache().flush()
def set_server_def(self, server_def, keep_alive_secs=600):
"""Allow setting a server_def on the context.
When a server def is replaced, it effectively clears a bunch of caches
within the context. If you attempt to use a tensor object that was pointing
to a tensor on the remote device, it will raise an error.
Args:
server_def: A tensorflow::ServerDef proto.
Enables execution on remote devices.
keep_alive_secs: Num. seconds after which the remote end will hang up.
As long as the client is still alive, the server state for the context
will be kept alive. If the client is killed (or there is some failure),
the server will clean up its context keep_alive_secs after the final RPC
it receives.
Raises:
ValueError: if server_def is None.
"""
if not server_def:
raise ValueError("server_def is None.")
if not self._context_handle:
self._server_def = server_def
else:
server_def_str = server_def.SerializeToString()
pywrap_tensorflow.TFE_ContextSetServerDef(self._context_handle,
keep_alive_secs, server_def_str)
# Clear all the caches in case there are remote tensors in them.
self._clear_caches()
self._initialize_devices()
@property
def _handle(self):
ctx = self._context_handle
if ctx is None:
self._initialize_handle_and_devices()
return self._context_handle
else:
return ctx
@property
def _devices(self):
devices = self._context_devices
if devices is None:
self._initialize_handle_and_devices()
return self._context_devices
else:
return devices
def __str__(self):
if self._context_handle is None:
return "Eager TensorFlow Context. Devices currently uninitialized."
else:
devices = self._devices
lines = ["Eager TensorFlow Context with %d devices" % (len(devices))]
for i, d in enumerate(devices):
lines.append(" Device %d: %s" % (i, d))
return "\n".join(lines)
@tf_contextlib.contextmanager
def _mode(self, mode):
"""A context manager to allow setting the mode to EAGER/GRAPH."""
ctx = self._eager_context
old_mode = ctx.mode
old_is_eager = ctx.is_eager
ctx.mode = mode
ctx.is_eager = mode == EAGER_MODE
if mode == EAGER_MODE:
# Entering graph mode does not provide us with sufficient information to
# record a context switch; graph-based context switches are only logged
# when a graph is registered as the default graph.
self.context_switches.push(False, eager_mode)
try:
yield
finally:
ctx.is_eager = old_is_eager
ctx.mode = old_mode
if mode == EAGER_MODE:
self.context_switches.pop()
def executing_eagerly(self):
"""Returns True if current thread has eager executing enabled."""
return self._eager_context.is_eager
def scalar_cache(self):
"""Per-device cache for scalars."""
return self._eager_context.scalar_cache
def ones_rank_cache(self):
"""Per-device cache for scalars."""
return self._eager_context.ones_rank_cache
def zeros_cache(self):
"""Per-device cache for scalars."""
return self._eager_context.zeros_cache
@property
def scope_name(self):
"""Returns scope name for the current thread."""
return self._eager_context.scope_name
@scope_name.setter
def scope_name(self, s):
"""Sets scope name for the current thread."""
self._eager_context.scope_name = s
@property
def summary_writer_resource(self):
"""Returns summary writer resource."""
return self._eager_context.summary_writer_resource
@summary_writer_resource.setter
def summary_writer_resource(self, resource):
"""Sets summary writer resource."""
self._eager_context.summary_writer_resource = resource
@property
def device_name(self):
"""Returns the device name for the current thread."""
return self._eager_context.device_name
@property
def device_spec(self):
"""Returns the device spec for the current thread."""
return self._eager_context.device_spec
@tf_contextlib.contextmanager
def device(self, name):
"""Context-manager to force placement of operations and Tensors on a device.
Args:
name: Name of the device or None to get default placement.
Yields:
Nothing.
Raises:
ValueError: If name is not a string or is an invalid device name.
"""
eager_context = self._eager_context
old_device_name = eager_context.device_name
old_device_spec = eager_context.device_spec
cache_key = (old_device_name, name)
try:
new_device_name, new_device_spec = _device_parsing_cache[cache_key]
except TypeError:
# Error while trying to compute the cache key.
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(name), name))
except KeyError:
# Handle a cache miss.
if name is not None:
if not isinstance(name, str):
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(name), name))
device_spec = pydev.DeviceSpec.from_string(name)
if old_device_name:
new_device_spec = copy.copy(old_device_spec)
else:
new_device_spec = pydev.DeviceSpec.from_string(
"/job:localhost/replica:0/task:0/device:CPU:0")
new_device_spec.merge_from(device_spec)
else:
new_device_spec = pydev.DeviceSpec.from_string("")
new_device_name = new_device_spec.to_string()
_device_parsing_cache[cache_key] = (new_device_name, new_device_spec)
try:
eager_context.device_name = new_device_name
eager_context.device_spec = new_device_spec
yield
finally:
eager_context.device_name = old_device_name
eager_context.device_spec = old_device_spec
def devices(self):
"""List of the names of devices available to execute operations."""
return self._devices
def get_execution_mode(self):
mode = self._eager_context.execution_mode
if mode is None:
mode = self._execution_mode
return mode
def set_execution_mode(self, mode):
"""Sets execution mode for current thread."""
if mode not in (None, SYNC, ASYNC):
raise ValueError(
"Execution mode should be None/SYNC/ASYNC. Got %s" % mode)
if mode is None:
mode = SYNC
self._eager_context.execution_mode = mode
pywrap_tensorflow.TFE_ContextSetAsyncForThread(self._handle, mode == ASYNC)
@tf_contextlib.contextmanager
def execution_mode(self, mode):
"""Context manager for setting execution mode for current thread."""
old_mode = self.get_execution_mode()
try:
self.set_execution_mode(mode)
yield
finally:
self.set_execution_mode(old_mode)
def async_wait(self):
"""Waits for ops dispatched in ASYNC mode to finish."""
pywrap_tensorflow.TFE_ContextAsyncWait(self._handle)
def async_clear_error(self):
"""Clears errors raised during ASYNC execution."""
pywrap_tensorflow.TFE_ContextAsyncClearError(self._handle)
def num_gpus(self):
"""The number of GPUs available to execute operations."""
self._initialize_handle_and_devices()
return self._num_gpus
def add_function(self, fn):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fn: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).
"""
pywrap_tensorflow.TFE_ContextAddFunction(self._handle, fn)
def add_function_def(self, fdef):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fdef: A FunctionDef protocol buffer message.
"""
fdef_string = fdef.SerializeToString()
pywrap_tensorflow.TFE_ContextAddFunctionDef(
self._handle, fdef_string, len(fdef_string))
def add_post_execution_callback(self, callback):
"""Add a post-execution callback to the context.
A post-execution callback is invoked immediately after an eager operation or
    function has finished execution, providing access to the op's type, name,
    input and output tensors. Multiple execution callbacks can be added, in
which case the callbacks will be invoked in the order in which they are
added.
Args:
callback: a callable of the signature
`f(op_type, op_name, attrs, inputs, outputs)`.
`op_type` is the type of the operation that was just executed (e.g.,
`MatMul`).
      `op_name` is the name of the operation that was just executed. This
name is set by the client who created the operation and can be `None` if
it is unset.
`attrs` contains the attributes of the operation as a `tuple` of
alternating attribute names and attribute values.
`inputs` is the `list` of input `Tensor`(s) to the op.
`outputs` is the `list` of output `Tensor`(s) from the op.
Return value(s) from the callback are ignored.
"""
# TODO(cais): (b/64674139) Allow access to function-internal operations.
self._post_execution_callbacks.append(callback)
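  # Illustrative example of a conforming callback (hypothetical names, shown
  # only to mirror the signature documented above):
  #
  #   def dump_op(op_type, op_name, attrs, inputs, outputs):
  #     print(op_type, op_name, len(inputs), len(outputs))
  #
  #   context().add_post_execution_callback(dump_op)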
def clear_post_execution_callbacks(self):
"""Clear all post-execution callbacks added to the context."""
del self._post_execution_callbacks[:]
@property
def post_execution_callbacks(self):
"""Get the list of post-execution callbacks added to the context."""
return self._post_execution_callbacks
def enable_run_metadata(self):
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
pywrap_tensorflow.TFE_ContextEnableRunMetadata(self._handle)
@tf_contextlib.contextmanager
def device_policy(self, policy):
handle = self._handle
old = pywrap_tensorflow.TFE_ContextGetDevicePlacementPolicy(handle)
pywrap_tensorflow.TFE_ContextSetThreadLocalDevicePlacementPolicy(
handle, policy)
try:
yield
finally:
pywrap_tensorflow.TFE_ContextSetThreadLocalDevicePlacementPolicy(
handle, old)
def disable_run_metadata(self):
"""Disables tracing of op execution via RunMetadata."""
if not self._context_handle:
return
pywrap_tensorflow.TFE_ContextDisableRunMetadata(self._context_handle)
def export_run_metadata(self):
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer. Or None if not enabled.
"""
if not self._context_handle:
return None
with c_api_util.tf_buffer() as buffer_:
pywrap_tensorflow.TFE_ContextExportRunMetadata(
self._context_handle, buffer_)
proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_)
run_metadata = config_pb2.RunMetadata()
run_metadata.ParseFromString(compat.as_bytes(proto_data))
return run_metadata
@property
def context_switches(self):
"""Returns a stack of context switches."""
return self._context_switches
def start_step(self):
pywrap_tensorflow.TFE_ContextStartStep(self._handle)
def end_step(self):
pywrap_tensorflow.TFE_ContextEndStep(self._handle)
_context = None
_context_lock = threading.Lock()
def _initialize_context():
global _context
with _context_lock:
if _context is None:
_context = Context()
def context():
"""Returns a singleton context object."""
if _context is None:
_initialize_context()
return _context
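# Note: context() intentionally reads _context without holding the lock;
# _initialize_context() re-checks it under _context_lock, so initialization
# happens exactly once while the common already-initialized path stays
# lock-free (double-checked locking).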
def context_safe():
"""Returns current context (or None if one hasn't been initialized)."""
return _context
def set_global_seed(seed):
"""Sets the eager mode seed."""
context()._set_global_seed(seed) # pylint: disable=protected-access
def global_seed():
"""Returns the eager mode seed."""
return context()._seed # pylint: disable=protected-access
def internal_operation_seed():
"""Returns the operation seed generated based on global seed."""
return context()._internal_operation_seed() # pylint: disable=protected-access
@tf_export("executing_eagerly")
def executing_eagerly():
"""Returns True if the current thread has eager execution enabled.
Eager execution is typically enabled via `tf.enable_eager_execution`,
but may also be enabled within the context of a Python function via
tf.contrib.eager.py_func.
"""
return context().executing_eagerly()
def in_eager_mode():
"""Use executing_eagerly() instead. This function will be removed."""
return executing_eagerly()
def graph_mode():
"""Context-manager to disable eager execution for the current thread."""
return context()._mode(GRAPH_MODE) # pylint: disable=protected-access
def eager_mode():
"""Context-manager to enable eager execution for the current thread."""
return context()._mode(EAGER_MODE) # pylint: disable=protected-access
# TODO(agarwal): get rid of this and use ops.name_scope instead.
@contextlib.contextmanager
def namescope(name):
"""ContextManager for creating hierarchical name scopes."""
ctx = context()
old_name = ctx.scope_name
ctx.scope_name = "%s/%s" % (old_name, name) if old_name else name
try:
yield
finally:
ctx.scope_name = old_name
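# Illustrative sketch (assuming no enclosing scope is active):
#
#   with namescope("outer"):
#     with namescope("inner"):
#       scope_name()  # -> "outer/inner"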
def scope_name():
"""Name of the current scope."""
return context().scope_name
def device(name):
"""Context-manager to force placement of operations and Tensors on a device.
Example:
```python
  with tfe.device('gpu:0'):
    with tfe.device('cpu:0'):
      shape = tf.constant([], dtype=tf.int32)
    x = tf.truncated_normal(shape, dtype=tf.float32)
```
will ensure that the `shape` Tensor is on CPU but the `truncated_normal`
operation runs on GPU 0.
Args:
name: Name of the device (see context().devices()), or None to
perform automatic placement.
Returns:
Context manager for setting the device.
"""
return context().device(name)
def list_devices():
"""List the names of the available devices.
Returns:
Names of the available devices, as a `list`.
"""
return context().devices()
def set_execution_mode(mode):
"""Sets execution mode for the current thread."""
context().set_execution_mode(mode)
def execution_mode(mode):
"""Context manager for setting execution mode for current thread."""
return context().execution_mode(mode)
def async_wait():
"""Waits for ops dispatched in ASYNC mode to finish."""
return context().async_wait()
def async_clear_error():
"""Clears errors raised during ASYNC execution mode."""
return context().async_clear_error()
def num_gpus():
"""Get the number of available GPU devices.
Returns:
The number of available GPU devices.
"""
return context().num_gpus()
def enable_run_metadata():
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
context().enable_run_metadata()
def disable_run_metadata():
"""Disables tracing of op execution via RunMetadata."""
context().disable_run_metadata()
def export_run_metadata():
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer.
"""
return context().export_run_metadata()
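# Illustrative tracing flow with the three module-level helpers above:
#
#   enable_run_metadata()
#   ...  # execute the eager ops to be traced
#   metadata = export_run_metadata()
#   disable_run_metadata()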
def set_server_def(server_def):
context().set_server_def(server_def)
# Not every user creates a Context via context.context()
# (for example, enable_eager_execution in python/framework/ops.py),
# but they do all import this file. Note that IS_IN_GRAPH_MODE and
# in_graph_mode are both parameterless functions.
def _tmp_in_graph_mode():
return not executing_eagerly()
is_in_graph_mode.IS_IN_GRAPH_MODE = _tmp_in_graph_mode
| [
"[email protected]"
]
| |
0af54381ce4e027b80fe03d99f639a2156eda620 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/5/mve.py | fc7410e0f820f7925d1245e7228a622819984cc9 | []
| no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # The payload is expected to be wrapped in standalone '"' tokens.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # Data to print: everything between the two quote tokens.
            lineRemaining = lineRemaining[1:-1]
            print(' '.join(lineRemaining))
        else:
            print()
def main(fileName):
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if not data:
                # Skip blank lines instead of crashing on data[0].
                continue
            if data[0] == 'mVE':
                printFunction(data[1:])
            else:
                print('ERROR')
                return
if __name__ == '__main__':
    main(sys.argv[1])
"[email protected]"
]
| |
caf49e720e7a11db13c00411024b3f18209fde61 | 9c20b0f0ad729b77e970dedaf4a138c99b4364bc | /Lib/site-packages/phonenumbers/shortdata/region_PW.py | 2b53f97da4c5bba6ee25876c99a74d7e9f7544e6 | []
| no_license | GlovesMaker/Sklepinternetowy | 4459f8651d2280e4840cfb293de28f9413df68af | d05372e96f7238c9459caf4f7a890a5a6f2bb2c3 | refs/heads/master | 2022-12-22T02:43:33.628016 | 2018-09-11T18:20:37 | 2018-09-11T18:20:37 | 167,855,928 | 0 | 1 | null | 2022-12-08T05:55:04 | 2019-01-27T20:36:42 | Python | UTF-8 | Python | false | false | 548 | py | """Auto-generated file, do not edit by hand. PW metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_PW = PhoneMetadata(id='PW', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='9\\d\\d', possible_length=(3,)),
emergency=PhoneNumberDesc(national_number_pattern='911', example_number='911', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='911', example_number='911', possible_length=(3,)),
short_data=True)
| [
"[email protected]"
]
| |
9a4ba82eb6c3c0b5c1df859b72e504c966f1763a | 74482894c61156c13902044b4d39917df8ed9551 | /test/test_tokens_forwarding_fail_data_item.py | ce8cc93181cac7dc8210c609b3d54fa4af76f3a7 | [
"MIT"
]
| permissive | xan187/Crypto_APIs_2.0_SDK_Python | bb8898556ba014cc7a4dd31b10e24bec23b74a19 | a56c75df54ef037b39be1315ed6e54de35bed55b | refs/heads/main | 2023-06-22T15:45:08.273635 | 2021-07-21T03:41:05 | 2021-07-21T03:41:05 | 387,982,780 | 1 | 0 | NOASSERTION | 2021-07-21T03:35:29 | 2021-07-21T03:35:29 | null | UTF-8 | Python | false | false | 1,274 | py | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.tokens_forwarding_fail_data_item import TokensForwardingFailDataItem
class TestTokensForwardingFailDataItem(unittest.TestCase):
"""TokensForwardingFailDataItem unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTokensForwardingFailDataItem(self):
"""Test TokensForwardingFailDataItem"""
# FIXME: construct object with mandatory attributes with example values
# model = TokensForwardingFailDataItem() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
ad96a6a7aee94d1afcd1bceb24d8af138afdff98 | a3d6556180e74af7b555f8d47d3fea55b94bcbda | /third_party/fuchsia-gn-sdk/src/gn_run_binary.py | b39f49b5c30357b323b41f6911d8d6f0b7c05077 | [
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"BSD-3-Clause"
]
| permissive | chromium/chromium | aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | refs/heads/main | 2023-08-24T00:35:12.585945 | 2023-08-23T22:01:11 | 2023-08-23T22:01:11 | 120,360,765 | 17,408 | 7,102 | BSD-3-Clause | 2023-09-10T23:44:27 | 2018-02-05T20:55:32 | null | UTF-8 | Python | false | false | 720 | py | #!/usr/bin/env python3.8
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper script for GN to run an arbitrary binary.
Run with:
python3.8 gn_run_binary.py <binary_name> [args ...]
"""
import os
import subprocess
import sys
# This script is designed to run binaries produced by the current build. We
# may prefix it with "./" to avoid picking up system versions that might
# also be on the path.
path = sys.argv[1]
if not os.path.isabs(path):
path = './' + path
# The rest of the arguments are passed directly to the executable.
args = [path] + sys.argv[2:]
sys.exit(subprocess.call(args))
| [
"[email protected]"
]
| |
9bd8e8da9f5987759171e2b03d02241ddba91fc8 | a96bbc3da8557e68cb01db671b930fec9f46c0c2 | /blog/migrations/0005_comment.py | 2c007c09ada8afc58aec82f33ab624bfc2cadb43 | []
| no_license | winterash2/django_first_blog | dd3de9f01f8e2b2df21fba9cd3d636c6fbd94fd1 | 06f059570ae5851db97b5c7f9b9d043da033c023 | refs/heads/master | 2022-11-26T18:44:47.316248 | 2020-08-10T09:10:16 | 2020-08-10T09:10:16 | 286,345,428 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | # Generated by Django 3.0.9 on 2020-08-07 02:01
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_auto_20200806_1540'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('approved_comment', models.BooleanField(default=False)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
],
),
]
| [
"[email protected]"
]
| |
01a0e9f474c62b7004189040a1985ecc8534dea3 | 0288f98eca5d7c5e274f186a61258746be8627d3 | /venv/Scripts/easy_install-script.py | cefdc254cee2a85daedbbafe7c9795bf3330059a | []
| no_license | Adem54/Python-Tutorials | df67d449e6d8c06134c6ae7a3fec0889e341530e | a30895d7f716d8a3115bc6df9f0af3feb43aa799 | refs/heads/master | 2020-12-24T02:18:49.564989 | 2020-01-31T02:48:03 | 2020-01-31T02:48:03 | 237,347,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | #!"C:\Users\Adem\Desktop\Python Tutorials\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
]
| |
760764837e284ea13127fd1fbbc619a6dbbba28f | c071eb46184635818e8349ce9c2a78d6c6e460fc | /system/python_stubs/-745935208/PySide2/QtGui/QTextBlockUserData.py | a44da54e48e31d0038371f89e05a4a2201acfe19 | []
| no_license | sidbmw/PyCharm-Settings | a71bc594c83829a1522e215155686381b8ac5c6e | 083f9fe945ee5358346e5d86b17130d521d1b954 | refs/heads/master | 2020-04-05T14:24:03.216082 | 2018-12-28T02:29:29 | 2018-12-28T02:29:29 | 156,927,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | # encoding: utf-8
# module PySide2.QtGui
# from C:\Users\siddh\AppData\Local\Programs\Python\Python37\lib\site-packages\PySide2\QtGui.pyd
# by generator 1.146
# no doc
# imports
import PySide2.QtCore as __PySide2_QtCore
import Shiboken as __Shiboken
class QTextBlockUserData(__Shiboken.Object):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
| [
"[email protected]"
]
| |
3772b803daea556dbaca21372e0a9c473332e531 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /sCH5gcyoRqq3Gfzyi_12.py | 581973007ad8ee1c31dd8d0af12ef826a10ecb9d | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | """
Create a function which validates whether a given number exists, and could
represent a real life quantity. Inputs will be given as a string.
### Examples
valid_str_number("3.2") ➞ True
valid_str_number("324") ➞ True
valid_str_number("54..4") ➞ False
valid_str_number("number") ➞ False
### Notes
Accept numbers such as `.5` and `0003`.
"""
import re
def valid_str_number(s):
    # At least one digit and at most one decimal point; accepts ".5" and
    # "0003", rejects "", ".", "54..4" and anything with other characters.
    return bool(re.fullmatch(r'\d+\.?\d*|\.\d+', s))
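# Quick checks against the documented examples (illustrative):
#   valid_str_number("3.2") -> True, valid_str_number("324") -> True,
#   valid_str_number("54..4") -> False, valid_str_number("number") -> False,
#   valid_str_number(".5") -> True, valid_str_number("0003") -> True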
| [
"[email protected]"
]
| |
48379baa491f9b23fd8ab9d3483b39f243989824 | 1b52ba1cfc2ffb16cbcd61f89909dd51741f1c28 | /test_env/lib/python3.6/copyreg.py | 4249434c66aa32a572172ae23411e5c76357c82d | []
| no_license | dayananda30/test_run_framework | 9b0cc78ff0f439bf37ac52d6c4f2843ed36861de | b8aaf2fb88a33dff87dc652d833ab1e1f9ed28ab | refs/heads/master | 2020-07-31T09:20:01.714648 | 2019-09-24T05:45:09 | 2019-09-24T05:45:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | /home/sheetal/sid/projects/ananconda3/lib/python3.6/copyreg.py | [
"[email protected]"
]
| |
fb33dce1897d5dc122db0542d61b43b2317c257a | 015106a1a964305ef8ceb478cc56fd7d4fbd86d5 | /468.py | b55bc68b7bb8270e8e126714f8a586f504225d6b | []
| no_license | zenmeder/leetcode | 51a0fa4dc6a82aca4c67b5f4e0ee8916d26f976a | 0fddcc61923d760faa5fc60311861cbe89a54ba9 | refs/heads/master | 2020-12-02T18:16:10.825121 | 2018-10-30T11:47:53 | 2018-10-30T11:47:53 | 96,505,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | #!/usr/local/bin/python3
# -*- coding:utf-8 -*-
# __author__ = "zenmeder"
class Solution(object):
def validIPAddress(self, IP):
"""
:type IP: str
:rtype: str
"""
if self.isIPV4(IP):
return "IPv4"
elif self.isIPV6(IP):
return "IPv6"
else:
return "Neither"
    def isIPV4(self, IP):
        ip = IP.split('.')
        if len(ip) != 4:
            return False
        for num in ip:
            # Each part must be 1-3 digits, numerically 0-255, with no
            # leading zeros (string comparison like '0' <= num <= '255' is
            # lexicographic and would wrongly reject e.g. '3').
            if not num.isdigit() or len(num) > 3:
                return False
            if num != '0' and num[0] == '0':
                return False
            if int(num) > 255:
                return False
        return True
    def isIPV6(self, IP):
        ip = IP.split(':')
        if len(ip) != 8:
            return False
        hex_digits = set('0123456789abcdefABCDEF')
        for num in ip:
            # Each group must be 1-4 hexadecimal digits (leading zeros OK).
            if not 1 <= len(num) <= 4 or not all(c in hex_digits for c in num):
                return False
        return True
print(Solution().validIPAddress("192.0.0.1"))
| [
"[email protected]"
]
| |
987357f3daea5355b973c676404a76288914d82e | e9bf5fb440305c7b17935438fd515ca2541babc4 | /kinl.py | 7b30a2b77605cfb7bc749dfe93d4fb9c1031dc87 | []
| no_license | jyothiesai/guvij | 748518673509d4849803fc22b03cd6b2b0b3392f | fdcd29f5548c12095f259ff2f74024317787b110 | refs/heads/master | 2020-04-16T01:42:44.848569 | 2019-08-07T13:24:44 | 2019-08-07T13:24:44 | 165,183,822 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | #jyothi
n, k = input().split(' ')
n, k = int(n), int(k)
l = [int(x) for x in input().split(' ')]
# Report whether k occurs among the n values read.
if k in l:
    print('yes')
else:
    print('no')
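# Example (illustrative): with input "5 3" and "1 2 3 4 5" the script
# prints "yes"; with input "4 9" and "1 2 3 4" it prints "no".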
| [
"[email protected]"
]
| |
6801ecc4ad1ea58b5cafd1fd372f067ab59d8863 | 6712885a4c2a056eee3b4488382b9afc2149f799 | /New LeetCode Problems/remove_outermost_parens.py | 669927e0a71b7d1f372ff3cddc03ce00f0133f34 | []
| no_license | matthewharrilal/CS-Questions-GRIND | cac1cb562e5dad79ee4e224895d034f9c71d9ed3 | 7196c5e8df495d43ee91f218d6253c8a88a7d59d | refs/heads/master | 2020-04-16T01:30:53.289837 | 2019-07-24T04:34:21 | 2019-07-24T04:34:21 | 165,176,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | # https://leetcode.com/problems/remove-outermost-parentheses/
# def removeOuterParentheses(self, S):
# """
# :type S: str
# :rtype: str
# """ | [
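# A minimal working sketch of one standard approach (not from the original
# repo): track nesting depth and keep only the characters strictly inside
# the outermost pair of each primitive group.
def removeOuterParentheses(S):
    out = []
    depth = 0
    for ch in S:
        if ch == '(':
            if depth > 0:  # skip the opening paren of each outermost pair
                out.append(ch)
            depth += 1
        else:
            depth -= 1
            if depth > 0:  # skip the closing paren of each outermost pair
                out.append(ch)
    return ''.join(out)
# Example: removeOuterParentheses("(()())(())") -> "()()()"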
"[email protected]"
]
| |
d47fcca6b67ca58f9a12f1e5299051c3e3d948b8 | c39e466c2b6fdffbc410f24669f214e13fb87781 | /PYTHON/EJERCICIOS/TEMA 5/COMPIS/017_G8-Carla Guillén Pingarrón_638450_assignsubmission_file_/S5_G8.py | a05a77ba9258fb980599167fb5bccc7bb6b79d0f | []
| no_license | enanibus/biopython | 3a58efbcc92f1ce60285a115c620de9295b7d281 | 613d334a5c0502059930d9381a9464ef533cca1c | refs/heads/master | 2021-01-12T17:27:39.516793 | 2017-01-02T18:30:09 | 2017-01-02T18:30:09 | 71,573,732 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,820 | py | ##Group 8: Álvaro Alfayate, Andrea de la Fuente, Carla Guillén y Jorge Nuevo.
def ReadFasta(FileName): ##Define a function that reads the chosen FASTA file and extracts the required information
    MyFile=open(FileName,'r')
    ReadSeq='' #An empty variable that will store the FASTA contents once read
    for Line in MyFile: ##Join all the lines of the FASTA.
        if '>' in Line: ##This condition handles the header line
            #No strip here, so the header stays separated from the sequence by a \n
            ReadSeq=ReadSeq+Line #Append the header line, keeping its \n
        else:
            Line=Line.strip().upper() #Sequence lines are stripped, so the whole sequence is joined together.
            ReadSeq=ReadSeq+Line
    MyFile.close() #Close the file once the whole FASTA has been read
    MySeq_RE=r'([NX]M_\d+\.\d).+\n([AGCT]+)' #Regular expression that extracts the accession number on one side and the sequence on the other.
    MySeq_Comp=re.compile(MySeq_RE)
    SeqInfo=MySeq_Comp.search(ReadSeq).groups() #Search for the pattern in the text we read and take the captured groups.
    return (SeqInfo) ##SeqInfo is a tuple whose first element is the accession number and whose second is the DNA sequence
def CreateDictionary(DicFile): ##Define a function that builds a dictionary from the file we pass in.
    MyFile=open(DicFile,'r')
    MyDic_RE=r'([ATGC]{3})\t([^BJUXZ])\t([A-Z][a-z]{2})' ##Regular expression that captures the codon and the amino acid (in both the one- and three-letter codes)
    MyDic_Comp=re.compile(MyDic_RE)
    Data2=''
    GENCODE={}
    for Line in MyFile: ##Walk every line of the file and join them into Data2
        Data2=Data2+Line.strip()
    MyFile.close() #Close the file once every line has been read
    MyRes2=MyDic_Comp.findall(Data2) ##Find every match of the pattern in Data2 and store them in MyRes2 as a list of tuples (2D)
    x=0
    for n in range(0,len(MyRes2)):##Run this block once for every entry of the MyRes2 list.
        GENCODE[MyRes2[x][0]]=MyRes2[x][1:] #Build the dictionary by walking every line of the file (the first dimension of the list)
        x+=1 #Advance one position in the first dimension --> the next line of the genetic code file
    return (GENCODE)
def ComplementaryGenerator(SeqName): #A function that returns the complementary strand of the sequence from the first function
    SeqReverse=SeqName[::-1] ##Invert the sequence, so the + strand is read in the 3'-5' direction
    SeqComplementary='' ##Variable where the complementary sequence will be stored
    GenCode={'A':'T','C':'G','G':'C','T':'A'} ##Dictionary with the complementary nucleotides
    for Nucleotide in SeqReverse: ##Iterate over every nucleotide of the sequence
        ##Complementary nucleotides are appended one by one, building the complementary sequence in the 5'-3' direction.
        SeqComplementary=SeqComplementary+GenCode[Nucleotide]
    return(SeqComplementary) ##SeqComplementary is the value returned by this function.
def TranslateDNA(DNASeq,COMPSEQ,DicFile,ExportName):
    MyFile=open(ExportName+'.txt','w')
    Counter='+' #Start on the + strand; this marks which sequence we are on
    for Seq in (DNASeq,COMPSEQ):
        if Counter=='+': ##At the start we are on the + sequence
            print('\t\t\t\t\t\t\t\t\t\tPLUS STRAND\n')
            MyFile.write('\t\t\t\t\t\t\t\t\t\tPLUS STRAND\n')
        if Counter=='-': #Write "MINUS STRAND" in this case
            MyFile.write('\n\t\t\t\t\t\t\t\t\t\tMINUS STRAND\n\n')
            print('\n\t\t\t\t\t\t\t\t\t\tMINUS STRAND\n\n')
        for CodingFrame in range(0,3): #Loop that reads the three reading frames
            ProtSeq=''
            MyFile.write('\n\t\t\t\t\t\t\t\t\t\t Frame '+str(CodingFrame+1)+'\n\n')#Write the current frame (adding +1 because the range starts at 0)
            print('\n\t\t\t\t\t\t\t\t\t\t Frame '+str(CodingFrame+1)+'\n\n')
            while True:
                if CodingFrame>(((len(Seq)/3)-1)*3): ##This condition runs the code until the end of the sequence is reached.
                    break
                SubSeq=Seq[CodingFrame]+Seq[CodingFrame+1]+Seq[CodingFrame+2] ##Build the codon and assign it to SubSeq.
                ProtSeq=ProtSeq+DicFile[SubSeq][0] ##Translate the current codon to the one-letter code and append it to the translation built so far.
                CodingFrame+=3 #Move 3 nucleotides forward to read the next codon
            print(ProtSeq)
            MyFile.write(ProtSeq+'\n') #Write the translated sequence
        Counter='-' #Once the + strand block is done, Seq will be the reverse strand, marked -
    MyFile.close()
def Body():
    DNAList=ReadFasta(sys.argv[1]) #Tuple that contains the accession number and the DNA
    GenCode=CreateDictionary('GeneticCode_standard.csv')
    CompSeq=ComplementaryGenerator(DNAList[1]) #CompSeq now holds the complementary sequence returned by ComplementaryGenerator
    Protein=TranslateDNA(DNAList[1],CompSeq,GenCode,DNAList[0]) ##DNAList[1] holds the extracted DNA sequence and DNAList[0] the accession number
if __name__=='__main__':
    import sys
    import re
    if len(sys.argv)<2:
        print('Please, introduce as an argument the file you want to translate.')
    elif '.fasta' not in sys.argv[1]: #If the argument is not a fasta file, ask for one
        print('You have to introduce a fasta sequence')
    else:
        Body()
| [
"[email protected]"
]
| |
3e3fb85fab836eb760653086cf3ecdb7a8ab1a66 | b75c3da63a9f6c6fbc37c6ccfa12578d93935624 | /leetcode/241. Different Ways to Add Parentheses/Python3/diff_ways.py | e3937693534511eb81a14b1075bdf327c461071e | []
| no_license | bryand1/solutions | ce5b09b14b73bd6da214eac159af7d4439cdd6dd | 319741d720e1f0bb1b94629df410de392cbc755c | refs/heads/master | 2021-07-09T08:39:45.301754 | 2019-03-01T16:31:36 | 2019-03-01T16:31:36 | 144,401,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | from operator import add, sub, mul
op = {'+': add, '-': sub, '*': mul}
class Solution:
def diffWaysToCompute(self, expr):
"""
:type input: str
:rtype: List[int]
"""
arr = self.parse(expr)
return self.diffWaysToComputeRec(arr, 0, len(arr))
def diffWaysToComputeRec(self, arr, lo, hi):
if hi - lo == 1:
return [arr[lo]]
res = []
for i in range(lo + 1, hi, 2):
op = arr[i]
lhs = self.diffWaysToComputeRec(arr, lo, i)
rhs = self.diffWaysToComputeRec(arr, i + 1, hi)
for a in lhs:
for b in rhs:
res.append(op(a, b))
return res
def parse(self, expr):
res = []
n = 0
for char in expr:
if char.isdigit():
n = 10 * n + int(char)
else:
res.append(n)
n = 0
res.append(op[char])
res.append(n)
return res
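# Illustrative check (a standard example for this problem; the order of the
# results depends on the split order):
#   Solution().diffWaysToCompute("2-1-1") -> [2, 0]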
| [
"[email protected]"
]
| |
c52e35b3ac6d4beb8a90ab36aef6698e7deb2c12 | 4c3e992678341ccaa1d4d14e97dac2e0682026d1 | /addons/purchase/models/stock_config_settings.py | f07bb2fa1319b3603ec86c3a653f03c531b9923b | []
| no_license | gahan-corporation/wyatt | 3a6add8f8f815bd26643e1e7c81aea024945130d | 77e56da362bec56f13bf0abc9f8cf13e98461111 | refs/heads/master | 2021-09-03T18:56:15.726392 | 2018-01-08T02:54:47 | 2018-01-08T02:54:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from gerp import api, fields, models
class ResConfigSettings(models.TransientModel):
_inherit = 'res.config.settings'
po_lead = fields.Float(related='company_id.po_lead')
use_po_lead = fields.Boolean(
string="Security Lead Time for Purchase",
oldname='default_new_po_lead',
help="Margin of error for vendor lead times. When the system generates Purchase Orders for reordering products,they will be scheduled that many days earlier to cope with unexpected vendor delays.")
@api.onchange('use_po_lead')
def _onchange_use_po_lead(self):
if not self.use_po_lead:
self.po_lead = 0.0
def get_values(self):
res = super(ResConfigSettings, self).get_values()
res.update(
use_po_lead=self.env['ir.config_parameter'].sudo().get_param('purchase.use_po_lead')
)
return res
def set_values(self):
super(ResConfigSettings, self).set_values()
self.env['ir.config_parameter'].sudo().set_param('purchase.use_po_lead', self.use_po_lead)
| [
"[email protected]"
]
| |
a710d37abf1d105c4e455d43f64b6efdd6cb4977 | 3a9f2b3d79cf214704829427ee280f4b49dca70a | /saigon/rat/RuckusAutoTest/scripts/zd/ats_ZD_Combo_SNMP_V2_Continue_Walking.py | 566d4e7bb980151a16a79e016fed8da87f2030ed | []
| no_license | jichunwei/MyGitHub-1 | ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791 | f826fc89a030c6c4e08052d2d43af0b1b4b410e3 | refs/heads/master | 2021-01-21T10:19:22.900905 | 2016-08-20T03:34:52 | 2016-08-20T03:34:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,449 | py | '''
Continue walking via SNMP V2
Walk the nodes repeatedly and verify the information is retrieved correctly.
1. Continue walking system objects 50 times via SNMP V2, compare the information from SNMP and CLI.
Expected result: all steps should complete properly.
How to:
1) Get system information via ZD CLI
2) Continue walking system object 50 times, parsing the system result.
3) Compare the result for each walking are same
4) Compare the result from SNMP and CLI are same
Created on 2011-4-14
@author: [email protected]
'''
import sys
import libZD_TestSuite as testsuite
from RuckusAutoTest.common import lib_KwList as kwlist
def define_test_cfg(tcfg):
test_cfgs = []
test_name = 'CB_Scaling_ZD_CLI_Process_Check'
common_name = 'apmgr and stamgr daemon pid mark'
param_cfg = dict()
test_cfgs.append((param_cfg, test_name, common_name, 0, False))
test_name = 'CB_ZD_CLI_Set_SNMP_Agent'
common_name = 'Disable SNMP Agent Version 2'
test_cfgs.append(({'snmp_agent_cfg': {'version': 2, 'enabled': False}}, test_name, common_name, 0, False))
test_name = 'CB_ZD_CLI_Set_SNMP_Agent'
common_name = 'Disable SNMP Agent Version 3'
test_cfgs.append(({'snmp_agent_cfg': {'version': 3, 'enabled': False}}, test_name, common_name, 0, False))
test_name = 'CB_ZD_CLI_Set_SNMP_Agent'
common_name = 'Enable SNMP Agent Version 2'
test_cfgs.append(({'snmp_agent_cfg':tcfg['set_snmp_agent_cfg']}, test_name, common_name, 0, False))
test_case_name = '[Continue Walking]'
test_name = 'CB_ZD_CLI_Get_Sys_Basic_Info'
common_name = '%sGet System Info via ZD CLI' % (test_case_name,)
test_cfgs.append(( {}, test_name, common_name, 1, False))
test_name = 'CB_ZD_SNMP_Contine_Walking_Sys_Basic_Info'
common_name = '%sVerify continue walking to get system basic info' % (test_case_name,)
test_cfgs.append(( {'snmp_agent_cfg': tcfg['set_snmp_agent_cfg'],
'snmp_cfg': tcfg['snmp_cfg'],
'times': tcfg['times']},
test_name, common_name, 2, False))
test_name = 'CB_ZD_SNMP_Verify_Sys_Basic_Info_SNMPGet_CLIGet'
common_name = '%sVerify System Info between SNMP Get and CLI Get' % (test_case_name,)
test_cfgs.append(( {}, test_name, common_name, 2, False))
test_name = 'CB_Scaling_ZD_CLI_Process_Check'
common_name = 'apmgr and stamgr daemon pid checking.'
param_cfg = dict()
test_cfgs.append((param_cfg, test_name, common_name, 0, False))
return test_cfgs
def define_test_parameters(tbcfg):
set_snmp_agent_cfg = {'version': 2,
'enabled': True,
'ro_community': 'public',
'rw_community': 'private',
'contact': '[email protected]',
'location': 'shenzhen',}
snmp_cfg = {#'ip_addr': tbcfg['ZD']['ip_addr'],
'timeout': 20,
'retries': 3,}
tcfg = {'snmp_cfg': snmp_cfg,
'set_snmp_agent_cfg': set_snmp_agent_cfg,
'times': 50, }
return tcfg
def create_test_suite(**kwargs):
tb = testsuite.getTestbed2(**kwargs)
tbcfg = testsuite.getTestbedConfig(tb)
if str(tb.tbtype) == "ZD_Stations_IPV6":
zd_ip_version = tbcfg['ip_cfg']['zd_ip_cfg']['ip_version']
ap_ip_version = tbcfg['ip_cfg']['ap_ip_cfg']['ip_version']
ts_name = 'ZD SNMP V2 ZD %s AP %s - Continue Walking' % (zd_ip_version, ap_ip_version)
else:
ts_name = 'ZD SNMP V2 - Continue Walking'
tcfg = define_test_parameters(tbcfg)
ts = testsuite.get_testsuite(ts_name, 'Verify Continue Walking', combotest=True)
test_cfgs = define_test_cfg(tcfg)
test_order = 1
test_added = 0
for test_params, testname, common_name, exc_level, is_cleanup in test_cfgs:
if testsuite.addTestCase(ts, testname, common_name, test_params, test_order, exc_level, is_cleanup) > 0:
test_added += 1
test_order += 1
print "Add test case with test name: %s\n\t\common name: %s" % (testname, common_name)
print "\n-- Summary: added %d test cases into test suite '%s'" % (test_added, ts.name)
if __name__ == "__main__":
_dict = kwlist.as_dict(sys.argv[1:])
create_test_suite(**_dict)
| [
"[email protected]"
]
| |
f25a5e76793f59ea45d4b523e92b6a9ead54cdd7 | b424a13f032d5a607e6df4dd78bc47ad1d06a147 | /scipy/sparse/extract.py | 9cb83084601147f87f718d9cf00bf257abade13c | []
| no_license | EnjoyLifeFund/macSierra-py36-pkgs | 1e7eeb9b55415da6eb12465d67730d76e9cc619a | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | refs/heads/master | 2021-01-20T10:23:50.044019 | 2017-09-05T02:53:26 | 2017-09-05T02:53:26 | 90,333,987 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | ../../../../../Cellar/scipy/0.19.1_1/lib/python3.6/site-packages/scipy/sparse/extract.py | [
"[email protected]"
]
| |
6d377d17ec1cfb614d0530853ef4216679d840c8 | c8781d3dc17202fcc1b5358475071c0a834c7f82 | /ShowAndSearch/__init__.py | 58b6fee75af3be485a88240f3e64ee1dfc0cc118 | [
"Apache-2.0"
]
| permissive | guchengxi1994/show-and-search | 7b73d4a7a0250a0f70cf07b0de7695d6c8051545 | e955a6677f3cd23b1f7ed247e828a5852ec6ab20 | refs/heads/master | 2022-12-22T06:28:36.601500 | 2020-09-22T05:17:14 | 2020-09-22T05:17:14 | 295,630,132 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | '''
language: python
Description:
version: beta
Author: xiaoshuyui
Date: 2020-09-15 13:53:11
LastEditors: xiaoshuyui
LastEditTime: 2020-09-22 11:20:14
'''
__version__ = '0.0.0'
__appname__ = 'show and search'
| [
"[email protected]"
]
| |
a83539222531d53944838325e21dc6f020e934e1 | 4edc95b0e9f739c5faa29704d8d0fe31d6074114 | /0x0F-python-object_relational_mapping/7-model_state_fetch_all.py | 52c794399d96e997e84932ac6d2b888beb5f6f22 | []
| no_license | Caroll1889/holbertonschool-higher_level_programming | 47a78074af5ec93f2e4bcf0cfb0579fb0f12c441 | f3c222c101e05bf5876951fc7a2566f3ce0ff7e6 | refs/heads/master | 2020-07-22T22:51:35.948398 | 2020-02-14T16:35:29 | 2020-02-14T16:35:29 | 207,356,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | #!/usr/bin/python3
""" """
from sys import argv
from model_state import Base, State
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import sqlalchemy
if __name__ == "__main__":
en = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format(argv[1],
argv[2],
argv[3]))
Base.metadata.create_all(en)
Session = sessionmaker(bind=en)
session = Session()
for states in session.query(State).order_by(State.id):
print("{}: {}".format(states.id, states.name))
session.close()
| [
"[email protected]"
]
| |
bbaaec7298b66a183a83e952cd94bf42b7c78062 | 93713f46f16f1e29b725f263da164fed24ebf8a8 | /Library/lib/python3.7/site-packages/jedi/api/project.py | eed8f3f9954ee61c107899d3452f6b026bed0a75 | [
"BSD-3-Clause"
]
| permissive | holzschu/Carnets | b83d15136d25db640cea023abb5c280b26a9620e | 1ad7ec05fb1e3676ac879585296c513c3ee50ef9 | refs/heads/master | 2023-02-20T12:05:14.980685 | 2023-02-13T15:59:23 | 2023-02-13T15:59:23 | 167,671,526 | 541 | 36 | BSD-3-Clause | 2022-11-29T03:08:22 | 2019-01-26T09:26:46 | Python | UTF-8 | Python | false | false | 6,235 | py | import os
import json
from jedi._compatibility import FileNotFoundError, NotADirectoryError
from jedi.api.environment import SameEnvironment, \
get_cached_default_environment
from jedi.api.exceptions import WrongVersion
from jedi._compatibility import force_unicode
from jedi.evaluate.sys_path import discover_buildout_paths
from jedi.evaluate.cache import evaluator_as_method_param_cache
from jedi.common.utils import traverse_parents
_CONFIG_FOLDER = '.jedi'
_CONTAINS_POTENTIAL_PROJECT = 'setup.py', '.git', '.hg', 'requirements.txt', 'MANIFEST.in'
_SERIALIZER_VERSION = 1
def _remove_duplicates_from_path(path):
used = set()
for p in path:
if p in used:
continue
used.add(p)
yield p
def _force_unicode_list(lst):
return list(map(force_unicode, lst))
class Project(object):
# TODO serialize environment
_serializer_ignore_attributes = ('_environment',)
_environment = None
@staticmethod
def _get_json_path(base_path):
return os.path.join(base_path, _CONFIG_FOLDER, 'project.json')
@classmethod
def load(cls, path):
"""
:param path: The path of the directory you want to use as a project.
"""
with open(cls._get_json_path(path)) as f:
version, data = json.load(f)
if version == 1:
            self = cls.__new__(cls)
self.__dict__.update(data)
return self
else:
raise WrongVersion(
"The Jedi version of this project seems newer than what we can handle."
)
def __init__(self, path, **kwargs):
"""
:param path: The base path for this project.
:param sys_path: list of str. You can override the sys path if you
want. By default the ``sys.path.`` is generated from the
environment (virtualenvs, etc).
:param smart_sys_path: If this is enabled (default), adds paths from
local directories. Otherwise you will have to rely on your packages
being properly configured on the ``sys.path``.
"""
def py2_comp(path, environment=None, sys_path=None,
smart_sys_path=True, _django=False):
self._path = path
if isinstance(environment, SameEnvironment):
self._environment = environment
self._sys_path = sys_path
self._smart_sys_path = smart_sys_path
self._django = _django
py2_comp(path, **kwargs)
def _get_base_sys_path(self, environment=None):
if self._sys_path is not None:
return self._sys_path
# The sys path has not been set explicitly.
if environment is None:
environment = self.get_environment()
sys_path = list(environment.get_sys_path())
try:
sys_path.remove('')
except ValueError:
pass
return sys_path
@evaluator_as_method_param_cache()
def _get_sys_path(self, evaluator, environment=None):
"""
Keep this method private for all users of jedi. However internally this
one is used like a public method.
"""
suffixed = []
prefixed = []
sys_path = list(self._get_base_sys_path(environment))
if self._smart_sys_path:
prefixed.append(self._path)
if evaluator.script_path is not None:
suffixed += discover_buildout_paths(evaluator, evaluator.script_path)
traversed = list(traverse_parents(evaluator.script_path))
# AFAIK some libraries have imports like `foo.foo.bar`, which
# leads to the conclusion to by default prefer longer paths
# rather than shorter ones by default.
suffixed += reversed(traversed)
if self._django:
prefixed.append(self._path)
path = prefixed + sys_path + suffixed
return list(_force_unicode_list(_remove_duplicates_from_path(path)))
def save(self):
data = dict(self.__dict__)
for attribute in self._serializer_ignore_attributes:
data.pop(attribute, None)
        # json.dump writes text, so the file must be opened in text mode.
        with open(self._get_json_path(self._path), 'w') as f:
return json.dump((_SERIALIZER_VERSION, data), f)
def get_environment(self):
if self._environment is None:
return get_cached_default_environment()
return self._environment
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._path)
def _is_potential_project(path):
for name in _CONTAINS_POTENTIAL_PROJECT:
if os.path.exists(os.path.join(path, name)):
return True
return False
def _is_django_path(directory):
""" Detects the path of the very well known Django library (if used) """
try:
with open(os.path.join(directory, 'manage.py'), 'rb') as f:
return b"DJANGO_SETTINGS_MODULE" in f.read()
except (FileNotFoundError, NotADirectoryError):
return False
return False
def get_default_project(path=None):
if path is None:
path = os.getcwd()
check = os.path.realpath(path)
probable_path = None
first_no_init_file = None
for dir in traverse_parents(check, include_current=True):
try:
return Project.load(dir)
except (FileNotFoundError, NotADirectoryError):
pass
if first_no_init_file is None:
if os.path.exists(os.path.join(dir, '__init__.py')):
# In the case that a __init__.py exists, it's in 99% just a
# Python package and the project sits at least one level above.
continue
else:
first_no_init_file = dir
if _is_django_path(dir):
return Project(dir, _django=True)
if probable_path is None and _is_potential_project(dir):
probable_path = dir
if probable_path is not None:
# TODO search for setup.py etc
return Project(probable_path)
if first_no_init_file is not None:
return Project(first_no_init_file)
curdir = path if os.path.isdir(path) else os.path.dirname(path)
return Project(curdir)
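# Usage sketch (illustrative): resolve the project that owns a file, walking
# parent directories for markers such as a saved .jedi config, a Django
# manage.py, or files like setup.py / .git:
#
#   project = get_default_project('/path/to/repo/pkg/module.py')
#   # -> <Project: /path/to/repo>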
| [
"[email protected]"
]
| |
2f6935ac11ea708cd225be988e352fdc29e40119 | b5313b8442b26e4a54172e55eb84d501ee4cae12 | /run.py | 783d9038c8e358e77f451405fc4d41584bbd6c56 | [
"MIT"
]
| permissive | summeraz/ljmc | bca23b37c29847ebb9a763146e9f67fb32843912 | 207326f61d6be4063d06dfc2df6fb1f61dd57e27 | refs/heads/master | 2020-03-20T06:10:31.396309 | 2018-06-13T22:49:39 | 2018-06-13T22:49:39 | 137,241,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | from __future__ import division
import pyximport; pyximport.install()
from forcefield import *
from mc import *
from system import *
from utils import *
### Define Lennard-Jones Parameters ###
sigma = 1.0
epsilon = 1.0
cutoff = 2.5
### Define System Parameters ###
n_particles = 125
number_density = 0.5
### Define Monte Carlo Parameters ###
temperature = 1.2 # Temperature of the simulation
dx = 0.1 # Initial maximum displacement
target = 0.5 # Target acceptance probabality
n_relax = 2500 # Number of timesteps to relax from initial configuration
n_mc = 25000 # Total number of MC steps
#############################################################################
#######
# RUN #
#######
# Create the force field
forcefield = ForceField(sigma=sigma, epsilon=epsilon, cutoff=cutoff)
# Create the system
system = System(n_particles, number_density, forcefield)
# Initialize the neighborlist
system.build_nlist(skin=0.5)
# Create Monte Carlo instance
mc = MonteCarlo(system=system, dx=dx, temperature=temperature, target=target)
# Relax the system and optimize `dx`
mc.relax(n_relax, adjust_freq=50)
# Monte Carlo production run
mc.run(traj_filename='traj.xyz', steps=n_mc, freq=100)
| [
"[email protected]"
]
| |
95e1a0cfc755b266419200defd030c1fe6f9f3bb | 0566cf76b456518875edecece15e763a36a4795f | /scrapers/megafilmeshd21_net.py | 79e1c527816b062bae23ffbd86c1a90255b460d7 | []
| no_license | theclonedude/Scraping_BeautifulSoup_phantomjs | 684b1f7a993e0d2555daa7a5455cf19bd29b0b1b | faf653feae46c21a72d13b2123cdebdb2f7c05d8 | refs/heads/master | 2023-03-16T19:36:14.867361 | 2018-06-14T14:21:02 | 2018-06-14T14:21:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,897 | py | # coding=utf-8
from sandcrawler.scraper import ScraperBase, SimpleScraperBase
class Megafilmeshd21Net(SimpleScraperBase):
BASE_URL = 'http://megafilmeshd21.net'
OTHER_URLS = ['http://maxfilmesonline.net']
SCRAPER_TYPES = [ ScraperBase.SCRAPER_TYPE_OSP, ]
LANGUAGE = 'por'
MEDIA_TYPES = [ ScraperBase.MEDIA_TYPE_FILM, ScraperBase.MEDIA_TYPE_TV, ]
URL_TYPES = [ScraperBase.URL_TYPE_SEARCH, ScraperBase.URL_TYPE_LISTING, ]
def _fetch_search_url(self, search_term, media_type):
return '{base_url}?&s={search_term}'.format(base_url=self.BASE_URL, search_term=search_term)
def _fetch_no_results_text(self):
return None
def _fetch_next_button(self, soup):
next_button = soup.select_one('a.nextpostslink')
if next_button:
return next_button.href
return None
def _parse_search_result_page(self, soup):
found=0
for result in soup.select('a.thumb'):
self.submit_search_result(
link_url=result.href,
link_title=result.text,
image=self.util.find_image_src_or_none(result, 'img'),
)
found=1
if not found:
return self.submit_search_no_results()
def _parse_parse_page(self, soup):
index_page_title = self.util.get_page_title(soup)
series_season = series_episode = None
title = soup.select_one('h1')
if title and title.text:
series_season, series_episode = self.util.extract_season_episode(title.text)
for link in soup.select('nav.lista-players a.btn-player'):
self.submit_parse_result(
index_page_title=index_page_title,
link_url=link['data-href'],
link_title=link.text,
series_season=series_season,
series_episode=series_episode,
)
| [
"[email protected]"
]
| |
c24872cd28d1af12915e4db50558f458d2ddbe15 | 9af204535dfc39d5c9a2dc4e2daf538cb2454caf | /src/tasks/shapes/parameters.py | e9f86a6d13a821edc1beebb17b97b1a42a49e26f | []
| no_license | kevinyu/reimagined-robot | c94f51e1b025dc3636a40b06cf8d914238596f9e | 970e451a70b43d1cd7ac7f8f3700ea8e9eb88aa3 | refs/heads/master | 2021-01-20T15:57:41.095221 | 2017-05-24T01:40:25 | 2017-05-24T01:40:51 | 90,803,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,627 | py | import os
import numpy as np
import theano
import theano.tensor as T
import config
from utils import float_x
from utils.complex import ComplexTuple
from shapes.properties import properties
from shapes.objects import shapes
if os.path.exists(os.path.join(config.SAVE_DIR, "S0.npy")):
S0_array = np.load(os.path.join(config.SAVE_DIR, "S0.npy"))
S0 = ComplexTuple(
theano.shared(float_x(S0_array[0])),
theano.shared(float_x(S0_array[1]))
)
else:
S0 = ComplexTuple(
theano.shared(float_x(np.zeros(config.DIM))),
theano.shared(float_x(np.zeros(config.DIM)))
)
# D_table is a dict mapping categories to dictionaries of hypervectors
D_table = {}
filename = os.path.join(config.SAVE_DIR, "D_Shapes.npy")
if os.path.exists(filename):
darray = np.load(filename)
D_table["Shapes"] = ComplexTuple(
theano.shared(float_x(darray[0])),
theano.shared(float_x(darray[1]))
)
else:
D_table["Shapes"] = ComplexTuple(
theano.shared(float_x(0.01 * np.random.uniform(-1, 1, size=(config.DIM, len(shapes))))),
theano.shared(float_x(0.01 * np.random.uniform(-1, 1, size=(config.DIM, len(shapes)))))
)
for prop in properties:
filename = os.path.join(config.SAVE_DIR, "D_{}.npy".format(prop.__name__))
if os.path.exists(filename):
darray = np.load(filename)
D_table[prop.__name__] = ComplexTuple(
theano.shared(float_x(darray[0])),
theano.shared(float_x(darray[1]))
)
else:
D_table[prop.__name__] = ComplexTuple(
theano.shared(float_x(0.01 * np.random.uniform(-1, 1, size=(config.DIM, len(prop.params))))),
theano.shared(float_x(0.01 * np.random.uniform(-1, 1, size=(config.DIM, len(prop.params)))))
)
# Generate all bound combinations of available objects with properties
_D_combined = D_table["Shapes"]
for i, prop in enumerate(properties):
# each iteration increases the dimensionality of D_combined by one
# the last dimension corresponds to the ith property
i += 1
_D_combined = (
_D_combined.dimshuffle([0] + range(1, i+1) + ["x"]) *
D_table[prop.__name__].dimshuffle(*[[0] + (["x"] * i) + [1]])
)
D = _D_combined.flatten(2)
# Concatenate a single vector representing background to D
bg_filename = os.path.join(config.SAVE_DIR, "D_bg.npy")
if os.path.exists(bg_filename):
darray = np.load(bg_filename)
bg_vector = ComplexTuple(
theano.shared(float_x(darray[0])),
theano.shared(float_x(darray[1]))
)
else:
bg_vector = ComplexTuple(
theano.shared(float_x(0.01 * np.random.uniform(-1, 1, size=(config.DIM, 1)))),
theano.shared(float_x(0.01 * np.random.uniform(-1, 1, size=(config.DIM, 1))))
)
D = ComplexTuple(
T.concatenate([D.real, bg_vector.real], axis=1),
T.concatenate([D.imag, bg_vector.imag], axis=1)
)
learn_params = [bg_vector.real, bg_vector.imag]
for D_prop in D_table.values():
learn_params += [D_prop.real, D_prop.imag]
learn_params = [S0.real, S0.imag] + learn_params
def save_params():
s0_filename = os.path.join(config.SAVE_DIR, "S0")
np.save(s0_filename, np.array(list(S0.get_value())))
D_filename = os.path.join(config.SAVE_DIR, "D_Shapes")
np.save(D_filename, np.array(list(D_table["Shapes"].get_value())))
for prop in properties:
D_filename = os.path.join(config.SAVE_DIR, "D_{}".format(prop.__name__))
np.save(D_filename, np.array(list(D_table[prop.__name__].get_value())))
np.save(os.path.join(config.SAVE_DIR, "D_bg"), np.array(list(bg_vector.get_value())))
| [
"[email protected]"
]
| |
ca1fc2d69082f42093ba01898f4e9cbfde8dba16 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/models/ms_data/identity_constraint/id_l051_xsd/__init__.py | 5f14f91fccf3bc1c1eb8c2330c525ca13b8ee553 | [
"MIT"
]
| permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 135 | py | from output.models.ms_data.identity_constraint.id_l051_xsd.id_l051 import (
Root,
Uid,
)
__all__ = [
"Root",
"Uid",
]
| [
"[email protected]"
]
| |
3f96a324be5d36b890aa2e0eb8f0d22f9106d7b3 | 03195206540b44d74f86801c5e58b2b731c863bf | /pi/mission_control/debile/pousse_feu.py | 0c57edb5ec2e019d69a3560ce3287b3d729de533 | []
| no_license | clement91190/eurobot | 7f242f15b966216ef81d4851c338493ccf056c26 | e61c9b3a32c1ee1417d807be6c4f97032a7e55a6 | refs/heads/master | 2021-01-25T09:53:38.517607 | 2014-05-30T09:16:40 | 2014-05-30T09:16:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | from mission_control.mission import Mission, SuccessOut, FailOut
from utils.coord import Coord
from mae_generator.mae import MAE, InitState, debugger
def get_mission(com_state_factory):
    #raise NotImplementedError("mission not implemented yet")
return Mission(" pousse_feu ", Coord(-800, 600, 180), MAEPousseFeu(com_state_factory))
class MAEPousseFeu(MAE):
def __init__(self, com_state_factory):
MAE.__init__(self)
self.sf = com_state_factory
#states
init = InitState()
#tape = self.sf.get_pmi_tape()
avance_triangle1 = self.sf.get_bf_fw(Coord(200))
out = SuccessOut()
out2 = FailOut()
#transitions
init.add_instant_transition(avance_triangle1)
avance_triangle1.add_afini_transition(out)
avance_triangle1.add_bloc_transition(out)
avance_triangle1.add_advd_transition(out)
self.state_list = [
init, avance_triangle1, out, out2
]
self.reinit_state()
if __name__ == "__main__":
from com_state_factory import ComStateFactory
from communication import PipoCommunication
com = PipoCommunication()
mae = MAEPousseFeu(ComStateFactory(com))
com.set_global_mae(mae)
#mae = MAEGlobal()
debugger(mae)
| [
"[email protected]"
]
| |
c08a65cd4eaa895be6579ac952edbbb1cfd00cc9 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/kindergarten-garden/d338583c2ba84654b20ddcf61ff29827.py | c739d565e7f3e97d21da436f3a80d769200473d7 | []
| no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 961 | py | from collections import defaultdict, deque
from typing import Dict, Iterable, List
PLANTS = {
'R': 'Radishes',
'C': 'Clover',
'G': 'Grass',
'V': 'Violets',
}
STUDENTS = (
'Alice', 'Bob', 'Charlie', 'David',
'Eve', 'Fred', 'Ginny', 'Harriet',
'Ileana', 'Joseph', 'Kincaid', 'Larry'
)
class Garden:
def __init__(self, garden: str, students: Iterable[str]=STUDENTS, grouping: int=2) -> None:
        # defaultdict needs a real callable factory (typing.List[str] is not one).
        self.students = defaultdict(list)  # type: Dict[str, List[str]]
if isinstance(students, list):
students.sort()
for group in garden.split():
group_queue = deque(group)
for student in students:
for x in range(grouping):
self.students[student].append(PLANTS[group_queue.popleft()])
if not group_queue:
break
def plants(self, student: str) -> List[str]:
return self.students.get(student)
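# Illustrative check (the classic two-row example from the exercise):
#   Garden("VC\nRC").plants("Alice") -> ['Violets', 'Clover', 'Radishes', 'Clover']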
| [
"[email protected]"
]
| |
c31badd4543416eb333ff1a8f62aac8c166188c5 | f94f9ddd8f7ec105161366461275f16b325d9c3e | /l2hmc-qcd/train.py | d9568bd70244bddda72a9c96ba1f88273e2b46d6 | [
"Apache-2.0"
]
| permissive | FigTop/l2hmc-qcd | da086545b94f5ff2da835b0f2c440e077a28e15a | 0003da4f6c76172a27dbdec223393ce04cf73805 | refs/heads/master | 2023-04-21T21:20:43.724346 | 2021-05-22T19:28:21 | 2021-05-22T19:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,794 | py | """
train.py
Train 2D U(1) model using eager execution in tensorflow.
"""
# noqa: E402, F401
# pylint:disable=wrong-import-position,invalid-name, unused-import,
# pylint: disable=ungrouped-imports
from __future__ import absolute_import, division, print_function
import os
import json
import contextlib
import logging
import tensorflow as tf
from config import BIN_DIR
import utils
try:
import horovod
import horovod.tensorflow as hvd
try:
RANK = hvd.rank()
except ValueError:
hvd.init()
RANK = hvd.rank()
HAS_HOROVOD = True
logging.info(f'using horovod version: {horovod.__version__}')
logging.info(f'using horovod from: {horovod.__file__}')
GPUS = tf.config.experimental.list_physical_devices('GPU')
for gpu in GPUS:
tf.config.experimental.set_memory_growth(gpu, True)
if GPUS:
gpu = GPUS[hvd.local_rank()]
tf.config.experimental.set_visible_devices(gpu, 'GPU')
except (ImportError, ModuleNotFoundError):
HAS_HOROVOD = False
from utils.file_io import console
import utils.file_io as io
from utils.attr_dict import AttrDict
from utils.parse_configs import parse_configs
from dynamics.gauge_dynamics import build_dynamics
from utils.training_utils import train, train_hmc
from utils.inference_utils import run, run_hmc, run_inference_from_log_dir
# os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '3'
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logger = logging.getLogger(__name__)
logging_datefmt = '%Y-%m-%d %H:%M:%S'
logging_level = logging.WARNING
logging_format = (
'%(asctime)s %(levelname)s:%(process)s:%(thread)s:%(name)s:%(message)s'
)
logging.info(f'using tensorflow version: {tf.__version__}')
logging.info(f'using tensorflow from: {tf.__file__}')
@contextlib.contextmanager
def experimental_options(options):
"""Run inside contextmanager with special options."""
old_opts = tf.config.optimizer.get_experimental_options()
tf.config.optimizer.set_experimental_options(options)
try:
yield
finally:
tf.config.optimizer.set_experimental_options(old_opts)
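# Usage sketch (illustrative; valid option keys depend on the TF release):
#
#   with experimental_options({'layout_optimizer': False}):
#       ...  # code here runs with the modified grappler options,
#            # which are restored on exit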
def restore_flags(flags, train_dir):
"""Update `FLAGS` using restored flags from `log_dir`."""
rf_file = os.path.join(train_dir, 'FLAGS.z')
if os.path.isfile(rf_file):
try:
restored = io.loadz(rf_file)
restored = AttrDict(restored)
io.log(f'Restoring FLAGS from: {rf_file}...')
flags.update(restored)
except (FileNotFoundError, EOFError):
pass
return flags
def main(configs, num_chains=None, run_steps=None):
"""Main method for training."""
hmc_steps = configs.get('hmc_steps', 0)
# tf.keras.backend.set_floatx('float32')
log_file = os.path.join(os.getcwd(), 'log_dirs.txt')
x = None
log_dir = configs.get('log_dir', None)
beta_init = configs.get('beta_init', None)
beta_final = configs.get('beta_final', None)
if log_dir is not None: # we want to restore from latest checkpoint
configs.restore = True
run_steps = configs.get('run_steps', None)
train_steps = configs.get('train_steps', None)
restored = restore_flags(configs,
os.path.join(configs.log_dir, 'training'))
for key, val in configs.items():
if key in restored:
if val != restored[key]:
io.log(f'Restored {key}: {restored[key]}')
io.log(f'Using {key}: {val}')
configs.update({
'train_steps': train_steps,
'run_steps': run_steps,
})
if beta_init != configs.get('beta_init', None):
configs.beta_init = beta_init
if beta_final != configs.get('beta_final', None):
configs.beta_final = beta_final
else: # New training session
train_steps = configs.get('train_steps', None)
run_steps = configs.get('run_steps', None)
timestamps = AttrDict({
'month': io.get_timestamp('%Y_%m'),
        'time': io.get_timestamp('%Y-%m-%d-%H%M%S'),
'hour': io.get_timestamp('%Y-%m-%d-%H'),
'minute': io.get_timestamp('%Y-%m-%d-%H%M'),
'second': io.get_timestamp('%Y-%m-%d-%H%M%S'),
})
configs.log_dir = io.make_log_dir(configs, 'GaugeModel', log_file,
timestamps=timestamps)
io.write(f'{configs.log_dir}', log_file, 'a')
configs.restore = False
if hmc_steps > 0:
# x, _, eps = train_hmc(args)
x, dynamics_hmc, _, hflags = train_hmc(configs,
num_chains=num_chains)
# dirs_hmc = hflags.get('dirs', None)
# args.dynamics_config['eps'] = dynamics_hmc.eps.numpy()
_ = run(dynamics_hmc, hflags, save_x=False)
if num_chains is None:
num_chains = configs.get('num_chains', 15)
x, dynamics, train_data, configs = train(configs, x=x, make_plots=True,
num_chains=num_chains)
if run_steps is None:
run_steps = configs.get('run_steps', 50000)
# ====
# Run inference on trained model
if run_steps > 0:
# run_steps = args.get('run_steps', 125000)
log_dir = configs.log_dir
beta = configs.get('beta_final')
if configs.get('small_batch', False):
batch_size = 256
old_shape = configs['dynamics_config']['x_shape']
new_shape = (batch_size, *old_shape[1:])
configs['dynamics_config']['x_shape'] = new_shape
dynamics = build_dynamics(configs, log_dir=log_dir)
x = x[:batch_size]
results = run(dynamics, configs, x, beta=beta, make_plots=True,
therm_frac=0.1, num_chains=num_chains, save_x=False)
try:
run_data = results.run_data
run_dir = run_data.dirs['run_dir']
dataset = run_data.save_dataset(run_dir, therm_frac=0.)
        except Exception:
            # TODO: Catch the specific exceptions save_dataset can raise
            pass
# _ = run_inference_from_log_dir(log_dir=log_dir,
# run_steps=run_steps,
# beta=beta,
# num_chains=num_chains,
# batch_size=batch_size,
# therm_frac=0.2,
# make_plots=True,
# train_steps=0,
# x=xbatch)
# Run with random start
# _ = run(dynamics, args)
# # Run HMC
# args.hmc = True
# args.dynamics_config['eps'] = 0.15
# hmc_dir = os.path.join(args.log_dir, 'inference_hmc')
# _ = run_hmc(args=args, hmc_dir=hmc_dir)
if __name__ == '__main__':
timestamp = io.get_timestamp('%Y-%m-%d-%H%M')
# debug_events_writer = tf.debugging.experimental.enable_dump_debug_info(
# debug_dir, circular_buffer_size=-1,
# tensor_debug_mode="FULL_HEALTH",
# )
CONFIGS = parse_configs()
CONFIGS = AttrDict(CONFIGS.__dict__)
if CONFIGS.get('debug', False):
logging_level = logging.DEBUG
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
else:
logging_level = logging.WARNING
console.log(f'CONFIGS: {dict(**CONFIGS)}')
# io.print_dict(CONFIGS)
main(CONFIGS)
# if RANK == 0:
# console.save_text(os.path.join(os.getcwd(), 'train.log'), styles=False)
#
# debug_events_writer.FlushExecutionFiles()
# debug_events_writer.FlushNonExecutionFiles()
| [
"[email protected]"
]
| |
9a8dd15216b814300933712ca28aed2bceba2f40 | 743d58c35caf21568feddc86946bbee340174721 | /app_spider/dongfangtoutiao_send_like.py | 88afa08afc11daba8347a0ec38d8c384283f4ee4 | []
| no_license | klgentle/lc_python | 38009ed82614c8f21ca9af6e3779a2e0898af09f | aabe56e690b40e4b93afef99bfe46d9a06e20cea | refs/heads/master | 2022-12-02T05:23:55.585659 | 2022-08-07T12:11:38 | 2022-08-07T12:11:38 | 176,750,473 | 2 | 0 | null | 2022-11-15T23:42:06 | 2019-03-20T14:21:51 | Python | UTF-8 | Python | false | false | 17,563 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: xag
@license: Apache Licence
@contact: [email protected]
@site: http://www.xingag.top
@software: PyCharm
@file: 东方头条.py
@time: 2/8/19 16:56
@description:东方头条新闻客户端
"""
__author__ = "xingag"
from airtest.core.api import *
from poco.exceptions import PocoNoSuchNodeException
from poco.drivers.android.uiautomation import AndroidUiautomationPoco
from utils.file_utils import *
from utils.string_utils import *
from queue import Queue
import datetime
import random  # used below for randomized swipe direction
from comments import generate_a_comment
from airtest.core.android import Android
from airtest_utils import *
from utils.device_utils import *
from utils.norm_utils import current_time
# App package name and launch Activity
package_name = 'com.songheng.eastnews'
activity = 'com.oa.eastfirst.activity.WelcomeActivity'
device_1 = Android('c54613d')
poco = AndroidUiautomationPoco(device_1, use_airtest_input=True, screenshot_each_action=False)
auto_setup(__file__)
# Coin (earnings) sources:
# 0. Time-based coins claimed from the bar at the top
# 1. Tasks: daily sign-in, prize wheel, etc.
# 2. Reading news
# 3. Commenting
class DongFangTouTiao(object):
"""
    DongFang TouTiao news-app automation.
"""
def __init__(self):
        # Keep only the 5 most recent news titles
        self.news_titles = []
        # Video playback time (seconds)
        self.video_play_time = 30
        # Interval (seconds) between claiming the title-bar coins
        self.interval_time = 5
        # Number of pages to skip
        self.skip_page = 0
def run(self):
        # 0. Launch the app
        home()
        stop_app(package_name)
        start_my_app(package_name, activity)
        # 1. Preload and skip the splash ads
        self.__pre_and_skip_ads()
        # 2. Claim the coins at the top
        self.get_top_title_coin()
        # 3. Tasks
        # self.norm_task()
        # 4. Browse the recommended news
        self.__skip_same_pages()
        while True:
            self.watch_news_recommend()
            print('Finished one page; moving on to the next page of news.')
            # Claim the title-bar coins
            self.get_top_title_coin()
            # Swipe to the next page of news
            poco.swipe([0.5, 0.8], [0.5, 0.3], duration=1)
        # 5. Watch videos
        # self.__video()
        # 6. Watch short videos
        # self.mini_video()
def __sign_in(self):
"""
        Task: daily sign-in.
:return:
"""
pass
def __lottery(self):
"""
        Task: prize-wheel lottery draw.
:return:
"""
pass
def watch_news_recommend(self):
"""
        Browse the recommended news items.
        :return:
        """
        # 1. All recommended news elements
        lv_elements = poco('com.songheng.eastnews:id/g_').children()
        if not lv_elements.exists():
            print('News list not found')
return
        # The loop below often raises PocoNoSuchNodeException
        # Iterate over every news item
        for news_element in lv_elements:
            # 1. Handle any "key news" push dialog
            self.__read_key_news()
            # 2. News title
            news_title = news_element.offspring('com.songheng.eastnews:id/pb')
            # Author
            author_element = news_element.offspring('com.songheng.eastnews:id/a4f')
            # 3. Note: the elements must be fully loaded
            # The calls below may raise hrpc.exceptions.RpcRemoteException: java.lang.IndexOutOfBoundsException
            try:
                if not news_title.exists() or not author_element.exists():
                    print("[Title] element not fully loaded" if not news_title.exists() else "[Publisher] element not fully loaded")
                    continue
            except Exception as e:
                print("****** Warning: the exists() call raised an error ******")
                print("Failed while checking whether the two elements above exist")
                print(e)
                self.__back_to_list()
                print('Back to the home page')
                return
            # 4. Filter out ads
            # At this point the item is a valid entry (which may still be an ad)
            # Note: some ads (tapping the title auto-downloads an app, an "ad"
            # label shows in the bottom-left corner, etc.) must be filtered out
            # Case 1:
            if news_element.attr('name') == 'android.widget.FrameLayout':
                print('Ad! This is a FrameLayout ad, title: %s' % news_title.get_text())
                continue
            # Case 2: tapping the title directly downloads another app
            ads_tips_element = news_element.offspring(name='com.songheng.eastnews:id/a4f', text='广告通')
            if ads_tips_element.exists():
                print('Ad! This is a Guangdiantong (GDT) ad, title: %s' % news_title.get_text())
                continue
            # Case 3: the corner badge marks it as an ad (Qihoo ad)
            ads_tips_element2 = news_element.offspring('com.songheng.eastnews:id/q5')
            if ads_tips_element2.exists():
                print('Ad! Ad title: %s' % news_title.get_text())
                continue
            # Already viewed; skip it
            if news_title.get_text() in self.news_titles:
                print('Already read this one, skipping!')
                continue
# ==========================================================================
            # 5. Read the news item
            # From here on this is a valid news item
            # News type:
            # text 0, video 1, image 2
            news_type = self.get_news_type(news_element)
            if 5 == len(self.news_titles):
                self.news_titles.pop()
            self.news_titles.insert(0, news_title.get_text())
            print('==' * 30)
            print('Current time: %s' % current_time())
            print('About to tap this news item; its title is: %s' % news_title.get_text())
            # Still on the main (list) page up to this point
            # If it is a normal news item, tap to open it
            news_title.click()
            # Wait for the news page elements to finish loading
            sleep(2)
            print('News type: %d' % news_type)
            print('Already-read news titles:')
            for temp_title in self.news_titles:
                print(temp_title)
            # "Comment for coins" entry and the submit button
comments_with_coins = poco('com.songheng.eastnews:id/m9')
submit_btn_element = poco("com.songheng.eastnews:id/m6").offspring('com.songheng.eastnews:id/vw')
            # The coin-timer (red packet) indicator
            # If it is absent, go back immediately
            red_coin_element = poco('com.songheng.eastnews:id/aq8')
            if not red_coin_element.exists():
                print('No red packet on this news item; going back!')
                self.__back_keyevent()
                continue
            if comments_with_coins.exists() and comments_with_coins.get_text() == '评论拿金币':
                # Post a comment to earn coins
                # comments_with_coins.click()
                # comments_edittext_element = poco('com.songheng.eastnews:id/vt')
                # Note: some news items do not allow comments (for obvious reasons)
                # if comments_with_coins.attr('editalbe'):
                # comments_edittext_element.set_text(generate_a_comment())
                #
                # print('Button text: %s' % submit_btn_element.get_text())
                # The submit button
                # submit_btn_element.click()
                # print('Sleeping 5 seconds after the tap')
                # sleep(5)
                # Go back to the current news page
                # self.__back()
                # else:
                # print('Warning! This news item does not allow comments!')
                # self.__back()
            # Record the start time (needed by the reading/watching loops below,
            # so it must be set whether or not the comment entry exists)
            oldtime = datetime.datetime.now()
            # Text news
            if news_type == 0:
                while True:
                    print("Loop: swiping through the article")
                    self.__swipe(True if random.randint(0, 1) == 0 else False)
                    # If a "view full article" button appears, tap it to expand
                    see_all_article_element = poco('点击查看全文')
                    if see_all_article_element.exists():
                        print('Expanding the full article...')
                        see_all_article_element.focus('center').click()
                        # Note: tapping "expand" sometimes hits an image instead; work around it
                        while poco('com.songheng.eastnews:id/lz').exists():
                            print('Accidentally tapped an image; returning to the news detail page')
                            self.__back_keyevent()
                    newtime = datetime.datetime.now()
                    interval_time = (newtime - oldtime).seconds
                    if interval_time >= 30:
                        print('Finished 30 seconds of reading')
                        break
                    self.__read_key_news()
            # Video news
            elif news_type == 1:
                while True:
                    print("Loop: watching the video")
                    newtime = datetime.datetime.now()
                    interval_time = (newtime - oldtime).seconds
                    if interval_time >= 30:
                        print('Finished 30 seconds of video')
                        break
                    self.__read_key_news()
            else:
                print('This article has no coins!')
            print('==' * 30)
            self.__back_to_list()
def __video(self):
"""
        Watch videos.
        :return:
        """
        poco('com.songheng.eastnews:id/ko').click()
        while True:
            # Video list
            poco('com.songheng.eastnews:id/a0z').wait_for_appearance()
            sleep(2)
            self.__read_key_news()
            video_elements = poco('com.songheng.eastnews:id/a0z').children()
            print('Do the video items exist:')
            print(video_elements.exists())
            # Iterate over the videos
            # Note: we can return early once a video finishes playing
            for video_element in video_elements:
                # 1. Title element
                video_title_element = video_element.offspring('com.songheng.eastnews:id/a3q')
                # Play button
                video_play_element = video_element.offspring('com.songheng.eastnews:id/nj')
                # 2. Both the video title and the play button must be visible
                if not video_title_element.exists() or not video_play_element.exists():
                    continue
                # 3. Title
                video_title = video_element.offspring('com.songheng.eastnews:id/a3q').get_text()
                print('Current video title: %s; playing it' % video_title)
                # Tap to play the video
                video_play_element.focus("center").click()
                # 4. Play the video
                self.play_video()
                print('Playing the next video')
                self.__back_keyevent()
            # Swipe to the next page of videos
            poco.swipe([0.5, 0.8], [0.5, 0.3], duration=0.2)
def mini_video(self):
"""
        Watch short videos.
        :return:
        """
        poco('com.songheng.eastnews:id/kr').click()
        # Wait for the list to load, then tap the first item
        poco('com.songheng.eastnews:id/a0p').child('com.songheng.eastnews:id/g_').wait_for_appearance(60)
        poco('com.songheng.eastnews:id/a0p').child('com.songheng.eastnews:id/g_').children()[0].click()
        while True:
            sleep(30)
            # Swipe left
            poco.swipe([0.9, 0.5], [0.1, 0.5], duration=0.2)
def __swipe(self, up_or_down):
"""
        Swipe within a single news item.
        :param up_or_down: True swipes up; False swipes down (slow swipe)
:return:
"""
if up_or_down:
poco.swipe([0.5, 0.6], [0.5, 0.4], duration=0.5)
else:
poco.swipe([0.5, 0.4], [0.5, 0.6], duration=0.5)
def get_news_type(self, news_element):
"""
        Get the news type (text 0, video 1, image 2).
:param news_element:
:return:
"""
        # Default to text news
        news_type = 0
        video_element = poco('com.songheng.eastfirst.business.video.view.widget.ijkplayer.h')
        if video_element.exists():
            news_type = 1
        return news_type
def __wait_for_element_exists(self, elements):
"""
        Block until every element in the list appears.
        :param elements: list of UI elements
        :return:
        """
        try:
            while True:
                # Assume every element exists until one is found missing
                element_exists = True
                # Check each element in the list
                for element in elements:
                    if not element.exists():
                        element_exists = False
                        break
                    else:
                        continue
                if element_exists:
                    break
                else:
                    print('Elements not found yet; keep waiting')
                    continue
        except PocoNoSuchNodeException:
            print('Element-not-found exception')
def __remove_disturb(self):
        # Exit-dialog element
exit_dialog_tips_element = poco('com.songheng.eastnews:id/xm')
if exit_dialog_tips_element.exists():
self.__back_keyevent()
def __pre_and_skip_ads(self):
"""
        Preload and skip the splash ads.
        :return:
        """
        # 1. Wait for the ad page elements to appear
        # Two styles: "skip", and "skip ad in N seconds"
        try:
            poco('com.songheng.eastnews:id/aoy').wait_for_appearance(10)
        except Exception as e:
            print('Exception while waiting for the ad element')
            print(e)
        ads_element = poco(name='com.songheng.eastnews:id/aoy', textMatches='^跳过广告.*$')
        ads_element1 = poco(name='android.widget.TextView', text='跳过')
        # ads_element = poco(name='com.songheng.eastnews:id/aoy')
        # Print the UI tree once the splash image has loaded
        # print_ui_tree(poco)
        # write_ui_tree(poco)
        # Skip the ad (0s)
        if ads_element.exists():
            print('Skipping ad (style 1)!')
            ads_element.click()
        if ads_element1.exists():
            print('Skipping ad (style 2)!')
            ads_element1.click()
        # 2. Wait until the main page is reached
        poco('com.songheng.eastnews:id/g_').wait_for_appearance(120)
def __read_key_news(self):
"""
        Handle the "key news" push dialog, which must be read.
        :return:
        """
        # Handle the key-news dialog if it pops up
        key_news_element = poco(name='com.songheng.eastnews:id/x2', text='立即查看')
        if key_news_element.exists():
            print('Key-news push! Need to take a look')
            key_news_element.click()
            # TODO: decide whether an extra pause is needed here
            sleep(3)
            self.__back_keyevent()
def norm_task(self):
"""
        Claim coins from routine tasks (sign-in, prize wheel).
:return:
"""
self.__sign_in()
self.__lottery()
def play_video(self):
"""
        Play a single video.
        :return:
        """
        # Start time
        start_time = datetime.datetime.now()
        while True:
            # Stop when the video ends or 30 seconds have elapsed
            scale_element = poco('com.songheng.eastnews:id/oy')
            if scale_element.exists():
                print('Video finished; stopping playback.')
                break
            # End time
            end_time = datetime.datetime.now()
            # Elapsed time
            interval_time = (end_time - start_time).seconds
            if interval_time > 30:
                print('Played for over 30 seconds; stopping playback.')
                break
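    # The loop above is a poll-until-done-or-timeout pattern. A reusable
    # helper (a sketch only; not used elsewhere in this script) might look like:
    #
    #     def wait_until(predicate, timeout=30):
    #         start = datetime.datetime.now()
    #         while not predicate():
    #             if (datetime.datetime.now() - start).seconds > timeout:
    #                 return False
    #         return True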
def get_top_title_coin(self):
"""
        Claim the coins in the top title bar.
        They can only be claimed from the news home page.
        :return:
        """
        get_coin_element = poco(name='com.songheng.eastnews:id/arq', text="领取")
        if get_coin_element.exists():
            print('Coins available at the top!')
            get_coin_element.click()
            print('Closing the dialog after claiming the coins!')
            # Close the dialog
            self.__back_keyevent()
        else:
            print('No coins at the top, or not on the home page')
def __skip_same_pages(self):
"""
        Swipe down to skip `self.skip_page` pages.
        :return:
        """
        current_page = 0
        while current_page < self.skip_page:
            poco.swipe([0.5, 0.8], [0.5, 0.3], duration=1)
            current_page += 1
        print('Done skipping; continue collecting coins')
def __back_keyevent(self):
"""
        A key-news dialog may pop up when going back, so handle it first.
:return:
"""
self.__read_key_news()
back_keyevent()
def __back_to_list(self):
"""
        Go back to the home page.
        :return:
        """
        print('Returning to the home page')
        while not poco('com.songheng.eastnews:id/g_').exists():
            print('Going back one step')
            self.__back_keyevent()
if __name__ == "__main__":
dftt = DongFangTouTiao()
dftt.run()
| [
"[email protected]"
]
| |
325cc25af1d6dc78baf0a0a9ddce7df6d98af533 | a2cf2e8e5bf2c5604071c22356fb94bb5e6bcc13 | /190820/working_order.py | 641f5dafc8ca37db1be143ba119f97f4bfa232db | []
| no_license | baambox5/algorithm | a9a3b05d1e87c6bf713aca1770ea1a2e0c728120 | ce28170db4277faaabbc4a06602aafab1a1129a3 | refs/heads/master | 2020-09-01T11:19:17.484337 | 2019-12-19T12:45:29 | 2019-12-19T12:45:29 | 218,948,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,927 | py | import sys
sys.stdin = open('working_order.txt', 'r')
# for test_case in range(1, 11):
# V, E = tuple(map(int, input().split()))
# G = [[] for _ in range(V + 1)]
# condition = [0] * (V + 1)
# visit = [0] * (V + 1)
# arr = list(map(int, input().split()))
# stack_list = []
# for i in range(len(arr)):
# if i % 2:
# condition[arr[i]] += 1
# else:
# G[arr[i]].append(arr[i + 1])
# print('#{}'.format(test_case), end=' ')
# for i in range(1, V + 1):
# if not condition[i] and not visit[i]:
# stack_list.append(i)
# visit[i] = 1
# print('{}'.format(i), end=' ')
# for j in G[i]:
# condition[j] -= 1
# while stack_list:
# for w in G[i]:
# if not condition[w] and not visit[w]:
# visit[w] = 1
# stack_list.append(i)
# print('{}'.format(w), end=' ')
# i = w
# for j in G[w]:
# condition[j] -= 1
# break
# else:
# i = stack_list.pop()
# print()
def dfs(v):
    # Visit v, then recursively visit each successor whose remaining
    # prerequisite count (in-degree) drops to zero: a DFS topological order.
    visit[v] = 1
print('{}'.format(v), end=' ')
for w in G[v]:
condition[w] -= 1
if not condition[w] and not visit[w]:
dfs(w)
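# A tiny worked example (hypothetical input): with V=3, E=2 and the edge list
# "1 2 1 3", only node 1 starts with in-degree 0, so dfs(1) prints 1, then
# decrements the in-degrees of 2 and 3 and visits each as it reaches zero,
# printing the work order "1 2 3".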
for test_case in range(1, 11):
V, E = tuple(map(int, input().split()))
G = [[] for _ in range(V + 1)]
condition = [0] * (V + 1)
visit = [0] * (V + 1)
arr = list(map(int, input().split()))
    for i in range(len(arr)):
        if i % 2:
            # odd index: edge head, so bump its prerequisite (in-degree) count
            condition[arr[i]] += 1
        else:
            # even index: edge tail, so record the edge tail -> head
            G[arr[i]].append(arr[i + 1])
print('#{}'.format(test_case), end=' ')
for i in range(1, V + 1):
if not condition[i] and not visit[i]:
dfs(i)
print()
| [
"[email protected]"
]
|