blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
20223d251cf7c1ee244f3ff6bda6aeac1170471e
|
02842943a8e8c5c53f5f8146234271446f1203ce
|
/102_morphological_analysis.py
|
eb9ada5fd4c6b12d0915f447c3a6585661eacd1e
|
[
"CC0-1.0"
] |
permissive
|
utda/portal_keyword
|
e38856747bdd413519fe249a2bf4a7c49011bc37
|
b83b5a70e766235361ec34e5d5d45610d649c248
|
refs/heads/master
| 2022-12-12T07:03:34.552994 | 2020-06-12T08:55:56 | 2020-06-12T08:55:56 | 252,589,741 | 0 | 0 |
CC0-1.0
| 2022-09-30T19:00:11 | 2020-04-02T23:48:40 |
Python
|
UTF-8
|
Python
| false | false | 1,672 |
py
|
# text-mining.py
# Tokenize Japanese text files with the janome morphological analyzer - 1
# and write per-file noun frequency counts as CSV.
from janome.tokenizer import Tokenizer
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
import json
import os
import requests
import configparser
import numpy as np
import glob
import csv
import os.path
# Create the morphological-analysis object - 2
text = Tokenizer()
idir = "data/text"
odir = "data/ma"
os.makedirs(odir, exist_ok=True)
# BUG FIX: the inputs live in idir; the original globbed odir (the freshly
# created, typically empty output directory), so the loop never processed
# anything and `file.replace(idir, odir)` was a no-op.
files = glob.glob(idir + '/*.txt')
for i in range(len(files)):
    if i % 100 == 0:
        print(i + 1, len(files))
    file = files[i]
    output = file.replace(idir, odir)
    # Read the text file - 3 (with-block closes the handle the original leaked)
    with open(file) as text_file:
        txt = text_file.read()
    # Process the text line by line - 5
    # splitlines() handles \n, \r\n and \r; the original split("\r\n") left
    # LF-only files as one giant line.
    word_dic = {}
    for line in txt.splitlines():
        malist = text.tokenize(line)
        for w in malist:
            word = w.surface
            ps = w.part_of_speech  # part of speech - 6
            if ps.find("名詞") < 0:
                continue  # count nouns only - 7
            if not word.isalpha():
                continue
            # dict.get() replaces the two-step "init then increment" idiom
            word_dic[word] = word_dic.get(word, 0) + 1
            if "『" in word:
                print(word)
    # Sort words by frequency, most common first - 8
    keys = sorted(word_dic.items(), key=lambda x: x[1], reverse=True)
    with open(output, 'w') as f2:
        writer = csv.writer(f2, lineterminator='\n')
        writer.writerow(["word", "cnt"])
        for word, cnt in keys:
            writer.writerow([word, cnt])
|
[
"[email protected]"
] | |
69b79f560be12c0e9e42677a4b97215c43d4af93
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_cuneiform.py
|
aa5197e96b7bd8efc91b06c79ac4112f74a72e7c
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 523 |
py
|
#calss header
class _CUNEIFORM():
def __init__(self,):
self.name = "CUNEIFORM"
self.definitions = [u'of a form of writing used for over 3,000 years until the 1st century BC in the ancient countries of Western Asia', u'pointed at one end and wide at the other: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"[email protected]"
] | |
c695fddcefdc0efae0816934bae5aaba3b17ab7c
|
54ddb3f38cd09ac25213a7eb8743376fe778fee8
|
/topic_02_syntax/hw/tests/logic_1_arithmetic_test.py
|
24a92c5b8bec9f07cd079054c5fbfa6afd539e1c
|
[] |
no_license
|
ryndovaira/leveluppythonlevel1_300321
|
dbfd4ee41485870097ee490f652751776ccbd7ab
|
0877226e6fdb8945531775c42193a90ddb9c8a8b
|
refs/heads/master
| 2023-06-06T07:44:15.157913 | 2021-06-18T11:53:35 | 2021-06-18T11:53:35 | 376,595,962 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,327 |
py
|
import pytest
from topic_02_syntax.hw.logic_1_arithmetic import arithmetic
# Parametrized cases: (num1, num2, op, expected).
# Any operator outside + - * / is expected to yield the sentinel string
# "Unknown operator" rather than raise.
params = [
    (0, 0, '+', 0),
    (1, 0, '+', 1),
    (0, 1, '+', 1),
    (100, 100, '+', 200),
    (100, -100, '+', 0),
    (-100, 100, '+', 0),
    (-100, -100, '+', -200),
    (0, 0, '-', 0),
    (1, 0, '-', 1),
    (0, 1, '-', -1),
    (100, 100, '-', 0),
    (100, -100, '-', 200),
    (-100, 100, '-', -200),
    (-100, -100, '-', 0),
    (0, 0, '*', 0),
    (1, 0, '*', 0),
    (0, 1, '*', 0),
    (100, 100, '*', 10000),
    (100, -100, '*', -10000),
    (-100, 100, '*', -10000),
    (-100, -100, '*', 10000),
    (0, 1, '/', 0),
    (1, 1, '/', 1),
    (100, 100, '/', 1),
    (100, -100, '/', -1),
    (-100, 100, '/', -1),
    (-100, -100, '/', 1),
    (0, 1, '=', "Unknown operator"),
    (1, 1, '%', "Unknown operator"),
    (100, 100, '#', "Unknown operator"),
    (100, -100, '.', "Unknown operator"),
    (-100, 100, '0', "Unknown operator"),
    (-100, -100, '&', "Unknown operator"),
]
# Human-readable test ids like "(1) + (0) == (1)" shown in pytest output.
ids = ["(%s) %s (%s) == (%s)" % (num1, op, num2, expected) for (num1, num2, op, expected) in params]
@pytest.mark.parametrize(argnames="num1, num2, op, expected",
                         argvalues=params,
                         ids=ids)
def test_arithmetic(num1, num2, op, expected):
    # One assertion per case; pytest expands params into individual tests.
    assert arithmetic(num1, num2, op) == expected
|
[
"[email protected]"
] | |
cd7ca0848790ab8b6fa8f0a2dca430f44d1e1aea
|
362224f8a23387e8b369b02a6ff8690c200a2bce
|
/django/django_orm/courses/courses_app/migrations/0004_auto_20210507_1257.py
|
44b3c5750ec2e09c2a574516f4e4ef23d781992c
|
[] |
no_license
|
Helenyixuanwang/python_stack
|
ac94c7c532655bf47592a8453738daac10f220ad
|
97fbc77e3971b5df1fe3e79652b294facf8d6cee
|
refs/heads/main
| 2023-06-11T02:17:27.277551 | 2021-06-21T17:01:09 | 2021-06-21T17:01:09 | 364,336,066 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 630 |
py
|
# Generated by Django 2.2 on 2021-05-07 19:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: drop the Description.course relation and
    re-create the link in the opposite direction as a nullable one-to-one
    field Course.description (reverse accessor 'course')."""

    dependencies = [
        ('courses_app', '0003_auto_20210507_1107'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='description',
            name='course',
        ),
        migrations.AddField(
            model_name='course',
            name='description',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='course', to='courses_app.Description'),
        ),
    ]
|
[
"[email protected]"
] | |
7ee6dfd65f6902adeb07ab3e77ae072964561905
|
b15d2787a1eeb56dfa700480364337216d2b1eb9
|
/samples/cli/accelbyte_py_sdk_cli/iam/_admin_get_list_country_age_restriction_v3.py
|
74d2f077e0a0ec3dedd98d7a8e75cccd7aeadc41
|
[
"MIT"
] |
permissive
|
AccelByte/accelbyte-python-sdk
|
dedf3b8a592beef5fcf86b4245678ee3277f953d
|
539c617c7e6938892fa49f95585b2a45c97a59e0
|
refs/heads/main
| 2023-08-24T14:38:04.370340 | 2023-08-22T01:08:03 | 2023-08-22T01:08:03 | 410,735,805 | 2 | 1 |
MIT
| 2022-08-02T03:54:11 | 2021-09-27T04:00:10 |
Python
|
UTF-8
|
Python
| false | false | 2,385 |
py
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# AGS Iam Service (6.2.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.iam import (
admin_get_list_country_age_restriction_v3 as admin_get_list_country_age_restriction_v3_internal,
)
from accelbyte_py_sdk.api.iam.models import ModelCountryV3Response
from accelbyte_py_sdk.api.iam.models import RestErrorResponse
@click.command()
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def admin_get_list_country_age_restriction_v3(
    namespace: Optional[str] = None,
    login_as: Optional[str] = None,
    login_with_auth: Optional[str] = None,
    doc: Optional[bool] = None,
):
    # Generated CLI wrapper ("Code generated. DO NOT EDIT!") — comments only.
    # --doc: print the wrapped SDK operation's docstring instead of calling it.
    if doc:
        click.echo(admin_get_list_country_age_restriction_v3_internal.__doc__)
        return
    x_additional_headers = None
    if login_with_auth:
        # Caller supplied a raw Authorization header value; use it directly.
        x_additional_headers = {"Authorization": login_with_auth}
    else:
        # Otherwise log in first ("client" or "user" credentials).
        login_as_internal(login_as)
    result, error = admin_get_list_country_age_restriction_v3_internal(
        namespace=namespace,
        x_additional_headers=x_additional_headers,
    )
    # SDK returns a (result, error) pair rather than raising.
    if error:
        raise Exception(f"AdminGetListCountryAgeRestrictionV3 failed: {str(error)}")
    click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))


# Metadata attached for the CLI framework's introspection.
admin_get_list_country_age_restriction_v3.operation_id = (
    "AdminGetListCountryAgeRestrictionV3"
)
admin_get_list_country_age_restriction_v3.is_deprecated = False
|
[
"[email protected]"
] | |
da6cdfe9ab180d0e96dc02d884b46c6a2f8a3e88
|
6e8f2e28479566dbaa338300b2d61f784ff83f97
|
/.history/code/preprocess_20210421153926.py
|
328dd81758da2a656921e2d8033defa2f29c1d4b
|
[] |
no_license
|
eeng5/CV-final-project
|
55a7d736f75602858233ebc380c4e1d67ab2b866
|
580e28819560b86f6974959efb1d31ef138198fc
|
refs/heads/main
| 2023-04-09T21:28:21.531293 | 2021-04-21T19:57:22 | 2021-04-21T19:57:22 | 352,703,734 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,518 |
py
|
import os
import random
import numpy as np
from PIL import Image
import tensorflow as tf
import hyperparameters as hp
class Datasets():
    """ Class for containing the training and test sets as well as
    other useful data-related information. Contains the functions
    for preprocessing.

    Review fixes:
      * createTrain/createTest referenced bare ``data_path`` (NameError) —
        now ``self.data_path``.
      * createTest called ``saveIMG(...)`` without ``self.``.
      * ``Path`` / ``cv2`` / ``plt`` / ``pd`` were used but never imported at
        file level; they are imported locally where needed.
    """

    def __init__(self, data_path, task, aug, generate):
        self.data_path = data_path
        self.emotions = ['angry', 'happy', 'disgust', 'sad', 'neutral', 'surprise', 'fear']
        self.emotion_dict = self.createEmotionDict()
        self.task = task
        self.aug = aug
        if generate == 1:
            # NOTE(review): aug is compared as the *string* '1' while generate
            # is compared as the int 1 — confirm callers pass matching types.
            if self.aug == '1':
                self.createSimpleData()
            else:
                self.createComplexData()
        # Dictionaries for (label index) <--> (class name)
        self.idx_to_class = {}
        self.class_to_idx = {}
        # For storing list of classes
        self.classes = [""] * hp.num_classes
        # Setup data generators (shuffle=False for both, as in the original)
        self.train_data = self.get_data(
            os.path.join(self.data_path, "train/"), False)
        self.test_data = self.get_data(
            os.path.join(self.data_path, "test/"), False)

    def cleanTestDirs(self,):
        """Remove every generated .jpg under <data_path>/test/<emotion>."""
        from pathlib import Path  # BUG FIX: Path was never imported at file level
        for e in self.emotions:
            pathy = self.data_path + 'test/' + e
            for f in Path(pathy).glob('*.jpg'):
                try:
                    os.remove(f)
                except OSError as err:  # renamed: `e` shadowed the emotion name
                    print("Error: %s : %s" % (f, err.strerror))

    def cleanTrainDirs(self,):
        """Remove every generated .jpg under <data_path>/train/<emotion>."""
        from pathlib import Path  # BUG FIX: Path was never imported at file level
        for e in self.emotions:
            pathy = self.data_path + 'train/' + e
            for f in Path(pathy).glob('*.jpg'):
                try:
                    os.remove(f)
                except OSError as err:
                    print("Error: %s : %s" % (f, err.strerror))

    def cleanAll(self,):
        """Wipe both test and train image directories."""
        self.cleanTestDirs()
        self.cleanTrainDirs()

    def createPixelArray(self, arr):
        """Parse a space-separated pixel string into a 48x48 uint8 image."""
        vals = list(map(int, arr.split()))
        return np.array(vals, dtype=np.uint8).reshape((48, 48))

    def equalize_hist(self, img):
        """Histogram-equalize a grayscale image."""
        import cv2  # BUG FIX: cv2 was used but never imported at file level
        return cv2.equalizeHist(img)

    def showImages(self, imgs):
        """Debug helper: show the images side by side in grayscale."""
        import matplotlib.pyplot as plt  # BUG FIX: plt was undefined
        _, axs = plt.subplots(1, len(imgs), figsize=(20, 20))
        axs = axs.flatten()
        for img, ax in zip(imgs, axs):
            ax.imshow(img, cmap=plt.get_cmap('gray'))
        plt.show()

    def augmentIMG(self, img, task):
        """Return augmented variants of img; task == 3 adds sharpen/flip passes."""
        import cv2  # BUG FIX: cv2 was used but never imported at file level
        imgs = [img]
        img1 = self.equalize_hist(img)
        imgs.append(img1)
        img2 = cv2.bilateralFilter(img1, d=9, sigmaColor=75, sigmaSpace=75)
        imgs.append(img2)
        if task == 3:
            kernel = np.array([[-1.0, -1.0, -1.0],
                               [-1.0, 9, -1.0],
                               [-1.0, -1.0, -1.0]])
            img3 = cv2.filter2D(img2, -1, kernel)  # sharpen
            imgs.append(img3)
            img4 = self.equalize_hist(img3)
            imgs.append(img4)
            img5 = cv2.bilateralFilter(img4, d=9, sigmaColor=100, sigmaSpace=100)
            imgs.append(img5)
            # NOTE(review): the horizontal flip may have been intended for all
            # tasks — source indentation was ambiguous in this dump; confirm.
            img6 = cv2.flip(img, 1)  # flip horizontally
            imgs.append(img6)
        return imgs

    def saveIMG(self, arr, num, folderLoc):
        """Save one uint8 array as <folderLoc>image_<num>.jpg."""
        im = Image.fromarray(arr)
        filename = folderLoc + "image_" + num + ".jpg"
        im.save(filename)

    def createTrain(self, task):
        """Build augmented training jpgs from <data_path>/train.csv."""
        import pandas as pd  # BUG FIX: pd was used but never imported at file level
        path1 = self.data_path + "train.csv"
        df = pd.read_csv(path1)  # CHANGE ME
        base_filename = self.data_path + "train/"  # BUG FIX: was bare `data_path` (NameError)
        for index, row in df.iterrows():
            px = row['pixels']
            emot = int(row['emotion'])
            emot_loc = self.emotion_dict[emot]
            filename = base_filename + emot_loc
            img = self.createPixelArray(px)
            img_arr = self.augmentIMG(img, task)
            idx = 0
            for i in img_arr:
                num = str(index) + "_" + str(idx)
                idx += 1
                self.saveIMG(i, num, filename)

    def createTest(self, task):
        """Build augmented test jpgs from the PublicTest rows of icml_face_data.csv."""
        import pandas as pd  # BUG FIX: pd was used but never imported at file level
        path1 = self.data_path + "icml_face_data.csv"  # BUG FIX: was bare `data_path`
        df = pd.read_csv(path1)  # CHANGE ME
        base_filename = self.data_path + "test/"  # BUG FIX: was bare `data_path`
        for index, row in df.iterrows():
            # NOTE(review): the leading spaces in ' Usage' / ' pixels' mirror
            # the CSV's actual column headers — do not "fix" them.
            if (row[' Usage'] == "PublicTest"):
                px = row[' pixels']
                emot = int(row['emotion'])
                emot_loc = self.emotion_dict[emot]
                filename = base_filename + emot_loc
                img = self.createPixelArray(px)
                img_arr = self.augmentIMG(img, task)
                idx = 0
                for i in img_arr:
                    num = str(index) + "_" + str(idx)
                    idx += 1
                    self.saveIMG(i, num, filename)  # BUG FIX: was saveIMG(...) without self

    def createEmotionDict(self,):
        """Map FER label index -> emotion subdirectory name."""
        return {
            0: "angry/",
            1: "disgust/",
            2: "fear/",
            3: "happy/",
            4: "sad/",
            5: "surprise/",
            6: "neutral/",
        }

    def createSimpleData(self,):
        """Regenerate train/test images with the basic augmentation set (task 1)."""
        self.cleanAll()
        print("Cleaning done")
        self.createTrain(1)
        print("Training Data Generation done")
        self.createTest(1)
        print("Testing Data Generation done")

    def createComplexData(self,):
        """Regenerate train/test images with the extended augmentation set (task 3)."""
        self.cleanAll()
        self.createTrain(3)
        self.createTest(3)

    def preprocess_fn(self, img):
        """ Preprocess function for ImageDataGenerator: scale pixels to [0, 1]. """
        img = img / 255.
        return img

    def get_data(self, path, shuffle):
        """ Returns an image data generator which can be iterated
        through for images and corresponding class labels.

        Arguments:
            path - Filepath of the data being imported, such as
                   "../data/train" or "../data/test"
            shuffle - Boolean value indicating whether the data should
                      be randomly shuffled.

        Returns:
            An iterable image-batch generator
        """
        data_gen = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=self.preprocess_fn)
        img_size = hp.img_size
        classes_for_flow = None
        # Make sure all data generators are aligned in label indices
        if bool(self.idx_to_class):
            classes_for_flow = self.classes
        # Form image data generator from directory structure
        data_gen = data_gen.flow_from_directory(
            path,
            target_size=(img_size, img_size),
            class_mode='sparse',
            batch_size=hp.batch_size,
            shuffle=shuffle,
            classes=classes_for_flow)
        # Setup the dictionaries if not already done
        if not bool(self.idx_to_class):
            unordered_classes = []
            for dir_name in os.listdir(path):
                if os.path.isdir(os.path.join(path, dir_name)):
                    unordered_classes.append(dir_name)
            for img_class in unordered_classes:
                self.idx_to_class[data_gen.class_indices[img_class]] = img_class
                self.class_to_idx[img_class] = int(data_gen.class_indices[img_class])
                self.classes[int(data_gen.class_indices[img_class])] = img_class
        return data_gen
|
[
"[email protected]"
] | |
4db1b6a570c6c09cb4abbde4d2d5b91439464880
|
86a563e6eff56cf96bfa3c6dcdfb706e68114530
|
/ch05/layer_naive.py
|
f4262f3f86dabc938f835840d0e9ffd66c61601c
|
[] |
no_license
|
mingrammer/deep-learning-from-scratch
|
be322ee82fe5c8d2bcde3ac3e7d35792c5314d1f
|
4e158aa3f773ac7c60585f3f1627e94dac7a05ba
|
refs/heads/master
| 2021-01-01T06:36:44.414300 | 2017-08-10T17:15:55 | 2017-08-10T17:15:55 | 97,468,838 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 519 |
py
|
class MulLayer:
    """Multiplication node: caches both operands on forward so backward
    can apply the product rule (dx = dout*y, dy = dout*x)."""

    def __init__(self):
        self.x = None
        self.y = None

    def forward(self, x, y):
        # Remember the operands for the backward pass.
        self.x, self.y = x, y
        return x * y

    def backward(self, dout):
        # Gradient of x*y: swap the cached operands.
        return dout * self.y, dout * self.x
class AddLayer:
    """Addition node: stateless; the upstream gradient passes through
    unchanged to both operands."""

    def __init__(self):
        pass

    def forward(self, x, y):
        return x + y

    def backward(self, dout):
        # d(x+y)/dx = d(x+y)/dy = 1, so both gradients equal dout.
        dx = dout * 1
        dy = dout * 1
        return dx, dy
|
[
"[email protected]"
] | |
a6d693cdcbe37656bb5535ac4a05fe5cc9372d37
|
41d0bd94bbaec0299e6be6fc56a726545c1894cb
|
/sources/nytimes/__init__.py
|
6b17755df17000bfee582d94d3ef7ceaa7c83853
|
[
"Unlicense"
] |
permissive
|
AB9IL/stream-sources
|
f86eec0552d0992e7ee02a39076e0a1042ebfe27
|
ede8bd3ad7d51723d489192d0a6c5b2ea31ffe56
|
refs/heads/master
| 2023-02-03T23:09:25.582012 | 2020-12-23T08:12:42 | 2020-12-23T08:12:42 | 319,333,418 | 0 | 0 |
Unlicense
| 2020-12-07T13:47:06 | 2020-12-07T13:47:05 | null |
UTF-8
|
Python
| false | false | 244 |
py
|
from sources.generic import FeedSource
class Source(FeedSource):
    """RSS feed source for The New York Times homepage.

    FeedSource (project base class) presumably consumes SOURCE and
    FEED_URL — confirm against sources.generic."""
    SOURCE = {
        'name': 'The New York Times',
        'url': 'https://www.nytimes.com',
    }
    FEED_URL = 'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml'
|
[
"[email protected]"
] | |
57058094d1fac2a6430800baef3bfb044fb40353
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/app/plugin/core/searchtext/iterators/InstructionSearchAddressIterator.pyi
|
714c2a10a62f4d6f5eb1d692fba25f1bfdbdb764
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,144 |
pyi
|
from typing import Iterator
import ghidra.program.model.address
import java.lang
import java.util
import java.util.function
# Auto-generated type stub for Ghidra's InstructionSearchAddressIterator.
# NOTE: targets Ghidra's Jython 2.7 runtime — `unicode` and `long` are
# Python 2 builtins here, and `overload` comes from the stub environment.
class InstructionSearchAddressIterator(object, ghidra.program.model.address.AddressIterator):
    def __init__(self, __a0: ghidra.program.model.listing.InstructionIterator): ...

    def __iter__(self) -> Iterator[object]: ...

    def equals(self, __a0: object) -> bool: ...

    def forEach(self, __a0: java.util.function.Consumer) -> None: ...

    def forEachRemaining(self, __a0: java.util.function.Consumer) -> None: ...

    def getClass(self) -> java.lang.Class: ...

    def hasNext(self) -> bool: ...

    def hashCode(self) -> int: ...

    def iterator(self) -> java.util.Iterator: ...

    def next(self) -> object: ...

    def notify(self) -> None: ...

    def notifyAll(self) -> None: ...

    def remove(self) -> None: ...

    def spliterator(self) -> java.util.Spliterator: ...

    def toString(self) -> unicode: ...

    @overload
    def wait(self) -> None: ...

    @overload
    def wait(self, __a0: long) -> None: ...

    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
|
[
"[email protected]"
] | |
56be18a63c0d30a9e4ba2dae5d07aad985c61656
|
40c4b8b618d67fc48b862809b6e2835bb7cf76eb
|
/leetcode/65.py
|
e19e991fccbe8881504df78c7650cbe96eaad2ad
|
[] |
no_license
|
berquist/ctci
|
9fa08ac724990eee32f8ad7cffc3517491570d41
|
f0a69d3e4dd1b73a43c96dcb7a9c7b9955c04c39
|
refs/heads/master
| 2022-08-18T01:53:16.994300 | 2022-08-15T00:36:07 | 2022-08-15T00:36:07 | 120,108,966 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 343 |
py
|
class Solution(object):
    def isNumber(self, s):
        """
        Return True if *s* (after stripping surrounding whitespace) is a
        valid decimal number: optional sign, digits with an optional dot
        (or a dot-led fraction), and an optional signed exponent.

        :type s: str
        :rtype: bool

        Examples (taken from the original stub's commented asserts):
            isNumber("0") -> True
            isNumber(" 0.1 ") -> True
            isNumber("abc") -> False
            isNumber("1 a") -> False
            isNumber("2e10") -> True
        """
        # BUG FIX: the stub had no body and implicitly returned None.
        # One anchored regex covers all accepted forms.
        import re
        pattern = r'^[+-]?(\d+\.?\d*|\.\d+)([eE][+-]?\d+)?$'
        return re.match(pattern, s.strip()) is not None
|
[
"[email protected]"
] | |
101f05c1b708685c9f582744ecc1a14472bcf253
|
30b2b8a449558fc327daebf51096bf251ef6a8e9
|
/scripts/Assemble.py
|
389daba962491debc1e343d62c2dfc8ec94ca8d5
|
[
"Zlib",
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
ekg/shasta
|
0ac3462d0e3f73375a1b583967992b7e5deba1fd
|
e2fd3c3d79fb4cafe77c62f6af2fef46f7a04b01
|
refs/heads/master
| 2020-06-02T12:59:50.717211 | 2019-06-10T12:13:22 | 2019-06-10T12:13:22 | 191,161,600 | 0 | 0 |
NOASSERTION
| 2019-06-10T12:13:04 | 2019-06-10T12:13:03 | null |
UTF-8
|
Python
| false | false | 686 |
py
|
#!/usr/bin/python3
# Driver script: run the final assembly step of the shasta assembler
# using parameters from the project config file.
import shasta
import GetConfig
import ast
# Read the config file.
config = GetConfig.getConfig()
# Create the Assembler.
a = shasta.Assembler()
# Set up the consensus caller.
a.setupConsensusCaller(config['Assembly']['consensusCaller'])
# Figure out if we should use marginPhase, and if so set it up.
# literal_eval safely parses the config's "True"/"False" string to a bool.
useMarginPhase = ast.literal_eval(config['Assembly']['useMarginPhase'])
if useMarginPhase:
    a.setupMarginPhase()
# Open the previously computed assembly stages (k-mers, markers, marker
# graph, assembly graph) — presumably memory-mapped from an earlier run;
# confirm against the shasta API docs.
a.accessKmers()
a.accessMarkers()
a.accessMarkerGraphVertices()
a.accessMarkerGraphEdges()
a.accessAssemblyGraphEdges()
a.accessAssemblyGraphEdgeLists()
a.accessMarkerGraphVertexRepeatCounts()
a.accessMarkerGraphEdgeConsensus()
# Run the assembly itself.
a.assemble()
|
[
"[email protected]"
] | |
2cfadbdf605826104ecf7f24efa19f78691766cf
|
c11f92e6a1578338cf759b5e1624a53225642e79
|
/babi/user_data.py
|
8307f03bf2d4e55df9bd70db01a1dca7746c0fcf
|
[
"MIT"
] |
permissive
|
pganssle/babi
|
c1d50df3bdb924316779ab82e996ad46baafb986
|
d20be693d2c067570f0a82e2c2baee34c827c3bd
|
refs/heads/master
| 2021-04-11T19:55:08.285937 | 2020-03-21T18:47:37 | 2020-03-21T18:47:37 | 249,049,571 | 0 | 0 |
MIT
| 2020-03-21T19:50:32 | 2020-03-21T19:50:32 | null |
UTF-8
|
Python
| false | false | 393 |
py
|
import os.path
def _xdg(*path: str, env: str, default: str) -> str:
    """Join *path* under the app's directory inside an XDG base dir.

    The base directory is taken from the environment variable *env* when it
    is set and non-empty, otherwise from the expanded *default*.
    """
    base = os.environ.get(env) or os.path.expanduser(default)
    return os.path.join(base, 'babi', *path)


def xdg_data(*path: str) -> str:
    """Path under $XDG_DATA_HOME/babi (default ~/.local/share/babi)."""
    return _xdg(*path, env='XDG_DATA_HOME', default='~/.local/share')


def xdg_config(*path: str) -> str:
    """Path under $XDG_CONFIG_HOME/babi (default ~/.config/babi)."""
    return _xdg(*path, env='XDG_CONFIG_HOME', default='~/.config')
|
[
"[email protected]"
] | |
1084dd65c5e897d08750a0765d039c5aa79fbda4
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/nlp/gpt2/src/utils/tensor_manipulations.py
|
8ff23330029fad9374e2b614e0f24e24d7e6f763
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 |
Apache-2.0
| 2023-05-17T11:22:28 | 2021-10-15T06:38:37 |
Python
|
UTF-8
|
Python
| false | false | 7,159 |
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
tensor manipulations
"""
from mindspore import Tensor
from mindspore import dtype as mstype
from mindspore.ops import operations as P
def extract_string_from_tensor(input_ids, mode="single", config=None, tokenizer=None):
    """
    Decode batched token-id tensors back into text, split at <eos>.

    Args:
        input_ids (Tensor): input sentences with shape [batch_size, seq_len].
        mode (str): ["pair", "single"]
            "pair" for tasks with paired inputs `<bos> A <eos> B <eos>`,
            such as summarization task, the dataset format `<bos> Article <eos> Summary <eos>`,
            reading comprehension task, the dataset format `<bos> Passage Question <eos> Answer <eos>`.
            "single" for tasks with single input `<bos> A <eos>`, such as Language Modeling, Lambada task.
        config: the configuration of GPT-2 model.
        tokenizer: the tokenizer of GPT-2 model.

    Return:
        "pair" mode: (prompt_list, reference_list) — the text before the first
        <eos> and the text between the first and second <eos>.
        "single" mode: prompt_list only — the text before the first <eos>.
        NOTE(review): the original docstring also described a rest_list which
        is never built or returned by this code.
    """
    batch_size = config.batch_size
    seq_length = config.seq_length
    prompt_list = [""] * batch_size
    reference_list = [""] * batch_size
    eos_text = tokenizer.eos_token
    len_eos_text = len(eos_text)
    input_ids = P.Reshape()(input_ids, (batch_size, seq_length))
    if mode == "pair":
        for batch_idx in range(batch_size):
            sentence_tensor = input_ids[batch_idx]
            # [1:] drops the leading <bos> token before decoding.
            sentence_list = sentence_tensor.asnumpy().tolist()[1:]
            sentence = tokenizer.decode(sentence_list)
            prompt_start = 0
            prompt_end = sentence.find(eos_text, 0)
            reference_start = prompt_end + len_eos_text
            # Second <eos> is searched in the tail, then rebased to the
            # full-sentence index space.
            reference_end = sentence[reference_start:].find(
                eos_text, 0) + reference_start
            prompt_list[batch_idx] = sentence[prompt_start:prompt_end]
            reference_list[batch_idx] = sentence[reference_start:reference_end]
        return prompt_list, reference_list
    # For single output datasets such as WikiText, etc.
    if mode == "single":
        for batch_idx in range(batch_size):
            sentence_tensor = input_ids[batch_idx]
            sentence_list = sentence_tensor.asnumpy().tolist()[1:]
            sentence = tokenizer.decode(sentence_list)
            prompt_start = 0
            prompt_end = sentence.find(eos_text, 0)
            prompt_list[batch_idx] = sentence[prompt_start:prompt_end]
    else:
        raise NotImplementedError('mode:{} not supported.'.format(mode))
    return prompt_list
def extract_single_token_logits(logits=None, seq_pos=None):
    """
    Gather, for each batch row, the logits at one chosen sequence position.

    Args
        logits: (batch_size,seq_length,vocab_size) e.g. when batchsize is 8,
            sequence length is 1024 and vocab_size is 50257,
            then logits is a Tensor with shape (8,1024,50257)
        seq_pos:(batch_size) list
    Return:
        output_logits: (batch_size,1,vocab_size) extract the logit to predict the last token.
    """
    batch_size = logits.shape[0]
    # NOTE(review): assumes batch_size >= 1 — output_logits would be unbound
    # for an empty batch.
    for i in range(batch_size):
        # Slicing with start:stop:1 keeps the dims: shape (1, 1, vocab_size).
        logit = logits[i:i + 1:1, seq_pos[i]:seq_pos[i] + 1:1, ::]
        if i == 0:
            output_logits = logit
        else:
            # Concatenate along the batch axis (Concat default axis 0).
            output_logits = P.Concat()((output_logits, logit))
    return output_logits
def get_last_one_pos(input_mask: Tensor):
    """
    Per batch row, the index of the last set position: count of 1s minus 1
    (assumes the mask is a contiguous prefix of 1s — confirm with callers).

    Arg:
        input_mask (Tensor): (batch_size,seq_length)
    Return:
        pos (Tensor): (batch_size,)
    """
    ones_per_row = P.ReduceSum(keep_dims=False)(
        P.Cast()(input_mask, mstype.float32), axis=1)  # (batch_size,)
    last_pos = P.Cast()(ones_per_row, mstype.int32) - 1
    return last_pos
def get_next_one_pos(input_mask: Tensor):
    """
    Per batch row, the number of 1s in the mask — used by callers as the
    index of the next free (zero) slot.

    Arg:
        input_mask (Tensor): (batch_size,seq_length)
    """
    ones_per_row = P.ReduceSum(keep_dims=False)(
        P.Cast()(input_mask, mstype.float32), axis=1)  # (batch_size,)
    return P.Cast()(ones_per_row, mstype.int32)
def add_last_token_mask(input_mask: Tensor, overflow_strategy: str = "shift"):
    """
    Set the mask bit for one additional token in each batch row, at the
    first free position; rows whose mask is already full are left unchanged.

    Args:
        input_mask: Tensor, (batch_size, seq_length) 0/1 mask
        overflow_strategy: str, "shift" or "truncate"
    Returns:
        Tensor, int32 mask of the same shape
    """
    pos = get_next_one_pos(input_mask).asnumpy()
    input_mask_np = input_mask.asnumpy()
    maximum_length = input_mask.shape[1]
    batch_size = input_mask.shape[0]
    for idx in range(batch_size):
        # not overflow
        if pos[idx] < maximum_length:
            input_mask_np[idx][pos[idx]] = 1
        # overflow
        else:
            # NOTE(review): "shift" and "truncate" behave identically here
            # (the mask is already all-ones, so nothing changes); only an
            # unknown strategy raises.
            if overflow_strategy == "shift":
                continue
            if overflow_strategy == "truncate":
                continue
            else:
                raise ValueError("{} is not an option in ['shift','truncate'].".format(overflow_strategy))
    return Tensor(input_mask_np, dtype=mstype.int32)
def add_last_token(input_ids: Tensor, input_mask: Tensor, overflow_strategy: str = "shift", append_ids=None,
                   next_token_pos=None):
    """
    Append one token id per batch row at the row's next free position,
    updating both ids and mask.

    Args:
        input_ids: Tensor, (batch_size, seq_length) token ids
        input_mask: Tensor, (batch_size, seq_length) 0/1 mask
        overflow_strategy: str, "shift" (drop the oldest token) or
            "truncate" (drop the new token) when a row is full
        append_ids: per-row ids to append; -1 marks "nothing to append"
        next_token_pos: optional precomputed per-row insert positions;
            derived from the mask when None
    Returns:
        (Tensor, Tensor): updated (input_ids, input_mask), both int32
    """
    # get positional list/numpy array
    if next_token_pos is None:
        pos = get_next_one_pos(input_mask).asnumpy()
    else:
        pos = next_token_pos
    # get numpy of inputs
    input_mask_np = input_mask.asnumpy()
    input_ids_np = input_ids.asnumpy()
    maximum_length = int(input_mask.shape[1])
    batch_size = int(input_mask.shape[0])
    for idx in range(batch_size):
        # Sentinel -1: this row has nothing to append.
        if append_ids[idx] == -1:
            continue
        # not overflow
        if pos[idx] < maximum_length:
            input_mask_np[idx][int(pos[idx])] = 1
            input_ids_np[idx][int(pos[idx])] = append_ids[idx]
        # overflow
        else:
            if overflow_strategy == "shift":
                # shift one token left, then place the new id at the end
                input_ids_np[idx][0:maximum_length - 1] = input_ids_np[idx][1:maximum_length]
                input_ids_np[idx][maximum_length - 1] = append_ids[idx]
                continue
            if overflow_strategy == "truncate":
                # do nothing
                continue
            else:
                raise ValueError("{} is not an option in ['shift','truncate'].".format(overflow_strategy))
    return Tensor(input_ids_np, dtype=mstype.int32), Tensor(input_mask_np, dtype=mstype.int32)
|
[
"[email protected]"
] | |
8b38feee1e7984c093ab2477b1e6c94aa9ae5032
|
9b1446b26e81a79c303f9799fb6a91785c7adb03
|
/.history/Code/rearrange_20200119162227.py
|
a2b26cbc31714c4d2901c190ccccaf9a0c97fe88
|
[] |
no_license
|
SamirIngley/CS1.2-Tweet-Gen
|
017ea15b1113881a156ff24682828bc654eb6c81
|
bcd95fa63e05849cbf8e36230d8e31032b99daaa
|
refs/heads/master
| 2020-12-14T20:19:57.733290 | 2020-08-04T23:19:23 | 2020-08-04T23:19:23 | 234,856,234 | 0 | 0 | null | 2020-06-05T21:13:04 | 2020-01-19T07:05:55 |
Python
|
UTF-8
|
Python
| false | false | 5,847 |
py
|
import random
def random_rearrange(input_string):
    """Print and return the words of *input_string* in a random order.

    Note: samples with replacement (randint per slot), so words may repeat
    or be dropped — this mirrors the original behavior exactly, including
    one random.randint(0, n-1) call per word.
    """
    words = input_string.split(' ')
    word_count = len(words)
    # One randint per output slot — same call sequence as the original.
    picked = [words[random.randint(0, word_count - 1)] for _ in range(word_count)]
    sentence = ' '.join(picked)
    print(sentence)
    return sentence
def reverse_order(input_string):
    """Print and return the words of *input_string* in reverse order."""
    words = input_string.split(' ')
    print(words)
    flipped = words[::-1]
    print(flipped)
    sentence = ' '.join(flipped)
    print(sentence)
    return sentence
def mad_libs():
    """Prompt the user for a noun, a name and two verbs, then print a
    short mad-lib story built from them.

    NOTE(review): raises IndexError if the verbs prompt receives fewer
    than two space-separated words — no validation is performed.
    """
    nouns_string = input('Give me a noun: ')
    names_string = input('Give me a name: ')
    verbs_string = input('Give me two verbs: ')
    # Each answer may contain several space-separated words.
    nouns = nouns_string.split(' ')
    names = names_string.split(' ')
    verbs = verbs_string.split(' ')
    print(verbs)
    print("One day I went to the store to buy myself a {}.".format(nouns[0]))
    print("'What's the matter with you {}?' The clerk asked.".format(names[0]))
    print("'This fits me well' I said")
    print("'Well go on then {} it out so you don't miss out.".format(verbs[0]))
    print("'Let me {} first and I'll give you what I have.'".format(verbs[1]))
# def anagram():
# ''' handshake with each letter
# rearrange to see every possible combination of words
# '''
# word = input('Letters/word: ')
# length = len(word)
# current = None
# temp = None
# for letter in word:
# current = letter
# for letter2 in word:
# temp = letter2
# if letter == letter2:
# pass
# else:
def anagram(input_string):
    """Print and return every string reachable from *input_string* by
    swapping two positions (one transposition per output; each entry keeps
    a trailing newline, matching the original's write-ready format).

    BUG FIX: the original appended ``linked_list.read() + "\\n"``, but
    ``read()`` returns None, so the very first append raised TypeError.
    It also swapped linked-list nodes by *value*, which misbehaves on
    repeated letters. This rewrite performs positional swaps on a plain
    character list instead, with no LinkedList dependency.
    """
    chars = list(input_string)
    new_strings = []
    for i in range(len(chars)):
        for j in range(len(chars)):
            if i == j:
                continue
            # Swap positions i and j, record the result, then swap back.
            chars[i], chars[j] = chars[j], chars[i]
            new_strings.append(''.join(chars) + "\n")
            chars[i], chars[j] = chars[j], chars[i]
    print(new_strings)
    return new_strings
class Node():
    """One singly-linked-list cell: a stored value plus a next pointer."""

    def __init__(self, data=None, next_pointer=None):
        self.data = data
        self.next_pointer = next_pointer

    def get_data(self):
        """Return the stored value."""
        return self.data

    def get_next(self):
        """Return the following node, or None at the tail."""
        return self.next_pointer

    def set_next(self, next_node):
        """Re-point this node at *next_node*."""
        self.next_pointer = next_node
class LinkedList():
    """Singly linked list of characters used by anagram(); insert() prepends.

    Review fixes:
      * delete(): deleting the head node used to fall through into
        ``previous.set_next(...)`` with ``previous is None`` and raise
        AttributeError — the unlink branches are now mutually exclusive.
      * swap(): used to raise AttributeError when either value was absent
        from the list; now a silent no-op in that case.
    """

    def __init__(self, head=None):
        self.head = head

    def insert(self, data):
        """Prepend *data* as the new head (O(1))."""
        new_node = Node(data)
        new_node.set_next(self.head)
        self.head = new_node

    def delete(self, data):
        """Unlink the first node holding *data*.

        Keeps the original's (odd) convention of *returning* a ValueError
        instead of raising when the value is absent.
        """
        current = self.head
        previous = None
        found = False
        while current and found == False:
            if current.get_data() == data:
                found = True
            else:
                previous = current
                current = current.get_next()
        if current == None:
            return ValueError("does not exist")
        if previous == None:
            # BUG FIX: head deletion must not touch `previous`.
            self.head = current.get_next()
        else:
            previous.set_next(current.get_next())

    def read(self):
        """Print the concatenated node values head-to-tail; returns None."""
        current = self.head
        read = []
        while current:
            read.append(current.get_data())
            current = current.get_next()
        print(''.join(read))
        return

    def swap(self, data1, data2):
        """Swap the stored values of the (last-seen) nodes holding data1/data2."""
        if data1 == data2:
            print("n/a")
            return
        node1 = None
        node2 = None
        current = self.head
        while current:
            curr_data = current.get_data()
            if curr_data == data1:
                node1 = current
            elif curr_data == data2:
                node2 = current
            current = current.get_next()
        if node1 is None or node2 is None:
            # BUG FIX: silently skip when either value is missing.
            return
        node1.data, node2.data = node2.data, node1.data
        return

    def size(self):
        """Print and return the number of nodes."""
        current = self.head
        counter = 0
        while current:
            counter += 1
            current = current.get_next()
        print(counter)
        return counter
# Manual test driver: most experiments are left commented out; only the
# anagram() call runs when the script is executed directly.
if __name__ == '__main__':
    input_string = 'hello yellow fellow'
    anagram_string = 'superduper'
    # random_rearrange(input_string)
    # reverse_order()
    # mad_libs()
    anagram(anagram_string)
    # linked_list = LinkedList()
    # linked_list.insert('a')
    # linked_list.insert('b')
    # linked_list.insert('c')
    # linked_list.insert('d')
    # linked_list.insert('e')
    # linked_list.insert('f')
    # linked_list.insert('g')
    # linked_list.insert('h')
    # linked_list.insert('i')
    # linked_list.insert('j')
    # linked_list.insert('k')
    # linked_list.read()
    # linked_list.delete('a')
    # linked_list.read()
    # print(range(linked_list.size()))
    # linked_list.swap([0],[10])
    # linked_list.read()
|
[
"[email protected]"
] | |
655703818b71a380d0ddde23057a56603097cada
|
e41e2505ff0b0534017e85bda0e06493094d1498
|
/frontend/corona_REST/setting.py
|
6315adfe2d6fb9e632722dc0d095178b642a7331
|
[
"MIT"
] |
permissive
|
luyuliu/COVID19-Dashboard
|
5d516f85284ca908321696bee405fdf1da5531d1
|
717f83e2767fa53367232e742c110515957a94fd
|
refs/heads/master
| 2023-09-04T11:59:37.076149 | 2021-11-12T20:32:46 | 2021-11-12T20:32:46 | 253,892,926 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,538 |
py
|
# Eve-style resource registry: each key serves as both the endpoint name and
# the backing MongoDB collection name (order preserved from the original).
_SOURCES = [
    'ridership_actual',
    'county_info',
    'census_occu_pop',
    'corona_cases_state_level',
    'census_occupation_population',
    'system_info',
    'other_ridership_hourly',
    'corona_cases_github',
    'other_ridership',
    'ridership',
    'census_occupation_industry',
    'ridership_hourly',
    'aggregated_ridership_hourly',
    'system_info_backup',
    'google_trend',
    'corona_cases_usafacts',
    'census_transit_pop',
]
DOMAIN = {source: {'datasource': {'source': source}} for source in _SOURCES}
# MongoDB connection settings.
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
MONGO_DBNAME = "corona"
# Accept fields not declared in a schema; allow any cross-origin requests.
ALLOW_UNKNOWN = True
X_DOMAINS = '*'
# Page-size settings for collection GETs.
PAGINATION_LIMIT = 10000
PAGINATION_DEFAULT = 10000
|
[
"[email protected]"
] | |
aef9f80055a7aed0d9ee6b1f6e97282e910a9c59
|
a8b17b17f9b2a640013064c50e1cebc27a7a68de
|
/10-Merging-DataFrames-with-Pandas/04-case-study-Summer-Olympics/02-loading-ioc-codes-dataframe.py
|
6f36f6445cdf16c2b2857aa63e94ef5d965ab92a
|
[] |
no_license
|
JohnnyFang/datacamp
|
20eae09752521f14006cb3fda600b10bd7b12398
|
0fa8fa7682c23b0eb07bd03e4b75f5b77aeafa75
|
refs/heads/master
| 2020-04-18T00:27:37.358176 | 2020-02-04T20:54:19 | 2020-02-04T20:54:19 | 167,078,316 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 834 |
py
|
'''
Read file_path into a DataFrame called ioc_codes. The identifier file_path has been pre-defined with the filename 'Summer Olympic medallists 1896 to 2008 - IOC COUNTRY CODES.csv'.
Select only the columns 'Country' and 'NOC' from ioc_codes.
Print the leading 5 and trailing 5 rows of the DataFrame ioc_codes (there are 200 rows in total). This has been done for you, so hit 'Submit Answer' to see the result!
'''
# Import pandas
import pandas as pd
# Create the file path: file_path (CSV is expected in the working directory)
file_path = 'Summer Olympic medallists 1896 to 2008 - IOC COUNTRY CODES.csv'
# Load DataFrame from file_path: ioc_codes
ioc_codes = pd.read_csv(file_path)
# Extract the relevant columns: ioc_codes ('NOC' is the IOC country code)
ioc_codes = ioc_codes[['Country', 'NOC']]
# Print first and last 5 rows of ioc_codes
print(ioc_codes.head())
print(ioc_codes.tail())
|
[
"[email protected]"
] | |
a434c943b8afac2a3ba516952790983f4bebf8d9
|
def27d5864764b877b6786835ec97f2bd74c6ba8
|
/easy/HammingDistance.py
|
b9cb3fe45c35fdf770719e3a32aa986bf2a73a40
|
[] |
no_license
|
bolan2014/leetcode
|
f6cf38a49a9250abeb36543ea2498062c58e811d
|
1c35fde3a65c4f216218f459736d4c39a29980d5
|
refs/heads/master
| 2021-04-09T16:59:41.494568 | 2017-05-10T03:47:14 | 2017-05-10T03:47:14 | 46,648,353 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 466 |
py
|
class Solution(object):
    def hammingDistance(self, x, y):
        """
        Return the Hamming distance between x and y: the number of bit
        positions at which the two non-negative integers differ.

        :type x: int
        :type y: int
        :rtype: int
        """
        # XOR leaves a 1 exactly where the operands' bits differ, so the
        # answer is the popcount of x ^ y.  This replaces the original's
        # manual binary-string zero-padding and per-character comparison.
        return bin(x ^ y).count('1')
|
[
"[email protected]"
] | |
c453f63b56b29011977ee32465c52b69a612a70d
|
630fe47bb5aa5e49b45ab101d87c2dd2c53d180f
|
/venv/Lib/site-packages/com/vmware/nsx/node/aaa/providers/vidm_client.py
|
b5c31723c754c80b2bea2a739a2388630213feb8
|
[] |
no_license
|
shrivastava-himanshu/Leetcode_practice
|
467497a58d82ff3ae2569d5e610dc6f27a1f31d6
|
4c59799947c2b17bfd22ca2a08707ef85e84a913
|
refs/heads/main
| 2023-06-12T13:14:45.381839 | 2021-07-05T04:09:05 | 2021-07-05T04:09:05 | 367,546,005 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,544 |
py
|
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2021 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx.node.aaa.providers.vidm.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class Status(VapiInterface):
    """
    Auto-generated vAPI client binding for the vIDM AAA-provider status
    service (read-only; exposes a single ``get`` operation).
    """
    _VAPI_SERVICE_ID = 'com.vmware.nsx.node.aaa.providers.vidm.status'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _StatusStub)
        self._VAPI_OPERATION_IDS = {}
    def get(self):
        """
        Read AAA provider vIDM status
        :rtype: :class:`com.vmware.nsx.model_client.NodeAuthProviderVidmStatus`
        :return: com.vmware.nsx.model.NodeAuthProviderVidmStatus
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        # Delegates to the generated stub; 'get' maps to the REST metadata
        # declared in _StatusStub.
        return self._invoke('get', None)
class _StatusStub(ApiInterfaceStub):
    """Generated stub wiring the ``get`` operation to its REST metadata
    (GET /api/v1/node/aaa/providers/vidm/status)."""
    def __init__(self, config):
        # properties for get operation
        get_input_type = type.StructType('operation-input', {})
        # Maps wire-level error identifiers to binding error types.
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        # REST route for the operation; no path or query parameters.
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/api/v1/node/aaa/providers/vidm/status',
            path_variables={
            },
            query_parameters={
            },
            content_type='application/json'
        )
        operations = {
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'NodeAuthProviderVidmStatus'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'get': get_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx.node.aaa.providers.vidm.status',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class StubFactory(StubFactoryBase):
    # Maps attribute names to their VapiInterface classes so the factory can
    # construct service stubs by name.
    _attrs = {
        'Status': Status,
    }
|
[
"[email protected]"
] | |
debe5f15c52bb08f8beadfea06a498d86d7c81c4
|
27880c807b97b3b318d002a547680c6881acf460
|
/tests/argparse/special/test_overwrite.py
|
a4721283725798b1b7e6875be3aed206d66f9fc3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sbrodehl/miniflask
|
a1ebb809d544fbc235044624af9193982f01aced
|
55b350b951ad2120ea13a986f742523206f407c6
|
refs/heads/master
| 2022-11-05T05:18:43.383396 | 2022-09-14T15:26:17 | 2022-09-14T15:26:17 | 252,498,534 | 0 | 0 | null | 2020-04-02T15:46:39 | 2020-04-02T15:46:39 | null |
UTF-8
|
Python
| false | false | 2,702 |
py
|
from pathlib import Path
import pytest
import miniflask # noqa: E402
def test_setup(capsys):
    # Baseline: only the "defaults" module is loaded; one variable is
    # overridden from the CLI.
    mf = miniflask.init(
        module_dirs=str(Path(__file__).parent / "modules"),
        debug=True
    )
    mf.load(["defaults"])
    mf.parse_args([
        "--var_default_override_twice_and_cli", "1114"
    ])
    # First readouterr() drains output produced during load/parse so the
    # assertion below only sees what print_all() emits.
    captured = capsys.readouterr()
    mf.event.print_all()
    captured = capsys.readouterr()
    assert captured.out == """
modules.defaults.var_default: 1
modules.defaults.var_default_override: 2
modules.defaults.var_default_override_twice: 3
modules.defaults.var_default_override_twice_and_cli: 1114
""".lstrip()
def test_override(capsys):
    # "defaults_override" is loaded after "defaults" and re-registers some
    # variables; the later module's values win.
    mf = miniflask.init(
        module_dirs=str(Path(__file__).parent / "modules"),
        debug=True
    )
    mf.load(["defaults", "defaults_override"])
    mf.parse_args([
        "--var_default_override_twice_and_cli", "1114"
    ])
    # Drain load/parse output so only print_all() output is asserted.
    captured = capsys.readouterr()
    mf.event.print_all()
    captured = capsys.readouterr()
    assert captured.out == """
modules.defaults.var_default: 1
modules.defaults.var_default_override: 12
modules.defaults.var_default_override_twice: 13
modules.defaults.var_default_override_twice_and_cli: 1114
""".lstrip()
def test_override_twice(capsys):
    # Two override modules stack: the last-loaded module's value wins for
    # each variable, and the CLI overrides everything.
    mf = miniflask.init(
        module_dirs=str(Path(__file__).parent / "modules"),
        debug=True
    )
    mf.load(["defaults", "defaults_override", "defaults_override_twice"])
    mf.parse_args([
        "--var_default_override_twice_and_cli", "1114"
    ])
    # Drain load/parse output so only print_all() output is asserted.
    captured = capsys.readouterr()
    mf.event.print_all()
    captured = capsys.readouterr()
    assert captured.out == """
modules.defaults.var_default: 1
modules.defaults.var_default_override: 12
modules.defaults.var_default_override_twice: 113
modules.defaults.var_default_override_twice_and_cli: 1114
""".lstrip()
def test_override_conflict():
    # Loading two modules that register the same variable ("defaults" and
    # "defaults2") before the override module must be rejected at parse time.
    mf = miniflask.init(
        module_dirs=str(Path(__file__).parent / "modules"),
        debug=True
    )
    mf.load(["defaults", "defaults2", "defaults_override"])
    with pytest.raises(miniflask.exceptions.RegisterError):
        mf.parse_args([])
        mf.event.print_all()
def test_override_scoped_absolute():
    # Absolutely-scoped overrides disambiguate the conflicting modules, so
    # parsing succeeds (no exception expected).
    mf = miniflask.init(
        module_dirs=str(Path(__file__).parent / "modules"),
        debug=True
    )
    mf.load(["defaults", "defaults2", "defaults_override_scoped_absolute"])
    mf.parse_args([])
    mf.event.print_all()
def test_override_scoped_relative():
    # Same as the absolute-scope case, but the override module uses
    # relative scoping; parsing must also succeed.
    mf = miniflask.init(
        module_dirs=str(Path(__file__).parent / "modules"),
        debug=True
    )
    mf.load(["defaults", "defaults2", "defaults_override_scoped_relative"])
    mf.parse_args([])
    mf.event.print_all()
|
[
"[email protected]"
] | |
683cb94f99b944c57b75bcff395c4d70823f1021
|
27acd9eeb0d2b9b6326cc0477e7dbb84341e265c
|
/test/vraag4/src/isbn/156.py
|
5d83c65f74ee33e129c19964d85548161b6c4135
|
[] |
no_license
|
VerstraeteBert/algos-ds
|
e0fe35bc3c5b7d8276c07250f56d3719ecc617de
|
d9215f11cdfa1a12a3b19ade3b95fa73848a636c
|
refs/heads/master
| 2021-07-15T13:46:58.790446 | 2021-02-28T23:28:36 | 2021-02-28T23:28:36 | 240,883,220 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,113 |
py
|
def isISBN(code):
    """Check whether ``code`` is a valid ISBN-13 string (prefix 978/979).

    Returns 0 for anything that is not a well-formed 13-digit candidate,
    otherwise True/False depending on the check digit.
    """
    well_formed = (
        isinstance(code, str)
        and len(code) == 13
        and code[:3] in ('978', '979')
        and code.isdigit()
    )
    if not well_formed:
        return 0
    # Weighted sum of the first 12 digits: weight 1 at even positions, 3 at odd.
    weighted = sum((3 if position % 2 else 1) * int(digit)
                   for position, digit in enumerate(code[:12]))
    expected = (10 - weighted % 10) % 10
    return expected == int(code[-1])
def overzicht(codes):
    """Print a per-region tally of the given ISBN codes plus an error count."""
    # Buckets 0-9 group valid codes by their 4th digit; bucket 10 counts
    # invalid codes.
    groepen = dict.fromkeys(range(11), 0)
    for code in codes:
        groep = int(code[3]) if isISBN(code) else 10
        groepen[groep] += 1
    print('Engelstalige landen: {}'.format(groepen[0] + groepen[1]))
    print('Franstalige landen: {}'.format(groepen[2]))
    print('Duitstalige landen: {}'.format(groepen[3]))
    print('Japan: {}'.format(groepen[4]))
    print('Russischtalige landen: {}'.format(groepen[5]))
    print('China: {}'.format(groepen[7]))
    print('Overige landen: {}'.format(groepen[6] + groepen[8] + groepen[9]))
    print('Fouten: {}'.format(groepen[10]))
|
[
"[email protected]"
] | |
222e0833d388b0280d65ff78eb7ee790a0581964
|
a9e3f3ad54ade49c19973707d2beb49f64490efd
|
/Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/djangoapps/student/role_helpers.py
|
ffe0f2c9f20f8f9d2d6244b6ab63b737d5bbcf22
|
[
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] |
permissive
|
luque/better-ways-of-thinking-about-software
|
8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d
|
5809eaca7079a15ee56b0b7fcfea425337046c97
|
refs/heads/master
| 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 |
MIT
| 2021-11-22T12:12:31 | 2019-01-02T14:21:30 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,412 |
py
|
"""
Helpers for student roles
"""
from openedx.core.djangoapps.django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_GROUP_MODERATOR,
FORUM_ROLE_MODERATOR,
Role
)
from common.djangoapps.student.roles import (
CourseBetaTesterRole,
CourseInstructorRole,
CourseStaffRole,
GlobalStaff,
OrgInstructorRole,
OrgStaffRole
)
def has_staff_roles(user, course_key):
    """
    Return true if a user has any of the following roles
    Staff, Instructor, Beta Tester, Forum Community TA, Forum Group Moderator, Forum Moderator, Forum Administrator
    """
    forum_roles = [FORUM_ROLE_COMMUNITY_TA, FORUM_ROLE_GROUP_MODERATOR,
                   FORUM_ROLE_MODERATOR, FORUM_ROLE_ADMINISTRATOR]
    # Evaluate every course-, org- and instance-level staff check plus the
    # forum-role check, then fold them with any().
    role_checks = (
        CourseStaffRole(course_key).has_user(user),
        CourseInstructorRole(course_key).has_user(user),
        CourseBetaTesterRole(course_key).has_user(user),
        OrgStaffRole(course_key.org).has_user(user),
        OrgInstructorRole(course_key.org).has_user(user),
        GlobalStaff().has_user(user),
        Role.user_has_role_for_course(user, course_key, forum_roles),
    )
    return any(role_checks)
|
[
"[email protected]"
] | |
28a140f400a6d510811875a29923efe76038cf73
|
ebe422519443dbe9c4acd3c7fd527d05cf444c59
|
/evaluation_expression.py
|
ae02e8d4501a759bbab9c83d68ce0494a8051e94
|
[] |
no_license
|
SaiSudhaV/coding_platforms
|
2eba22d72fdc490a65e71daca41bb3d71b5d0a7b
|
44d0f80104d0ab04ef93716f058b4b567759a699
|
refs/heads/master
| 2023-06-19T18:05:37.876791 | 2021-07-15T18:02:19 | 2021-07-15T18:02:19 | 355,178,342 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 432 |
py
|
class Solution:
    # @param A : list of strings
    # @return an integer
    def evalRPN(self, A):
        """Evaluate a Reverse Polish Notation expression and return its value.

        Fixes two defects of the original:
        * a single-operand input such as ["3"] returned the *string* "3"
          instead of the integer 3;
        * operands were spliced into a string and fed to eval(), which is
          unsafe on untrusted input -- replaced by explicit arithmetic.
        Division truncates toward zero, matching int(eval(a "/" b)) for the
        true-division semantics of the original.
        """
        stack = []
        operators = {'+', '-', '*', '/'}
        for token in A:
            if token in operators:
                right = stack.pop()
                left = stack.pop()
                if token == '+':
                    result = left + right
                elif token == '-':
                    result = left - right
                elif token == '*':
                    result = left * right
                else:
                    # int() on a true-division result truncates toward zero.
                    result = int(left / right)
                stack.append(result)
            else:
                stack.append(int(token))
        return stack.pop()
|
[
"[email protected]"
] | |
64b12d49a26a0628242f870670d9e5d34e02cb5e
|
f850e0f75a76c500f5ba8a9ab6fa6d5f40d22b23
|
/pyecharts_demo/demos/Bar/multiple_y_axes.py
|
e006b619f9172a4af780cb1631e85e41c4e503b7
|
[
"MIT"
] |
permissive
|
jay20161013/pywebio-chart-gallery
|
805afa2643b0d330a4a2f80f1e0a8827e8f61afe
|
11fd8a70b2e9ff5482cf5924b110a11f3469edfc
|
refs/heads/master
| 2023-03-20T01:58:30.979109 | 2021-03-18T12:48:31 | 2021-03-18T12:48:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,851 |
py
|
from pywebio.output import put_html
import pyecharts.options as opts
from pyecharts.charts import Bar, Line
"""
Gallery 使用 pyecharts 1.0.0
参考地址: https://www.echartsjs.com/examples/editor.html?c=multiple-y-axis
目前无法实现的功能:
1、暂无
"""
# Palette shared by the three series/axes, and the month labels for the x axis.
colors = ["#5793f3", "#d14a61", "#675bba"]
x_data = ["1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"]
legend_list = ["蒸发量", "降水量", "平均温度"]
# Twelve monthly samples per series (axis labels below show ml / ml / °C).
evaporation_capacity = [2.0, 4.9, 7.0, 23.2, 25.6, 76.7, 135.6, 162.2, 32.6, 20.0, 6.4, 3.3]
rainfall_capacity = [2.6, 5.9, 9.0, 26.4, 28.7, 70.7, 175.6, 182.2, 48.7, 18.8, 6.0, 2.3]
average_temperature = [2.0, 2.2, 3.3, 4.5, 6.3, 10.2, 20.3, 23.4, 23.0, 16.5, 12.0, 6.2]
# Bar chart with three y-axes (ported from the ECharts "multiple-y-axis"
# example): the global y-axis plus two axes added via extend_axis().
# NOTE(review): the series named 蒸发量 uses yaxis_index=0 while the axis
# titled 蒸发量 is the first *extended* axis, and the global axis is titled
# 降水量 -- the series/axis pairing looks swapped relative to the names;
# confirm against the upstream example before relying on the labels.
bar = (
    Bar(init_opts=opts.InitOpts(width="1260px", height="720px"))
    .add_xaxis(xaxis_data=x_data)
    .add_yaxis(
        series_name="蒸发量",
        yaxis_data=evaporation_capacity,
        yaxis_index=0,
        color=colors[1],
    )
    .add_yaxis(
        series_name="降水量", yaxis_data=rainfall_capacity, yaxis_index=1, color=colors[0]
    )
    .extend_axis(
        yaxis=opts.AxisOpts(
            name="蒸发量",
            type_="value",
            min_=0,
            max_=250,
            position="right",
            axisline_opts=opts.AxisLineOpts(
                linestyle_opts=opts.LineStyleOpts(color=colors[1])
            ),
            axislabel_opts=opts.LabelOpts(formatter="{value} ml"),
        )
    )
    .extend_axis(
        yaxis=opts.AxisOpts(
            type_="value",
            name="温度",
            min_=0,
            max_=25,
            position="left",
            axisline_opts=opts.AxisLineOpts(
                linestyle_opts=opts.LineStyleOpts(color=colors[2])
            ),
            axislabel_opts=opts.LabelOpts(formatter="{value} °C"),
            splitline_opts=opts.SplitLineOpts(
                is_show=True, linestyle_opts=opts.LineStyleOpts(opacity=1)
            ),
        )
    )
    .set_global_opts(
        # Global (index 0) y-axis; offset=80 pushes it clear of the first
        # extended right-hand axis.
        yaxis_opts=opts.AxisOpts(
            type_="value",
            name="降水量",
            min_=0,
            max_=250,
            position="right",
            offset=80,
            axisline_opts=opts.AxisLineOpts(
                linestyle_opts=opts.LineStyleOpts(color=colors[0])
            ),
            axislabel_opts=opts.LabelOpts(formatter="{value} ml"),
        ),
        tooltip_opts=opts.TooltipOpts(trigger="axis", axis_pointer_type="cross"),
    )
)
# Temperature line rendered on the second extended axis (index 2).
line = (
    Line()
    .add_xaxis(xaxis_data=x_data)
    .add_yaxis(
        series_name="平均温度", y_axis=average_temperature, yaxis_index=2, color=colors[2]
    )
)
# Overlay the line on the bar chart and embed the result in the PyWebIO page.
put_html(bar.overlap(line).render_notebook())
|
[
"[email protected]"
] | |
ee0d1f6ab07282ef487a55f8caa50881541945c5
|
48a7b266737b62da330170ca4fe4ac4bf1d8b663
|
/molsysmt/form/string_pdb_text/extract.py
|
73bb0feea3ace5d705b0963185af3e24f5ad4607
|
[
"MIT"
] |
permissive
|
uibcdf/MolSysMT
|
ddab5a89b8ec2377f383884c5169d147cab01322
|
c3d713ba63db24eb8a2426115cf8d9cb3665d225
|
refs/heads/main
| 2023-08-08T15:04:16.217967 | 2023-08-04T05:49:56 | 2023-08-04T05:49:56 | 137,937,243 | 15 | 3 |
MIT
| 2023-06-04T20:27:06 | 2018-06-19T19:38:44 |
Python
|
UTF-8
|
Python
| false | false | 812 |
py
|
from molsysmt._private.exceptions import NotImplementedMethodError
from molsysmt._private.digestion import digest
from molsysmt._private.variables import is_all
@digest(form='string:pdb_text')
def extract(item, atom_indices='all', structure_indices='all', copy_if_all=True):
    """Extract a subset of atoms/structures from a PDB text string.

    When both selections are 'all' the item is returned unchanged (or as a
    shallow copy when ``copy_if_all`` is True); otherwise the text is
    round-tripped through a molsysmt MolSys to apply the selection.
    """
    whole_selection = is_all(atom_indices) and is_all(structure_indices)
    if not whole_selection:
        from . import to_molsysmt_MolSys
        from ..molsysmt_MolSys import to_string_pdb_text as molsysmt_MolSys_to_string_pdb_text
        molsys = to_molsysmt_MolSys(item, atom_indices=atom_indices,
                                    structure_indices=structure_indices)
        return molsysmt_MolSys_to_string_pdb_text(molsys)
    if copy_if_all:
        from copy import copy
        return copy(item)
    return item
|
[
"[email protected]"
] | |
6394a2ecb06983781a9b4f36dfbe1b467f515d16
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/KCB_YCHF/KCB_YCHF_MM/OMS/YCHF_KCBYCHF_OMS_063.py
|
bf7954767971a8fe32cc9735084cfdcaf4130323
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,495 |
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test//xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test//service")
from ServiceConfig import *
from ARmainservice import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test//mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test//utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
from env_restart import *
class YCHF_KCBYCHF_OMS_063(xtp_test_case):
    # Regression case: stop the OMS service while an SH-A "best-5 then limit"
    # sell order is still unfilled.
    def setUp(self):
        # Environment reset steps are disabled; the case assumes a prepared state.
        #sql_transfer = SqlData_Transfer()
        #sql_transfer.transfer_fund_asset('YCHF_KCBYCHF_OMS_063')
        #clear_data_and_restart_all()
        #Api.trade.Logout()
        #Api.trade.Login()
        pass
    #
    def test_YCHF_KCBYCHF_OMS_063(self):
        title = '停止OMS服务(沪A五档即成转限价未成卖出)'
        # Define the expected outcome for this case.
        # Expected states: initial / unfilled / partially filled / fully filled /
        # partial-cancel reported / partially cancelled / reported-pending-cancel /
        # cancelled / rejected / cancel-rejected / internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need no change.
        case_goal = {
            '期望状态': '未成交',
            'errorID': 0,
            'errorMSG': queryOrderErrorMsg(0),
            '是否生成报单': '是',
            '是否是撤废': '否',
            # '是否是新股申购': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Build the order parameters ------------------------------------------
        # Args: ticker, market, security type, security status, trading status,
        # side (B = buy, S = sell), expected state, Api.
        stkparm = QueryStkPriceQty('688000', '1', '4', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, the case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '报单测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            print(stkparm['错误原因'])
            self.assertEqual(rs['报单测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':1,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_LIMIT'],
                'price': stkparm['涨停价'],
                'quantity': 300,
                'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['报单测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            ## Restore available funds (disabled).
            #sql_transfer = SqlData_Transfer()
            #sql_transfer.transfer_fund_asset('YW_KCB_BAK_000')
            #oms_restart()
            self.assertEqual(rs['报单测试结果'], True)  # 211
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
382b4289c3b1bb000f2690f9c6f2a63fe5e1583c
|
f33885d6f1e963586f9e7b1e1a46a271d125e2e7
|
/ci/nur/fileutils.py
|
338149b414047c1411f8783359d43a434d120e33
|
[
"MIT"
] |
permissive
|
nix-community/NUR
|
cad821a31d965ade9869c21f03edf9f7bb4cdf02
|
80012e6c2de5ea9c4101948b0d58c745e7813180
|
refs/heads/master
| 2023-09-03T05:05:30.497198 | 2023-09-03T04:32:01 | 2023-09-03T04:32:01 | 123,327,588 | 965 | 385 |
MIT
| 2023-09-12T07:10:52 | 2018-02-28T18:49:50 |
Python
|
UTF-8
|
Python
| false | false | 921 |
py
|
import json
import os
import shutil
from contextlib import contextmanager
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Any, Generator, Union
PathType = Union[str, Path]
def to_path(path: PathType) -> Path:
    """Coerce a str-or-Path value into a ``Path`` (returned as-is when it
    already is one)."""
    return path if isinstance(path, Path) else Path(path)
def write_json_file(data: Any, path: PathType) -> None:
    """Atomically write ``data`` as pretty-printed JSON to ``path``.

    The payload is serialized into a temporary file in the target directory
    and then moved into place with ``os.replace``, so readers never observe
    a partially written file.

    The original moved a still-open NamedTemporaryFile and then re-created
    the moved file so the close-time unlink would not raise -- fragile, and
    broken on Windows where an open file cannot be moved.  ``delete=False``
    plus an explicit rename avoids both problems.
    """
    path = to_path(path)
    tmp = NamedTemporaryFile(
        mode="w+", prefix=path.name, dir=str(path.parent), delete=False
    )
    try:
        with tmp as tmp_file:
            json.dump(data, tmp_file, indent=4, sort_keys=True)
        os.replace(tmp.name, path)  # atomic on POSIX, supported on Windows
    except BaseException:
        # Best-effort cleanup of the orphaned temp file on any failure.
        try:
            os.unlink(tmp.name)
        except OSError:
            pass
        raise
@contextmanager
def chdir(dest: PathType) -> Generator[None, None, None]:
    """Temporarily switch the working directory to ``dest``.

    The previous working directory is restored on exit, even when the body
    raises.
    """
    saved = os.getcwd()
    os.chdir(dest)
    try:
        yield
    finally:
        os.chdir(saved)
|
[
"[email protected]"
] | |
a2455184714558aeedd27f30413d548c77e63c4b
|
7e260342bb04eba9bff4289da938e859b8d68b82
|
/contrib/scripts.py
|
d6d2ef643382ab83ba2df65618bc02d78d78ab2f
|
[
"MIT"
] |
permissive
|
christopherjenness/fava
|
72c2d0e201f7792ac32a643be0479fa7623efc27
|
71c25d8a0ae08aa84150e33d464000d0161610ea
|
refs/heads/master
| 2020-04-28T15:29:34.446050 | 2019-03-12T17:58:03 | 2019-03-12T17:58:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,374 |
py
|
#!/usr/bin/env python3
"""Various utilities."""
import json
import os
from beancount.query import query_env
from beancount.query import query_parser
import click
import requests
BASE_PATH = os.path.normpath(
os.path.join(os.path.dirname(__file__), "../fava")
)
LANGUAGES = ["de", "es", "fr", "nl", "pt", "ru", "zh-CN", "sk", "uk"]
@click.group()
def cli():
    """Various utilities."""
    # Click group: subcommands are registered below via @cli.command().
def _env_to_list(attributes):
for name in attributes.keys():
if isinstance(name, tuple):
name = name[0]
yield name
@cli.command()
def generate_bql_grammar_json():
    """Generate a JSON file with BQL grammar attributes.
    The online code editor needs to have the list of available columns,
    functions, and keywords for syntax highlighting and completion.
    Should be run whenever the BQL changes."""
    target_env = query_env.TargetsEnvironment()
    # De-duplicate and sort so the output file is stable across runs.
    data = {
        "columns": sorted(set(_env_to_list(target_env.columns))),
        "functions": sorted(set(_env_to_list(target_env.functions))),
        "keywords": sorted({kw.lower() for kw in query_parser.Lexer.keywords}),
    }
    # Written next to the CodeMirror sources so the editor loads it statically.
    path = os.path.join(
        os.path.dirname(__file__),
        "../fava/static/javascript/codemirror/bql-grammar.json",
    )
    with open(path, "w") as json_file:
        json.dump(data, json_file)
@cli.command()
def download_translations():
    """Fetch updated translations from POEditor.com."""
    token = os.environ.get("POEDITOR_TOKEN")
    if not token:
        raise click.UsageError(
            "The POEDITOR_TOKEN environment variable needs to be set."
        )
    # Per language, pull the editable (.po) then the compiled (.mo) catalog,
    # preserving the original download order.
    for language in LANGUAGES:
        for file_format in ("po", "mo"):
            download_from_poeditor(language, file_format, token)
@cli.command()
def upload_translations():
    """Upload .pot message catalog to POEditor.com."""
    token = os.environ.get("POEDITOR_TOKEN")
    if not token:
        raise click.UsageError(
            "The POEDITOR_TOKEN environment variable needs to be set."
        )
    # Plain string -- the original used an f-string with no placeholders.
    path = os.path.join(BASE_PATH, "translations/messages.pot")
    click.echo(f"Uploading message catalog: {path}")
    data = {
        "api_token": token,
        "id": 90283,
        "updating": "terms",
        "sync_terms": 1,
    }
    # Close the catalog file deterministically; the original leaked the
    # handle returned by open() until garbage collection.
    with open(path, "rb") as pot_file:
        request = requests.post(
            "https://api.poeditor.com/v2/projects/upload",
            data=data,
            files={"file": pot_file},
        )
    click.echo("Done: " + str(request.json()["result"]["terms"]))
def download_from_poeditor(language, format_, token):
    """Download .{po,mo}-file from POEditor and save to disk."""
    click.echo(f'Downloading .{format_}-file for language "{language}"')
    # Only the first two letters are used for the folder name ("zh-CN" -> "zh").
    language_short = language[:2]
    data = {
        "api_token": token,
        "id": 90283,
        "language": language,
        "type": format_,
    }
    # Two-step export: request a download URL, then fetch the file contents.
    request = requests.post(
        "https://api.poeditor.com/v2/projects/export", data=data
    )
    url = request.json()["result"]["url"]
    content = requests.get(url).content
    folder = os.path.join(
        BASE_PATH, "translations", language_short, "LC_MESSAGES"
    )
    if not os.path.exists(folder):
        os.makedirs(folder)
    path = os.path.join(folder, f"messages.{format_}")
    with open(path, "wb") as file_:
        file_.write(content)
    click.echo(f'Downloaded to "{path}"')
if __name__ == "__main__":
cli()
|
[
"[email protected]"
] | |
59778d5cfdb33ed8ffbcd1d7c0f2b05cd15a366d
|
5d22d9b2cb5cad7970c1055aeef55d2e2a5acb8e
|
/py/google/cj2014/round1A/FullBinaryTree.py
|
df737dafe506eb93570aed7b49ecc60662a2dc43
|
[
"MIT"
] |
permissive
|
shhuan/algorithms
|
36d70f1ab23dab881bf1a15573fbca7b2a3f4235
|
2830c7e2ada8dfd3dcdda7c06846116d4f944a27
|
refs/heads/master
| 2021-05-07T14:21:15.362588 | 2017-11-07T08:20:16 | 2017-11-07T08:20:16 | 109,799,698 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,055 |
py
|
# -*- coding: utf-8 -*-
"""
created by huash06 at 2015-04-08 10:48
"""
__author__ = 'huash06'
import sys
import os
import py.lib.Utils as Utils
from datetime import datetime
# sys.stdin = open('input/sample.txt', 'r')
sys.stdin = open('input/B-large-practice.in', 'r')
# sys.stdout = open('output/B-large-practice.out', 'w')
MAXNN = 301
def count_node(graph, node, parent):
    """Count the nodes in the subtree rooted at ``node`` of an
    adjacency-matrix graph, never walking back through ``parent``."""
    total = 1
    for neighbor in range(len(graph)):
        if neighbor != parent and graph[node][neighbor]:
            total += count_node(graph, neighbor, node)
    return total
def dfs(graph, node, parent, memo):
    """
    Return how many nodes remain when the subtree rooted at ``node``
    (entered from ``parent``) is pruned down to a full binary tree: the
    node itself plus its two largest prunable child subtrees.
    :param graph: adjacency dict {node: [neighbor, ...]}
    :param node: current subtree root
    :param parent: node we arrived from (0 for the virtual root)
    :param memo: record calculated result
    :return: how many node in this full-binary tree rooted at node
    """
    # Sizes of the two largest keepable child subtrees (-1 = not found yet).
    max1 = -1
    max2 = -1
    # NOTE(review): the ``or True`` disables the memo lookup, so every call
    # recomputes even though memo[node][parent] is written below -- confirm
    # whether memoization was deliberately turned off.
    if memo[node][parent] == -1 or True:
        for child in graph[node]:
            if child != parent:
                nc = dfs(graph, child, node, memo)
                if nc > max1:
                    max2 = max1
                    max1 = nc
                elif nc > max2:
                    max2 = nc
        if max2 == -1:
            # Fewer than two children: a full binary tree keeps only this node.
            memo[node][parent] = 1
        else:
            memo[node][parent] = 1 + max1 + max2
    return memo[node][parent]
# Read T test cases; each gives N nodes and N-1 undirected edges.  For each
# case, try every node as the root and keep the minimum number of deletions
# needed to make the tree a full binary tree.
T = int(sys.stdin.readline())
sys.setrecursionlimit(3000)
# start_time = datetime.now()
for ti in range(1, T + 1):
    N = int(sys.stdin.readline())
    GRAPH = dict()
    for ei in range(1, N+1):
        GRAPH[ei] = list()
    for ni in range(N-1):
        # NOTE(review): T is clobbered here as an edge endpoint; harmless only
        # because the outer range(1, T + 1) was already materialized -- worth
        # renaming.
        S, T = map(int, sys.stdin.readline().strip().split(' '))
        GRAPH[S].append(T)
        GRAPH[T].append(S)
    count = N
    memo = [[-1 for c in range(N+1)] for r in range(N+1)]
    for r in range(1, N+1):
        # Deletions when rooted at r = total nodes minus nodes kept by dfs.
        c = N - dfs(GRAPH, r, 0, memo)
        if c < count:
            count = c
    print('Case #{}: {}'.format(ti, count))
|
[
"[email protected]"
] | |
d34afd28088c387fc104acc632df1276df76726e
|
b2c070e09bff49241fcff98bcde825cfa96e93ca
|
/HackerEarth/Recursion/SubsetGeneration.py
|
9af011b3289a694f328f9d18d4a03292e2e93f09
|
[
"MIT"
] |
permissive
|
Beryl2208/CI-2
|
dcb1b923f9c4f1f8b167c36c8b22a80522322c53
|
f671292dad2695e37458866442a6b951ba4e1a71
|
refs/heads/master
| 2022-12-26T19:11:28.559911 | 2020-10-06T06:27:51 | 2020-10-06T06:27:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 743 |
py
|
# Subset or Subsequence generation
# Input - "abc", Output - "a", "b", "c", "ab", "ac", "abc", "bc"
# Input - "abcd", Output - "a", "b", "c", "d", "ab", "ac", "ad", "abc", "acd", "abd", "abcd", "bc", "bcd", "bd", "cd"
# "abc" "ab" "ac" "a" "bc" "b" "c" ""
# \ / \ / \ / \ /
# "ab" "a" "b" ""
# \ / \ /
# "a" ""
# \ /
# curr = ""
# Options -
# 1) Consider curr as a part of subset
# 2) Do not consider curr as a part of subset
def Subset(s, index = 0, curr = ''):
    """Print every subsequence of s, separated by single spaces.

    At each position we branch twice: once taking s[index] into the
    current subsequence, once skipping it; a completed branch prints.
    """
    if index < len(s):
        Subset(s, index + 1, curr + s[index])  # take s[index]
        Subset(s, index + 1, curr)             # skip s[index]
    else:
        print(curr, end = ' ')
# Demo: print all subsets of "abc", then of "abcd", one line each.
Subset("abc")
print()
Subset("abcd")
print()
|
[
"[email protected]"
] | |
2641b37d027fbff1ece30b7f2825fb2fcbd20653
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/leetcode-cn/0150.0_Evaluate_Reverse_Polish_Notation.py
|
0a7404c8bbd5ea8d7d771e5b14d18c16066b3ef5
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,151 |
py
|
'''
approach: Stack
Time: O(N)
Space: O(N)
执行用时:32 ms, 在所有 Python 提交中击败了60.21%的用户
内存消耗:14.3 MB, 在所有 Python 提交中击败了76.44%的用户
'''
class Solution(object):
    def evalRPN(self, tokens):
        """
        Evaluate a Reverse Polish Notation expression.

        Operands are pushed on a stack; each operator pops its two
        operands and pushes the result, so the final stack top is the
        answer.

        Division truncates toward zero and is computed with pure integer
        arithmetic: the previous `int(a * 1.0 / b)` went through a float
        and could return a wrong quotient for operands beyond 2**53.

        :type tokens: List[str]
        :rtype: int
        """
        stack = []
        for token in tokens:
            if token in ('+', '-', '*', '/'):
                operand2 = int(stack.pop())
                operand1 = int(stack.pop())
                if token == '+':
                    result = operand1 + operand2
                elif token == '-':
                    result = operand1 - operand2
                elif token == '*':
                    result = operand1 * operand2
                else:
                    # Truncate toward zero without floating point.
                    quotient = abs(operand1) // abs(operand2)
                    result = -quotient if (operand1 < 0) != (operand2 < 0) else quotient
                stack.append(result)
            else:
                # Numeric token (possibly negative, e.g. "-11").
                stack.append(token)
        return int(stack[-1])
|
[
"[email protected]"
] | |
0d049d8ba10dab9d75bd9355eb364b3565a2349b
|
6a7e9e0e9c08132166f566bd88ae1c46ff8f9c0a
|
/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/generic_container.py
|
f6e8fff7ae9f79d55e3c6619b9dd2ff2044fb9c6
|
[
"MIT"
] |
permissive
|
ashirey-msft/azure-sdk-for-python
|
d92381d11c48f194ec9f989f5f803db614fb73f2
|
e04778e13306dad2e8fb044970215bad6296afb6
|
refs/heads/master
| 2020-03-23T06:05:39.283442 | 2018-09-15T00:18:26 | 2018-09-15T00:18:26 | 141,188,192 | 0 | 1 |
MIT
| 2018-07-16T20:02:52 | 2018-07-16T20:02:52 | null |
UTF-8
|
Python
| false | false | 2,678 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .protection_container import ProtectionContainer
class GenericContainer(ProtectionContainer):
    """Base class for generic container of backup items.

    All required parameters must be populated in order to send to Azure.

    :param friendly_name: Friendly name of the container.
    :type friendly_name: str
    :param backup_management_type: Type of backup management for the
     container. Possible values include: 'Invalid', 'AzureIaasVM', 'MAB',
     'DPM', 'AzureBackupServer', 'AzureSql', 'AzureStorage', 'AzureWorkload',
     'DefaultBackup'
    :type backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.models.BackupManagementType
    :param registration_status: Status of registration of the container with
     the Recovery Services Vault.
    :type registration_status: str
    :param health_status: Status of health of the container.
    :type health_status: str
    :param container_type: Required. Constant filled by server.
    :type container_type: str
    :param fabric_name: Name of the container's fabric
    :type fabric_name: str
    :param extended_information: Extended information (not returned in List
     container API calls)
    :type extended_information:
     ~azure.mgmt.recoveryservicesbackup.models.GenericContainerExtendedInfo
    """

    # Serialization constraints checked before sending to the service.
    _validation = {
        'container_type': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and types.
    _attribute_map = {
        'friendly_name': {'key': 'friendlyName', 'type': 'str'},
        'backup_management_type': {'key': 'backupManagementType', 'type': 'str'},
        'registration_status': {'key': 'registrationStatus', 'type': 'str'},
        'health_status': {'key': 'healthStatus', 'type': 'str'},
        'container_type': {'key': 'containerType', 'type': 'str'},
        'fabric_name': {'key': 'fabricName', 'type': 'str'},
        'extended_information': {'key': 'extendedInformation', 'type': 'GenericContainerExtendedInfo'},
    }

    def __init__(self, **kwargs):
        # Base class consumes the shared container kwargs; this subclass
        # adds its own fields and pins the polymorphic discriminator.
        super(GenericContainer, self).__init__(**kwargs)
        self.fabric_name = kwargs.get('fabric_name', None)
        self.extended_information = kwargs.get('extended_information', None)
        self.container_type = 'GenericContainer'
|
[
"[email protected]"
] | |
48ad1087d1425fbf659db1aec546c48a22425705
|
5491e80f7dc72a8091b16c26a5cfee93381ee30d
|
/Challenge202E_I_AM_BENDER_Binary_To_Text/challenge202E.py
|
a35a3a4915220b1f0ced3a8f61896c03fca380db
|
[] |
no_license
|
tuipopenoe/DailyProgrammer
|
87167c2ae275c40c3b1a30ae14497a3289f8797f
|
8d42947b576b78456fa72cdf5b886cff9f32b769
|
refs/heads/master
| 2016-09-05T21:13:30.805504 | 2015-10-16T02:57:20 | 2015-10-16T02:57:20 | 21,139,505 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 353 |
py
|
#!/usr/bin/env python
# Tui Popenoe
# challenge202E.py - Binary to String
import sys
import binascii
def i_am_bender(binary):
    """Decode a string of '0'/'1' characters into the bytes it encodes.

    The previous implementation (`binascii.unhexlify('%x' % int(binary, 2))`)
    raised an error whenever the value's hex form had an odd number of
    digits (e.g. a 7-bit input). Converting through `int.to_bytes` with a
    byte count rounded up from the bit length decodes any width.

    :param binary: binary digits, e.g. '0100100001101001'
    :return: the decoded bytes (leading zero bytes are dropped, matching
             the old '%x' formatting behaviour)
    """
    value = int(binary, 2)
    width = max(1, (value.bit_length() + 7) // 8)
    return value.to_bytes(width, 'big')
def main():
    """Decode the first CLI argument if present, otherwise stdin."""
    args = sys.argv[1:]
    source = args[0] if args else sys.stdin.read()
    print(i_am_bender(source))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
5f91841d99dce028ef4112a7f1b5929f5529de42
|
729aa3af1e6de25c0e46192ef62aaf77cc622979
|
/comentarios/models.py
|
68e967afb7853be71fb6423710c8f2e8619ff015
|
[] |
no_license
|
xuting1108/API-Pontos-Tur-sticos
|
8b583869006b8570c44eebfc885bb3db7eff4f1d
|
7a01434e806a7b3b1409f7c490071ba682525ad3
|
refs/heads/master
| 2022-11-19T15:09:48.057402 | 2020-06-15T21:38:00 | 2020-06-15T21:38:00 | 267,150,058 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 377 |
py
|
from django.db import models
from django.contrib.auth.models import User
class Comentario(models.Model):
    """Django model for a user-authored comment."""
    # Author; deleting the user cascades and removes their comments.
    usuario = models.ForeignKey(User, on_delete=models.CASCADE)
    # Free-form comment body.
    comentarios = models.TextField()
    # Creation timestamp, set automatically on insert.
    data = models.DateTimeField(auto_now_add=True)
    # Moderation flag; comments start out approved.
    aprovado = models.BooleanField(default=True)
    def __str__(self):
        # Shown in the admin and other string contexts.
        return self.usuario.username
|
[
"[email protected]"
] | |
d5e1d94b0f4269311fc4634072447854264afac3
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/CDqMdrTvfn2Wa8igp_16.py
|
12713c2aa2161258166fab90eabe089a4b047990
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 467 |
py
|
"""
Create a function that returns the next element in an **arithmetic sequence**.
In an arithmetic sequence, each element is formed by adding the same constant
to the previous element.
### Examples
next_element([3, 5, 7, 9]) ➞ 11
next_element([-5, -6, -7]) ➞ -8
next_element([2, 2, 2, 2, 2]) ➞ 2
### Notes
All input arrays will contain **integers only**.
"""
def next_element(lst):
    """Return the next term of the arithmetic sequence given in lst.

    The common difference is lst[-1] - lst[-2], so the next term is
    equivalently 2 * lst[-1] - lst[-2].
    """
    return 2 * lst[-1] - lst[-2]
|
[
"[email protected]"
] | |
4f729df74aa3cb8e7f8acf86cf08033467732bf3
|
5982a9c9c9cb682ec9732f9eeb438b62c61f2e99
|
/Problem_234/my_bad_solution.py
|
d6896b10334da48b8afeefb2a9c1fcca30a0b44b
|
[] |
no_license
|
chenshanghao/LeetCode_learning
|
6fdf98473be8f2240dd86d5586bbd1bbb95d6b0c
|
acf2395f3b946054009d4543f2a13e83402323d3
|
refs/heads/master
| 2021-10-23T05:23:01.970535 | 2019-03-15T05:08:54 | 2019-03-15T05:08:54 | 114,688,902 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 470 |
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def isPalindrome(self, head):
        """
        Collect the linked-list values front to back, then compare the
        sequence with its reverse.

        :type head: ListNode
        :rtype: bool
        """
        values = []
        node = head
        while node is not None:
            values.append(node.val)
            node = node.next
        return values == values[::-1]
|
[
"[email protected]"
] | |
67086c4670dfe4cb66c73ee192fb47a5a8183bcf
|
4597f9e8c2772f276904b76c334b4d181fa9f839
|
/Python/Compare-Version-Numbers.py
|
85b753029af257cf562da8fb4d2fb870da2c0e73
|
[] |
no_license
|
xxw1122/Leetcode
|
258ee541765e6b04a95e225284575e562edc4db9
|
4c991a8cd024b504ceb0ef7abd8f3cceb6be2fb8
|
refs/heads/master
| 2020-12-25T11:58:00.223146 | 2015-08-11T02:10:25 | 2015-08-11T02:10:25 | 40,542,869 | 2 | 6 | null | 2020-09-30T20:54:57 | 2015-08-11T13:21:17 |
C++
|
UTF-8
|
Python
| false | false | 872 |
py
|
class Solution:
    # @param version1, a string
    # @param version2, a string
    # @return -1, 0 or 1
    def compareVersion(self, version1, version2):
        """Compare two dotted version strings field by field, numerically."""
        fields1 = [int(part) for part in version1.split('.')]
        fields2 = [int(part) for part in version2.split('.')]
        # Pad the shorter list with zeros so "1" compares equal to "1.0".
        width = max(len(fields1), len(fields2))
        fields1 += [0] * (width - len(fields1))
        fields2 += [0] * (width - len(fields2))
        if fields1 < fields2:
            return -1
        if fields1 > fields2:
            return 1
        return 0
|
[
"[email protected]"
] | |
bd4bfd2045243258a2936d602e25e747bd5817ce
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_quivered.py
|
ae5ecb9ccecdd6d0e423ea42fa27b78863065fdc
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 240 |
py
|
from xai.brain.wordbase.nouns._quiver import _QUIVER
#class header
class _QUIVERED(_QUIVER, ):
    """Wordbase entry for "quivered", derived from the base word _QUIVER."""
    def __init__(self,):
        # Initialise the base entry, then override entry-specific metadata.
        _QUIVER.__init__(self)
        self.name = "QUIVERED"
        self.specie = 'nouns'  # NOTE(review): 'specie' (sic) presumably matches the wordbase-wide attribute name — confirm
        self.basic = "quiver"  # lemma / base form
        self.jsondata = {}
|
[
"[email protected]"
] | |
434c4bd312a9abd7b4c412e91f46470e4d93787a
|
3151fabc3eb907d6cd1bb17739c215a8e95a6370
|
/storagetest/pkgs/pts/compilebench/__init__.py
|
2b4e431708e278479b68217206765020f8856961
|
[
"MIT"
] |
permissive
|
txu2k8/storage-test
|
a3afe96dc206392603f4aa000a7df428d885454b
|
62a16ec57d619f724c46939bf85c4c0df82ef47c
|
refs/heads/master
| 2023-03-25T11:00:54.346476 | 2021-03-15T01:40:53 | 2021-03-15T01:40:53 | 307,604,046 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,040 |
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@file : __init__.py.py
@Time : 2020/11/12 18:27
@Author: Tao.Xu
@Email : [email protected]
"""
from .compile_bench import *
__all__ = ['CompileBench']
"""
compilebench
==============
https://oss.oracle.com/~mason/compilebench/
https://openbenchmarking.org/test/pts/compilebench
Compilebench tries to age a filesystem by simulating some of the disk IO
common in creating, compiling, patching, stating and reading kernel trees.
It indirectly measures how well filesystems can maintain directory locality
as the disk fills up and directories age.
This current test is setup to use the makej mode with 10 initial directories
Quick and dirty usage: (note the -d option changed in 0.6)
1. Untar compilebench
2. run commands:
./compilebench -D some_working_dir -i 10 -r 30
./compilebench -D some_working_dir -i 10 --makej
./compilebench --help for more
./compilebench --help for more
"""
if __name__ == '__main__':
pass
|
[
"[email protected]"
] | |
b280a2a7d4766e6375a02765b3244e920e0b405b
|
a0eb6744e6f7f509b96d21f0bc8b3f8387f6861c
|
/notebook/list_2d_sort.py
|
ed70c8ed858f38ef3ada5a56ba0468b997f515fc
|
[
"MIT"
] |
permissive
|
nkmk/python-snippets
|
a6c66bdf999502e52f4795a3074ced63bf440817
|
f9dd286a9cf93f474e20371f8fffc4732cb3c4d5
|
refs/heads/master
| 2023-08-03T04:20:05.606293 | 2023-07-26T13:21:11 | 2023-07-26T13:21:11 | 98,900,570 | 253 | 77 |
MIT
| 2020-10-25T01:12:53 | 2017-07-31T14:54:47 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 2,885 |
py
|
# Demo script: the many ways to sort a 2-D list in Python, with the
# expected output of each call recorded as a comment.
import pprint
l_2d = [[20, 3, 100], [1, 200, 30], [300, 10, 2]]
pprint.pprint(l_2d, width=20)
# [[20, 3, 100],
# [1, 200, 30],
# [300, 10, 2]]
# sorted() on a list of lists compares the rows lexicographically.
pprint.pprint(sorted(l_2d), width=20)
# [[1, 200, 30],
# [20, 3, 100],
# [300, 10, 2]]
# Sort within each row instead.
pprint.pprint([sorted(l) for l in l_2d], width=20)
# [[3, 20, 100],
# [1, 30, 200],
# [2, 10, 300]]
# Sort within each column: transpose, sort rows, transpose back.
pprint.pprint([list(x) for x in zip(*[sorted(l) for l in zip(*l_2d)])], width=20)
# [[1, 3, 2],
# [20, 10, 30],
# [300, 200, 100]]
# The same row/column sorts with NumPy's axis argument.
import numpy as np
print(np.sort(l_2d))
# [[ 3 20 100]
# [ 1 30 200]
# [ 2 10 300]]
print(np.sort(l_2d, axis=0))
# [[ 1 3 2]
# [ 20 10 30]
# [300 200 100]]
print(type(np.sort(l_2d)))
# <class 'numpy.ndarray'>
print(np.sort(l_2d).tolist())
# [[3, 20, 100], [1, 30, 200], [2, 10, 300]]
print(type(np.sort(l_2d).tolist()))
# <class 'list'>
# Ragged rows cannot form an ndarray, so np.sort raises.
l_2d_error = [[1, 2], [3, 4, 5]]
# print(np.sort(l_2d_error))
# ValueError: setting an array element with a sequence. The requested array has an inhomogeneous shape after 1 dimensions. The detected shape was (2,) + inhomogeneous part.
# Sort rows by a specific column via key=.
pprint.pprint(sorted(l_2d, key=lambda x: x[1]), width=20)
# [[20, 3, 100],
# [300, 10, 2],
# [1, 200, 30]]
pprint.pprint(sorted(l_2d, key=lambda x: x[2]), width=20)
# [[300, 10, 2],
# [1, 200, 30],
# [20, 3, 100]]
# operator.itemgetter is an equivalent, often faster key function.
import operator
pprint.pprint(sorted(l_2d, key=operator.itemgetter(1)), width=20)
# [[20, 3, 100],
# [300, 10, 2],
# [1, 200, 30]]
pprint.pprint(sorted(l_2d, key=operator.itemgetter(2)), width=20)
# [[300, 10, 2],
# [1, 200, 30],
# [20, 3, 100]]
# Ties in the first column: sort by multiple columns.
l_2d_dup = [[1, 3, 100], [1, 200, 30], [1, 3, 2]]
pprint.pprint(l_2d_dup, width=20)
# [[1, 3, 100],
# [1, 200, 30],
# [1, 3, 2]]
pprint.pprint(sorted(l_2d_dup), width=20)
# [[1, 3, 2],
# [1, 3, 100],
# [1, 200, 30]]
pprint.pprint(sorted(l_2d_dup, key=operator.itemgetter(0, 2)), width=20)
# [[1, 3, 2],
# [1, 200, 30],
# [1, 3, 100]]
pprint.pprint(sorted(l_2d_dup, key=lambda x: (x[0], x[2])), width=20)
# [[1, 3, 2],
# [1, 200, 30],
# [1, 3, 100]]
# pandas: sort_values by column label (axis=0) or row label (axis=1).
import pandas as pd
df = pd.DataFrame(l_2d_dup, columns=['A', 'B', 'C'], index=['X', 'Y', 'Z'])
print(df)
# A B C
# X 1 3 100
# Y 1 200 30
# Z 1 3 2
print(df.sort_values('C'))
# A B C
# Z 1 3 2
# Y 1 200 30
# X 1 3 100
print(df.sort_values('Z', axis=1))
# A C B
# X 1 100 3
# Y 1 30 200
# Z 1 2 3
print(df.sort_values(['A', 'C']))
# A B C
# Z 1 3 2
# Y 1 200 30
# X 1 3 100
# Default integer labels work the same way.
df = pd.DataFrame(l_2d_dup)
print(df)
# 0 1 2
# 0 1 3 100
# 1 1 200 30
# 2 1 3 2
print(df.sort_values(2))
# 0 1 2
# 2 1 3 2
# 1 1 200 30
# 0 1 3 100
print(df.sort_values(2, axis=1))
# 0 2 1
# 0 1 100 3
# 1 1 30 200
# 2 1 2 3
print(df.sort_values([0, 2]))
# 0 1 2
# 2 1 3 2
# 1 1 200 30
# 0 1 3 100
|
[
"[email protected]"
] | |
e93bfd5399e5ab1d1e5fa8e1374a7859d94a0446
|
512b388a53022f561e2375b4621f78572d3b4f04
|
/clients/migrations/0010_auto_20200904_1044.py
|
cb1046a194005d2c79ecd0cc9708388a797fa99b
|
[] |
no_license
|
Madoka09/Worker15
|
006d5ac44dc55c3ae7f72d3b8300f3567395cdff
|
181012d309052b2df3d4ef99a197e8acef73a185
|
refs/heads/master
| 2023-03-24T05:29:02.060796 | 2021-03-16T21:56:21 | 2021-03-16T21:56:21 | 336,394,683 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 386 |
py
|
# Generated by Django 3.0.4 on 2020-09-04 15:44
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ClientsAddress.altern_phone to alternate_phone."""

    dependencies = [
        ('clients', '0009_auto_20200903_2132'),
    ]

    operations = [
        migrations.RenameField(
            model_name='clientsaddress',
            old_name='altern_phone',
            new_name='alternate_phone',
        ),
    ]
|
[
"[email protected]"
] | |
c8bf10335c7c1e07b2176c968917ab7c4d5ace34
|
0f3a0be642cd6a2dd792c548cf7212176761e9b1
|
/pywps_services/r_mult.py
|
9910ee9228a37f667c6a73112163cb45b3e7d2ec
|
[] |
no_license
|
huhabla/wps-grass-bridge
|
63a5d60735d372e295ec6adabe527eec9e72635a
|
aefdf1516a7517b1b745ec72e2d2481a78e10017
|
refs/heads/master
| 2021-01-10T10:10:34.246497 | 2014-01-22T23:40:58 | 2014-01-22T23:40:58 | 53,005,463 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,141 |
py
|
# ################################################ #
# This process was generated using GrassXMLtoPyWPS #
# Author: Soeren Gebbert #
# Mail: soerengebbert <at> googlemail <dot> com #
# ################################################ #
from pywps.Process import WPSProcess
from PyWPSGrassModuleStarter import PyWPSGrassModuleStarter
class r_mult(WPSProcess):
    """PyWPS process wrapping the GRASS module r.mult (raster multiplication).

    Generated by GrassXMLtoPyWPS; inputs/outputs mirror the GRASS module's
    interface, and execution is delegated to PyWPSGrassModuleStarter.
    """
    def __init__(self):
        WPSProcess.__init__(self, identifier = 'r.mult', title = 'Multiplies a raster map with one or more raster maps', version = 1, statusSupported = True, storeSupported = True, metadata = [{'type': 'simple', 'title': 'raster'}, {'type': 'simple', 'title': 'math'}], abstract = 'http://grass.osgeo.org/grass70/manuals/html70_user/r.mult.html')
        # Literal and complex inputs
        self.addComplexInput(identifier = 'inputs', title = 'Raster maps to multiply', minOccurs = 1, maxOccurs = 1024, formats = [{'mimeType': 'image/tiff'}, {'mimeType': 'image/geotiff'}, {'mimeType': 'application/geotiff'}, {'mimeType': 'application/x-geotiff'}, {'mimeType': 'image/png'}, {'mimeType': 'image/gif'}, {'mimeType': 'image/jpeg'}, {'mimeType': 'application/x-erdas-hfa'}, {'mimeType': 'application/netcdf'}, {'mimeType': 'application/x-netcdf'}])
        # Optional mapset-resolution overrides (both must be given to take effect).
        self.addLiteralInput(identifier = 'grass_resolution_ns', title = 'Resolution of the mapset in north-south direction in meters or degrees', abstract = 'This parameter defines the north-south resolution of the mapset in meter or degrees, which should be used to process the input and output raster data. To enable this setting, you need to specify north-south and east-west resolution.', minOccurs = 0, maxOccurs = 1, type = type(0.0), allowedValues = '*')
        self.addLiteralInput(identifier = 'grass_resolution_ew', title = 'Resolution of the mapset in east-west direction in meters or degrees', abstract = 'This parameter defines the east-west resolution of the mapset in meters or degrees, which should be used to process the input and output raster data. To enable this setting, you need to specify north-south and east-west resolution.', minOccurs = 0, maxOccurs = 1, type = type(0.0), allowedValues = '*')
        self.addLiteralInput(identifier = 'grass_band_number', title = 'Band to select for processing (default is all bands)', abstract = 'This parameter defines band number of the input raster files which should be processed. As default all bands are processed and used as single and multiple inputs for raster modules.', minOccurs = 0, maxOccurs = 1, type = type(0), allowedValues = '*')
        # complex outputs
        self.addComplexOutput(identifier = 'output', title = 'The result of the mathematical operation', formats = [{'mimeType': 'image/tiff'}, {'mimeType': 'image/geotiff'}, {'mimeType': 'application/geotiff'}, {'mimeType': 'application/x-geotiff'}, {'mimeType': 'application/x-erdas-hfa'}, {'mimeType': 'application/netcdf'}, {'mimeType': 'application/x-netcdf'}])
    def execute(self):
        # Delegate the actual GRASS module run to the shared starter.
        starter = PyWPSGrassModuleStarter()
        starter.fromPyWPS("r.mult", self.inputs, self.outputs, self.pywps)
if __name__ == "__main__":
process = r_mult()
process.execute()
|
[
"soerengebbert@23da3d23-e2f9-862c-be8f-f61c6c06f202"
] |
soerengebbert@23da3d23-e2f9-862c-be8f-f61c6c06f202
|
2d51dc8a47690b543abd5f2196e6d22032e34caf
|
de3b77cb0927f28cbd85e9142c2dfd7c8be7c27e
|
/tests/migrations/015_user_demographics_up.py
|
9e08957363737d8cf8968f4a19885fea3c67bec4
|
[
"MIT"
] |
permissive
|
LoansBot/database
|
f3dcbccde59fdb80c876d2612f250662946588e6
|
eeaed26c2dcfdf0f9637b47ebe15cd1e000d8cc4
|
refs/heads/master
| 2021-07-02T22:07:18.683278 | 2021-06-02T04:09:38 | 2021-06-02T04:09:38 | 239,400,935 | 0 | 1 |
MIT
| 2021-06-02T04:14:31 | 2020-02-10T01:06:53 |
Python
|
UTF-8
|
Python
| false | false | 1,166 |
py
|
import unittest
import helper
class UpTest(unittest.TestCase):
    """Verify that the user-demographics migration created all its tables."""

    @classmethod
    def setUpClass(cls):
        # One shared connection/cursor for every check in this class.
        cls.connection = helper.setup_connection()
        cls.cursor = cls.connection.cursor()

    @classmethod
    def tearDownClass(cls):
        cls.cursor.close()
        cls.connection.rollback()
        helper.teardown_connection(cls.connection)

    def tearDown(self):
        # Discard anything an individual test may have touched.
        self.connection.rollback()

    def _assert_table(self, table_name):
        # Shared assertion: the named table must exist after the migration.
        self.assertTrue(
            helper.check_if_table_exist(self.cursor, table_name)
        )

    def test_user_demographics_exist(self):
        self._assert_table('user_demographics')

    def test_user_demographic_lookups_exist(self):
        self._assert_table('user_demographic_lookups')

    def test_user_demographic_views_exist(self):
        self._assert_table('user_demographic_views')

    def test_user_demographic_history_exist(self):
        self._assert_table('user_demographic_history')
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
1f25eaacf5c9ccac5ef060cdcaf3e75712ac30ba
|
4cc285b0c585241ff4404087e6fbb901195639be
|
/NeuralNetworkNumbers/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/nn/__init__.py
|
422cec64251f38906def1dec89cf3e9f3c1cb091
|
[] |
no_license
|
strazhg/NeuralNetworksPython
|
815542f4ddbb86e918e657f783158f8c078de514
|
15038e44a5a6c342336c119cdd2abdeffd84b5b1
|
refs/heads/main
| 2023-04-16T18:51:29.602644 | 2021-04-27T14:46:55 | 2021-04-27T14:46:55 | 361,944,482 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:f9f44f6062a76ea4edc6b57e9980c88ed09cd53ee57337d2e7cebd8696fc0e2f
size 6611
|
[
"[email protected]"
] | |
6a244e5d202b43213040fc14188fe4cf309356c2
|
a7b78ab632b77d1ed6b7e1fa46c33eda7a523961
|
/src/foreign_if/python/UT/src/eigen/test_049.py
|
558da13e6a88495e2835d12cb1b59571e2a9938d
|
[
"BSD-2-Clause"
] |
permissive
|
frovedis/frovedis
|
80b830da4f3374891f3646a2298d71a3f42a1b2d
|
875ae298dfa84ee9815f53db5bf7a8b76a379a6f
|
refs/heads/master
| 2023-05-12T20:06:44.165117 | 2023-04-29T08:30:36 | 2023-04-29T08:30:36 | 138,103,263 | 68 | 13 |
BSD-2-Clause
| 2018-12-20T10:46:53 | 2018-06-21T01:17:51 |
C++
|
UTF-8
|
Python
| false | false | 926 |
py
|
#!/usr/bin/env python
import sys
from frovedis.exrpc.server import FrovedisServer
from frovedis.linalg import eigsh
from scipy.sparse import csr_matrix
desc = "Testing eigsh() for csr_matrix and which = 'SM': "
# initializing the Frovedis server
argvs = sys.argv
argc = len(argvs)
if argc < 2:
    print ('Please give frovedis_server calling command as the first argument \n'
           '(e.g. "mpirun -np 2 /opt/nec/frovedis/ve/bin/frovedis_server")')
    quit()
FrovedisServer.initialize(argvs[1])
# sample square symmetric sparse matrix (6x6)
mat = csr_matrix([[ 2.,-1., 0., 0.,-1., 0.], [-1., 3.,-1., 0.,-1., 0.],
                  [ 0.,-1., 2.,-1., 0., 0.], [ 0., 0.,-1., 3.,-1.,-1],
                  [-1.,-1., 0.,-1., 3., 0.], [ 0., 0., 0.,-1., 0., 1.]])
try:
    # Request the 3 smallest-magnitude eigenpairs.
    eigen_vals, eigen_vecs = eigsh(mat, k = 3, which = 'SM')
    print(desc, "Passed")
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # propagate instead of being reported as a test failure.
    print(desc, "Failed")
FrovedisServer.shut_down()
|
[
"[email protected]"
] | |
f3b344d9bd81f498554471e88f34378fee094fa7
|
5a5e0a01efa6ef0961992e53bb4f64840f93150b
|
/RegressionVisualizer/manage.py
|
b5db558ef481979ffecd909114ebd0e5bdf372b6
|
[] |
no_license
|
scotteskridge/RegressionApp
|
ed059e3205ab54061129779404345b55c0dee75c
|
68932a9c94235a1e8bd6cd71a765b545f2266189
|
refs/heads/master
| 2021-01-19T20:48:13.495541 | 2017-04-25T02:39:49 | 2017-04-25T02:39:56 | 88,555,025 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 838 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "RegressionVisualizer.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Removed stray debug `print(sys.argv)` that polluted the stdout of
    # every management command (breaks commands whose output is piped).
    execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
3475609803c5fec24d9602e8f2f214ff2e1146fa
|
0c66e605e6e4129b09ea14dbb6aa353d18aaa027
|
/diventi/products/migrations/0028_auto_20200119_1557.py
|
e26102391438dd63340bedc439d85503f7d4b02e
|
[
"Apache-2.0"
] |
permissive
|
flavoi/diventi
|
58fbc8c947f387cbcc1ce607878a59a6f2b72313
|
c0b1efe2baa3ff816d6ee9a8e86623f297973ded
|
refs/heads/master
| 2023-07-20T09:32:35.897661 | 2023-07-11T19:44:26 | 2023-07-11T19:44:26 | 102,959,477 | 2 | 1 |
Apache-2.0
| 2023-02-08T01:03:17 | 2017-09-09T14:10:51 |
Python
|
UTF-8
|
Python
| false | false | 521 |
py
|
# Generated by Django 2.2.8 on 2020-01-19 14:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-declare Product.price, clarifying that it is stored in euro cents."""

    dependencies = [
        ('products', '0027_auto_20191217_0738'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='price',
            field=models.PositiveIntegerField(default=0, help_text='This price must be valued in euro cents. For example: 500 for 5.00€, 120 for 1.20€ etc.', verbose_name='price'),
        ),
    ]
|
[
"[email protected]"
] | |
5ae8895b70d3c766d80a1f22a634ad71a70d012e
|
ab1d0fcd4900e0a88d49999cbbde4b06cc441e5d
|
/Labs/Lab 5/lab05_soln/raytracer_main.py
|
9cd89a7bb62e8dba71c76dd33c177a47aecd373e
|
[] |
no_license
|
ThomasMGilman/ETGG1803_ConceptsOf3DGraphicsAndMath
|
bf261b7ce16bb686e42b1a2600aa97b4f8984b65
|
fdf4e216b117769246154cd360b2c321f4581354
|
refs/heads/master
| 2020-03-29T23:14:05.715926 | 2018-09-26T17:18:25 | 2018-09-26T17:18:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,643 |
py
|
import raytracer
import objects3d
import time
import pygame
import math3d
# Select which hard-coded window/camera test case to run (1, 2 or 3).
caseNum = 2
# Pygame setup
if caseNum == 1:
    win_width = 700; win_height = 150;
elif caseNum == 2:
    win_width = 800; win_height = 600;
else:
    win_width = 300; win_height = 200;
pygame.display.init()
screen = pygame.display.set_mode((win_width, win_height))
clock = pygame.time.Clock()
done = False
# Raytracer setup: per-case camera position, up vector, center of
# interest, near-plane distance and field of view.
if caseNum == 1:
    cameraPos = math3d.VectorN(0, 0, -20)
    cameraUp = math3d.VectorN(0, 1, 0)
    cameraCoi = math3d.VectorN(0, 0, 0)
    cameraNear = 3.2
    cameraFov = 45.0
elif caseNum == 2:
    cameraPos = math3d.VectorN(5, 7, -20)
    cameraUp = math3d.VectorN(1, 10, 0).normalized()
    cameraCoi = math3d.VectorN(2, 5, 3)
    cameraNear = 1.5
    cameraFov = 60.0
elif caseNum == 3:
    cameraPos = math3d.VectorN(-5, 7, -30)
    cameraUp = math3d.VectorN(0, 1, 0)
    cameraCoi = math3d.VectorN(2, 5, 3)
    cameraNear = 1.5
    cameraFov = 60.0
# Build the scene: camera plus a sphere, two planes and a box.
camera = objects3d.Camera(cameraPos, cameraCoi, cameraUp, screen, cameraFov, cameraNear, True)
sphere1 = objects3d.Sphere(math3d.VectorN(2,5,3), 7.0, math3d.VectorN(1,0,0))
plane1 = objects3d.Plane(math3d.VectorN(0,1,0), 5.0, math3d.VectorN(0,1,0))
plane2 = objects3d.Plane(math3d.VectorN(0.1,1,0), 4.0, math3d.VectorN(0,0,1))
box1 = objects3d.AABB(math3d.VectorN(2, 9, -6), math3d.VectorN(8, 15, 0), math3d.VectorN(1,1,0))
#mesh1 = objects3d.Polymesh("sword.obj", math3d.VectorN(-10,8,3), 1.0, math3d.VectorN(1.0,0.3,0.8))
rt = raytracer.Raytracer(camera)
rt.addObject(sphere1)
rt.addObject(plane1)
rt.addObject(plane2)
rt.addObject(box1)
#rt.addObject(mesh1)
totalTime = 0.0
currentLine = 0
# Spot-check viewplane coordinates for a few pixel positions.
print("\n+==============================================+")
print("| PHASE II tests |")
print("+==============================================+")
if caseNum == 1:
    testPts = [(0, 0), (win_width - 1, win_height - 1), (win_width // 2, win_height // 2), (113, 23), (623,83)]
else:
    testPts = [(0, 0), (win_width - 1, win_height - 1), (win_width // 2, win_height // 2), (113, 542), (723,11)]
for pygamePos in testPts:
    camera.getViewplanePosition(pygamePos[0], pygamePos[1], True)
# Game Loop: render one scanline per frame until the image is complete,
# while staying responsive to quit/escape events.
while not done:
    # Update
    if currentLine < win_height:
        rt.renderOneLine(currentLine)
        currentLine += 1
    dt = clock.tick()
    totalTime += dt
    # Input
    event = pygame.event.poll()
    if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
        done = True
    # Draw (nothing to do!)
    pygame.display.flip()
# Pygame shutdown
pygame.display.quit()
|
[
"[email protected]"
] | |
c0c758ec3f45045fd732d1505955fd973d3253de
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/abc036/D/4119191.py
|
5214b136ffa3fbda10cfeb4ddda4f643d5080a9d
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,269 |
py
|
import sys
stdin = sys.stdin
sys.setrecursionlimit(10**5)
# One-line stdin readers (competitive-programming boilerplate):
def li(): return map(int, stdin.readline().split())  # line of ints
def li_(): return map(lambda x: int(x)-1, stdin.readline().split())  # ints shifted to 0-based
def lf(): return map(float, stdin.readline().split())  # line of floats
def ls(): return stdin.readline().split()  # whitespace-separated tokens
def ns(): return stdin.readline().rstrip()  # raw line without trailing newline
def lc(): return list(ns())  # line as a list of characters
def ni(): return int(stdin.readline())  # single int
def nf(): return float(stdin.readline())  # single float
def dfs(graph:list, par:int, cur:int, mod:int):
    """Recursive tree DP over the subtree rooted at cur, entered from par.

    Returns a pair of modular counts (top_all, top_white): a leaf
    contributes (2, 1); an internal node combines each child's pair as
    white *= child_all and all *= child_white, then returns
    ((all + white) % mod, white).
    """
    kids = [nbr for nbr in graph[cur] if nbr != par]
    if not kids:
        return 2, 1
    total, white = 1, 1
    for kid in kids:
        kid_total, kid_white = dfs(graph, cur, kid, mod)
        white = (white * kid_total) % mod
        total = (total * kid_white) % mod
    return (total + white) % mod, white
# Read the tree: n nodes, n-1 edges (1-based input converted to 0-based
# indices by li_()), stored as an adjacency list.
n = ni()
graph = [[] for _ in range(n)]
MOD = 10**9+7
for _ in range(n-1):
    a,b = li_()
    graph[a].append(b)
    graph[b].append(a)
# Root the DP at node 0; the parent sentinel 0 is harmless because the
# input has no self-loops (assumed), so no neighbour is wrongly skipped.
ans, _ = dfs(graph, 0, 0, MOD)
print(ans)
|
[
"[email protected]"
] | |
2a6b93697a823699f907bd04a3d16ae2b742d3dd
|
8b683dd48ad3021990ca5133ec24a1ab260b687c
|
/worm_plates/collect/refine_coords.py
|
c86eb3c3b422cbf802411536855a272433f692d0
|
[] |
no_license
|
ver228/worm-eggs
|
fd4afa13cba12f6553c0e8225fb591d9ea3806f1
|
0b2db08d9d81c3b31d9ebcd593059db02b3ee2fe
|
refs/heads/master
| 2022-04-01T06:29:56.358944 | 2020-02-14T15:55:39 | 2020-02-14T15:55:39 | 240,544,952 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,710 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 16:22:37 2019
@author: avelinojaver
"""
from pathlib import Path
import pandas as pd
import tables
import tqdm
import cv2
import numpy as np
from skimage.feature import peak_local_max
from scipy.spatial.distance import cdist
from scipy.optimize import linear_sum_assignment
#%%
def correct_coords(img_, coords_, min_distance = 1, max_dist = 5):
    """Snap annotated coordinates onto nearby local maxima of img_.

    Each row of coords_ (x, y) is moved to the closest valid peak within
    max_dist pixels; coordinates with no acceptable peak are left as-is.
    Assignment is done in two stages: unambiguous nearest-peak matches
    first, then the remaining ones via the Hungarian algorithm.
    NOTE(review): assumes coords_ is an (N, 2) float array of (x, y) and
    img_ is a 2-D intensity image — confirm against callers.
    """
    #%%
    peaks = peak_local_max(img_, min_distance = min_distance)
    # peak_local_max returns (row, col); flip to (x, y) to match coords_.
    peaks = peaks[:, ::-1]
    #remove `peaks` that is not close by to any `coord` by at most `max_dist`
    D = cdist(coords_, peaks)
    #peaks with an intensity smaller than the coords intensities will be spurious
    peaks_ints = img_[peaks[:, 1], peaks[:, 0]]
    cc = coords_.astype(np.int)  # NOTE(review): np.int is removed in NumPy>=1.24
    coords_int = img_[cc[:, 1], cc[:, 0]]
    good = (D <= max_dist).any(axis=0)
    good &= peaks_ints >= coords_int.min()
    D = D[:, good]
    valid_peaks = peaks[good]
    #find the closest peaks
    closest_indexes = np.argmin(D, axis=1)
    #we will consider as an easy assigment if the closest peak is assigned to only one coord
    u_indexes = np.unique(closest_indexes)
    counts = np.bincount(closest_indexes)[u_indexes]
    easy_assigments = u_indexes[counts == 1]
    valid_pairs = [(ii, x) for ii, x in enumerate(closest_indexes) if x in easy_assigments]
    if len(valid_pairs) > 0:
        easy_rows, easy_cols = map(np.array, zip(*valid_pairs))
        easy_cost = D[easy_rows, easy_cols]
        good = easy_cost<max_dist
        easy_rows = easy_rows[good]
        easy_cols = easy_cols[good]
        assert (D[easy_rows, easy_cols] <= max_dist).all()
        #now hard assigments are if a peak is assigned to more than one peak
        ambigous_rows = np.ones(D.shape[0], np.bool)  # NOTE(review): np.bool is deprecated
        ambigous_rows[easy_rows] = False
        ambigous_rows, = np.where(ambigous_rows)
        ambigous_cols = np.ones(D.shape[1], np.bool)
        ambigous_cols[easy_cols] = False
        ambigous_cols, = np.where(ambigous_cols)
    else:
        # No unambiguous match at all: everything goes to the solver.
        ambigous_rows = np.arange(D.shape[0])
        ambigous_cols = np.arange(D.shape[1])
        easy_rows = np.array([], dtype=np.int)
        easy_cols = np.array([], dtype=np.int)
    D_r = D[ambigous_rows][:, ambigous_cols]
    good = (D_r <= max_dist).any(axis=0)
    D_r = D_r[:, good]
    ambigous_cols = ambigous_cols[good]
    #for this one we use the hungarian algorithm for the assigment. This assigment is to slow over the whole matrix
    ri, ci = linear_sum_assignment(D_r)
    hard_rows, hard_cols = ambigous_rows[ri], ambigous_cols[ci]
    assert (D_r[ri, ci] == D[hard_rows, hard_cols]).all()
    hard_cost = D[hard_rows, hard_cols]
    good = hard_cost<max_dist
    hard_rows = hard_rows[good]
    hard_cols = hard_cols[good]
    #let's combine both and assign the corresponding peak
    rows = np.concatenate((easy_rows, hard_rows))
    cols = np.concatenate((easy_cols, hard_cols))
    new_coords = coords_.copy()
    new_coords[rows] = valid_peaks[cols] #coords that do not satisfy the close peak condition will not be changed
    return new_coords
#%%
if __name__ == '__main__':
_debug = False
min_distance = 2
max_dist = 5
r = max_dist*2+1
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(r, r))
src_root_dir = Path.home() / 'workspace/localization/data/worm_eggs_adam/'
dst_root_dir = Path.home() / 'workspace/localization/data/worm_eggs_adam_refined/'
src_files = [x for x in src_root_dir.rglob('*.hdf5') if not x.name.startswith('.')]
for src_file in tqdm.tqdm(src_files):
with pd.HDFStore(src_file, 'r') as fid:
df = fid['/coords']
img = fid.get_node('/img')[:]
#%%
#create a mask using the known coordinates
valid_mask = np.zeros_like(img)
cols = df['cx'].astype(np.int)
rows = df['cy'].astype(np.int)
valid_mask[rows, cols] = 1
valid_mask = cv2.dilate(valid_mask, kernel) > 0
#then I will use the inverted maxima to to create local maxima corresponding to the refined eggs peaks
img_peaks = ~img
img_peaks -= img_peaks[valid_mask].min()
img_peaks[~valid_mask] = 0
#img_peaks = cv2.blur(img_peaks, (1,1))
#%%
#finaly use the correct coords function to assing each labelled coords to a local maxima
cc = df[['cx','cy']].values
new_coords = correct_coords(img_peaks, cc, min_distance, max_dist)
coords = pd.DataFrame({'type_id':1, 'cx':new_coords[:,0], 'cy':new_coords[:,1]})
coords = coords.to_records(index=False)
dst_file = str(src_file).replace(str(src_root_dir), str(dst_root_dir))
dst_file = Path(dst_file)
dst_file.parent.mkdir(exist_ok=True, parents=True)
with tables.File(str(dst_file), 'w') as fid:
fid.create_carray('/', 'img', obj = img)
fid.create_table('/', 'coords', obj = coords)
#%%
if _debug:
#%%
import matplotlib.pylab as plt
fig, axs = plt.subplots(1, 2, sharex=True, sharey=True)
axs[0].imshow(img, cmap = 'gray')
axs[1].imshow(img_peaks, cmap = 'gray')
for ax in axs:
ax.plot(df['cx'], df['cy'], '.r')
ax.plot(coords['cx'], coords['cy'], '.g')
plt.show()
#%%
break
|
[
"[email protected]"
] | |
f58c19c5218fc279438b07e3ca1976d176013a3a
|
2868a3f3bca36328b4fcff5cce92f8adeb25b033
|
/+100ns/Co_optimized/step1_dc/set.py
|
25b40663a2257d720ef9bd0d368b0791db804c94
|
[] |
no_license
|
linfranksong/TM-enzyme_input
|
1c2a5e12e69c48febd5b5900aa00fe2339d42298
|
6e46a5b2c451efb93761707b77917a98ca0bfedc
|
refs/heads/master
| 2022-03-19T19:49:09.373397 | 2019-12-04T00:11:59 | 2019-12-04T00:11:59 | 205,220,795 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,131 |
py
|
import os
dir = os.path.dirname(os.path.realpath(__file__)) + '/'
#for a in [150,200,250,300,350,400,450,500,550,600]:
for a in [150]:
#for a in [200,250,300,350,400,450,500,550,600]:
os.system("rm -r %s_dc_repe"%(a))
os.system("cp -r temp/ %s_dc_repe"%(a))
adir=dir+ "%s_dc_repe/"%(a)
os.chdir(adir)
os.system("sed -i 's/MMM/%s/g' */*pbs"%(a))
array= [0,0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078,1.0]
for n in range(1,len(array)-1):
i=array[n]
os.system("rm -r %s"%(i))
os.system("cp -r files %s"%(i))
wdir=adir+"%s/"%(i)
os.chdir(wdir)
os.system("mv eq.in %s_eq.in"%(i))
os.system("mv us.in %s_us.in"%(i))
os.system("sed -i 's/XXX/%s/g' %s_eq.in"%(i,i))
os.system("sed -i 's/XXX/%s/g' %s_us.in"%(i,i))
os.system("sed -i 's/MMM/%s/g' dis.RST"%(a))
os.system("mv eq.pbs %s_eq.pbs"%(i))
os.system("mv us.pbs %s_us.pbs"%(i))
os.system("sed -i 's/XXX/%s/g' *.pbs"%(i))
os.system("sed -i 's/NNN/%s/g' *.pbs"%(array[n+1]))
os.system("sed -i 's/PPP/%s/g' *.pbs"%(array[n-1]))
os.chdir(adir)
sdir=adir+"0/"
os.chdir(sdir)
i=0
os.system("cp /mnt/gs18/scratch/users/songlin3/run/glx-0904/+100ns/Co_optimized/step0_fep/%s_fep/1.0/%s_1.0_eq_center.rst ."%(a,a))
os.system("mv eq.in %s_eq.in"%(i))
os.system("mv us.in %s_us.in"%(i))
os.system("sed -i 's/XXX/%s/g' %s_eq.in"%(i,i))
os.system("sed -i 's/XXX/%s/g' %s_us.in"%(i,i))
os.system("mv eq.pbs %s_eq.pbs"%(i))
os.system("mv us.pbs %s_us.pbs"%(i))
os.system("sed -i 's/XXX/%s/g' *.pbs"%(i))
os.system("sed -i 's/MMM/%s/g' dis.RST"%(a))
os.system("sbatch 0_eq.pbs")
sdir=adir+"1.0/"
os.chdir(sdir)
i=1.0
os.system("mv eq.in %s_eq.in"%(i))
os.system("mv us.in %s_us.in"%(i))
os.system("sed -i 's/XXX/%s/g' %s_eq.in"%(i,i))
os.system("sed -i 's/XXX/%s/g' %s_us.in"%(i,i))
os.system("mv eq.pbs %s_eq.pbs"%(i))
os.system("mv us.pbs %s_us.pbs"%(i))
os.system("sed -i 's/XXX/%s/g' *.pbs"%(i))
os.system("sed -i 's/MMM/%s/g' dis.RST"%(a))
os.system("sed -i 's/MMM/%s/g' center.in"%(a))
os.chdir(dir)
|
[
"[email protected]"
] | |
acd65c46ffa5dd3f4fa612a415887f694e67e27f
|
9a6c5607ae6f6305f1427fe5ee37ab8a0aa9b710
|
/0 Python Fundamental/25.c.filter.py
|
bf848a8ad7d8b4c95625bf195a090ed00fc3af2e
|
[] |
no_license
|
raviitsoft/Python_Fundamental_DataScience
|
3796b957751a6d9125452bcf2aa409e64d7c8d8a
|
6f99fdd187646f0d28ffd4ddbe3ace4597c47967
|
refs/heads/master
| 2020-12-22T19:39:46.814043 | 2020-01-28T09:04:55 | 2020-01-28T09:04:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 560 |
py
|
ages = [5, 12, 17, 18, 24, 32]
def myFunc(x):
if x < 18:
return False
else:
return True
adults = filter(myFunc, ages)
# print(adults)
# print(list(adults))
#############################
z = filter(lambda a: True if a >= 18 else False, ages)
print(list(z))
z = filter(lambda a: a >= 18, ages)
print(list(z))
############################
x = [1, 2, 3, 4, 5, 99]
y = [1, 2, 6, 7, 8, 99]
z = list(filter(lambda a: a in x, y))
# print(z)
z = list(filter(lambda x: True if x<3 else False, x))
print(z)
z = list(filter(lambda x: x<3, x))
print(z)
|
[
"[email protected]"
] | |
643b2ad8db2c458d77f96dff2374d2efa0c66723
|
a00ed711e3e08b50ad6e91cc07a2cddc4a1de5ea
|
/airflow/api_connexion/schemas/dag_warning_schema.py
|
9531eb6b36bc3833a39d24bcef895f01444f9bb6
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
ishiis/airflow
|
4305794e36b611d01f49e3f2401be3dc49782670
|
292440d54f4db84aaf0c5a98cf5fcf34303f2fa8
|
refs/heads/master
| 2022-07-30T00:51:28.806940 | 2022-07-14T12:07:11 | 2022-07-14T12:07:11 | 209,801,072 | 1 | 0 |
Apache-2.0
| 2019-09-20T13:47:26 | 2019-09-20T13:47:26 | null |
UTF-8
|
Python
| false | false | 1,705 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List, NamedTuple
from marshmallow import Schema, fields
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.models.dagwarning import DagWarning
class DagWarningSchema(SQLAlchemySchema):
"""Import error schema"""
class Meta:
"""Meta"""
model = DagWarning
dag_id = auto_field(data_key="dag_id", dump_only=True)
warning_type = auto_field()
message = auto_field()
timestamp = auto_field(format="iso")
class DagWarningCollection(NamedTuple):
"""List of dag warnings with metadata"""
dag_warnings: List[DagWarning]
total_entries: int
class DagWarningCollectionSchema(Schema):
"""Import error collection schema"""
dag_warnings = fields.List(fields.Nested(DagWarningSchema))
total_entries = fields.Int()
dag_warning_schema = DagWarningSchema()
dag_warning_collection_schema = DagWarningCollectionSchema()
|
[
"[email protected]"
] | |
162eb2ee34fdecebf7be87ac009e79c0a715e25f
|
77077a391973d1f8c05647d08fc135facd04fc5e
|
/xlsxwriter/test/app/test_app02.py
|
fa347d734560186995daf0fad3e57c79c5129178
|
[
"BSD-2-Clause-Views"
] |
permissive
|
DeltaEpsilon7787/XlsxWriter
|
28fb1012eaa42ea0f82e063f28c0c548ca016c5e
|
550b9c5bd678c861dcc9f6f4072b33a69566e065
|
refs/heads/main
| 2023-08-02T09:14:10.657395 | 2021-09-06T10:51:56 | 2021-09-06T10:51:56 | 384,948,081 | 0 | 0 |
NOASSERTION
| 2021-07-11T12:57:26 | 2021-07-11T12:57:25 | null |
UTF-8
|
Python
| false | false | 2,234 |
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
import unittest
from io import StringIO
from ..helperfunctions import _xml_to_list
from ...app import App
class TestAssembleApp(unittest.TestCase):
"""
Test assembling a complete App file.
"""
def test_assemble_xml_file(self):
"""Test writing an App file."""
self.maxDiff = None
fh = StringIO()
app = App()
app._set_filehandle(fh)
app._add_part_name('Sheet1')
app._add_part_name('Sheet2')
app._add_heading_pair(('Worksheets', 2))
app._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<Properties xmlns="http://schemas.openxmlformats.org/officeDocument/2006/extended-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes">
<Application>Microsoft Excel</Application>
<DocSecurity>0</DocSecurity>
<ScaleCrop>false</ScaleCrop>
<HeadingPairs>
<vt:vector size="2" baseType="variant">
<vt:variant>
<vt:lpstr>Worksheets</vt:lpstr>
</vt:variant>
<vt:variant>
<vt:i4>2</vt:i4>
</vt:variant>
</vt:vector>
</HeadingPairs>
<TitlesOfParts>
<vt:vector size="2" baseType="lpstr">
<vt:lpstr>Sheet1</vt:lpstr>
<vt:lpstr>Sheet2</vt:lpstr>
</vt:vector>
</TitlesOfParts>
<Company>
</Company>
<LinksUpToDate>false</LinksUpToDate>
<SharedDoc>false</SharedDoc>
<HyperlinksChanged>false</HyperlinksChanged>
<AppVersion>12.0000</AppVersion>
</Properties>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
|
[
"[email protected]"
] | |
884954af9fd64a0f3d0508d1272327e2ed3bedf5
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03013/s813541273.py
|
bdbeb4a2fbfb0352ad56b6b9937305511b1f8a7c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 379 |
py
|
N, M = map(int, input().split(' '))
broken_list = []
if M > 0:
for i in range(M):
broken_list.append(int(input()))
broken_set =set(broken_list)
nums = [0] * (N + 1)
nums[0] = 1
if 1 not in broken_set:
nums[1] = 1
for i in range(2, N + 1):
nums[i] = nums[i - 1] + nums[i - 2]
if i in broken_set:
nums[i] = 0
print(nums[N] % 1000000007)
|
[
"[email protected]"
] | |
4159cf0257ad3d20a29b9c1d3308026f6be5c1cf
|
1925c535d439d2d47e27ace779f08be0b2a75750
|
/leetcode/best_time_to_buy_and_sell_stock_4.py
|
1d58d8730fa45eba6ecf813ee448ef105a05236d
|
[] |
no_license
|
arthurDz/algorithm-studies
|
ee77d716041671c4b8bb757d8d96f3d10b6589f7
|
1e4d23dd0c40df34f58d71c7ca3e6491be732075
|
refs/heads/master
| 2023-04-27T12:17:06.209278 | 2021-04-30T20:16:18 | 2021-04-30T20:16:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,462 |
py
|
# Say you have an array for which the ith element is the price of a given stock on day i.
# Design an algorithm to find the maximum profit. You may complete at most k transactions.
# Note:
# You may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again).
# Example 1:
# Input: [2,4,1], k = 2
# Output: 2
# Explanation: Buy on day 1 (price = 2) and sell on day 2 (price = 4), profit = 4-2 = 2.
# Example 2:
# Input: [3,2,6,5,0,3], k = 2
# Output: 7
# Explanation: Buy on day 2 (price = 2) and sell on day 3 (price = 6), profit = 6-2 = 4.
# Then buy on day 5 (price = 0) and sell on day 6 (price = 3), profit = 3-0 = 3.
def maxProfit(k, prices):
if not prices or k < 0: return 0
minimum = prices[0]
profit = 0
for key, v in enumerate(prices):
minimum = min(minimum, v)
if k == 1:
profit = max(profit, v - minimum)
else:
profit = max(profit, v - minimum + maxProfit(k - 1, prices[key + 1:]))
return profit
def maxProfit(k, prices):
n = len(prices)
if n < 2:
return 0
# k is big enougth to cover all ramps.
if k >= n / 2:
return sum(i - j
for i, j in zip(prices[1:], prices[:-1]) if i - j > 0)
globalMax = [[0] * n for _ in xrange(k + 1)]
for i in xrange(1, k + 1):
# The max profit with i transations and selling stock on day j.
localMax = [0] * n
for j in xrange(1, n):
profit = prices[j] - prices[j - 1]
localMax[j] = max(
# We have made max profit with (i - 1) transations in
# (j - 1) days.
# For the last transation, we buy stock on day (j - 1)
# and sell it on day j.
globalMax[i - 1][j - 1] + profit,
# We have made max profit with (i - 1) transations in
# (j - 1) days.
# For the last transation, we buy stock on day j and
# sell it on the same day, so we have 0 profit, apparently
# we do not have to add it.
globalMax[i - 1][j - 1], # + 0,
# We have made profit in (j - 1) days.
# We want to cancel the day (j - 1) sale and sell it on
# day j.
localMax[j - 1] + profit)
globalMax[i][j] = max(globalMax[i][j - 1], localMax[j])
return globalMax[k][-1]
|
[
"[email protected]"
] | |
f2af3503bf7206c6d28a8f29b727061a682f9706
|
3bafaed1d12e4e1fb221a11998a7b9a858b04644
|
/App/migrations/0013_auto_20201230_1553.py
|
fb1ff2ce8bdd568a36fb4d395ecb6cc782160ba0
|
[] |
no_license
|
nian-20/AtroBOM
|
8c96e9247292b5f4a3a4f22b7d93a8749f7ed80c
|
0370636238e722489b3fddc3a65d4e9ceb7cbfb0
|
refs/heads/master
| 2023-08-15T09:13:10.042024 | 2021-09-30T19:12:03 | 2021-09-30T19:12:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 437 |
py
|
# Generated by Django 3.1.4 on 2020-12-30 12:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('App', '0012_auto_20201230_1544'),
]
operations = [
migrations.AlterField(
model_name='rate',
name='rate',
field=models.CharField(blank=True, max_length=10, null=True, verbose_name=' ضریب مصرف '),
),
]
|
[
"[email protected]"
] | |
50a143d4fe47cc7b13e7ca802246ee09743ff7a8
|
2d82d4c6574bd6d32f2cf1c781615f7951f55f66
|
/muntjac/event/dd/acceptcriteria/and_.py
|
255229b61f9d197892bc0c331d353dba4488b0e7
|
[
"Apache-2.0"
] |
permissive
|
metaperl/muntjac
|
f83f745ee03942a61af92ee7fba7285aa9c46f3c
|
8db97712edd81b4d25deaaa48587d2a08010f2c8
|
refs/heads/master
| 2021-01-15T22:04:25.057862 | 2012-11-09T03:52:59 | 2012-11-09T03:52:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 960 |
py
|
# @MUNTJAC_COPYRIGHT@
# @MUNTJAC_LICENSE@
"""A compound criterion that accepts the drag if all of its criteria
accepts the drag."""
from muntjac.event.dd.acceptcriteria.client_side_criterion import \
ClientSideCriterion
class And(ClientSideCriterion):
"""A compound criterion that accepts the drag if all of its criteria
accepts the drag.
@see: L{Or}
"""
def __init__(self, *criteria):
"""@param criteria:
criteria of which the And criterion will be composed
"""
self.criteria = criteria
def paintContent(self, target):
super(And, self).paintContent(target)
for crit in self.criteria:
crit.paint(target)
def accept(self, dragEvent):
for crit in self.criteria:
if not crit.accept(dragEvent):
return False
return True
def getIdentifier(self):
return 'com.vaadin.event.dd.acceptcriteria.And'
|
[
"[email protected]"
] | |
73a3cec53ce6d0265522dccd62a747fdbcca6834
|
f023692f73992354a0b7823d9c49ae730c95ab52
|
/AtCoderBeginnerContest/1XX/157/D.py
|
b0ded2ec31985f6eebca56e6df87d7327321da26
|
[] |
no_license
|
corutopi/AtCorder_python
|
a959e733f9a3549fab7162023e414ac2c99c4abe
|
a2c78cc647076071549e354c398155a65d5e331a
|
refs/heads/master
| 2023-08-31T09:40:35.929155 | 2023-08-20T06:19:35 | 2023-08-20T06:19:35 | 197,030,129 | 1 | 0 | null | 2022-06-22T04:06:28 | 2019-07-15T15:57:34 |
Python
|
UTF-8
|
Python
| false | false | 2,140 |
py
|
import sys
sys.setrecursionlimit(10 ** 6)
# from decorator import stop_watch
#
#
# @stop_watch
def solve(N, M, K, ABs, CDs):
friend_map = [[] for _ in range(N + 1)]
for a, b in ABs:
friend_map[a].append(b)
friend_map[b].append(a)
block_map = [[] for _ in range(N + 1)]
for c, d in CDs:
block_map[c].append(d)
block_map[d].append(c)
def dfs(group_num, members, now_n):
belongs[now_n] = group_num
members.append(now_n)
for f in friend_map[now_n]:
if belongs[f] == -1:
members = dfs(group_num, members, f)
return members
friend_groups = []
belongs = [-1] * (N + 1)
for i in range(1, N + 1):
if belongs[i] == -1:
m = dfs(len(friend_groups), [], i)
m.sort()
friend_groups.append(m)
ans = ''
for n in range(1, N + 1):
block = 0
group = friend_groups[belongs[n]]
for b in block_map[n]:
if belongs[n] == belongs[b]:
block += 1
ans += ' ' + str(len(group) - len(friend_map[n]) - block - 1)
print(ans[1:])
if __name__ == '__main__':
# # handmade test
# N, M, K = 2 * 10 ** 5, 10 ** 5, 10 ** 5
# ABs = [[1, i] for i in range(2, 10 ** 5 + 2)]
# CDs = [[i, i + 1] for i in range(2, 10 ** 5 + 2)]
# # handmade random
# import random
# N, M, K = 20, 10, 10
# ABs = []
# while True:
# if len(ABs) == M:
# break
# a = random.randint(1, N - 1)
# b = random.randint(a + 1, N)
# if not [a, b] in ABs:
# ABs.append([a, b])
# CDs = []
# while True:
# if len(CDs) == K:
# break
# c = random.randint(1, N - 1)
# d = random.randint(c + 1, N)
# if not [c, d] in ABs and not [c, d] in CDs:
# CDs.append([c, d])
# print(N, M, K)
# print(ABs)
# print(CDs)
N, M, K = map(int, input().split())
ABs = [[int(i) for i in input().split()] for _ in range(M)]
CDs = [[int(i) for i in input().split()] for _ in range(K)]
solve(N, M, K, ABs, CDs)
|
[
"[email protected]"
] | |
6aa7e3d975d5bf066350200717a911882e17e7eb
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02572/s151845218.py
|
31aa5234e9d20d7b4ae01fd2cf130eac5d0d9908
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 434 |
py
|
N = int(input()) #入力する整数
A = list(map(int,input().split())) #入力する数列A
SUMA = sum(A) #数列の和
MOD = 10**9 + 7 # mod
C = [0] * (N-1) #累積和数列
for i in range(N-1): #\sum_{j = i+1}^{N}を求めて数列に代入する
SUMA -= A[i]
C[i] = SUMA
ans = 0 #求める答え
for i in range(N-1):
ans += A[i]*C[i]
ans %= MOD #その都度modで割った余りにする
print(ans) #答えを出力する
|
[
"[email protected]"
] | |
0225bd6623519534724f02704f9d1bdca8fa82b6
|
210af68aec4713e8cbe8dc988d509090815e6ff4
|
/0x04-python-more_data_structures/9-multiply_by_2.py
|
adcaf10fe0fc6a3ad8467a5cb752a4816fcc9910
|
[] |
no_license
|
mahdibz97/holbertonschool-higher_level_programming
|
8e383d474438ba563311f829a763ce8733931c1a
|
7184a1eadcaf76f33135c00effe4390b1c227cbd
|
refs/heads/master
| 2022-12-19T12:29:44.678292 | 2020-09-25T07:56:44 | 2020-09-25T07:56:44 | 259,281,398 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 153 |
py
|
#!/usr/bin/python3
def multiply_by_2(a_dictionary):
new = {}
for i in a_dictionary.keys():
new[i] = (a_dictionary[i] * 2)
return new
|
[
"[email protected]"
] | |
b67f2769bfefa0625cc6527943ef1b7faf9c0f9a
|
ff1fe0e31e863ab69e2434b574115fed782d76ad
|
/set.py
|
e37f9c7d1e8de9534208c0ced057cebe0e3f014c
|
[] |
no_license
|
tasnuvaleeya/python_programming
|
cd7200e0dc0c4ec6bd23c4f9360fc251a7c4a516
|
45a577634e53a1c4cab927eb770cde01a00571ce
|
refs/heads/master
| 2021-04-12T02:47:46.011445 | 2018-03-17T14:54:09 | 2018-03-17T14:54:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 152 |
py
|
groceries = {'cereal', 'milk','rice', 'beer', 'beer'}
if 'milk' in groceries:
print('you already have milk')
else:
print('oh yes u need milk')
|
[
"[email protected]"
] | |
9f46f7e89e19b7e65cfb7e37c5e03e9be0b2d4fe
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Projects/speech-text-file/gTTS/build/lib/gtts/tokenizer/symbols.py
|
3d40893c51295eda1b689b6f438f7089a38dc848
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 128 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:a7c43c0c9dfa06ad8af4ec38d5a26b50deffacc6f2b881170eb8a37576f6d970
size 278
|
[
"[email protected]"
] | |
4b063dac8fbb9c047f40f60e35b317e14d6ab716
|
ba2f34ff8a7b2c36ae88a2f02ca495ad084bb6ab
|
/Cryptanalysis/break_autokey.py
|
aecc051205c004e9a18d31b229c6ec47d72a3899
|
[
"MIT"
] |
permissive
|
BlackLuny/cyberweapons
|
bc05e07cdc67f58c9cf68178762eb541c8c0cc55
|
dfd4623f323ba702bae7c9f71132b4584636d2e5
|
refs/heads/master
| 2021-05-16T07:28:35.651835 | 2017-09-16T21:04:50 | 2017-09-16T21:04:50 | 103,801,285 | 1 | 0 | null | 2017-09-17T03:50:18 | 2017-09-17T03:50:18 | null |
UTF-8
|
Python
| false | false | 2,037 |
py
|
from ngram_score import ngram_score
from pycipher import Autokey
import re
from itertools import permutations
qgram = ngram_score('quadgrams.txt')
trigram = ngram_score('trigrams.txt')
ctext = 'isjiqymdebvuzrvwhmvysibugzhyinmiyeiklcvioimbninyksmmnjmgalvimlhspjxmgfiraqlhjcpvolqmnyynhpdetoxemgnoxl'
ctext = re.sub(r'[^A-Z]','',ctext.upper())
# keep a list of the N best things we have seen, discard anything else
class nbest(object):
def __init__(self,N=1000):
self.store = []
self.N = N
def add(self,item):
self.store.append(item)
self.store.sort(reverse=True)
self.store = self.store[:self.N]
def __getitem__(self,k):
return self.store[k]
def __len__(self):
return len(self.store)
#init
N=100
for KLEN in range(3,20):
rec = nbest(N)
for i in permutations('ABCDEFGHIJKLMNOPQRSTUVWXYZ',3):
key = ''.join(i) + 'A'*(KLEN-len(i))
pt = Autokey(key).decipher(ctext)
score = 0
for j in range(0,len(ctext),KLEN):
score += trigram.score(pt[j:j+3])
rec.add((score,''.join(i),pt[:30]))
next_rec = nbest(N)
for i in range(0,KLEN-3):
for k in xrange(N):
for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
key = rec[k][1] + c
fullkey = key + 'A'*(KLEN-len(key))
pt = Autokey(fullkey).decipher(ctext)
score = 0
for j in range(0,len(ctext),KLEN):
score += qgram.score(pt[j:j+len(key)])
next_rec.add((score,key,pt[:30]))
rec = next_rec
next_rec = nbest(N)
bestkey = rec[0][1]
pt = Autokey(bestkey).decipher(ctext)
bestscore = qgram.score(pt)
for i in range(N):
pt = Autokey(rec[i][1]).decipher(ctext)
score = qgram.score(pt)
if score > bestscore:
bestkey = rec[i][1]
bestscore = score
print bestscore,'autokey, klen',KLEN,':"'+bestkey+'",',Autokey(bestkey).decipher(ctext)
|
[
"[email protected]"
] | |
bff7768f9a5f3a84f3142fcac45842c549f8bd13
|
d5b60325d88d59bb3c6cde58036514921abfd6e9
|
/DjangoChat/DjangoChat/wsgi.py
|
c2d57315e9c4b78413c290b4da11fa09adacfd85
|
[] |
no_license
|
dagrishin/DjangoChat
|
472044874bbd1a91efe5a7e6611af02aa485acd1
|
d800fff81ac3632752e3486a90c062dde4b18780
|
refs/heads/master
| 2022-12-22T06:56:57.676392 | 2020-09-29T07:14:50 | 2020-09-29T07:14:50 | 299,532,590 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 397 |
py
|
"""
WSGI config for DjangoChat project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DjangoChat.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
10ea87ac6afba40de0a3d96e81db5dc69ef6df3d
|
7c3ad63b17b868672ff14e798bb965109c10d403
|
/src/kNN_single.py
|
6b257b9b63560794a04b98462bedff7409e85679
|
[] |
no_license
|
ternaus/kaggle_liberty
|
87cc6e5259e1ea4ce69726a83e4e642db85d8e22
|
5eb17b6bf1f6f6f6f4f6eab880592547ad41007d
|
refs/heads/master
| 2016-09-11T02:13:22.121760 | 2015-08-26T22:23:47 | 2015-08-26T22:23:47 | 39,865,075 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,025 |
py
|
from __future__ import division
__author__ = 'Vladimir Iglovikov'
from operator import itemgetter
from sklearn import metrics
from gini_normalized import normalized_gini
import numpy as np
import pandas as pd
from sklearn.grid_search import GridSearchCV
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler
import time
joined = pd.read_csv('../data/joined.csv')
train = joined[joined['Hazard'] != -1]
test = joined[joined['Hazard'] == -1]
y = train['Hazard']
X = train.drop(['Hazard', 'Id'], 1)
X_test = test.drop(['Hazard', 'Id'], 1)
scaler = StandardScaler()
print 'scaling train'
X = scaler.fit_transform(X)
print 'scaling test'
X_test = scaler.transform(X_test)
clf = KNeighborsRegressor(n_neighbors=550)
print 'fitting'
clf.fit(X, y)
print 'predicting'
prediction = clf.predict(X_test)
submission = pd.DataFrame()
submission['Id'] = test['Id']
submission['Hazard'] = prediction
submission.to_csv('kNN/kNN_{timestamp}.csv'.format(timestamp=time.time()), index=False)
|
[
"[email protected]"
] | |
01a1ef6dc25aacb7b99e3bb2d2e912e04233c3cc
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_outgoes.py
|
710d7af255478e9b9f5ce4bf9bc34b044eb81186
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 220 |
py
|
#calss header
class _OUTGOES():
def __init__(self,):
self.name = "OUTGOES"
self.definitions = outgo
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['outgo']
|
[
"[email protected]"
] | |
5a6960680cae86c401d945eb77b50e792096b7ac
|
464850ba426263b17084fc71363ca14b8278b15e
|
/80.py
|
c539164e19aa8d461121a1829efe084c3408f060
|
[] |
no_license
|
eng-arvind/python
|
8442c30ec10f979f913b354458b4f910539d8728
|
249f5f35f245a3f1742b10310de37ca6c6023af2
|
refs/heads/master
| 2020-12-23T06:40:16.911269 | 2020-02-02T18:42:01 | 2020-02-02T18:42:01 | 237,069,973 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 231 |
py
|
n = 7
for i in range(n):
for j in range(n):
if i + j == n//2 or i - j == n//2 or i + j == (n//2)*3 or j - i == n//2:
print("*", end="")
else:
print(end=" ")
print()
|
[
"[email protected]"
] | |
fb3b2fd6f3497e8dd1ded9a6c54a330aac22db31
|
3fa1b23746232975b3b014db2f525007a3b49991
|
/anna_code/demographics/rct_consented/subset_values_to_randomized_people.py
|
a4791706554ee798896de773f5da39c3e0e96e89
|
[] |
no_license
|
AshleyLab/myheartcounts
|
ba879e10abbde085b5c9550f0c13ab3f730d7d03
|
0f80492f7d3fc53d25bdb2c69f14961326450edf
|
refs/heads/master
| 2021-06-17T05:41:58.405061 | 2021-02-28T05:33:08 | 2021-02-28T05:33:08 | 32,551,526 | 7 | 1 | null | 2020-08-17T22:37:43 | 2015-03-19T23:25:01 |
OpenEdge ABL
|
UTF-8
|
Python
| false | false | 325 |
py
|
import pandas as pd
import sys
import pdb
data=pd.read_csv(sys.argv[1],header=None,sep='\t')
subjects=pd.read_csv('subjects.txt',header=None)
subset=data[data[0].isin(subjects[0])]
#nums=pd.to_numeric(subset[1],errors='coerce')
#mean_val=nums.mean()
#print(mean_val)
#std_val=nums.std()
#print(std_val)
pdb.set_trace()
|
[
"[email protected]"
] | |
63d46a52a9c3929779b4d498745424b1505a9754
|
17f29e8f3eab9deb724b10bc7e61c73f1fca21c6
|
/backend/home/migrations/0004_auto_20200320_0813.py
|
8596cdb6cafc9245c067cfa29396a8d0c4ff6f09
|
[] |
no_license
|
crowdbotics-apps/mobilemobapp-dev-2035
|
91df345e8f6e42760c4156a7dd73a6d8b17250e0
|
041b1c20c4a14b4595fbcca943cdf46dec445497
|
refs/heads/master
| 2022-04-12T06:06:17.910111 | 2020-03-20T08:13:11 | 2020-03-20T08:13:11 | 248,153,145 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,311 |
py
|
# Generated by Django 2.2.11 on 2020-03-20 08:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0003_customtext_test'),
]
operations = [
migrations.CreateModel(
name='Test',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('test', models.BigIntegerField()),
],
),
migrations.CreateModel(
name='Testing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('test', models.BigIntegerField()),
],
),
migrations.CreateModel(
name='Testtt',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('testt', models.BinaryField()),
],
),
migrations.RemoveField(
model_name='customtext',
name='test',
),
migrations.AddField(
model_name='customtext',
name='name',
field=models.BinaryField(blank=True, null=True),
),
]
|
[
"[email protected]"
] | |
53ff44496cb0984d03f5da6f7271f4c8652cc91d
|
14561adc9918f32b7f9334fa4dde08a3bfa17c26
|
/pipeline/Bacteria_denovo/Bacteria_denovo.pipeline.py
|
d4951738835c6a9781c9201f9ea8cd6c6fcab482
|
[] |
no_license
|
ZhikunWu/awesome-metagenomic
|
b932169f505d39864a91067283ad7ce954280923
|
71183f262aa539a3983af4de47f7cc69be8cf7a6
|
refs/heads/master
| 2021-10-08T00:00:00.181593 | 2018-12-06T02:07:42 | 2018-12-06T02:07:42 | 111,966,593 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,029 |
py
|
#!/usr/bin/env python
import yaml
import os
import sys
IN_PATH = config["IN_PATH"]
PIPE_DIR = config["PIPE_DIR"]
THREADS = config["THREADS"]
ThreadFold = config["ThreadFold"]
SAMPLES = config["SAMPLES"]
PROJECTS = config["PROJECTS"]
include: PIPE_DIR + "/Nano_QualityControl.rule.py"
include: PIPE_DIR + "/GenePridiction.rule.py"
rule all:
input:
expand(IN_PATH + "/clean/{sample}.fastq", sample=SAMPLES),
expand(IN_PATH + "/qualityControl/raw/nanoQC/{sample}/nanoQC.html", sample=SAMPLES),
expand(IN_PATH + "/qualityControl/raw/NanoPlot/{sample}/NanoPlot-report.html", sample=SAMPLES),
expand(IN_PATH + '/annotation/{project}/Prokka/assembly.faa', project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/tRNAscan/assembly_tRNA_gene.fna", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/RepeatMasker/assembly.fasta.out", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/RepeatModeler/assembly_RepeatModeler.txt", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/LTRFinder/LTR.txt", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/TandemRepeatFinder/TandemRepeat.txt", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/LTRFinder/finder.scn", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/LTRretriever/assembly.fasta.mod.pass.list", project=PROJECTS),
expand(IN_PATH + "/assembly/{project}/assembly.fasta.mod.out.LAI", project=PROJECTS),
expand(IN_PATH + "/assembly/{project}/assembly_index.esq", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/LTRharvest/assembly_ltrharvest.gff3", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/LTRharvest/assembly_ltrdigest.gff3", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/RepeatScout/seq_freq.txt", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/RepeatScout/seq_repeat.txt", project=PROJECTS),
|
[
"[email protected]"
] | |
fa959aa6f4a922c56b0970dcb74658e61c42d1f2
|
4ef98e50c40dc9f79ac9e422a208427f034f804d
|
/maps/models.py
|
1e2a9a1d04f3ff48376a6325fbc92a1d1d52468a
|
[] |
no_license
|
couleurmate/DeweyMaps
|
5bd4eef11d429a7f252b8fb3141a7a47697154b4
|
063e9e7e412d57d2fdaf976728aaff66eb5fd38a
|
refs/heads/master
| 2021-01-17T04:51:22.226762 | 2015-07-05T10:38:57 | 2015-07-05T10:38:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,404 |
py
|
from django.contrib.gis.db import models
from closet.models import Subcategory
class Marker(models.Model):
    """A geolocated point of interest displayed on the map.

    Each marker belongs to one or more ``Subcategory`` and exposes
    template-friendly accessors (``lat``/``lon``/``popup``).
    """
    name = models.CharField(blank=False, max_length=255)
    # geography=True stores lon/lat on a geographic (spherical) field.
    position = models.PointField(geography=True, blank=False)
    comment = models.TextField(blank=True, null=False, default="")
    subcategories = models.ManyToManyField(Subcategory)
    web = models.URLField(default="")
    phone = models.CharField(max_length=15, default="")
    adress = models.CharField(max_length=1000, default="")
    # NOTE(review): presumably non-public markers are hidden from
    # visitors -- confirm against the views using this model.
    public = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now_add=True)

    # GeoManager enables spatial queryset lookups (pre-Django-1.9 GIS API).
    objects = models.GeoManager()

    def __str__(self):
        return self.name

    @property
    def content(self):
        # Alias for ``comment``.
        return self.comment

    @property
    def lat(self):
        # GeoDjango Points are (x=longitude, y=latitude).
        return self.position.y

    @property
    def lon(self):
        return self.position.x

    @property
    def popup(self):
        """Build the HTML body of the map popup for this marker.

        Optional sections (address, phone, website) are appended only
        when the corresponding field is filled in.  The text is
        user-facing French and must not be altered here.
        """
        tpl = """<h5>{0.name}</h5>"""
        if self.adress != "":
            tpl += "<em>Adresse</em> : {0.adress}<br><br>"
        if self.phone != "":
            tpl += "<em>Téléphone</em> : {0.phone}<br><br>"
        if self.web != "":
            tpl += '<b><a target="_blank" href="{0.web}">Site web</a></b><br><br>'
        tpl += "{0.comment}<br><br>"
        tpl += '<a href="http://dewey.be/contact.html">Signaler un problème</a>'
        return tpl.format(self)
|
[
"[email protected]"
] | |
1093b9c3c57519cf4dc597bf6df497b6e31fe0fe
|
e15f86312db3109bbda053063557693518af4ead
|
/pcsk9/select_fam.py
|
35318362eec5e7e8604254ceeeedd5879854dcdc
|
[] |
no_license
|
heichiyidui/dev
|
3aecf0f92e4af4184b4eae2b1935f281b7746c86
|
73c20c19928eb94d9aec10f0d307604b147b8088
|
refs/heads/master
| 2020-12-29T01:54:24.236229 | 2016-07-01T14:51:01 | 2016-07-01T14:51:01 | 35,271,765 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,000 |
py
|
#!/usr/bin/env python3

# tail -n +2 plink.genome | awk '{print $2,$4}' > t.in

import collections


def _degree_counter(edge_list):
    """Count, for every node, how many edges in *edge_list* touch it."""
    counter = collections.Counter()
    counter.update(edge[0] for edge in edge_list)
    counter.update(edge[1] for edge in edge_list)
    return counter


# Read the edge list produced by the awk pre-processing step.
edges = []
with open('t.in') as ifile:
    for line in ifile:
        fields = line[:-1].split()
        edges.append([fields[0], fields[1]])

node_dgres = _degree_counter(edges)

# Greedily remove the most-connected node until no edges remain
# (bounded at 10000 iterations), recording the removal order.
to_remove_list = []
for _ in range(10000):
    if not edges:
        break
    # find the most connected node
    busiest = node_dgres.most_common(1)[0][0]
    to_remove_list.append(busiest)
    # drop every edge touching it, then recount degrees
    edges = [edge for edge in edges if busiest not in edge]
    node_dgres = _degree_counter(edges)

for removed in to_remove_list:
    print(removed)
|
[
"[email protected]"
] | |
fd038588e1514db2ce8a3b98d9a04bf9c08b8692
|
9c3c83007c5bf0f36635b0045b2aad7f8a11ac11
|
/novice/04-05/graphql/venv/lib/python3.6/site-packages/graphql/utils/value_from_ast.py
|
7ad52bca43bf423c08c5f077dd51404ba8164137
|
[
"MIT"
] |
permissive
|
septiannurtrir/praxis-academy
|
bc58f9484db36b36c202bf90fdfd359482b72770
|
1ef7f959c372ae991d74ccd373123142c2fbc542
|
refs/heads/master
| 2021-06-21T17:04:58.379408 | 2019-09-13T16:46:08 | 2019-09-13T16:46:08 | 203,007,994 | 1 | 0 |
MIT
| 2021-03-20T01:43:24 | 2019-08-18T13:38:23 |
Python
|
UTF-8
|
Python
| false | false | 2,920 |
py
|
from ..language import ast
from ..type import (
GraphQLEnumType,
GraphQLInputObjectType,
GraphQLList,
GraphQLNonNull,
GraphQLScalarType,
)
# Necessary for static type checking
if False: # flake8: noqa
from ..language.ast import Node
from ..type.definition import GraphQLType
from typing import Dict, Union, Optional, List
def value_from_ast(value_ast, type, variables=None):
    # type: (Optional[Node], GraphQLType, Optional[Dict[str, Union[List, Dict, int, float, bool, str, None]]]) -> Union[List, Dict, int, float, bool, str, None]
    """Given a type and a value AST node known to match this type, build a
    runtime value."""
    if isinstance(type, GraphQLNonNull):
        # The query is assumed validated, so the inner type can be
        # coerced directly without re-checking non-nullness.
        return value_from_ast(value_ast, type.of_type, variables)

    if value_ast is None:
        return None

    if isinstance(value_ast, ast.Variable):
        variable_name = value_ast.name.value
        if not variables or variable_name not in variables:
            return None
        # Variable usage is assumed validated against the schema.
        return variables.get(variable_name)

    if isinstance(type, GraphQLList):
        inner_type = type.of_type
        if isinstance(value_ast, ast.ListValue):
            return [
                value_from_ast(item_ast, inner_type, variables)
                for item_ast in value_ast.values
            ]
        # A single value coerces to a one-element list.
        return [value_from_ast(value_ast, inner_type, variables)]

    if isinstance(type, GraphQLInputObjectType):
        if not isinstance(value_ast, ast.ObjectValue):
            return None
        ast_fields = {
            field_ast.name.value: field_ast for field_ast in value_ast.fields
        }
        coerced = {}
        for field_name, field in type.fields.items():
            # We use out_name as the output name for the dict if it exists.
            out_key = field.out_name or field_name
            field_ast = ast_fields.get(field_name)
            if field_ast is None:
                if field.default_value is not None:
                    coerced[out_key] = field.default_value
                continue
            coerced[out_key] = value_from_ast(
                field_ast.value, field.type, variables
            )
        return type.create_container(coerced)

    assert isinstance(type, (GraphQLScalarType, GraphQLEnumType)), "Must be input type"
    return type.parse_literal(value_ast)
|
[
"[email protected]"
] | |
7e5dbb102fab53228104ce9a43c6407ab1972c45
|
50989266203628be7649d152392f4a1789997b90
|
/lisp.py
|
9c96a7942a34631c24cce5c62058308aa3242b27
|
[] |
no_license
|
cheery/snakelisp
|
b2820819959be4ed0b62a60c511b15623ae5589e
|
c62c0401e7d8cbd63afb8a7242850f7740420614
|
refs/heads/master
| 2020-05-15T08:53:26.443191 | 2014-09-16T15:55:43 | 2014-09-16T15:55:43 | 23,539,541 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,257 |
py
|
#!/usr/bin/env python
from pythonboot.blip import ListNode, TextNode, MarkNode, isList, isText, isMark, open_list
import json
import transpiler
from cps import Call, Lambda, Assign, Variable, Constant, Environ, null, true, false
import subprocess
import sys
import re
# call = Call([arguments]), call[i]
# lambda = Lambda([arguments], body), lambda[i]
# Assign(var, val, body)
# Variable(name, value)
# Constant(value)
def main():
    """Compile the blip source file named on the command line to C.

    Pipeline: parse, CPS-compile, coalesce the CPS graph, bind the
    remaining free names to C runtime roots, transpile to C, and invoke
    gcc against the snakelisp C runtime.
    """
    path = sys.argv[1]
    mks = []  # NOTE(review): appears unused in this function
    env = Environ()
    ret = env.new_argument("cont", False)
    # NOTE(review): the environment/continuation pair is created twice;
    # only the second (fresh) env/ret is used below -- confirm the first
    # pair is intentional.
    env = env.new_environ()
    ret = env.new_argument('cont', False)
    exprs = open_list(path).strip_rec()
    #exprs = list(open_list("base.sl")) + list(open_list(path))
    program = env.close(compile_list(exprs, env, ret))
    program = program.coalesce()
    snakesource = "snakelisp.c"
    # Scan the C runtime for newRoot("name", ...) declarations so that
    # global lisp names can be bound to slots of the C root array.
    rootdecl = re.compile(r'newRoot\("(.+)",')
    with open(snakesource) as fd:
        src = fd.read()
    c_roots = dict((decl, "(root+{})".format(i)) for i, decl in enumerate(rootdecl.findall(src)))
    # Names provided directly by the C API rather than the root table.
    c_api = {
        "uncallable-hook": "&uncallable_hook",
        "type-error-hook": "&type_error_hook",
    }
    c_use = set()
    for var in env.seal():
        if var.name in c_roots:
            var.c_handle = c_roots[var.name]
            continue
        # Anything not in the root table must be a known C API hook
        # (raises KeyError otherwise).
        var.c_handle = c_api[var.name]
        c_use.add(var.c_handle)
    # Emit extern declarations for the used C API handles ('&' stripped).
    cdefns = ["extern value_t {};".format(value[1:]) for value in c_use]
    #import visuals
    #visuals.create_graph("demo.png", program)
    source = transpiler.transpile(program, cdefns, path)
    open(path+'.c', 'w').write(source)
    subprocess.call(["gcc", path+'.c', snakesource, "-I.", "-lm"])
# Literal names available in every scope.
constants = {'null': null, 'true':true, 'false':false}

def compile(expr, env, k):
    """CPS-compile a single expression node, delivering its value to *k*.

    *k* is either a CPS continuation value or a Python callable taking
    the result (see ``retrieve``/``lift``).  Branches are tried in
    order: special forms first, then calls, then literals/identifiers.
    """
    # (include "file"): splice in another source file.
    if isList(expr, 'include') and isText(expr[0]):
        return compile_list(open_list(expr[0].text).strip_rec(), env, k)
    # (let name expr): bind a new local.
    if isList(expr, 'let') and isText(expr[0]):
        var = env.get_local(expr[0].text)
        return compile(expr[1], env,
            (lambda val: Assign(var, val, retrieve(k, val))))
    # (set name expr): assign an existing binding.
    if isList(expr, 'set') and isText(expr[0]):
        var = env.lookup(expr[0].text)
        return compile(expr[1], env,
            (lambda val: Assign(var, val, retrieve(k, val))))
    if isList(expr, 'cond'):
        return compile_cond(expr, env, k)
    if isList(expr, 'while'):
        return compile_while(expr, env, k)
    # (func (args...) body...): compile a closure in a fresh environment.
    if isList(expr, 'func'):
        env = env.new_environ()
        ret = env.new_argument('cont', False)
        for sym in expr[0]:
            assert sym.label == ''
            env.new_argument(sym.text)
        return retrieve(k, env.close(compile_list(expr[1:], env, ret)))
    # (infix a op b) is sugar for (op a b).
    if isList(expr, 'infix') and len(expr) == 3:
        return compile(ListNode([expr[1], expr[0], expr[2]]), env, k)
    # Unlabeled list: a function call; evaluate callee then arguments
    # left to right, then emit the CPS Call.
    if isList(expr, ''):
        params = []
        seq = list(expr)
        def next_parameter(param):
            params.append(param)
            if len(seq) > 0:
                return compile(seq.pop(0), env, next_parameter)
            else:
                callee = params.pop(0)
                return Call([callee, lift(k)] + params)
        return compile(seq.pop(0), env, next_parameter)
    #if expr.group == 'integer':
    #    return retrieve(k, Constant(expr.value))
    #if expr.group == 'double':
    #    return retrieve(k, Constant(expr.value))
    if isText(expr, "string"):
        return retrieve(k, Constant(expr.text))
    # Bare text: integer literal, builtin constant, or identifier lookup.
    if isText(expr, ''):
        if expr.text[:1].isdigit():
            return retrieve(k, Constant(int(expr.text)))
        if expr.text in constants:
            param = constants[expr.text]
        else:
            param = env.lookup(expr.text)
        return retrieve(k, param)
    raise Exception("what is {}?".format(expr))
def compile_list(exprs, env, k):
    """Compile *exprs* in sequence; only the last value reaches *k*.

    An empty sequence simply resumes *k* with null.
    """
    remaining = list(exprs)
    if not remaining:
        return retrieve(k, null)

    def step(_ignored):
        # Compile the next expression; intermediate results are dropped
        # by routing them back into ``step``.
        expr = remaining.pop(0)
        continuation = step if remaining else k
        return compile(expr, env, continuation)

    return step(null)
def retrieve(k, param):
    """Deliver *param* to continuation *k*.

    A Python callable is invoked directly at compile time; anything else
    is treated as a CPS value and receives the result via a Call node.
    """
    if not callable(k):
        return Call([k, param])
    return k(param)
def lift(k):
    """Reify a Python-callable continuation as a one-argument CPS Lambda.

    CPS values (non-callables) pass through unchanged.
    """
    if not callable(k):
        return k
    arg = Variable()
    return Lambda([arg], k(arg))
def compile_cond(expr, env, k):
    """Compile a (cond ...) form into nested runtime picks.

    Clauses are tried in order; a trailing (else ...) clause, if
    present, must be last.  An empty or exhausted cond yields null.
    """
    seq = list(expr[0:])
    if len(seq) == 0:
        return retrieve(k, null)
    def next_cond(k):
        # Compile the next clause; when its condition is false, control
        # falls through to the following clause via a fresh continuation.
        if len(seq) == 0:
            return retrieve(k, null)
        head = seq.pop(0)
        if len(seq) == 0 and isList(head, 'else'):
            return compile_list(head[0:], env, k)
        if isList(head, 'else'):
            # 'else' anywhere but last is a syntax error.
            raise Exception("invalid cond expression")
        return compile(head[0], env,
            (lambda truth: pick(env, k, truth,
                enclose(head[1:], env),
                lambdaCont(next_cond))))
    return next_cond(k)
def compile_while(expr, env, k):
    """Compile a (while cond body...) form as a self-calling CPS lambda.

    *self* names the loop lambda so the body can re-enter it; when the
    condition turns false the loop resumes *k* with null (lambdaNull).
    """
    self = Variable()
    seq = expr[1:]  # NOTE(review): unused; the body is re-read below
    def compile_body(k):
        # Run the body, discard its value, jump back to the loop head.
        return compile_body_inner = None  # placeholder -- see below
    cont = Variable()
    looplambda = Lambda([cont], compile(expr[0], env,
        (lambda truth: pick(env, cont, truth, lambdaCont(compile_body), lambdaNull()))))
    return Assign(self, looplambda, Call([self, lift(k)]), True)
def pick(env, k, truth, yes, no):
    """Emit a runtime branch: call the implicit 'pick' primitive with the
    lifted continuation, the condition, and the two branch thunks."""
    chooser = env.new_implicit('pick')
    return Call([chooser, lift(k), truth, yes, no])
def lambdaNull():
    """A CPS lambda that immediately resumes its continuation with null."""
    resume = Variable()
    return Lambda([resume], Call([resume, null]))
def lambdaCont(func):
    """Wrap *func* (continuation -> body) as a one-argument CPS Lambda."""
    resume = Variable()
    return Lambda([resume], func(resume))
def enclose(exprs, env):
    """Compile *exprs* into a CPS Lambda that runs them with the
    continuation it receives."""
    resume = Variable()
    return Lambda([resume], compile_list(exprs, env, resume))
#def open_list(path):
# with open(path, 'r') as fd:
# plop = json.load(fd)
# return decodeJson(plop)
#
#def decodeJson(node):
# if node["type"] == "list":
# return ListNode([decodeJson(a) for a in node["list"]], node["label"] or '').strip()
# elif node["type"] == 'text':
# return TextNode(node["text"], node["label"] or '')
# elif node["type"] == 'mark':
# return MarkNode(node["label"] or '')
# else:
# raise Exception("unknown {}".format(node))
# Script entry point.
if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
992f8823515ccee3a140f890755137552e8928d4
|
438ee853669a67cd46537f6d02cf356d05e03681
|
/doctor_dashboard/urls.py
|
47694bb78b753fc56cdb14fe68d5c7380a309fe8
|
[] |
no_license
|
tngeene/doc_appointment
|
a6648bed5c3d1d27e25131945910c5c425468fa1
|
6d1f320db03ad9fcc42b09e19a0d0a73e5af233a
|
refs/heads/master
| 2023-02-22T05:37:36.510685 | 2021-01-19T11:46:01 | 2021-01-19T11:46:01 | 324,834,090 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 424 |
py
|
from django.urls import path, include
# URL namespace used when reversing, e.g. "doctor_dashboard:...".
app_name = "doctor_dashboard"

# Each sub-route module carries its own urlpatterns; the doctors and
# patients routes are currently disabled.
urlpatterns = [
    path('', include('doctor_dashboard.routes.index')),
    path('appointments/', include('doctor_dashboard.routes.appointments')),
    # path('doctors/', include('doctor_dashboard.routes.doctors')),
    # path('patients/', include('doctor_dashboard.routes.patients')),
    path('events/', include('doctor_dashboard.routes.events')),
]
|
[
"[email protected]"
] | |
d29ecd2dab536aba7307bb95697055dbc30cf2aa
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_cog_tags/initial_3377.py
|
561d811f19c812512cfb3db4c9e030dcd1210575
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,331 |
py
|
import _surface
import chimera
try:
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
try:
    from VolumePath import Marker_Set, Link
    new_marker_set=Marker_Set
except:
    # Older Chimera builds expose marker sets through the dialog object.
    from VolumePath import volume_path_dialog
    d= volume_path_dialog(True)
    new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}

# All markers share one radius.  Each entry is
# (marker set name, (x, y, z) position, (r, g, b) color); the original
# generated file repeated the same 5-line pattern once per marker, so
# the data is tabulated here and placed by a single loop in the same
# order, with identical side effects on marker_sets.
_RADIUS = 21.9005
_MARKERS = [
    ("Cog1_Anch", (262, 533, 768), (0, 0, 1)),
    ("Cog2_GFPN", (911, 601, 823), (1, 0.5, 0)),
    ("Cog2_GFPC", (932, 878, 424), (1, 0.5, 0)),
    ("Cog2_Anch", (897, 147, 198), (1, 0.5, 0)),
    ("Cog3_GFPN", (654, 184, 344), (1, 0.87, 0)),
    ("Cog3_GFPC", (563, 71, 808), (1, 0.87, 0)),
    ("Cog3_Anch", (515, 319, 492), (1, 0.87, 0)),
    ("Cog4_GFPN", (194, 440, 798), (0.97, 0.51, 0.75)),
    ("Cog4_GFPC", (535, 777, 166), (0.97, 0.51, 0.75)),
    ("Cog4_Anch", (143, 239, 358), (0.97, 0.51, 0.75)),
    ("Cog5_GFPN", (320, 498, 370), (0.39, 0.31, 0.14)),
    ("Cog5_GFPC", (288, 147, 63), (0.39, 0.31, 0.14)),
    ("Cog5_Anch", (949, 360, 485), (0.39, 0.31, 0.14)),
    ("Cog6_GFPN", (436, 819, 284), (0.6, 0.31, 0.64)),
    ("Cog6_GFPC", (44, 825, 43), (0.6, 0.31, 0.64)),
    ("Cog6_Anch", (11, 479, 395), (0.6, 0.31, 0.64)),
    ("Cog7_GFPN", (991, 520, 392), (0.89, 0.1, 0.1)),
    ("Cog7_GFPC", (788, 680, 50), (0.89, 0.1, 0.1)),
    ("Cog7_Anch", (475, 141, 883), (0.89, 0.1, 0.1)),
    ("Cog8_GFPC", (184, 381, 961), (0.3, 0.69, 0.29)),
    ("Cog8_Anch", (694, 467, 322), (0.3, 0.69, 0.29)),
]

for _name, _pos, _color in _MARKERS:
    # Create each marker set on first use, then place one marker in it.
    if _name not in marker_sets:
        marker_sets[_name] = new_marker_set(_name)
    s = marker_sets[_name]
    mark = s.place_marker(_pos, _color, _RADIUS)

for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
|
[
"[email protected]"
] | |
82f573ab57442baca38130076f8b17ddd1163034
|
a665f561b103a51404785f35d0026c60f0083cb4
|
/0x05-python-exceptions/101-safe_function.py
|
38683ee508361b035c621dad79ea63525fad197f
|
[] |
no_license
|
Joshua-Enrico/holbertonschool-higher_level_programming
|
c5f3c9ab55167ea2e7ea3b31dd8edf2e22a18bde
|
8c1559f9c772b60186e899e17c67d299f88de726
|
refs/heads/main
| 2023-07-31T17:45:16.723947 | 2021-09-23T00:29:25 | 2021-09-23T00:29:25 | 361,960,411 | 1 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 235 |
py
|
#!/usr/bin/python3
def safe_function(fct, *args):
    """Call fct(*args); print any exception to stderr and return None."""
    import sys
    try:
        return fct(*args)
    except Exception as error:
        print("Exception: {}".format(error), file=sys.stderr)
        return None
|
[
"[email protected]"
] | |
d4c44550df6570a3c03d89d628513a25c2868572
|
0ae589f33fbf37a6af830dd7494cc576f267f202
|
/scenario/settings.py
|
ea8db96a3b7c5d412a773b2d60a74cbfa2abfd55
|
[] |
no_license
|
vamsi9477/sosioHosting
|
85be712762738604625a13569f85aa986c31d5b0
|
42dbe2171a32b4cf40d202f16d89c49db9b3c10e
|
refs/heads/master
| 2020-04-05T01:09:02.486917 | 2018-11-06T18:03:07 | 2018-11-06T18:03:07 | 156,425,173 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,136 |
py
|
"""
Django settings for scenario project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*l#4^7y1%o0r9p01f)lz7mcdw-nc9#2iet=ak3ma9rj53f+zyh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'sc1.apps.Sc1Config',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'scenario.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'scenario.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
eeedc6e06be66be4ba83b0914b71cabc517a8dc2
|
ad010f3ecdaa260b2d8732b8b784d58b3c812b9e
|
/spider_admin_pro/config/yaml_config.py
|
a43dc91138192f1c70a92ea9429b25cabd30f721
|
[] |
no_license
|
laashub-soa/spider-admin-pro
|
52261816015afa672176423f38d0206f9bbafa15
|
5faefebd25ad6a163a6a7d18076dc10adba7d970
|
refs/heads/master
| 2023-08-14T01:24:15.659796 | 2021-09-27T04:15:52 | 2021-09-27T04:15:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,533 |
py
|
# -*- coding: utf-8 -*-

#################################
# Load user-supplied overrides from an optional config.yml; every
# setting falls back to its env_config default when absent.
#################################

import os

import yaml

from spider_admin_pro.config import env_config
from spider_admin_pro.logger import logger

# config.yml is looked up in the process working directory, not the
# package directory.
config_file = os.path.join(os.getcwd(), 'config.yml')

logger.info('config_file: %s', config_file)

if os.path.exists(config_file):
    # Context manager guarantees the handle is closed even if parsing
    # fails; safe_load returns None for an empty file, so normalize
    # that to {} to keep the config.get(...) calls below working.
    with open(config_file, "rb") as f:
        config = yaml.safe_load(f) or {}
else:
    config = {}

# Flask server settings
FLASK_PORT = config.get('PORT', env_config.FLASK_PORT)
FLASK_HOST = config.get('HOST', env_config.FLASK_HOST)

# Login credentials
BASIC_AUTH_USERNAME = config.get('USERNAME', env_config.BASIC_AUTH_USERNAME)
BASIC_AUTH_PASSWORD = config.get('PASSWORD', env_config.BASIC_AUTH_PASSWORD)
BASIC_AUTH_JWT_KEY = config.get('JWT_KEY', env_config.BASIC_AUTH_JWT_KEY)

# Token lifetime, in days
BASIC_AUTH_EXPIRES = config.get('EXPIRES', env_config.BASIC_AUTH_EXPIRES)

# scrapyd address; no trailing slash
SCRAPYD_SERVER = config.get('SCRAPYD', env_config.SCRAPYD_SERVER)

# Scheduler: schedule-history storage
# mysql or sqlite and other, any database for peewee support
SCHEDULE_HISTORY_DATABASE_URL = config.get('SCHEDULE_HISTORY_DATABASE_URL',
                                           env_config.SCHEDULE_HISTORY_DATABASE_URL)

# Scheduler: cron-job storage address
JOB_STORES_DATABASE_URL = config.get('JOB_STORES_DATABASE_URL', env_config.JOB_STORES_DATABASE_URL)

# Log directory
LOG_DIR = config.get("LOG_DIR", env_config.LOG_DIR)
|
[
"[email protected]"
] | |
096bc1c7152955fc7efee92dc96b6923843848ec
|
ee41311a11a1c6baedafd9a914d5a1f8330fe8a9
|
/SANEF_LIVE/venv/Lib/site-packages/anaconda_navigator/widgets/tabs/tests/test_environments_tab.py
|
2e4d36bd2647c721b4161cbc2957d1664db066a3
|
[] |
no_license
|
sethnanati/CodeRepoPython
|
2dffb7263620bd905bf694f348485d894a9513db
|
b55e66611d19b35e9926d1b1387320cf48e177c8
|
refs/heads/master
| 2023-07-07T11:16:12.958401 | 2021-02-13T10:09:48 | 2021-02-13T10:09:48 | 376,531,283 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,911 |
py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016-2017 Anaconda, Inc.
#
# May be copied and distributed freely only as part of an Anaconda or
# Miniconda installation.
# -----------------------------------------------------------------------------
"""Tests for environments tab."""
# yapf: disable
# Standard library imports
import sys
# Third party imports
from qtpy.QtCore import Qt
import pytest
# Local imports
from anaconda_navigator.api.conda_api import CondaAPI
from anaconda_navigator.utils.fixtures import tmpfile, tmpfolder
from anaconda_navigator.widgets.dialogs import MessageBoxError
from anaconda_navigator.widgets.tabs.environments import EnvironmentsTab
# yapf: enable
# Re-reference the fixture imports so linters don't flag them as
# unused; pytest discovers the fixtures through these module names.
tmpfile
tmpfolder

PY3 = sys.version_info >= (3, 4)
xfail = pytest.mark.xfail


@pytest.fixture()
def env_tab(qtbot, tmpfile):
    """Build an EnvironmentsTab under qtbot, wait for its first status
    update, and return (widget, qtbot, tmpfile path)."""
    widget = EnvironmentsTab()
    qtbot.addWidget(widget)
    widget.show()
    widget.setup_tab(metadata={})
    widget.load_environment()
    # Block until the tab reports it finished loading.
    with qtbot.waitSignal(widget.sig_status_updated) as blocker:
        blocker
    return widget, qtbot, tmpfile


# Auto-confirm the error dialog so tests never block on a modal window.
MessageBoxError.exec_ = lambda *args: True
class TestEnvironmentsTab:
    """Integration tests that drive the environments tab against a real
    conda installation (hence the xfail markers)."""

    def package_version(self, pkg, name='root'):
        # Resolve the installed version (with build string) of *pkg*
        # in environment *name* through the conda API.
        api = CondaAPI()
        return api.package_version(name=name, pkg=pkg, build=True)

    def remove_env(self, widget):
        # Synchronously delete the scratch 'navigatortest' environment.
        worker = widget.packages_widget.remove_environment(
            name='navigatortest'
        )
        worker.communicate()  # run create

    @xfail
    def test_bad_create(self, env_tab):  # analysis:ignore
        """Importing a YAML env with a nonexistent package must surface
        the error popup rather than succeed silently."""
        widget, qtbot, tmpfile = env_tab
        with open(tmpfile, 'w') as f:
            raw = "name: navigatortest\ndependencies:\n- not-real=0.0.0=py36_0"
            f.write(raw)
        worker = widget.packages_widget.import_yaml(
            name="navigatortest", yaml=tmpfile
        )
        with qtbot.waitSignal(widget.sig_error_popped_up, timeout=5000):
            with qtbot.waitSignal(worker.sig_finished, timeout=5000):
                worker.name = "navigatortest"
                worker.sig_finished.connect(widget._environment_created)

    @xfail
    def test_ipython_option(self, env_tab, tmpfolder):
        """The per-environment options menu entry at index 2 should only
        enable once 'ipython' is installed (jupyter-core alone is not
        enough)."""
        widget, qtbot, tmpfile = env_tab
        # Pin the current root python version for the scratch env.
        pyver = 'python={0}'.format(self.package_version('python'))

        self.remove_env(widget)

        worker = widget.packages_widget.create_environment(
            name='navigatortest', packages=[pyver]
        )
        worker.name = 'navigatortest'
        worker.communicate()  # run create
        widget._environment_created(worker, "", "")
        # Auto-accept the context menu so the click does not block.
        widget.menu_list.exec_ = lambda *args: True
        qtbot.mouseClick(
            widget.list_environments.currentItem().button_options,
            Qt.LeftButton
        )
        is_action_enabled = widget.menu_list.actions()[2].isEnabled()
        assert not is_action_enabled

        worker = widget.packages_widget.api.conda_install(
            name='navigatortest', pkgs=['jupyter-core']
        )
        worker.communicate()
        qtbot.mouseClick(
            widget.list_environments.currentItem().button_options,
            Qt.LeftButton
        )
        assert not widget.menu_list.actions()[2].isEnabled()

        worker = widget.packages_widget.api.conda_install(
            name='navigatortest', pkgs=['ipython']
        )
        worker.communicate()
        qtbot.mouseClick(
            widget.list_environments.currentItem().button_options,
            Qt.LeftButton
        )
        assert widget.menu_list.actions()[2].isEnabled()

        worker = widget.packages_widget.remove_environment(
            name='navigatortest'
        )
        worker.communicate()  # run create
        self.remove_env(widget)
|
[
"[email protected]"
] | |
382ec61b5d92e38174ded2840080940b3653dd40
|
d72505a7961bf7f96094a6c7013f3c794495044b
|
/client.py
|
4856fbb4f78c0f80314f35362b41858153512a26
|
[] |
no_license
|
520hacker/websocket-connetions-benchmark
|
fa6ce757ec9cd68c5bcd60a5421700af6ae4814b
|
af609d775742cfeca5714133cddea32c8b0c51c0
|
refs/heads/master
| 2020-06-05T19:20:43.277616 | 2019-02-13T08:08:55 | 2019-02-13T08:08:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,821 |
py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# debug in python 3.6
#__author__ == 'ipcpu'
import websocket
import time
import threading
import json
import multiprocessing
from threadpool import ThreadPool, makeRequests
# Change this to your own websocket address.
WS_URL = "ws://10.140.12.45:8888/"
# Number of worker processes to spawn.
processes=5
# Threads per process (per-process fd limits may cap this around 1024;
# raise fs.file / ulimit settings if needed).
thread_num=5000
def on_message(ws, message):
    # Print every message received from the server.
    print(message)
    pass
def on_error(ws, error):
    # Print connection errors; the benchmark keeps running regardless.
    print(error)
    pass
def on_close(ws):
    # Log that the connection was closed by either side.
    print("### closed ###")
    pass
def on_open(ws):
    """Once connected, start a thread that keeps the connection alive by
    re-sending a fixed payload every 10 seconds."""
    def send_trhead():
        # Set your websocket payload content here.
        send_info = {"cmd": "refresh", "data": {"room_id": "58", "wx_user_id": 56431}}
        # Send data every 10 seconds so the connection is not dropped.
        while True:
            time.sleep(10)
            ws.send(json.dumps(send_info))
    t = threading.Thread(target=send_trhead)
    t.start()
def on_start(num):
    """Open one websocket connection (number *num*) and serve it forever."""
    # Stagger connection attempts so threads do not all connect at once.
    time.sleep(num%20)
    websocket.enableTrace(True)
    ws = websocket.WebSocketApp(WS_URL + str(num),
                              on_message=on_message,
                              on_error=on_error,
                              on_close=on_close)
    ws.on_open = on_open
    ws.run_forever()
def thread_web_socket():
    """Open thread_num websocket connections from this process."""
    # Thread pool
    pool = ThreadPool(thread_num)
    num = list()
    # Number of threads (one connection each) to start.
    for ir in range(thread_num):
        num.append(ir)
    requests = makeRequests(on_start, num)
    [pool.putRequest(req) for req in requests]
    pool.wait()
if __name__ == "__main__":
#进程池
pool = multiprocessing.Pool(processes=processes)
#设置开启进程的数量
for i in range(processes):
pool.apply_async(thread_web_socket)
pool.close()
pool.join()
|
[
"[email protected]"
] | |
2aa099e77ec976eea8a2ce7424afac7d5124999a
|
b0c2a8c77fc0e025690e59f990950b6eb347c4c3
|
/corpus_builder/spiders/newspaper/janakantha.py
|
f0a39957ada5c7a106bab473fa6104a3258d95fb
|
[
"MIT"
] |
permissive
|
ibraheem-moosa/corpus-builder
|
1b31cbc501026436e5ebde2e363379b6fc094dd0
|
5f09835f9aa62abb5f891c4d3896206eedd9fe12
|
refs/heads/master
| 2020-08-06T21:39:31.700339 | 2018-08-24T14:00:18 | 2018-08-24T14:00:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,923 |
py
|
# -*- coding: utf-8 -*-
import datetime
import urlparse
import scrapy
from corpus_builder.templates.spider import CommonSpider
class JanakanthaSpider(CommonSpider):
    """Crawl dailyjanakantha.com archive pages by category and date,
    handing article pages to ``parse_content`` (from CommonSpider)."""
    name = "janakantha"
    allowed_domains = ["dailyjanakantha.com"]
    base_url = 'https://www.dailyjanakantha.com'
    start_request_url = base_url
    # CSS selector used by the base template to pull article body text.
    content_body = {
        'css': 'p.artDetails *::text'
    }
    # Valid combinations of command-line crawl arguments.
    allowed_configurations = [
        ['start_date'],
        ['start_date', 'end_date'],
        ['category', 'start_date'],
        ['category', 'start_date', 'end_date'],
    ]

    def request_index(self, response):
        """Yield one archive-page request per (category, date) pair."""
        # Category slugs are the last path segment of each nav menu link.
        menu_links = [urlparse.urlparse(x.strip()).path.split('/')[-1] \
            for x in response.css('nav.menu a::attr("href")').extract()]
        categories = [x for x in menu_links if (not x == "" and not x == "#")]
        if self.category is not None:
            if self.category in categories:
                categories = [self.category]
            else:
                raise ValueError('invalid category slug. available slugs: %s' % ", ".join(categories))
        date_processing = self.start_date
        while date_processing <= self.end_date:
            for category in categories:
                # https://www.dailyjanakantha.com/frontpage/date/2016-06-01
                url = self.base_url + '/{0}/date/{1}'.format(
                    category,
                    date_processing.strftime('%Y-%m-%d')
                )
                yield scrapy.Request(url, callback=self.extract_news_category)
            date_processing += datetime.timedelta(days=1)

    def extract_news_category(self, response):
        """Queue every deduplicated article link found on an archive page."""
        # NOTE(review): xpath('...//a').extract() returns full <a> element
        # markup, not href values -- these "links" look suspect; confirm
        # against parse_content's expectations.
        news_links = list(set(response.xpath('//div[@class="content"]//a').extract()))
        for link in news_links:
            if not link[:4] == 'http':
                link = self.base_url + link
            yield scrapy.Request(link, callback=self.parse_content)
|
[
"[email protected]"
] | |
f8f2a2c16488ab7b2db5c75b3e3384fe28779156
|
c5458f2d53d02cb2967434122183ed064e1929f9
|
/sdks/python/test/test_generate_commitments_request.py
|
8c5a96d5277ec4d96a679c0c58dcd09553377df4
|
[] |
no_license
|
ross-weir/ergo-node-api-sdks
|
fd7a32f79784dbd336ef6ddb9702b9dd9a964e75
|
9935ef703b14760854b24045c1307602b282c4fb
|
refs/heads/main
| 2023-08-24T05:12:30.761145 | 2021-11-08T10:28:10 | 2021-11-08T10:28:10 | 425,785,912 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,207 |
py
|
"""
Ergo Node API
API docs for Ergo Node. Models are shared between all Ergo products # noqa: E501
The version of the OpenAPI document: 4.0.15
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.generate_commitments_request_secrets import GenerateCommitmentsRequestSecrets
from openapi_client.model.unsigned_ergo_transaction import UnsignedErgoTransaction
globals()['GenerateCommitmentsRequestSecrets'] = GenerateCommitmentsRequestSecrets
globals()['UnsignedErgoTransaction'] = UnsignedErgoTransaction
from openapi_client.model.generate_commitments_request import GenerateCommitmentsRequest
class TestGenerateCommitmentsRequest(unittest.TestCase):
    """GenerateCommitmentsRequest unit test stubs"""

    def setUp(self):
        # No shared fixtures needed for these generated stubs.
        pass

    def tearDown(self):
        pass

    def testGenerateCommitmentsRequest(self):
        """Test GenerateCommitmentsRequest"""
        # FIXME: construct object with mandatory attributes with example values
        # model = GenerateCommitmentsRequest() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
57c861cd16af96f077cd25db431a46f4feb6d0b2
|
c30d4f174a28aac495463f44b496811ee0c21265
|
/python/helpers/python-skeletons/multiprocessing/__init__.py
|
de9d1ddfa3ca0c043f71519ec442d5c291506ae8
|
[
"Apache-2.0"
] |
permissive
|
sarvex/intellij-community
|
cbbf08642231783c5b46ef2d55a29441341a03b3
|
8b8c21f445550bd72662e159ae715e9d944ba140
|
refs/heads/master
| 2023-05-14T14:32:51.014859 | 2023-05-01T06:59:21 | 2023-05-01T06:59:21 | 32,571,446 | 0 | 0 |
Apache-2.0
| 2023-05-01T06:59:22 | 2015-03-20T08:16:17 |
Java
|
UTF-8
|
Python
| false | false | 4,217 |
py
|
"""Skeleton for 'multiprocessing' stdlib module."""
from multiprocessing.pool import Pool
class Process(object):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
self.name = ''
self.daemon = False
self.authkey = None
self.exitcode = None
self.ident = 0
self.pid = 0
self.sentinel = None
def run(self):
pass
def start(self):
pass
def terminate(self):
pass
def join(self, timeout=None):
pass
def is_alive(self):
return False
class ProcessError(Exception):
pass
class BufferTooShort(ProcessError):
pass
class AuthenticationError(ProcessError):
pass
class TimeoutError(ProcessError):
pass
class Connection(object):
def send(self, obj):
pass
def recv(self):
pass
def fileno(self):
return 0
def close(self):
pass
def poll(self, timeout=None):
pass
def send_bytes(self, buffer, offset=-1, size=-1):
pass
def recv_bytes(self, maxlength=-1):
pass
def recv_bytes_into(self, buffer, offset=-1):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def Pipe(duplex=True):
return Connection(), Connection()
class Queue(object):
def __init__(self, maxsize=-1):
self._maxsize = maxsize
def qsize(self):
return 0
def empty(self):
return False
def full(self):
return False
def put(self, obj, block=True, timeout=None):
pass
def put_nowait(self, obj):
pass
def get(self, block=True, timeout=None):
pass
def get_nowait(self):
pass
def close(self):
pass
def join_thread(self):
pass
def cancel_join_thread(self):
pass
class SimpleQueue(object):
def empty(self):
return False
def get(self):
pass
def put(self, item):
pass
class JoinableQueue(multiprocessing.Queue):
def task_done(self):
pass
def join(self):
pass
def active_childern():
"""
:rtype: list[multiprocessing.Process]
"""
return []
def cpu_count():
return 0
def current_process():
"""
:rtype: multiprocessing.Process
"""
return Process()
def freeze_support():
pass
def get_all_start_methods():
return []
def get_context(method=None):
pass
def get_start_method(allow_none=False):
pass
def set_executable(path):
pass
def set_start_method(method):
pass
class Barrier(object):
def __init__(self, parties, action=None, timeout=None):
self.parties = parties
self.n_waiting = 0
self.broken = False
def wait(self, timeout=None):
pass
def reset(self):
pass
def abort(self):
pass
class Semaphore(object):
def __init__(self, value=1):
pass
def acquire(self, blocking=True, timeout=None):
pass
def release(self):
pass
class BoundedSemaphore(multiprocessing.Semaphore):
pass
class Condition(object):
def __init__(self, lock=None):
pass
def acquire(self, *args):
pass
def release(self):
pass
def wait(self, timeout=None):
pass
def wait_for(self, predicate, timeout=None):
pass
def notify(self, n=1):
pass
def notify_all(self):
pass
class Event(object):
def is_set(self):
return False
def set(self):
pass
def clear(self):
pass
def wait(self, timeout=None):
pass
class Lock(object):
def acquire(self, blocking=True, timeout=-1):
pass
def release(self):
pass
class RLock(object):
def acquire(self, blocking=True, timeout=-1):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def Value(typecode_or_type, *args, **kwargs):
pass
def Array(typecode_or_type, size_or_initializer, lock=True):
pass
def Manager():
return multiprocessing.SyncManager()
|
[
"[email protected]"
] | |
6b354ee59c681faf08710f4c4a73bf94b911ddca
|
33af6185b48bd76f97f0a74390a3a812ee216c78
|
/angr/angr/procedures/glibc/__libc_start_main.py
|
12aa852769b769b404e992c1b45228fc1eb2aa92
|
[
"BSD-2-Clause"
] |
permissive
|
Ruide/angr-dev
|
dab0cabd907fce47ac698f890c3f3a8b80ab7e2a
|
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
|
refs/heads/master
| 2022-11-10T11:27:13.355024 | 2017-10-07T14:29:09 | 2017-10-07T14:29:09 | 104,417,044 | 0 | 1 |
BSD-2-Clause
| 2022-10-16T04:48:10 | 2017-09-22T01:35:12 |
C
|
UTF-8
|
Python
| false | false | 8,177 |
py
|
import logging
import pyvex
import angr
l = logging.getLogger("angr.procedures.glibc.__libc_start_main")
######################################
# __libc_start_main
######################################
class __libc_start_main(angr.SimProcedure):
#pylint:disable=arguments-differ,unused-argument,attribute-defined-outside-init
ADDS_EXITS = True
NO_RET = True
IS_FUNCTION = True
local_vars = ('main', 'argc', 'argv', 'init', 'fini')
def _initialize_b_loc_table(self):
"""
Initialize ptable for ctype
See __ctype_b_loc.c in libc implementation
"""
malloc = angr.SIM_PROCEDURES['libc']['malloc']
table = self.inline_call(malloc, 768).ret_expr
table_ptr = self.inline_call(malloc, self.state.arch.bits / 8).ret_expr
for pos, c in enumerate(self.state.libc.LOCALE_ARRAY):
# Each entry is 2 bytes
self.state.memory.store(table + (pos*2),
self.state.se.BVV(c, 16),
endness=self.state.arch.memory_endness,
inspect=False,
disable_actions=True,
)
# Offset for negative chars
# 256 because 2 bytes each, -128 * 2
table += 256
self.state.memory.store(table_ptr,
table,
size=self.state.arch.bits / 8,
endness=self.state.arch.memory_endness,
inspect=False,
disable_actions=True,
)
self.state.libc.ctype_b_loc_table_ptr = table_ptr
def _initialize_tolower_loc_table(self):
"""
Initialize ptable for ctype
See __ctype_tolower_loc.c in libc implementation
"""
malloc = angr.SIM_PROCEDURES['libc']['malloc']
# 384 entries, 4 bytes each
table = self.inline_call(malloc, 384*4).ret_expr
table_ptr = self.inline_call(malloc, self.state.arch.bits / 8).ret_expr
for pos, c in enumerate(self.state.libc.TOLOWER_LOC_ARRAY):
self.state.memory.store(table + (pos * 4),
self.state.se.BVV(c, 32),
endness=self.state.arch.memory_endness,
inspect=False,
disable_actions=True,
)
# Offset for negative chars: -128 index (4 bytes per index)
table += (128 * 4)
self.state.memory.store(table_ptr,
table,
size=self.state.arch.bits / 8,
endness=self.state.arch.memory_endness,
inspect=False,
disable_actions=True,
)
self.state.libc.ctype_tolower_loc_table_ptr = table_ptr
def _initialize_toupper_loc_table(self):
"""
Initialize ptable for ctype
See __ctype_toupper_loc.c in libc implementation
"""
malloc = angr.SIM_PROCEDURES['libc']['malloc']
# 384 entries, 4 bytes each
table = self.inline_call(malloc, 384*4).ret_expr
table_ptr = self.inline_call(malloc, self.state.arch.bits / 8).ret_expr
for pos, c in enumerate(self.state.libc.TOUPPER_LOC_ARRAY):
self.state.memory.store(table + (pos * 4),
self.state.se.BVV(c, 32),
endness=self.state.arch.memory_endness,
inspect=False,
disable_actions=True,
)
# Offset for negative chars: -128 index (4 bytes per index)
table += (128 * 4)
self.state.memory.store(table_ptr,
table,
size=self.state.arch.bits / 8,
endness=self.state.arch.memory_endness,
inspect=False,
disable_actions=True,
)
self.state.libc.ctype_toupper_loc_table_ptr = table_ptr
def _initialize_ctype_table(self):
self._initialize_b_loc_table()
self._initialize_tolower_loc_table()
self._initialize_toupper_loc_table()
@property
def envp(self):
return self.argv + (self.argc+1)*self.state.arch.bytes
def run(self, main, argc, argv, init, fini):
# TODO: handle symbolic and static modes
# TODO: add argument types
self._initialize_ctype_table()
self.main, self.argc, self.argv, self.init, self.fini = self._extract_args(self.state, main, argc, argv, init,
fini)
# TODO: __cxa_atexit calls for various at-exit needs
self.call(self.init, (self.argc, self.argv, self.envp), 'after_init')
def after_init(self, main, argc, argv, init, fini, exit_addr=0):
if isinstance(self.state.arch, ArchAMD64):
# (rsp+8) must be aligned to 16 as required by System V ABI
# ref: http://www.x86-64.org/documentation/abi.pdf , page 16
self.state.regs.rsp = (self.state.regs.rsp & 0xfffffffffffffff0) - 8
self.call(self.main, (self.argc, self.argv, self.envp), 'after_main')
def after_main(self, main, argc, argv, init, fini, exit_addr=0):
self.exit(0)
def static_exits(self, blocks):
# Execute those blocks with a blank state, and then dump the arguments
blank_state = angr.SimState(project=self.project, mode="fastpath")
# set up the stack pointer
blank_state.regs.sp = 0x7fffffff
# Execute each block
state = blank_state
for b in blocks:
# state.regs.ip = next(iter(stmt for stmt in b.statements if isinstance(stmt, pyvex.IRStmt.IMark))).addr
irsb = angr.SimEngineVEX().process(state, b,
force_addr=next(iter(stmt for stmt in b.statements if isinstance(stmt, pyvex.IRStmt.IMark))).addr)
if irsb.successors:
state = irsb.successors[0]
else:
break
cc = angr.DEFAULT_CC[self.arch.name](self.arch)
args = [ cc.arg(state, _) for _ in xrange(5) ]
main, _, _, init, fini = self._extract_args(blank_state, *args)
all_exits = [
(init, 'Ijk_Call'),
(main, 'Ijk_Call'),
(fini, 'Ijk_Call'),
]
return all_exits
@staticmethod
def _extract_args(state, main, argc, argv, init, fini):
"""
Extract arguments and set them to
:param angr.sim_state.SimState state: The program state.
:param main: An argument to __libc_start_main.
:param argc: An argument to __libc_start_main.
:param argv: An argument to __libc_start_main.
:param init: An argument to __libc_start_main.
:param fini: An argument to __libc_start_main.
:return: A tuple of five elements: (main, argc, argv, init, fini)
:rtype: tuple
"""
main_ = main
argc_ = argc
argv_ = argv
init_ = init
fini_ = fini
if state.arch.name == "PPC32":
# for some dumb reason, PPC passes arguments to libc_start_main in some completely absurd way
argv_ = argc_
argc_ = main_
main_ = state.mem[state.regs.r8 + 4:].int.resolved
init_ = state.mem[state.regs.r8 + 8:].int.resolved
fini_ = state.mem[state.regs.r8 + 12:].int.resolved
elif state.arch.name == "PPC64":
main_ = state.mem[state.regs.r8 + 8:].long.resolved
init_ = state.mem[state.regs.r8 + 16:].long.resolved
fini_ = state.mem[state.regs.r8 + 24:].long.resolved
return main_, argc_, argv_, init_, fini_
from archinfo import ArchAMD64
|
[
"[email protected]"
] | |
d1ef46174618edcfd908c875a157a06da832d91a
|
602ea0c05970cbd766df068b003671c561f59661
|
/tools/perf/benchmarks/jetstream2.py
|
19f31f16c1bc952e688b1bb19284defef99e3e9d
|
[
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.0-or-later",
"MIT",
"BSD-3-Clause",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.0-only",
"APSL-2.0",
"LicenseRef-scancode-unknown",
"Zlib"
] |
permissive
|
VitalyKononenko/chromium
|
088de78a639375b073cabb7665afc638334e8672
|
b8ad2cadb6a163269cd7851bc7962744743785bd
|
refs/heads/master
| 2023-03-01T10:15:00.815394 | 2019-08-15T19:51:40 | 2019-08-15T19:51:40 | 202,603,102 | 1 | 0 |
BSD-3-Clause
| 2019-08-15T19:54:34 | 2019-08-15T19:54:33 | null |
UTF-8
|
Python
| false | false | 1,655 |
py
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Apple's JetStream 2 benchmark.
JetStream 2 combines together a variety of JavaScript and Web Assembly
benchmarks, covering a variety of advanced workloads and programming
techniques, and reports a single score that balances them using a geometric
mean.
Each benchmark measures a distinct workload, and no single optimization
technique is sufficient to speed up all benchmarks. Some benchmarks
demonstrate tradeoffs, and aggressive or specialized optimizations for one
benchmark might make another benchmark slower. JetStream 2 rewards browsers
that start up quickly, execute code quickly, and continue running smoothly.
Each benchmark in JetStream 2 computes its own individual score. JetStream 2
weighs each benchmark equally, taking the geometric mean over each individual
benchmark's score to compute the overall JetStream 2 score.
"""
from telemetry import benchmark
import page_sets
from benchmarks import press
@benchmark.Info(emails=['[email protected]', '[email protected]'],
component='Blink>JavaScript',
documentation_url='https://browserbench.org/JetStream/in-depth.html')
class Jetstream2(press._PressBenchmark): # pylint: disable=protected-access
"""JetStream2, a combination of JavaScript and Web Assembly benchmarks.
Run all the Jetstream 2 benchmarks by default.
"""
@classmethod
def Name(cls):
return 'UNSCHEDULED_jetstream2'
def CreateStorySet(self, options):
return page_sets.Jetstream2StorySet()
|
[
"[email protected]"
] | |
d9f3996fc6b6e11676bb6d73c8c96a5562d5fcec
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/lobby/tank_setup/ammunition_setup_vehicle.py
|
572f09c20630d76920f59564d58da4e89187b639
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 |
Python
|
UTF-8
|
Python
| false | false | 972 |
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/tank_setup/ammunition_setup_vehicle.py
from CurrentVehicle import g_currentVehicle
from helpers import dependency
from skeletons.gui.shared import IItemsCache
class _TankSetupVehicle(object):
__slots__ = ('__vehicle',)
_itemsCache = dependency.descriptor(IItemsCache)
def __init__(self):
super(_TankSetupVehicle, self).__init__()
self.__vehicle = None
return
def setVehicle(self, value):
self.__vehicle = value
@property
def item(self):
return self.__vehicle or g_currentVehicle.item
@property
def defaultItem(self):
return g_currentVehicle.item if g_currentVehicle.isPresent() else None
def isPresent(self):
return self.__vehicle is not None
def dispose(self):
self.__vehicle = None
return
g_tankSetupVehicle = _TankSetupVehicle()
|
[
"[email protected]"
] | |
b001592fbd0025106b5c0de3a8e0852a6fb0006e
|
226b1c73a706f4734834196d18305d4d2c873589
|
/synlib/descriptions/EDFFXL.py
|
206bab5d4710ad637b02fca8e34e937d415a73fb
|
[] |
no_license
|
ocakgun/vlsistuff
|
43b4b07ae186b8d2360d11c57cd10b861e96bcbe
|
776c07f5d0c40fe7d410b5c85e7381017d4dab64
|
refs/heads/master
| 2022-06-13T14:40:22.641310 | 2020-05-08T11:09:00 | 2020-05-08T11:09:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 962 |
py
|
Desc = cellDescClass("EDFFXL")
Desc.properties["cell_leakage_power"] = "1762.140420"
Desc.properties["cell_footprint"] = "edff"
Desc.properties["area"] = "76.507200"
Desc.pinOrder = ['CK', 'D', 'E', 'IQ', 'IQN', 'Q', 'QN', 'next']
Desc.add_arc("CK","D","setup_rising")
Desc.add_arc("CK","D","hold_rising")
Desc.add_arc("CK","E","setup_rising")
Desc.add_arc("CK","E","hold_rising")
Desc.add_arc("CK","Q","rising_edge")
Desc.add_arc("CK","QN","rising_edge")
Desc.add_param("area",76.507200);
Desc.set_pin_job("CK","clock")
Desc.add_pin("CK","input")
Desc.add_pin("E","input")
Desc.add_pin("D","input")
Desc.add_pin("IQ","output")
Desc.add_pin_func("IQ","unknown")
Desc.add_pin("next","output")
Desc.add_pin_func("next","unknown")
Desc.add_pin("Q","output")
Desc.add_pin_func("Q","unknown")
Desc.add_pin("IQN","output")
Desc.add_pin_func("IQN","unknown")
Desc.add_pin("QN","output")
Desc.add_pin_func("QN","unknown")
Desc.set_job("flipflop")
CellLib["EDFFXL"]=Desc
|
[
"[email protected]"
] | |
021e4fcee7a62e92f84e0a057de120f6f6d67961
|
6c8f3ab5f952d986a17edda582c5a039bf65c632
|
/django/consolidate_project/consolidate_project/settings.py
|
08a82252a1c6016afd3b14e4b91661d7bd5f4c59
|
[] |
no_license
|
phillipn/coding_bootcamp_projects
|
3d3bd697728dd4502267e0cd2be7a090952029a8
|
278f96df9d256364583654a00fe585d474ea86a1
|
refs/heads/master
| 2021-01-17T17:30:14.607944 | 2017-03-19T18:12:32 | 2017-03-19T18:12:32 | 82,971,619 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,218 |
py
|
"""
Django settings for consolidate_project project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'tqgt7)2b(iphav%!(5-e1(6kk%x=*o^#&i_aa_ab55)t0xgj5_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.login',
'apps.registration',
'apps.turtles',
'apps.classes',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'consolidate_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'consolidate_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
580f96d5338bff027bec064c4f87a82504567a6d
|
9cd9e89359f0da1750a19f0609619e3bbe4c536e
|
/tests/fork_test.py
|
61c0e16d67a73a9a7ee9b0fcc4c582058fe208d0
|
[
"BSD-3-Clause"
] |
permissive
|
ph448/mitogen
|
e961c95578c852908e33861da7226919547070f0
|
c24d29d3676aa122e25716450246306aaf4a797b
|
refs/heads/master
| 2020-03-19T01:48:11.362786 | 2018-05-28T05:00:57 | 2018-05-28T05:00:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,219 |
py
|
import ctypes
import os
import random
import ssl
import struct
import sys
import mitogen
import unittest2
import testlib
import plain_old_module
IS_64BIT = struct.calcsize('P') == 8
PLATFORM_TO_PATH = {
('darwin', False): '/usr/lib/libssl.dylib',
('darwin', True): '/usr/lib/libssl.dylib',
('linux2', False): '/usr/lib/libssl.so',
('linux2', True): '/usr/lib/x86_64-linux-gnu/libssl.so',
}
c_ssl = ctypes.CDLL(PLATFORM_TO_PATH[sys.platform, IS_64BIT])
c_ssl.RAND_pseudo_bytes.argtypes = [ctypes.c_char_p, ctypes.c_int]
c_ssl.RAND_pseudo_bytes.restype = ctypes.c_int
def ping():
return 123
def random_random():
return random.random()
def RAND_pseudo_bytes(n=32):
buf = ctypes.create_string_buffer(n)
assert 1 == c_ssl.RAND_pseudo_bytes(buf, n)
return buf[:]
def exercise_importer(n):
"""
Ensure the forked child has a sensible importer.
"""
sys.path.remove(testlib.DATA_DIR)
import simple_pkg.a
return simple_pkg.a.subtract_one_add_two(n)
class ForkTest(testlib.RouterMixin, unittest2.TestCase):
def test_okay(self):
context = self.router.fork()
self.assertNotEqual(context.call(os.getpid), os.getpid())
self.assertEqual(context.call(os.getppid), os.getpid())
def test_random_module_diverges(self):
context = self.router.fork()
self.assertNotEqual(context.call(random_random), random_random())
def test_ssl_module_diverges(self):
# Ensure generator state is initialized.
RAND_pseudo_bytes()
context = self.router.fork()
self.assertNotEqual(context.call(RAND_pseudo_bytes),
RAND_pseudo_bytes())
def test_importer(self):
context = self.router.fork()
self.assertEqual(2, context.call(exercise_importer, 1))
def test_on_start(self):
recv = mitogen.core.Receiver(self.router)
def on_start(econtext):
sender = mitogen.core.Sender(econtext.parent, recv.handle)
sender.send(123)
context = self.router.fork(on_start=on_start)
self.assertEquals(123, recv.get().unpickle())
class DoubleChildTest(testlib.RouterMixin, unittest2.TestCase):
def test_okay(self):
# When forking from the master process, Mitogen had nothing to do with
# setting up stdio -- that was inherited wherever the Master is running
# (supervisor, iTerm, etc). When forking from a Mitogen child context
# however, Mitogen owns all of fd 0, 1, and 2, and during the fork
# procedure, it deletes all of these descriptors. That leaves the
# process in a weird state that must be handled by some combination of
# fork.py and ExternalContext.main().
# Below we simply test whether ExternalContext.main() managed to boot
# successfully. In future, we need lots more tests.
c1 = self.router.fork()
c2 = self.router.fork(via=c1)
self.assertEquals(123, c2.call(ping))
def test_importer(self):
c1 = self.router.fork(name='c1')
c2 = self.router.fork(name='c2', via=c1)
self.assertEqual(2, c2.call(exercise_importer, 1))
if __name__ == '__main__':
unittest2.main()
|
[
"[email protected]"
] | |
3726da4b6e8c640f2c1e4980ff8758f66e31bb14
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/era5_scripts/02_preprocessing/lag82/504-tideGauge.py
|
f6cdd6a41e7cde5295cbc1bf322d1a52927b4360
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,984 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 17:12:23 2020
****************************************************
Load predictors & predictands + predictor importance
****************************************************
@author: Michael Tadesse
"""
#import packages
import os
import pandas as pd
import datetime as dt #used for timedelta
from datetime import datetime
#define directories
dir_in = '/lustre/fs0/home/mtadesse/ereaFiveCombine'
dir_out = '/lustre/fs0/home/mtadesse/eraFiveLag'
def lag():
os.chdir(dir_in)
#get names
tg_list_name = os.listdir()
x = 504
y = 505
for t in range(x, y):
tg_name = tg_list_name[t]
print(tg_name, '\n')
# #check if the file exists
# os.chdir(dir_out)
# if (os.path.isfile(tg_name)):
# print('file already exists')
# continue
#cd to where the actual file is
os.chdir(dir_in)
pred = pd.read_csv(tg_name)
pred.sort_values(by = 'date', inplace=True)
pred.reset_index(inplace = True)
pred.drop('index', axis = 1, inplace = True)
#create a daily time series - date_range
#get only the ymd of the start and end times
start_time = pred['date'][0].split(' ')[0]
end_time = pred['date'].iloc[-1].split(' ')[0]
print(start_time, ' - ', end_time, '\n')
date_range = pd.date_range(start_time, end_time, freq = 'D')
#defining time changing lambda functions
time_str = lambda x: str(x)
time_converted_str = pd.DataFrame(map(time_str, date_range), columns = ['date'])
time_converted_stamp = pd.DataFrame(date_range, columns = ['timestamp'])
"""
first prepare the six time lagging dataframes
then use the merge function to merge the original
predictor with the lagging dataframes
"""
#prepare lagged time series for time only
#note here that since ERA20C has 3hrly data
#the lag_hrs is increased from 6(eraint) to 11 (era20C)
time_lagged = pd.DataFrame()
lag_hrs = [0, 6, 12, 18, 24, 30]
for lag in lag_hrs:
lag_name = 'lag'+str(lag)
lam_delta = lambda x: str(x - dt.timedelta(hours = lag))
lag_new = pd.DataFrame(map(lam_delta, time_converted_stamp['timestamp']), \
columns = [lag_name])
time_lagged = pd.concat([time_lagged, lag_new], axis = 1)
#datafrmae that contains all lagged time series (just time)
time_all = pd.concat([time_converted_str, time_lagged], axis = 1)
pred_lagged = pd.DataFrame()
for ii in range(1,time_all.shape[1]): #to loop through the lagged time series
print(time_all.columns[ii])
#extracting corresponding tag time series
lag_ts = pd.DataFrame(time_all.iloc[:,ii])
lag_ts.columns = ['date']
#merge the selected tlagged time with the predictor on = "date"
pred_new = pd.merge(pred, lag_ts, on = ['date'], how = 'right')
pred_new.drop('Unnamed: 0', axis = 1, inplace = True)
#sometimes nan values go to the bottom of the dataframe
#sort df by date -> reset the index -> remove old index
pred_new.sort_values(by = 'date', inplace=True)
pred_new.reset_index(inplace=True)
pred_new.drop('index', axis = 1, inplace= True)
#concatenate lagged dataframe
if ii == 1:
pred_lagged = pred_new
else:
pred_lagged = pd.concat([pred_lagged, pred_new.iloc[:,1:]], axis = 1)
#cd to saving directory
os.chdir(dir_out)
pred_lagged.to_csv(tg_name)
os.chdir(dir_in)
#run script
lag()
|
[
"[email protected]"
] | |
d0f805cd5b4c54300491e93aef4f4b816517393e
|
ea872f0a2bcc4270b7089120e3eb2f8dd32a165e
|
/Baxter/build/planning_baxter/catkin_generated/pkg.develspace.context.pc.py
|
e788f2c9fa75bd9400e0e1903a35e10d75c2678c
|
[] |
no_license
|
ZhenYaGuo/Warehouse-Robotic-System
|
2def137478911f499c45276aa3103a0b68ebb8d7
|
47b78d111b387102e29d2596bd5dc7c704f74f8f
|
refs/heads/master
| 2021-08-24T04:12:43.379580 | 2017-12-08T01:48:09 | 2017-12-08T01:48:09 | 113,405,332 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 415 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "planning_baxter"
PROJECT_SPACE_DIR = "/home/cc/ee106a/fa17/class/ee106a-aax/ros_workspaces/project/devel"
PROJECT_VERSION = "0.0.0"
|
[
"[email protected]"
] | |
f5b45500bb75688f6f3ca574206f37660a15e559
|
e9ef558d04f39f0e82ad63e955dd8772e63c99c3
|
/chat_project/chat_project/settings.py
|
a728c23923310ab703ec61e0f1b5ef83ec5c8de4
|
[
"MIT"
] |
permissive
|
nahidsaikat/Chat
|
5634ff91eef394ec2b6288d1adff17f0eb867b15
|
7d314195b03d355844767f7c89cca34f0fad95c9
|
refs/heads/master
| 2022-12-14T03:44:01.105994 | 2021-06-30T15:19:21 | 2021-06-30T15:19:21 | 177,983,242 | 0 | 0 |
MIT
| 2022-12-08T07:44:46 | 2019-03-27T11:49:48 |
Python
|
UTF-8
|
Python
| false | false | 3,434 |
py
|
"""
Django settings for chat_project project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ti1$n@9k5ub@9r76iw*f(&m*8#wm#-oiiid2jzi)_94bjq_1y&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'chat_app'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chat_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chat_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
}
|
[
"[email protected]"
] | |
d05b8fe31cb2b3669e6ffacc405b55cbda7ff8b4
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/tunnel/lsite.py
|
cf53b4bf12e69f5b767bc243bd15658320be6f5d
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 |
Python
|
UTF-8
|
Python
| false | false | 5,189 |
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class LSite(Mo):
    """
    Tunnel info for the local site in a multisite topology
    (concrete MO class ``tunnelLSite``).

    Mo doc not defined in techpub!!!  Generated model code — see the
    "do not modify" banner at the top of this file.
    """
    # Class-level metadata describing this managed object to the cobra runtime.
    meta = ClassMeta("cobra.model.tunnel.LSite")
    meta.moClassName = "tunnelLSite"
    meta.rnFormat = "lsite-%(id)s"  # relative name is built from the naming prop `id`
    meta.category = MoCategory.REGULAR
    meta.label = "Tunnel info for the local site in a multisite topology"
    meta.writeAccessMask = 0x400000000001
    meta.readAccessMask = 0x400000000001
    meta.isDomainable = False
    # Read-only, non-configurable, non-deletable: this object is reported by
    # the system rather than created through configuration.
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False
    meta.childClasses.add("cobra.model.fault.Delegate")
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
    meta.parentClasses.add("cobra.model.l3.Inst")
    meta.superClasses.add("cobra.model.pol.Instr")
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.rnPrefixes = [
        ('lsite-', True),
    ]
    # childAction: implicit bookkeeping property (deleteAll/deleteNonPresent/ignore).
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    # descr: optional free-form description, max 128 chars, restricted charset.
    prop = PropMeta("str", "descr", "descr", 5581, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)
    # dn: full distinguished name; implicit and fixed at creation.
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    # etep: "ETEP IP" per its label; implicit (system-reported), not configurable.
    prop = PropMeta("str", "etep", "etep", 33221, PropCategory.REGULAR)
    prop.label = "ETEP IP"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("etep", prop)
    # id: site identifier; the naming property used in the RN "lsite-%(id)s".
    prop = PropMeta("str", "id", "id", 33222, PropCategory.REGULAR)
    prop.label = "Site ID"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    meta.props.add("id", prop)
    # lcOwn: lifecycle owner of the MO; defaults to "local".
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)
    # modTs: last-modification timestamp; defaults to "never".
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    # name: optional object name, max 64 chars.
    prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)
    # nameAlias: display alias for the name, max 63 chars.
    prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)
    # rn: relative name; implicit and fixed at creation.
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    # status: MO status flags (created/modified/deleted).
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    # `id` is the single naming property for this class.
    meta.namingProps.append(getattr(meta.props, "id"))

    def __init__(self, parentMoOrDn, id, markDirty=True, **creationProps):
        # The naming value `id` is forwarded positionally to Mo so the
        # RN "lsite-%(id)s" can be constructed.
        namingVals = [id]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"[email protected]"
] | |
1f62074c0c85f84ac88700f413546240cba19622
|
ec78979fd8479e884ab93d723360744db5152134
|
/wechat_stat.py
|
e05254f8304d487894b38f59d8004251e12e30bd
|
[] |
no_license
|
xushubo/learn-python
|
49c5f4fab1ac0e06c91eaa6bd54159fd661de0b9
|
8cb6f0cc23d37011442a56f1c5a11f99b1179ce6
|
refs/heads/master
| 2021-01-19T17:00:05.247958 | 2017-09-03T03:22:28 | 2017-09-03T03:22:28 | 101,032,298 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,139 |
py
|
import itchat
from echarts import Echart, Legend, Pie
itchat.login()  # log in to WeChat (interactive; itchat handles the auth flow)
friends = itchat.get_friends(update=True)[0:]  # fetch the full friend list
male = female = other = 0  # counters: male, female, and gender-not-set
# Iterate the friend list; entry 0 is the logged-in user, so start at index 1.
# In itchat's data, Sex == 1 means male and Sex == 2 means female.
for i in friends[1:]:
    sex = i['Sex']
    if sex == 1:
        male +=1
    elif sex == 2:
        female += 1
    else:
        other += 1
# Total excludes the user's own entry, matching the loop above.
total = len(friends[1:])
print('wechat好友总数:%d' % total)
print('男性好友: %.2f%%' % (float(male)/total*100))
print('女性好友: %.2f%%' % (float(female)/total*100))
print('其他: %.2f%%' % (float(other)/total*100))
'''
chart = Echart('%s的微信好友性别比例' % (friends[0]['NickName']), 'from WeChat')
chart.use(Pie('WeChat', [{'value': male, 'name': '男性 %.2f%%' % (float(male) / total * 100)}, {'value': female, 'name': '女性 %.2f%%' % (float(female) / total * 100)}, {'value': other, 'name': '其他 %.2f%%' % (float(other) / total * 100)}], radius=["50%", "70%"]))
chart.use(Legend(['male', 'female', 'other']))
del chart.json['xAxis']
del chart.json['yAxis']
chart.plot()
'''
|
[
"[email protected]"
] | |
c3a0d221d0881ea417f3e5b03fd1a8fe558c52c1
|
632d58b9f7ae470d9ec2b0e88af0aa8054dfa40e
|
/src/ryzom_django/management/commands/ryzom_bundle.py
|
48c255b344ea621534b03d56660dbf76563dd28f
|
[] |
no_license
|
yourlabs/ryzom
|
8d06bf829ee9d31d33fa9353fdf187241c82b6ef
|
425859e2de30c3b939756a23a064fb1affe04b02
|
refs/heads/master
| 2023-05-13T10:27:09.766272 | 2023-05-02T14:49:25 | 2023-05-02T14:49:25 | 192,992,635 | 5 | 1 | null | 2022-10-11T20:19:52 | 2019-06-20T22:03:37 |
Python
|
UTF-8
|
Python
| false | false | 873 |
py
|
import os
from django.core.management.base import BaseCommand, CommandError
from ryzom_django import bundle
class Command(BaseCommand):
    """Regenerate the static JS and CSS bundles for ryzom_django."""

    help = 'Write JS & CSS bundles to ryzom_django/static/bundle.*'

    def handle(self, *args, **options):
        # Resolve <package root>/static relative to this module's location.
        here = os.path.dirname(__file__)
        static_path = os.path.abspath(os.path.join(here, '..', '..', 'static'))
        if not os.path.exists(static_path):
            os.makedirs(static_path)
        # Render each bundle and write it into the static directory.
        for filename, render in (('bundle.js', bundle.js), ('bundle.css', bundle.css)):
            with open(f'{static_path}/{filename}', 'w+') as out:
                out.write(render())
        self.stdout.write(self.style.SUCCESS(f'Successfully wrote {static_path}/bundle.*'))
        self.stdout.write('Do not forget to collectstatic!')
|
[
"[email protected]"
] | |
b9dac58212d011f1c76f030f0631c55f20b3f02f
|
77ab593ed55a6d46b1778f6d41bc70ced3f8cd46
|
/face_ID_net/face_1024s/face_1024_vals.py
|
2b929cc2b67254c5a37f697a6093fc0d6f3d68f1
|
[] |
no_license
|
wosxcc/bot
|
e93b92fbca79a915feb186160f3f72c99218ffcb
|
c097f5455bc6264c9f778fb72900475963836153
|
refs/heads/master
| 2021-06-12T12:43:47.314071 | 2018-12-14T08:51:43 | 2018-12-14T08:51:43 | 128,619,488 | 7 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,983 |
py
|
import os
import cv2 as cv
import numpy as np
import random
import tensorflow as tf
from face_ID_net.face_1024s.ID_pb_net1024s import face_net
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
IMG_H=64
IMG_W =64
N_CLASSES =1024
learning_rate =0.001
def face_val(image_arr,run_train):
    """Run the face-embedding net on `image_arr` using the latest checkpoint.

    When `run_train` is True, `image_arr` is treated as a triplet
    (anchor, negative, positive — see the caller) and the function returns
    the (pos_d, neg_d) distance tensors; when False, a single image is fed
    and the anchor embedding is returned.

    NOTE(review): a fresh Graph/Session is built and the checkpoint is
    restored on every call, which is expensive; assumes image_arr is
    already 64x64x3 float data — confirm against the caller's resizing.
    """
    print('搞毛线啊')
    # Directory that holds the trained model checkpoints.
    log_dir = './face72/face_big1024/'
    with tf.Graph().as_default():
        # Build the network graph; batch size 1, project-defined face_net.
        graph = face_net(1, IMG_H,IMG_W, N_CLASSES,learning_rate,2,run_train)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                # global_step is parsed from the checkpoint filename but unused here.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                # No saved model found: the session continues with
                # uninitialized/default weights (prints a warning only).
                print('没有保存的模型')
            if run_train ==True:
                # Triplet mode: feed 3 images, return positive/negative distances.
                pos_d,neg_d = sess.run([graph['d_pos'],graph['d_neg']],feed_dict={graph['x']: np.reshape(image_arr, (3, 64, 64, 3))})
                return pos_d, neg_d
            elif run_train ==False:
                # Single-image mode: feed 1 image, return its embedding.
                print('下面出错了',len(image_arr),image_arr[0].shape)
                anchor_data = sess.run(graph['anchor_out'],feed_dict={graph['x']: np.reshape(image_arr, ( 1, 64, 64, 3))})
                print('上面出错了')
                return anchor_data
pacth = 'E:/faceID'  # dataset root directory; NOTE(review): name is a typo for "path"
# Run 10 random triplet evaluations: pick an identity (anchor + positive)
# and a different identity (negative), score them, and display the images.
for i in range(10):
    # Random identity folder for the anchor/positive pair.
    file = random.sample(os.listdir(pacth),1)[0]
    # Keep sampling until a *different* identity is found for the negative.
    while(1):
        negative_file= random.sample(os.listdir(pacth),1)[0]
        if negative_file!=file:
            break
    print(file,negative_file)
    anchor_img = random.sample(os.listdir(pacth+'/'+file),1)[0]
    # Positive must be a different image of the same identity as the anchor.
    while(1):
        positive_img = random.sample(os.listdir(pacth+'/'+file),1)[0]
        if anchor_img!=positive_img:
            break
    negative_img = random.sample(os.listdir(pacth+'/'+negative_file),1)[0]
    img_anchor=cv.imread(pacth+'/'+file+'/'+anchor_img)
    img_positive=cv.imread(pacth+'/'+file+'/'+positive_img)
    img_negative=cv.imread(pacth+'/'+negative_file+'/'+negative_img)
    # Large copies for display only.
    sh_anchor=cv.resize(img_anchor,(240,240),interpolation=cv.INTER_CUBIC)
    sh_positive=cv.resize(img_positive,(240,240),interpolation=cv.INTER_CUBIC)
    sh_negative=cv.resize(img_negative,(240,240),interpolation=cv.INTER_CUBIC)
    # Network input: 64x64, normalized to roughly [-0.5, 0.5).
    # Order is anchor, negative, positive — face_val's triplet mode
    # presumably expects this order; verify against face_net's graph.
    image_data=[]
    image_data.append(cv.resize(img_anchor,(64,64),interpolation=cv.INTER_CUBIC))
    image_data.append(cv.resize(img_negative,(64,64),interpolation=cv.INTER_CUBIC))
    image_data.append(cv.resize(img_positive,(64,64),interpolation=cv.INTER_CUBIC))
    image_data =np.array(image_data,dtype='float32')
    image_data =(image_data-128.0)/256.0
    # Single-image embedding for the anchor, then triplet distances.
    anchor_score = face_val(image_data[0],False)
    print(anchor_score)
    pos_d,neg_d =face_val(image_data,True)
    print(pos_d,neg_d)
    cv.imshow('anchor', sh_anchor)
    cv.imshow('positive', sh_positive)
    cv.imshow('negative', sh_negative)
    cv.waitKey()  # blocks until a key is pressed before the next triplet
    cv.destroyAllWindows()
|
[
"[email protected]"
] | |
77e79f9ef67d7b2a99e8a1c2d037a274848b9c17
|
ea3272d707f3a6e5d097301d300a0ea97ddd82b5
|
/psm/oop1/oop1_2/info_hiding_property.py
|
b12f77b06d357a78fd4c81646ba553fa9c6dce8c
|
[] |
no_license
|
gambler1541/BootCamp
|
d05850f256ed7a8baa02545551176959a66a9bb3
|
b025dd07a8fedd58366f96c9b516f134a95138f1
|
refs/heads/master
| 2020-04-07T07:21:51.363439 | 2019-06-08T11:10:27 | 2019-06-08T11:10:27 | 158,173,790 | 1 | 0 | null | 2018-11-19T06:38:36 | 2018-11-19T06:38:36 | null |
UTF-8
|
Python
| false | false | 830 |
py
|
class Account:
    """Demonstrates information hiding via a property-guarded balance.

    The balance is stored in the private ``_balance`` attribute and can
    only be changed through the ``balance`` setter, which silently drops
    negative amounts.
    """

    def __init__(self, name, money):
        self.user = name
        # This is not a plain attribute write: it routes through the
        # `balance` setter defined below.
        self.balance = money

    @property
    def balance(self):
        """Read access to the private backing field."""
        return self._balance

    @balance.setter
    def balance(self, amount):
        # Only non-negative amounts reach the backing field; a negative
        # amount is ignored and the previous value is kept.
        if amount >= 0:
            self._balance = amount
if __name__ == '__main__':
    my_acnt = Account('greg', 5000)
    # Try to set a negative balance. (The original spelled this
    # `my_acnt.balance =- 3000`, which parses as `= -3000` but reads like
    # a mistyped `-=`; written explicitly here.) The property setter
    # rejects negative amounts, so _balance keeps its value of 5000.
    my_acnt.balance = -3000
    # Read back through the balance getter; prints 5000.
    print(my_acnt.balance)
|
[
"[email protected]"
] | |
b4b2aa8f7d0110d5a1ee9b8e0de04c1e02146c12
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_zoologists.py
|
d5eaad0aa4529df66ccc13452502429859ae6960
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 234 |
py
|
# class header
class _ZOOLOGISTS():
    """Word-form entry for "ZOOLOGISTS", the plural of "zoologist"."""

    def __init__(self,):
        self.name = "ZOOLOGISTS"
        # Bug fix: the original assigned the bare name `zoologist`, which is
        # undefined here and raised NameError on instantiation. The intent
        # (cf. `self.basic` below) is the base word as a string.
        self.definitions = "zoologist"
        self.parents = []
        # NOTE(review): "childen" looks like a typo for "children", but the
        # attribute name is kept for compatibility with existing callers.
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['zoologist']
|
[
"[email protected]"
] | |
29459d2f2495bd6eabb00953ccd6e2064a3749f5
|
d82a8844c7d46c752e567cca41a8ae1c15c975f7
|
/API/urls.py
|
aaae4d1d1c0b11959a544fed6876085e896c1700
|
[] |
no_license
|
golammahmud/job_evaluations_project
|
f1be9f8f8b27c0f9db6539294ccff25254ff08f3
|
fe362f2d6bc57e1d550c39263312ef046eb7754c
|
refs/heads/master
| 2023-08-04T10:20:59.442703 | 2021-09-27T02:31:03 | 2021-09-27T02:31:03 | 410,347,863 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 679 |
py
|
from django.contrib import admin
from django.urls import path,include
from rest_framework import routers
from .views import UserInputView,UserBasedInputView
from rest_framework_simplejwt.views import TokenObtainPairView,TokenRefreshView
# DRF router: auto-generates list/detail routes for the registered viewsets.
router=routers.DefaultRouter()
router.register('all-userinputs',UserInputView)
router.register('user-based-inputs',UserBasedInputView)
urlpatterns = [
    # Mount all router-generated viewset URLs at the app root.
    path('', include(router.urls)),
    # Browsable-API login/logout views.
    path('api-auth/',include('rest_framework.urls')),
    path('get_token/', TokenObtainPairView.as_view(), name='token_obtain_pair'), # obtain JWT access/refresh pair
    path('token_refresh/', TokenRefreshView.as_view(), name='token_refresh'),# refresh the JWT access token
]
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.