blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4fd9bed4328f8591ad62960574eed263df888ec7
|
f618cb7a1b1f49c02396a2bb969cc7518fd163ab
|
/doc/_gallery/1_3_1_noisy_chirp_wv.py
|
ba10a534a72d30bbb4a32f5780d048b7422177fb
|
[] |
no_license
|
kingjr/pytftb
|
b968b8e2fc294a19cec8bf63e7d289f368ddf194
|
0bcacf5eef46bd173d90a23c00a7f4b8ee284b22
|
refs/heads/master
| 2021-01-16T22:27:05.587174 | 2015-06-25T05:16:02 | 2015-06-25T05:16:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 572 |
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 jaidev <jaidev@newton>
#
# Distributed under terms of the MIT license.

"""Wigner-Ville distribution of a noisy linear chirp.

Generates a linear-FM chirp, merges it with complex Gaussian analytic
noise at 0 dB SNR, and shows the Wigner-Ville distribution as a contour
plot.
"""

from tftb.generators import fmlin, sigmerge, noisecg
from tftb.processing.cohen import WignerVilleDistribution

# Generate a chirp signal sweeping from fmin to fmax (normalized frequency)
n_points = 128
fmin, fmax = 0.0, 0.5
signal, _ = fmlin(n_points, fmin, fmax)

# Noisy chirp: merge with analytic complex Gaussian noise at 0 dB SNR.
# Use n_points instead of the previous hard-coded 128 so the noise length
# always matches the signal length.
noisy_signal = sigmerge(signal, noisecg(n_points), 0)

# Wigner-Ville spectrum of the noisy chirp.
wvd = WignerVilleDistribution(noisy_signal)
wvd.run()
wvd.plot(kind='contour')
|
[
"[email protected]"
] | |
2f60ba606f3f3ff16f6ce61b7441c7944a9a3939
|
15f365dc711f2230073391687642498305286321
|
/Figure plotting/FIG_3.9c)_maximal allowable radial offset.py
|
d3495aa3b0b50f74b8be071296dbfa7a96ad2f13
|
[] |
no_license
|
Isabelliuqin/Optical_Levitation_Master_project_final
|
16d177ee0852361745286d4a5af8eea84aad5845
|
0ebe133a08a84e3c8521b06c6e9eec2584e0b3cc
|
refs/heads/master
| 2023-01-03T13:35:05.753240 | 2020-11-01T10:13:59 | 2020-11-01T10:13:59 | 309,067,970 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,693 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 1 16:31:57 2020
@author: liuqi

FIG 3.9c): plot the trapping efficiency Qx of an optically levitated
sphere against its radial offset, to read off the maximal allowable
radial offset. Relies on the project modules Will_Module_addwdep (force
integration) and Module_table_parameter.
"""
import scipy as sp
import numpy as np
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pylab as plt
import scipy.integrate as spi
from scipy.integrate import quad
import seaborn
from scipy.integrate import odeint
from scipy.integrate import dblquad
import Will_Module_addwdep as TQ
import Module_table_parameter as MTP
import time
integration_method = 'manual' # 'manual' or 'integrated'
grid_size = 100
plt.close('all')
###########################
#Our sphere
g = 9.8 # gravitational acceleration (m/s^2)
c = 3 * 10**8 # speed of light (m/s)
w_0 = 0.85 * 10 ** (-6) # beam waist w0 = 0.85 um (see title below)
Lambda = 1.064 * 10**(-6) # laser wavelength (m)
z_R = np.pi* w_0 ** 2 / Lambda # Rayleigh range of the Gaussian beam
rho = 30 * 10 ** (-6) # sphere radius (m), 30 um
n_0 = 1 # refractive index of the surrounding medium
n_s_n = 0.04 # real part of the sphere's refractive index
k = 7.6097 # imaginary part (absorption) of the sphere's index
n_s = n_s_n - k*1j # complex refractive index of the sphere
sig_s = 10.49 * 10 ** 3 * (( 3 ** 3 - 2.25 ** 3) / 3 ** 3 ) #density of sphere in kg/m^3
sig_0 = 0 #density of medium in kg/m^3
m = 4/3 * np.pi * rho ** 3 * ( sig_s - sig_0 ) # buoyant mass of the sphere
Permittivity = 8.85 * 10**(-12)
#P = 0.5 * c * n_0 * Permittivity #total power of the LG01 beam
P = 12 #optimal power required to levitate at w0 = 0.85um
############################################
#FIG 3.9c) maximal allowable radial offset
############################################
#x-axis: x-axis radial offset
#y-axis: Qx trapping efficiency
#key function: TQ.F_total_manual_integration
rho_0x = np.linspace(0,2*rho,100) # sweep radial offset from 0 to 2*rho
rho_0 = [0,0] # offset vector; only the y-component rho_0[1] is used below
w = np.sqrt(2) * rho #optimal beam radius
Qoplist = []
for rho_0xe in rho_0x:
    F_op = TQ.F_total_manual_integration(rho_0xe,rho_0[1], rho, n_0, n_s, w_0, w, z_R, P , target = "reflective", coordinate = 'x', grid_size = grid_size)['force_total'] #compute Qx at optimal beam radius wop, various radial offsets
    # Q = F*c/(n0*P): dimensionless trapping efficiency
    Q_op = F_op * c / ( n_0 * P )
    Qoplist.append(Q_op)
plt.plot(rho_0x/rho, np.array(Qoplist), lw=2, c="c", label="w/(sqrt(2)rho) = 1")
print ((rho_0x/rho)[np.argmin(abs(np.array(Qoplist)))]) #print the inflection point
new_ticks1 = np.linspace(0, 2 , 5) # plot axis
print(new_ticks1)
plt.xticks(new_ticks1,fontsize=20)
plt.yticks(np.linspace(-0.1, 0.05, 4),fontsize=20)
plt.rc('xtick',labelsize=20)
plt.rc('ytick',labelsize=20)
# move the x/y spines so they cross at the origin
ax = plt.gca()
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0))
ax.spines['bottom'].set_position(('data',0))
plt.legend(loc=1,fontsize=16)
plt.xlabel('rho_0x/rho',fontsize=20)
plt.ylabel('Qx',fontsize=20)
plt.title('rho = 30um, w0 = 0.85um',fontsize=20)
plt.grid()
plt.show()
|
[
"[email protected]"
] | |
a0558eff96171575b90ef92a7b59d2a7abd7f87f
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/a8ab1a0b200881f52f564d28db90f10730c1f0b5-<latest>-fix.py
|
34987d8922650e14b77fd72b4e1557dd3181ede0
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,811 |
py
|
def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos, update_only, installroot='/'):
    """Update the requested packages (or all packages, for '*') to their latest versions.

    Returns an Ansible-style result dict with 'results', 'msg', 'changed' and
    'rc' keys; calls module.fail_json() on fatal errors. In check mode the
    planned changes are reported under res['changes'] without running yum.
    """
    res = {
    }
    res['results'] = []
    res['msg'] = ''
    res['changed'] = False
    res['rc'] = 0
    # pkgs collects specs split by what has to happen to them
    pkgs = {
    }
    pkgs['update'] = []
    pkgs['install'] = []
    updates = {
    }
    update_all = False
    cmd = None
    if ('*' in items):
        update_all = True
    # 'yum check-update' exit codes: 0 = nothing to do, 100 = updates
    # available, 1 = error
    (rc, out, err) = run_check_update(module, yum_basecmd)
    if ((rc == 0) and update_all):
        res['results'].append('Nothing to do here, all packages are up to date')
        return res
    elif (rc == 100):
        updates = parse_check_update(out)
    elif (rc == 1):
        res['msg'] = err
        res['rc'] = rc
        module.fail_json(**res)
    if update_all:
        cmd = (yum_basecmd + ['update'])
        will_update = set(updates.keys())
        will_update_from_other_package = dict()
    else:
        will_update = set()
        # maps spec -> the differently-named package that provides its update
        will_update_from_other_package = dict()
        for spec in items:
            # group specs ('@group') are passed straight to yum
            if spec.startswith('@'):
                pkgs['update'].append(spec)
                will_update.add(spec)
                continue
            # local RPM file
            elif (spec.endswith('.rpm') and ('://' not in spec)):
                if (not os.path.exists(spec)):
                    res['msg'] += ("No RPM file matching '%s' found on system" % spec)
                    res['results'].append(("No RPM file matching '%s' found on system" % spec))
                    res['rc'] = 127
                    module.fail_json(**res)
                envra = local_envra(spec)
                # only install if not already present
                if (not is_installed(module, repoq, envra, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)):
                    pkgs['install'].append(spec)
                continue
            # remote RPM via URL: download first, then treat like a local file
            elif ('://' in spec):
                package = fetch_rpm_from_url(spec, module=module)
                envra = local_envra(package)
                if (not is_installed(module, repoq, envra, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)):
                    pkgs['install'].append(package)
                continue
            # plain package name: update if installed (or update_only), else install
            elif (is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot) or update_only):
                pkgs['update'].append(spec)
            else:
                pkgs['install'].append(spec)
            pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)
            if (not pkglist):
                res['msg'] += ("No package matching '%s' found available, installed or updated" % spec)
                res['results'].append(("No package matching '%s' found available, installed or updated" % spec))
                res['rc'] = 126
                module.fail_json(**res)
            # decide whether any provider of this spec actually needs work
            nothing_to_do = True
            for pkg in pkglist:
                if ((spec in pkgs['install']) and is_available(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)):
                    nothing_to_do = False
                    break
                (pkgname, _, _, _, _) = splitFilename(pkg)
                if ((spec in pkgs['update']) and (pkgname in updates)):
                    nothing_to_do = False
                    will_update.add(spec)
                    # the update is provided by a package with another name
                    if (spec != pkgname):
                        will_update_from_other_package[spec] = pkgname
                    break
            if ((not is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)) and update_only):
                res['results'].append(('Packages providing %s not installed due to update_only specified' % spec))
                continue
            if nothing_to_do:
                res['results'].append(('All packages providing %s are up to date' % spec))
                continue
            # a pending yum transaction would make the run unreliable - abort
            conflicts = transaction_exists(pkglist)
            if conflicts:
                res['msg'] += ('The following packages have pending transactions: %s' % ', '.join(conflicts))
                res['results'].append(('The following packages have pending transactions: %s' % ', '.join(conflicts)))
                res['rc'] = 128
                module.fail_json(**res)
    # check mode: report what WOULD change, do not run yum
    if module.check_mode:
        to_update = []
        for w in will_update:
            if w.startswith('@'):
                to_update.append((w, None))
            elif (w not in updates):
                other_pkg = will_update_from_other_package[w]
                to_update.append((w, ('because of (at least) %s-%s.%s from %s' % (other_pkg, updates[other_pkg]['version'], updates[other_pkg]['dist'], updates[other_pkg]['repo']))))
            else:
                to_update.append((w, ('%s.%s from %s' % (updates[w]['version'], updates[w]['dist'], updates[w]['repo']))))
        res['changes'] = dict(installed=pkgs['install'], updated=to_update)
        if (will_update or pkgs['install']):
            res['changed'] = True
        return res
    # real run: either 'yum update' (update_all) or a combined install/update
    if cmd:
        (rc, out, err) = module.run_command(cmd)
        res['changed'] = True
    elif (pkgs['install'] or will_update):
        cmd = (((yum_basecmd + ['install']) + pkgs['install']) + pkgs['update'])
        # force a C locale so yum's output can be string-matched below
        lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
        (rc, out, err) = module.run_command(cmd, environ_update=lang_env)
        out_lower = out.strip().lower()
        if ((not out_lower.endswith('no packages marked for update')) and (not out_lower.endswith('nothing to do'))):
            res['changed'] = True
    else:
        (rc, out, err) = [0, '', '']
    res['rc'] = rc
    res['msg'] += err
    res['results'].append(out)
    if rc:
        res['failed'] = True
    return res
|
[
"[email protected]"
] | |
a8cf597841bdc78c1f56b1e0b73d9efdcca7b554
|
c55bca491632ef98dfd0e39e9e197f86d4ce94f0
|
/wcoa/migrations/0019_auto_20200922_1837.py
|
6a1b7fb208ec5b9d7b5906ffb04ffb52f40aa3af
|
[
"MIT"
] |
permissive
|
Ecotrust/wcoa
|
420b2e9f03219a72f79e435c1001b87a76233a8b
|
f6ad1e42fa93560d57043ebeb8464a320befef14
|
refs/heads/main
| 2023-08-03T21:02:01.013970 | 2023-07-28T22:56:03 | 2023-07-28T22:56:03 | 196,878,615 | 1 | 1 |
MIT
| 2021-12-09T19:29:37 | 2019-07-14T20:07:39 |
Python
|
UTF-8
|
Python
| false | false | 395 |
py
|
# Generated by Django 2.2.9 on 2020-09-22 18:37
from django.db import migrations, models
class Migration(migrations.Migration):
    # Widens catalogiframepage.source to a URLField with max_length=1999.
    dependencies = [
        ('wcoa', '0018_delete_masonrypage'),
    ]
    operations = [
        migrations.AlterField(
            model_name='catalogiframepage',
            name='source',
            field=models.URLField(max_length=1999),
        ),
    ]
|
[
"[email protected]"
] | |
cd9815be7c9cc8ccdc4c8d46f182389f7124895a
|
0f6581b105ea7eb4b99dbff131378340a634e7ac
|
/pages/select_mall_page.py
|
a47f0ce03ea8ce69435593430a96ed74a92a928e
|
[] |
no_license
|
langdawang678/Se2PO
|
ded5e9f97a329f39a6de8ffaebe92330eb598eff
|
96d7eb6b4e1774b06b2fd9a4781f9bee7d8f5ed6
|
refs/heads/master
| 2023-03-25T10:44:23.140843 | 2021-03-23T09:41:39 | 2021-03-23T09:41:39 | 346,628,227 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 526 |
py
|
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from locations.goods_lib_locations import GoodsLibLocations
from common.base_page import BasePage
class SelectMallPage(BasePage):
    """Page object for the mall-selection page."""

    # Whether the exit element exists
    def get_elements_exists(self):
        """Return True if the exit link becomes visible within 10s, else False."""
        try:
            WebDriverWait(self.driver, 10).until(
                EC.visibility_of_element_located(GoodsLibLocations.exit_link))
        except Exception:
            # Narrowed from a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt. Ideally this would catch
            # selenium's TimeoutException specifically.
            return False
        else:
            return True
|
[
"[email protected]"
] | |
3ed16fe01640223215e8ecb9dd68102306c1c59b
|
592498a0e22897dcc460c165b4c330b94808b714
|
/1000번/1406_에디터.py
|
a89e92eec4a01dc869414b5d997fc614f0d9d6f9
|
[] |
no_license
|
atom015/py_boj
|
abb3850469b39d0004f996e04aa7aa449b71b1d6
|
42b737c7c9d7ec59d8abedf2918e4ab4c86cb01d
|
refs/heads/master
| 2022-12-18T08:14:51.277802 | 2020-09-24T15:44:52 | 2020-09-24T15:44:52 | 179,933,927 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 469 |
py
|
from collections import deque
import sys
readline = sys.stdin.readline

# BOJ 1406 "Editor": model the cursor as the boundary between two stacks.
left = deque(readline().strip())   # characters to the left of the cursor
right = deque()                    # characters to the right of the cursor

for _ in range(int(readline())):
    command = readline().strip()
    op = command[0]
    if op == 'L':
        # move cursor one position left
        if left:
            right.appendleft(left.pop())
    elif op == 'D':
        # move cursor one position right
        if right:
            left.append(right.popleft())
    elif op == 'B':
        # delete the character left of the cursor
        if left:
            left.pop()
    else:
        # 'P c': insert character c at the cursor
        left.append(command[2])

# emit the final buffer with no separators and no trailing newline
sys.stdout.write(''.join(left + right))
|
[
"[email protected]"
] | |
e1b448acf3b730cb600a2828622a2b86bc3e47d9
|
c9f4de7bf63df23325b477d3375a1bfb99865059
|
/main_a3.py
|
2a0057fd589f5aa522859a2167872c1f9d5d7b8e
|
[] |
no_license
|
EliHill/TextAnalysis
|
440a15dca3f467ab5d79a234582a9ca3b4c7ab10
|
44b05bd1995290bbbd7972a1f8953aa5e75be37e
|
refs/heads/master
| 2020-09-30T16:24:18.911419 | 2019-12-06T19:52:17 | 2019-12-06T19:52:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,750 |
py
|
"""main_a3.py
"""
import re
import os
import math
import nltk
from nltk.corpus import brown
from nltk.corpus import wordnet as wn
from nltk.corpus import PlaintextCorpusReader
from fsa import FSA
# NLTK stoplist with 3136 words (multilingual)
STOPLIST = set(nltk.corpus.stopwords.words())
# Vocabulary with 234,377 English words from NLTK
ENGLISH_VOCABULARY = set(w.lower() for w in nltk.corpus.words.words())
# The five categories from Brown that we are using
BROWN_CATEGORIES = ('adventure', 'fiction', 'government', 'humor', 'news')
# Global place to store Brown vocabularies so you calculate them only once
BROWN_VOCABULARIES = None
def is_content_word(word):
    """A content word starts with a letter and is not on the stoplist."""
    if not word[0].isalpha():
        return False
    return word.lower() not in STOPLIST
class Text(object):
    """A text loaded from a file or directory, exposing token/type/sentence
    counts, frequency lists and regex-based search helpers."""
    def __init__(self, path, name=None):
        """Takes a file path, which is assumed to point to a file or a directory,
        extracts and stores the raw text and also stores an instance of nltk.text.Text."""
        self.name = name
        if os.path.isfile(path):
            self.raw = open(path).read()
        elif os.path.isdir(path):
            # a directory is read as a corpus of .mrg files
            corpus = PlaintextCorpusReader(path, '.*.mrg')
            self.raw = corpus.raw()
        self.text = nltk.text.Text( nltk.word_tokenize(self.raw))
    def __len__(self):
        return len(self.text)
    def __getitem__(self, i):
        # delegate indexing/slicing to the tokenized text
        return self.text[i]
    def __str__(self):
        name = '' if self.name is None else " '%s'" % self.name
        return "<Text%s tokens=%s>" % (name, len(self))
    def token_count(self):
        """Just return the length of the text."""
        return len(self)
    def type_count(self):
        """Returns the type count, with minimal normalization by lower casing."""
        # an alternative would be to use the method nltk.text.Text.vocab()
        return len(set([w.lower() for w in self.text]))
    def sentence_count(self):
        """Return number of sentences, using the simplistic measure of counting period,
        exclamation marks and question marks."""
        # could also use nltk.sent.tokenize on self.raw
        return len([t for t in self.text if t in '.!?'])
    def most_frequent_content_words(self):
        """Return a list with the 25 most frequent content words and their
        frequencies. The list has (word, frequency) pairs and is ordered
        on the frequency."""
        dist = nltk.FreqDist([w for w in self.text if is_content_word(w.lower())])
        return dist.most_common(n=25)
    def most_frequent_bigrams(self, n=25):
        """Return a list with the n most frequent bigrams that only contain
        content words. The list returned should have pairs where the first
        element in the pair is the bigram and the second the frequency, as in
        ((word1, word2), frequency), these should be ordered on frequency."""
        filtered_bigrams = [b for b in list(nltk.bigrams(self.text))
                            if is_content_word(b[0]) and is_content_word(b[1])]
        dist = nltk.FreqDist([b for b in filtered_bigrams])
        return dist.most_common(n=n)
    def concordance(self, word):
        """Print nltk's concordance view for word."""
        self.text.concordance(word)
    ## new methods for search part of assignment 3
    def search(self, pattern):
        """Return an iterator of regex matches over the raw text."""
        return re.finditer(pattern, self.raw)
    def find_sirs(self):
        """Return sorted unique 'Sir <name>' strings found in the text."""
        answer = set()
        for match in self.search(r"\bSir \S+\b"):
            answer.add(match.group())
        return sorted(answer)
    def find_brackets(self):
        """Return sorted unique spans enclosed in matching (), [] or {} pairs."""
        answer = set()
        # use a non-greedy match on the characters between the brackets
        for match in self.search(r"([\(\[\{]).+?([\)\]\}])"):
            brackets = "%s%s" % (match.group(1), match.group(2))
            # this tests for matching pairs
            if brackets in ['[]', '{}', '()']:
                answer.add(match.group())
        return sorted(answer)
    def find_roles(self):
        """Return sorted unique line-initial ALL-CAPS prefixes before ': '
        (speaker roles in a script-style text)."""
        answer = set()
        for match in re.finditer(r"^([A-Z]{2,}[^\:]+): ", self.raw, re.MULTILINE):
            answer.add(match.group(1))
        return sorted(answer)
    def find_repeated_words(self):
        """Return sorted unique spans where a word of 3+ characters occurs
        three times in a row."""
        answer = set()
        for match in self.search(r"(\w{3,}) \1 \1"):
            answer.add(match.group())
        return sorted(answer)
    def apply_fsa(self, fsa):
        """Run fsa over the token stream; return (start_index, match) pairs
        for every non-overlapping match."""
        i = 0
        results = []
        while i < len(self):
            match = fsa.consume(self.text[i:])
            if match:
                results.append((i, match))
                i += len(match)
            else:
                i += 1
        return results
class Vocabulary():
    """Class to store all information on a vocabulary, where a vocabulary is created
    from a text. The vocabulary includes the text, a frequency distribution over
    that text, the vocabulary items themselves (as a set) and the sizes of the
    vocabulary and the text. We do not store POS and gloss, for those we rely on
    WordNet. The vocabulary is constrained to those words that occur in a
    standard word list. Vocabulary items are not normalized, except for being in
    lower case."""
    def __init__(self, text):
        self.text = text.text
        # keeping the unfiltered list around for statistics
        self.all_items = set([w.lower() for w in text])
        self.items = self.all_items.intersection(ENGLISH_VOCABULARY)
        # restricting the frequency dictionary to vocabulary items
        self.fdist = nltk.FreqDist(t.lower() for t in text if t.lower() in self.items)
        self.text_size = len(self.text)
        self.vocab_size = len(self.items)
    def __str__(self):
        return "<Vocabulary size=%d text_size=%d>" % (self.vocab_size, self.text_size)
    def __len__(self):
        return self.vocab_size
    def frequency(self, word):
        """Return the frequency of word within the vocabulary items."""
        return self.fdist[word]
    def pos(self, word):
        """Return the WordNet POS of word, 'n' when WordNet has no synsets
        for it, or None when the word is not in the vocabulary."""
        # do not volunteer the pos for words not in the vocabulary
        if word not in self.items:
            return None
        synsets = wn.synsets(word)
        # somewhat arbitrary choice to make unknown words nouns, returning None
        # or 'UNKNOWN' would have been fine too.
        return synsets[0].pos() if synsets else 'n'
    def gloss(self, word):
        """Return the WordNet gloss of word, 'NO DEFINITION' when WordNet has
        none, or None when the word is not in the vocabulary."""
        # do not volunteer the gloss (definition) for words not in the vocabulary
        if word not in self.items:
            return None
        synsets = wn.synsets(word)
        # make a difference between None for words not in vocabulary and words
        # in the vocabulary that do not have a gloss in WordNet
        return synsets[0].definition() if synsets else 'NO DEFINITION'
    def kwic(self, word):
        """Print a keyword-in-context (concordance) view for word."""
        self.text.concordance(word)
|
[
"[email protected]"
] | |
e0cca15b4698cfcef55c59c32ad1ec019b327f0b
|
b576ed1ff65700d505f687961cbed86fe94b1c3f
|
/objectModel/Python/cdm/utilities/copy_data_utils.py
|
52fd4d1ee5390f942bbde1ef66b2b5cca9e4104f
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
AzureMentor/CDM
|
c80761737c92cf6561d4b982b4882b1b1c5265d3
|
84d3928995e7ab3bba0a283771e5e26639408643
|
refs/heads/master
| 2021-11-30T17:52:42.274900 | 2021-11-27T18:38:19 | 2021-11-27T18:38:19 | 217,569,642 | 1 | 0 |
NOASSERTION
| 2021-11-27T18:38:20 | 2019-10-25T16:04:16 |
Java
|
UTF-8
|
Python
| false | false | 870 |
py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information
from typing import Union, List, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from cdm.objectmodel import CdmCollection, CdmObject
from cdm.utilities import ResolveOptions, CopyOptions
def _array_copy_data(res_opt: 'ResolveOptions', source: Union['CdmCollection', List['CdmObject']], options: 'CopyOptions') -> Optional[List]:
"""Creates a list object that is a copy of the input IEnumerable object"""
if not source:
return None
casted = []
for elem in source:
if elem:
from cdm.persistence import PersistenceLayer
data = PersistenceLayer.to_data(elem, res_opt, options, PersistenceLayer.CDM_FOLDER)
casted.append(data)
return casted
|
[
"[email protected]"
] | |
7ff960b1f5fe2ab8db39e70e382084c495881cb8
|
1b12e6096c47312b67fa6ff223216945d2efb70c
|
/sandbox/vtk/selection/myinteractor.py
|
139202e49f1fe0d1418bde34dcae5a42beb929c2
|
[
"Apache-2.0"
] |
permissive
|
rboman/progs
|
6e3535bc40f78d692f1f63b1a43193deb60d8d24
|
03eea35771e37d4b3111502c002e74014ec65dc3
|
refs/heads/master
| 2023-09-02T17:12:18.272518 | 2023-08-31T15:40:04 | 2023-08-31T15:40:04 | 32,989,349 | 5 | 2 |
Apache-2.0
| 2022-06-22T10:58:38 | 2015-03-27T14:04:01 |
MATLAB
|
UTF-8
|
Python
| false | false | 4,329 |
py
|
# -*- coding: utf-8 -*-
import vtk
colors = vtk.vtkNamedColors()
class MyInteractorStyle(vtk.vtkInteractorStyleTrackballCamera):
    """Trackball-camera interactor style that lets the user pick triangle
    cells with the left mouse button and highlights the accumulated
    selection in red."""
    def __init__(self, parent=None):
        """Register event observers and create the highlight pipeline objects."""
        self.AddObserver("LeftButtonPressEvent", self.leftButtonPressEvent)
        self.selection = None                           # vtkSelection accumulated across picks
        self.selected_mapper = vtk.vtkDataSetMapper()   # mapper for the highlighted cells
        self.selected_actor = vtk.vtkActor()            # actor displaying the highlighted cells
        self.dataset = None                             # dataset locked in by the first valid pick
    def select_one(self):
        """Pick the cell under the cursor; if it is a triangle of the tracked
        dataset, add it to the selection and refresh the highlight actor."""
        # get the mouse click position
        clickPos = self.GetInteractor().GetEventPosition()
        # create a picker and pick at that position
        picker = vtk.vtkCellPicker()
        picker.Pick(clickPos[0], clickPos[1], 0, self.GetDefaultRenderer())
        print("pick")
        print(f"\tcell id = {picker.GetCellId()}")
        print(f"\t3D pick position = {picker.GetPickPosition()}")
        print(f"\t2D mouse position = {picker.GetSelectionPoint()[:2]}")
        # the picking could be empty; in that case, we leave the routine
        if picker.GetDataSet():
            print(f"\tdataset = {picker.GetDataSet().GetClassName()}")
        else:
            print(f"\tdataset = None")
            return
        # no cell has been picked => quit
        if picker.GetCellId() == -1:
            return
        # cell type - we can pick triangles, but also tetras
        cell_type = picker.GetDataSet().GetCellType(picker.GetCellId())
        print(f"\tcell type = { vtk.vtkCellTypes.GetClassNameFromTypeId( cell_type )}")
        # 'is None' below (instead of '== None') is the idiomatic identity test
        if cell_type != vtk.VTK_TRIANGLE:
            print("\tWRONG CELL TYPE")
            return
        # we can pick the wrong ugrid (the red one);
        # we store the right one at the first successful picking
        if self.dataset is None:
            self.dataset = picker.GetDataSet()
        if picker.GetDataSet() != self.dataset:
            print(f"\tPICKED WRONG DATASET!")
            return
        # -- create a "vtkSelectionNode" (selection data + selection type)
        ids = vtk.vtkIdTypeArray()
        ids.SetNumberOfComponents(1)
        ids.InsertNextValue(picker.GetCellId())
        selectionNode = vtk.vtkSelectionNode()
        selectionNode.SetFieldType(vtk.vtkSelectionNode.CELL)
        # CELL,POINT,FIELD,VERTEX,EDGE,ROW
        selectionNode.SetContentType(vtk.vtkSelectionNode.INDICES)
        # SELECTIONS,GLOBALIDS,PEDIGREEIDS,VALUES,INDICES,FRUSTUM,
        # LOCATIONS,THRESHOLDS,BLOCKS,QUERY
        selectionNode.SetSelectionList(ids)
        # -- create a "vtkSelection" (the selection itself):
        # it is a set of "selection nodes"
        if not self.selection:
            self.selection = vtk.vtkSelection()
            self.selection.AddNode(selectionNode)
        else:
            self.selection.Union(selectionNode)
        print( f"\tThere are {self.selection.GetNumberOfNodes()} 'selection nodes'.")
        # -- DISPLAY: create a "vtkExtractSelection"
        extractSelection = vtk.vtkExtractSelection()
        extractSelection.SetInputData(0, picker.GetDataSet())
        # extractSelection.SetInputConnection(0, filt.GetOutputPort())  # filter case
        extractSelection.SetInputData(1, self.selection)
        extractSelection.Update()
        # build a ugrid for display
        selected = vtk.vtkUnstructuredGrid()
        selected.ShallowCopy(extractSelection.GetOutput())
        print( f"\tThere are {selected.GetNumberOfPoints()} points in the selection.")
        print( f"\tThere are {selected.GetNumberOfCells()} cells in the selection.")
        self.selected_mapper.SetInputData(selected)
        self.selected_actor.SetMapper(self.selected_mapper)
        self.selected_actor.GetProperty().EdgeVisibilityOn()
        self.selected_actor.GetProperty().SetColor(colors.GetColor3d('red'))
        self.selected_actor.GetProperty().SetLineWidth(3)
        # global actor - AddActor is a no-op if it was already added
        self.GetDefaultRenderer().AddActor(self.selected_actor)
        print(f'nb of actors = {self.GetDefaultRenderer().GetActors().GetNumberOfItems()}')
    def leftButtonPressEvent(self, obj, event):
        """Custom left-click handler: pick, then fall through to the camera."""
        self.select_one()
        self.OnLeftButtonDown()  # calls vtk.vtkInteractorStyleTrackballCamera
|
[
"[email protected]"
] | |
bf2df9013b94ee7ca80c35660b101bf47f905569
|
bd4f8320118c4fb25b95d29193c1adb2f5b55ec6
|
/contrib/userproperty_lint.py
|
7d99b16806929b36131ad944ccb545cac48d4c45
|
[
"Apache-2.0"
] |
permissive
|
Khan/khan-linter
|
30229d57ec82466af54b539eb3a57770335e0d65
|
9222e8f8c9aa6dead5c434d1eb7bb326207ed989
|
refs/heads/master
| 2023-07-21T05:06:19.757797 | 2022-07-11T16:54:42 | 2022-07-11T16:54:42 | 4,628,579 | 26 | 8 |
Apache-2.0
| 2023-09-06T21:29:52 | 2012-06-11T18:29:03 |
Python
|
UTF-8
|
Python
| false | false | 1,639 |
py
|
"""Linter that warns about using the dangerous UserProperty.
UserProperty's user_id value can change depending on whether or not Google
currently has a Google account registered w/ an email address that matches
UserProperty's email property. That means when a user changes email settings
in their Google account it can change the behavior of our queries. We don't
want that.
"""
from __future__ import absolute_import
import re
from shared.testutil import lintutil
# This captures any use of UserProperty on a db or ndb model. It will not
# capture subclasses of UserProperty, but we don't expect any of those to be
# around.
_USERPROPERTY_RE = re.compile(r'\bn?db\.UserProperty\(', re.DOTALL)
def lint_no_user_property(files_to_lint):
    """Enforce that nobody uses UserProperty.

    ...unless marked as an explicitly approved legacy usage via @Nolint.
    Yields (filename, linenum, message) tuples for each violation.
    """
    for filename in lintutil.filter(files_to_lint, suffix='.py'):
        contents = lintutil.file_contents(filename)
        for usage in _USERPROPERTY_RE.finditer(contents):
            # The @Nolint marker anywhere on the rest of the line waives
            # this usage as approved legacy code.
            line_end = contents.find('\n', usage.end())
            if line_end == -1:
                line_end = len(contents)
            if '@Nolint' in contents[usage.start():line_end]:
                continue
            lineno = 1 + contents.count('\n', 0, usage.start())
            yield (filename, lineno,  # filename and linenum
                   "Do not use UserProperty, it is not safe. Use UserData's "
                   "key as its foreign key, instead.")
|
[
"[email protected]"
] | |
95b5c45037161cace8ce3128cfd2bf49dc2bb7b6
|
fc6eefb980b53baae393980c46ac40d256687014
|
/Udacity-Intro-To-Computer-Science/Lesson 1/Lesson 1 - Quizzes/Final Quiz.py
|
8aa9f447ce3f3fde860303b34c61711a69cb1cb7
|
[] |
no_license
|
Brian-Mascitello/UCB-Third-Party-Classes
|
7bc151d348f753f93850f5e286c263639f782b05
|
e2d26e3d207d364462024759ad2342a8e172f657
|
refs/heads/master
| 2021-01-02T09:10:01.146169 | 2018-10-08T00:19:58 | 2018-10-08T00:19:58 | 99,150,324 | 0 | 0 | null | 2018-02-01T06:33:25 | 2017-08-02T18:47:29 |
Python
|
UTF-8
|
Python
| false | false | 780 |
py
|
# Assign to the variable `url` a string holding the value of the first URL
# that appears in a link tag inside the string `page`, then print it
# (http://udacity.com). The code keeps working if page is changed to e.g.
#   page = '<a href="http://udacity.com">Hello world</a>'
# because the positions are computed from the markup, not hard-coded.

# page = contents of a web page
page =('<div id="top_bin"><div id="top_content" class="width960">'
'<div class="udacity float-left"><a href="http://udacity.com">')

anchor = '<a href='
start_link = page.find(anchor)                  # start of the link tag
end_link = page.find('>', start_link)           # closing '>' of that tag
start_position = start_link + len(anchor) + 1   # skip past the opening quote
end_position = end_link - 1                     # drop the closing quote
url = page[start_position:end_position]
print(url)
|
[
"[email protected]"
] | |
2c6de1d98469164b77e496a0c33bfd4a67f22e17
|
1f5420fda4359bfc21b53de3a5f6e6a93b47b996
|
/ch02/ch02_menu.py
|
5abfa489c2386f900a6c3f914341bd20f4c6a22b
|
[] |
no_license
|
fl0wjacky/wxPython
|
600f5bfccad3ef5589e11573b30cffd1e2708b83
|
50b3cd5a63750d36065684b73aab0da70ff650a7
|
refs/heads/master
| 2022-09-02T04:24:47.540157 | 2022-08-10T04:13:17 | 2022-08-10T04:13:17 | 13,976,582 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,071 |
py
|
#! /usr/bin/env python
import wx
import wx.py.images as images
class ToolbarFrame(wx.Frame):
    """Frame demonstrating a toolbar, a status bar and a two-menu menu bar."""
    def __init__(self, parent, id):
        wx.Frame.__init__(self, parent, id, 'Toolbars', size=(300,200))
        panel = wx.Panel(self)
        panel.SetBackgroundColour('White')
        statusBar = self.CreateStatusBar()  # 1: status bar along the bottom edge
        toolbar = self.CreateToolBar()  # 2: toolbar under the title bar
        toolbar.AddSimpleTool(wx.NewId(),images.getPyBitmap(),"New","Long help for 'New'")  # 3: tool = bitmap + short/long help
        toolbar.Realize()  # 4: lay out the tools; required before showing
        menuBar = wx.MenuBar()
        menu1 = wx.Menu()
        menuBar.Append(menu1,"&File")
        menu2 = wx.Menu()
        # 6: Edit-menu items; '&' marks the keyboard accelerator
        menu2.Append(wx.NewId(),"&Copy","Copy in status bar")
        menu2.Append(wx.NewId(),"C&ut","")
        menu2.Append(wx.NewId(),"Paste","")
        menu2.AppendSeparator()
        menu2.Append(wx.NewId(),"&Options...","Display Options")
        menuBar.Append(menu2,"&Edit")
        self.SetMenuBar(menuBar)
if __name__ == "__main__":
    # NOTE(review): wx.PySimpleApp is deprecated in modern wxPython;
    # wx.App(False) would be the current entry point - confirm before changing.
    app = wx.PySimpleApp()
    frame = ToolbarFrame(parent=None, id = -1)
    frame.Show()
    app.MainLoop()
|
[
"[email protected]"
] | |
9e28e0cd12e58048913b3c3764cd180e05af5636
|
9e41adf86b2c166a219f0b6d9371089c5f2d7d93
|
/Exerciciospython2/Função/e100.py
|
0b47e1bb8952e250e0f02facf33b98bfe7653f2f
|
[] |
no_license
|
Nadirlene/Exercicios-python
|
1aaead61dd0efcb5303f6294e765e9e1d54506cc
|
3fe82e166003922ef749756a249840ed1fe940b0
|
refs/heads/main
| 2022-12-25T21:35:06.172839 | 2020-09-28T15:08:37 | 2020-09-28T15:08:37 | 299,343,047 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 514 |
py
|
from random import randint
from time import sleep
númerosSorteados = []
def sorteio(lista, quantidade=5, atraso=0.3):
    """Append `quantidade` random integers in [1, 10] to `lista`, announcing each one.

    Fixes two defects of the original: the header printed ``len(lista)``
    (0 for an empty list) instead of the number of values being drawn, and
    the drawn value was echoed via ``lista[c]``, which printed pre-existing
    elements when `lista` was not empty. `quantidade` (count) and `atraso`
    (per-draw delay in seconds) are backward-compatible generalizations of
    the previously hard-coded 5 and 0.3.
    """
    print(f'Sorteando {quantidade} valores da lista:', end=' ')
    for _ in range(quantidade):
        valor = randint(1, 10)
        lista.append(valor)
        print(valor, end=' ')
        sleep(atraso)  # small pause for the "drawing" effect
    print('PRONTO!')
def somaPar(lista):
    """Print the sum of the even values found in `lista`."""
    soma = sum(valor for valor in lista if valor % 2 == 0)
    print(f'Somando os valores pares de {lista}, temos {soma}')
# Draw five random values, then report the sum of the even ones.
sorteio(númerosSorteados)
somaPar(númerosSorteados)
|
[
"[email protected]"
] | |
b21ef021ca3d6afdf535882ef61eb49b75bf895c
|
8b7db851e13737d5c44cc00d38a46a2817c7707b
|
/tests/train.py
|
788e79cd09e75082a8dc8cf4d75b3dd063b824b5
|
[
"MIT"
] |
permissive
|
goelshivam1210/gym-novel-gridworlds
|
b6f24b38cfceb2b44461da9bb7607c56d27f4a9e
|
c8f419da02e4fd716b9e293fcf0b99ee2eb96367
|
refs/heads/master
| 2023-01-15T13:46:23.438199 | 2020-11-23T14:42:13 | 2020-11-23T14:42:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,349 |
py
|
import os
import time
import gym
import gym_novel_gridworlds
import numpy as np
from stable_baselines.common.env_checker import check_env
from stable_baselines import PPO2
from stable_baselines import DQN
from stable_baselines.gail import ExpertDataset
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common import make_vec_env
from stable_baselines.bench import Monitor
from stable_baselines.common.callbacks import BaseCallback
from stable_baselines.results_plotter import load_results, ts2xy
class RenderOnEachStep(BaseCallback):
    """
    Callback that renders the wrapped environment on every training step.
    (The previous docstring was copy-pasted from the model-saving callback.)
    """
    def __init__(self, env):
        super(RenderOnEachStep, self).__init__()
        self.env = env
    def _on_step(self):
        self.env.render()
        # time.sleep(0.5)
class SaveOnBestTrainingRewardCallback(BaseCallback):
    """
    Callback for saving a model (the check is done every ``check_freq`` steps)
    based on the training reward (in practice, we recommend using ``EvalCallback``).
    """
    def __init__(self, check_freq, log_dir, model_name):
        super(SaveOnBestTrainingRewardCallback, self).__init__()
        self.check_freq = check_freq
        self.log_dir = log_dir  # directory monitored by the Monitor wrapper
        self.save_path = os.path.join(log_dir, model_name)
        self.best_mean_reward = -np.inf  # best mean reward seen so far
    def _on_step(self):
        # only check every `check_freq` calls to keep overhead low
        if self.n_calls % self.check_freq == 0:
            # Retrieve training reward
            x, y = ts2xy(load_results(self.log_dir), 'timesteps')
            if len(x) > 0:
                # Mean training reward over the last 100 episodes
                mean_reward = np.mean(y[-100:])
                # New best model, you could save the agent here
                if mean_reward > self.best_mean_reward:
                    self.best_mean_reward = mean_reward
                    print("Saving new best model to {}".format(self.save_path))
                    self.model.save(self.save_path)
class RemapActionOnEachStep(BaseCallback):
    """
    Callback that calls ``env.remap_action()`` every ``step_num`` steps.

    NOTE(review): the previous docstring was copy-pasted from a
    model-saving callback and did not describe this class.
    """
    def __init__(self, env, step_num):
        super(RemapActionOnEachStep, self).__init__()
        self.env = env
        self.step_num = step_num  # remap interval, in environment steps
    def _on_step(self):
        if self.n_calls % self.step_num == 0:
            # self.env = remap_action(self.env)
            self.env.remap_action()
if __name__ == "__main__":
    # Experiment configuration: environment id, training budget, output naming.
    env_id = 'NovelGridworld-v3'
    timesteps = 200000 # 200000
    experiment_dir = 'results2' # 'models', results
    experiment_code1 = env_id + '_' + str(timesteps)
    experiment_code2 = '_' + '8beams0filled40range3items_in_360degrees_lfd' # lfd
    model_code = experiment_code1 + experiment_code2
    log_dir = experiment_dir + os.sep + env_id + experiment_code2
    pretrain = True  # if True, behaviour-clone from recorded demos before RL
    os.makedirs(log_dir, exist_ok=True)
    env = gym.make(env_id)
    # Monitor writes per-episode stats to log_dir (consumed by the save-best callback).
    env = Monitor(env, log_dir)
    # callback = RenderOnEachStep(env)
    callback = SaveOnBestTrainingRewardCallback(1000, log_dir, model_code + '_best_model')
    # callback = RemapActionOnEachStep(env, 50000)
    # multiprocess environment
    # env = make_vec_env('NovelGridworld-v0', n_envs=4)
    check_env(env, warn=True)
    # Optional: PPO2 requires a vectorized environment to run
    # the env is now wrapped automatically when passing it to the constructor
    # env = DummyVecEnv([lambda: env])
    # model = PPO2(MlpPolicy, env, verbose=1)
    env = DummyVecEnv([lambda: env])
    # Warm-start from a previously trained checkpoint rather than from scratch.
    model = PPO2.load('NovelGridworld-v3_200000_8beams0filled40range3items_in_360degrees_lfd_OLD', env)
    # Pretrain the model from human recorded dataset
    # specify `traj_limitation=-1` for using the whole dataset
    if pretrain:
        dataset = ExpertDataset(expert_path='expert_NovelGridworld-v3_50demos2.npz', traj_limitation=-1, batch_size=128)
        model.pretrain(dataset, n_epochs=2000)
        model.save(model_code)
    # model.learn(total_timesteps=timesteps)
    model.learn(total_timesteps=timesteps, callback=callback)
    model.save(model_code + '_last_model')
|
[
"[email protected]"
] | |
e0bd0c8393e10d70cd1d7736fc15a898d1f059dc
|
2e858717fbc3b74cc809dc5d60d337a844ae7fed
|
/codegolf/planets.py
|
a4f5b0a908013fcda517843121fbb9b541e6773d
|
[] |
no_license
|
maxbergmark/misc-scripts
|
95a1b5416c34e65b7e8ef26f5c941f9ba0ae0986
|
a1b3b889f8f6d28a452969a62af637a6866b69d3
|
refs/heads/master
| 2020-03-28T10:32:38.362737 | 2019-09-20T12:23:14 | 2019-09-20T12:23:14 | 148,118,739 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,108 |
py
|
def get_score(l, s):
    """Count the positions at which sequences *l* and *s* hold equal elements."""
    return sum(int(a == b) for a, b in zip(l, s))
def check_modulo(l, n):
    """If the residues of *l* modulo *n* are pairwise distinct, return
    (1, order-score of the residue set); otherwise (0, 0)."""
    residues = list({v % n for v in l})
    if len(residues) != len(l):
        return 0, 0
    return 1, get_score(sorted(residues), residues)
def check_modulo_sq(l, n):
    """Variant of check_modulo testing squares: v*v mod n must be distinct."""
    residues = list({v * v % n for v in l})
    if len(residues) != len(l):
        return 0, 0
    return 1, get_score(sorted(residues), residues)
def check_modulo_cu(l, n):
    """Variant of check_modulo testing cubes: v**3 mod n must be distinct."""
    residues = list({v * v * v % n for v in l})
    if len(residues) != len(l):
        return 0, 0
    return 1, get_score(sorted(residues), residues)
l0 = [7238995, 32199698004604234, 121437875888467, 126948200247893, 28550423391528270, 448630251845, 495891408214, 1936875853, 7306076016364904775, 474081421652, 34184320687170893, 8031170932136239427, 28489, 1852796749, 107135534003525, 121424973492820, 478695222352, 1936290373, 107088256000328, 27418995543271764]
l1 = [358452458835, 5899229669892068223989509551434, 100801060862113215052800339, 103298841739860633878360661, 6211190611757106977975624033614, 1279847143846962159941, 1593728898723042190678, 21780717397762381, 370629223365341456449924529812037959, 1557125307789592521044, 6131964786814545525129908217165, 349859873446802695454943217443430723, 4812617, 21796097591570253, 83970509390754835569210693, 102090063924849785520616020, 1483554806647179537488, 19547570626458181, 87502894712962091220033864, 6687802272730805039891221866836]
l2 = [5469550, 20958273942611314, 91678030787182, 93949749261683, 22066581848026725, 297987634280, 371068925299, 1298231923, 5143513717239276645, 362546487662, 21785115176039033, 4855281086163547247, 18799, 1299148654, 76370733396065, 92850372243310, 345417020527, 1165126003, 79583419131233, 23759846615443809]
l3 = [474414806382, 9063409245936133368934076540274, 133522356591788631960941166, 139581022297794930405176691, 8036229759209788198835098840677, 1926852259672153551976, 2129837380648217507187, 32495384557351539, 526458259597464047712858951498687589, 2036164201638295527790, 9622030869291023328877655454329, 578706854677080430464104555890308207, 7293295, 31084771269373806, 117796765384867275302989921, 133508170257748661844078446, 2055980324755107837039, 32485561834039667, 117744782670614057051841889, 7717761131972000546125574465889]
l4 = [7695955, 33060607136195914, 129142996492627, 129138701525333, 33060598512444750, 500135649605, 504447788374, 1936875853, 8750051408287654215, 500068606292, 34187606587958093, 8391173042187362627, 28489, 1869573709, 129134373069125, 128034844600660, 504464632912, 1936877893, 129112712765768, 32772496317047124]
# lt = [l0, l1, l2, l3, l4]
lt = [[53, 104]]
# Counters of how many moduli produced all-distinct residues for the
# plain / squared / cubed variants.
c0 = 0
c1 = 0
c2 = 0
max_score = 0
# Brute-force search over moduli for one that maps each list to distinct
# residues; report whenever a new best order-score is found.
for i in range(2, 10000000000):
    for l in lt:
        res = check_modulo(l, i)
        # res_sq = check_modulo_sq(l, i)
        # res_cu = check_modulo_cu(l, i)
        res_sq = 0, 0
        res_cu = 0, 0
        c0 += res[0]
        c1 += res_sq[0]
        c2 += res_cu[0]
        if i % 10000 == 0:
            # Progress line, rewritten in place via carriage return.
            print("\r%d (%d %d %d)" % (i, c0, c1, c2), end="")
        if res[1] > max_score or res_sq[1] > max_score or res_cu[1] > max_score:
            print("\n%d %s %s %s %d" % (i, res, res_sq, res_cu, len(l)))
            max_score = max(res[1], res_sq[1], res_cu[1])
|
[
"[email protected]"
] | |
b20eb31e621b6af9cf0b1d9291f57832e0b170b2
|
e00186e71a1f52b394315a0cbc27162254cfffb9
|
/durga/without_rest_models/testapp/models.py
|
6cac7e2e06e3e0b26b958a2b5e56c8110c3d2e6b
|
[] |
no_license
|
anilkumar0470/git_practice
|
cf132eb7970c40d0d032520d43e6d4a1aca90742
|
588e7f654f158e974f9893e5018d3367a0d88eeb
|
refs/heads/master
| 2023-04-27T04:50:14.688534 | 2023-04-22T05:54:21 | 2023-04-22T05:54:21 | 100,364,712 | 0 | 1 | null | 2021-12-08T19:44:58 | 2017-08-15T10:02:33 |
Python
|
UTF-8
|
Python
| false | false | 240 |
py
|
from django.db import models
# Create your models here
class Employee(models.Model):
    """Simple employee record: number, name, salary, address."""
    eno = models.IntegerField()  # employee number
    ename = models.CharField(max_length=64)  # employee name
    esal = models.FloatField()  # salary
    eadd = models.CharField(max_length=64)  # address
|
[
"[email protected]"
] | |
3a32e88f924763cebc773b157f5ade3bbf566316
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/9BJzrtpdMP8JFQg74_5.py
|
54241e25ddb6e70ccec2ec15821dcacf5ef26e29
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 652 |
py
|
"""
Create a function that given a list, it returns the index where if split in
two-subarrays (last element of the first array has index of (foundIndex-1)),
the sum of them are equal.
### Examples
twins([10, 20, 30, 5, 40, 50, 40, 15]) ➞ 5
# foundIndex 5 : [10+20+30+5+40]=[50+40+15]
twins([1, 2, 3, 4, 5, 5]) ➞ 4
# [1, 2, 3, 4] [5, 5]
twins([3, 3]) ➞ 1
### Notes
Return only the foundIndex, not the divided list.
"""
def twins(lst):
    """Return the index i (1 <= i < len(lst)) at which *lst* splits into two
    halves with equal sums, or None when no such split exists.

    Improvement: the original re-built and re-summed both halves at every
    candidate split (O(n^2)); a running left-hand sum gives the same answer
    in O(n). For integer inputs, left == total - left iff 2*left == total.
    """
    total = sum(lst)
    left = 0
    for i in range(1, len(lst)):
        left += lst[i - 1]
        if 2 * left == total:
            return i  # first qualifying split, as in the original
|
[
"[email protected]"
] | |
b9c2ab2a145c713904bc1750e4837b1d3b4cc7d7
|
bbfa3b7ee2008617d33a7c5c7770d22e1aa8836b
|
/Neural_Network/_base.py
|
4297021e909f92cc59ba0f6ba4d9070986e15fba
|
[
"MIT"
] |
permissive
|
luoshao23/ML_algorithm
|
1a0046ce9c3abed029cceffa35defe57fffa82b2
|
6e94fdd0718cd892118fd036c7c5851cf3e6d796
|
refs/heads/master
| 2021-08-07T08:38:16.102455 | 2020-03-18T06:49:43 | 2020-03-18T06:49:43 | 92,467,636 | 4 | 1 |
MIT
| 2018-01-16T05:01:29 | 2017-05-26T03:20:08 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,611 |
py
|
from scipy.special import expit as logistic_sigmoid
import numpy as np
def identity(X):
    """Identity activation: return X unchanged (no copy)."""
    return X
def logistic(X):
    """Logistic sigmoid activation, computed in place on X."""
    return logistic_sigmoid(X, out=X)
def tanh(X):
    """Hyperbolic tangent activation, computed in place on X."""
    return np.tanh(X, out=X)
def relu(X):
    """Rectified linear unit: clamp negatives to 0, in place on X."""
    return np.clip(X, 0, np.finfo(X.dtype).max, out=X)
def softmax(X):
    """Row-wise softmax of a 2-D array, computed in place on X and returned."""
    # Subtract each row's max before exponentiating for numerical stability.
    shifted = X - np.max(X, axis=1, keepdims=True)
    np.exp(shifted, out=X)
    X /= np.sum(X, axis=1, keepdims=True)
    return X
def deriv_identity(a, delta):
    """Backprop through identity: gradient unchanged (intentional no-op)."""
def deriv_logistic(a, delta):
    """Backprop through logistic: delta *= a * (1 - a), in place."""
    delta *= a
    delta *= (1.0 - a)
def deriv_tanh(a, delta):
    """Backprop through tanh: delta *= 1 - a**2, in place."""
    delta *= (1.0 - a**2)
def deriv_relu(a, delta):
    """Backprop through relu: zero the gradient where activation <= 0."""
    delta[a <= 0] = 0
def squared_loss(y_true, y_pred):
    """Half of the mean squared error between targets and predictions."""
    err = y_true - y_pred
    return (err ** 2).mean() / 2
def log_loss(y_true, y_prob):
    """Multiclass cross-entropy, averaged over samples.

    Probabilities are clipped away from 0/1 to avoid log(0); single-column
    inputs are expanded to two columns (1-p, p) so binary targets can be
    scored with the same formula.
    """
    y_prob = np.clip(y_prob, 1e-10, 1 - 1e-10)
    if y_prob.shape[1] == 1:
        y_prob = np.append(1 - y_prob, y_prob, axis=1)
    if y_true.shape[1] == 1:
        y_true = np.append(1 - y_true, y_true, axis=1)
    return -np.sum(y_true * np.log(y_prob)) / y_prob.shape[0]
def binary_log_loss(y_true, y_prob):
    """Binary cross-entropy, averaged over samples (probabilities clipped)."""
    y_prob = np.clip(y_prob, 1e-10, 1 - 1e-10)
    return -np.sum(y_true * np.log(y_prob) +
                   (1 - y_true) * np.log(1 - y_prob)) / y_prob.shape[0]
ACTIVATIONS = {'identity': identity, 'logistic': logistic,
'tanh': tanh, 'relu': relu, 'softmax': softmax}
DERIVATIVES = {'identity': deriv_identity, 'logistic': deriv_logistic,
'tanh': deriv_tanh, 'relu': deriv_relu}
LOSS_FUNCTIONS = {'squared_loss': squared_loss, 'log_loss': log_loss,
'binary_log_loss': binary_log_loss}
|
[
"[email protected]"
] | |
1d11db0aa1ed010ab524edc4b7847c5ce929f009
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_089/ch78_2020_04_13_14_24_17_375788.py
|
f3ffd533ba171b545632219f377a3046205a82d8
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 429 |
py
|
jogo = True
import math
# BUG FIXES — the original file did not parse:
#   * `if e not int dic2` -> `if e not in dic2`
#   * unbalanced parenthesis in the result string
#   * `return r` at module level (no enclosing function) -> print(r)
#   * the "winner" placeholder printed the time (dic2[e]) instead of the name
while jogo:
    a = input("Qual o nome?")
    if a == "sair":
        jogo = False
    else:
        b = float(input("Aceleracao?"))
        dic = {a: b}
        dic2 = {}
        soma = 0
        for e in dic:
            if e not in dic2:
                # Time to travel 200 m from rest: t = sqrt(200 / aceleracao)
                # (presumably from s = a*t^2/2 with s = 100 m — TODO confirm).
                dic2[e] = math.sqrt(200 / dic[e])
            if dic2[e] > soma:
                soma = dic2[e]
        r = 'O vencedor é {0} com tempo de conclusão de {1} s'.format(e, soma)
        print(r)
|
[
"[email protected]"
] | |
f2016ead70d10ced68bab597dac0c22bfd28423e
|
d7641647d67d110e08997767e85bbea081c2537b
|
/bitmovin_api_sdk/encoding/inputs/udp_multicast/udp_multicast_api.py
|
59839e170880781ece7571d5ff6cbc19d6ee3393
|
[
"MIT"
] |
permissive
|
aachenmax/bitmovin-api-sdk-python
|
d3ded77c459852cbea4927ff28c2a4ad39e6026a
|
931bcd8c4695a7eb224a7f4aa5a189ba2430e639
|
refs/heads/master
| 2022-11-16T08:59:06.830567 | 2020-07-06T07:16:51 | 2020-07-06T07:16:51 | 267,538,689 | 0 | 1 |
MIT
| 2020-07-06T07:16:52 | 2020-05-28T08:44:44 |
Python
|
UTF-8
|
Python
| false | false | 3,377 |
py
|
# coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
from bitmovin_api_sdk.models.udp_multicast_input import UdpMulticastInput
from bitmovin_api_sdk.encoding.inputs.udp_multicast.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.inputs.udp_multicast.udp_multicast_input_list_query_params import UdpMulticastInputListQueryParams
class UdpMulticastApi(BaseApi):
    """CRUD client for Bitmovin UDP multicast encoding inputs."""
    @poscheck_except(2)
    def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
        # type: (str, str, str, BitmovinApiLoggerBase) -> None
        super(UdpMulticastApi, self).__init__(
            api_key=api_key,
            tenant_org_id=tenant_org_id,
            base_url=base_url,
            logger=logger
        )
        # Sub-API for attaching custom metadata to UDP multicast inputs.
        self.customdata = CustomdataApi(
            api_key=api_key,
            tenant_org_id=tenant_org_id,
            base_url=base_url,
            logger=logger
        )
    def create(self, udp_multicast_input, **kwargs):
        # type: (UdpMulticastInput, dict) -> UdpMulticastInput
        """Create UDP multicast input
        :param udp_multicast_input: The UdpMulticastInput to be created
        :type udp_multicast_input: UdpMulticastInput, required
        :return: UDP multicast input
        :rtype: UdpMulticastInput
        """
        return self.api_client.post(
            '/encoding/inputs/udp-multicast',
            udp_multicast_input,
            type=UdpMulticastInput,
            **kwargs
        )
    def delete(self, input_id, **kwargs):
        # type: (string_types, dict) -> UdpMulticastInput
        """Delete UDP multicast input
        :param input_id: Id of the input
        :type input_id: string_types, required
        :return: The deleted UDP multicast input
        :rtype: UdpMulticastInput
        """
        return self.api_client.delete(
            '/encoding/inputs/udp-multicast/{input_id}',
            path_params={'input_id': input_id},
            type=UdpMulticastInput,
            **kwargs
        )
    def get(self, input_id, **kwargs):
        # type: (string_types, dict) -> UdpMulticastInput
        """UDP multicast Input Details
        :param input_id: Id of the input
        :type input_id: string_types, required
        :return: UDP multicast input
        :rtype: UdpMulticastInput
        """
        return self.api_client.get(
            '/encoding/inputs/udp-multicast/{input_id}',
            path_params={'input_id': input_id},
            type=UdpMulticastInput,
            **kwargs
        )
    def list(self, query_params=None, **kwargs):
        # type: (UdpMulticastInputListQueryParams, dict) -> UdpMulticastInput
        """List UDP multicast inputs
        :param query_params: Query parameters
        :type query_params: UdpMulticastInputListQueryParams
        :return: List of UDP multicast inputs (paginated response)
        :rtype: UdpMulticastInput
        """
        # pagination_response=True wraps the items in a paginated envelope.
        return self.api_client.get(
            '/encoding/inputs/udp-multicast',
            query_params=query_params,
            pagination_response=True,
            type=UdpMulticastInput,
            **kwargs
        )
|
[
"[email protected]"
] | |
e5edc21a34b45ca67e7abb9b03ee9215880f212d
|
c440bcb0e566ed107d198593bfeb482c59276dd8
|
/advent_of_code/2021/day10_1.py
|
2d34868acd58b3462cc9f7332e432aea3f23b3a6
|
[] |
no_license
|
TheCDC/Musings
|
1ee917bbf2fd39f6fa97b268568053ca6ad7fbbf
|
7b07e315230248239bbccad5d85d0a5e8a54d5d8
|
refs/heads/master
| 2022-11-30T23:37:24.608955 | 2021-12-19T08:12:03 | 2021-12-19T08:12:03 | 175,046,297 | 0 | 0 | null | 2022-11-22T07:20:49 | 2019-03-11T17:01:54 |
Python
|
UTF-8
|
Python
| false | false | 1,751 |
py
|
from typing import List, Optional, Tuple
with open("inputs/day10.txt") as f:
lines = f.read().split()
openers = "([{<"
closers = ")]}>"
points_corruption = {")": 3, "]": 57, "}": 1197, ">": 25137}
def complete(opens: List[str]):
    """Return the closers that finish every unmatched opener in *opens*.

    Openers are consumed most-recent-first (stack order); each maps to its
    closer via its position in the module-level ``openers``/``closers``.
    """
    pending = list(opens)
    closing: List[str] = []
    while pending:
        opener = pending.pop()
        closing.append(closers[openers.find(opener)])
    return closing
def score_corruption(s: str):
    """Syntax-error score of an illegal closing character (AoC 2021 day 10)."""
    return points_corruption[s]
def is_matched_pair(a: str, b: str):
    """Return True iff opener *a* and closer *b* are the same bracket kind."""
    assert len(a) == 1 and len(b) == 1
    assert a in openers
    assert b in closers
    # Same index in `openers` and `closers` means they pair up.
    matching = openers.find(a) == closers.find(b)
    return matching
def doline(line: str):
    """Parse one bracket line.

    Returns (unmatched_openers, unmatched_closers, completion, corrupted)
    where *corrupted* is the (index, char) of the FIRST mismatched closer,
    or None when the line is merely incomplete.
    """
    # Reversed stack so pop() yields characters in original order.
    chars = list(reversed(list(enumerate(line))))
    left: List[str] = []   # openers not yet matched
    right: List[str] = []  # closers not yet matched
    corrupted: Optional[Tuple[int, str]] = None
    while chars:
        i, c = chars.pop()
        if c in openers:
            left.append(c)
        else:
            right.append(c)
            if not is_matched_pair(left[-1], c):
                # Keep only the first mismatch.
                corrupted = (i, c) if corrupted is None else corrupted
        # Cancel matched opener/closer pairs as soon as they meet.
        while len(left) and len(right) and is_matched_pair(left[-1], right[-1]):
            left.pop()
            right.pop()
    completion = complete(left)
    return (left, right, completion, corrupted)
def solve(lines):
    """Score every line; the total is the sum of corruption scores of the
    corrupted lines (part 1 answer)."""
    score_total = 0
    results = [doline(line) for line in lines]
    score_total = sum(score_corruption(cor[1]) for l, r, comp, cor in results if cor)
    return (score_total, results)
def main():
    """Print the total corruption score, then a per-line breakdown."""
    solved = solve(lines)  # uses the module-level `lines` read from the input file
    print(
        solved[0],
        *[tuple("".join(x) for x in (t[0], t[1], t[2])) + (t[3],) for t in solved[1]],
        sep="\n"
    )
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
4699041df8bc845885513fbf247fa04518328cbd
|
14afcc5e2b8bdb3d91b500f6e7985d8a3378e929
|
/src/68.文本左右对齐.py
|
b3689a9c97bc0475d281eab692c085002b906bbc
|
[] |
no_license
|
hysapphire/leetcode-python
|
8569a0e76f8917165e6b9fb25bfef1afc1186e3c
|
8e338ee7a5c9f124e897491d6a1f4bcd1d1a6270
|
refs/heads/master
| 2022-12-03T15:17:52.557115 | 2020-08-17T14:19:59 | 2020-08-17T14:19:59 | 278,781,919 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,782 |
py
|
#
# @lc app=leetcode.cn id=68 lang=python3
#
# [68] 文本左右对齐
#
# @lc code=start
class Solution:
    def fullJustify(self, words: List[str], maxWidth: int) -> List[str]:
        """LeetCode 68: greedily pack words into lines of width maxWidth and
        fully justify every line except the last (which is left-justified)."""
        # Phase 1: greedy line breaking. `cnt` is the width consumed so far
        # on the current line, counting one mandatory space after each word.
        splited_words = []
        s = []
        cnt = 0
        for word in words:
            t = cnt + len(word)
            if t > maxWidth:
                splited_words.append(s)
                s = [word]
                cnt = len(word) + 1
            else:
                s.append(word)
                cnt = t + 1
        splited_words.append(s)
        # Phase 2: justify every full line except the last one.
        res = []
        for splited_word in splited_words[:-1]:
            s = ""
            if len(splited_word) == 1:
                num_space = 0
            else:
                # Even share of spaces per gap between adjacent words...
                num_space = (maxWidth - sum([len(word) for word in splited_word])) // (len(splited_word) - 1)
            # ...plus a remainder, handed out one-per-gap from the left.
            delta_num_space = (maxWidth - sum([len(word) for word in splited_word])) - (len(splited_word) - 1) * num_space
            if len(splited_word) == 1:
                # A single word is left-aligned and right-padded to maxWidth.
                s = ""
                s += splited_word[0]
                for _ in range(delta_num_space):
                    s += " "
            else:
                for word in splited_word[:-1]:
                    s += word
                    for _ in range(num_space):
                        s += " "
                    if delta_num_space > 0:
                        s += " "
                        delta_num_space -= 1
                s += splited_word[-1]
            res.append(s)
        # Last line: single spaces between words, right-padded to maxWidth.
        s = ""
        for word in splited_words[-1][:-1]:
            s += word
            s += " "
        s += splited_words[-1][-1]
        for _ in range(maxWidth - len(s)):
            s += " "
        res.append(s)
        return res
# @lc code=end
|
[
"[email protected]"
] | |
2850dbedb93f513dc0ee15666df35c5ff685c000
|
1302c48beae789b1b7837f34325a8f2b203d69df
|
/src/byro/bookkeeping/models/account.py
|
866ae96ca5bbdf954ac3dddf73f44b8cdd0bb526
|
[] |
no_license
|
grince/byro
|
b9a8ad0d54b78ee220af6dedee119ab9ec0036df
|
abe8743c04ba828fdd5ff50c55c43a3b32bc26bd
|
refs/heads/master
| 2021-01-25T12:31:12.461853 | 2018-02-26T17:42:12 | 2018-02-26T17:42:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,496 |
py
|
from django.db import models
from django.db.models import Q
from django.utils.decorators import classproperty
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from byro.common.models.auditable import Auditable
from byro.common.models.choices import Choices
class AccountCategory(Choices):
    """Closed set of account categories: member-facing plus double-entry."""
    # Regular Categories
    MEMBER_DONATION = 'member_donation'
    MEMBER_FEES = 'member_fees'
    # Categories for double-entry bookkeeping
    ASSET = 'asset'
    LIABILITY = 'liability'
    INCOME = 'income'
    EXPENSE = 'expense'
    @classproperty
    def choices(cls):
        # (value, human-readable label) pairs for Django choice fields.
        return (
            (cls.MEMBER_DONATION, _('Donation account')),
            (cls.MEMBER_FEES, _('Membership fee account')),
            (cls.ASSET, _('Asset account')),
            (cls.LIABILITY, _('Liability account')),
            (cls.INCOME, _('Income account')),
            (cls.EXPENSE, _('Expense account')),
        )
class Account(Auditable, models.Model):
    """A bookkeeping account; transactions flow in and out of accounts."""
    account_category = models.CharField(
        choices=AccountCategory.choices,
        max_length=AccountCategory.max_length,
    )
    name = models.CharField(max_length=300, null=True)  # e.g. 'Laser donations'

    # Sentinel meaning "no explicit end given: use the current time".
    # BUG FIX: the methods below previously declared ``end=now()`` as the
    # default, which evaluates now() ONCE at import time and silently
    # freezes the default cutoff for the lifetime of the process.
    _DEFAULT_END = object()

    class Meta:
        unique_together = (
            ('account_category', 'name'),
        )

    def __str__(self):
        if self.name:
            return self.name
        return f'{self.account_category} account #{self.id}'

    @property
    def transactions(self):
        """All virtual transactions touching this account (either side)."""
        # Imported locally, presumably to avoid a circular import — confirm.
        from byro.bookkeeping.models import VirtualTransaction
        return VirtualTransaction.objects.filter(
            Q(source_account=self) | Q(destination_account=self)
        )

    def total_in(self, start=None, end=_DEFAULT_END):
        """Sum of incoming amounts with value_datetime in [start, end].

        *end* defaults to the current time, evaluated per call.
        """
        if end is Account._DEFAULT_END:
            end = now()
        qs = self.incoming_transactions
        if start:
            qs = qs.filter(value_datetime__gte=start)
        if end:
            qs = qs.filter(value_datetime__lte=end)
        return qs.aggregate(incoming=models.Sum('amount'))['incoming'] or 0

    def total_out(self, start=None, end=_DEFAULT_END):
        """Sum of outgoing amounts with value_datetime in [start, end].

        *end* defaults to the current time, evaluated per call.
        """
        if end is Account._DEFAULT_END:
            end = now()
        qs = self.outgoing_transactions
        if start:
            qs = qs.filter(value_datetime__gte=start)
        if end:
            qs = qs.filter(value_datetime__lte=end)
        return qs.aggregate(outgoing=models.Sum('amount'))['outgoing'] or 0

    def balance(self, start=None, end=_DEFAULT_END):
        """Incoming minus outgoing over [start, end] (end defaults to now)."""
        if end is Account._DEFAULT_END:
            # Resolve once so both sums use the exact same cutoff.
            end = now()
        incoming_sum = self.total_in(start=start, end=end)
        outgoing_sum = self.total_out(start=start, end=end)
        return incoming_sum - outgoing_sum
|
[
"[email protected]"
] | |
c8a176d73ce4de43a0c744f3ba4ba152b13f907d
|
9c968f7cdf390f8417912519b53f1b7f6ea8b7e8
|
/HJ_AL/brute_force/b1065_brute.py
|
9f0cd478cfd5e6253c843f1b729ba4e7aabdc19b
|
[] |
no_license
|
hhongjoon/TIL
|
aa33ce2973552a0baa0e0da5bd7d20824fd2e322
|
a33b20af15d3f671ea7c7b2855291e50a9036c1c
|
refs/heads/master
| 2021-08-07T17:33:39.722880 | 2020-04-25T08:11:02 | 2020-04-25T08:11:02 | 162,099,245 | 4 | 0 | null | 2019-10-30T09:06:21 | 2018-12-17T08:34:07 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 427 |
py
|
num = int(input())
count=0
# Count numbers in 1..num whose decimal digits form an arithmetic
# progression (presumably Baekjoon #1065 "han-su" — all 1- and 2-digit
# numbers qualify trivially).
for i in range(1,num+1):
    if len(str(i)) == 1 or len(str(i))==2:
        count += 1
        continue
    str_num=str(i)
    judge = True
    # Check every adjacent digit triple for a constant difference.
    for j in range(0,len(str_num)-2):
        if int(str_num[j]) - int(str_num[j+1]) == int(str_num[j+1]) - int(str_num[j+2]):
            continue
        else:
            judge = False
            break
    if judge == True:
        count+=1
print(count)
|
[
"[email protected]"
] | |
028660a24e92f54b0bc846a5d68b6e90ac21cddf
|
41710e9133d660739f8f9f17040a2a8a6082e9fb
|
/python/aa_modules/fitsio_has_errors/eg2.py
|
d4a0e6e5e75796a2ec451845dfda65e7d12df200
|
[] |
no_license
|
hanjiangxue007/Programming
|
591678150e2e300051fdeaf09124d3893076d3a9
|
7a545ef2300b004497f30d27d1f2aaa032e26af5
|
refs/heads/master
| 2020-06-29T18:50:27.776557 | 2016-10-27T18:31:39 | 2016-10-27T18:31:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,094 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author : Bhishan Poudel; Physics PhD Student, Ohio University
# Date : Oct-15-2016 Sat
# Last update :
#
#
# Imports
import fitsio
from fitsio import FITS,FITSHDR
# Often you just want to quickly read or write data without bothering to
# create a FITS object. In that case, you can use the read and write
# convenience functions.
# read all data from the first hdu with data
filename='test.fits'
data = fitsio.read(filename)
# read a subset of rows and columns from a table
data = fitsio.read(filename, rows=[35,1001], columns=['x','y'], ext=2)
# read the header, or both at once
# NOTE(review): `extension` is never defined in this example script, so the
# next line raises NameError as written; it is a placeholder for an HDU
# number or name.
h = fitsio.read_header(filename, extension)
# NOTE(review): `ext` is likewise an undefined placeholder here.
data,h = fitsio.read(filename, ext=ext, header=True)
# open the file, write a new binary table extension, and then write the
# data from "recarray" into the table. By default a new extension is
# added to the file. use clobber=True to overwrite an existing file
# instead. To append rows to an existing table, see below.
# NOTE(review): `recarray` and `image` are placeholders (not defined here).
fitsio.write(filename, recarray)
# write an image
fitsio.write(filename, image)
|
[
"[email protected]"
] | |
b65f91b5d0820bef879b4902b41d7a79e7fe245a
|
33f304bbd8536045a63dea909031576ea3f7b488
|
/census_area/core.py
|
c3fe06979410922dd4552eca320be2f8349c5c06
|
[
"MIT"
] |
permissive
|
LindaLv11/census_area
|
859c92cd5ca6a8537ff45014b42771804dc29913
|
48d8bc7e73c12b58e796307e36c93029b1ec0044
|
refs/heads/master
| 2020-04-20T08:25:32.838867 | 2019-01-04T03:00:47 | 2019-01-04T03:00:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,436 |
py
|
import shapely.geometry
import shapely.geos
import esridump
GEO_URLS = {
'tracts' : {
1990 : 'https://gis.uspatial.umn.edu/arcgis/rest/services/nhgis/Census_Tracts_1910_2014/MapServer/8',
2000 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2010/tigerWMS_Census2000/MapServer/6',
2010 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/14',
2011 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/14',
2012 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/14',
2013 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2013/MapServer/8',
2014 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2014/MapServer/8',
2015 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2015/MapServer/8',
2016 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2015/MapServer/8'},
'block groups' : {
2000 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2010/tigerWMS_Census2000/MapServer/8',
2010 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/16',
2011 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/16',
2012 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/16',
2013 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2013/MapServer/10',
2014 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2014/MapServer/10',
2015 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2015/MapServer/10',
2016 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2015/MapServer/10'},
'blocks' : {
2000 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2010/tigerWMS_Census2000/MapServer/10',
2010 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Current/MapServer/12'},
'incorporated places' : {
1990 : 'https://gis.uspatial.umn.edu/arcgis/rest/services/nhgis/Places_1980_2014/MapServer/1',
2000 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2010/tigerWMS_Census2000/MapServer/24',
2010 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/34',
2011 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/34',
2012 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/34',
2013 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2013/MapServer/26',
2014 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2014/MapServer/26',
2015 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2015/MapServer/26',
2016 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2016/MapServer/26'}
}
class AreaFilter(object):
    """Iterate the sub-geography features that meaningfully overlap a geometry.

    Queries an ArcGIS layer for features whose bounding envelope intersects
    the target geometry, then keeps only those whose true intersection
    covers more than 10% of the feature's own area.
    """
    def __init__(self, geojson_geometry, sub_geography_url):
        self.geo = shapely.geometry.shape(geojson_geometry)
        # Server-side prefilter: envelope (bounding-box) intersection only;
        # precise filtering happens client-side in __iter__.
        geo_query_args = {'geometry': ','.join(str(x) for x in self.geo.bounds),
                          'geometryType': 'esriGeometryEnvelope',
                          'spatialRel': 'esriSpatialRelEnvelopeIntersects',
                          'inSR' : '4326',
                          'geometryPrecision' : 9,
                          'orderByFields': 'OID'}
        self.area_dumper = esridump.EsriDumper(sub_geography_url,
                                               extra_query_args = geo_query_args)
    def __iter__(self):
        for area in self.area_dumper:
            area_geo = shapely.geometry.shape(area['geometry'])
            if self.geo.intersects(area_geo):
                try:
                    intersection = self.geo.intersection(area_geo)
                except shapely.geos.TopologicalError:
                    # buffer(0) is the usual shapely trick to repair
                    # slightly invalid geometries before intersecting.
                    intersection = self.geo.buffer(0).intersection(area_geo.buffer(0))
                # Keep features with >10% of their own area inside the target.
                if intersection.area/area_geo.area > 0.1:
                    yield area
|
[
"[email protected]"
] | |
b5a06168a7891d65d6d1f2dc37cc42b31c3f9075
|
14b8cf0b67104b53534678b8c0e9525ace4714ff
|
/codeeval/spiral.py
|
8ce3b47e920f2d0e9c03bbd1d9e3a51d4092b051
|
[] |
no_license
|
bhfwg/py_learn
|
bb11898fd81f653643fc61949f43df751d317fcb
|
eca9da748bada67357961d1581d8ec890a3385f8
|
refs/heads/master
| 2020-03-27T15:01:25.881792 | 2018-06-05T01:36:26 | 2018-06-05T01:36:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,433 |
py
|
from sys import argv
def spiral_printing(n, m, one):
    """Yield the elements of an n-by-m grid (stored row-major in *one*)
    in clockwise spiral order from the top-left corner inward.

    Improvement: the original used Python-2-only ``xrange``; ``range``
    produces the identical iteration on both Python 2 and 3, making the
    generator version-portable without changing its output.
    """
    if n * m == 1:
        yield one[0]
        return
    def cell(row, col):
        # Row-major flattening of (row, col).
        return row * m + col
    # Boundaries of the current (unvisited) ring.
    top, bottom = 0, n - 1
    left, right = 0, m - 1
    while 1:
        for col in range(left, right):        # top edge, left -> right-1
            yield one[cell(top, col)]
        for row in range(top, bottom):        # right edge, top -> bottom-1
            yield one[cell(row, right)]
        for col in range(right, left, -1):    # bottom edge, right -> left+1
            yield one[cell(bottom, col)]
        for row in range(bottom, top, -1):    # left edge, bottom -> top+1
            yield one[cell(row, left)]
        # Shrink the ring inward.
        top += 1
        left += 1
        right -= 1
        bottom -= 1
        if left > right or top > bottom:
            break
        if left == right:
            # A single middle column remains.
            for row in range(top, bottom + 1):
                yield one[cell(row, right)]
            break
        elif top == bottom:
            # A single middle row remains.
            for col in range(left, right + 1):
                yield one[cell(top, col)]
            break
# Driver (Python 2: note the print statement below). Each non-empty input
# line is "n;m;space-separated elements"; print the spiral order joined
# by single spaces.
f = open(argv[1], 'r')
for one in f:
    one = one.strip()
    if one:
        n, m, one = one.split(';')
        n = int(n)
        m = int(m)
        one = one.split(' ')
        print ' '.join(spiral_printing(n, m, one))
f.close()
|
[
"[email protected]"
] | |
6c45e72f32ca223fecfcc490073f0cd0d14b4b65
|
0130c8b14927097663157846adc4b146d67d2fda
|
/tests/common/test_run/div_no_nan_run.py
|
1a2c66c665dc13f6f5900b55ab27ee71b9d67109
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-3-Clause",
"NCSA",
"LLVM-exception",
"Zlib",
"BSD-2-Clause",
"MIT"
] |
permissive
|
Shigangli/akg
|
e8be3e0ee1eafe3e42b4cc4d424c28f08ef4c0bc
|
3766c54e0b109541932d147a6b5643a334b82403
|
refs/heads/master
| 2023-09-06T05:13:40.571583 | 2021-11-23T03:44:54 | 2021-11-23T03:44:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,497 |
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from akg.utils import kernel_exec as utils
from tests.common.test_op import div_no_nan
from tests.common.tensorio import compare_tensor
from tests.common.base import get_rtol_atol
from tests.common.gen_random import random_gaussian
def div_no_nan_execute(shapes, dtype, attrs):
    """Build the div_no_nan kernel, run it, and compare against numpy.

    Returns (inputs, actual_output, expected_output, pass_flag).
    """
    exp_output, inputs, args = gen_data(dtype, shapes)
    mod = div_no_nan_compile(shapes, dtype, attrs)
    # result_tvm
    acu_output = utils.mod_launch(mod, args, expect=exp_output)
    # compare result
    rtol, atol = get_rtol_atol("div_no_nan", dtype)
    TestCase_Result = compare_tensor(acu_output, exp_output, rtol=rtol, atol=atol, equal_nan=True)
    return inputs, acu_output, exp_output, TestCase_Result
def gen_data(dtype, shapes):
    """Build the numpy reference output and I/O buffers for div_no_nan.

    div_no_nan semantics: x / y, except that positions where |y| falls
    below a dtype-dependent "zero" threshold yield 0 instead of inf/nan.
    """
    # Result_Numpy
    data_x = random_gaussian(shapes[0], miu=1, sigma=0.1).astype(dtype)
    # Divisor drawn very close to zero to exercise the no-nan path.
    data_y = random_gaussian(shapes[1], miu=0, sigma=2**-64).astype(dtype)
    # Per-dtype "treat as zero" mask for the divisor.
    if dtype in ["uint8", "int8", "int32"]:
        is_zero = np.equal(0, data_y)
    if dtype in ["float16"]:
        is_zero = np.less(np.abs(data_y), 2**-12)
    if dtype in ["float32"]:
        is_zero = np.less(np.abs(data_y), 2**-64)
    # NOTE(review): an unsupported dtype leaves `is_zero` unbound and
    # raises NameError below — confirm callers only pass the dtypes above.
    # Zero out x where y ~ 0 and shift y to 1 there, so the division is safe.
    if dtype in ["uint8", "int8", "int32"]:
        exp_output = np.floor_divide(np.multiply(data_x, (1 - is_zero)), data_y + is_zero)
    if dtype in ["float16", "float32"]:
        exp_output = np.true_divide(np.multiply(data_x, (1 - is_zero)), data_y + is_zero)
    # inputs and output to hold the data
    output = np.full(exp_output.shape, np.nan, dtype)
    inputs = [data_x, data_y]
    args = [data_x, data_y, output]
    return exp_output, inputs, args
def div_no_nan_compile(shapes, dtype, attrs, kernel_name='div_no_nan', runing=False):
    """Compile the div_no_nan op via akg's op_build_test (`runing` toggles tuning)."""
    return utils.op_build_test(div_no_nan.div_no_nan, [shapes[0], shapes[1]], [dtype, dtype], kernel_name=kernel_name, attrs=attrs, tuning=runing)
|
[
"[email protected]"
] | |
9fc39c434aeb8db7e69c85650d79dea51a686666
|
5d2404f62e58d5fd1f6112744ff32c3166183ac7
|
/Geek University/Seção 4/Exercicios/EX49.py
|
de8275af5902ac3f09895155461a32956779a2ef
|
[] |
no_license
|
Leownhart/My_Course_of_python
|
236cfc84d841c5883e5aa1cc0c0730e7a9a83c40
|
5abb21f8cdad91ab54247a007d40bf9ecd2cff8c
|
refs/heads/master
| 2020-08-28T15:04:33.628086 | 2020-08-24T19:25:39 | 2020-08-24T19:25:39 | 217,733,877 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 654 |
py
|
'''
49 - Write a program that reads a start time (hour, minute, second) and the
duration, in seconds, of a biological experiment. The program should output
the new time (hour, minute, second) at which the experiment ends.
'''
# ANSWERS
from datetime import datetime

hora = int(input('Informe a Hora: '))
minuto = int(input('Informe os Minutos: '))
segundos = int(input('Informe os Segundos: '))

# NOTE(review): this solution prints the elapsed seconds and the current
# timestamp; it does not compute the end time described in the statement.
total_em_segundos = hora * 3600 + minuto * 60 + segundos
print(f'Passaram-se {total_em_segundos} Segundos')
print(f'{datetime.now()}')
|
[
"[email protected]"
] | |
68eeea5ed3b7b64fa83adeca2d9a513d9c57fd1c
|
24caa6710105a060fab2e17147e6d56609939011
|
/06-Importing_Data_in_Python_(Part_2)/01-Importing_data_from_the_Internet/01-Importing_flat_files_from_the_web_your_turn!.py
|
b845373064884f87b9853e85c1360cd5849f5a64
|
[] |
no_license
|
inverseundefined/DataCamp
|
99607022ad3f899d7681ad1f70fcedab290e269a
|
7226b6b6f41888c3610a884db9a226e013d37e56
|
refs/heads/master
| 2022-01-10T00:53:21.714908 | 2019-07-24T13:27:49 | 2019-07-24T13:27:49 | 198,280,648 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,446 |
py
|
'''
Importing flat files from the web: your turn!

Import 'winequality-red.csv' (physiochemical properties and quality ratings
of red wine) from the UCI Machine Learning repository, save it locally, then
load it into a pandas DataFrame and print its head.

Steps:
- Import urlretrieve from urllib.request.
- Point `url` at the remote file.
- Save a local copy as 'winequality-red.csv' with urlretrieve().
- Read the local copy into a DataFrame (semicolon-separated) and print the head.
'''
# Import package
from urllib.request import urlretrieve

# Import pandas
import pandas as pd

# Remote location of the flat file
url = 'https://s3.amazonaws.com/assets.datacamp.com/production/course_1606/datasets/winequality-red.csv'

# Download and keep a local copy
urlretrieve(url, 'winequality-red.csv')

# Load the local copy and show the first rows
df = pd.read_csv('winequality-red.csv', sep=';')
print(df.head())
|
[
"[email protected]"
] | |
af110594bc60b09186afd5627301dc1dbf379ca8
|
af61044c866eb85ca2c622e082090f7657431206
|
/webcli/arthur_utils/experiment.py
|
a2e95ed3a2caacf3035abf7dcdb6607dbfd126af
|
[] |
no_license
|
leepand/gridpoc
|
f7959ef099d8a5513c59dfeb682761771ffe7594
|
4c476cd0241a95a4a7d2abf53a519d3749ecfb94
|
refs/heads/master
| 2020-04-28T02:38:49.631595 | 2019-03-11T02:01:50 | 2019-03-11T02:01:50 | 174,906,542 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,502 |
py
|
from _mlflow_object import _MLflowObject
class Experiment(_MLflowObject):
    """In-memory description of an MLflow experiment record."""

    DEFAULT_EXPERIMENT_ID = 0

    # Lifecycle stage markers (see `lifecycle_stage`).
    ACTIVE_LIFECYCLE = 'active'
    DELETED_LIFECYCLE = 'deleted'

    def __init__(self, experiment_id, name, artifact_location, lifecycle_stage):
        super(Experiment, self).__init__()
        self._experiment_id = experiment_id
        self._name = name
        self._artifact_location = artifact_location
        self._lifecycle_stage = lifecycle_stage

    @property
    def experiment_id(self):
        """Integer ID of the experiment."""
        return self._experiment_id

    @property
    def name(self):
        """String name of the experiment."""
        return self._name

    def _set_name(self, new_name):
        # Internal mutator; presumably used when an experiment is renamed --
        # confirm against the callers in the tracking store.
        self._name = new_name

    @property
    def artifact_location(self):
        """Root artifact URI for the experiment, as a string."""
        return self._artifact_location

    @property
    def lifecycle_stage(self):
        """Lifecycle stage of the experiment: 'active' or 'deleted'."""
        return self._lifecycle_stage

    @classmethod
    def from_proto(cls, proto):
        """Alternate constructor: build an Experiment from its protobuf form."""
        return cls(proto.experiment_id, proto.name,
                   proto.artifact_location, proto.lifecycle_stage)

    @classmethod
    def _properties(cls):
        # TODO: Hard coding this list of props for now. There has to be a clearer way...
        return ["experiment_id", "name", "artifact_location", "lifecycle_stage"]
|
[
"[email protected]"
] | |
33667e8b97d6c876c073bc1b32185c8188c271fa
|
a1614311937bae5204e171b2a3481fb31e61a490
|
/media/codigos/36/36sol118.py
|
0e4ccda5dba78b1aa00e7913b2e0c1bb249e5ec9
|
[] |
no_license
|
alexandre146/avaliar
|
8d406100ed72f10292a0580edac50ad061ad92e9
|
3daf247ca68962086592a356e013b07fa1569afe
|
refs/heads/master
| 2020-03-21T03:09:29.493919 | 2018-07-23T11:41:38 | 2018-07-23T11:41:38 | 137,883,682 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 178 |
py
|
# Read n and m; print the largest multiple of n that is <= m, unless that
# multiple would be n itself, in which case report there is none.
n = int(input())
m = int(input())

resto = m % n
if resto == 0:
    # m is already a multiple of n
    print(m)
elif m - resto == n:
    print("sem multiplos menores que" + str(m))
else:
    print(m - resto)
|
[
"[email protected]"
] | |
3a9bf2b914edde4e5c397c7319864fbf32311712
|
117f066c80f3863ebef74463292bca6444f9758a
|
/finnhub_swagger_api/finnhub_swagger_api/models/revenue_estimates_info.py
|
02eb5c15a1e32e1b17eb727157f4a1affeec2537
|
[] |
no_license
|
cottrell/notebooks
|
c6de3842cbaeb71457d270cbe6fabc8695a6ee1b
|
9eaf3d0500067fccb294d064ab78d7aaa03e8b4d
|
refs/heads/master
| 2023-08-09T22:41:01.996938 | 2023-08-04T22:41:51 | 2023-08-04T22:41:51 | 26,830,272 | 3 | 1 | null | 2023-03-04T03:58:03 | 2014-11-18T21:14:23 |
Python
|
UTF-8
|
Python
| false | false | 7,028 |
py
|
# coding: utf-8
"""
Finnhub API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from finnhub_swagger_api.configuration import Configuration
class RevenueEstimatesInfo(object):
    """Swagger model: analyst revenue-estimate figures for one period.

    NOTE: mirrors code produced by swagger-codegen; the public surface
    (constructor signature, properties, `swagger_types`, `attribute_map`,
    dict/str helpers) is kept unchanged.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API payload.
    """

    swagger_types = {
        'revenue_avg': 'float',
        'revenue_high': 'float',
        'revenue_low': 'float',
        'number_analysts': 'int',
        'period': 'date'
    }

    attribute_map = {
        'revenue_avg': 'revenueAvg',
        'revenue_high': 'revenueHigh',
        'revenue_low': 'revenueLow',
        'number_analysts': 'numberAnalysts',
        'period': 'period'
    }

    def __init__(self, revenue_avg=None, revenue_high=None, revenue_low=None, number_analysts=None, period=None, _configuration=None):  # noqa: E501
        """Create the model; fields left as None are treated as unset."""
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration

        self._revenue_avg = None
        self._revenue_high = None
        self._revenue_low = None
        self._number_analysts = None
        self._period = None
        self.discriminator = None

        # Route every supplied value through its property setter.
        if revenue_avg is not None:
            self.revenue_avg = revenue_avg
        if revenue_high is not None:
            self.revenue_high = revenue_high
        if revenue_low is not None:
            self.revenue_low = revenue_low
        if number_analysts is not None:
            self.number_analysts = number_analysts
        if period is not None:
            self.period = period

    @property
    def revenue_avg(self):
        """float: average revenue estimate, including Finnhub's proprietary estimates."""
        return self._revenue_avg

    @revenue_avg.setter
    def revenue_avg(self, revenue_avg):
        self._revenue_avg = revenue_avg

    @property
    def revenue_high(self):
        """float: highest estimate."""
        return self._revenue_high

    @revenue_high.setter
    def revenue_high(self, revenue_high):
        self._revenue_high = revenue_high

    @property
    def revenue_low(self):
        """float: lowest estimate."""
        return self._revenue_low

    @revenue_low.setter
    def revenue_low(self, revenue_low):
        self._revenue_low = revenue_low

    @property
    def number_analysts(self):
        """int: number of analysts."""
        return self._number_analysts

    @number_analysts.setter
    def number_analysts(self, number_analysts):
        self._number_analysts = number_analysts

    @property
    def period(self):
        """date: the period the estimates refer to."""
        return self._period

    @period.setter
    def period(self, period):
        self._period = period

    def to_dict(self):
        """Recursively convert the model (and any nested models) to a plain dict."""
        def _plain(value):
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        result = {attr: _plain(getattr(self, attr)) for attr in self.swagger_types}
        if issubclass(RevenueEstimatesInfo, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Pretty-printed string form of `to_dict()`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when their dict forms are equal."""
        if not isinstance(other, RevenueEstimatesInfo):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of __eq__ (kept explicit for Python 2 compatibility)."""
        if not isinstance(other, RevenueEstimatesInfo):
            return True
        return self.to_dict() != other.to_dict()
|
[
"[email protected]"
] | |
26ec2100442d4be7cb84f871f4af39f81f332470
|
056f10d9f99506bb9b5abf7e91633f3ad0c76061
|
/CountCSVRows.py
|
f31ac1a85a8c869736b03a67223274ff65e3ce66
|
[] |
no_license
|
taers232c/GAM-Scripts3
|
5f171b620b2ac19514ab7198e39720f59a60ba9e
|
a59c5adb7b03b6bc9a4e054b9b41eabae2779f13
|
refs/heads/master
| 2023-08-31T06:43:57.645295 | 2023-08-22T17:32:21 | 2023-08-22T17:32:21 | 108,921,186 | 176 | 46 | null | 2023-02-28T15:52:32 | 2017-10-30T23:48:44 |
Python
|
UTF-8
|
Python
| false | false | 573 |
py
|
#!/usr/bin/env python3
"""
# Purpose: Count rows in a CSV file
#
# Python: Use python or python3 below as appropriate to your system; verify that you have version 3
#        $ python -V   or   python3 -V
#        Python 3.x.y
# Usage:
#  python3 CountCSVRows.py File.csv
#
"""

import csv
import sys

QUOTE_CHAR = '"'  # Adjust as needed

# '-' means "read the CSV from stdin" instead of a named file.
if sys.argv[1] != '-':
    source = open(sys.argv[1], 'r', encoding='utf-8')
else:
    source = sys.stdin

# DictReader consumes the header line, so this counts data rows only.
row_count = sum(1 for _ in csv.DictReader(source, quotechar=QUOTE_CHAR))
print(row_count)

if source != sys.stdin:
    source.close()
|
[
"[email protected]"
] | |
7c4856b94c048615d4958703b69db3191a928ddf
|
d7195e61bc37f6b90c8bc2d6f164e5e7da98aa77
|
/landlab/grid/linkstatus.py
|
6eb74a1aadecb3b7f83bdb0915c210dc93491ae0
|
[
"MIT"
] |
permissive
|
joeljgeo/landlab
|
ffaae36b3ad3c5e1377355427bc9cfbb21074f01
|
1d2651c76a8a36a7a132f139638192df1823f8fb
|
refs/heads/master
| 2020-04-05T01:38:11.870170 | 2018-11-09T16:44:31 | 2018-11-09T16:44:31 | 156,443,219 | 0 | 0 |
MIT
| 2018-11-09T16:44:32 | 2018-11-06T20:26:54 |
Python
|
UTF-8
|
Python
| false | false | 5,415 |
py
|
#! /usr/bin/env python
import numpy as np
from .nodestatus import (CLOSED_BOUNDARY, CORE_NODE, FIXED_GRADIENT_BOUNDARY,
FIXED_VALUE_BOUNDARY)
from ..utils.decorators import (cache_result_in_object,
make_return_array_immutable)
# Define the link types
#: Indicates a link is *active*, and can carry flux
ACTIVE_LINK = 0

#: Indicates a link has a fixed (gradient) value, & behaves as a boundary
FIXED_LINK = 2

#: Indicates a link is *inactive*, and cannot carry flux
INACTIVE_LINK = 4

# All recognised link-status codes: an ordered list, plus a set for fast
# membership tests.
LINK_STATUS_FLAGS_LIST = [
    ACTIVE_LINK,
    FIXED_LINK,
    INACTIVE_LINK,
]
LINK_STATUS_FLAGS = set(LINK_STATUS_FLAGS_LIST)
def is_fixed_link(node_status_at_link):
    """Find links that are fixed.

    A link is fixed if it connects a core node with a fixed gradient
    boundary node (the original docstring said "fixed value", which did
    not match the check below).

    Parameters
    ----------
    node_status_at_link : ndarray of int, shape `(n_links, 2)`
        Node status at link tail and head.

    Returns
    -------
    ndarray of bool, shape `(n_links, )`
        True if link is fixed.

    Examples
    --------
    >>> from landlab.grid.linkstatus import is_fixed_link
    >>> from landlab import CORE_NODE, FIXED_GRADIENT_BOUNDARY
    >>> is_fixed_link([CORE_NODE, FIXED_GRADIENT_BOUNDARY])
    array([ True], dtype=bool)

    >>> from landlab import FIXED_VALUE_BOUNDARY
    >>> is_fixed_link([CORE_NODE, FIXED_VALUE_BOUNDARY])
    array([False], dtype=bool)

    >>> is_fixed_link([[FIXED_GRADIENT_BOUNDARY, CORE_NODE],
    ...                [CORE_NODE, CORE_NODE]])
    array([ True, False], dtype=bool)
    """
    node_status_at_link = np.asarray(node_status_at_link).reshape((-1, 2))

    is_core_node = node_status_at_link == CORE_NODE
    is_fixed_gradient_node = node_status_at_link == FIXED_GRADIENT_BOUNDARY

    # Fixed in either orientation: core -> fixed-gradient or the reverse.
    return ((is_core_node[:, 0] & is_fixed_gradient_node[:, 1]) |
            (is_fixed_gradient_node[:, 0] & is_core_node[:, 1]))
def is_inactive_link(node_status_at_link):
    """Find links that are inactive.

    A link is inactive if it connects two boundary nodes or one of
    its nodes is closed.

    Parameters
    ----------
    node_status_at_link : ndarray of int, shape `(n_links, 2)`
        Node status at link tail and head.

    Returns
    -------
    ndarray of bool, shape `(n_links, )`
        True if link is inactive.

    Examples
    --------
    >>> from landlab.grid.linkstatus import is_inactive_link
    >>> from landlab import CORE_NODE, CLOSED_BOUNDARY, FIXED_GRADIENT_BOUNDARY
    >>> is_inactive_link([CORE_NODE, CLOSED_BOUNDARY])
    array([ True], dtype=bool)

    >>> from landlab import FIXED_VALUE_BOUNDARY
    >>> is_inactive_link([FIXED_GRADIENT_BOUNDARY, FIXED_VALUE_BOUNDARY])
    array([ True], dtype=bool)

    >>> is_inactive_link([[FIXED_GRADIENT_BOUNDARY, CLOSED_BOUNDARY],
    ...                   [CORE_NODE, CORE_NODE]])
    array([ True, False], dtype=bool)
    """
    node_status_at_link = np.asarray(node_status_at_link).reshape((-1, 2))

    is_core = node_status_at_link == CORE_NODE
    is_fixed_value = node_status_at_link == FIXED_VALUE_BOUNDARY
    is_fixed_gradient = node_status_at_link == FIXED_GRADIENT_BOUNDARY
    is_closed = node_status_at_link == CLOSED_BOUNDARY
    is_boundary_node = is_fixed_value | is_fixed_gradient | is_closed

    # boundary-boundary pairs, or a closed node on either end of a core link
    return ((is_boundary_node[:, 0] & is_boundary_node[:, 1]) |
            (is_closed[:, 0] & is_core[:, 1]) |
            (is_core[:, 0] & is_closed[:, 1]))
def is_active_link(node_status_at_link):
    """Find links that are active.

    A link is active if it connects a core node with another core
    node or a fixed value boundary.

    Parameters
    ----------
    node_status_at_link : ndarray of int, shape `(n_links, 2)`
        Node status at link tail and head.

    Returns
    -------
    ndarray of bool, shape `(n_links, )`
        True if link is active.

    Examples
    --------
    >>> from landlab.grid.linkstatus import is_active_link
    >>> from landlab import CORE_NODE, FIXED_GRADIENT_BOUNDARY
    >>> is_active_link([CORE_NODE, FIXED_GRADIENT_BOUNDARY])
    array([False], dtype=bool)

    >>> from landlab import FIXED_VALUE_BOUNDARY
    >>> is_active_link([CORE_NODE, FIXED_VALUE_BOUNDARY])
    array([ True], dtype=bool)

    >>> is_active_link([[FIXED_GRADIENT_BOUNDARY, CORE_NODE],
    ...                 [CORE_NODE, CORE_NODE]])
    array([False,  True], dtype=bool)
    """
    node_status_at_link = np.asarray(node_status_at_link).reshape((-1, 2))

    is_core_node = node_status_at_link == CORE_NODE
    is_fixed_value_node = node_status_at_link == FIXED_VALUE_BOUNDARY

    # core-core, or core paired with a fixed-value boundary (either order)
    return (
        (is_core_node[:, 0] & is_core_node[:, 1]) |
        (is_core_node[:, 0] & is_fixed_value_node[:, 1]) |
        (is_fixed_value_node[:, 0] & is_core_node[:, 1])
    )
def set_status_at_link(node_status_at_link, out=None):
    """Map each link's pair of node statuses to a link-status code.

    Fills `out` (or a fresh uint8 array) with ACTIVE_LINK, FIXED_LINK or
    INACTIVE_LINK for every link; the three classifications must be
    mutually exclusive and exhaustive.
    """
    n_links = len(node_status_at_link)
    if out is None:
        out = np.full(n_links, 255, dtype=np.uint8)

    fixed = is_fixed_link(node_status_at_link)
    active = is_active_link(node_status_at_link)
    inactive = is_inactive_link(node_status_at_link)

    # Sanity check: every link falls into exactly one category.
    assert np.all(np.sum(np.vstack((active, inactive, fixed)), axis=0) == 1)

    out[inactive] = INACTIVE_LINK
    out[active] = ACTIVE_LINK
    out[fixed] = FIXED_LINK

    return out
|
[
"[email protected]"
] | |
6d8323e3ea02352d65d2f5f99110a013ddd2cc3d
|
1348885ccdebfcb6010a267a3440a4ccc64373d1
|
/Examples/IPlugSideChain/scripts/update_installer_version.py
|
d4c3a9886d1d11e75b3572f01e371d4ebdeff671
|
[
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ddf/iPlug2
|
c6565343def57dbf063fefb3b875c6337d363081
|
d05d20929544b06500369208b9ec81a62eb191fb
|
refs/heads/master
| 2022-11-02T04:39:45.019866 | 2022-10-10T17:15:04 | 2022-10-10T17:15:04 | 170,179,953 | 2 | 0 |
NOASSERTION
| 2019-02-11T18:30:30 | 2019-02-11T18:30:30 | null |
UTF-8
|
Python
| false | false | 3,091 |
py
|
#!/usr/bin/python3
# this script will update the versions in packages and innosetup installer files to match that in config.h
import plistlib, os, datetime, fileinput, glob, sys, string
scriptpath = os.path.dirname(os.path.realpath(__file__))  # directory containing this script
projectpath = os.path.abspath(os.path.join(scriptpath, os.pardir))  # project root, one level up

IPLUG2_ROOT = "../../.."

# Make the shared iPlug2 Scripts folder importable, then pull in the config parser.
sys.path.insert(0, os.path.join(os.getcwd(), IPLUG2_ROOT + '/Scripts'))
from parse_config import parse_config
def replacestrs(filename, s, r):
    """Replace every occurrence of *s* with *r*, in place, in all files
    matching the glob pattern *filename*.

    Uses fileinput's inplace mode, so writes to stdout go back into the file.
    """
    files = glob.glob(filename)

    for line in fileinput.input(files, inplace=1):
        # The original code called string.find(line, s) here; the module-level
        # `string.find` was removed in Python 3 (it raised AttributeError) and
        # its result was never used, so the call is dropped.
        line = line.replace(s, r)
        sys.stdout.write(line)
def main():
    """Sync installer metadata with the project's config.

    Expects a single CLI argument: demo flag ("0" or "1"). Rewrites
    (a) the macOS Packages .pkgproj and (b) the Windows Inno Setup .iss
    under <project>/installer/ so version strings and demo/full naming
    match the parsed config.
    """
    demo = 0
    if len(sys.argv) != 2:
        print("Usage: update_installer_version.py demo(0 or 1)")
        sys.exit(1)
    else:
        demo = int(sys.argv[1])

    config = parse_config(projectpath)

    # MAC INSTALLER
    print("Updating Mac Installer version info...")

    plistpath = projectpath + "/installer/" + config['BUNDLE_NAME'] + ".pkgproj"

    with open(plistpath, 'rb') as fp:
        installer = plistlib.load(fp)

    # range = number of items in the installer (VST 2, VST 3, app, audiounit, aax)
    for x in range(0, 5):
        installer['PACKAGES'][x]['PACKAGE_SETTINGS']['VERSION'] = config['FULL_VER_STR']

    # Demo builds get a " Demo" suffix in the title and a different intro page.
    if demo:
        installer['PROJECT']['PROJECT_PRESENTATION']['TITLE']['LOCALIZATIONS'][0]['VALUE'] = config['BUNDLE_NAME'] + " Demo"
        installer['PROJECT']['PROJECT_PRESENTATION']['INTRODUCTION']['LOCALIZATIONS'][0]['VALUE']['PATH'] = "intro-demo.rtf"
    else:
        installer['PROJECT']['PROJECT_PRESENTATION']['TITLE']['LOCALIZATIONS'][0]['VALUE'] = config['BUNDLE_NAME']
        installer['PROJECT']['PROJECT_PRESENTATION']['INTRODUCTION']['LOCALIZATIONS'][0]['VALUE']['PATH'] = "intro.rtf"

    with open(plistpath, 'wb') as fp:
        plistlib.dump(installer, fp)

    # replacestrs(plistpath, "//Apple//", "//Apple Computer//")

    # WIN INSTALLER
    print("Updating Windows Installer version info...")

    # fileinput(..., inplace=1) redirects stdout into the .iss file, so each
    # branch rewrites `line` and the final sys.stdout.write puts it back.
    for line in fileinput.input(projectpath + "/installer/" + config['BUNDLE_NAME'] + ".iss", inplace=1):
        if "AppVersion" in line:
            line = "AppVersion=" + config['FULL_VER_STR'] + "\n"
        if "OutputBaseFilename" in line:
            if demo:
                line = "OutputBaseFilename=IPlugSideChain Demo Installer\n"
            else:
                line = "OutputBaseFilename=IPlugSideChain Installer\n"
        if 'Source: "readme' in line:
            if demo:
                line = 'Source: "readme-win-demo.rtf"; DestDir: "{app}"; DestName: "readme.rtf"; Flags: isreadme\n'
            else:
                line = 'Source: "readme-win.rtf"; DestDir: "{app}"; DestName: "readme.rtf"; Flags: isreadme\n'
        if "WelcomeLabel1" in line:
            if demo:
                line = "WelcomeLabel1=Welcome to the IPlugSideChain Demo installer\n"
            else:
                line = "WelcomeLabel1=Welcome to the IPlugSideChain installer\n"
        if "SetupWindowTitle" in line:
            if demo:
                line = "SetupWindowTitle=IPlugSideChain Demo installer\n"
            else:
                line = "SetupWindowTitle=IPlugSideChain installer\n"
        sys.stdout.write(line)
|
[
"[email protected]"
] | |
d021d36f984ab643b089ddca6cf72adba3e0c21e
|
e3565e1ce607f60745f2a045aae8026661a6b99b
|
/resources/Onyx-1.0.511/py/onyx/grid/griddy.py
|
b7fdb67930c51b30bfc7c426ac2a4ed49d48c2c2
|
[
"Apache-2.0"
] |
permissive
|
eternity668/speechAD
|
4c08d953b2ed06b3357b1c39d8709dd088a2471c
|
f270a1be86372b7044615e4fd82032029e123bc1
|
refs/heads/master
| 2021-01-12T22:10:33.358500 | 2014-02-03T16:03:28 | 2014-02-03T16:03:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,152 |
py
|
###########################################################################
#
# File: griddy.py (directory: ./py/onyx/grid)
# Date: 4-Feb-2009
# Author: Hugh Secker-Walker
# Description: A function for use in testing by gridgo.py
#
# This file is part of Onyx http://onyxtools.sourceforge.net
#
# Copyright 2009 The Johns Hopkins University
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
###########################################################################
"""
>>> True
True
"""
def my_func(a, b, c):
    """Echo the three positional arguments back as a tuple (grid-test helper)."""
    return (a, b, c)
if __name__ == '__main__':
    # Standard entry point for onyx modules; presumably runs this module's
    # doctests -- confirm against the onyx package documentation.
    from onyx import onyx_mainstartup
    onyx_mainstartup()
|
[
"[email protected]"
] | |
9eb02a16cb5679b043e158e4f36ae3ea11a51e80
|
162f0a636cab320ead784b33597e583e38ac432f
|
/1744.py
|
f339b7b48defbece73a4dddc7bee0dbea7c0d161
|
[] |
no_license
|
goodsosbva/BOJ_Greedy
|
fc2450df90f64790f6cc01c168ba7f19ec83e504
|
98d21af254cacf41632a4b40ca9ef643b29bb104
|
refs/heads/main
| 2023-03-31T17:26:33.863396 | 2021-04-06T07:50:57 | 2021-04-06T07:50:57 | 347,081,712 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,641 |
py
|
# BOJ 1744 (greedy): read n numbers and combine them in pairs (or leave them
# alone) so that the total -- sum of products of paired numbers plus the
# unpaired ones -- is as large as possible.
n = int(input())
sequence = []
negative = []
positive = []
res = 0

for i in range(n):
    i = int(input())  # NOTE: reuses the loop variable; each input overwrites it
    sequence.append(i)

# Partition: zeros go into the negative bucket, where pairing a zero with a
# leftover negative number cancels that negative out.
for k in sequence:
    if k < 0:
        negative.append(k)
    elif k > 0:
        positive.append(k)
    else:
        negative.append(k)

negative.sort()  # ascending: adjacent pairs are the two most-negative values
positive.sort(reverse=True)  # descending: adjacent pairs are the two largest
# print(negative)

u = len(negative)
# Pair negatives two at a time (product of two negatives is positive).
if 0 in negative:
    if u % 2 == 0:
        for q in range(0, u, 2):
            res += negative[q] * negative[q + 1]
    else:
        # odd count: the leftover element pairs with a zero, contributing 0
        for w in range(0, u - 1, 2):
            res += negative[w] * negative[w + 1]
else:
    if u % 2 == 0:
        for q in range(0, u, 2):
            res += negative[q] * negative[q + 1]
    elif u % 2 != 0 and u != 1:
        for w in range(0, u - 1, 2):
            res += negative[w] * negative[w + 1]
        res += negative[u - 1]  # no zero available: the last negative is added as-is
    else:
        res += negative[0]  # single negative, nothing to pair it with
# print("음수합:", res)  # (debug: running total after the negative half)
# print(positive)

v = len(positive)
# Ones are better added than multiplied (1 + k > 1 * k), so they are counted
# separately (x) and added at the end.
if 1 in positive:
    x = positive.count(1)
    # print(x)
    # NOTE(review): the `v - 1 > x` guard and the `range(0, v - x, 2)` bounds
    # look fragile for odd counts of non-one positives -- verify against the
    # problem's edge cases before relying on this branch.
    if v - 1 > x:
        if v % 2 == 0:
            for s in range(0, v - x, 2):
                res += positive[s] * positive[s + 1]
            res += x
        else:
            for t in range(0, v - x, 2):
                res += positive[t] * positive[t + 1]
            res += x
    else:
        # Everything (or all but one) is a 1: just add the values up.
        for h in positive:
            res += h
else:
    if v % 2 == 0:
        for r in range(0, v, 2):
            res += positive[r] * positive[r + 1]
    else:
        for f in range(0, v - 1, 2):
            res += positive[f] * positive[f + 1]
        res += positive[v - 1]  # odd count: the smallest positive stays unpaired
print(res)
|
[
"[email protected]"
] | |
f8e9765b859dd527defd2ce06933a55ecb70e041
|
35fdd5b42b47a1dbe6a25f6fc1865f4e48b842a5
|
/evalml/tests/component_tests/test_catboost_classifier.py
|
1ef6fd41a8656a2914d90172ce42a92330d0a24e
|
[
"BSD-3-Clause"
] |
permissive
|
skvorekn/evalml
|
41e5426f9f7d5ad625c21b74336009894c79c7de
|
2cbfa344ec3fdc0fb0f4a0f1093811135b9b97d8
|
refs/heads/main
| 2023-03-27T01:42:07.691406 | 2021-03-19T18:53:43 | 2021-03-19T18:53:43 | 349,555,689 | 0 | 0 |
BSD-3-Clause
| 2021-03-21T14:57:01 | 2021-03-19T21:08:12 | null |
UTF-8
|
Python
| false | false | 837 |
py
|
import pandas as pd
from pytest import importorskip
from evalml.pipelines.components import CatBoostClassifier
from evalml.utils import SEED_BOUNDS
importorskip('catboost', reason='Skipping test because catboost not installed')
def test_catboost_classifier_random_seed_bounds_seed(X_y_binary):
    """ensure catboost's RNG doesn't fail for the min/max bounds we support on user-inputted random seeds"""
    features, target = X_y_binary
    column_names = ["col_{}".format(idx) for idx in range(len(features[0]))]
    features = pd.DataFrame(features, columns=column_names)
    target = pd.Series(target)

    # Smallest supported seed: fitting must not raise.
    model = CatBoostClassifier(n_estimators=1, max_depth=1, random_seed=SEED_BOUNDS.min_bound)
    model.fit(features, target)

    # Largest supported seed: fit() should return the classifier itself.
    model = CatBoostClassifier(n_estimators=1, max_depth=1, random_seed=SEED_BOUNDS.max_bound)
    result = model.fit(features, target)
    assert isinstance(result, CatBoostClassifier)
|
[
"[email protected]"
] | |
0f776e18f96167e136351a53c789777a2a35a629
|
cbc5e26bb47ae69e80a3649c90275becf25ce404
|
/xlsxwriter/test/comparison/test_chart_layout04.py
|
f377a5806721d2af1d65752bac33bb918a5d84f3
|
[
"BSD-2-Clause-Views",
"BSD-3-Clause",
"MIT"
] |
permissive
|
mst-solar-car/kicad-bom-generator
|
c3549409c3139f787ad28391372b5cb03791694a
|
2aae905056d06f3d25343a8d784049c141d05640
|
refs/heads/master
| 2021-09-07T14:00:40.759486 | 2018-02-23T23:21:13 | 2018-02-23T23:21:13 | 107,868,801 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,764 |
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None

        base_name = 'chart_layout04.xlsx'
        comparison_dir = 'xlsxwriter/test/comparison/'

        self.got_filename = comparison_dir + '_test_' + base_name
        self.exp_filename = comparison_dir + 'xlsx_files/' + base_name

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with user defined layout."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'column'})

        # Pin axis ids so the output matches the Excel reference file.
        chart.axis_ids = [68311296, 69198208]

        series_data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        for anchor, values in zip(('A1', 'B1', 'C1'), series_data):
            worksheet.write_column(anchor, values)

        for ref in ('=Sheet1!$A$1:$A$5',
                    '=Sheet1!$B$1:$B$5',
                    '=Sheet1!$C$1:$C$5'):
            chart.add_series({'values': ref})

        # User-defined (manual) title layout is the behaviour under test.
        chart.set_title({
            'name': 'Title',
            'layout': {
                'x': 0.42631933508311465,
                'y': 0.14351851851851852,
            }
        })

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
|
[
"[email protected]"
] | |
48cf0f54c8738ea16878d6beb0a2fd2a8d7aa385
|
c50e5af8f72de6ef560ee6c0bbfa756087824c96
|
/刷题/Leetcode/84. 柱状图中最大的矩形/p84_Largest_Rectangle_in_Histogram_暴力.py
|
7430809260718f7c390d48a5c4dc9f9b4dcaa792
|
[] |
no_license
|
binghe2402/learnPython
|
5a1beef9d446d8316aaa65f6cc9d8aee59ab4d1c
|
2b9e21fe4a8eea0f8826c57287d59f9d8f3c87ce
|
refs/heads/master
| 2022-05-27T03:32:12.750854 | 2022-03-19T08:00:19 | 2022-03-19T08:00:19 | 252,106,387 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,012 |
py
|
from typing import List
class Solution:
    # Largest rectangle in a histogram, expand-around-each-bar approach:
    # for every bar, grow a window left and right while neighbouring bars
    # are at least as tall, then score width * height for that bar.
    # (A pure brute force would instead try every (i, j) boundary pair and
    # use min(heights[i:j+1]) as the height -- O(n^2) either way.)
    def largestRectangleArea(self, heights: List[int]) -> int:
        best = 0
        n = len(heights)
        for idx, h in enumerate(heights):
            # Walk left past every bar at least as tall as this one.
            lo = idx - 1
            while lo >= 0 and heights[lo] >= h:
                lo -= 1
            # Walk right the same way.
            hi = idx + 1
            while hi < n and heights[hi] >= h:
                hi += 1
            # (lo, hi) are exclusive bounds, so the width is hi - lo - 1.
            best = max(best, (hi - lo - 1) * h)
        return best
|
[
"[email protected]"
] | |
c6fef081bd46b0cb2875a2870bf64ad4631575c4
|
baffcef29e33658138c43ef358d7399ab3ea2c0d
|
/WORKFLOWS/Tools/NEC/NAL/nal-model/rest/test/unit/test_dcs.py
|
cb088e130c920d2697ba7584fef6500526bdc175
|
[
"Apache-2.0",
"JSON"
] |
permissive
|
openmsa/NO
|
aa7d4ff000875bfcff0baee24555ec16becdb64e
|
24df42ee3927415b552b5e5d7326eecd04ebca61
|
refs/heads/master
| 2020-03-09T23:21:09.657439 | 2019-03-29T06:29:07 | 2019-03-29T06:29:07 | 129,056,267 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,271 |
py
|
import json
import mysql.connector
import os
import sys
import unittest
import urllib.request
import urllib.parse
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../../')
from rest.api import router
from rest.conf import config
class TestSelectAPI(unittest.TestCase):
# Do a test of Select.
ID = 0
    def setUp(self):
        """Per-test setup: run the parent setUp, then seed one WIM_DC_MNG row."""
        # Establish a clean test environment.
        super(TestSelectAPI, self).setUp()
        # Insert test data
        self.create_fixtures()
    def tearDown(self):
        """Clear the test environment"""
        # Remove all rows seeded by create_fixtures.
        super(TestSelectAPI, self).tearDown()
        self.destroy_fixtures()
    def create_fixtures(self):
        """Insert one WIM_DC_MNG row and publish `extension_info` / `ID` globals.

        The module-level globals are read back by the test methods: `ID` is the
        auto-increment key of the inserted row, `extension_info` the JSON blob
        stored with it.
        """
        con, cur = self.connect_db()

        global extension_info
        extension_info = {
            'dc_name': 'dc_namexxxxxxxxxx',
            'dc_number': 'dc_numberxxxxxxxxxx'
        }

        # Execute SQL
        param_vals = ['test_create_id-0ac6cb428b23', '2016-12-31 23:59:59',
                      'test_update_id-0ac6cb428b23', '2016-12-31 23:59:59',
                      0, 'dc_id-dd7e-0ac6cb428b23',
                      json.dumps(extension_info)]
        cur.execute("INSERT INTO WIM_DC_MNG(create_id, create_date, " +
                    "update_id, update_date, delete_flg, " +
                    "dc_id, extension_info) VALUES " +
                    "(%s, %s, %s, %s, %s, %s, %s)", param_vals)

        # Capture the auto-increment id of the row just inserted.
        cur.execute('SELECT last_insert_id() FROM WIM_DC_MNG')
        global ID
        ID = cur.fetchall()[0][0]

        self.cut_db(con, cur)
    def destroy_fixtures(self):
        """Delete every row seeded by create_fixtures (matched by create_id)."""
        con, cur = self.connect_db()

        # Execute SQL
        param_vals = ['test_create_id-0ac6cb428b23']
        cur.execute("DELETE FROM WIM_DC_MNG WHERE " +
                    "create_id = %s", param_vals)

        self.cut_db(con, cur)
    def connect_db(self):
        """Open a MySQL connection (autocommit off) and return (connection, cursor)."""
        # Connect Database
        con = mysql.connector.connect(
            host=getattr(config, 'MYSQL_HOSTNAME', ''),
            db=getattr(config, 'MYSQL_DBNAME', ''),
            user=getattr(config, 'MYSQL_USERID', ''),
            passwd=getattr(config, 'MYSQL_PASSWORD', ''),
            buffered=True)
        # Set Autocommit Off
        con.autocommit = False
        # Open Cursor
        cur = con.cursor()

        return con, cur
    def cut_db(self, con, cur):
        """Commit the open transaction, then release the cursor and connection."""
        # Commit Transaction
        con.commit()
        # Close Cursor
        cur.close()
        # Close Database
        con.close()
    def test_select_api(self):
        """GET /dcs filtered by the fixture's ID returns that single row,
        with the extension_info JSON flattened into top-level keys."""
        request_params = {
            'query': {
                'delete_flg': '0', 'ID': ID
            },
            'resource': 'dcs',
            'method': 'GET',
            'id': []
        }
        res = router.Router().routing(request_params)

        status = res['status']
        res_data = res['message'].decode('utf-8')
        res_data = json.loads(res_data)

        self.assertEqual(status, '200 OK')
        self.assertEqual(len(res_data), 1)
        self.assertEqual(res_data[0]['ID'], ID)
        self.assertEqual(res_data[0]['create_id'],
                         'test_create_id-0ac6cb428b23')
        self.assertEqual(res_data[0]['update_id'],
                         'test_update_id-0ac6cb428b23')
        self.assertEqual(res_data[0]['delete_flg'], '0')
        # The raw extension_info column must not leak through ...
        self.assertEqual(res_data[0].get('extension_info', ''), '')
        # ... its contents appear as individual top-level keys instead.
        for key in extension_info:
            self.assertEqual(res_data[0].get(key), extension_info[key])
def test_insert_api(self):
    """POST /dcs creates a row, returns its new ID, and persists all columns."""
    new_row = {
        'create_id': 'test_create_id-0ac6cb428b23',
        'update_id': 'test_create_id-0ac6cb428b23',
        'delete_flg': 0,
        'dc_id': 'dc_id-bb6d-6bb9bd380a11',
        'dc_name': 'dc_name_B',
        'dc_number': 1234
    }
    response = router.Router().routing({
        'body': new_row,
        'query': {},
        'resource': 'dcs',
        'method': 'POST',
        'id': []
    })
    body = json.loads(response['message'].decode('utf-8'))
    self.assertEqual(response['status'], '200 OK')
    self.assertEqual(len(body), 1)
    self.assertTrue('ID' in body)
    # Read the row back to confirm every inserted column round-trips.
    response = router.Router().routing({
        'query': {'dc_id': 'dc_id-bb6d-6bb9bd380a11'},
        'resource': 'dcs',
        'method': 'GET',
        'id': []
    })
    rows = json.loads(response['message'].decode('utf-8'))
    self.assertEqual(response['status'], '200 OK')
    self.assertEqual(len(rows), 1)
    for key, expected in new_row.items():
        # delete_flg is stored and returned as a string.
        if key == 'delete_flg':
            expected = str(expected)
        self.assertEqual(rows[0].get(key), expected)
def test_update_api(self):
    """PUT /dcs/<ID> updates the fixture row and persists the new values."""
    changes = {
        'update_id': 'test_update_id-0ac6cb428b23',
        'dc_id': 'dc_id-ad4c-4cc6ea276a55',
        'dc_name': 'dc_name_C',
        'dc_number': 5678
    }
    response = router.Router().routing({
        'body': changes,
        'query': {},
        'resource': 'dcs',
        'method': 'PUT',
        'id': [ID]
    })
    body = json.loads(response['message'].decode('utf-8'))
    self.assertEqual(response['status'], '200 OK')
    self.assertEqual(body, True)
    # Fetch by the new dc_id to verify the update was actually written.
    response = router.Router().routing({
        'query': {'dc_id': 'dc_id-ad4c-4cc6ea276a55'},
        'resource': 'dcs',
        'method': 'GET',
        'id': []
    })
    rows = json.loads(response['message'].decode('utf-8'))
    self.assertEqual(response['status'], '200 OK')
    self.assertEqual(len(rows), 1)
    for key, expected in changes.items():
        # delete_flg is stored and returned as a string.
        if key == 'delete_flg':
            expected = str(expected)
        self.assertEqual(rows[0].get(key), expected)
def test_delete_api(self):
    """DELETE /dcs/<ID> removes the fixture row from the table."""
    response = router.Router().routing({
        'body': {},
        'query': {},
        'resource': 'dcs',
        'method': 'DELETE',
        'id': [ID]
    })
    body = json.loads(response['message'].decode('utf-8'))
    self.assertEqual(response['status'], '200 OK')
    self.assertEqual(body, True)
    # The row must be gone when queried straight from the database.
    con, cur = self.connect_db()
    cur.execute("SELECT ID FROM WIM_DC_MNG WHERE ID = %s", [ID])
    self.assertEqual(cur.fetchall(), [])
    self.cut_db(con, cur)
|
[
"[email protected]"
] | |
fbd49bfeec9947ef6f83b1e9787a0081f6be9f05
|
57775b4c245723078fd43abc35320cb16f0d4cb6
|
/Data structure/linked-list/delete-node-given-position.py
|
cc4164b336e8f1ad6093479327c26ce5514d4106
|
[] |
no_license
|
farhapartex/code-ninja
|
1757a7292ac4cdcf1386fe31235d315a4895f072
|
168fdc915a4e3d3e4d6f051c798dee6ee64ea290
|
refs/heads/master
| 2020-07-31T16:10:43.329468 | 2020-06-18T07:00:34 | 2020-06-18T07:00:34 | 210,668,245 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,394 |
py
|
"""
Given a ‘key’, delete the first occurrence of this key in linked list.
To delete a node from linked list, we need to do following steps.
1) Find previous node of the node to be deleted.
2) Change the next of previous node.
3) Free memory for the node to be deleted.
"""
class Node:
    """A single linked-list node holding one value."""

    def __init__(self, data):
        self.data = data  # payload stored in this node
        self.next = None  # following node, or None at the tail


class LinkedList:
    """Minimal singly linked list with push/insert/append and delete-by-position."""

    def __init__(self):
        self.head = None  # first node, or None when the list is empty

    def print_list(self):
        """Print each node's data, one value per line, from head to tail."""
        temp = self.head
        while temp:
            print(temp.data)
            temp = temp.next

    def push_front(self, node):
        """Insert a new node carrying ``node`` (the data value) at the head."""
        new_node = Node(node)
        new_node.next = self.head
        self.head = new_node

    def insert_after(self, prev_node, new_node):
        """Insert a new node carrying ``new_node`` (the data value) after ``prev_node``."""
        if prev_node is None:
            print("Previous node must be in a LinkedList")
            return
        new_node = Node(new_node)
        new_node.next = prev_node.next
        prev_node.next = new_node

    def append(self, new_data):
        """Append a new node carrying ``new_data`` at the tail."""
        new_node = Node(new_data)
        if self.head is None:
            self.head = new_node
            return
        last = self.head
        while last.next:
            last = last.next
        last.next = new_node

    def deleteNodeGivenPosition(self, position):
        """Delete the node at 0-based ``position``; out-of-range is a no-op."""
        if self.head is None:
            return
        if position == 0:
            self.head = self.head.next
            return
        # Walk to the node just before the one being deleted.
        temp = self.head
        for _ in range(position - 1):
            temp = temp.next
            if temp is None:
                break
        # Position is beyond the end of the list: nothing to delete.
        if temp is None or temp.next is None:
            return
        # Unlink the target node by bypassing it (the dead ``temp.next = None``
        # shuffle from the original added nothing and has been removed).
        temp.next = temp.next.next
if __name__ == "__main__":
llist = LinkedList()
llist.append(6)
llist.push_front(10)
llist.push_front(6)
llist.push_front(11)
llist.append(20)
llist.insert_after(llist.head.next, 8)
llist.print_list()
llist.deleteNodeGivenPosition(2)
print("Linked List after Deletion at 2:")
llist.print_list()
|
[
"[email protected]"
] | |
ca0bf818f5d797fe169d26f5876caf9e6873172e
|
197b10d75ba44b22fca29f8d69c2922b72cb8ca5
|
/ship/api.py
|
ae8690dc1a4da94d2c96e6f66ac78b8994a82a42
|
[] |
no_license
|
webbyfox/py_master
|
c713c87cf4fd7d2697765211cdaefd7b49f96adc
|
e4b3ef5ea618b8f91c363d7f51d0e7b7064762a9
|
refs/heads/master
| 2021-01-11T14:45:15.060075 | 2017-01-27T13:24:58 | 2017-01-27T13:24:58 | 80,209,118 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,519 |
py
|
# -*- coding: utf-8 -*-
from rest_framework import viewsets, mixins, status
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from assessment.auth import TokenAuthSupportQueryString
from .injection_setup import logic
from .serializers import ShipSerializer
class ShipViewSet(
    mixins.CreateModelMixin,
    mixins.ListModelMixin,
    mixins.RetrieveModelMixin,
    mixins.DestroyModelMixin,
    viewsets.GenericViewSet
):
    """CRUD endpoints for ships, scoped to the authenticated user."""

    authentication_classes = (TokenAuthSupportQueryString,)
    permission_classes = (IsAuthenticated,)
    pagination_class = LimitOffsetPagination
    serializer_class = ShipSerializer
    default_limit = 20

    def list(self, request):  # pylint: disable=unused-argument
        """Return one paginated page of the user's ships."""
        page = self.paginate_queryset(self.get_queryset())
        return self.get_paginated_response(page)

    def get_queryset(self):
        """Fetch ships for the authenticated user plus any extra user_ids requested."""
        params = self.request.query_params
        user_ids = [self.request.user.id] + params.getlist('user_id')
        ships, __ = logic.get_ships(
            user_ids=user_ids,
            id=params.get('id'),
            ids=params.getlist('ids'),
            status=params.get('status'),
            order_by=params.get('order_by'),
        )
        return ships

    def create(self, request):
        """Create a ship owned by the authenticated user."""
        payload = self.request.data.copy()
        # We want to override the user ID to be the authenticated user.
        payload['user_id'] = self.request.user.id
        serializer = self.serializer_class(data=payload)
        if not serializer.is_valid():
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )
        serializer.save()
        return Response(status=status.HTTP_204_NO_CONTENT)

    def retrieve(self, request, pk=None):
        """Return a single ship belonging to the requesting user."""
        ships, __ = logic.get_ships(
            id=pk,
            user_ids=[request.user.id],
        )
        return Response(self.serializer_class(ships[0]).data)

    def update(self, request, pk=None):
        raise NotImplementedError(
            'Please implement ``ship.api:ShipViewSet.update``'
        )

    def destroy(self, request, pk=None):  # pylint: disable=unused-argument
        """Delete the ship identified by ``pk``."""
        logic.delete_ship(id=pk)
        return Response(status=status.HTTP_204_NO_CONTENT)
|
[
"[email protected]"
] | |
e7cbec8407c61c7b724171aa967674dbf244853b
|
89bae02f23e787416fda894a128c9abfb4986515
|
/metalearning/allennlp/tests/modules/matrix_attention/cosine_matrix_attention_test.py
|
cff481ba8ea3a77de780b912867c54cef1eb849c
|
[
"Apache-2.0"
] |
permissive
|
asheverdin/multilingual-interference
|
f2e64cebfffc749b080fa64860659922224e6e65
|
7bc1b5918142e3c84bea83c5a7f39e3f245172e9
|
refs/heads/main
| 2023-05-12T13:07:19.997696 | 2021-05-28T22:16:26 | 2021-05-28T22:16:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,518 |
py
|
import torch
from numpy.testing import assert_almost_equal
import numpy
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.matrix_attention import CosineMatrixAttention
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
class TestCosineMatrixAttention(AllenNlpTestCase):
    """Unit tests for the cosine-similarity matrix attention module."""

    def test_can_init_cosine(self):
        legacy_attention = MatrixAttention.from_params(Params({"type": "cosine"}))
        # Bug fix: the original called isinstance() and discarded the result,
        # so this test could never fail; assert the check instead.
        assert isinstance(legacy_attention, CosineMatrixAttention)

    def test_cosine_similarity(self):
        # example use case: a batch of size 2.
        # With a time element component (e.g. sentences of length 2) each word is a vector of length 3.
        # It is comparing this with another input of the same type
        output = CosineMatrixAttention()(
            torch.FloatTensor([[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]]),
            torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
        )

        # For the first batch there is
        #     no correlation between the first words of the input matrix
        #     but perfect correlation for the second word
        # For the second batch there is
        #     negative correlation for the first words
        #     correlation for the second word
        assert_almost_equal(
            output.numpy(), numpy.array([[[0, 0], [0.97, 1]], [[-1, -0.99], [0.99, 1]]]), decimal=2
        )
|
[
"[email protected]"
] | |
d2462ea0d850cd6935ccb6c60eff3fbb00faf7d7
|
07917881310fc81d85a2cbdf27c9b3c4fa03c694
|
/python1812/python_1/17_测试_收发邮件_二维码/代码/04_验证码生成器.py
|
4d493eee3597ce7e1c156d58c53c29845e19966c
|
[] |
no_license
|
zaoyuaner/Learning-materials
|
9bc9a127d1c6478fb6cebbb6371b1fd85427c574
|
1f468a6f63158758f7cbfe7b5df17f51e3205f04
|
refs/heads/master
| 2020-05-18T11:38:45.771271 | 2019-05-20T09:07:44 | 2019-05-20T09:07:44 | 184,384,050 | 2 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,363 |
py
|
import datetime
import hashlib
from PIL import ImageFont,ImageDraw,Image
from random import randint
class VerifyCode:
    """Numeric image-CAPTCHA generator built on Pillow; writes ``vc.jpg``."""

    def __init__(self, width=100, height=40, size=4):
        """
        :param width: image width in pixels (falls back to 100 unless positive)
        :param height: image height in pixels (falls back to 40 unless positive)
        :param size: number of digits in the code (falls back to 4 unless positive)
        """
        self.width = width if width > 0 else 100
        self.height = height if height > 0 else 40
        self.size = size if size > 0 else 4
        self.pen = None  # drawing handle, created in generate()
        self.code = ""   # the generated digit string

    def generate(self):
        """Render a fresh verification-code image and save it as vc.jpg."""
        # Light background: channel values closer to 255 are lighter.
        im = Image.new("RGB", (self.width, self.height), self.randColor(160, 255))
        self.pen = ImageDraw.Draw(im)
        self.randString()
        self.__drawCode()
        self.__drawPoint()
        self.__drawLine()
        im.save("vc.jpg")

    def __drawLine(self):
        """Draw a few random interference lines across the image."""
        for _ in range(6):
            start = (randint(1, self.width - 1), randint(1, self.height - 1))
            end = (randint(1, self.width - 1), randint(1, self.height - 1))
            self.pen.line([start, end], fill=self.randColor(50, 150), width=1)

    def __drawPoint(self):
        """Scatter random interference dots over the image."""
        for _ in range(200):
            x = randint(1, self.width - 1)
            y = randint(1, self.height - 1)
            self.pen.point((x, y), fill=self.randColor(30, 100))

    def __drawCode(self):
        """Paint the code digits, evenly spaced with slight vertical jitter."""
        myFont = ImageFont.truetype("MSYH.TTF", size=20, encoding="UTF-8")
        for i in range(self.size):
            x = 15 + i * (self.width - 20) / self.size  # even horizontal spacing
            y = randint(5, 10)                          # random height jitter
            self.pen.text((x, y), self.code[i], fill=self.randColor(0, 60), font=myFont)

    def randString(self):
        """Fill ``self.code`` with ``size`` random decimal digits."""
        self.code = "".join(str(randint(0, 9)) for _ in range(self.size))

    def randColor(self, low, high):
        """Return a random (R, G, B) tuple, each channel within [low, high]."""
        return randint(low, high), randint(low, high), randint(low, high)
# class StrCode(VerifyCode):
# def randString(self):
# s1 =hashlib.md5(b"2314").hexdigest()
# print(s1)
# self.code = s1[:self.size]
if __name__ == "__main__":
vc = VerifyCode()
# vc = StrCode()
vc.generate()
print(vc.code)
|
[
"[email protected]"
] | |
edfcd0b67010b318be752683aea47602efef2e0e
|
9b57429efa72dbfa2ead9ae8d98a148475264aef
|
/dataservice/zmq/UPcomputer_part/data_process_0mqsubsys/codetestfile.py
|
5b92ab166e2e97779e29006953d6456126db19c8
|
[] |
no_license
|
Scottars/nis_website
|
7d78b1ab8647ebf17bc2b020660a56ac6f6a039f
|
2025e428dd65dba06c95738233978604ee011570
|
refs/heads/master
| 2022-03-07T19:04:15.565128 | 2021-01-19T16:03:50 | 2021-01-19T16:03:50 | 218,421,853 | 0 | 0 | null | 2022-03-02T06:49:57 | 2019-10-30T01:58:29 |
JavaScript
|
UTF-8
|
Python
| false | false | 155 |
py
|
import struct

# Message layout: a 3-byte ASCII tag followed by one big-endian 32-bit float.
_FLOAT = struct.Struct('!f')

b = b'exp' + _FLOAT.pack(12)
print(b)
print(b[0:3])

if b[0:3] == b'exp':
    # Tag matched: decode the 4-byte float payload that follows it.
    exp_id = _FLOAT.unpack(b[3:7])[0]
    print(exp_id)
|
[
"[email protected]"
] | |
82ecfd01834d11e1c0de1b980af3a9cafb7d5d79
|
d0fec74acfbfdee1b662736731c1cc988e2ba2ee
|
/problem_44/p044.py
|
45a4578f96261bb8aeac04304edbc1ab5ebc2014
|
[] |
no_license
|
msztylko/project-Euler
|
fdd0cfefbe88b63f6dbd2d08f1cd59270b9e1735
|
b3f5ce828ccc6662c100dd27fa295fc8afa22f6e
|
refs/heads/master
| 2021-11-23T02:50:19.333259 | 2021-10-31T17:52:28 | 2021-10-31T17:52:28 | 195,980,596 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,851 |
py
|
import itertools, sys
if sys.version_info.major == 2:
range = xrange
def compute():
    """Project Euler 44: find the smallest difference D = P(i) - P(j) between
    two pentagonal numbers whose sum and difference are both pentagonal,
    and return D as a string."""
    pentanum = PentagonalNumberHelper()
    min_d = None  # None means not found yet, positive number means found a candidate
    # For each upper pentagonal number index, going upward
    for i in itertools.count(2):
        pent_i = pentanum.term(i)
        # If the next number down is at least as big as a found difference, then conclude searching
        # (gaps between consecutive pentagonal numbers only grow, so no
        # smaller difference can appear past this point).
        if min_d is not None and pent_i - pentanum.term(i - 1) >= min_d:
            break

        # For each lower pentagonal number index, going downward
        for j in range(i - 1, 0, -1):
            pent_j = pentanum.term(j)
            diff = pent_i - pent_j
            # If the difference is at least as big as a found difference, then stop testing lower pentagonal numbers
            if min_d is not None and diff >= min_d:
                break
            elif pentanum.is_term(pent_i + pent_j) and pentanum.is_term(diff):
                min_d = diff  # Found a smaller difference
    return str(min_d)
# Provides memoization for generating and testing pentagonal numbers.
class PentagonalNumberHelper(object):
    """Memoized generator/tester for pentagonal numbers P(n) = n(3n-1)/2."""

    def __init__(self):
        # term_list[n] caches P(n); index 0 is a placeholder so that the
        # list index matches the 1-based pentagonal index.
        self.term_list = [0]
        self.term_set = set()

    def _extend(self):
        """Append the next pentagonal number to both caches."""
        n = len(self.term_list)
        value = (n * (n * 3 - 1)) >> 1
        self.term_list.append(value)
        self.term_set.add(value)

    def term(self, x):
        """Return P(x) for x >= 1, growing the cache on demand."""
        assert x > 0
        while len(self.term_list) <= x:
            self._extend()
        return self.term_list[x]

    def is_term(self, y):
        """Return True iff y (> 0) is a pentagonal number."""
        assert y > 0
        while self.term_list[-1] < y:
            self._extend()
        return y in self.term_set
if __name__ == "__main__":
print(compute())
|
[
"[email protected]"
] | |
81678e4f401442962478ab90127c24b61b21e897
|
c074ce302e0a2a09ebe8b0a94e342380afbaa911
|
/beakjoon_PS/no2579_2.py
|
7c00e40144c179d3cbf2eca5fbd8ec8eb8d546f6
|
[] |
no_license
|
elrion018/CS_study
|
eeea7a48e9e9b116ddf561ebf10633670d305722
|
3d5478620c4d23343ae0518d27920b3211f686fd
|
refs/heads/master
| 2021-06-10T13:35:20.258335 | 2021-04-25T10:12:17 | 2021-04-25T10:12:17 | 169,424,097 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 555 |
py
|
import sys

# Number of stairs.
N = int(sys.stdin.readline())

# Score written on each stair, in input order.
stair = []
for _ in range(N):
    stair.append(int(sys.stdin.readline()))

# dp[i][0]: best score ending on stair i when it was reached by a 2-step.
# dp[i][1]: best score ending on stair i when it was reached by a 1-step
#           (so stair i-1 was also stepped on).
dp = [[0, 0] for _ in range(N)]

if N > 2:
    dp[0][0] = stair[0]
    dp[1][0] = stair[1]
    dp[1][1] = stair[0] + stair[1]
    dp[2][0] = stair[0] + stair[2]
    dp[2][1] = stair[1] + stair[2]
    for i in range(2, N):
        # Arriving by a 2-step: the previous landing (i-2) may itself have
        # been reached either way.
        dp[i][0] = max(dp[i-2][0], dp[i-2][1]) + stair[i]
        # Arriving by a 1-step: stair i-1 must have been reached by a 2-step,
        # because three consecutive stairs are not allowed.
        dp[i][1] = dp[i-1][0] + stair[i]
    print(max(dp[N-1][0], dp[N-1][1]))
elif N == 2:
    print(stair[0]+stair[1])
elif N == 1:
    print(stair[0])
|
[
"[email protected]"
] | |
aa4c1d64ab5007478c6035cf4a0c3268d542695f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_tow.py
|
bbb4161da8d142413231367d45dc13fd41964c06
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 369 |
py
|
#calss header
class _TOW():
def __init__(self,):
self.name = "TOW"
self.definitions = [u"to pull someone's vehicle using a rope or chain tied to your vehicle: ", u'being pulled along: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"[email protected]"
] | |
1345cc0e0a984974cc45d265fb5e248b561053c2
|
b503e79ccfca67c8114f5bd7a215f5ae993a0ba4
|
/airflow/providers/amazon/aws/sensors/glue.py
|
21a82da9ee9d040fd45ccda5044d467bf7c6b4c3
|
[
"Apache-2.0",
"BSD-3-Clause",
"Python-2.0",
"MIT"
] |
permissive
|
github/incubator-airflow
|
df1d9780f862ea1df8261ea6015dd50a4583f983
|
73f70e00b9fd294057f8ca6b714a85622f6d5dd5
|
refs/heads/gh-2.0.2
| 2023-07-29T18:08:43.140580 | 2022-09-14T18:23:42 | 2022-09-14T18:23:42 | 80,634,006 | 24 | 27 |
Apache-2.0
| 2023-04-18T04:24:36 | 2017-02-01T15:34:55 |
Python
|
UTF-8
|
Python
| false | false | 2,398 |
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.glue import AwsGlueJobHook
from airflow.sensors.base import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class AwsGlueJobSensor(BaseSensorOperator):
    """
    Waits for an AWS Glue Job to reach any of the status below
    'FAILED', 'STOPPED', 'SUCCEEDED'

    :param job_name: The AWS Glue Job unique name
    :type job_name: str
    :param run_id: The AWS Glue current running job identifier
    :type run_id: str
    """

    template_fields = ('job_name', 'run_id')

    @apply_defaults
    def __init__(self, *, job_name: str, run_id: str, aws_conn_id: str = 'aws_default', **kwargs):
        super().__init__(**kwargs)
        self.job_name = job_name
        self.run_id = run_id
        self.aws_conn_id = aws_conn_id
        # Terminal states that end the sensor's wait.
        self.success_states = ['SUCCEEDED']
        self.errored_states = ['FAILED', 'STOPPED', 'TIMEOUT']

    def poke(self, context):
        # One Glue API round-trip per poke interval.
        hook = AwsGlueJobHook(aws_conn_id=self.aws_conn_id)
        self.log.info("Poking for job run status :for Glue Job %s and ID %s", self.job_name, self.run_id)
        job_state = hook.get_job_state(job_name=self.job_name, run_id=self.run_id)

        if job_state in self.success_states:
            self.log.info("Exiting Job %s Run State: %s", self.run_id, job_state)
            return True

        if job_state in self.errored_states:
            raise AirflowException(
                "Exiting Job " + self.run_id + " Run State: " + job_state)

        # Still running: keep poking.
        return False
|
[
"[email protected]"
] | |
816ccf13d545d21c6a8991fbbd5db56841a3fd65
|
4eab1bd9e1b00155872e44963a5df0532cb5341f
|
/menus/menuTwo.py
|
ebbc521d4e91603c346648b2c0ccb7a4a9256571
|
[] |
no_license
|
soheilpaper/python-gui
|
9b067467ca41d27092e5817d0a49162b10c37de6
|
4e6bcad319829dd2c0fdc328520a55a7932060c7
|
refs/heads/master
| 2020-12-31T04:29:16.798703 | 2016-04-08T08:41:59 | 2016-04-08T08:41:59 | 55,763,643 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,595 |
py
|
import wx
########################################################################
class MyForm(wx.Frame):
    """Demo frame with a plain text menu and a menu item carrying a bitmap."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Build the frame, its panel, and the menu bar."""
        wx.Frame.__init__(self, None, title="wx.Menu Tutorial")

        self.panel = wx.Panel(self, wx.ID_ANY)

        # create the menubar
        menuBar = wx.MenuBar()

        # create the first menu (starting on left)
        carMenu = wx.Menu()
        carMenu.Append(101, "&Ford", "An American Automaker")
        carMenu.Append(102, "&Nissan", "")
        carMenu.Append(103, "&Toyota", "Buy Japanese!")
        carMenu.Append(104, "&Close", "Close the application")

        # add a picture to a menu
        picMenu = wx.Menu()
        item = wx.MenuItem(picMenu, wx.ID_ANY, "Snake", "This menu has a picture!")
        img = wx.Image('snake32.bmp', wx.BITMAP_TYPE_ANY)
        item.SetBitmap(wx.BitmapFromImage(img))
        picMenu.AppendItem(item)

        # add menus to menubar
        menuBar.Append(carMenu, "&Vehicles")
        menuBar.Append(picMenu, "&Picture")
        self.SetMenuBar(menuBar)

        # Bug fix: onExit existed but was never bound, so the "Close" menu
        # item (id 104) did nothing when selected.
        self.Bind(wx.EVT_MENU, self.onExit, id=104)

    #----------------------------------------------------------------------
    def onExit(self, event):
        """Close the frame, ending the application."""
        self.Close()
#----------------------------------------------------------------------
# Run the program
if __name__ == "__main__":
    app = wx.App(False)
    # Bug fix: the original wrote ``frame = MyForm().Show()``, binding the
    # boolean returned by Show() instead of the frame itself.
    frame = MyForm()
    frame.Show()
    app.MainLoop()
|
[
"[email protected]"
] | |
5ea8085f35c9778a5a1d4aae6dc84dacc2eb3e30
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/edifact/D08A/MOVINSD08AUN.py
|
546f7086b78b82989b1b35deabc5ccb25f908114
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null |
UTF-8
|
Python
| false | false | 1,740 |
py
|
#Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD08AUN import recorddefs
# EDIFACT MOVINS D.08A message structure for the bots translator: each dict
# names a segment ID, its MIN/MAX repetition counts, and (via LEVEL) the
# nested segment group it owns.
structure = [
    {ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'BGM', MIN: 1, MAX: 1},
        {ID: 'DTM', MIN: 1, MAX: 1},
        {ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
            {ID: 'DTM', MIN: 0, MAX: 9},
        ]},
        {ID: 'NAD', MIN: 0, MAX: 9, LEVEL: [
            {ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
                {ID: 'COM', MIN: 0, MAX: 9},
            ]},
        ]},
        {ID: 'TDT', MIN: 1, MAX: 3, LEVEL: [
            {ID: 'LOC', MIN: 1, MAX: 1},
            {ID: 'DTM', MIN: 1, MAX: 99},
            {ID: 'RFF', MIN: 0, MAX: 1},
            {ID: 'FTX', MIN: 0, MAX: 1},
        ]},
        {ID: 'HAN', MIN: 0, MAX: 99999, LEVEL: [
            {ID: 'LOC', MIN: 0, MAX: 99999, LEVEL: [
                {ID: 'RFF', MIN: 1, MAX: 99},
                {ID: 'FTX', MIN: 0, MAX: 9},
                {ID: 'MEA', MIN: 0, MAX: 9},
                {ID: 'DIM', MIN: 0, MAX: 9},
                {ID: 'LOC', MIN: 0, MAX: 9},
                {ID: 'NAD', MIN: 1, MAX: 99},
                {ID: 'TMP', MIN: 0, MAX: 1, LEVEL: [
                    {ID: 'RNG', MIN: 0, MAX: 1},
                ]},
                {ID: 'EQD', MIN: 0, MAX: 99, LEVEL: [
                    {ID: 'EQN', MIN: 0, MAX: 1},
                ]},
                {ID: 'EQA', MIN: 0, MAX: 99, LEVEL: [
                    {ID: 'EQN', MIN: 0, MAX: 1},
                ]},
                {ID: 'GID', MIN: 0, MAX: 9999, LEVEL: [
                    {ID: 'GDS', MIN: 0, MAX: 1},
                ]},
                {ID: 'RFF', MIN: 0, MAX: 999, LEVEL: [
                    {ID: 'DGS', MIN: 1, MAX: 99, LEVEL: [
                        {ID: 'FTX', MIN: 0, MAX: 1},
                    ]},
                ]},
            ]},
        ]},
        {ID: 'UNT', MIN: 1, MAX: 1},
    ]},
]
|
[
"[email protected]"
] | |
82b102860dad12c81b3575f99ab5d3102e7229e3
|
927d23e5fbcbd7001b1007990b9a28014bfb8219
|
/mnist_classification.py
|
373bf1d62d3f945e2554161b608f5dc3b439098b
|
[] |
no_license
|
minar09/tensorflow-practices
|
5822cf784063223bc0a5a62570fa0a5548cf1ef0
|
7982860ce2ec6df0c57a5389711464cbddad89fe
|
refs/heads/master
| 2020-03-28T21:09:32.658650 | 2018-10-08T15:25:08 | 2018-10-08T15:25:08 | 149,133,679 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,725 |
py
|
#### MNIST classification ###
# Hide the warning messages about CPU/GPU
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Import libraries
import tensorflow as tf
import time
import numpy as np

old_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)

# Download/Read MNIST
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Hide the warning messages about deprecations of MNIST data read
tf.logging.set_verbosity(old_v)

# Initialize parameters
t1 = time.time()
num_steps = 5000
batch_size = 128
display_step = 500
n_hidden_1 = 256
n_hidden_2 = 256
n_hidden_3 = 256
num_input = 784    # 28x28 pixel images, flattened
num_classes = 10   # digits 0-9

# Define placeholder
x = tf.placeholder("float", [None, num_input])
y = tf.placeholder("float", [None, num_classes])

# Define Weight and Bias for the network layers
weights = {
    'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
    # Bug fix: the output layer consumes the third hidden layer, so its
    # weight matrix must be [n_hidden_3, num_classes]. The original used
    # n_hidden_1, which only worked because every hidden size is 256.
    'out': tf.Variable(tf.random_normal([n_hidden_3, num_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'b3': tf.Variable(tf.random_normal([n_hidden_3])),
    'out': tf.Variable(tf.random_normal([num_classes]))
}

# Initialize the model
def mlp(x):
    """Three-layer ReLU MLP; returns unscaled class logits."""
    l1 = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['b1']))
    l2 = tf.nn.relu(tf.add(tf.matmul(l1, weights['h2']), biases['b2']))
    l3 = tf.nn.relu(tf.add(tf.matmul(l2, weights['h3']), biases['b3']))
    lout = tf.add(tf.matmul(l3, weights['out']), biases['out'])
    return lout

# Define hypothesis, cost and optimization functions
logits = mlp(x)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(cost)
prediction = tf.nn.softmax(logits)
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Launch graph/Initialize session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for step in range(1, num_steps+1):
        batch_train_images, batch_train_labels = mnist.train.next_batch(batch_size)
        sess.run(optimizer, feed_dict={x: batch_train_images, y: batch_train_labels})

        if step % display_step == 0 or step == 1:
            print("Step " + str(step) + " out of " + str(num_steps))

    print("Optimization finished!")
    t2 = time.time()
    print("Testing accuracy: ", sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})*100, "%")
    print("Learning time: " + str(t2-t1) + " seconds")
|
[
"[email protected]"
] | |
088dc88aa4aeb64878d97237118802a64edf1d5f
|
48db7bebad4309a7bca8b7dec2cc9193551f46a3
|
/returns/_generated/pointfree/bind_io.pyi
|
192094dd7cdfecddb6d6bb7c83451e2b4d7e27ae
|
[
"BSD-2-Clause"
] |
permissive
|
kenjihiraoka/returns
|
bff6196a059d411b6c36f4a2e284e4439d24fd73
|
4589973520d7226b18acd7295d1a9a10ff032759
|
refs/heads/master
| 2022-11-20T13:20:41.094871 | 2020-07-07T08:23:05 | 2020-07-07T08:23:05 | 277,863,697 | 0 | 0 |
BSD-2-Clause
| 2020-07-07T16:09:25 | 2020-07-07T16:09:25 | null |
UTF-8
|
Python
| false | false | 1,779 |
pyi
|
from typing import Callable, TypeVar, overload
from typing_extensions import Protocol
from returns.context import RequiresContextFutureResult, RequiresContextIOResult
from returns.future import Future, FutureResult
from returns.io import IO, IOResult
# Value consumed by the bound function (contravariant: input position only).
_ValueType = TypeVar('_ValueType', contravariant=True)
_ErrorType = TypeVar('_ErrorType')
# Value produced inside the returned container (covariant: output position only).
_NewValueType = TypeVar('_NewValueType', covariant=True)
# Reader-style environment type (contravariant: input position only).
_EnvType = TypeVar('_EnvType', contravariant=True)
class _BindIO(Protocol[_ValueType, _NewValueType]):
    """
    Helper class to represent type overloads for ret_type based on a value type.

    Contains all containers we have.

    It does not exist in runtime.
    It is also completely removed from typing with the help of the mypy plugin.
    """

    # Reader + IO + Result container.
    @overload
    def __call__(
        self,
        container: RequiresContextIOResult[_EnvType, _ValueType, _ErrorType],
    ) -> RequiresContextIOResult[_EnvType, _NewValueType, _ErrorType]:
        ...

    # Reader + Future + Result container.
    @overload
    def __call__(
        self,
        container: RequiresContextFutureResult[
            _EnvType, _ValueType, _ErrorType,
        ],
    ) -> RequiresContextFutureResult[_EnvType, _NewValueType, _ErrorType]:
        ...

    # Synchronous IO with success/failure tracking.
    @overload
    def __call__(
        self,
        container: IOResult[_ValueType, _ErrorType],
    ) -> IOResult[_NewValueType, _ErrorType]:
        ...

    # Plain async value.
    @overload
    def __call__(
        self,
        container: Future[_ValueType],
    ) -> Future[_NewValueType]:
        ...

    # Async value with success/failure tracking.
    @overload
    def __call__(
        self,
        container: FutureResult[_ValueType, _ErrorType],
    ) -> FutureResult[_NewValueType, _ErrorType]:
        ...
# Pointfree ``bind_io``: lifts a ``_ValueType -> IO[_NewValueType]`` function
# into a callable usable with any of the IO-capable containers above.
def _bind_io(
    function: Callable[[_ValueType], IO[_NewValueType]],
) -> _BindIO[_ValueType, _NewValueType]:
    ...
|
[
"[email protected]"
] | |
70237c341ae1c9585377c6c6ec289173ce92bdae
|
148044ba8412cfe9227201e82360770d6a7e9780
|
/check_screen.py
|
095f92651215f811b52a37d88fe1c3fbc9022209
|
[] |
no_license
|
mwaskom/sticks_experiment
|
9e0b2af851e20f82cd8a3011b08ac061b0061191
|
fcfd98cb4528e9011168be27b2121a96514b3fa3
|
refs/heads/master
| 2023-08-24T06:22:27.939464 | 2015-08-28T23:53:16 | 2015-08-31T04:38:24 | 38,704,521 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 424 |
py
|
import sys
from psychopy import visual, event
import cregg
def main(arglist):
    """Launch the experiment window and draw the stimulus-array boundary circle.

    Blocks until one of the configured quit keys is pressed.
    """
    p = cregg.Params("scan")
    p.set_by_cmdline(arglist)
    win = cregg.launch_window(p)

    # Outline of the stimulus-array region, for eyeballing screen geometry.
    visual.Circle(win, p.array_radius,
                  edges=128,
                  lineColor="white",
                  lineWidth=2).draw()

    win.flip()
    event.waitKeys(keyList=p.quit_keys)


if __name__ == "__main__":
    main(sys.argv[1:])
|
[
"[email protected]"
] | |
4a010a42bfbd615afad1fd018c160396fa4dbd69
|
40f4626ec26f23923c2b19d7ed24f3c512495182
|
/src/kangqi/task/compQA/model/module/cross_attention_indirect.py
|
3606a67410715c49755b69f8e4e28061ab9a5fcc
|
[] |
no_license
|
Zjhao666/CompQA
|
c937c382a2f0a0fce4fdda8efda7c916b3e4c978
|
4bb2abc40428373481909e02543062a7388615bd
|
refs/heads/master
| 2023-02-09T02:28:09.966576 | 2020-12-31T21:18:32 | 2020-12-31T21:18:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,278 |
py
|
"""
Author: Kangqi Luo
Goal: Combine the structure of ABCNN-1 and AF-attention
(A Decomposable Attention Model for Natural Language Inference)
We are using the module in compQA scenario, where the rhs (path) is represented by both pwords and preds.
Therefore, we send'em together into the module, making it a little bit more complex than a normal CrossAtt layer.
"""
import tensorflow as tf
from . import att_layer
from kangqi.util.LogUtil import LogInfo
class IndirectCrossAttention:
def __init__(self, lf_max_len, rt_max_len, dim_att_hidden, att_func):
    """Configure the cross-attention module.

    :param lf_max_len: max length of the left-hand sequence
    :param rt_max_len: max length of the right-hand sequence
    :param dim_att_hidden: hidden size used by the attention scorer
    :param att_func: scoring function name: 'dot' / 'bilinear' / 'bahdanau' / 'bdot'
    """
    self.lf_max_len = lf_max_len
    self.rt_max_len = rt_max_len
    self.dim_att_hidden = dim_att_hidden
    LogInfo.logs('IndirectCrossAttention: lf_max_len = %d, rt_max_len = %d, dim_att_hidden = %d, att_func = %s.',
                 lf_max_len, rt_max_len, dim_att_hidden, att_func)
    assert att_func in ('dot', 'bilinear', 'bahdanau', 'bdot')
    # Resolve e.g. 'dot' -> att_layer.cross_att_dot once, at construction time.
    self.att_func = getattr(att_layer, 'cross_att_' + att_func)
def forward(self, lf_input, lf_mask, rt_input, rt_mask):
"""
:param lf_input: (ds, lf_max_len, dim_hidden)
:param lf_mask: (ds, lf_max_len) as float32
:param rt_input: (ds, rt_max_len, dim_hidden)
:param rt_mask: (ds, rt_max_len) as float32
"""
with tf.variable_scope('cross_att_indirect', reuse=tf.AUTO_REUSE):
lf_cube_mask = tf.stack([lf_mask] * self.rt_max_len,
axis=-1, name='lf_cube_mask') # (ds, lf_max_len, rt_max_len)
rt_cube_mask = tf.stack([rt_mask] * self.lf_max_len,
axis=1, name='rt_cube_mask') # (ds, lf_max_len, rt_max_len)
cube_mask = tf.multiply(lf_cube_mask, rt_cube_mask, name='cube_mask')
""" Calculate cross attention matrix """
raw_att_mat = self.att_func(lf_input=lf_input, rt_input=rt_input,
lf_max_len=self.lf_max_len,
rt_max_len=self.rt_max_len,
dim_att_hidden=self.dim_att_hidden)
masked_att_mat = raw_att_mat * cube_mask + tf.float32.min * (1. - cube_mask)
# padding: -inf
""" Attention normalize & produce att_repr """
att_norm_for_lf = tf.nn.softmax(masked_att_mat, dim=2, name='att_norm_for_lf')
att_norm_for_rt = tf.nn.softmax(masked_att_mat, dim=1, name='att_norm_for_rt')
# for_lf: sum_j A[:,j] = 1.
# for_rt: sum_i A[i,:] = 1.
lf_att_repr = tf.matmul(att_norm_for_lf, rt_input, name='lf_att_repr') # (ds, lf_max_len, dim_emb)
rt_att_repr = tf.matmul(tf.transpose(att_norm_for_rt, perm=[0, 2, 1]), # (ds, rt_max_len, lf_max_len)
lf_input, name='rt_att_repr') # (ds, rt_max_len, dim_emb)
return lf_att_repr, rt_att_repr, raw_att_mat
# @staticmethod
# def att_norm_col_wise(att_mat):
# sum_of_cols = 1e-4 + tf.reduce_mean(att_mat, axis=1, name='sum_of_cols') # (ds, rt_max_len)
# sum_of_cols = tf.expand_dims(sum_of_cols, axis=1) # (ds, 1, rt_max_len)
# att_norm = tf.div(att_mat, sum_of_cols, name='att_norm_col_wise')
# # (ds, lf_max_len, rt_max_len), sum(att_norm[:, j]) = 1
# # att_norm[:, j]: the distribution over left words for each word-j at right side
# return att_norm
#
# @staticmethod
# def att_norm_row_wise(att_mat):
# sum_of_rows = 1e-4 + tf.reduce_sum(att_mat, axis=2, name='sum_of_rows') # (ds, lf_max_len)
# sum_of_rows = tf.expand_dims(sum_of_rows, axis=2) # (ds, lf_max_len, 1)
# att_norm = tf.div(att_mat, sum_of_rows, name='att_norm_row_wise')
# # (ds, lf_max_len, rt_max_len), sum(att_norm[i, :]) = 1
# # att_norm[i, :]: the distribution over right words for each word-i at left side
# return att_norm
#
# def construct_att_weights(self, att_mat):
# """
# Parikh: Go through formula (2) in AF-attention paper
# :param att_mat: (ds, q_max_len, p_max_len + pw_max_len)
# :return: 3 attention weights (q, p, pw) and the split attention matrices
# """
# """ Naive v.s. Parikh: just different from the normalizing direction!! """
# p_att_mat, pw_att_mat = tf.split(value=att_mat,
# num_or_size_splits=[self.p_max_len, self.pw_max_len],
# axis=2) # (ds, q_max_len, p_max_len | pw_max_len)
# if self.att_norm_mode == 'parikh':
# att_wt_q = self.att_norm_col_wise(att_mat=att_mat) # (ds, q_max_len, p_max_len+pw_max_len)
# att_wt_p = self.att_norm_row_wise(att_mat=p_att_mat) # (ds, q_max_len, p_max_len)
# att_wt_pw = self.att_norm_row_wise(att_mat=pw_att_mat) # (ds, q_max_len, pw_max_len)
# else: # naive
# att_wt_q = self.att_norm_row_wise(att_mat=att_mat)
# att_wt_p = self.att_norm_col_wise(att_mat=p_att_mat)
# att_wt_pw = self.att_norm_col_wise(att_mat=pw_att_mat)
# return p_att_mat, pw_att_mat, att_wt_q, att_wt_p, att_wt_pw
|
[
"[email protected]"
] | |
dc34c1f11f334a3d915def0a7e3345ee0781e7e9
|
38ea041a35d6e1bbdcb875cfff1a313b02476e81
|
/appModules/AddContact.py
|
515d8116725ef529922d9747efd7df54bad352c6
|
[] |
no_license
|
saraliuhou/DataDriverTestFrameWork
|
1824d0b771c20a87ce3d0b5cebf5cf1e70b4226b
|
5f243026e9f03e96fa010f945fb31b7545759798
|
refs/heads/master
| 2020-06-01T00:19:32.435417 | 2019-06-12T09:10:09 | 2019-06-12T09:10:09 | 190,554,542 | 0 | 0 | null | 2019-06-06T09:29:50 | 2019-06-06T09:29:50 | null |
UTF-8
|
Python
| false | false | 2,262 |
py
|
from pageObjects.HomePage import HomePage
from pageObjects.NewContact import AddContactPage
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from util.ParseConfigurationFile import ParseConfigFile
class NewContactPersonAction(object):
    """Page actions for creating a new contact in 126 webmail."""

    def __init__(self):
        pass

    @staticmethod
    def addressLink(driver):
        '''
        Click the address-book (contacts) button on the home page.
        :param driver: the selenium WebDriver
        :return:
        '''
        homePage = HomePage(driver)
        # Click the address-book entry.
        homePage.addressLink().click()

    @staticmethod
    def addContact(driver, contactName, contactMail, isSatr, contactPhone, contactComment):
        '''
        Scenario: add a new contact.
        :param driver: the selenium WebDriver
        :param contactName: contact name (optional)
        :param contactMail: contact e-mail address (required)
        :param isSatr: the literal '是' ("yes") marks the contact as starred
        :param contactPhone: phone number (optional)
        :param contactComment: free-form comment (optional)
        :return:
        '''
        # Open the "new contact" page object.
        addContact = AddContactPage(driver)
        # The "new contact" button was flaky during debugging (sometimes not
        # yet clickable), so wait explicitly before clicking it.
        by, locator = ParseConfigFile().getElementValue('126mail_addContactPage', 'addContactPage.newContact')
        WebDriverWait(driver, 30).until(EC.element_to_be_clickable((by, locator)))
        addContact.newContact().click()
        if contactName:
            # Optional field.
            addContact.addName().send_keys(contactName)
        # Required field.
        addContact.addMail().send_keys(contactMail)
        if isSatr == '是':
            addContact.markStar().click()
        if contactPhone:
            addContact.addPhone().send_keys(contactPhone)
        if contactComment:
            addContact.addContent().send_keys(contactComment)
        addContact.clickCommitBtn().click()
# Manual smoke test: log in to 126 mail and create one starred contact.
if __name__=='__main__':
    from appModules.LoginAction import LoginAction
    import time
    from selenium import webdriver
    driver = webdriver.Firefox()
    driver.get('https://mail.126.com')
    time.sleep(5)
    LoginAction.login(driver, 'linux', 'chao')
    NewContactPersonAction.addressLink(driver)
    NewContactPersonAction.addContact(driver, '','[email protected]', '是', '','')
    time.sleep(5)
    driver.quit()
|
[
"[email protected]"
] | |
d8f3167c34525042bfc9833d02d8d53673ff7978
|
79aa4b99a48bb16a907916ad63c902443420541a
|
/0019.py
|
e1253c96f9a4ecabdca22315f7ecd7d39377a98c
|
[] |
no_license
|
mach8686devops/leetcode-100
|
62dec66c719d7cfa120ca9505701df49d8d5b982
|
f90526c9b073165b86b933cdf7d1dc496e68f2c6
|
refs/heads/main
| 2023-04-11T06:28:15.059587 | 2021-04-13T12:11:54 | 2021-04-13T12:11:54 | 329,346,572 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 375 |
py
|
# Definition for singly-linked list.
class ListNode:
    """A node of a singly linked list: a value plus a ``next`` pointer."""

    def __init__(self, x):
        # New nodes start detached (next is None).
        self.val, self.next = x, None
class Solution:
    """LeetCode 19 — remove the n-th node from the end of a linked list."""

    def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
        """Remove the n-th node from the end and return the (possibly new) head.

        Uses two pointers kept ``n`` nodes apart, so only O(1) extra space is
        needed instead of materialising every node into a Python list.
        Also fixes the original's return of an empty list ``[]`` (instead of
        ``None``) when the only node is removed.
        """
        # Advance the lead pointer n steps ahead of the trail pointer.
        lead = head
        for _ in range(n):
            lead = lead.next
        if lead is None:
            # The head itself is the n-th node from the end.
            return head.next
        trail = head
        while lead.next is not None:
            lead = lead.next
            trail = trail.next
        # trail now precedes the target node; unlink it.
        trail.next = trail.next.next
        return head
|
[
"[email protected]"
] | |
2132ca489839efb59eecac3da30efd56457831e6
|
18eac94ff076c1eecd72870ef93ae656906e8673
|
/supervised_learning/0x06-keras/13-predict.py
|
e2426676d174b5ff52b5fd6940693d363bda35a2
|
[] |
no_license
|
dgquintero/holbertonschool-machine_learning
|
c1331ff87e053f9c143a0e503e8db177dfc7aafe
|
c80073d0ef68deeedbe2d991e296ef75f58a220f
|
refs/heads/master
| 2022-12-19T21:49:10.581793 | 2020-10-15T14:56:22 | 2020-10-15T14:56:22 | 279,329,167 | 0 | 1 | null | 2020-09-25T19:11:52 | 2020-07-13T14:42:03 |
Python
|
UTF-8
|
Python
| false | false | 574 |
py
|
#!/usr/bin/env python3
"""function predict"""
import tensorflow.keras as K
def predict(network, data, verbose=False):
    """Run inference with a trained Keras network.

    Arguments:
        network: the network model used to generate predictions
        data: the input data to predict on
        verbose: boolean that determines if progress output
            should be printed during prediction
    Returns: the prediction for the data
    """
    # Delegates directly to Keras' Model.predict.
    prediction = network.predict(data, verbose=verbose)
    return prediction
|
[
"[email protected]"
] | |
c4a498197bd65892c63d8b651006a2e100b27e0c
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/media/v20210601/get_transform.py
|
2d5107073354128ca98d5c3db8d4db0c9a68f79d
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,036 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetTransformResult',
'AwaitableGetTransformResult',
'get_transform',
'get_transform_output',
]
@pulumi.output_type
class GetTransformResult:
    """
    A Transform encapsulates the rules or instructions for generating desired outputs from input media, such as by transcoding or by extracting insights. After the Transform is created, it can be applied to input media by creating Jobs.
    """
    # NOTE: SDK-generated result model — every constructor argument is
    # type-checked and stored via pulumi.set; read back through @property.
    def __init__(__self__, created=None, description=None, id=None, last_modified=None, name=None, outputs=None, system_data=None, type=None):
        if created and not isinstance(created, str):
            raise TypeError("Expected argument 'created' to be a str")
        pulumi.set(__self__, "created", created)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if last_modified and not isinstance(last_modified, str):
            raise TypeError("Expected argument 'last_modified' to be a str")
        pulumi.set(__self__, "last_modified", last_modified)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if outputs and not isinstance(outputs, list):
            raise TypeError("Expected argument 'outputs' to be a list")
        pulumi.set(__self__, "outputs", outputs)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def created(self) -> str:
        """
        The UTC date and time when the Transform was created, in 'YYYY-MM-DDThh:mm:ssZ' format.
        """
        return pulumi.get(self, "created")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        An optional verbose description of the Transform.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="lastModified")
    def last_modified(self) -> str:
        """
        The UTC date and time when the Transform was last updated, in 'YYYY-MM-DDThh:mm:ssZ' format.
        """
        return pulumi.get(self, "last_modified")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def outputs(self) -> Sequence['outputs.TransformOutputResponse']:
        """
        An array of one or more TransformOutputs that the Transform should generate.
        """
        return pulumi.get(self, "outputs")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        The system metadata relating to this resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetTransformResult(GetTransformResult):
    # Makes the (already-resolved) result awaitable; __await__ never actually
    # suspends — the `if False: yield` only marks this as a generator.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetTransformResult(
            created=self.created,
            description=self.description,
            id=self.id,
            last_modified=self.last_modified,
            name=self.name,
            outputs=self.outputs,
            system_data=self.system_data,
            type=self.type)
def get_transform(account_name: Optional[str] = None,
                  resource_group_name: Optional[str] = None,
                  transform_name: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTransformResult:
    """
    A Transform encapsulates the rules or instructions for generating desired outputs from input media, such as by transcoding or by extracting insights. After the Transform is created, it can be applied to input media by creating Jobs.


    :param str account_name: The Media Services account name.
    :param str resource_group_name: The name of the resource group within the Azure subscription.
    :param str transform_name: The Transform name.
    """
    # Marshal the arguments into the invoke payload expected by the provider.
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['transformName'] = transform_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:media/v20210601:getTransform', __args__, opts=opts, typ=GetTransformResult).value

    return AwaitableGetTransformResult(
        created=__ret__.created,
        description=__ret__.description,
        id=__ret__.id,
        last_modified=__ret__.last_modified,
        name=__ret__.name,
        outputs=__ret__.outputs,
        system_data=__ret__.system_data,
        type=__ret__.type)
@_utilities.lift_output_func(get_transform)
def get_transform_output(account_name: Optional[pulumi.Input[str]] = None,
                         resource_group_name: Optional[pulumi.Input[str]] = None,
                         transform_name: Optional[pulumi.Input[str]] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTransformResult]:
    """
    A Transform encapsulates the rules or instructions for generating desired outputs from input media, such as by transcoding or by extracting insights. After the Transform is created, it can be applied to input media by creating Jobs.


    :param str account_name: The Media Services account name.
    :param str resource_group_name: The name of the resource group within the Azure subscription.
    :param str transform_name: The Transform name.
    """
    # Body intentionally empty: lift_output_func wraps get_transform so this
    # variant accepts pulumi Inputs and returns an Output.
    ...
|
[
"[email protected]"
] | |
4aeb5076c559a2d62968ac097e20666249770856
|
03f9b8bdea312636afb4df3737b55cb0cc4b21ff
|
/CanIWin.py
|
3d81f1f782f454808169ef87a967ad9bee42ec2d
|
[] |
no_license
|
ellinx/LC-python
|
f29dd17bbe15407ba0d06ad68386efdc9a343b56
|
9190d3d178f1733aa226973757ee7e045b7bab00
|
refs/heads/master
| 2021-06-01T15:21:24.379811 | 2020-10-29T04:37:07 | 2020-10-29T04:37:07 | 132,704,788 | 1 | 1 | null | 2019-05-15T03:26:11 | 2018-05-09T05:13:26 |
Python
|
UTF-8
|
Python
| false | false | 2,052 |
py
|
"""
In the "100 game," two players take turns adding, to a running total,
any integer from 1..10. The player who first causes the running total to reach or exceed 100 wins.
What if we change the game so that players cannot re-use integers?
For example, two players might take turns drawing from a common pool of
numbers of 1..15 without replacement until they reach a total >= 100.
Given an integer maxChoosableInteger and another integer desiredTotal,
determine if the first player to move can force a win, assuming both players play optimally.
You can always assume that maxChoosableInteger will not be larger than 20 and
desiredTotal will not be larger than 300.
Example
Input:
maxChoosableInteger = 10
desiredTotal = 11
Output:
false
Explanation:
No matter which integer the first player choose, the first player will lose.
The first player can choose an integer from 1 up to 10.
If the first player choose 1, the second player can only choose integers from 2 up to 10.
The second player will win by choosing 10 and get a total = 11, which is >= desiredTotal.
Same with other integers chosen by the first player, the second player will always win.
"""
class Solution:
    """LeetCode 464 — the "100 game" without reusing integers."""

    def canIWin(self, maxChoosableInteger, desiredTotal):
        """
        :type maxChoosableInteger: int
        :type desiredTotal: int
        :rtype: bool

        Decide whether the first player can force a win when players
        alternately pick distinct integers from 1..maxChoosableInteger and
        the player who pushes the running total to >= desiredTotal wins.

        Improvement over the original: the set of taken numbers is encoded
        as a bitmask (bit i-1 set <=> integer i taken) instead of a
        comma-joined string of the remaining numbers, which shrinks memo
        keys and avoids rebuilding a list on every recursive call.
        """
        # Trivial outcomes first.
        if desiredTotal <= 1:
            return True
        if (1 + maxChoosableInteger) * maxChoosableInteger // 2 < desiredTotal:
            # Even taking every number cannot reach the target.
            return False

        memo = {}

        def can_win(used, remaining):
            # 'used' is the bitmask of taken numbers; 'remaining' is how much
            # is still needed to reach desiredTotal ('remaining' is fully
            # determined by 'used', so the memo keys on 'used' alone).
            if used in memo:
                return memo[used]
            for num in range(1, maxChoosableInteger + 1):
                bit = 1 << (num - 1)
                if used & bit:
                    continue
                # Win immediately, or move the opponent into a losing state.
                if num >= remaining or not can_win(used | bit, remaining - num):
                    memo[used] = True
                    return True
            memo[used] = False
            return False

        return can_win(0, desiredTotal)
|
[
"[email protected]"
] | |
6276ed8fbaf501f6fe6c7314d1eee780a50c0c89
|
270d7f88e47683abd55c0191466c80513b2aa9f9
|
/tests/test_tta.py
|
9d0ba17296509b58febeed4a4f4c0b193716299d
|
[
"MIT"
] |
permissive
|
williamberrios/pytorch-toolbelt
|
abdf8e455a4ffc79d2afbc92e80005a821fb97a9
|
4a24e6324b8270d31c08b8b2f667d740b9823377
|
refs/heads/master
| 2023-07-06T06:35:24.197821 | 2021-08-12T07:47:20 | 2021-08-12T07:47:20 | 400,866,088 | 1 | 0 |
MIT
| 2021-08-28T18:43:12 | 2021-08-28T18:43:12 | null |
UTF-8
|
Python
| false | false | 4,994 |
py
|
from collections import defaultdict
import cv2
import torch
import numpy as np
import pytest
from torch import nn
from pytorch_toolbelt.inference import tta
from pytorch_toolbelt.modules import GlobalAvgPool2d
from pytorch_toolbelt.utils.torch_utils import to_numpy
from pytorch_toolbelt.zoo import resnet34_unet64_s4
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA is not available")
class NoOp(nn.Module):
    """Identity module: forwards its input untouched (handy for TTA tests)."""

    def __init__(self):
        super(NoOp, self).__init__()

    def forward(self, input):
        return input
class SumAll(nn.Module):
    """Reduce a 4-D batch to one scalar per sample by summing dims 1..3."""

    def __init__(self):
        super(SumAll, self).__init__()

    def forward(self, input):
        # Sum over channel/height/width, keeping the batch dimension.
        return input.sum(dim=(1, 2, 3))
def test_d4_image2mask():
    # With an identity model, averaging over the 8 D4 transforms must
    # reproduce the input exactly (up to float tolerance).
    x = torch.rand((4, 3, 224, 224))
    model = NoOp()
    output = tta.d4_image2mask(model, x)
    np.testing.assert_allclose(to_numpy(output), to_numpy(x), atol=1e-6, rtol=1e-6)
def test_d4_image2mask_v2():
    # Augmenting and then deaugmenting without any model in between must
    # round-trip the tensor exactly.
    x = torch.rand((4, 3, 224, 224))
    x_a = tta.d4_image_augment(x)
    y = tta.d4_image_deaugment(x_a)
    np.testing.assert_allclose(to_numpy(y), to_numpy(x), atol=1e-6, rtol=1e-6)
@torch.no_grad()
@skip_if_no_cuda()
def test_d4_speed():
    # Benchmark the two equivalent D4 TTA code paths on CUDA (per-transform
    # calls vs. batched augment/deaugment) across dtype and cudnn settings,
    # and dump the timings to tta_eval.csv.
    df = defaultdict(list)
    n = 100

    model = resnet34_unet64_s4().cuda().eval()
    x = torch.rand((4, 3, 224, 224)).float().cuda()
    # Sanity check: both code paths must produce identical outputs.
    y1 = tta.d4_image2mask(model, x)
    y2 = tta.d4_image_deaugment(model(tta.d4_image_augment(x)))
    np.testing.assert_allclose(to_numpy(y1), to_numpy(y2), atol=1e-6, rtol=1e-6)

    for deterministic in [False, True]:
        for benchmark in [False, True]:
            for dtype in [torch.float16, torch.float32]:
                torch.cuda.empty_cache()
                torch.backends.cuda.deterministic = deterministic
                torch.backends.cuda.benchmark = benchmark

                model = resnet34_unet64_s4().to(dtype).cuda().eval()

                speed_v1 = 0
                speed_v2 = 0
                # Timings use OpenCV's tick counter; .item() forces a device
                # sync so the GPU work is included in the measurement.
                for i in range(n):
                    x = torch.rand((4, 3, 224, 224)).to(dtype).cuda(non_blocking=False)
                    start = cv2.getTickCount()
                    y = tta.d4_image2mask(model, x)
                    v = y.sum().item()
                    finish = cv2.getTickCount()
                    speed_v1 += finish - start
                    np.testing.assert_allclose(v, v, atol=1e-6, rtol=1e-6)

                for i in range(n):
                    x = torch.rand((4, 3, 224, 224)).to(dtype).cuda(non_blocking=False)
                    start = cv2.getTickCount()
                    x_a = tta.d4_image_augment(x)
                    x_a = model(x_a)
                    y = tta.d4_image_deaugment(x_a)
                    v = y.sum().item()
                    finish = cv2.getTickCount()
                    speed_v2 += finish - start
                    np.testing.assert_allclose(v, v, atol=1e-6, rtol=1e-6)

                df["mode"].append("fp16" if dtype == torch.float16 else "fp32")
                df["deterministic"].append(deterministic)
                df["benchmark"].append(benchmark)
                df["d4_image2mask (ms)"].append(1000.0 * speed_v1 / (cv2.getTickFrequency() * n))
                df["d4_augment (ms)"].append(1000.0 * speed_v2 / (cv2.getTickFrequency() * n))

    import pandas as pd

    df = pd.DataFrame.from_dict(df)
    pd.set_option("display.max_columns", None)
    pd.set_option("display.max_rows", None)
    print(df)
    df.to_csv("tta_eval.csv", index=False)
def test_fliplr_image2mask():
    # Horizontal-flip TTA around an identity model must return the input.
    x = torch.rand((4, 3, 224, 224))
    model = NoOp()
    output = tta.fliplr_image2mask(model, x)
    np.testing.assert_allclose(to_numpy(output), to_numpy(x), atol=1e-6, rtol=1e-6)
def test_d4_image2label():
    # Every D4 transform preserves the pixel sum, so the TTA-averaged label
    # equals the plain sum of the input.
    x = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 1, 2], [3, 4, 5, 6]]).unsqueeze(0).unsqueeze(0).float()
    model = SumAll()
    output = tta.d4_image2label(model, x)
    expected = int(x.sum())

    assert int(output) == expected
def test_fliplr_image2label():
    # A horizontal flip preserves the pixel sum, so the averaged label
    # equals the plain sum of the input.
    x = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 1, 2], [3, 4, 5, 6]]).unsqueeze(0).unsqueeze(0).float()
    model = SumAll()
    output = tta.fliplr_image2label(model, x)
    expected = int(x.sum())

    assert int(output) == expected
def test_fivecrop_image2label():
    # Five 2x2 crops (four corners + center); expected value is the mean of
    # their per-crop sums, written out crop by crop.
    x = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 1, 2], [3, 4, 5, 6]]).unsqueeze(0).unsqueeze(0).float()
    model = SumAll()
    output = tta.fivecrop_image2label(model, x, (2, 2))
    expected = ((1 + 2 + 5 + 6) + (3 + 4 + 7 + 8) + (9 + 0 + 3 + 4) + (1 + 2 + 5 + 6) + (6 + 7 + 0 + 1)) / 5

    assert int(output) == expected
def test_tencrop_image2label():
    # Ten crops = the five crops plus their mirrored versions; mirroring
    # preserves sums, so the expected mean simply doubles the five-crop total.
    x = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 1, 2], [3, 4, 5, 6]]).unsqueeze(0).unsqueeze(0).float()
    model = SumAll()
    output = tta.tencrop_image2label(model, x, (2, 2))
    expected = (2 * ((1 + 2 + 5 + 6) + (3 + 4 + 7 + 8) + (9 + 0 + 3 + 4) + (1 + 2 + 5 + 6) + (6 + 7 + 0 + 1))) / 10

    assert int(output) == expected
|
[
"[email protected]"
] | |
2f876f6a85661251f0ba85f749269bb1b2e63c24
|
e2efa339cf6fb017e1d1898325b363a69c227409
|
/app.py
|
6367bcdbeda570b322259488161e00e0d12605db
|
[] |
no_license
|
lm10pulkit/update_delete
|
201b22b3816606640ab22a0f63c7bf2d58ed6295
|
c9c935e070f555c006dca00fd0940863fcc0790d
|
refs/heads/master
| 2020-04-15T18:32:53.239716 | 2019-01-09T19:07:40 | 2019-01-09T19:07:40 | 164,915,990 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,733 |
py
|
from flask import Flask ,session, render_template, request, redirect,url_for,g
from flask_mysqldb import MySQL
from flask_bcrypt import Bcrypt
import os
# Initializing the app.
app = Flask(__name__)
# Secret key used to sign the session cookie (regenerated on every restart,
# so all sessions are invalidated when the server restarts).
app.secret_key= os.urandom(24)
# MySQL connection settings.
# NOTE(review): root with an empty password — development only; move to config.
app.config['MYSQL_HOST']='localhost'
app.config['MYSQL_USER']='root'
app.config['MYSQL_PASSWORD']= ''
app.config['MYSQL_DB']='crud'
mysql = MySQL(app)
# bcrypt for hashing passwords to keep the database secure.
bcrypt= Bcrypt(app)
@app.route('/',methods=['GET','POST'])
def index():
    # Login page: GET renders the form, POST checks the credentials.
    # NOTE(review): credentials are hard-coded (admin/admin) and bcrypt is
    # never used here — placeholder auth, verify before shipping.
    if request.method=='GET':
        return render_template('login.html')
    else:
        form= request.form
        username=form['username']
        password=form['password']
        if username=='admin' and password=='admin':
            session['user']=username
            return redirect(url_for('data'))
        else:
            return redirect(url_for('index'))
@app.route('/list',methods=['GET'])
def data():
    # List all employees; only reachable with a logged-in session.
    if 'user' in session:
        cur = mysql.connection.cursor()
        resultValue = cur.execute(" select * from employee")
        userDetails = cur.fetchall()
        return render_template('list.html', employee=userDetails)
    else:
        return redirect(url_for('index'))
@app.route('/add',methods=['GET','POST'])
def add():
    # Create a new employee: GET shows the form, POST inserts the record.
    if 'user' in session:
        if request.method == 'GET':
            return render_template('add.html')
        else:
            form = request.form
            print(form)
            firstname = form['firstname']
            lastname = form['lastname']
            address = form['address']
            email = form['email']
            contact = form['contact']
            argo = [firstname, lastname, address, email, int(contact)]
            cur = mysql.connection.cursor()
            # Parameterized query — safe against SQL injection.
            cur.execute("INSERT INTO employee(firstname,lastname,address,email,contact) values (%s,%s,%s,%s,%s)", argo)
            mysql.connection.commit()
            cur.close()
            return redirect(url_for('data'))
    else:
        return redirect(url_for('index'))
@app.route('/delete/<id>',methods=['GET'])
def delete(id=None):
    # Delete the employee with the given id.
    # NOTE(review): destructive action over GET — a POST route would be safer
    # against accidental or prefetched requests.
    if 'user' in session:
        query='delete from employee where id = %s'
        params=[id]
        cur = mysql.connection.cursor()
        cur.execute(query,params)
        mysql.connection.commit()
        cur.close()
        return redirect(url_for('data'))
    else:
        return redirect(url_for('index'))
@app.route('/edit/<id>',methods=['POST','GET'])
def edit(id=None):
    # Edit an employee: GET pre-fills the form, POST saves the changes.
    if 'user' in session:
        if request.method=='POST':
            form = request.form
            params=[form['firstname'],form['lastname'],form['address'],form['email'],form['contact'],id]
            query ='update employee set firstname= %s , lastname = %s , address= %s , email= %s, contact= %s where id = %s '
            cur = mysql.connection.cursor()
            cur.execute(query, params)
            mysql.connection.commit()
            cur.close()
            return redirect(url_for('data'))
        else:
            query = 'select * from employee where id = %s'
            params=[id]
            cur = mysql.connection.cursor()
            # cursor.execute returns the number of matched rows here.
            resultValue=cur.execute(query, params)
            if resultValue>0:
                userDetails = cur.fetchall()
                return render_template('edit.html',user=userDetails[0])
            else:
                return 'invalid id'
    else:
        return redirect(url_for('index'))
@app.route('/logout',methods=['GET'])
def logout():
    # Drop the session user and return to the login page.
    session.pop('user', None)
    return redirect(url_for('index'))
# Run the development server (debug=True is not suitable for production).
if __name__=='__main__':
    app.run(debug=True)
|
[
"[email protected]"
] | |
3ad6b6e4e9387b3b9cc5855347a729c5a5d8be58
|
49caef1f93bd4673530e0a4c54c59028fb7b54e6
|
/npg7/web_printscreen_zb/controllers.py
|
f5da2a2deb809c478d12f54592606a9e2f4de36e
|
[] |
no_license
|
slevenhagen/addons-extra7.0
|
7622024198c0cf637f3f4767eb2b955532af3710
|
85611a86a0e1522fd88b5e6fbb217f425c4ae12d
|
refs/heads/master
| 2020-03-17T14:12:42.082766 | 2018-05-16T13:02:05 | 2018-05-16T13:02:05 | 133,663,372 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,142 |
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2013 ZestyBeanz Technologies Pvt. Ltd.
# (http://wwww.zbeanztech.com)
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
try:
import json
except ImportError:
import simplejson as json
import web.http as openerpweb
from web.controllers.main import ExcelExport
from web.controllers.main import Export
import re
from cStringIO import StringIO
from lxml import etree
import trml2pdf
import time, os
import locale
import openerp.tools as tools
try:
import xlwt
except ImportError:
xlwt = None
class ZbExcelExport(ExcelExport):
    # Web controller that turns a "print screen" grid (headers + rows sent as
    # JSON) into an .xls download built with xlwt.
    _cp_path = '/web/export/zb_excel_export'

    def from_data(self, fields, rows):
        # Build the workbook: one bold header row, then the data rows.
        workbook = xlwt.Workbook()
        worksheet = workbook.add_sheet('Sheet 1')
        style = xlwt.easyxf('align: wrap yes')
        font = xlwt.Font()
        font.bold = True
        style.font = font
        # Columns without header metadata are skipped entirely; 'count'
        # shifts the remaining columns left to fill the gaps.
        ignore_index = []
        count = 0
        for i, fieldname in enumerate(fields):
            if fieldname.get('header_data_id', False):
                field_name = fieldname.get('header_name', '')
                worksheet.write(0, i-count, field_name, style)
                worksheet.col(i).width = 8000
            else:
                count += 1
                ignore_index.append(i)
        style = xlwt.easyxf('align: wrap yes')
        bold_style = xlwt.easyxf('align: wrap yes')
        font = xlwt.Font()
        font.bold = True
        bold_style.font = font
        for row_index, row in enumerate(rows):
            count = 0
            for cell_index, cell_value in enumerate(row):
                if cell_index not in ignore_index:
                    cell_style = style
                    if cell_value.get('bold', False):
                        cell_style = bold_style
                    cellvalue = cell_value.get('data', '')
                    # Python 2 code (basestring): strip carriage returns and
                    # coerce numeric cells to float for proper Excel typing.
                    if isinstance(cellvalue, basestring):
                        cellvalue = re.sub("\r", " ", cellvalue)
                    if cell_value.get('number', False) and cellvalue:
                        cellvalue = float(cellvalue)
                    if cellvalue is False: cellvalue = None
                    worksheet.write(row_index + 1, cell_index - count, cellvalue, cell_style)
                else:
                    count += 1
        fp = StringIO()
        workbook.save(fp)
        fp.seek(0)
        data = fp.read()
        fp.close()
        return data

    @openerpweb.httprequest
    def index(self, req, data, token):
        # HTTP entry point: 'data' is a JSON payload with headers/rows.
        data = json.loads(data)
        return req.make_response(
            self.from_data(data.get('headers', []), data.get('rows', [])),
            headers=[
                ('Content-Disposition', 'attachment; filename="%s"'
                 % data.get('model', 'Export.xls')),
                ('Content-Type', self.content_type)
            ],
            cookies={'fileToken': token}
        )
class ExportPdf(Export):
    # Export controller producing a PDF: builds an intermediate XML <report>
    # document, transforms it with custom_new.xsl to RML, renders via trml2pdf.
    _cp_path = '/web/export/zb_pdf'

    fmt = {
        'tag': 'pdf',
        'label': 'PDF',
        'error': None
    }

    @property
    def content_type(self):
        return 'application/pdf'

    def filename(self, base):
        return base + '.pdf'

    def from_data(self, uid, fields, rows, company_name):
        # A4 page in millimetres; 2.8346 converts mm to points below.
        pageSize=[210.0,297.0]
        new_doc = etree.Element("report")
        config = etree.SubElement(new_doc, 'config')

        def _append_node(name, text):
            # Helper: add a <name>text</name> child under <config>.
            n = etree.SubElement(config, name)
            n.text = text

        _append_node('date', time.strftime(str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y'))))
        _append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize))
        _append_node('PageWidth', '%.2f' % (pageSize[0] * 2.8346,))
        _append_node('PageHeight', '%.2f' %(pageSize[1] * 2.8346,))
        _append_node('PageFormat', 'a4')
        _append_node('header-date', time.strftime(str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y'))))
        _append_node('company', company_name)
        # NOTE(review): l, t, temp and tsum are never used below.
        l = []
        t = 0
        temp = []
        tsum = []
        # Columns without header metadata are recorded and skipped in rows.
        skip_index = []
        header = etree.SubElement(new_doc, 'header')
        i = 0
        for f in fields:
            if f.get('header_data_id', False):
                value = f.get('header_name', "")
                field = etree.SubElement(header, 'field')
                field.text = tools.ustr(value)
            else:
                skip_index.append(i)
            i += 1
        lines = etree.SubElement(new_doc, 'lines')
        for row_lines in rows:
            node_line = etree.SubElement(lines, 'row')
            j = 0
            for row in row_lines:
                if not j in skip_index:
                    # para="group" renders bold group rows; tree="float"
                    # marks numeric columns for right alignment.
                    para = "yes"
                    tree = "no"
                    value = row.get('data', '')
                    if row.get('bold', False):
                        para = "group"
                    if row.get('number', False):
                        tree = "float"
                    col = etree.SubElement(node_line, 'col', para=para, tree=tree)
                    col.text = tools.ustr(value)
                j += 1
        transform = etree.XSLT(
            etree.parse(os.path.join(tools.config['root_path'],
                                     'addons/base/report/custom_new.xsl')))
        rml = etree.tostring(transform(new_doc))
        self.obj = trml2pdf.parseNode(rml, title='Printscreen')
        return self.obj
class ZbPdfExport(ExportPdf):
    # HTTP endpoint wrapping ExportPdf.from_data as a file download.
    _cp_path = '/web/export/zb_pdf_export'

    @openerpweb.httprequest
    def index(self, req, data, token):
        # 'data' is a JSON payload containing uid, headers, rows, company_name.
        data = json.loads(data)
        uid = data.get('uid', False)
        return req.make_response(self.from_data(uid, data.get('headers', []), data.get('rows', []),
                                                data.get('company_name','')),
                                 headers=[('Content-Disposition',
                                           'attachment; filename=PDF Export'),
                                          ('Content-Type', self.content_type)],
                                 cookies={'fileToken': int(token)})
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"[email protected]"
] | |
19edfb5d48f61044424ab6c2a3dd832edbd0612a
|
daae0cf103b6c9f26065f7546a7dc79281fc0bde
|
/16/3.py
|
297df94c2bd4b2df86a41fbc8e26d6952e1e12d4
|
[] |
no_license
|
oc0de/pyEPI
|
97a5d4db91d5459f407c9d414fc999de56885124
|
2b7cedecdd5b8665ab4f1ca4762a3fd5adcc9864
|
refs/heads/master
| 2021-09-05T09:32:53.646441 | 2018-01-26T03:58:10 | 2018-01-26T03:58:10 | 119,003,665 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 371 |
py
|
def number_of_ways(n, m):
    """Count monotone lattice paths through an n x m grid.

    A path starts in the top-left cell and moves only right or down until it
    reaches the bottom-right cell. Replaces the original memoized recursion
    with a single-row dynamic program: O(n*m) time, O(m) space, and no
    recursion-depth limit for large grids.

    Assumes n >= 1 and m >= 1 (as the original recursion did).
    """
    row = [1] * m                 # ways to reach each cell of the first row
    for _ in range(n - 1):
        for j in range(1, m):
            row[j] += row[j - 1]  # ways(from above) + ways(from the left)
    return row[m - 1]
print number_of_ways(5, 5)
|
[
"[email protected]"
] | |
e0b7367a019a91e2fa1bcd3bff959a74b8b7323a
|
e8cc4cd00990a4f8a75e538ca68fa77456f37e3c
|
/telebot/apps.py
|
f6201278a2003ae19e1031d371370381cf66d2e3
|
[] |
no_license
|
devRayanwv/djangoTest
|
950b5d54a3a53f52f615e2ed0a99bac975fb0db9
|
71bb9377f70fde5b28c5685e8800c4209f265a9f
|
refs/heads/master
| 2020-04-06T04:28:08.942379 | 2017-02-24T22:20:08 | 2017-02-24T22:20:08 | 82,883,125 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 130 |
py
|
from __future__ import unicode_literals
from django.apps import AppConfig
class TelebotConfig(AppConfig):
    """Application configuration for the ``telebot`` Django app."""

    # Dotted path of the application this config belongs to.
    name = 'telebot'
|
[
"[email protected]"
] | |
24d2af17dd3749befa8832fee7ee08d62a1a9063
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/JgYPQrYdivmqN4KKX_18.py
|
51bf0ca4c9e57c6e4d4df644268825f4357b96e2
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 530 |
py
|
def BMI(weight, height):
    """Compute body-mass index and classify it.

    weight: string like "80 kilos" or "205 pounds"
    height: string like "1.8 meters" or "73 inches"
    Returns "<bmi> <category>" with bmi rounded to one decimal.
    """
    if "kilos" in weight:
        # Metric input: kg / m^2.
        fBmi = round(float(weight.split()[0]) / float(height.split()[0]) ** 2, 1)
    else:
        # Imperial input: convert pounds -> kg, inches -> metres first.
        kg = float(weight.split()[0]) * 0.453592
        metres = float(height.split()[0]) * 0.0254
        fBmi = round(kg / metres ** 2, 1)
    # BUGFIX: the original conditions left gaps at [24.9, 25) and
    # [29.9, 30) where the function fell through and returned None.
    # Standard WHO cut-offs are <18.5 / <25 / <30 / >=30.
    if fBmi < 18.5:
        return "{0} Underweight".format(fBmi)
    elif fBmi < 25:
        return "{0} Normal weight".format(fBmi)
    elif fBmi < 30:
        return "{0} Overweight".format(fBmi)
    else:
        return "{0} Obesity".format(fBmi)
|
[
"[email protected]"
] | |
9ed3302317bb7901f6b3244ef26fc1ecb990a599
|
5b9f9b4ea1494943e6f7f842df55909599ed1304
|
/python/onshape_client/oas/models/bt_shaded_render_document_response.py
|
7f8e89af07f0ac165d25afbbf29e6536706ff134
|
[] |
no_license
|
jenniferyoung02/onshape-clients
|
f50534f033428027515b7fc0b801b1caab4d0aec
|
8ee31a17d7af32f105b851e45f69fd4a3006e1ba
|
refs/heads/master
| 2020-09-07T06:44:37.682545 | 2019-10-08T18:52:06 | 2019-10-08T18:52:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,884 |
py
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.104
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class BTShadedRenderDocumentResponse(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # image_data: presumably base64-encoded image chunks — TODO confirm
    # against the Onshape API spec; status: project type BTNodeStatus.
    openapi_types = {
        'image_data': 'list[str]',
        'status': 'BTNodeStatus'
    }
    # Maps Python attribute names to the JSON keys used on the wire.
    attribute_map = {
        'image_data': 'imageData',
        'status': 'status'
    }
    def __init__(self, image_data=None, status=None):  # noqa: E501
        """BTShadedRenderDocumentResponse - a model defined in OpenAPI"""  # noqa: E501
        self._image_data = None
        self._status = None
        self.discriminator = None
        if image_data is not None:
            self.image_data = image_data
        if status is not None:
            self.status = status
    @property
    def image_data(self):
        """Gets the image_data of this BTShadedRenderDocumentResponse.  # noqa: E501

        :return: The image_data of this BTShadedRenderDocumentResponse.  # noqa: E501
        :rtype: list[str]
        """
        return self._image_data
    @image_data.setter
    def image_data(self, image_data):
        """Sets the image_data of this BTShadedRenderDocumentResponse.

        :param image_data: The image_data of this BTShadedRenderDocumentResponse.  # noqa: E501
        :type: list[str]
        """
        self._image_data = image_data
    @property
    def status(self):
        """Gets the status of this BTShadedRenderDocumentResponse.  # noqa: E501

        :return: The status of this BTShadedRenderDocumentResponse.  # noqa: E501
        :rtype: BTNodeStatus
        """
        return self._status
    @status.setter
    def status(self, status):
        """Sets the status of this BTShadedRenderDocumentResponse.

        :param status: The status of this BTShadedRenderDocumentResponse.  # noqa: E501
        :type: BTNodeStatus
        """
        self._status = status
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models/lists/dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, BTShadedRenderDocumentResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"[email protected]"
] | |
262e1d9f54429dd7118716daf6cfbc910a323686
|
4fb9150b08a128571ed4a84897c8c95afb76ccb6
|
/healthy/migrations/0002_labdetail.py
|
e6624ae565d211b7af58232ca3a06dfcfe941dd7
|
[] |
no_license
|
eduarde/ChunkyMonkeys
|
815feb7f3e6e2085babb61d12f2255ea2cb46ada
|
34f30e6aaeef6af15aa12e6d599f55d67c6fb7d7
|
refs/heads/master
| 2021-07-09T21:30:49.084584 | 2016-12-05T10:42:04 | 2016-12-05T10:42:04 | 58,738,867 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,175 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-14 11:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the LabDetail model —
    # free-text reason/cause/action notes tied to a Lab record and a user.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('healthy', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='LabDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('reason', models.TextField(blank=True, null=True, verbose_name='Reason')),
                ('cause', models.TextField(blank=True, null=True, verbose_name='Cause')),
                ('action', models.TextField(blank=True, null=True, verbose_name='Action')),
                ('lab_ref', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='LabDet', to='healthy.Lab')),
                ('user_ref', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
eb53a990da835beaca9e9cc878481161831bfb1f
|
1bb2a9150de01c618163bbb8f872bdce6f14df4f
|
/BaekJoon/2981_검문.py
|
acbbec8e742ffdac47cb7a67e0dc300dcd8ab895
|
[] |
no_license
|
whyj107/Algorithm
|
a1c9a49a12a067366bd0f93abf9fa35ebd62102e
|
aca83908cee49ba638bef906087ab3559b36b146
|
refs/heads/master
| 2023-04-14T12:59:52.761752 | 2021-05-01T03:53:31 | 2021-05-01T03:53:31 | 240,014,212 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 989 |
py
|
# Problem
# "Checkpoint" — Baekjoon Online Judge
# https://www.acmicpc.net/problem/2981
# Solution: find every g > 1 such that all N numbers share the same
# remainder mod g. Any such g divides all pairwise differences, so it
# suffices to enumerate divisors of (max - min) and verify each.
from sys import stdin
N = int(stdin.readline())
M = [int(stdin.readline()) for i in range(N)]
M.sort()
tmp = M[-1] - M[0]
y = []
# Collect divisors of the max-min difference (in O(sqrt(tmp))).
for i in range(2, int(tmp**0.5)+1):
    if tmp % i == 0:
        y.append(i)
        if tmp//i not in y: y.append(tmp//i)
y.sort()
y.append(tmp)
# Print each divisor under which all residues agree.
for i in y:
    for n in range(N):
        if n == N-1:
            print(i, end=" ")
        elif M[n] % i != M[n+1] % i:
            break
# Another user's solution (kept for reference, not executed)
"""
import sys
input = sys.stdin.readline
def gcd(a, b):
    return gcd(b, a % b) if a % b else b
n = int(input())
num = sorted([int(input()) for _ in range(n)])
get = num[1] - num[0]
for i in range(2, n):
    get = gcd(get, num[i]-num[i-1])
res = set()
for i in range(2, int(get**0.5)+1):
    if get % i == 0:
        res.add(i)
        res.add(get//i)
res.add(get)
res = sorted(list(res))
print(' '.join(map(str, res)))
"""
|
[
"[email protected]"
] | |
43170fa8f7fc5a3560607c4b21a1cb123096b586
|
f6c1a4593859ad75000e726414f25fbf02766143
|
/setup.py
|
7edb29cfc794fbf5f917801018c219ab2e44a25c
|
[] |
no_license
|
jbeezley/metadata_extractor
|
b753ce6f9e55e5bc92f16b5decfbab5b992ac621
|
1401127bf572119353e3c504278ff7436e077c9e
|
refs/heads/master
| 2020-03-20T00:57:52.713434 | 2018-06-12T13:49:54 | 2018-06-12T13:49:54 | 137,062,489 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,917 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from setuptools import setup, find_packages
# Package definition for the Girder metadata-extractor plugin.
setup(
    name='girder-plugin-metadata-extractor',
    version='0.2.0',
    description='Enables the extraction of metadata from uploaded files',
    author='Kitware, Inc.',
    author_email='[email protected]',
    url='https://github.com/girder/metadata_extractor',
    license='Apache 2.0',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
    ],
    # Ship the plugin's web client assets alongside the Python code.
    package_data={
        '': ['web_client/**']
    },
    packages=find_packages(exclude=['test']),
    zip_safe=False,
    # hachoir-* does the actual file-format metadata parsing.
    install_requires=[
        'girder',
        'hachoir-core',
        'hachoir-metadata',
        'hachoir-parser'
    ],
    # Girder discovers the plugin through this entry point.
    entry_points={
        'girder.plugin': [
            'metadata_extractor = girder_plugin_metadata_extractor:MetadataExtractorPlugin'
        ]
    }
)
|
[
"[email protected]"
] | |
c56a3f8d77a5d05be57428bbda596c5e31709503
|
241724e83f5c12ed9d7dd3b825dfe4e2b1b0f777
|
/examples/boundary_conditions.py
|
a73111c7860a10c82ddfefc46005d3f0954a7718
|
[
"MIT"
] |
permissive
|
xuanxu/py-pde
|
d8be358ab76d4060b14afc74bc7d836591c6188e
|
de33d938aea8680eff872ae1b64569895662a248
|
refs/heads/master
| 2021-03-09T21:37:13.920717 | 2020-03-10T12:18:03 | 2020-03-10T12:18:03 | 246,382,909 | 0 | 0 |
MIT
| 2020-03-10T18:54:22 | 2020-03-10T18:54:22 | null |
UTF-8
|
Python
| false | false | 521 |
py
|
#!/usr/bin/env python3
from pde import UnitGrid, ScalarField, DiffusionPDE
grid = UnitGrid([16, 16], periodic=[False, True])  # 16x16 grid, periodic along y only
state = ScalarField.random_uniform(grid, 0.2, 0.3)  # random initial condition in [0.2, 0.3)
# set boundary conditions `bc` for all axes
bc_x_left = {'type': 'derivative', 'value': 0.1}  # Neumann condition on the left
bc_x_right = {'type': 'value', 'value': 0}  # Dirichlet condition on the right
bc_x = [bc_x_left, bc_x_right]
bc_y = 'periodic'  # must match the grid's periodic y axis
eq = DiffusionPDE(bc=[bc_x, bc_y])
result = eq.solve(state, t_range=10, dt=0.005)
result.plot(show=True)
|
[
"[email protected]"
] | |
aaed72c4c34418066429eb2c96fbe9b95606cdb3
|
de358ba57518d65393c810da20c53e1c41494bff
|
/LRUcache.py
|
49f000a37b16c4cd24efb3415b3888324acb43b6
|
[] |
no_license
|
avirupdandapat/ALGOPROJECT
|
43eef94b13e38452cdc6a506b17b6fee581a07e1
|
55b60a0c6e51cae900e243505f6a4557ad4d7069
|
refs/heads/master
| 2022-12-29T13:02:54.655976 | 2020-10-18T12:23:57 | 2020-10-18T12:23:57 | 305,095,375 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 878 |
py
|
from collections import deque
class LRUCache:
    """Least-recently-used cache.

    PERF: the original tracked recency in a deque, so every hit paid an
    O(n) ``deque.remove`` scan. An OrderedDict preserves the exact LRU
    semantics with O(1) get/set (``move_to_end`` / ``popitem(last=False)``).
    """

    # @param capacity, an integer
    def __init__(self, capacity):
        from collections import OrderedDict  # local import: module imports untouched
        self.capacity = capacity
        self.dic = OrderedDict()  # key -> value; last entry = most recently used

    # @return an integer (-1 when the key is absent)
    def get(self, key):
        if key not in self.dic:
            return -1
        self.dic.move_to_end(key)  # mark as most recently used
        return self.dic[key]

    # @param key, an integer
    # @param value, an integer
    # @return nothing
    def set(self, key, value):
        if key in self.dic:
            self.dic.move_to_end(key)
        elif self.capacity == len(self.dic):
            self.dic.popitem(last=False)  # evict the least recently used entry
        self.dic[key] = value
if __name__ == '__main__':
    # Quick smoke test: prints 12.
    l = LRUCache(2)
    l.set(1, 10)
    l.set(5, 12)
    print(l.get(5))
    l.get(5)  # return values ignored — these calls only touch recency order
    l.get(1)
|
[
"[email protected]"
] | |
da45f7852916d35f50bd49f037a7b3edd42a3e21
|
68d38b305b81e0216fa9f6769fe47e34784c77f2
|
/alascrapy/spiders/amazon_uk_reviews.py
|
15695e7d86cb23644a4dfb659ed43372c84943c0
|
[] |
no_license
|
ADJet1437/ScrapyProject
|
2a6ed472c7c331e31eaecff26f9b38b283ffe9c2
|
db52844411f6dac1e8bd113cc32a814bd2ea3632
|
refs/heads/master
| 2022-11-10T05:02:54.871344 | 2020-02-06T08:01:17 | 2020-02-06T08:01:17 | 237,448,562 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 396 |
py
|
__author__ = 'leonardo'
from alascrapy.spiders.base_spiders.amazon import AmazonReviewsSpider
class AmazonUkReviewsSpider(AmazonReviewsSpider):
    """Scrapes product reviews from amazon.co.uk, newest first.

    Only configuration lives here; crawling logic is in the base spider.
    """
    name = 'amazon_uk_reviews'
    # %s is filled by the base spider — presumably with the product ASIN; confirm there.
    start_url_format = "https://www.amazon.co.uk/product-reviews/%s/ref=cm_cr_dp_see_all_btm?ie=UTF8&showViewpoints=1&sortBy=recent"
    date_format = 'on %d %B %Y'  # e.g. "on 12 March 2016"
    amazon_kind = 'amazon_uk_id'
    language = 'en'
|
[
"[email protected]"
] | |
4d1dc1f084686e22f9f832a79dae3c1d0d56dc01
|
43fe6a9d6875f7524204177a3a68229059133789
|
/social/account/multiforms.py
|
844065a4370c0da415a5df2b271ab382d43f2db9
|
[
"MIT"
] |
permissive
|
MiKueen/Social-Network
|
a011836805ad45228b0031ed1883526b0af02920
|
0b872860f08c3ec6f48a53160128af28787737c7
|
refs/heads/master
| 2023-04-17T15:33:13.212550 | 2019-07-13T04:40:54 | 2019-07-13T04:40:54 | 196,678,685 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,182 |
py
|
from django.http import HttpResponseForbidden, HttpResponseRedirect
from django.views.generic.base import ContextMixin, TemplateResponseMixin
from django.views.generic.edit import ProcessFormView
class MultiFormMixin(ContextMixin):
    """Mixin letting one view manage several independent forms.

    Subclasses declare ``form_classes`` ({name: FormClass}) and may add
    per-form ``prefixes``, ``success_urls``, ``get_<name>_initial`` and
    ``<name>_form_valid`` hooks.
    """
    form_classes = {}
    prefixes = {}
    success_urls = {}
    initial = {}
    prefix = None
    success_url = None
    def get_form_classes(self):
        return self.form_classes
    def get_forms(self, form_classes):
        # Instantiate every declared form, keyed by its name.
        return dict([(key, self._create_form(key, class_name)) \
            for key, class_name in form_classes.items()])
    def get_form_kwargs(self, form_name):
        kwargs = {}
        kwargs.update({'initial':self.get_initial(form_name)})
        kwargs.update({'prefix':self.get_prefix(form_name)})
        # Bind submitted data only on mutating requests.
        if self.request.method in ('POST', 'PUT'):
            kwargs.update({
                'data': self.request.POST,
                'files': self.request.FILES,
            })
        return kwargs
    def forms_valid(self, forms, form_name):
        # Dispatch to a per-form hook if defined, else redirect.
        # NOTE(review): requires HttpResponseRedirect from django.http.
        form_valid_method = '%s_form_valid' % form_name
        if hasattr(self, form_valid_method):
            return getattr(self, form_valid_method)(forms[form_name])
        else:
            return HttpResponseRedirect(self.get_success_url(form_name))
    def forms_invalid(self, forms):
        return self.render_to_response(self.get_context_data(forms=forms))
    def get_initial(self, form_name):
        # Per-form initial data hook; defaults to tagging the form's name.
        initial_method = 'get_%s_initial' % form_name
        if hasattr(self, initial_method):
            return getattr(self, initial_method)()
        else:
            return {'action': form_name}
    def get_prefix(self, form_name):
        return self.prefixes.get(form_name, self.prefix)
    def get_success_url(self, form_name=None):
        return self.success_urls.get(form_name, self.success_url)
    def _create_form(self, form_name, form_class):
        form_kwargs = self.get_form_kwargs(form_name)
        form = form_class(**form_kwargs)
        return form
class ProcessMultipleFormsView(ProcessFormView):
    """GET renders all declared forms; POST validates only the form named
    by the submitted ``action`` field."""
    def get(self, request, *args, **kwargs):
        form_classes = self.get_form_classes()
        forms = self.get_forms(form_classes)
        return self.render_to_response(self.get_context_data(forms=forms))
    def post(self, request, *args, **kwargs):
        form_classes = self.get_form_classes()
        form_name = request.POST.get('action')  # identifies which form was submitted
        return self._process_individual_form(form_name, form_classes)
    def _process_individual_form(self, form_name, form_classes):
        forms = self.get_forms(form_classes)
        form = forms.get(form_name)
        if not form:
            # Unknown or missing 'action' value: refuse the request.
            return HttpResponseForbidden()
        elif form.is_valid():
            return self.forms_valid(forms, form_name)
        else:
            return self.forms_invalid(forms)
class BaseMultipleFormsView(MultiFormMixin, ProcessMultipleFormsView):
    """
    A base view for displaying several forms (no rendering behaviour).
    """
class MultiFormsView(TemplateResponseMixin, BaseMultipleFormsView):
    """
    A view for displaying several forms, and rendering a template response.
    """
|
[
"[email protected]"
] | |
fd975001732ca43e6a45cbcefd0a09a0cf1fd7fa
|
a37963de31a67c214680d80d9ee3ce4611d28587
|
/mrl/modules/model.py
|
8f93b82dcc75932df0c875e7910016d0b4a2814d
|
[
"MIT"
] |
permissive
|
jingweiz/mrl
|
c4c614877760953b246125688e7df96f9081fc4e
|
c94ab1685aea85b0d328199adefca543227875af
|
refs/heads/master
| 2022-11-12T01:36:05.354935 | 2020-07-10T23:32:38 | 2020-07-10T23:32:38 | 279,804,300 | 0 | 1 |
MIT
| 2020-07-15T07:56:50 | 2020-07-15T07:56:49 | null |
UTF-8
|
Python
| false | false | 1,448 |
py
|
import mrl
import torch
from typing import Callable
import os
import pickle
import dill
class PytorchModel(mrl.Module):
  """
  Generic wrapper for a pytorch nn.Module (e.g., the actorcritic network).
  These live outside of the learning algorithm modules so that they can easily be
  shared by different modules (e.g., critic can be used by intrinsic curiosity module).
  They are also saved independently of the agent module (which is stateless).
  """
  def __init__(self, name : str, model_fn : Callable):
    # model_fn is a zero-argument factory returning a fresh nn.Module.
    super().__init__(name, required_agent_modules=[], locals=locals())
    self.model_fn = model_fn
    self.model = self.model_fn()
  def _setup(self):
    # Move the network to the configured device, if the config names one.
    if self.config.get('device'):
      self.model = self.model.to(self.config.device)
  def save(self, save_folder : str):
    # Persists only the state_dict to <save_folder>/<module_name>.pt.
    path = os.path.join(save_folder, self.module_name + '.pt')
    torch.save(self.model.state_dict(), path)
  def load(self, save_folder : str):
    # NOTE(review): no map_location — loading a GPU-saved checkpoint on a
    # CPU-only host would fail; strict=False ignores key mismatches. Confirm intended.
    path = os.path.join(save_folder, self.module_name + '.pt')
    self.model.load_state_dict(torch.load(path), strict=False)
  def copy(self, new_name):
    """Makes a copy of the Model; e.g., for target networks"""
    # dill round-trip deep-copies the module, weights included.
    new_model = dill.loads(dill.dumps(self.model))
    model_fn = lambda: new_model
    return self.__class__(new_name, model_fn)
  def __call__(self, *args, **kwargs):
    # Keep train/eval mode in sync with the wrapping module's flag.
    if self.training:
      self.model.train()
    else:
      self.model.eval()
    return self.model(*args, **kwargs)
|
[
"[email protected]"
] | |
0c3976214f8e28555d2e3ff9dd37ab37dd2c712b
|
251e4de91841fc42959e89211d3501ce24c4435e
|
/eventdriven/adapter/base.py
|
253f683289151bfeaaceae339ac6fba3956f10e6
|
[
"Apache-2.0"
] |
permissive
|
ZSAIm/EventDriven
|
df1251c4e9f3f382600159d6626a6c959670c438
|
92bed2b3cde9249724f9cc25f3d19470abda5b9b
|
refs/heads/master
| 2020-12-07T17:04:32.511933 | 2020-02-20T07:51:18 | 2020-02-20T07:51:18 | 232,758,430 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,767 |
py
|
# -*- coding: UTF-8 -*-
from abc import ABC
class AbstractAdapter(ABC):
    """Base class for controller adapters: a set of lifecycle hooks."""
    def __setup__(self, parent, name, **options):
        """Called during adapter installation to initialize it."""
        self._parent = parent
        self._instance_name = name
        self._options = options
    def __name__(self):
        """Return the adapter instance name."""
        return self._instance_name
    def __patch__(self):
        """Patch the controller, invoked right after __setup__."""
        pass
    def __running__(self):
        """Controller is starting (before its thread starts)."""
        pass
    def __run__(self):
        """Called after the controller has started."""
        pass
    def __closing__(self):
        """Called after the controller raises a close event."""
        pass
    def __closed__(self):
        """Called after the controller's event loop has closed."""
        pass
    def __exception__(self, error):
        """Called when controller event handling raises an exception."""
        pass
    def __suspend__(self):
        """Called after the controller raises a suspend event."""
        pass
    def __resume__(self):
        """Called after the controller raises a resume event."""
        pass
    def __mapping__(self):
        """Return extra event-handler mappings to register."""
        return {}
    def __context__(self):
        """Return global dynamic context entries to add."""
        return {}
    def __static__(self):
        """Return static context entries to add."""
        return {}
    @staticmethod
    def __unique__():
        """Return whether only a single instance may be installed."""
        return False
    @staticmethod
    def __dependencies__():
        """Return the adapter's dependency list."""
        return []
|
[
"[email protected]"
] | |
7075b62d95d63c0abfdebcac5772e9ce9fff30f4
|
02b460257be33634a5e204c12a22d396c49ec1e8
|
/ch1/ex1_6.py
|
e506176ded89c2a72f238158685c3fe6189a0731
|
[] |
no_license
|
wxhheian/ptcb
|
c5250362d5ab0903498e52c5a5d9cbdccc37853f
|
ae95fb18853f94246b4b1e84371e3f140677c8e8
|
refs/heads/master
| 2020-07-02T08:28:16.867948 | 2019-08-09T18:49:50 | 2019-08-09T18:49:50 | 201,473,507 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 466 |
py
|
##实现一个键对应多个值的字典
#####实现方法一:将多个值放到不同的容器中
# d = {
# 'a':[1,2,3],
# 'b':[4,5]
# }
# e = {
# 'a':{1,2,3},
# 'b':{4,5}
# }
from collections import defaultdict
d = defaultdict(list)
d['a'].append(1)
d['b'].append(2)
d['b'].append(4)
e = defaultdict(set)
e['a'].add(1)
e['a'].add(2)
e['b'].add(4)
################setdefault
f={}
f.setdefault('a',[]).append(1)
f.setdefault('a',[]).append(2)
|
[
"[email protected]"
] | |
06d81819ec245e77cec949f12a8b70ffb0617810
|
9431bba2d148f8aef9c0a8f3ca16fcf875890757
|
/scraping/get_html_title.py
|
9f5573db2266ed5c6d715cae3af9936cb85faae6
|
[
"MIT"
] |
permissive
|
terasakisatoshi/pythonCodes
|
fba0b78414b2c85f4a738200354ea583f0516768
|
953210c06e9885a7c885bc01047715a77de08a1a
|
refs/heads/master
| 2023-05-14T12:30:22.201711 | 2023-05-07T13:41:22 | 2023-05-07T13:41:22 | 197,893,702 | 2 | 1 |
MIT
| 2022-11-25T10:59:52 | 2019-07-20T07:09:12 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 603 |
py
|
from urllib.request import urlopen
from urllib.error import HTTPError
from bs4 import BeautifulSoup
def get_title(url):
    """Fetch *url* and return the first <h1> Tag inside <body>.

    Returns None on HTTP errors or when the page has no body/h1.
    """
    try:
        html = urlopen(url)
    except HTTPError as e:
        print(e)
        return None
    try:
        # Name the parser explicitly: letting bs4 auto-detect is
        # non-deterministic across installs and emits a warning.
        bsoup = BeautifulSoup(html.read(), 'html.parser')
        title = bsoup.body.h1  # raises AttributeError if body is absent
    except AttributeError:
        return None
    return title
def main():
    """Fetch a known demo page and print its <h1>, if any."""
    URL = "http://www.pythonscraping.com/pages/page1.html"
    title = get_title(URL)
    if title is None:  # identity check is the idiomatic None test
        print("Title could not be found")
    else:
        print(title)
if __name__ == '__main__':
    # Script entry point.
    main()
|
[
"[email protected]"
] | |
c822f6ed07953bee56d648fff611aea04680c407
|
366b2ff9cd498808438bf7c48f697c05b361d02c
|
/models.py
|
0606075241f9749a7ff176655dadf12a115be600
|
[] |
no_license
|
c-bata/AngularJS-Bottle-TodoApp
|
1aef6b09fd85fabaa63898ab3fb9a2d586216b93
|
8f03820b7949b0c28477970c58f25ccd1856b2a9
|
refs/heads/master
| 2021-03-12T22:40:32.000758 | 2015-11-04T11:14:47 | 2015-11-04T11:14:47 | 38,732,944 | 2 | 0 | null | 2015-11-04T11:11:39 | 2015-07-08T05:02:47 |
Python
|
UTF-8
|
Python
| false | false | 1,225 |
py
|
from datetime import datetime
from sqlalchemy import (
Column, Integer, Unicode, UnicodeText, Boolean, DateTime,
create_engine
)
from sqlalchemy.ext import declarative
from bottle.ext import sqlalchemy
Base = declarative.declarative_base()
# NOTE(review): in-memory SQLite with echo=True — data is lost on restart
# and every SQL statement is logged; presumably a demo configuration.
engine = create_engine('sqlite:///:memory:', echo=True)
plugin = sqlalchemy.Plugin(
    engine,
    Base.metadata,
    keyword='db', # name of the variable injected into route callbacks
    create=True, # create tables at startup
    commit=True, # commit the transaction when the callback returns
    use_kwargs=False
)
class Task(Base):
    """A todo item.

    BUGFIX: ``created_at`` used ``default=datetime.now()``, which is
    evaluated once at import time, so every row would share the
    module-load timestamp. Passing the callable ``datetime.now`` makes
    SQLAlchemy evaluate it per INSERT.
    """
    __tablename__ = 'tasks'
    id = Column(Integer, primary_key=True)
    title = Column(Unicode(255), nullable=False)
    memo = Column(UnicodeText)
    done = Column(Boolean, nullable=False, default=False)
    created_at = Column(DateTime, default=datetime.now, nullable=False)

    def __repr__(self):
        return "<Task (title='%s')>" % self.title

    @property
    def serialize(self):
        """JSON-safe dict representation for API responses."""
        return {
            'id': self.id,
            'title': self.title,
            'memo': self.memo,
            'done': self.done,
            'created_at': self.created_at.strftime('%Y-%m-%d')
        }
|
[
"[email protected]"
] | |
7466229e21a1f6ba95a9a8ae72f30c4c238f16fe
|
9ecf6cfdc15b704b44688c533c5c6e9eccc5c0ab
|
/randomise-selected-objects-color.py
|
181f6e92a57894fc3a910c380826c7c07f9afaf0
|
[] |
no_license
|
Bordilovskii/cinema4d-scripts
|
96b1eab6aa442ef6ead105d22e0bab352d8563c9
|
811be702a64c8b0c97dedbbf95723ce0af06a7fa
|
refs/heads/master
| 2020-03-27T06:37:25.692966 | 2018-07-04T09:30:18 | 2018-07-04T09:30:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 508 |
py
|
import c4d
import random as rand
def main():
    """Give every selected object a random display colour, undoably."""
    doc.StartUndo()
    selection = doc.GetActiveObjects(0)
    if not selection:
        return
    for node in selection:
        doc.AddUndo(c4d.UNDOTYPE_CHANGE, node)
        node[c4d.ID_BASEOBJECT_USECOLOR] = 2  # enable custom display colour
        red, green, blue = rand.random(), rand.random(), rand.random()
        doc.AddUndo(c4d.UNDOTYPE_CHANGE, node)
        node[c4d.ID_BASEOBJECT_COLOR] = c4d.Vector(red, green, blue)
    c4d.EventAdd()
    doc.EndUndo()
if __name__=='__main__':
    # Entry point when run from Cinema 4D's script manager.
    main()
|
[
"[email protected]"
] | |
24cdb1982f2fe439f8411d943177ebf9d46ba73e
|
8d6ec0275afe856834bf10643e3b4b2cbcb318f4
|
/03-online-shop/myshop/shop/views.py
|
93982ce741c0abec344a2ff2ddd5db46f5ee1ff2
|
[] |
no_license
|
markronquillo/django-by-example
|
be35fbbc483440a11c440733931c146d56816c97
|
fa749e5077f64ac68f11c7b529e13ac097cb5bd0
|
refs/heads/master
| 2021-01-11T14:38:40.854636 | 2017-02-24T03:09:58 | 2017-02-24T03:09:58 | 80,184,667 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 883 |
py
|
from django.shortcuts import render, get_object_or_404
from .models import Category, Product
from cart.forms import CartAddProductForm
def product_list(request, category_slug=None):
    """List available products, optionally filtered by a category slug."""
    categories = Category.objects.all()
    products = Product.objects.filter(available=True)
    category = None
    if category_slug:
        category = get_object_or_404(Category, slug=category_slug)
        products = products.filter(category=category)
    context = {
        'category': category,
        'categories': categories,
        'products': products,
    }
    return render(request, 'shop/product/list.html', context)
def product_detail(request, id, slug):
    """Show one available product together with an add-to-cart form."""
    product = get_object_or_404(Product, id=id, slug=slug, available=True)
    context = {
        'product': product,
        'cart_product_form': CartAddProductForm(),
    }
    return render(request, 'shop/product/detail.html', context)
|
[
"[email protected]"
] | |
1fcb488242e10d0c03422d74916f668b21eb791b
|
0e69513ca0fda765b5f655c4405aafb209491389
|
/input/parse_pcm-dpc_it.py
|
4492610a839245b4948d341f93c7abb1d5d1c339
|
[] |
no_license
|
adrianrequena/covid19
|
57a54fdaec79c0d1d57de63810e3337513e87b2f
|
a13cb2c117a68de2740702831f84c17049aa95ab
|
refs/heads/master
| 2023-07-20T01:49:44.583897 | 2020-04-01T19:19:21 | 2020-04-01T19:19:21 | 252,279,864 | 0 | 0 | null | 2023-07-06T21:57:02 | 2020-04-01T20:28:35 |
Python
|
UTF-8
|
Python
| false | false | 1,687 |
py
|
#!/usr/bin/env python
import os
import sys
from pathlib import Path
from datetime import datetime, timedelta
import pandas
from utils import \
parse_level_args, github_raw_dataframe, github_raw_url, dataframe_output, merge_previous
# Root path of the project
ROOT = Path(os.path.dirname(__file__)) / '..'
# This script can parse both region-level and country-level data
is_region = parse_level_args(sys.argv[1:]).level == 'region'
if is_region:
    df = github_raw_dataframe(
        'pcm-dpc/COVID-19', 'dati-json/dpc-covid19-ita-regioni.json', orient='records')
else:
    df = github_raw_dataframe(
        'pcm-dpc/COVID-19', 'dati-json/dpc-covid19-ita-andamento-nazionale.json', orient='records')
# Rename the Italian source columns to the project's English schema.
df = df.rename(columns={
    'data': 'Date',
    'totale_casi': 'Confirmed',
    'deceduti': 'Deaths',
    'tamponi': 'Tested'
})
if is_region:
    df['_RegionLabel'] = df['denominazione_regione']  # region display name
# Parse date into a datetime object
df['Date'] = df['Date'].apply(lambda date: datetime.fromisoformat(date).date())
# Offset date by 1 day to match ECDC report (country-level only)
if not is_region:
    df['RegionCode'] = None
    df['Date'] = df['Date'].apply(lambda date: date + timedelta(days=1))
# Convert dates to ISO format
df['Date'] = df['Date'].apply(lambda date: date.isoformat())
# Add the country code to all records
df['CountryCode'] = 'IT'
# Merge the new data with the existing data (prefer new data if duplicates)
if not is_region:
    filter_function = lambda row: row['CountryCode'] == 'IT' and pandas.isna(row['RegionCode'])
    df = merge_previous(df, ['Date', 'CountryCode'], filter_function)
# Output the results
dataframe_output(df, ROOT, 'IT' if is_region else None)
|
[
"[email protected]"
] | |
43a3171c18f24f3e5cf493bcf8576ddb6b9456b6
|
ebd2df05eae5875f3edd5c891442b9fe1f3d54ee
|
/empleados/views.py
|
3b8388bd33952007db18e34edaecbd69330d2a7c
|
[] |
no_license
|
gfcarbonell/app_navidad
|
06191ef3b084d40c7a5f387a60407406c2c89d54
|
fa290f8cf0b4b0d9237b555417fe38f879938adf
|
refs/heads/master
| 2020-12-24T11:54:10.514150 | 2016-11-16T15:37:09 | 2016-11-16T15:37:09 | 73,115,163 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,364 |
py
|
# -*- encoding: utf-8 -*-
from django.conf import settings
from django.views.generic import CreateView, UpdateView, ListView, DetailView
from .models import Empleado
from .forms import EmpleadoModelForm, EmpleadoUsuarioForm
from django.core.urlresolvers import reverse_lazy
from rest_framework import viewsets
from django.db.models import Q
import socket
from pure_pagination.mixins import PaginationMixin
from django.template.defaultfilters import slugify
from infos_sistemas.mixins import TipoPerfilUsuarioMixin
class EmpleadoCreateView(TipoPerfilUsuarioMixin, CreateView):
    """Creates a user account and its linked Empleado in one combined form."""
    template_name = 'empleado_create.html'
    form_class = EmpleadoUsuarioForm
    model = Empleado
    success_url = reverse_lazy('empleado:control')
    def form_valid(self, form):
        # Fill audit fields (creator, host name, IP) on the new user.
        user = form['model_form_usuario'].save(commit=False)
        user.usuario_creador = self.request.user
        user.ultimo_usuario_editor = user.usuario_creador
        try:
            user.nombre_host = socket.gethostname()
            user.ultimo_nombre_host = user.nombre_host
        except:
            # NOTE(review): bare except falls back to 'localhost' if the
            # host name cannot be resolved.
            user.nombre_host = 'localhost'
            user.ultimo_nombre_host = user.nombre_host
        user.direccion_ip = socket.gethostbyname(socket.gethostname())
        user.ultimo_direccion_ip = socket.gethostbyname(socket.gethostname())
        # Same audit treatment for the Empleado record.
        empleado = form['model_form_empleado'].save(commit=False)
        empleado.tipo_persona = 'Natural'
        if empleado.numero_hijo is None:
            empleado.numero_hijo = 0
        user.save()
        empleado.usuario = user
        empleado.usuario_creador = self.request.user
        empleado.ultimo_usuario_editor = empleado.usuario_creador
        try:
            empleado.nombre_host = socket.gethostname()
            empleado.ultimo_nombre_host = empleado.nombre_host
        except:
            empleado.nombre_host = 'localhost'
            empleado.ultimo_nombre_host = empleado.nombre_host
        empleado.direccion_ip = socket.gethostbyname(socket.gethostname())
        empleado.ultimo_direccion_ip = socket.gethostbyname(socket.gethostname())
        empleado.save()
        return super(EmpleadoCreateView, self).form_valid(form)
class EmpleadoUpdate(TipoPerfilUsuarioMixin, UpdateView):
    """Edit an Empleado record only (not its linked user account)."""
    form_class = EmpleadoModelForm
    success_url = reverse_lazy('empleado:control')
    template_name = 'empleado_update.html'
    queryset = Empleado.objects.all()
    def form_valid(self, form):
        self.object = form.save(commit=False)
        if self.object.numero_hijo is None:
            self.object.numero_hijo = 0
        # Update audit fields with editor, host and IP of this save.
        self.object.ultimo_usuario_editor = self.request.user
        try:
            self.object.ultimo_nombre_host = socket.gethostname()
        except:
            self.object.ultimo_nombre_host = 'localhost'
        self.object.ultimo_direccion_ip = socket.gethostbyname(socket.gethostname())
        self.object.save()
        return super(EmpleadoUpdate, self).form_valid(form)
class EmpleadoUsuarioUpdateView(TipoPerfilUsuarioMixin, UpdateView):
    """Edit an Empleado together with its linked user account.

    BUGFIX: the super() calls and get_form_kwargs referenced a
    nonexistent class name ``EmpleadoUpdateView`` (NameError at runtime);
    they now reference this class.
    """
    form_class = EmpleadoUsuarioForm
    success_url = reverse_lazy('empleado:control')
    template_name = 'empleado_usuario_update.html'
    queryset = Empleado.objects.all()
    def get_context_data(self, **kwarg):
        context = super(EmpleadoUsuarioUpdateView, self).get_context_data(**kwarg)
        empleado = self.queryset.get(slug__contains=self.kwargs['slug'])
        data = {'empleado':empleado}
        context.update(data)
        return context
    def get_form_kwargs(self):
        kwargs = super(EmpleadoUsuarioUpdateView, self).get_form_kwargs()
        # The multi-form expects one instance per sub-form.
        kwargs.update(instance={
            'model_form_empleado': self.object,
            'model_form_usuario': self.object.usuario,
        })
        return kwargs
    def form_valid(self, form):
        empleado = self.queryset.get(slug__contains=self.kwargs['slug'])
        user = form['model_form_usuario'].save(commit=False)
        # NOTE(review): the just-saved form instance is immediately replaced
        # by the employee's existing user — confirm this discard is intended.
        user = empleado.usuario
        user.ultimo_usuario_editor = self.request.user
        try:
            user.ultimo_nombre_host = user.nombre_host
        except:
            user.ultimo_nombre_host = user.nombre_host
        user.ultimo_direccion_ip = socket.gethostbyname(socket.gethostname())
        empleado = form['model_form_empleado'].save(commit=False)
        empleado.tipo_persona = 'Natural'
        if empleado.numero_hijo is None:
            empleado.numero_hijo = 0
        user.save()
        empleado.usuario = user
        empleado.ultimo_usuario_editor = self.request.user
        try:
            empleado.ultimo_nombre_host = empleado.nombre_host
        except:
            empleado.ultimo_nombre_host = empleado.nombre_host
        empleado.ultimo_direccion_ip = socket.gethostbyname(socket.gethostname())
        empleado.save()
        return super(EmpleadoUsuarioUpdateView, self).form_valid(form)
class EmpleadoDetailView(TipoPerfilUsuarioMixin, DetailView):
    """Read-only detail page for a single Empleado."""
    template_name = 'empleado_detail.html'
    model = Empleado
    queryset = Empleado.objects.all()
class EmpleadoControlListView(PaginationMixin, TipoPerfilUsuarioMixin, ListView):
    """Paginated employee list with a slug-based search box."""
    model = Empleado
    template_name = 'empleados.html'
    paginate_by = 10
    def get_context_data(self, **kwarg):
        context = super(EmpleadoControlListView, self).get_context_data(**kwarg)
        boton_menu = False
        total_registro = self.model.objects.count()
        data = {
            'boton_menu' : boton_menu,
            'total_registro': total_registro,
        }
        context.update(data)
        return context
    def get(self, request, *args, **kwargs):
        # When a search term is present, render the filtered queryset.
        if request.GET.get('search_registro', None):
            self.object_list = self.get_queryset()
            context = self.get_context_data()
            return self.render_to_response(context)
        else:
            return super(EmpleadoControlListView, self).get(self, request, *args, **kwargs)
    def get_queryset(self):
        # Search matches the slugified term against the employee slug.
        if self.request.GET.get('search_registro', None):
            value = self.request.GET.get('search_registro', None)
            queryset = self.model.objects.filter(Q(slug__icontains=slugify(value)))
        else:
            queryset = super(EmpleadoControlListView, self).get_queryset()
        return queryset
|
[
"[email protected]"
] | |
26dace9da5168c53db1423f65ab53c70e82b7187
|
d131ad1baf891a2918ae27b0dc57f3c0c1f99586
|
/blog/migrations/0001_initial.py
|
ec6923c8ffb8cbccaa6e420a5a387c7af1f5ae91
|
[] |
no_license
|
Alymbekov/TestProjectForDjangoForms
|
d3bf24844628136f9236d5222d32235e87f7aecd
|
ce3262e7565e293b691ea70b94b67155c15525bd
|
refs/heads/master
| 2020-04-10T05:35:19.516127 | 2018-12-07T14:24:05 | 2018-12-07T14:24:05 | 160,832,149 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 713 |
py
|
# Generated by Django 2.1 on 2018-11-18 08:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the blog ``Post`` model."""

    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(db_index=True, max_length=150)),
                ('slug', models.SlugField(max_length=150, unique=True)),
                ('body', models.TextField(blank=True, db_index=True)),
                ('date_pub', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
471b28b164af5875eb9670ed6bdea81faaa98ba6
|
9d1c9a81520437122d9f2f012c2737e4dd22713c
|
/src/td_clean.py
|
0b0e3a8e8ad9f059d56a6f5f5dd04748362a15f8
|
[
"MIT"
] |
permissive
|
geophysics-ubonn/crtomo_tools
|
136aa39a8a0d92061a739ee3723b6ef7879c57b8
|
aa73a67479c4e96bc7734f88ac7b35a74b5d158c
|
refs/heads/master
| 2023-08-24T01:55:29.517285 | 2023-08-08T13:03:46 | 2023-08-08T13:03:46 | 142,049,690 | 2 | 9 |
MIT
| 2019-06-06T12:46:42 | 2018-07-23T17:54:24 |
Standard ML
|
UTF-8
|
Python
| false | false | 1,791 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Clean a simulation directory of all modeling/inversion files
"""
import numpy as np
import os
import glob
def main():
rm_list = []
required_files_inversion = (
'exe/crtomo.cfg',
'grid/elem.dat',
'grid/elec.dat',
'mod/volt.dat')
clean_inv = np.all([os.path.isfile(x) for x in required_files_inversion])
if clean_inv:
rm_list += glob.glob('inv/*')
rm_list += [
'exe/error.dat',
'exe/crtomo.pid',
'exe/variogram.gnu',
'exe/inv.elecpositions',
'exe/inv.gstat',
'exe/inv.lastmod',
'exe/inv.lastmod_rho',
'exe/inv.mynoise_pha',
'exe/inv.mynoise_rho',
'exe/inv.mynoise_voltages',
'exe/tmp.kfak',
'overview.png',
]
required_files_modelling = (
'exe/crmod.cfg',
'grid/elem.dat',
'grid/elec.dat',
'config/config.dat',
'rho/rho.dat'
)
clean_mod = np.all([os.path.isfile(x) for x in required_files_modelling])
if clean_mod:
rm_list += glob.glob('mod/sens/*')
rm_list += glob.glob('mod/pot/*')
rm_list += ['mod/volt.dat', ]
rm_list += ['exe/crmod.pid', ]
for filename in rm_list:
if os.path.isfile(filename):
# print('Removing file {0}'.format(filename))
os.remove(filename)
plot_files = (
'rho.png',
'imag.png',
'real.png',
'phi.png',
'cov.png',
'fpi_imag.png',
'fpi_phi.png',
'fpi_real.png',
)
for filename in plot_files:
if os.path.isfile(filename):
os.remove(filename)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
fa2af2256e992f5dea361ca6dc8422c6d97e35d1
|
43ab33b2f50e47f5dbe322daa03c86a99e5ee77c
|
/rcc/models/study_events.py
|
73804683abfe9626a9ff78782d4aa06520a3ae77
|
[] |
no_license
|
Sage-Bionetworks/rcc-client
|
c770432de2d2950e00f7c7bd2bac22f3a81c2061
|
57c4a621aecd3a2f3f9faaa94f53b2727992a01a
|
refs/heads/main
| 2023-02-23T05:55:39.279352 | 2021-01-21T02:06:08 | 2021-01-21T02:06:08 | 331,486,099 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,338 |
py
|
# coding: utf-8
"""
nPhase REST Resource
REDCap REST API v.2 # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from rcc.configuration import Configuration
class StudyEvents(object):
    """Container model holding a list of ``StudyEvent`` objects.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Improvement over the generated code: ``to_dict`` uses native
    ``dict.items()`` and comprehensions instead of the ``six.iteritems``
    Python-2 shim and ``map(lambda ...)`` — identical behavior, no
    compatibility dependency on the hot path.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'study_event': 'list[StudyEvent]'
    }

    attribute_map = {
        'study_event': 'studyEvent'
    }

    def __init__(self, study_event=None, local_vars_configuration=None):  # noqa: E501
        """StudyEvents - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._study_event = None
        self.discriminator = None

        if study_event is not None:
            self.study_event = study_event

    @property
    def study_event(self):
        """Gets the study_event of this StudyEvents.  # noqa: E501

        :return: The study_event of this StudyEvents.  # noqa: E501
        :rtype: list[StudyEvent]
        """
        return self._study_event

    @study_event.setter
    def study_event(self, study_event):
        """Sets the study_event of this StudyEvents.

        :param study_event: The study_event of this StudyEvents.  # noqa: E501
        :type: list[StudyEvent]
        """
        self._study_event = study_event

    def to_dict(self):
        """Returns the model properties as a dict, recursing into any
        nested models that expose ``to_dict``.
        """
        result = {}

        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x
                    for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, StudyEvents):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, StudyEvents):
            return True

        return self.to_dict() != other.to_dict()
|
[
"[email protected]"
] | |
07025217cb00bf91a6ba23c519d15a6c2bff30ad
|
82a9077bcb5a90d88e0a8be7f8627af4f0844434
|
/google-cloud-sdk/lib/tests/unit/api_lib/compute/instances/ops_agents/exceptions_test.py
|
6315b0e3b6f56b2dd728bee1157215665d21febe
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
piotradamczyk5/gcloud_cli
|
1ae2553595e569fad6ce84af62b91a7ee5489017
|
384ece11040caadcd64d51da74e0b8491dd22ca3
|
refs/heads/master
| 2023-01-01T23:00:27.858583 | 2020-10-21T04:21:23 | 2020-10-21T04:21:23 | 290,238,061 | 0 | 0 | null | 2020-10-19T16:43:36 | 2020-08-25T14:31:00 |
Python
|
UTF-8
|
Python
| false | false | 1,861 |
py
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit Tests for ops_agents.exceptions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute.instances.ops_agents import exceptions
from tests.lib import test_case
import six
ERROR_MESSAGE_1 = 'At most one agent with type [logging] is allowed.'
ERROR_MESSAGE_2 = (
'The agent version [1] is not allowed. Expected values: [latest], '
'[current-major], or anything in the format of '
'[MAJOR_VERSION.MINOR_VERSION.PATCH_VERSION] or [MAJOR_VERSION.*.*].')
ERROR_MESSAGE_3 = (
'An agent can not be pinned to the specific version [5.3.1] when '
'[enable-autoupgrade] is set to true for that agent.')
MULTI_ERROR_MESSAGE = '{} | {} | {}'.format(
ERROR_MESSAGE_1, ERROR_MESSAGE_2, ERROR_MESSAGE_3)
class PolicyValidationMultiErrorTest(test_case.TestCase):
    """Checks that PolicyValidationMultiError joins its child messages."""

    def testErrorMessage(self):
        messages = [ERROR_MESSAGE_1, ERROR_MESSAGE_2, ERROR_MESSAGE_3]
        errors = [exceptions.PolicyValidationError(m) for m in messages]
        multi_error = exceptions.PolicyValidationMultiError(errors)
        self.assertEqual(MULTI_ERROR_MESSAGE, six.text_type(multi_error))
|
[
"[email protected]"
] | |
80ffd316b9bbc8a682e4c8e9e842d3020e7a8472
|
545536daea315e31e01e388326e21a317f73dc6c
|
/Guddu on a Date.py
|
f390db81dd0b921ac0e786f7bc984075e63bfca0
|
[] |
no_license
|
calkikhunt/CODE_CHEF
|
3cd4db7d2231dc31a045645da08c52a78edda6b6
|
81bb90368822bc77e70582ab3eae1a4244e6c80f
|
refs/heads/master
| 2022-04-18T08:43:23.900118 | 2020-01-29T09:31:35 | 2020-01-29T09:31:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 805 |
py
|
# For each of t test cases, find the n-th "lucky" number: starting from 19
# (digit sum 10), walk upward using increments of 9/18/19 chosen so the
# digit sum stays divisible by 10, and print the n-th such number.
# NOTE(review): if a candidate's digit sum were ever not divisible by 10,
# no branch fires and the loop would spin forever — presumably the chosen
# increments preserve the invariant; confirm.
t=int(input())
for i in range(t):
    ctrcopy=19
    n=int(input())
    ptr=0
    while ptr<(n):
        ctr=ctrcopy
        check=str(ctrcopy)
        doublecheck=str(ctrcopy+19)
        # Compute the digit sum of the current candidate.
        sumdigi=0
        while ctr>0:
            use=ctr%10
            ctr=ctr//10
            sumdigi+=use
        if sumdigi%10==0 and check[len(check)-1]!='0':
            ptr+=1
            if ptr>=n:
                break
            ctrcopy+=9
        elif sumdigi%10==0 and check[len(check)-1]=='0' and check[0]==doublecheck[0]:
            ptr+=1
            if ptr>=n:
                break
            ctrcopy+=19
        elif sumdigi%10==0 and check[len(check)-1]=='0' and check[0]!=doublecheck[0]:
            ptr+=1
            if ptr>=n:
                break
            ctrcopy+=18
    print(ctrcopy)
|
[
"[email protected]"
] | |
a17d7cd9fdcdc856d383afb6531cce96e9bb9932
|
1ff376da81912600e0f8b3d45ea061d9418a654c
|
/backend/weeklypulls/apps/series/models.py
|
219c094f4f48347bc1312ed8e9e5114862031b13
|
[] |
no_license
|
rkuykendall/weeklypulls
|
9c3448665b3a18cc0375ad40a60ad71008bb4e89
|
e8300a6f28f6ce959130865e8bcf8c365033b2ce
|
refs/heads/master
| 2021-01-17T19:51:43.702126 | 2017-12-18T12:16:28 | 2017-12-18T12:16:28 | 61,999,182 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,704 |
py
|
import os
from django.db import models
from django.contrib.postgres.fields import ArrayField
import marvelous
from weeklypulls.apps.marvel.models import DjangoCache
class Series(models.Model):
    """A tracked Marvel comic series with per-issue read/skipped state."""

    # Marvel API identifier; one row per tracked series.
    series_id = models.IntegerField(unique=True)
    # Comic ids the user has read / deliberately skipped.
    read = ArrayField(models.IntegerField(), default=list)
    skipped = ArrayField(models.IntegerField(), default=list)
    created_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        verbose_name_plural = "series"

    def __str__(self):
        """Human-readable label; falls back to the bare id when the Marvel
        API call inside ``self.api`` fails for any reason.
        """
        try:
            return '{} ({})'.format(self.api['title'], self.series_id)
        except Exception:
            return 'Series {} (api error)'.format(self.series_id)

    @property
    def api(self):
        """Fetch series metadata and its comics from the Marvel API.

        Requires MAPI_PUBLIC_KEY / MAPI_PRIVATE_KEY in the environment
        (raises KeyError if missing). Performs network calls on every
        access — NOTE(review): not cached here; presumably DjangoCache
        handles response caching at the HTTP layer — confirm.
        """
        public_key = os.environ['MAPI_PUBLIC_KEY']
        private_key = os.environ['MAPI_PRIVATE_KEY']
        cache = DjangoCache()
        marvel_api = marvelous.api(public_key, private_key, cache=cache)
        series = marvel_api.series(self.series_id)
        response = {
            'title': series.title,
            'comics': [],
            'series_id': self.series_id,
        }
        # Restrict to plain comic issues, excluding variants.
        series_args = {
            'format': "comic",
            'formatType': "comic",
            'noVariants': True,
            'limit': 100,
        }
        for comic in series.comics(series_args):
            response['comics'].append({
                'id': comic.id,
                'title': comic.title,
                'read': (comic.id in self.read),
                'skipped': (comic.id in self.skipped),
                'on_sale': comic.dates.on_sale,
                'series_id': comic.series.id,
                'images': comic.images,
            })
        return response
|
[
"[email protected]"
] | |
9b7d397ba307c03c0cd50292f30ea2770a2a8816
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02623/s581456736.py
|
db739a5bab8b529088885d50f94a895ce4eb8e86
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 713 |
py
|
# AtCoder ABC172-C style problem: two stacks of books (reading times a, b)
# and a time budget k; maximize the number of books read from the tops.
# Strategy: greedily fill from A then B, then repeatedly put back one A book
# and extend B as far as the freed time allows, keeping the best total.
n, m, k = map(int, input().split())
a = list(map(int, input().split()))
b = list(map(int, input().split()))
a_num = 0
b_num = 0
book_num = 0
passed_k = 0
# Take as many A books as fit in the budget.
for i in range(n):
    if a[i] + passed_k <= k:
        a_num += 1
        passed_k += a[i]
    else:
        break
# Then as many B books as still fit.
for i in range(m):
    if b[i] + passed_k <= k:
        b_num += 1
        passed_k += b[i]
    else:
        break
book_num = a_num + b_num
# Trade A books for B books one at a time, tracking the best count.
while a_num > 0:
    passed_k -= a[a_num - 1]
    a_num -= 1
    while b_num < m:
        if passed_k + b[b_num] <= k:
            passed_k += b[b_num]
            b_num += 1
        else:
            break
    book_num = max(book_num, a_num + b_num)
    if b_num == m:
        break
print(book_num)
|
[
"[email protected]"
] | |
881bf26ac89b923944c31b113c5a4250cb30de70
|
780c45da6388931381d911499723c5afa8a44036
|
/run_test_c30.py
|
ce1a8a664e0893aa42c5eaf89ed0835150c1a6ad
|
[
"Apache-2.0"
] |
permissive
|
daitouli/metaheuristics
|
f9157bd700957072a69c0be03d8d34378533581c
|
9d885e4c9e9f39ad22baa9ea5d263d5daa276f88
|
refs/heads/master
| 2021-02-04T18:40:47.387347 | 2019-09-30T06:51:26 | 2019-09-30T06:51:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,520 |
py
|
import pandas as pd
from models.multiple_solution.swarm_based.ABC import *
from models.multiple_solution.swarm_based.BMO import *
from models.multiple_solution.swarm_based.BOA import *
from models.multiple_solution.swarm_based.EPO import *
from models.multiple_solution.swarm_based.HHO import *
from models.multiple_solution.swarm_based.NMR import *
from models.multiple_solution.swarm_based.PFA import *
from models.multiple_solution.swarm_based.PSO import *
from models.multiple_solution.swarm_based.SFO import *
from models.multiple_solution.swarm_based.SOA import *
from models.multiple_solution.swarm_based.WOA import *
from utils.FunctionUtil import *
## Setting parameters for each metaheuristic on the CEC C30 benchmark.
root_paras = {
    "problem_size": 100,
    "domain_range": [-100, 100],
    "print_train": True,
    "objective_func": C30
}
abc_paras = {
    "epoch": 500,
    "pop_size": 100,
    "couple_bees": [16, 4],  # number of bees which provided for good location and other location
    "patch_variables": [5.0, 0.985],  # patch_variables = patch_variables * patch_factor (0.985)
    "sites": [3, 1],  # 3 bees (employed bees, onlookers and scouts), 1 good partition
}
bmo_paras = {
    "epoch": 500,
    "pop_size": 100,
    "bm_teams": 10
}
boa_paras = {
    "epoch": 500,
    "pop_size": 100,
    "c": 0.01,
    "p": 0.8,
    "alpha": [0.1, 0.3]
}
epo_paras = {
    "epoch": 500,
    "pop_size": 100
}
hho_paras = {
    "epoch": 500,
    "pop_size": 100
}
nmr_paras = {
    "pop_size": 100,
    "epoch": 500,
    "bp": 0.75,  # breeding probability
}
pfa_paras = {
    "epoch": 500,
    "pop_size": 100
}
pso_paras = {
    "epoch": 500,
    "pop_size": 100,
    "w_minmax": [0.4, 0.9],  # [0-1] -> [0.4-0.9] Weight of bird
    "c_minmax": [1.2, 1.2]  # [(1.2, 1.2), (0.8, 2.0), (1.6, 0.6)] Effecting of local va global
}
isfo_paras = {
    "epoch": 500,
    "pop_size": 100,  # SailFish pop size
    "pp": 0.1  # the rate between SailFish and Sardines (N_sf = N_s * pp) = 0.25, 0.2, 0.1
}
soa_paras = {
    "epoch": 500,
    "pop_size": 100,
}
woa_paras = {
    "epoch": 500,
    "pop_size": 100
}
## Run model: instantiate every algorithm with its parameter dict.
name_model = {
    'BaseABC': BaseABC(root_algo_paras=root_paras, abc_paras=abc_paras),
    'BaseBMO': BaseBMO(root_algo_paras=root_paras, bmo_paras=bmo_paras),
    "AdaptiveBOA": AdaptiveBOA(root_algo_paras=root_paras, boa_paras=boa_paras),
    "BaseEPO": BaseEPO(root_algo_paras=root_paras, epo_paras=epo_paras),
    "BaseHHO": BaseHHO(root_algo_paras=root_paras, hho_paras=hho_paras),
    "LevyNMR": LevyNMR(root_algo_paras=root_paras, nmr_paras=nmr_paras),
    "IPFA": IPFA(root_algo_paras=root_paras, pfa_paras=pfa_paras),
    "BasePSO": BasePSO(root_algo_paras=root_paras, pso_paras=pso_paras),
    "ImprovedSFO": ImprovedSFO(root_algo_paras=root_paras, isfo_paras=isfo_paras),
    "BaseSOA": BaseSOA(root_algo_paras=root_paras, soa_paras=soa_paras),
    "BaoWOA": BaoWOA(root_algo_paras=root_paras, woa_paras=woa_paras)
}
### 1st way: collect loss curves into a numpy array and save as CSV.
# list_loss = []
# for name, model in name_model.items():
#     _, loss = model._train__()
#     list_loss.append(loss)
# list_loss = np.asarray(list_loss)
# list_loss = list_loss.T
# np.savetxt("run_test_c30.csv", list_loss, delimiter=",", header=str(name_model.keys()))
### 2nd way: train each model and save the loss curves via pandas.
list_loss = {}
for name, model in name_model.items():
    _, loss = model._train__()
    list_loss[name] = loss
df = pd.DataFrame(list_loss)
df.to_csv('c30_results.csv')  # saving the dataframe
|
[
"[email protected]"
] | |
ed530e3765c93ad395a073bdba2ebcf9db8a922e
|
2069ec66ace2e8fb5d55502d1c3ce7fd89f3cdcc
|
/fp2/example/write.py
|
2835c40effeaaa01280a975bc1037885b60af898
|
[] |
no_license
|
caimingA/ritsumeiPython
|
6812a0233456cf3d5346a63d890f4201160593c5
|
bb9c39726dd26fe53f7a41f5367bdab60c36a057
|
refs/heads/master
| 2022-11-16T22:28:50.274374 | 2020-07-13T14:53:51 | 2020-07-13T14:53:51 | 279,294,544 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 301 |
py
|
# Write a three-sentence excerpt (Akutagawa) to yuki.txt in UTF-8.
# Fix over the original: the file handle was never closed; a context
# manager guarantees the buffer is flushed and the handle released.
with open("yuki.txt", mode="w", encoding="utf-8") as f:
    f.write("或冬曇りの午後、わたしは中央線の汽車の窓に一列の山脈を眺めてゐた。")
    f.write("山脈は勿論まつ白だつた。")
    f.write("が、それは雪と言ふよりも山脈の皮膚に近い色をしてゐた。")
|
[
"[email protected]"
] | |
91c38c6e741d31665a613aefbe52b741dad9f2d3
|
e2f133885cfcea86a3c06bba2f1d4d165e50c823
|
/api_test/main.py
|
eb2d68962d74199d1e2afd00f96adc2b336a3364
|
[] |
no_license
|
JR1QQ4/app_test
|
e0d9dc25ea03060d17dc7f29f30706ec4b8c16ea
|
1c2ab9a5601e94a28f9bfe485e615d22511bb79b
|
refs/heads/main
| 2023-05-25T14:55:53.326377 | 2021-06-08T14:33:52 | 2021-06-08T14:33:52 | 349,760,345 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,417 |
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
from time import sleep
from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy
from appium.webdriver.common.touch_action import TouchAction
from appium.webdriver.extensions.android.gsm import GsmCallActions
from appium.webdriver.webdriver import WebDriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class Main:
    """Page-object wrapper around an Appium session for the Xueqiu
    (Snowball) Android app: search, stock quotes, gestures, telephony/SMS
    emulation, network toggling, screenshots and webview handling.
    """
    _driver: WebDriver
    _appPackage = "com.xueqiu.android"
    _appActivity = ".view.WelcomeActivityAlias"
    # _appActivity = ".common.MainActivity"
    # Search box locators.
    _search_input = (MobileBy.ID, "com.xueqiu.android:id/tv_search")
    _search_text = (MobileBy.ID, "com.xueqiu.android:id/search_input_text")
    # Search result locators ($value / $code / $tab are placeholders
    # substituted with str.replace at call time).
    _search_result = (MobileBy.XPATH, '//*[@resource-id="com.xueqiu.android:id/name" and @text="$value"]')
    _search_result_first = (MobileBy.ID, 'com.xueqiu.android:id/name')
    _result_item = (MobileBy.XPATH, '//*[@resource-id="com.xueqiu.android:id/ll_stock_result_view"]'
                                    '//*[@text="$value"]/../..')
    _result_item_code = (MobileBy.XPATH, '//*[@text="$code"]')
    _result_price = (MobileBy.XPATH, '//*[@resource-id="com.xueqiu.android:id/ll_stock_result_view"]'
                                     '//*[@text="$value"]/../..//*[@resource-id="com.xueqiu.android:id/current_price"]')
    _result_price_with_code = (MobileBy.XPATH, '//*[@text="$code"]/../../..'
                                               '//*[@resource-id="com.xueqiu.android:id/current_price"]')
    # "Cancel search" button.
    _close_search = (MobileBy.ID, 'com.xueqiu.android:id/action_close')
    # Bottom tab navigation.
    _tab = (MobileBy.XPATH, '//*[@resource-id="android:id/tabs"]//*[@text="$tab"]/..')

    def __init__(self, driver: WebDriver = None):
        """Create a fresh Appium session against a local server, or reuse a
        given driver (restarting the app activity).

        NOTE(review): several capability keys look misspelled and would be
        silently ignored by the Appium server — "noRest" (noReset),
        "unicodeKeyBoard"/"resetKeyBoard" (unicodeKeyboard/resetKeyboard),
        "dontStopAppOnRest" (dontStopAppOnReset), "uuid" (udid) — confirm.
        NOTE(review): in the else-branch the passed-in ``driver`` is used via
        ``self._driver`` without being assigned first; likely missing
        ``self._driver = driver``.
        """
        if driver is None:
            opts = ["http://127.0.0.1:4723/wd/hub",
                    {
                        "platformName": "Android",
                        "platformVersion": "6.0",
                        "deviceName": "127.0.0.1:7555",
                        "automationName": "UiAutomator2",
                        "appPackage": self._appPackage,  # adb shell dumpsys activity top
                        "appActivity": self._appActivity,
                        "noRest": True,
                        "unicodeKeyBoard": True,
                        "resetKeyBoard": True,
                        # "avd": "Pixel_23_6",  # launch an emulator AVD
                        "dontStopAppOnRest": True,  # don't stop the app on first launch (faster debug runs)
                        "skipDeviceInitialization": True,  # skip install / permission setup (faster debug runs)
                        # "newCommandTimeout": 300,  # idle timeout between commands
                        # "uuid": "",  # target a specific device
                        # "autoGrantPermissions": True,  # permission handling; with this set, noRest is unnecessary
                        "chromedriverExecutable": "C:\\webdriver\\chromedriver.exe"  # for testing webview pages
                    }
                    ]
            self._driver = webdriver.Remote(*opts)
        else:
            self._driver.start_activity(self._appPackage, self._appActivity)
        self._driver.implicitly_wait(10)

    def find(self, locator):
        """Wait for visibility, then return the element."""
        WebDriverWait(self._driver, 10).until(EC.visibility_of_element_located(locator))
        return self._driver.find_element(*locator)

    def click(self, locator):
        """Wait for visibility, then click the element."""
        ele = WebDriverWait(self._driver, 10).until(EC.visibility_of_element_located(locator))
        ele.click()

    def text(self, locator, value=""):
        """Type *value* into the element, or return its text when *value* is empty."""
        WebDriverWait(self._driver, 10).until(EC.visibility_of_element_located(locator))
        if value != "":
            self._driver.find_element(*locator).send_keys(value)
        else:
            return self._driver.find_element(*locator).text

    def search(self, value="阿里巴巴"):
        """Open the search box and type the query."""
        self.click(self._search_input)
        self.text(self._search_text, value)

    def search_and_get_price(self, value="阿里巴巴"):
        """Search for a stock and return its current price as float.

        NOTE(review): the result/price locators hard-code "阿里巴巴" rather
        than *value*, so only the default query works as intended — confirm.
        """
        self.click(self._search_input)
        self.text(self._search_text, value)
        self.click((self._search_result[0], self._search_result[1].replace("$value", "阿里巴巴")))
        return float(self.text((self._result_price[0], self._result_price[1].replace("$value", "阿里巴巴"))))

    def search_and_show_attribute(self):
        """Demo of element attribute inspection around a search flow."""
        ele = self.find(self._search_input)
        search_enabled = ele.is_enabled()
        print(ele.text)  # placeholder text: search stocks/portfolios/users/discussions
        print(ele.location)  # {'x': 219, 'y': 60}
        print(ele.size)  # {'height': 36, 'width': 281}
        if search_enabled:
            ele.click()
            self.text(self._search_text, "alibaba")
            ali_ele = self.find((self._search_result[0], self._search_result[1].replace("$value", "阿里巴巴")))
            # ali_ele.is_displayed()
            print(ali_ele.get_attribute("displayed"))  # true

    def move_to(self, cur=None, target=None):
        """Swipe upward with hard-coded coordinates (cur/target are ignored)."""
        sleep(3)
        action = TouchAction(self._driver)
        # action.press(x=cur["x"], y=cur["y"]).wait(200).move_to(x=target["x"], y=target["y"]).release().perform()
        print(self._driver.get_window_rect())
        action.press(x=360, y=1000).wait(200).move_to(x=360, y=280).release().perform()

    def scroll_and_search_with_android_selector(self):
        """Use UiAutomator selectors to scroll a list into view and click."""
        loc = (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("关注")')
        WebDriverWait(self._driver, 10).until(EC.visibility_of_element_located(loc))
        self._driver.find_element_by_android_uiautomator('new UiSelector().text("关注")').click()
        self._driver.find_element_by_android_uiautomator('new UiScrollable(new UiSelector().'
                                                         'scrollable(true).instance(0)).'
                                                         'scrollIntoView(new UiSelector().text("玉山落雨").'
                                                         'instance(0));').click()
        sleep(5)

    def toast(self):
        """Dump the page source (toasts appear in the XML hierarchy)."""
        print(self._driver.page_source)

    def clear(self, locator):
        """Clear the text of the located input."""
        self.find(locator).clear()

    def search_get_price(self, value, code):
        """Search, open the first result, read the price for *code*, close search."""
        self.click(self._search_input)
        self.text(self._search_text, value)
        self.click(self._search_result_first)
        price = self.text((self._result_price_with_code[0], self._result_price_with_code[1].replace("$code", code)))
        self.click(self._close_search)
        return price

    def mobile_call(self, phone_number="13883256868", action=GsmCallActions.CALL):
        """Emulate a GSM call (not supported on the MuMu emulator; requires
        a stock Android emulator)."""
        # action:
        # GsmCallActions.CALL
        # GsmCallActions.ACCEPT
        # GsmCallActions.CANCEL
        # GsmCallActions.HOLD
        self._driver.make_gsm_call(phone_number, action)

    def msg(self, phone_number="13537773695", message="Hello world!"):
        """Emulate an incoming SMS (not supported on the MuMu emulator;
        requires a stock Android emulator)."""
        self._driver.send_sms(phone_number, message)

    def network(self, connection_type=1):
        """Switch the network connection type, then restore to 6 (all on)."""
        self._driver.set_network_connection(connection_type)
        sleep(3)
        self._driver.set_network_connection(6)
        sleep(3)

    def screenshot_as_file(self, path="./photos/img.png"):
        """Save a screenshot of the current screen to *path*."""
        self._driver.get_screenshot_as_file(path)

    def webview(self):
        """Open the trade tab, switch into the webview context and fill in
        the account-opening form."""
        self.click((self._tab[0], self._tab[1].replace("$tab", "交易")))
        sleep(10)
        print(self._driver.contexts)
        # "Open account now": switch to the webview context.
        self._driver.switch_to.context(self._driver.contexts[-1])
        sleep(10)
        # print(self._driver.window_handles)
        loc1 = (MobileBy.XPATH, "//*[id='Layout_app_3V4']/div/div/ul/li[1]/div[2]/h1")
        WebDriverWait(self._driver, 10).until(EC.element_to_be_clickable(loc1))
        self.click(loc1)
        sleep(10)
        handle = self._driver.window_handles[-1]
        self._driver.switch_to.window(handle)
        # Fill in the account-opening information.
        loc2 = (MobileBy.ID, "phone-number")
        loc3 = (MobileBy.ID, "code")
        loc4 = (MobileBy.CSS_SELECTOR, ".btn-submit")
        self.text(loc2, "13810120202")
        self.text(loc3, "6666")
        self.click(loc4)
|
[
"[email protected]"
] | |
e232ea8556be487081ad7ae17a32d47bd88efdad
|
31e6ca145bfff0277509dbd7c4b44b8deddf3334
|
/LeetCode/Graph/combination-sum.py
|
1bad4a940655a4357b9828e4c8a4c2eb18a168a3
|
[] |
no_license
|
brillantescene/Coding_Test
|
2582d6eb2d0af8d9ac33b8e829ff8c1682563c42
|
0ebc75cd66e1ccea3cedc24d6e457b167bb52491
|
refs/heads/master
| 2023-08-31T06:20:39.000734 | 2021-10-15T10:51:17 | 2021-10-15T10:51:17 | 254,366,460 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 459 |
py
|
class Solution:
    """LeetCode 39 — Combination Sum."""

    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
        """Return every combination of candidates (with repetition allowed)
        whose sum equals target; combinations are emitted in DFS order.
        """
        combos: List[List[int]] = []

        def backtrack(remaining: int, start: int, chosen: List[int]) -> None:
            # Overshot: abandon this branch.
            if remaining < 0:
                return
            # Exact hit: record the current combination.
            if remaining == 0:
                combos.append(chosen)
                return
            # Reuse the current candidate or move on; never look backwards,
            # which keeps combinations unique.
            for idx in range(start, len(candidates)):
                backtrack(remaining - candidates[idx], idx, chosen + [candidates[idx]])

        backtrack(target, 0, [])
        return combos
|
[
"[email protected]"
] | |
5588a9b58bb4811699015d008966309f1b432923
|
76a01339f7ca19536a07d66e18ff427762157a2a
|
/codeforces/Python/serval_and_bus.py
|
49a999fb8f0c58c2e96f04c61667f1b963aee56a
|
[] |
no_license
|
shaarangg/CP-codes
|
75f99530921a380b93d8473a2f2a588dc35b0beb
|
94fc49d0f20c02da69f23c74e26c974dfe122b2f
|
refs/heads/main
| 2023-07-19T21:31:40.011853 | 2021-09-07T05:22:28 | 2021-09-07T05:22:28 | 332,644,437 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 282 |
py
|
# Codeforces "Serval and Bus": given n bus routes (first arrival s,
# interval d) and Serval's arrival time t, print the 1-based index of the
# route whose next bus arrives soonest at or after t.
n,t = map(int,input().split())
m=10**9  # best waiting time found so far (sentinel: effectively infinity)
j=0      # index of the best route
for i in range(n):
    s,d = map(int,input().split())
    if(t<=s):
        # First bus has not arrived yet: wait until s.
        a=s-t
    else:
        # First bus already passed: wait for the next multiple of d.
        a=t-s
        if(a%d==0):
            a=0
        else:
            a = (a//d + 1)*d -t + s
    if(m>a):
        m=a
        j=i+1
print(j)
|
[
"[email protected]"
] | |
a20ec095f9065df80a1ba32f675716abe0875c05
|
26c4426d2c9cd10fd7d4a73609512e69e31b64ba
|
/justone/mayflower/products/forms.py
|
452a35e79f1ecaab5846dfb47812af7c3869b763
|
[] |
no_license
|
KirillUdod/html2exc
|
550761213eb6edd7d3ea4787938cce65584606c3
|
60569f01822a15b2e5b6884a42774cd428953700
|
refs/heads/master
| 2021-01-15T17:07:05.906492 | 2016-01-06T11:51:38 | 2016-01-06T11:51:38 | 34,809,072 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,453 |
py
|
from django import forms
from products.models import Bouquet
class DependenciesForm(forms.ModelForm):
    """ModelForm that toggles the ``required`` flag of dependent fields.

    The model may declare ``dependencies`` as a dict mapping
    ``(depend_field, depend_field_value)`` to the field name(s) that become
    required when the controlling field holds that value — in submitted
    POST data or on the bound instance.

    NOTE(review): uses ``dict.iteritems`` and ``unicode`` — Python 2 only.
    """
    def __init__(self, *args, **kwargs):
        super(DependenciesForm, self).__init__(*args, **kwargs)
        instance = getattr(self, 'instance', None)
        dependencies = getattr(self.Meta.model, 'dependencies', {})
        if isinstance(dependencies, dict):
            for (depend_field, depend_field_value), fields in dependencies.iteritems():
                # Only boolean or choice fields may drive a dependency.
                if not isinstance(self.fields[depend_field], forms.BooleanField)\
                        and not getattr(self.fields[depend_field], 'choices', None):
                    raise ValueError()
                if not isinstance(fields, (list, tuple)):
                    fields = [fields]
                required = False
                if self.data:
                    post_value = self.data.get(self.add_prefix(depend_field))
                    # Checkbox widgets post "on"; normalise for comparison
                    # against a boolean trigger value.
                    if post_value == 'on' and isinstance(depend_field_value, bool):
                        post_value = 'True'
                    if post_value == unicode(depend_field_value):
                        required = True
                elif instance and getattr(instance, depend_field, None) == depend_field_value:
                    required = True
                for field in fields:
                    self.fields[field].required = required
class BouquetAdminForm(DependenciesForm):
    """Admin form for Bouquet using the dependency-driven required logic."""
    class Meta:
        model = Bouquet
|
[
"[email protected]"
] | |
ec1d8c4d661870efcce6dd2ea0b18baee2087b45
|
f21109a5c23340447d0e3d34f14299c30e49d023
|
/Dynamic Programming/11. Longest Common Subsequence.py
|
a8f0e898a3fad5f7001ac206032d7ee02a013de3
|
[] |
no_license
|
ShashankSinha98/FAANG-Questions
|
45366004c3176a3c11ef554a25a11fe21e53ebca
|
73ef742b3747e89d32d384baa6acf35044bf3ce0
|
refs/heads/master
| 2022-12-21T09:42:51.796086 | 2020-09-24T08:24:47 | 2020-09-24T08:24:47 | 286,765,370 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 598 |
py
|
# Number of test cases.
t = int(input())
def common_lcs(str1, n, str2, m):
    """Return the length of the longest common subsequence of
    str1[:n] and str2[:m] via classic bottom-up DP (O(n*m) time/space).
    """
    table = [[0] * (m + 1) for _ in range(n + 1)]
    for row in range(1, n + 1):
        for col in range(1, m + 1):
            if str1[row - 1] == str2[col - 1]:
                # Characters match: extend the LCS of both prefixes.
                table[row][col] = table[row - 1][col - 1] + 1
            else:
                # Otherwise carry over the best of dropping one character.
                table[row][col] = max(table[row - 1][col], table[row][col - 1])
    return table[n][m]
def display(arr):
    """Print a 2-D table row by row (each cell followed by a space),
    then a trailing blank line. Returns None.
    """
    for row in arr:
        # One print per row: join reproduces the "cell " spacing of the
        # original per-cell print(..., end=" "), newline included.
        print("".join("{} ".format(cell) for cell in row))
    print()
# For each test case: read the two lengths, the two strings, and print the
# length of their longest common subsequence.
while t!=0:
    t-=1
    n,m = [int(i) for i in input().split()]
    str1 = input()
    str2 = input()
    res = common_lcs(str1,n,str2,m)
    print(res)
|
[
"[email protected]"
] | |
446d6d7faa595deb53a808126c8a2aced62533ca
|
00b86f883694b17575a514227960b963d3b6179b
|
/Analysis/python/regions.py
|
fd5293018c7e89c2e26d88fe5e64bddca3efeb61
|
[] |
no_license
|
HephyAnalysisSW/TTZRun2EFT
|
1b33a6bad49d0d6e119e49c74faa35dee0e4bb0e
|
730a7465d4cbde52649965ed0e2a5b29bcc309c3
|
refs/heads/master
| 2020-04-30T16:40:46.454225 | 2019-04-18T08:09:46 | 2019-04-18T08:09:46 | 176,956,090 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,950 |
py
|
from TTZRun2EFT.Analysis.Region import Region
from TTZRun2EFT.Analysis.Region import texString
from TTZRun2EFT.Analysis.Region import allowedVars
from math import pi
def getRegionsFromThresholds(var, vals, gtLastThreshold = True):
    """Build consecutive Region objects (vals[i], vals[i+1]) over *var*.

    NOTE(review): ``gtLastThreshold`` is accepted but never used; confirm
    whether the last bin was meant to be open-ended when it is True.
    """
    return [Region(var, (vals[i], vals[i+1])) for i in range(len(vals)-1)]
def getRegions2D(varOne, varOneThresholds, varTwo, varTwoThresholds):
    """Return the Cartesian product of the 1-D regions of the two
    variables, combining each pair with Region addition.
    """
    return [
        regionOne + regionTwo
        for regionOne in getRegionsFromThresholds(varOne, varOneThresholds)
        for regionTwo in getRegionsFromThresholds(varTwo, varTwoThresholds)
    ]
def simpleStringToDict( simpleString ):
    """Parse a cut string like ``"pt20To120_eta0To2.4"`` into a dict
    mapping each variable name to a (low, high) float tuple; a single
    threshold yields (value, -1).
    """
    # Replace each known variable by an "_"-free placeholder so splitting
    # on "_" cannot break a variable name apart.
    encoded = simpleString
    for idx, var in enumerate(allowedVars):
        encoded = encoded.replace(var, "var%i" % idx)
    cutDict = {}
    for piece in encoded.split("_"):
        for idx, var in enumerate(allowedVars):
            token = "var" + str(idx)
            if token not in piece:
                continue
            bounds = tuple(map(float, piece.replace("var%i" % idx, "").split("To")))
            if len(bounds) == 1:
                bounds = (bounds[0], -1)
            cutDict.update({var: bounds})
    return cutDict
def dictToCutString( dict ):
    """Render ``{var: (low, high)}`` as a "&&"-joined cut string;
    an upper bound of -1 (or below) means no upper cut.
    """
    clauses = []
    for var in dict.keys():
        lower, upper = dict[var][0], dict[var][1]
        clause = var + ">=" + str(lower)
        if upper > -1:
            clause += "&&" + var + "<" + str(upper)
        clauses.append(clause)
    return "&&".join(clauses)
def simpleStringToCutString( cutString ):
    """Convenience wrapper: parse a simple cut string and render it back
    as a "&&"-joined selection string."""
    return dictToCutString( simpleStringToDict( cutString ) )
#Put all sets of regions that are used in the analysis, closure, tables, etc.
#differential
# Gen-level photon pT bin edges; -999 presumably marks an open-ended
# overflow bin — confirm against Region's handling of negative upper edges.
thresholds = [ 20, 120, 220, 320, 420, -999 ]
genTTZRegions = getRegionsFromThresholds( "GenPhoton_pt[0]", thresholds )
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.