id (string, length 1-8) | text (string, length 6-1.05M) | dataset_id (1 class)
---|---|---
4849459
|
WIDTH = 200
HEIGHT = 200
PIXELSIZE = 4
BRUSHSIZE = 1
# COLORS
BACKGROUND = (0, 0, 0)
CURSOR = (90, 90, 90)
CURSORALPHA = 90
GRID = (30, 30, 30)
DEFAULTTEMP = 297.5
GLOBAL_TEMP_FACTOR = 0.2
GLOBAL_TEMP_RATE = 0.6
MAXUPDATE = 60 # frames that an element needs to be stationary before it freezes
BORDER = [2, 2, 2, 256] # top, right, down, left
FILETYPES = ("Sandbox Files", '*.sbx')
THEME = "theme"
|
StarcoderdataPython
|
6516545
|
import binascii
import struct
import hashlib
import math
import bittensor
import rich
import time
import torch
import numbers
import pandas
from typing import Tuple, List, Union, Optional
def indexed_values_to_dataframe (
prefix: Union[str, int],
index: Union[list, torch.LongTensor],
values: Union[list, torch.Tensor],
filter_zeros: bool = False
) -> 'pandas.DataFrame':
# Type checking.
if not isinstance(prefix, str) and not isinstance(prefix, numbers.Number):
raise ValueError('Passed prefix must have type str or Number')
if isinstance(prefix, numbers.Number):
prefix = str(prefix)
if not isinstance(index, list) and not isinstance(index, torch.Tensor):
raise ValueError('Passed uids must have type list or torch.Tensor')
if not isinstance(values, list) and not isinstance(values, torch.Tensor):
raise ValueError('Passed values must have type list or torch.Tensor')
if not isinstance(index, list):
index = index.tolist()
if not isinstance(values, list):
values = values.tolist()
index = [ idx_i for idx_i in index if idx_i < len(values) and idx_i >= 0 ]
dataframe = pandas.DataFrame(columns=[prefix], index = index )
for idx_i in index:
value_i = values[ idx_i ]
if value_i > 0 or not filter_zeros:
dataframe.loc[idx_i] = pandas.Series( { str(prefix): value_i } )
return dataframe
def unbiased_topk( values, k, dim=0, sorted = True, largest = True):
r""" Selects topk as in torch.topk but does not bias lower indices when values are equal.
Args:
values: (torch.Tensor)
Values to index into.
k: (int):
Number to take.
Return:
topk: (torch.Tensor):
topk k values.
indices: (torch.LongTensor)
indices of the topk values.
"""
permutation = torch.randperm(values.shape[ dim ])
permuted_values = values[ permutation ]
topk, indices = torch.topk( permuted_values, k, dim = dim, sorted=sorted, largest=largest )
return topk, permutation[ indices ]
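# Editor's illustrative sketch (not part of the original module): with tied values,
# torch.topk alone always prefers lower indices; the random permutation above removes
# that bias. Wrapped in a helper so it does not run at import time.
def _example_unbiased_topk():
    values = torch.tensor([1.0, 1.0, 1.0, 0.5])
    topk, indices = unbiased_topk(values, k=2)
    # topk is tensor([1., 1.]); indices is a random 2-element subset of {0, 1, 2}
    return topk, indices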
def hex_bytes_to_u8_list( hex_bytes: bytes ):
hex_chunks = [int(hex_bytes[i:i+2], 16) for i in range(0, len(hex_bytes), 2)]
return hex_chunks
def u8_list_to_hex( values: list ):
total = 0
for val in reversed(values):
total = (total << 8) + val
return total
def create_seal_hash( block_hash:bytes, nonce:int ) -> bytes:
nonce_bytes = binascii.hexlify(nonce.to_bytes(8, 'little'))
block_bytes = block_hash.encode('utf-8')[2:]
pre_seal = nonce_bytes + block_bytes
seal = hashlib.sha256( bytearray(hex_bytes_to_u8_list(pre_seal)) ).digest()
return seal
def seal_meets_difficulty( seal:bytes, difficulty:int ):
seal_number = int.from_bytes(seal, "big")
product = seal_number * difficulty
limit = int(math.pow(2,256) - 1)
if product > limit:
return False
else:
return True
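# Editor's note (illustrative, not from the original source): the test above requires
# seal_number * difficulty to stay below a ~2**256 limit, so doubling the difficulty
# roughly halves the share of seals that qualify.
def _example_seal_check():
    always_ok = seal_meets_difficulty(b'\x00' * 32, difficulty=1)   # zero seal passes any difficulty
    too_big = seal_meets_difficulty(b'\xff' * 32, difficulty=2)     # maximal seal fails at difficulty 2
    return always_ok, too_big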
def solve_for_difficulty( block_hash, difficulty ):
meets = False
nonce = -1
while not meets:
nonce += 1
seal = create_seal_hash( block_hash, nonce )
meets = seal_meets_difficulty( seal, difficulty )
if nonce > 1:
break
return nonce, seal
def solve_for_difficulty_fast( subtensor ):
block_number = subtensor.get_current_block()
difficulty = subtensor.difficulty
block_hash = subtensor.substrate.get_block_hash( block_number )
while block_hash == None:
block_hash = subtensor.substrate.get_block_hash( block_number )
block_bytes = block_hash.encode('utf-8')[2:]
meets = False
nonce = -1
limit = int(math.pow(2,256) - 1)
best = math.inf
update_interval = 100000
start_time = time.time()
console = bittensor.__console__
with console.status("Solving") as status:
while not meets:
nonce += 1
# Create seal.
nonce_bytes = binascii.hexlify(nonce.to_bytes(8, 'little'))
pre_seal = nonce_bytes + block_bytes
seal = hashlib.sha256( bytearray(hex_bytes_to_u8_list(pre_seal)) ).digest()
seal_number = int.from_bytes(seal, "big")
product = seal_number * difficulty
if product - limit < best:
best = product - limit
best_seal = seal
if product < limit:
return nonce, block_number, block_hash, difficulty, seal
if nonce % update_interval == 0:
itrs_per_sec = update_interval / (time.time() - start_time)
start_time = time.time()
difficulty = subtensor.difficulty
block_number = subtensor.get_current_block()
block_hash = subtensor.substrate.get_block_hash( block_number)
while block_hash == None:
block_hash = subtensor.substrate.get_block_hash( block_number)
block_bytes = block_hash.encode('utf-8')[2:]
status.update("Solving\n Nonce: [bold white]{}[/bold white]\n Iters: [bold white]{}/s[/bold white]\n Difficulty: [bold white]{}[/bold white]\n Block: [bold white]{}[/bold white]\n Best: [bold white]{}[/bold white]".format( nonce, int(itrs_per_sec), difficulty, block_hash.encode('utf-8'), binascii.hexlify(best_seal) ))
def create_pow( subtensor ):
nonce, block_number, block_hash, difficulty, seal = solve_for_difficulty_fast( subtensor )
return {
'nonce': nonce,
'difficulty': difficulty,
'block_number': block_number,
'block_hash': block_hash,
'work': binascii.hexlify(seal)
}
|
StarcoderdataPython
|
9732547
|
# Generated by Django 3.0.10 on 2020-09-11 16:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djcelery_model', '0003_auto_20200713_1254'),
]
operations = [
migrations.AlterField(
model_name='modeltaskmeta',
name='object_id',
field=models.IntegerField(db_index=True),
),
]
|
StarcoderdataPython
|
9710100
|
#!/usr/bin/env python -tt
"""
unittests.test_security
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
import logging
import os
# import sys
import dbsign.logger as L
import dbsign.security as S
import dbsign.shell as sh
log = L.get_logger(__name__)
@unittest.skip("need to mock identity/p12 file")
class TestIdentity(unittest.TestCase):
def setUp(self):
self.keyfile = S.keychain_to_file("debugsign_test")
self.password = "<PASSWORD>"
self.identity = 'debug_codesign'
# store and override loglevel
self.init_loglevel = L._root.level
L.set_level(logging.CRITICAL)
# create test keychain
self.assertTrue(S.create_keychain(self.keyfile, self.password))
def tearDown(self):
# restore loglevel
L.set_level(self.init_loglevel)
# force remove test keychain
if os.path.exists(self.keyfile):
os.remove(self.keyfile)
@unittest.skip("need to mock p12 file")
def test_import_exists(self): # type: () -> ()
keydb = self.keyfile
passwd = self.password  # assumed: the redacted placeholder referred to the password set in setUp
ident = self.identity
id_file = None
self.assertFalse(S.find_identity(keydb, ident))
self.assertTrue(S.import_identity(keydb, ident, id_file, passwd))
self.assertTrue(S.find_identity(keydb, ident))
@unittest.skip("need to mock p12 file")
@unittest.skipUnless(os.getenv(S.UNSAFE_FLAG), "Requires --unsafe")
def test_trust(self): # type: () -> ()
keydb = self.keyfile
passwd = self.password  # assumed: the redacted placeholder referred to the password set in setUp
ident = self.identity
id_file = None
self.assertTrue(S.import_identity(keydb, ident, id_file, passwd))
self.assertTrue(S.find_identity(keydb, ident))
self.assertTrue(S.trust_identity(keydb, ident))
@unittest.skip("need to mock a trusted identity")
def test_verify(self): # type: () -> ()
self.fail()
@unittest.skip("need to mock a trusted identity")
def test_verify_filename(self): # type: () -> ()
self.fail()
@unittest.skip("need to mock p12 file")
def test_delete(self): # type: () -> ()
self.fail()
class TestKeychain(unittest.TestCase):
def setUp(self):
self.keyfile = S.keychain_to_file("debugsign_test")
self.password = "<PASSWORD>"
self.init_loglevel = L._root.level
L.set_level(logging.CRITICAL)
def tearDown(self):
L.set_level(self.init_loglevel)
if os.path.exists(self.keyfile):
os.remove(self.keyfile)
def test_keychain_to_file(self):
ext = S._KEYCHAIN_EXT
login_path = os.path.expanduser(
os.path.join("~/Library/Keychains", "login." + ext))
self.assertTrue(login_path, S.keychain_to_file('login'))
def test_derive_keychain_extension(self):
"""
Because the tested method is itself guesswork,
the test is rather simplistic:
- generate expected location of user's login keychain
- verify that "$HOME/Library/Keychains/login.${keychain_extension}"
exists and is valid
"""
login_keychain = os.path.expanduser(
"~/Library/Keychains/login.{}".format(
S.derive_keychain_extension()))
self.assertTrue(os.path.exists(login_keychain))
self.assertTrue(os.access(login_keychain, os.R_OK))
def test_keychain_exists(self):
"""
- assert existing keychain => True
- assert non-existent keychain => False
"""
valid_keychain = S.keychain_to_file("login")
with self.subTest(keychain=valid_keychain):
self.assertTrue(S.keychain_exists(valid_keychain))
invalid_keychain = S.keychain_to_file("invalid")
with self.subTest(keychain=invalid_keychain):
self.assertFalse(S.keychain_exists(invalid_keychain))
def test_keychain_operations(self):
"""
- assert keychain does not exist
- create keychain and assert exists
- lock keychain
- unlock keychain and assert exists
"""
keyfile = self.keyfile
password = self.password  # assumed: the redacted placeholder referred to the password set in setUp
# some assorted negatives
self.assertFalse(S.create_keychain('/tmp', password))
# assert keychain does not exist
self.assertFalse(S.keychain_exists(keyfile))
# create and assert success
res_create = S.create_keychain(keyfile, password)
self.assertTrue(res_create)
self.assertTrue(S.keychain_exists(keyfile))
# assert second creation succeeds
self.assertTrue(S.create_keychain(keyfile, password))
# keychain is unlocked at creation; lock it to test unlocking
cmd_lock = sh.run(['security', 'lock-keychain', keyfile])
self.assertTrue(cmd_lock)
self.assertFalse(S.unlock_keychain(keyfile, ''))
self.assertTrue(S.unlock_keychain(keyfile, password))
# ensure keychain settings were set correctly
cmd_info = sh.run(['security', 'show-keychain-info', keyfile])
self.assertTrue(cmd_info)
self.assertRegexpMatches(cmd_info.stderr, r"\bno-timeout\b", cmd_info)
# delete with backup
res_delete = S.delete_keychain(keyfile, backup=True)
self.assertTrue(res_delete)
backup_file = res_delete.value
# assert backup was made
self.assertTrue(os.path.exists(backup_file))
# assert keychain is gone
self.assertFalse(S.keychain_exists(keyfile))
self.assertFalse(os.path.exists(keyfile))
# cleanup backup file
os.unlink(backup_file)
|
StarcoderdataPython
|
4853787
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# urls.py for app 'page'
#
from django.conf.urls import patterns, include, url
from notice.views import ListNoticeView
urlpatterns = patterns('',
# notice list view
url(r'^list/$', ListNoticeView.as_view(),
name='list_notice'),
)
|
StarcoderdataPython
|
140139
|
<filename>webapp/apps/test_assets/utils.py
import json
import os
import sys
from ..taxbrain.compute import MockCompute
from ..taxbrain.models import TaxSaveInputs, OutputUrl
from ..taxbrain.forms import PersonalExemptionForm
from ..dynamic import views
from ..taxbrain import views
from django.core.files.uploadedfile import SimpleUploadedFile
NUM_BUDGET_YEARS = int(os.environ.get("NUM_BUDGET_YEARS", "10"))
def get_dropq_compute_from_module(module_import_path, attr='dropq_compute',
MockComputeObj=MockCompute, **mc_args):
"""
mocks dropq compute object from specified module
returns: mocked dropq compute object
"""
module_views = sys.modules[module_import_path]
setattr(module_views, attr, MockComputeObj(**mc_args))
return getattr(module_views, attr)
def do_micro_sim(client, data, tb_dropq_compute=None, dyn_dropq_compute=None,
compute_count=None, post_url='/taxbrain/'):
"""
do the proper sequence of HTTP calls to run a microsim
tb_dropq_compute: mocked taxbrain dropq_compute object; set to default
config if None
dyn_dropq_compute: mocked dynamic dropq_compute object; set to default
config if None
compute_count: number of jobs submitted; only checked in quick_calc tests
post_url: url to post data; is also set to /taxbrain/file/ for file_input
tests
returns: response object, taxbrain mock dropq compute object,
dynamic dropq compute object, primary key for model run
"""
#Monkey patch to mock out running of compute jobs
if tb_dropq_compute is None:
tb_dropq_compute = get_dropq_compute_from_module(
'webapp.apps.taxbrain.views',
num_times_to_wait=0
)
if dyn_dropq_compute is None:
dyn_dropq_compute = get_dropq_compute_from_module(
'webapp.apps.dynamic.views',
num_times_to_wait=1
)
response = client.post(post_url, data)
# Check that redirect happens
assert response.status_code == 302
idx = response.url[:-1].rfind('/')
assert response.url[:idx].endswith("taxbrain")
# Check for good response
response2 = client.get(response.url)
# TODO: check compute count once NUM_BUDGET_YEARS env variable issue is
# resolved
assert response2.status_code == 200
if compute_count is not None:
assert tb_dropq_compute.count == compute_count
# return response
return {"response": response,
"tb_dropq_compute": tb_dropq_compute,
"dyn_dropq_compute": dyn_dropq_compute,
"pk": response.url[idx+1:-1]}
def check_posted_params(mock_compute, params_to_check, start_year):
"""
Make sure posted params match expected results
user_mods: parameters that are actually passed to taxcalc
params_to_check: gives truth value for parameters that we want to check
(formatted as taxcalc dict style reform)
"""
last_posted = mock_compute.last_posted
user_mods = json.loads(last_posted["user_mods"])
assert last_posted["first_budget_year"] == int(start_year)
for year in params_to_check:
for param in params_to_check[year]:
act = user_mods["policy"][str(year)][param]
exp = params_to_check[year][param]
assert exp == act
def get_post_data(start_year, _ID_BenefitSurtax_Switches=True, quick_calc=False):
"""
Convenience function for posting GUI data
"""
data = {u'has_errors': [u'False'],
u'start_year': unicode(start_year),
'csrfmiddlewaretoken':'abc123'}
if _ID_BenefitSurtax_Switches:
switches = {u'ID_BenefitSurtax_Switch_0': [u'True'],
u'ID_BenefitSurtax_Switch_1': [u'True'],
u'ID_BenefitSurtax_Switch_2': [u'True'],
u'ID_BenefitSurtax_Switch_3': [u'True'],
u'ID_BenefitSurtax_Switch_4': [u'True'],
u'ID_BenefitSurtax_Switch_5': [u'True'],
u'ID_BenefitSurtax_Switch_6': [u'True']}
data.update(switches)
if quick_calc:
data['quick_calc'] = 'Quick Calculation!'
return data
def get_file_post_data(start_year, reform_text, assumptions_text=None, quick_calc=False):
"""
Convenience function for posting file input data
"""
tc_file = SimpleUploadedFile("test_reform.json", reform_text)
data = {u'docfile': tc_file,
u'has_errors': [u'False'],
u'start_year': unicode(start_year),
u'quick_calc': quick_calc,
'csrfmiddlewaretoken':'<PASSWORD>'}
if assumptions_text is not None:
tc_file2 = SimpleUploadedFile("test_assumptions.json",
assumptions_text)
data['assumpfile'] = tc_file2
return data
def get_taxbrain_model(fields, first_year=2017,
quick_calc=False, taxcalc_vers="0.13.0",
webapp_vers="1.2.0", exp_comp_datetime = "2017-10-10"):
fields = fields.copy()
del fields['_state']
del fields['creation_date']
del fields['id']
for key in fields:
if isinstance(fields[key], list):
fields[key] = ','.join(map(str, fields[key]))
personal_inputs = PersonalExemptionForm(first_year, fields)
model = personal_inputs.save()
model.job_ids = '1,2,3'
model.json_text = None
model.first_year = first_year
model.quick_calc = quick_calc
model.save()
unique_url = OutputUrl()
unique_url.taxcalc_vers = taxcalc_vers
unique_url.webapp_vers = webapp_vers
unique_url.unique_inputs = model
unique_url.model_pk = model.pk
unique_url.exp_comp_datetime = exp_comp_datetime
unique_url.save()
return unique_url
|
StarcoderdataPython
|
11354918
|
import Currency
n = float(input('Please, type a value (R$): '))
Currency.Resume(n)
|
StarcoderdataPython
|
301430
|
#!/usr/bin/env python3
"""
Plotting routines dedicated to time-series or temporal trends
"""
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
# import seaborn as sns
#--------------------------------------
# Time-Series Plots
#--------------------------------------
def OccurancePlot(archive,
searchterms,
fields=None,
alpha=0.2,
startdate=None,
enddate=None):
"""
Plots a time-series of occurrences of a given
set of search terms in the collection as a scatter
plot of vertical lines
Inputs:
archive (obj) : archive.Archive() object
searchterms (list) : Terms for which to search.
fields (list) : Metadata fields in which to look.
Shorthand expected. Default searches through
all in config.fields_short.
alpha (float) : Alpha of marker face
startdate (str) : Datetime (YYYYmmdd_HHMMSS) after which
to return data.
enddate (str) : Datetime (YYYYmmdd_HHMMSS) before which
to return data.
"""
# Instantiate figure
plt.figure(figsize=(8,len(searchterms)*1.3/3), dpi=200)
# Handle colors (tab10 or brg)
colors = cm.tab10(np.arange(len(searchterms)) \
/ float(len(searchterms)))
np.random.shuffle(colors)
for ii, term in enumerate(searchterms):
# Find filenames with search terms in field
sourcefiles = archive.FindSource([term], fields)
# Grab and filter by datetimes
data = archive.GrabData(sourcefiles, ['CreateDate'],
startdate=startdate,
enddate=enddate)
# Plot scatterplot w/ default spacing settings
plt.scatter(data['CreateDate'],
np.ones(len(data['CreateDate']))*(len(searchterms)-ii),
s=400, color=colors[ii], marker='|', alpha=alpha)
plt.ylim([0.5, len(searchterms) + 0.5])
plt.yticks(list(range(len(searchterms),0,-1)), searchterms)
plt.title('Occurances by Date')
return
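# Editor's illustrative usage (a sketch; the archive object, search terms and field names
# below are assumptions, not from the original source):
#
#     arc = archive.Archive(...)                # however the photo collection is opened
#     OccurancePlot(arc, ['beach', 'birthday'], fields=['Keywords'],
#                   startdate='20190101_000000', enddate='20201231_235959')
#     plt.show()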
def OccuranceMagnitude(archive,
searchterms,
fields=None,
alpha=0.4,
scale=5,
startdate=None,
enddate=None):
"""
Plots a scatterplot time-series of occurrences of
a given set of search terms in the collection,
with sizes of each marker reflecting the number
of appearances that day
Inputs:
archive (obj) : archive.Archive() object
searchterms (list) : Terms for which to search.
fields (list) : Metadata fields in which to look.
Shorthand expected. Default searches through
all in config.fields_short.
alpha (float) : Alpha of marker face
scale (int) : Scaling factor to apply to all markers
startdate (str) : Datetime (YYYYmmdd_HHMMSS) after which
to return data.
enddate (str) : Datetime (YYYYmmdd_HHMMSS) before which
to return data.
"""
# Instantiate figure
plt.figure(figsize=(8,len(searchterms)*1.3/3), dpi=300)
# Get array of colors from hsv:
colors = cm.hsv(np.arange(len(searchterms)) \
/ float(len(searchterms)))
colors = np.array(colors)
colors[:,3] = alpha
np.random.shuffle(colors)
for ii, term in enumerate(searchterms):
# Find filenames with search terms in field
sourcefiles = archive.FindSource([term], fields)
# Grab and filter by datetimes
data = archive.GrabData(sourcefiles, ['CreateDate'],
startdate=startdate,
enddate=enddate)
# Count totals by day
counts = data['CreateDate'].dt.normalize().value_counts()
dates = counts.index.to_series()
# Plot scatterplot w/ default spacing settings
plt.scatter(dates, np.ones(len(dates))*(len(searchterms)-ii),
s=counts*scale, color=colors[ii], marker='o',
edgecolors=(0,0,0,1), linewidth=0.5)
plt.ylim([0.5, len(searchterms) + 0.5])
plt.yticks(list(range(len(searchterms),0,-1)), searchterms)
plt.title('Occurances by Date')
return
def ViolinPlot(archive, terms, fields,
startdate=None, enddate=None,
refdate='19800101_000000',
palette='Set2', inner='points',
scale='area', cut=0, linewidth=0.8):
"""
Wrapper for the Seaborn violin plot function. For each keyword
in terms, find files for which that keyword appears in fields,
and plot a violin of occurrences by date. Most other attributes
are aesthetic adjustments fed into seaborn.violinplots()
Inputs:
archive (obj) : archive.Archive() object
terms (list) : Keywords to search for in Archive, fed into
Archive.FindSource()
fields (list) : Exif fields in which to search for terms,
fed into Archive.FindSource()
startdate (str) : Datetime (YYYYmmdd_HHMMSS) after which
to return data.
enddate (str) : Datetime (YYYYmmdd_HHMMSS) before which
to return data.
refdate (str) : Reference date which is used to convert
pandas datetime to numeric dates, ideally a value
similar to the dates returned from the collection
palette, inner, scale, cut, linewidth : See requirements
for seaborn.violinplot()
"""
# Check if Seaborn is available
try:
import seaborn as sns
except ImportError:
print("Function unavailable, requires installation of Seaborn")
print("Perform full setup for auxilary packages")
return
sns.set_theme(style="whitegrid")
dates = []
refdate = pd.to_datetime(refdate, format="%Y%m%d_%H%M%S")
for n, term in enumerate(terms):
# Create a random dataset across several variables
sourcefiles = archive.FindSource([term], fields)
# Grab and filter by datetimes
data = archive.GrabData(sourcefiles, ['CreateDate'],
startdate, enddate)
# Convert to numeric date
data['epoch'] = (data['CreateDate']-refdate)//pd.Timedelta("1d")
# Save to list
dates.append(data['epoch']/365.0 + refdate.year)
# Append all the dates into a new dataframe
df = pd.concat(dates, axis=1, keys=terms)
# Show each distribution with both violins and points
fig, ax = plt.subplots(figsize=(3,len(terms)/1.5), dpi=200)
ax = sns.violinplot(data=df, ax=ax, width=0.95, orient='h',
palette=palette, inner=inner, scale=scale,
cut=cut, linewidth=linewidth)
return
def RidgePlot(archive, terms, fields,
startdate=None, enddate=None,
refdate='19800101_000000',
palette='deep', bw_adjust=0.5,
aspect=8, height=0.8):
"""
Wrapper for the Seaborn Ridge plot. For each keyword
in terms, find files for which that keyword appears in fields,
and plot a kernel of occurrences by date. Most other attributes
are aesthetic adjustments fed into seaborn, see example at
https://seaborn.pydata.org/examples/kde_ridgeplot.html
Inputs:
archive (obj) : archive.Archive() object
terms (list) : Keywords to search for in Archive, fed into
Archive.FindSource()
fields (list) : Exif fields in which to search for terms,
fed into Archive.FindSource()
startdate (str) : Datetime (YYYYmmdd_HHMMSS) after which
to return data.
enddate (str) : Datetime (YYYYmmdd_HHMMSS) before which
to return data.
refdate (str) : Reference date which is used to convert
pandas datetime to numeric dates, ideally a value
similar to the dates returned from the collection
palette (str) : Seaborn color palette
bw_adjust (float) : Smoothness of the kernel
aspect (float) : width/height of figure
height (float) : height of each FacetGrid
"""
# Check if Seaborn is available
try:
import seaborn as sns
except ImportError:
print("Function unavailable, requires installation of Seaborn")
print("Perform full setup for auxilary packages")
return
sns.set_theme(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})
dates = []
refdate = pd.to_datetime(refdate, format="%Y%m%d_%H%M%S")
for n, term in enumerate(terms):
# Create a random dataset across several variables
sourcefiles = archive.FindSource([term], fields)
# Grab and filter by datetimes
data = archive.GrabData(sourcefiles, ['CreateDate'],
startdate, enddate)
dfl = pd.DataFrame()
# Convert to numeric date
dfl['epoch'] = (data['CreateDate']-refdate)//pd.Timedelta("1d")
dfl['epoch'] = dfl['epoch']/365.0 + refdate.year
# Create term column to create long-form tidy df
dfl['term'] = np.tile(term, len(dfl['epoch']))
dates.append(dfl)
# Append all the dates into a new dataframe
df = pd.concat(dates)
# Initialize the FacetGrid object
g = sns.FacetGrid(df, row="term", hue="term",
aspect=aspect, height=height, palette=palette)
# Draw the densities in a few steps
g.map(sns.kdeplot, "epoch",
bw_adjust=bw_adjust, clip_on=False,
fill=True, alpha=1, linewidth=1.5)
g.map(sns.kdeplot, "epoch", clip_on=False,
color="w", lw=2, bw_adjust=bw_adjust)
g.map(plt.axhline, y=0, lw=2, clip_on=False)
# Define and use a simple function to label the plot in axes coordinates
def label(x, color, label):
ax = plt.gca()
ax.text(0, .2, label, fontweight="bold", color=color,
ha="left", va="center", transform=ax.transAxes, fontsize=14)
g.map(label, "epoch")
# Set the subplots to overlap
g.fig.subplots_adjust(hspace=-0.25)
# Other minor details for tuning the plot
g.set_titles("")
g.set(yticks=[])
g.despine(bottom=True, left=True)
ax = plt.gca()
ax.tick_params(axis='x', which='major', labelsize=10, color='k')
ax.set_xlabel(None)
plt.gcf().set_dpi(200)
return
|
StarcoderdataPython
|
17295
|
<gh_stars>1-10
from open_anafi.models import Indicator, IndicatorParameter, IndicatorLibelle
from open_anafi.serializers import IndicatorSerializer
from .frame_tools import FrameTools
from open_anafi.lib import parsing_tools
from open_anafi.lib.ply.parsing_classes import Indic
import re
from django.db import transaction
from django.core.exceptions import ObjectDoesNotExist
class IndicatorTools:
@staticmethod
def calculate_max_depth(indicator):
"""Calculates the depth of an indicator (the max depth of all its parameters)
:param indicator: The indicator to evaluate
:type indicator: class:`open_anafi.models.Indicator`
"""
depth = 0
for parameter in indicator.parameters.all():
if parameter.depth > depth:
depth = parameter.depth
indicator.max_depth = depth
indicator.save()
@staticmethod
def update_depth(indicator):
"""Updates the depth of an indicator after an update.
Recursively updates all the affected indicators/frames
:param indicator: The indicator to evaluate
:type indicator: class:`open_anafi.models.Indicator`
"""
parameters = IndicatorParameter.objects.filter(original_equation__contains=indicator.name)
indicators_to_update = list(set([param.indicator for param in parameters]))
frames_to_update = list(indicator.frames.all())
if len(indicators_to_update) > 0:
for indic in indicators_to_update:
for frame in indic.frames.all(): frames_to_update.append(frame)
# For each indicator, we update the depth of all the parameters, then we calculate the max depth of the indicator
for param in indic.parameters.all(): IndicatorParameterTools.calculate_depth(param)
IndicatorTools.calculate_max_depth(indic)
for indic in indicators_to_update: IndicatorTools.update_depth(indic)
# We update the depth of the frames
frames_to_update = list(set(frames_to_update))
if len(frames_to_update) > 0:
for frame in frames_to_update: FrameTools.calculate_depth(frame)
#This method can be optimized
@staticmethod
def update_indicator(equation, description, id, libelle=None):
"""Update an indicator.
Note that we cannot modify the indicator's name.
:param equation: The updated equation (updated or not)
:type equation: str
:param description: The updated description
:type description: str
:param id: The indicator's id
:type id: int
:param libelle: An extra libelle for the indicator
:type libelle: str
:return: The updated indicator
:rtype: class:`open_anafi.models.Indicator`
"""
indic = Indicator.objects.get(id=id)
if libelle is not None:
indicator_libelle = IndicatorLibelle.objects.filter(indicator=indic)
if len(indicator_libelle) > 1:
raise Exception('Cet indicateur possède plusieurs libellés')
elif len(indicator_libelle) == 0:
indicator_libelle = IndicatorLibelle.objects.create(libelle=libelle, indicator=indic)
indicator_libelle.save()
else:
indicator_libelle = indicator_libelle[0]
indicator_libelle.libelle = libelle
indicator_libelle.save()
if description is not None :
with transaction.atomic():
indic.description = description
indic.save()
if equation is not None:
#
with transaction.atomic():
backup_indicator = IndicatorSerializer(indic).data
old_params = IndicatorParameter.objects.filter(indicator=indic)
old_params_ids = [ p.id for p in old_params].copy()
if len(backup_indicator.get('libelles')) > 1:
raise Exception('Cet indicateur possède plusieurs libellés')
parsing_tools.update_formula(equation, indic)
for parameter in IndicatorParameter.objects.filter(id__in=old_params_ids):
parameter.delete()
indic = Indicator.objects.get(name=backup_indicator.get('name'))
indic.save()
IndicatorTools.update_depth(indic)
return indic.name
@staticmethod
def check_equation_element(element):
if type(element) is Indic:
try:
Indicator.objects.get(name=element.name)
except ObjectDoesNotExist:
raise Exception(f"L'indicateur {element.name} n'existe pas.")
@staticmethod
def check_equation(equation):
try:
parsed_indicator = parsing_tools.parse_equation(equation)
for eq in parsed_indicator:
if type(eq['tree']) is tuple:
for element in eq['tree']:
IndicatorTools.check_equation_element(element)
else:
IndicatorTools.check_equation_element(eq['tree'])
except Exception as e:
raise Exception(f"Erreur dans la formule : {str(e)}")
@staticmethod
def check_indicator_usages_in_formulas(indicator):
"""
Checks if an indicator is part of a formula of any other indicator.
Used to check if an indicator is safe to remove.
:param indicator: The indicator to check
:type indicator: :class:`open_anafi.models.Indicator`
"""
result = [indicator_parameter.indicator.name for indicator_parameter in
IndicatorParameter.objects.filter(original_equation__icontains=indicator.name)]
return result
class IndicatorParameterTools:
@staticmethod
def calculate_depth(indicator_parameter):
"""Calculates the depth of an indicator parameter,
given that all the indicators present in its equation already exist and have the correct depth.
:param indicator_parameter: The indicator parameter to evaluate
:type indicator_parameter: class:`open_anafi.models.IndicatorParameter`
"""
depth = 0
indicators = IndicatorParameterTools.extract_indicators_from_equation(indicator_parameter.original_equation)
if len(indicators) == 0:
indicator_parameter.depth = 1
indicator_parameter.save()
for indicator in indicators:
if indicator.max_depth > depth:
depth = indicator.max_depth
indicator_parameter.depth = depth + 1
indicator_parameter.save()
@staticmethod
def extract_indicators_from_equation(equation):
"""Retrieves all the indicator objects contained in a equation
:param equation: An equation according to the defined language
:type equation: str
:return: The list of all the indicator objects present in the equation
:rtype: list of class:`open_anafi.models.Indicator`
"""
exp = re.compile(r'[\-+/*^(\[)\]]')
is_indicator = re.compile('[A-Z0-9]+(_[A-Z0-9]+)+')
split_equation = list(filter(None, map(str.strip, exp.split(equation))))
indicators = []
for item in split_equation:
if not is_indicator.match(item) : continue
try:
indic = Indicator.objects.get(name = item)
indicators.append(indic)
except ObjectDoesNotExist:
raise Exception(f"L'indicateur {item} n'existe pas.")
return indicators
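# Editor's note (illustrative, not from the original source): for an equation such as
# "IND_A + 2 * IND_B", exp.split() yields ['IND_A ', ' 2 ', ' IND_B']; after the
# strip/filter step, only tokens matching [A-Z0-9]+(_[A-Z0-9]+)+ (here IND_A and IND_B)
# are looked up as Indicator names, so bare numbers and operators are ignored.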
|
StarcoderdataPython
|
6656411
|
<reponame>daniele-sartiano/biaffine-parser
# -*- coding: utf-8 -*-
import argparse
from datetime import datetime
from parser import Model
from parser.cmds.cmd import CMD
from parser.utils.corpus import Corpus, TextCorpus
from parser.utils.data import TextDataset, batchify
import torch
class Predict(CMD):
def add_subparser(self, name, parser):
subparser = parser.add_parser(
name, help='Use a trained model to make predictions.'
)
subparser.add_argument('--prob', action='store_true',
help='whether to output probs')
subparser.add_argument('--fdata', default='data/ptb/test.conllx',
help='path to dataset')
subparser.add_argument('--fpred', default='pred.conllx',
help='path to predicted result')
subparser.add_argument('--raw-text', action='store_true',
help='raw text as input')
subparser.add_argument('--tokenizer-lang', default=None,
help='tokenizer language')
subparser.add_argument('--tokenizer-dir', default='.tokenizer-models',
help='path to saved tokenizer models')
return subparser
def __call__(self, args):
super(Predict, self).__call__(args)
print("Load the dataset")
if args.prob:
self.fields = self.fields._replace(PHEAD=Field('probs'))
if args.raw_text:
if args.tokenizer_lang is None:
raise argparse.ArgumentTypeError('With --raw-text param, it is mandatory to specify the --tokenizer-lang param')
corpus = TextCorpus.load(args.fdata, self.fields, args.tokenizer_lang, args.tokenizer_dir, use_gpu=args.device != 1)
else:
corpus = Corpus.load(args.fdata, self.fields)
dataset = TextDataset(corpus, [self.WORD, self.FEAT], args.buckets)
# set the data loader
dataset.loader = batchify(dataset, args.batch_size)
print(f"{len(dataset)} sentences, "
f"{len(dataset.loader)} batches")
print("Load the model")
self.model = Model.load(args.model)
self.model.args = args
print(f"{self.model}\n")
print("Make predictions on the dataset")
start = datetime.now()
pred_arcs, pred_rels, pred_probs = self.predict(dataset.loader)
total_time = datetime.now() - start
# restore the order of sentences in the buckets
indices = torch.tensor([i
for bucket in dataset.buckets.values()
for i in bucket]).argsort()
corpus.arcs = [pred_arcs[i] for i in indices]
corpus.rels = [pred_rels[i] for i in indices]
if args.prob:
corpus.probs = [pred_probs[i] for i in indices]
print(f"Save the predicted result to {args.fpred}")
corpus.save(args.fpred)
print(f"{total_time}s elapsed, "
f"{len(dataset) / total_time.total_seconds():.2f} Sents/s")
|
StarcoderdataPython
|
6660929
|
<reponame>chavarera/Selenium_Screenshot
import os
import sys
import time
DATA_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath(os.path.dirname(DATA_DIR)))
from Screenshot.Screenshot_Clipping import Screenshot
from selenium import webdriver
iedriver_path = os.path.abspath(DATA_DIR + '/IEDriverServer.exe')
def test_IE():
sc = Screenshot()
driver = webdriver.Ie(executable_path=iedriver_path)
url = 'http://yandex.ru'
driver.get(url)
time.sleep(10)
sc.full_Screenshot(driver, save_path='.', image_name='testimage.png',load_wait_time=5,is_load_at_runtime=True)
driver.close()
driver.quit()
|
StarcoderdataPython
|
4967622
|
<reponame>Waytoaniket/Myguard
import boto3
import csv
import os
import random
import string
import shutil
import time
import datetime
import base64
from dotenv import load_dotenv
from flask import Blueprint, current_app, render_template, url_for, redirect, request, session, flash
from werkzeug.utils import secure_filename
from ..extensions import mongo
register = Blueprint("register", __name__, static_folder="images", template_folder="templates")
@register.route("/")
def register_details():
return render_template("register_details.html")
@register.route("/form-result", methods=['POST','GET'])
def register_form_result():
if request.method == 'POST':
name = request.form['name']
user_id = str('MG-'+''.join(random.choices(string.ascii_uppercase + string.digits, k = 4)) )
if 'file' not in request.files:
flash('No image found')
file = request.files['image']
# print(file.read(),'===============>')
if file.filename == '':
flash('No image selected')
else:
image = request.files['image']
image_string = base64.b64encode(image.read())
image_string = image_string.decode('utf-8')
if file:
# filename = secure_filename(str(user_id)+".jpg")
# if(not os.path.exists(current_app.config['REGISTER_IMAGES_FOLDER'])):
# print(current_app.config['REGISTER_IMAGES_FOLDER'])
# os.makedirs(current_app.config['REGISTER_IMAGES_FOLDER'])
# file.save(os.path.join(current_app.config['REGISTER_IMAGES_FOLDER'], filename))
image = str(user_id)+".jpg"
#AWS Bucket upload
load_dotenv()
print(os.environ["ACCESS_KEY_ID"])
s3 = boto3.resource('s3',
aws_access_key_id = os.environ["ACCESS_KEY_ID"],
aws_secret_access_key = os.environ["SECRET_ACCESS_KEY"],
region_name='us-east-2')
file.seek(0)
data = file.read()
s3.Bucket('my-guard-bucket').put_object(Key=str(image), Body=data)
result = request.form
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
# MongoDB Atlas
user_entry = {
"mg_id": user_id,
"name": name,
"timestamp": timestamp
}
print(mongo)
print(mongo.db)
print(mongo.db.users)
user_collection = mongo.db.users
user_collection.insert_one(user_entry)
return render_template("register_form-result.html",
result = result,
file=image_string,
user_id=user_id,
name=name,
timestamp=timestamp)
@register.route("/users")
def users():
# Mongo DB Atlas - Users
results = mongo.db.users.find({})
return render_template("users.html", results=results)
|
StarcoderdataPython
|
3379699
|
import shutil
import pytest
from pytest import approx
import pandas as pd
import calliope
from calliope.test.common.util import check_error_or_warning
class TestModelPreproccesing:
def test_preprocess_national_scale(self):
calliope.examples.national_scale()
def test_preprocess_time_clustering(self):
calliope.examples.time_clustering()
def test_preprocess_time_resampling(self):
calliope.examples.time_resampling()
def test_preprocess_urban_scale(self):
calliope.examples.urban_scale()
@pytest.mark.filterwarnings("ignore:(?s).*Integer:calliope.exceptions.ModelWarning")
def test_preprocess_milp(self):
calliope.examples.milp()
def test_preprocess_operate(self):
calliope.examples.operate()
def test_preprocess_time_masking(self):
calliope.examples.time_masking()
class TestNationalScaleExampleModelSenseChecks:
def example_tester(self, solver='cbc', solver_io=None):
override = {
'model.subset_time': '2005-01-01',
'run.solver': solver,
}
if solver_io:
override['run.solver_io'] = solver_io
model = calliope.examples.national_scale(override_dict=override)
model.run()
assert model.results.storage_cap.to_pandas()['region1-1::csp'] == approx(45129.950)
assert model.results.storage_cap.to_pandas()['region2::battery'] == approx(6675.173)
assert model.results.energy_cap.to_pandas()['region1-1::csp'] == approx(4626.588)
assert model.results.energy_cap.to_pandas()['region2::battery'] == approx(1000)
assert model.results.energy_cap.to_pandas()['region1::ccgt'] == approx(30000)
assert float(model.results.cost.sum()) == approx(38988.7442)
assert float(
model.results.systemwide_levelised_cost.loc[{'carriers': 'power', 'techs': 'battery'}].item()
) == approx(0.063543, abs=0.000001)
assert float(
model.results.systemwide_capacity_factor.loc[{'carriers': 'power', 'techs': 'battery'}].item()
) == approx(0.2642256, abs=0.000001)
def test_nationalscale_example_results_cbc(self):
self.example_tester()
def test_nationalscale_example_results_gurobi(self):
try:
import gurobipy # pylint: disable=unused-import
self.example_tester(solver='gurobi', solver_io='python')
except ImportError:
pytest.skip('Gurobi not installed')
def test_nationalscale_example_results_cplex(self):
if shutil.which('cplex'):
self.example_tester(solver='cplex')
else:
pytest.skip('CPLEX not installed')
def test_nationalscale_example_results_glpk(self):
if shutil.which('glpsol'):
self.example_tester(solver='glpk')
else:
pytest.skip('GLPK not installed')
def test_considers_supply_generation_only_in_total_levelised_cost(self):
# calculation of expected value:
# costs = model.get_formatted_array("cost").sum(dim="locs")
# gen = model.get_formatted_array("carrier_prod").sum(dim=["timesteps", "locs"])
# lcoe = costs.sum(dim="techs") / gen.sel(techs=["ccgt", "csp"]).sum(dim="techs")
model = calliope.examples.national_scale()
model.run()
assert model.results.total_levelised_cost.item() == approx(0.067005, abs=1e-5)
def test_fails_gracefully_without_timeseries(self):
override = {
"locations.region1.techs.demand_power.constraints.resource": -200,
"locations.region2.techs.demand_power.constraints.resource": -400,
"techs.csp.constraints.resource": 100
}
with pytest.raises(calliope.exceptions.ModelError):
calliope.examples.national_scale(override_dict=override)
class TestNationalScaleExampleModelInfeasibility:
def example_tester(self):
model = calliope.examples.national_scale(
scenario='check_feasibility',
override_dict={'run.cyclic_storage': False}
)
model.run()
assert model.results.attrs['termination_condition'] in ['infeasible', 'other'] # glpk gives 'other' as result
assert 'systemwide_levelised_cost' not in model.results.data_vars
assert 'systemwide_capacity_factor' not in model.results.data_vars
def test_nationalscale_example_results_cbc(self):
self.example_tester()
class TestNationalScaleExampleModelOperate:
def example_tester(self):
with pytest.warns(calliope.exceptions.ModelWarning) as excinfo:
model = calliope.examples.national_scale(
override_dict={'model.subset_time': ['2005-01-01', '2005-01-03']},
scenario='operate')
model.run()
expected_warnings = [
'Energy capacity constraint removed from region1::demand_power as force_resource is applied',
'Energy capacity constraint removed from region2::demand_power as force_resource is applied',
'Resource capacity constraint defined and set to infinity for all supply_plus techs'
]
assert check_error_or_warning(excinfo, expected_warnings)
assert all(model.results.timesteps == pd.date_range('2005-01', '2005-01-03 23:00:00', freq='H'))
def test_nationalscale_example_results_cbc(self):
self.example_tester()
class TestNationalScaleResampledExampleModelSenseChecks:
def example_tester(self, solver='cbc', solver_io=None):
override = {
'model.subset_time': '2005-01-01',
'run.solver': solver,
}
if solver_io:
override['run.solver_io'] = solver_io
model = calliope.examples.time_resampling(override_dict=override)
model.run()
assert model.results.storage_cap.to_pandas()['region1-1::csp'] == approx(23563.444)
assert model.results.storage_cap.to_pandas()['region2::battery'] == approx(6315.78947)
assert model.results.energy_cap.to_pandas()['region1-1::csp'] == approx(1440.8377)
assert model.results.energy_cap.to_pandas()['region2::battery'] == approx(1000)
assert model.results.energy_cap.to_pandas()['region1::ccgt'] == approx(30000)
assert float(model.results.cost.sum()) == approx(37344.221869)
assert float(
model.results.systemwide_levelised_cost.loc[{'carriers': 'power', 'techs': 'battery'}].item()
) == approx(0.063543, abs=0.000001)
assert float(
model.results.systemwide_capacity_factor.loc[{'carriers': 'power', 'techs': 'battery'}].item()
) == approx(0.25, abs=0.000001)
def test_nationalscale_resampled_example_results_cbc(self):
self.example_tester()
def test_nationalscale_resampled_example_results_glpk(self):
if shutil.which('glpsol'):
self.example_tester(solver='glpk')
else:
pytest.skip('GLPK not installed')
class TestNationalScaleClusteredExampleModelSenseChecks:
def model_runner(self, solver='cbc', solver_io=None,
how='closest', storage_inter_cluster=False,
cyclic=False, storage=True):
override = {
'model.time.function_options': {
'how': how, 'storage_inter_cluster': storage_inter_cluster
},
'run.solver': solver,
'run.cyclic_storage': cyclic
}
if storage is False:
override.update({
'techs.battery.exists': False,
'techs.csp.exists': False
})
if solver_io:
override['run.solver_io'] = solver_io
model = calliope.examples.time_clustering(override_dict=override)
model.run()
return model
def example_tester_closest(self, solver='cbc', solver_io=None):
model = self.model_runner(solver=solver, solver_io=solver_io, how='closest')
# Full 1-hourly model run: 22312488.670967
assert float(model.results.cost.sum()) == approx(49670627.15297682)
# Full 1-hourly model run: 0.296973
assert float(
model.results.systemwide_levelised_cost.loc[{'carriers': 'power', 'techs': 'battery'}].item()
) == approx(0.137105, abs=0.000001)
# Full 1-hourly model run: 0.064362
assert float(
model.results.systemwide_capacity_factor.loc[{'carriers': 'power', 'techs': 'battery'}].item()
) == approx(0.064501, abs=0.000001)
def example_tester_mean(self, solver='cbc', solver_io=None):
model = self.model_runner(solver=solver, solver_io=solver_io, how='mean')
# Full 1-hourly model run: 22312488.670967
assert float(model.results.cost.sum()) == approx(22172253.328)
# Full 1-hourly model run: 0.296973
assert float(
model.results.systemwide_levelised_cost.loc[{'carriers': 'power', 'techs': 'battery'}].item()
) == approx(0.127783, abs=0.000001)
# Full 1-hourly model run: 0.064362
assert float(
model.results.systemwide_capacity_factor.loc[dict(carriers='power')].to_pandas().T['battery']
) == approx(0.044458, abs=0.000001)
def example_tester_storage_inter_cluster(self):
model = self.model_runner(storage_inter_cluster=True)
# Full 1-hourly model run: 22312488.670967
assert float(model.results.cost.sum()) == approx(21825515.304)
# Full 1-hourly model run: 0.296973
assert float(
model.results.systemwide_levelised_cost.loc[{'carriers': 'power', 'techs': 'battery'}].item()
) == approx(0.100760, abs=0.000001)
# Full 1-hourly model run: 0.064362
assert float(
model.results.systemwide_capacity_factor.loc[{'carriers': 'power', 'techs': 'battery'}].item()
) == approx(0.091036, abs=0.000001)
def test_nationalscale_clustered_example_closest_results_cbc(self):
self.example_tester_closest()
def test_nationalscale_clustered_example_closest_results_glpk(self):
if shutil.which('glpsol'):
self.example_tester_closest(solver='glpk')
else:
pytest.skip('GLPK not installed')
def test_nationalscale_clustered_example_mean_results_cbc(self):
self.example_tester_mean()
@pytest.mark.skip(reason='GLPK is useless and delivering different results on different operating systems')
def test_nationalscale_clustered_example_mean_results_glpk(self):
if shutil.which('glpsol'):
self.example_tester_mean(solver='glpk')
else:
pytest.skip('GLPK not installed')
def test_nationalscale_clustered_example_storage_inter_cluster(self):
self.example_tester_storage_inter_cluster()
def test_storage_inter_cluster_cyclic(self):
model = self.model_runner(storage_inter_cluster=True, cyclic=True)
# Full 1-hourly model run: 22312488.670967
assert float(model.results.cost.sum()) == approx(18904055.722)
# Full 1-hourly model run: 0.296973
assert float(
model.results.systemwide_levelised_cost.loc[{'carriers': 'power', 'techs': 'battery'}].item()
) == approx(0.122564, abs=0.000001)
# Full 1-hourly model run: 0.064362
assert float(
model.results.systemwide_capacity_factor.loc[{'carriers': 'power', 'techs': 'battery'}].item()
) == approx(0.075145, abs=0.000001)
def test_storage_inter_cluster_no_storage(self):
with pytest.warns(calliope.exceptions.ModelWarning) as excinfo:
self.model_runner(storage_inter_cluster=True, storage=False)
expected_warnings = [
'Tech battery was removed by setting ``exists: False``',
'Tech csp was removed by setting ``exists: False``'
]
assert check_error_or_warning(excinfo, expected_warnings)
class TestUrbanScaleExampleModelSenseChecks:
def example_tester(self, resource_unit, solver='cbc', solver_io=None):
unit_override = {
'techs.pv.constraints': {
'resource': 'file=pv_resource.csv:{}'.format(resource_unit),
'resource_unit': 'energy_{}'.format(resource_unit)
},
'run.solver': solver
}
override = {'model.subset_time': '2005-07-01', **unit_override}
if solver_io:
override['run.solver_io'] = solver_io
model = calliope.examples.urban_scale(override_dict=override)
model.run()
assert model.results.energy_cap.to_pandas()['X1::chp'] == approx(250.090112)
# GLPK isn't able to get the same answer both times, so we have to account for that here
if resource_unit == 'per_cap' and solver == 'glpk':
heat_pipe_approx = 183.45825
else:
heat_pipe_approx = 182.19260
assert model.results.energy_cap.to_pandas()['X2::heat_pipes:N1'] == approx(heat_pipe_approx)
assert model.results.carrier_prod.sum('timesteps').to_pandas()['X3::boiler::heat'] == approx(0.18720)
assert model.results.resource_area.to_pandas()['X2::pv'] == approx(830.064659)
assert float(model.results.carrier_export.sum()) == approx(122.7156)
# GLPK doesn't agree with commercial solvers, so we have to account for that here
cost_sum = 430.097399 if solver == 'glpk' else 430.089188
assert float(model.results.cost.sum()) == approx(cost_sum)
def test_urban_example_results_area(self):
self.example_tester('per_area')
def test_urban_example_results_area_gurobi(self):
try:
import gurobipy # pylint: disable=unused-import
self.example_tester('per_area', solver='gurobi', solver_io='python')
except ImportError:
pytest.skip('Gurobi not installed')
def test_urban_example_results_cap(self):
self.example_tester('per_cap')
def test_urban_example_results_cap_gurobi(self):
try:
import gurobipy # pylint: disable=unused-import
self.example_tester('per_cap', solver='gurobi', solver_io='python')
except ImportError:
pytest.skip('Gurobi not installed')
@pytest.mark.filterwarnings("ignore:(?s).*Integer:calliope.exceptions.ModelWarning")
def test_milp_example_results(self):
model = calliope.examples.milp(
override_dict={'model.subset_time': '2005-01-01', 'run.solver_options.mipgap': 0.001}
)
model.run()
assert model.results.energy_cap.to_pandas()['X1::chp'] == 300
assert model.results.energy_cap.to_pandas()['X2::heat_pipes:N1'] == approx(188.363137)
assert model.results.carrier_prod.sum('timesteps').to_pandas()['X1::supply_gas::gas'] == approx(12363.173036)
assert float(model.results.carrier_export.sum()) == approx(0)
assert model.results.purchased.to_pandas()['X2::boiler'] == 1
assert model.results.units.to_pandas()['X1::chp'] == 1
assert float(model.results.operating_units.sum()) == 24
assert float(model.results.cost.sum()) == approx(540.780779)
def test_operate_example_results(self):
model = calliope.examples.operate(
override_dict={'model.subset_time': ['2005-07-01', '2005-07-04']}
)
with pytest.warns(calliope.exceptions.ModelWarning) as excinfo:
model.run()
expected_warnings = [
'Energy capacity constraint removed',
'Resource capacity constraint defined and set to infinity for all supply_plus techs'
]
assert check_error_or_warning(excinfo, expected_warnings)
assert all(model.results.timesteps == pd.date_range('2005-07', '2005-07-04 23:00:00', freq='H'))
|
StarcoderdataPython
|
4973656
|
<gh_stars>0
x, y, z = 2, 5, 107
# PRIZE QUESTIONS - PRIZE: 20 TL IN CASH
# 1- What is the difference between the product of two numbers taken from the user and the sum of x, y, z?
# a = int(input('1st number: '))
# b = int(input('2nd number: '))
# result = (a*b) - (x+y+z)
# 2- Compute the integer (remainder-free) division of y by x
result = y // x
# 3- What is the sum of (x, y, z) mod 3?
toplam = (x + y + z)
result = toplam % 3
# 4- Compute y raised to the power x.
result = y ** x
# 5- Given x, *y, z = numbers, what is the cube of z?
numbers = 1, 5, 7, 10, 6
x, *y, z = numbers
result = z ** 3
# 6- Given x, *y, z = numbers, what is the sum of the values in y?
numbers = 1, 5, 7, 10, 6
x, *y, z = numbers
result = y[0] + y[1] + y[2]
print(result)
|
StarcoderdataPython
|
3288433
|
# This example assumes servers to load balance
# already exist and will be pool members
import libcloud
from libcloud.loadbalancer.base import Algorithm
def create_load_balancer():
# Compute driver to retrieve servers to be pool members (the nodes)
cls = libcloud.get_driver(libcloud.DriverType.COMPUTE,
libcloud.DriverType.COMPUTE.NTTCIS)
compute_driver = cls('my_username', '<PASSWORD>', region='eu')
net_domain_name = 'sdk_test_1'
net_domains = compute_driver.ex_list_network_domains(location='EU6')
net_domain_id = [d for d in net_domains if d.name == net_domain_name][0].id
# Load balancer driver to create and/or edit load balanceers
cls = libcloud.get_driver(libcloud.DriverType.LOADBALANCER,
libcloud.DriverType.LOADBALANCER.NTTCIS)
lbdriver = cls('my_username', net_domain_id, 'my_pass', region='eu')
member1 = compute_driver.list_nodes(ex_name='web1')[0]
member2 = compute_driver.list_nodes(ex_name='web2')[0]
members = [member1, member2]
name = 'sdk_test_balancer'
port = '80'
listener_port = '8000'
protocol = 'TCP'
algorithm = Algorithm.LEAST_CONNECTIONS_MEMBER
members = [m for m in members]
ex_listener_ip_address = "192.168.3.11"
lb = lbdriver.create_balancer(
name,
listener_port=listener_port,
port=port, protocol=protocol,
algorithm=algorithm, members=members,
optimization_profile='TCP',
ex_listener_ip_address=ex_listener_ip_address
)
print(lb)
|
StarcoderdataPython
|
8062849
|
<reponame>John1001Song/Big-Data-Robo-Adviser<gh_stars>1-10
import os
from os.path import isfile, join
from os import listdir
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
N_fs_growth_ann_path = '../datasets/NASDAQ/financial_statement/growth/annual/'
N_fs_growth_qua_path = '../datasets/NASDAQ/financial_statement/growth/quarterly/'
N_stock_path = '../datasets/NASDAQ/stock_price/'
def get_all_file_name(file_path, file_format):
# get all files names
files_names = [f for f in listdir(file_path) if isfile(join(file_path, f))]
name_array = []
for name in files_names:
if file_format in name:
name_array.append(name)
return name_array
def match(ann_list, qua_list):
for ann_ele in ann_list:
if ann_ele not in qua_list:
print('annual file: ', ann_ele, ' is not in the quarterly folder')
for quar_ele in qua_list:
if quar_ele not in ann_list:
print('quarterly file: ', quar_ele, 'is not in the annual folder')
if __name__ == '__main__':
ann_list = get_all_file_name(N_fs_growth_ann_path, '.xlsx')
qua_list = get_all_file_name(N_fs_growth_qua_path, '.xlsx')
stock_list = get_all_file_name(N_stock_path, '.csv')
print(len(ann_list))
print(len(qua_list))
print(len(stock_list))
match(ann_list, qua_list)
|
StarcoderdataPython
|
1640128
|
# Get the first n-digit pandigital prime.
# FAST (<1.1s)
#
# APPROACH:
# - Use the permutations that are generated by itertools.permutations
# to generate them sorted.
# - Generate primes only until sqrt(987654321), the max possible n-digit pandigital prime,
# so it can be checked that the numbers are prime.
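# - Editor's note (added for clarity): the digit sums of 9- and 8-digit pandigitals are
#   45 and 36, both divisible by 3, so no such number can be prime; the search below
#   therefore first succeeds with a 7-digit pandigital.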
from .helpers import primes_until
from itertools import permutations
from math import floor
limit = floor(987654321 ** .5)
primes = tuple(primes_until(limit))[3:]
def transform_to_num(t):
return int(''.join(t))
def is_prime(n):
if not n % 3:
return False
bound = floor(n**.5)
for p in primes:
if p > bound:
return True
elif not n % p:
return False
def get_result():
for n in range(9, 3, -1):
for i in permutations(str(s) for s in range(n, 0, -1)):
if i[-1] in '24685': continue
if is_prime(transform_to_num(i)):
return transform_to_num(i)
result = get_result()
|
StarcoderdataPython
|
5140301
|
from ddpg import *
#from new_ddpg import *
#from pretrained_ddpg import *
import rec_env
import sys
import gc
gc.enable()
EPISODES = 100000
TEST_NUM = 10
flag_test = False
def main():
env = rec_env.Env()
agent = DDPG(env.state_space, env.action_dim)
for episode in range(EPISODES):
env.reset()
# Train
for step in range(env.timestep_limit):
state,action,reward,next_state,done = env.step()
agent.perceive(state,action,reward,next_state,done)
if done:
break
# Testing:
if flag_test and episode > 0:
total_reward = 0
for i in xrange(TEST_NUM):
state = env.rand()
for step in range(env.timestep_limit):
action = agent.action(state) # direct action for test
state, reward = env.search(state, action)
total_reward += reward
ave_reward = total_reward/TEST_NUM
print ('episode: ',episode,'Evaluation Average Reward:',ave_reward)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
5041234
|
"""
Modules in this directory smooth over importing functionality that may be
present in different libraries, depending on the users' system.
"""
|
StarcoderdataPython
|
1972604
|
from dataflows import Flow, update_package
from dgp.core.base_enricher import enrichments_flows, BaseEnricher
from dgp.config.consts import RESOURCE_NAME, CONFIG_PRIMARY_KEY
from dgp_server.log import logger
class LoadMetadata(BaseEnricher):
def test(self):
return self.config._unflatten().get('extra', {}).get('metadata')
def postflow(self):
metadata = self.config._unflatten().get('extra', {}).get('metadata')
logger.info('UPDATING WITH METADATA %r', metadata)
return Flow(
update_package(**metadata)
)
class Deduplicator(BaseEnricher):
def test(self):
logger.info('DEDPULICATING %r', self.config.get('extra.deduplicate'))
return self.config.get('extra.deduplicate')
def postflow(self):
key_field_names = [
ct.replace(':', '-')
for ct in self.config.get(CONFIG_PRIMARY_KEY)
]
used = set()
def dedup(rows):
if rows.res.name == RESOURCE_NAME:
logger.info('DEDPULICATING with KEYS %r', key_field_names)
for row in rows:
key = tuple(row.get(k) for k in key_field_names)
if key not in used:
used.add(key)
yield row
else:
yield from rows
steps = [
dedup,
]
f = Flow(*steps)
return f
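# Editor's illustration (the column types below are assumptions, not from the original
# source): if CONFIG_PRIMARY_KEY resolves to ['date:year', 'geo:code'], key_field_names
# becomes ['date-year', 'geo-code'], and within the RESOURCE_NAME resource every row
# after the first that shares the same (date-year, geo-code) tuple is dropped.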
# class Deduplicator(BaseEnricher):
# def test(self):
# logger.info('DEDPULICATING %r', self.config.get('extra.deduplicate'))
# return self.config.get('extra.deduplicate')
# def postflow(self):
# key_field_names = [
# ct.replace(':', '-')
# for ct in self.config.get(CONFIG_PRIMARY_KEY)
# ]
# value_field_names = [
# mapping['columnType'].replace(':', '-')
# for mapping in self.config.get(CONFIG_MODEL_MAPPING)
# if ('columnType' in mapping and
# mapping['columnType'].split(':')[0] == 'value')
# ]
# steps = [
# join_with_self(
# RESOURCE_NAME,
# key_field_names,
# {
# **dict((f, {}) for f in key_field_names),
# **dict((f, dict(aggregate='sum')) for f in value_field_names),
# '*': dict(aggregate='last')
# }
# ),
# ]
# logger.info('DEDPULICATING with KEYS %r', key_field_names)
# f = Flow(*steps)
# return f
def flows(config, context):
return enrichments_flows(
config, context,
Deduplicator,
LoadMetadata,
)
|
StarcoderdataPython
|
113857
|
import dnd.parse
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Optional, Any
import dnd.table
_EMPTY_DICT = dict()
class Template(object):
def __init__(self, text: "str") -> None:
self._text = text
self._parts = list()
self._values = None # type: Optional[dict[str, Any]]
self._tables = None # type: Optional[dict[str, dnd.table.Table]]
self._error_behavior = 0
rest = text
while "{{" in rest:
start = rest.index("{{")
end = rest.index("}}", start + 2)
statement = rest[start + 2 : end].strip()
strpart = rest[0:start]
if len(strpart) > 0:
self._parts.append((0, strpart))
rest = rest[end + 2 :]
try:
self._parts.append((1, dnd.parse.expression(statement)))
except ValueError:
self._parts.append((2, statement))
if len(rest) > 0:
self._parts.append((0, rest))
@property
def text(self) -> "str":
return self._text
@property
def values(self) -> "Optional[dict[str, Any]]":
return self._values
@values.setter
def values(self, values: "Optional[dict[str, Any]]") -> None:
self._values = values
@property
def tables(self) -> "Optional[dict[str, Any]]":
return self._tables
@tables.setter
def tables(self, tables: "Optional[dict[str, dnd.table.Table]]") -> None:
self._tables = tables
def raise_on_error(self) -> None:
self._error_behavior = 0
def print_on_error(self) -> None:
self._error_behavior = 1
def ignore_on_error(self) -> None:
self._error_behavior = 2
def evaluate(
self,
values: "Optional[dict[str,Any]]" = None,
tables: "Optional[dict[str, dnd.table.Table]]" = None,
) -> "str":
if values is None:
values = self._values if self._values is not None else _EMPTY_DICT
if tables is None:
tables = self._tables if self._tables is not None else _EMPTY_DICT
result = ""
for t, v in self._parts:
if t == 0:
# String
result += v
elif t == 1:
# Dice Expression
result += str(v())
elif t == 2:
# Value/Table lookup
if v in values:
try:
# E.g. a Dice object or Expression Node
result += str(values[v]())
except Exception:
result += str(values[v])
elif v in tables:
result += tables[v].random().template.evaluate(values, tables)
else:
# This may raise an error, or continue
self._error("statement {} not in values or tables".format(repr(v)))
result += "<ERROR>"
return result
def _error(self, message: "str"):
if self._error_behavior == 0:
raise ValueError(message)
elif self._error_behavior == 1:
print(message)
def __repr__(self) -> "str":
return "Template({})".format(repr(self._text))
def __str__(self) -> "str":
if self._values is not None or self._tables is not None:
return self.evaluate()
return self._text
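# Illustrative example (hypothetical template; exact behaviour depends on dnd.parse):
#   t = Template("You find {{2d6}} gold pieces guarded by a {{monster}}.")
#   t.values = {"monster": "goblin"}
#   print(t.evaluate())
# "{{2d6}}" parses as a dice expression and is rolled, while "{{monster}}" fails
# to parse and falls through to the values/tables lookup, yielding e.g.
# "You find 7 gold pieces guarded by a goblin."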
|
StarcoderdataPython
|
3303384
|
# encoding: utf-8
import sys
import re
import argparse
from workflow.workflow import MATCH_ATOM, MATCH_STARTSWITH, MATCH_SUBSTRING, MATCH_ALL, MATCH_INITIALS, MATCH_CAPITALS, MATCH_INITIALS_STARTSWITH, MATCH_INITIALS_CONTAIN
from workflow import Workflow, ICON_WEB, ICON_WARNING, ICON_BURN, ICON_SWITCH, ICON_HOME, ICON_COLOR, ICON_INFO, ICON_SYNC, web, PasswordNotFound
from common import qnotify, error, st_api, get_device, get_scene
log = None
def get_devices(wf, api_key):
"""Retrieve all devices
    Returns a list of devices.
"""
items = []
i = 0
while True:
result = st_api(wf, api_key, 'devices', dict(max=200, page=i))
if 'items' in result:
items.extend(result['items'])
if '_links' in result and 'next' in result['_links']:
i += 1
else:
break
return items
def get_scenes(wf, api_key):
"""Retrieve all scenes
    Returns a list of scenes.
"""
return st_api(wf, api_key, 'scenes', dict(max=200))['items']
def get_colors():
r = web.get('https://raw.githubusercontent.com/jonathantneal/color-names/master/color-names.json')
flip_colors = r.json()
colors = {v.lower().replace(' ',''): k for k, v in flip_colors.items()}
return colors
def get_color(name, colors):
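    # e.g. 'Sky Blue' is normalized to 'skyblue' and looked up in the colors map,
    # while a bare hex string such as 'ff00aa' is returned directly as '#FF00AA'.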
name = name.lower().replace(' ','')
if re.match('[0-9a-f]{6}', name):
return '#'+name.upper()
elif name in colors:
return colors[name].upper()
return ''
def get_device_capabilities(device):
capabilities = []
if device['components'] and len(device['components']) > 0 and \
device['components'][0]['capabilities'] and len(device['components'][0]['capabilities']) > 0:
capabilities = list(map( lambda x: x['id'], device['components'][0]['capabilities']))
return capabilities
def get_device_commands(device, commands):
result = []
capabilities = get_device_capabilities(device)
for capability in capabilities:
for command, map in commands.items():
if capability == map['capability']:
result.append(command)
return result
def preprocess_device_command(wf, api_key, args, commands):
if 'toggle' == args.device_command:
status = st_api(wf, api_key, '/devices/'+args.device_uid+'/status')
if status and 'components' in status and 'main' in status['components'] and 'switch' in status['components']['main'] and 'switch' in status['components']['main']['switch'] and 'value' in status['components']['main']['switch']['switch']:
state = status['components']['main']['switch']['switch']['value']
log.debug("Toggle Switch state is "+state)
if 'on' == state:
args.device_command = 'off'
else:
args.device_command = 'on'
return args.device_command
def handle_device_commands(wf, api_key, args, commands):
if not args.device_uid or args.device_command not in commands.keys():
return
args.device_command = preprocess_device_command(wf, api_key, args, commands)
command = commands[args.device_command]
device = get_device(wf, args.device_uid)
device_name = device['label']
capabilities = get_device_capabilities(device)
if command['capability'] not in capabilities:
error('Unsupported command for device')
# eval all lambdas in arguments
if 'arguments' in command and command['arguments']:
for i, arg in enumerate(command['arguments']):
if callable(arg):
command['arguments'][i] = arg()
elif isinstance(arg, dict):
for key, value in arg.items():
if callable(value):
arg[key] = value()
data = {'commands': [command]}
log.debug("Executing Switch Command: "+device_name+" "+args.device_command)
result = st_api(wf, api_key,'devices/'+args.device_uid+'/commands', None, 'POST', data)
result = (result and result['results'] and len(result['results']) > 0 and result['results'][0]['status'] and 'ACCEPTED' == result['results'][0]['status'])
if result:
qnotify("SmartThings", device_name+" turned "+args.device_command+' '+(args.device_params[0] if args.device_params else ''))
log.debug("Switch Command "+device_name+" "+args.device_command+" "+(args.device_params[0] if args.device_params else '')+' '+("succeeded" if result else "failed"))
return result
def handle_scene_commands(wf, api_key, args):
if not args.scene_uid:
return
scene = get_scene(wf, args.scene_uid)
scene_name = scene['sceneName']
log.debug("Executing Scene Command: "+scene_name)
result = st_api(wf, api_key,'scenes/'+args.scene_uid+'/execute', None, 'POST')
result = (result and result['status'] and 'success' == result['status'])
if result:
qnotify("SmartThings", "Ran "+scene_name)
log.debug("Scene Command "+scene_name+" "+("succeeded" if result else "failed"))
return result
def main(wf):
# retrieve cached devices and scenes
devices = wf.stored_data('devices')
scenes = wf.stored_data('scenes')
colors = wf.stored_data('colors')
# build argument parser to parse script args and collect their
# values
parser = argparse.ArgumentParser()
# add an optional (nargs='?') --apikey argument and save its
# value to 'apikey' (dest). This will be called from a separate "Run Script"
# action with the API key
parser.add_argument('--apikey', dest='apikey', nargs='?', default=None)
parser.add_argument('--showstatus', dest='showstatus', nargs='?', default=None)
    # add an optional --update flag (action='store_true') and save its
    # value to 'update' (dest). This will be called from a separate "Run Script"
    # action to refresh the cached devices and scenes
parser.add_argument('--update', dest='update', action='store_true', default=False)
# reinitialize
parser.add_argument('--reinit', dest='reinit', action='store_true', default=False)
# device name, uid, command and any command params
parser.add_argument('--device-uid', dest='device_uid', default=None)
parser.add_argument('--device-command', dest='device_command', default='')
parser.add_argument('--device-params', dest='device_params', nargs='*', default=[])
# scene name, uid, command and any command params
parser.add_argument('--scene-uid', dest='scene_uid', default=None)
# add an optional query and save it to 'query'
parser.add_argument('query', nargs='?', default=None)
# parse the script's arguments
args = parser.parse_args(wf.args)
log.debug("args are "+str(args))
words = args.query.split(' ') if args.query else []
# list of commands
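    # Each entry maps a user command to a SmartThings capability/command pair;
    # optional 'arguments' are lambdas so device_params are only evaluated when
    # the command actually runs (see handle_device_commands).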
commands = {
'status': {
'capability': 'global'
},
'on': {
'component': 'main',
'capability': 'switch',
'command': 'on'
},
'toggle': {
'component': 'main',
'capability': 'switch',
'command': 'on'
},
'off': {
'component': 'main',
'capability': 'switch',
'command': 'off'
},
'dim': {
'component': 'main',
'capability': 'switchLevel',
'command': 'setLevel',
'arguments': [
lambda: int(args.device_params[0]),
]
},
'lock': {
'component': 'main',
'capability': 'lock',
'command': 'lock'
},
'unlock': {
'component': 'main',
'capability': 'lock',
'command': 'unlock'
},
'color': {
'component': 'main',
'capability': 'colorControl',
'command': 'setColor',
'arguments': [
{
'hex': lambda: get_color(args.device_params[0], colors)
}
]
},
'mode': {
'component': 'main',
'capability': 'thermostatMode',
'command': 'setThermostatMode',
'arguments': [
lambda: str(args.device_params[0])
]
},
'heat': {
'component': 'main',
'capability': 'thermostatHeatingSetpoint',
'command': 'setHeatingSetpoint',
'arguments': [
lambda: int(args.device_params[0]),
]
},
'cool': {
'component': 'main',
'capability': 'thermostatCoolingSetpoint',
'command': 'setCoolingSetpoint',
'arguments': [
lambda: int(args.device_params[0]),
]
}
}
# Reinitialize if necessary
if args.reinit:
wf.reset()
wf.delete_password('smartthings_api_key')
qnotify('SmartThings', 'Workflow reinitialized')
return 0
if args.showstatus:
if args.showstatus in ['on', 'off']:
wf.settings['showstatus'] = args.showstatus
wf.settings.save()
qnotify('SmartThings', 'Show Status '+args.showstatus)
return 0
####################################################################
# Save the provided API key
####################################################################
# save API key if that is passed in
if args.apikey: # Script was passed an API key
log.debug("saving api key "+args.apikey)
# save the key
wf.save_password('smartthings_api_key', args.apikey)
qnotify('SmartThings', 'API Key Saved')
return 0 # 0 means script exited cleanly
####################################################################
# Check that we have an API key saved
####################################################################
try:
api_key = wf.get_password('smartthings_api_key')
except PasswordNotFound: # API key has not yet been set
error('API Key not found')
return 0
# Update devices if that is passed in
if args.update:
# update devices and scenes
devices = get_devices(wf, api_key)
scenes = get_scenes(wf, api_key)
colors = get_colors()
wf.store_data('devices', devices)
wf.store_data('scenes', scenes)
wf.store_data('colors', colors)
qnotify('SmartThings', 'Devices and Scenes updated')
return 0 # 0 means script exited cleanly
# handle any device or scene commands there may be
handle_device_commands(wf, api_key, args, commands)
handle_scene_commands(wf, api_key, args)
if __name__ == u"__main__":
wf = Workflow(update_settings={
'github_slug': 'schwark/alfred-smartthings-py'
})
log = wf.logger
sys.exit(wf.run(main))
|
StarcoderdataPython
|
5117430
|
print('-=-=-=-=-= DESAFIO 95 -=-=-=-=')
print()
print('=-='*15)
print(f'{"APROVEITAMENTO DO JOGADOR":^45}')
print('=-='*15)
jogador = {}
time = []
while True:
gols = []
jogador['nome'] = str(input('Nome: ')).title()
partidas = int(input(f'Quantas partidas {jogador["nome"]} jogou? '))
for c in range(1, partidas+1):
gols.append(int(input(f'Gols na partida {c}: ')))
jogador['gols'] = gols
jogador['total'] = sum(gols)
time.append(jogador.copy())
continuar = ' '
print('-'*45)
while continuar not in 'SN':
continuar = str(input('Quer continuar? [S/N] ')).strip().upper()[0]
if continuar == 'N':
print('=-='*15)
break
print('-'*45)
# TABLE
print(f'{"COD":5}', end='')
for k in jogador.keys():
print(f'{k.upper():15}', end='')
print()
print('-'*41)
for i, j in enumerate(time):
print(f'{i:<5}', end='')
for v in j.values():
print(f'{str(v):<15}', end='')
print()
print('-'*41)
while True:
op = int(input('Mostrar dados de qual jogador? (999 para SAIR) '))
if op == 999:
break
if op >= len(time):
print(f'ERRO! Não existe jogador com código {op}. Tente novamente...')
else:
print(f'-- LEVANTAMENTO DO JOGADOR {time[op]["nome"]}:')
for c, gl in enumerate(time[op]['gols']):
print(f' Na partida {c+1}, fez {gl} gols.')
print('-'*41)
print()
print('<< VOLTE SEMPRE! >>')
|
StarcoderdataPython
|
5177830
|
<gh_stars>1-10
import sys
import time
import pdb
from copy import deepcopy
from multiprocessing import Pool
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import pandas as pd
from sklearn.preprocessing import StandardScaler
from scipy.stats import multivariate_normal
from scipy.stats import truncnorm
import nevergrad as ng
class Dropo(object):
"""
Domain Randomization Off-Policy Optimization (DROPO)
Official implementation of DROPO as in the paper "DROPO: Sim-to-Real
Transfer with Offline Domain Randomization". View the file test_dropo.py
for a sample usage of the class.
Public repo at: https://github.com/gabrieletiboni/dropo
Main methods
-------
optimize_dynamics_distribution(...)
Starts the main DROPO optimization problem
set_offline_dataset(...)
Sets the offline dataset of transitions used for running DROPO
MSE(means), MSE_trajectories(means)
Compute the MSE in state space with <means> as dynamics parameters
(respectively for --sparse-mode and trajectory mode)
"""
def __init__(self,
sim_env,
t_length,
seed=0,
scaling=False,
sync_parall=True):
"""
Parameters
----------
sim_env : gym simulated environment object.
t_length : int,
Lambda hyperparameter as in our paper. Specifies how many
consecutive actions are executed for each likelihood evaluation.
seed : int, optional
scaling : boolean, optional
If True, each state observation dimension is rescaled to get similar
scaling across different dimensions.
sync_parall : boolean, optional
If True, explicitly adjust the number of evaluations in the opt.
problem to match CMA's population size w.r.t. the number of
parallel workers used.
"""
assert t_length > 0
self.sim_env = sim_env
self.sim_env.reset()
self._raw_mjstate = deepcopy(self.sim_env.get_sim_state()) # Save fresh full mjstate
self.t_length = t_length
self.current_t_length = -self.t_length
self.scaling = scaling
self.scaler = (StandardScaler(copy=True) if self.scaling else None)
self.T = None
self.seed = seed
self.sync_parall = sync_parall
return
def set_offline_dataset(self, T, indexes=None, n=None, sparse_mode=False):
"""Sets the offline state transitions used for running DROPO.
In general, we can select a subset of all of the transitions contained
in the target dataset `T`, to speed up the opt. problem or
for debugging. Specify the value `n` to subselect a number of
trajectories.
Parameters
----------
T : dict,
Offline dataset with keys: ['observations',
'next_observations',
'actions',
'terminals'
]
T['observations'] : ndarray,
2D array (t, n) containing the current state information
for each timestep `t`
T['next_observations'] : ndarray
2D array (t, n) containing the next-state information
for each timestep `t`
T['actions'] : ndarray
2D array (t, a) containing the action commanded to the agent
at the current timestep `t`
T['terminals'] : ndarray
1D array (t,) of booleans indicating whether or not the
current state transition is terminal (ends the episode)
indexes : list, optional
List of integers indicating the subset of transitions used for
running DROPO. If None, transitions are automatically selected
based on `n` and `sparse_mode`. (default: none)
n : int, optional
Number of trajectories sampled from `T`, if `indexes` is not
explicitly specified.
NOTE: if --sparse-mode is selected, then `n` refers to number of
single sparse transitions instead.
sparse_mode : boolean, optional
if True, DROPO is run on random sparse transitions, rather than
full episodes. In this mode, `n` is treated as the number of transitions.
"""
assert ('observations' in T
and 'next_observations' in T
and 'actions' in T
and 'terminals' in T)
self.T = T
self.sparse_mode = sparse_mode
if indexes is None:
if self.sparse_mode:
if n is None: # Use all transitions in `T`
self.transitions = list(range(len(self.T['observations'])-self.t_length))
else: # Get a subset of `n` sparse transitions randomly sampled in `T`
self.transitions = self._get_subset_sparse_transitions(n)
else: # Get a subset of `n` trajectories randomly sampled in `T`
self.transitions = self._get_ordered_n_trajectories(n)
else:
self.transitions = indexes
if self.scaling: # Fit scaler
self.scaler.fit(self.T['next_observations'])
return
def get_means(self, phi):
return np.array(phi)[::2]
def get_stdevs(self, phi):
return np.array(phi)[1::2]
def pretty_print_bounds(self, phi):
assert (
self.sim_env is not None
and isinstance(self.sim_env.dynamics_indexes, dict)
)
return '\n'.join([str(self.sim_env.dynamics_indexes[i])+':\t'+str(round(phi[i*2],5))+', '+str(round(phi[i*2+1],5)) for i in range(len(phi)//2)])
def optimize_dynamics_distribution(self, opt,
budget=1000,
additive_variance=False,
epsilon=1e-3,
sample_size=100,
now=1,
learn_epsilon=False,
normalize=False,
logstdevs=False):
"""Starts the main DROPO optimization problem
Parameters
----------
budget : int,
Number of objective function evaluations for CMA-ES
additive_variance : boolean,
if True, add --epsilon to the diagonal of the cov_matrix to regularize the next-state distribution inference
epsilon : float
sample_size : int,
Number of dynamics parameters sampled from the domain randomization distribution
now : int,
number of parallel workers
learn_epsilon : boolean,
if True, learn the --epsilon parameter by adding it as a parameter to the opt. problem
normalize : boolean,
if True, normalize mean and st.devs. in the search space to the interval [0, 4] (recommended)
logstdevs : boolean,
if True, denormalize st.devs. for objective function evaluation in log-space
"""
dim_task = len(self.sim_env.get_task())
search_space = []
search_space_bounds = []
self.parameter_bounds = np.empty((dim_task, 2, 2), float)
self.normalized_width = 4
self.logstdevs = logstdevs
assert hasattr(self.sim_env, 'set_task_search_bounds')
self.sim_env.set_task_search_bounds()
for i in range(dim_task):
width = self.sim_env.max_task[i]-self.sim_env.min_task[i] # Search interval for this parameter
# MEAN
initial_mean = (self.sim_env.min_task[i]+width/4) + np.random.rand()*((self.sim_env.max_task[i]-width/4)-(self.sim_env.min_task[i]+width/4)) # Initialize it somewhat around the center
if normalize: # Normalize parameter mean to interval [0, 4]
search_space.append(ng.p.Scalar(init=self.normalized_width*0.5).set_bounds(lower=0, upper=self.normalized_width))
else:
search_space.append(ng.p.Scalar(init=initial_mean).set_bounds(lower=self.sim_env.min_task[i], upper=self.sim_env.max_task[i]))
self.parameter_bounds[i, 0, 0] = self.sim_env.min_task[i]
self.parameter_bounds[i, 0, 1] = self.sim_env.max_task[i]
# STANDARD DEVIATION
initial_std = width/8 # This may sometimes lead to a stdev smaller than the lower threshold of 0.00001, so take the minimum
stdev_lower_bound = np.min([0.00001, initial_std-1e-5])
stdev_upper_bound = width/4
if normalize: # Normalize parameter stdev to interval [0, 4]
if self.logstdevs: # Recommended: optimize stdevs in log-space
search_space.append(ng.p.Scalar(init=self.normalized_width/2).set_bounds(lower=0, upper=self.normalized_width))
else: # Linearly optimize stdevs
search_space.append(ng.p.Scalar(init=self.normalized_width * (initial_std-stdev_lower_bound) / (stdev_upper_bound - stdev_lower_bound) ).set_bounds(lower=0, upper=self.normalized_width))
else: # Optimize parameters in their original scale (not recommended when using CMA-ES with the identity matrix as starting cov_matrix)
search_space.append(ng.p.Scalar(init=initial_std).set_bounds(lower=stdev_lower_bound, upper=stdev_upper_bound))
self.parameter_bounds[i, 1, 0] = stdev_lower_bound
self.parameter_bounds[i, 1, 1] = stdev_upper_bound
search_space_bounds.append(self.sim_env.min_task[i])
search_space_bounds.append(self.sim_env.max_task[i])
if learn_epsilon:
search_space.append( ng.p.Log(init=1e-3).set_bounds(lower=1e-15, upper=1e-1) )
epsilon = None
params = ng.p.Tuple(*search_space)
instru = ng.p.Instrumentation(bounds=params,
sample_size=sample_size,
epsilon=epsilon,
additive_variance=additive_variance,
learn_epsilon=learn_epsilon,
normalize=normalize)
Optimizer = self.__get_ng_optimizer(opt)
optim = Optimizer(parametrization=instru, budget=budget, num_workers=now)
start = time.time()
if not self.sparse_mode:
loss_function = self._L_target_given_phi_trajectories
loss_function_parallel = self._L_target_given_phi_trajectories_parallel
else:
loss_function = self._L_target_given_phi
loss_function_parallel = self._L_target_given_phi_parallel
# Run optimization problem
if now == 1:
recommendation = optim.minimize(loss_function)
else:
print('Parallelization with num workers:', optim.num_workers)
if self.sync_parall:
budget_used = 0
while budget_used < budget:
fit, X = [], []
while len(X) < optim.es.popsize:
solutions = []
remaining = optim.es.popsize - len(X)
curr_now = np.min([now, remaining])
for nw in range(curr_now):
solutions.append(optim.ask())
X.append(solutions[-1])
f_args = zip(range(now), [dict(item.kwargs) for item in solutions])
pool = Pool(processes=curr_now)
res = pool.map(loss_function_parallel, f_args)
pool.close()
pool.join()
for r in res:
fit.append(r)
for x, r in zip(X, fit):
optim.tell(x, r)
budget_used += optim.es.popsize
recommendation = optim.recommend() # Get final minimum found
else:
for u in range(budget // now):
xs = []
for i in range(now):
xs.append(optim.ask())
f_args = zip(range(now), [dict(item.kwargs) for item in xs])
pool = Pool(processes=now)
res = pool.map(loss_function_parallel, f_args)
pool.close()
pool.join()
for x, r in zip(xs, res):
optim.tell(x, r)
recommendation = optim.recommend() # Get final minimum found
end = time.time()
elapsed = end-start
if normalize:
if learn_epsilon:
return self._denormalize_bounds(recommendation.value[1]['bounds'][:-1]), loss_function(**recommendation.kwargs), elapsed, recommendation.value[1]['bounds'][-1]
else:
return self._denormalize_bounds(recommendation.value[1]['bounds']), loss_function(**recommendation.kwargs), elapsed, None
else:
if learn_epsilon:
return recommendation.value[1]['bounds'][:-1], loss_function(**recommendation.kwargs), elapsed, recommendation.value[1]['bounds'][-1]
else:
return recommendation.value[1]['bounds'], loss_function(**recommendation.kwargs), elapsed, None
def _L_target_given_phi_parallel(self, args):
i, args = args
np.random.seed(i+self.seed)
return self._L_target_given_phi(**args)
def _L_target_given_phi_trajectories_parallel(self, args):
i, args = args
np.random.seed(i+self.seed)
return self._L_target_given_phi_trajectories(**args)
def _L_target_given_phi(self,
bounds,
sample_size=100,
epsilon=1e-3,
additive_variance=False,
learn_epsilon=False,
normalize=False):
"""Objective function evaluation for --sparse-mode"""
likelihood = 0
if learn_epsilon:
epsilon = bounds[-1]
bounds = bounds[:-1]
if normalize:
bounds = self._denormalize_bounds(bounds)
sample = self.sample_truncnormal(bounds, sample_size*len(self.transitions))
t_length = self.t_length
# For each transition, map the sample to the state space,
# estimate the next-state distribution, and compute the likelihood
# of the real next state.
for k, t in enumerate(self.transitions):
ob = self.T['observations'][t]
target_ob_prime = self.T['next_observations'][t+t_length-1]
mapped_sample = []
for ss in range(sample_size):
r = self.sim_env.reset()
task = sample[k*sample_size + ss]
self.sim_env.set_task(*task)
self.sim_env.set_sim_state(self.sim_env.get_full_mjstate(ob, self._raw_mjstate))
if hasattr(self.sim_env.sim, 'forward'):
self.sim_env.sim.forward()
else:
raise ValueError('No forward() method found. This environment is not supported.')
for j in range(t, t+t_length):
action = self.T['actions'][j]
s_prime, reward, done, _ = self.sim_env.step(action)
mapped_sample.append(s_prime)
mapped_sample = np.array(mapped_sample)
if self.scaling:
target_ob_prime = self.scaler.transform(target_ob_prime.reshape(1,-1))[0]
mapped_sample = self.scaler.transform(mapped_sample)
# Infer covariance matrix and mean
cov_matrix = np.cov(mapped_sample, rowvar=0)
mean = np.mean(mapped_sample, axis=0)
if additive_variance:
cov_matrix = cov_matrix + np.diag(np.repeat(epsilon, mean.shape[0]))
multi_normal = multivariate_normal(mean=mean, cov=cov_matrix, allow_singular=True)
logdensity = multi_normal.logpdf(target_ob_prime)
likelihood += logdensity
if np.isinf(likelihood):
print('WARNING: infinite likelihood encountered.')
return -1*likelihood
def _L_target_given_phi_trajectories(self,
bounds,
sample_size=100,
additive_variance=False,
epsilon=1e-3,
normalize=False,
learn_epsilon=False):
"""Objective function evaluation for standard trajectory mode"""
if learn_epsilon:
epsilon = bounds[-1]
bounds = bounds[:-1]
if normalize:
bounds = self._denormalize_bounds(bounds)
sample = self.sample_truncnormal(bounds, sample_size)
r = self.sim_env.reset()
mapped_sample_per_transition = np.zeros((len(self.transitions), sample_size, r.shape[0]), float)
target_ob_prime_per_transition = np.zeros((len(self.transitions), r.shape[0]), float)
lambda_steps = self.t_length
effective_transitions = []
first_pass = True
for i, ss in enumerate(range(sample_size)):
task = sample[ss]
self.sim_env.set_task(*task)
reset_next = True
lambda_count = -1
# Reproduce trajectories with this task from the phi
for k, t in enumerate(self.transitions):
lambda_count += 1
if lambda_count < 0 or lambda_count%lambda_steps != 0:
continue
# Check if any of the next lambda_steps transitions are ending states, including current one
for l in range(k, k+lambda_steps):
if self.T['terminals'][self.transitions[l]] == True:
reset_next = True
lambda_count = -1
break
if lambda_count == -1:
continue
if first_pass:
effective_transitions.append(k)
ob = self.T['observations'][t]
target_ob_prime = self.T['next_observations'][t+lambda_steps-1]
if reset_next: # Initialize simulator at the beginning of the episode
r = self.sim_env.reset()
self.sim_env.set_sim_state(self.sim_env.get_initial_mjstate(ob, self._raw_mjstate))
if hasattr(self.sim_env.sim, 'forward'):
self.sim_env.sim.forward()
else:
raise ValueError('No forward() method found. This environment is not supported.')
reset_next = False
else: # Reset simulator after last transition
self.sim_env.set_sim_state(self.sim_env.get_full_mjstate(ob, self.sim_env.get_sim_state()))
if hasattr(self.sim_env.sim, 'forward'):
self.sim_env.sim.forward()
else:
raise ValueError('No forward() method found. This environment is not supported.')
for j in range(t, t+lambda_steps):
action = self.T['actions'][j]
s_prime, reward, done, _ = self.sim_env.step(action)
mapped_sample = np.array(s_prime)
if self.scaling:
target_ob_prime = self.scaler.transform(target_ob_prime.reshape(1,-1))[0]
mapped_sample = self.scaler.transform(mapped_sample.reshape(1, -1))[0]
mapped_sample_per_transition[k, i, :] = mapped_sample
target_ob_prime_per_transition[k, :] = target_ob_prime
first_pass = False
likelihood = 0
for i, k in enumerate(effective_transitions):
mapped_sample = mapped_sample_per_transition[k]
target_ob_prime = target_ob_prime_per_transition[k]
# Infer next-state distribution parameters
cov_matrix = np.cov(mapped_sample, rowvar=0)
mean = np.mean(mapped_sample, axis=0)
if additive_variance:
cov_matrix = cov_matrix + np.diag(np.repeat(epsilon, mean.shape[0]))
multi_normal = multivariate_normal(mean=mean, cov=cov_matrix, allow_singular=True)
logdensity = multi_normal.logpdf(target_ob_prime)
likelihood += logdensity
if np.isinf(likelihood):
print('WARNING: infinite likelihood encountered.')
return -1*likelihood
def _denormalize_bounds(self, phi):
"""Denormalize means and stdevs in phi back to their original space
for evaluating the likelihood."""
new_phi = []
for i in range(len(phi)//2):
norm_mean = phi[i*2]
norm_std = phi[i*2 + 1]
mean = (norm_mean * (self.parameter_bounds[i,0,1]-self.parameter_bounds[i,0,0]))/self.normalized_width + self.parameter_bounds[i,0,0]
if not self.logstdevs:
std = (norm_std * (self.parameter_bounds[i,1,1]-self.parameter_bounds[i,1,0]))/self.normalized_width + self.parameter_bounds[i,1,0]
else:
                std = self.parameter_bounds[i,1,0] * ((self.parameter_bounds[i,1,1]/self.parameter_bounds[i,1,0])**(norm_std/self.normalized_width)) # log-space: a * (b/a)**(x/normalized_width) > 0
new_phi.append(mean)
new_phi.append(std)
return new_phi
def MSE(self, means):
"""Compute the MSE in state space with means as dynamics parameters (--sparse-mode).
Refer to our paper (Section IV.A) for a detailed explanation on how
the MSE is computed.
"""
distance = 0
task = np.array(means)
self.sim_env.set_task(*task)
for t in self.transitions:
ob = self.T['observations'][t]
action = self.T['actions'][t]
target_ob_prime = self.T['observations'][t+1]
mapped_sample = []
self.sim_env.set_sim_state(self.sim_env.get_full_mjstate(ob, self._raw_mjstate))
s_prime, reward, done, _ = self.sim_env.step(action)
mapped_sample.append(list(s_prime))
mapped_sample = np.array(mapped_sample)
if self.scaling:
mapped_sample = self.scaler.transform(mapped_sample)
target_ob_prime = self.scaler.transform(target_ob_prime.reshape(1,-1))[0]
mapped_sample = mapped_sample[0,:]
distance += np.linalg.norm(target_ob_prime-mapped_sample)**2
mean_distance = distance / len(self.transitions)
return mean_distance
def MSE_trajectories(self, means):
"""Compute the MSE in state space with means as dynamics parameters.
Refer to our paper (Section IV.A) for a detailed explanation on how
the MSE is computed.
"""
distance = []
task = np.array(means)
self.sim_env.set_task(*task)
reset_next = True
for k, t in enumerate(self.transitions):
if self.T['terminals'][t] == True:
reset_next = True
continue
target_s = self.T['observations'][t]
target_s_prime = self.T['observations'][t+1]
if reset_next:
r = self.sim_env.reset()
self.sim_env.set_sim_state(self.sim_env.get_initial_mjstate(target_s, self._raw_mjstate))
if hasattr(self.sim_env.sim, 'forward'):
self.sim_env.sim.forward()
elif hasattr(self.sim_env.sim.env.sim, 'forward'):
self.sim_env.sim.env.sim.forward()
else:
raise ValueError('No forward() method found. This environment is not supported.')
reset_next = False
else:
self.sim_env.set_sim_state(self.sim_env.get_full_mjstate(target_s, self.sim_env.get_sim_state()))
action = self.T['actions'][t]
sim_s_prime, reward, done, _ = self.sim_env.step(action)
sim_s_prime = np.array(sim_s_prime)
if self.scaling:
sim_s_prime = self.scaler.transform(sim_s_prime.reshape(1, -1))[0]
target_s_prime = self.scaler.transform(target_s_prime.reshape(1, -1))[0]
distance.append(np.linalg.norm(sim_s_prime - target_s_prime)**2)
return np.mean(distance)
def sample_truncnormal(self, phi, size=1):
"""Sample <size> observations from the dynamics distribution parameterized by <phi>.
        A truncated normal density is used, discarding draws more than
        2 standard deviations from the mean (which would otherwise occur
        roughly 5% of the time).
"""
a,b = -2, 2
sample = []
for i in range(len(phi)//2):
mean = phi[i*2]
std = phi[i*2 + 1]
if hasattr(self.sim_env, 'get_task_lower_bound'):
lower_bound = self.sim_env.get_task_lower_bound(i)
else:
lower_bound = 0.0001
if hasattr(self.sim_env, 'get_task_upper_bound'):
upper_bound = self.sim_env.get_task_upper_bound(i)
else:
upper_bound = 1000000000
# Make sure all samples belong to [lower_bound, upper_bound]
attempts = 0
obs = truncnorm.rvs(a, b, loc=mean, scale=std, size=size)
while np.any((obs<lower_bound) | (obs>upper_bound)):
obs[((obs < lower_bound) | (obs > upper_bound))] = truncnorm.rvs(a, b, loc=mean, scale=std, size=len(obs[((obs < lower_bound) | (obs > upper_bound))]))
attempts += 1
if attempts > 20:
obs[obs < lower_bound] = lower_bound
obs[obs > upper_bound] = upper_bound
print(f"Warning - Not all samples were above >= {lower_bound} or below {upper_bound} after 20 attempts. Setting them to their min/max bound values, respectively.")
sample.append(obs)
return np.array(sample).T
def _distance(self, target, sim_state):
if self.scaling:
d = np.linalg.norm(
self.scaler.transform(target.reshape(1,-1))
- self.scaler.transform(sim_state.reshape(1,-1))
)**2
else:
d = np.linalg.norm(target - sim_state)**2
return d
def _get_trajectories_indexes(self, n=None):
"""Returns starting index of each trajectory"""
terminals = self.T['terminals']
arr = np.where(terminals==True)[0]
arr = np.insert(-1, 1, arr) # Insert first trajectory
arr = arr[:-1] # Remove last terminal state (no trajectory after it)
arr = arr+1 # Starting state is the one after the previous episode has finished
if n is not None:
ts = np.random.choice(arr, size=n, replace=False)
else:
ts = list(arr)
return ts
def _get_ordered_n_trajectories(self, n=None):
"""Returns indexes of n trajectories
randomly sampled from self.T"""
terminals = self.T['terminals']
arr = np.where(terminals==True)[0]
arr = np.insert(-1, 1, arr) # Insert first trajectory
arr = arr[:-1] # Remove last terminal state (no trajectory after it)
arr = arr+1 # Starting state is the one after the previous episode has finished
if n is not None:
ts = np.random.choice(arr, size=n, replace=False)
else:
ts = list(arr)
transitions = []
for t in ts:
duration = np.argmax(self.T['terminals'][t:])
for toadd in range(t, t+duration+1):
transitions.append(toadd)
return transitions
def _get_subset_sparse_transitions(self, n):
if self.t_length < 1:
raise ValueError('Invalid lambda value')
if n < 1:
raise ValueError('Invalid number of transitions')
c = 0
valid_ts = []
size = len(self.T['observations'])
while c < n:
t = np.random.randint(0, size-self.t_length)
valid = True
for i in range(t, t+self.t_length):
if self.T['terminals'][i]:
valid = False
break
if not valid:
continue
valid_ts.append(t)
c+=1
return valid_ts
def __get_ng_optimizer(self, opt_string):
"""Get Nevergrad optimizer
https://facebookresearch.github.io/nevergrad/optimization.html#choosing-an-optimizer
"""
opts = {
'oneplusone': ng.optimizers.OnePlusOne, # simple robust method for continuous parameters with num_workers < 8.
'bayesian': ng.optimizers.BO, # Bayesian optimization
'twopointsde': ng.optimizers.TwoPointsDE, # excellent in many cases, including very high num_workers
'pso': ng.optimizers.PSO, # excellent in terms of robustness, high num_workers ok
'tbpsa': ng.optimizers.TBPSA, # excellent for problems corrupted by noise, in particular overparameterized (neural) ones; very high num_workers ok).
'random': ng.optimizers.RandomSearch, # the classical random search baseline; don’t use softmax with this optimizer.
'meta': ng.optimizers.NGOpt, # “meta”-optimizer which adapts to the provided settings (budget, number of workers, parametrization) and should therefore be a good default.
'cma': ng.optimizers.CMA # CMA-ES (https://en.wikipedia.org/wiki/CMA-ES)
}
if opt_string not in opts:
raise NotImplementedError('Optimizer not found')
return opts[opt_string]
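# ----------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not from the original repo). It shows the
# offline-dataset layout that set_offline_dataset() expects; `MyRandomizableEnv`
# is a placeholder for any gym-style environment exposing get_task/set_task and
# the sim-state hooks used above, so the DROPO calls are left commented out.
if __name__ == '__main__':
    T_example = {
        'observations':      np.zeros((500, 11)),          # (t, n) current states
        'next_observations': np.zeros((500, 11)),          # (t, n) next states
        'actions':           np.zeros((500, 3)),           # (t, a) commanded actions
        'terminals':         np.zeros(500, dtype=bool),    # (t,) episode-end flags
    }
    # dropo = Dropo(sim_env=MyRandomizableEnv(), t_length=5, seed=0)
    # dropo.set_offline_dataset(T_example, n=10)
    # bounds, loss, elapsed, eps = dropo.optimize_dynamics_distribution(
    #     'cma', budget=1000, sample_size=100, now=1, normalize=True, logstdevs=True)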
|
StarcoderdataPython
|
5108300
|
<gh_stars>0
#print('__init__')
import sys
import importlib
def reload():
# print('reload')
importlib.reload(formatter)
importlib.reload(functionmaker)
importlib.reload(logger)
importlib.reload(mawk)
from . import arguments, formatter, functionmaker, logger, mawk, utils
#from .__main__ import main
#print('leaving init')
|
StarcoderdataPython
|
213600
|
<filename>OSINT-Reconnaissance/nwatch/nwatch.py
#!/usr/bin/python
#GNU GPLv3
# nWatch.py - handy tool for host discovery, portscanning and operating system fingerprinting.
# Copyright (C) <2016> <<NAME>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import logging
from colorama import Fore, init, Style
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
import time
import nmap
from socket import AF_INET, AF_INET6, inet_ntop
from ctypes import *
import ctypes, ctypes.util
#c-lib load
libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
#structs
class struct_sockaddr(Structure):
_fields_ = [
('sa_family', c_ushort),('sa_data', c_byte * 14),]
class struct_sockaddr_in(Structure):
_fields_ = [
('sin_family', c_ushort),('sin_port', c_uint16),('sin_addr', c_byte * 4)]
class struct_sockaddr_in6(Structure):
_fields_ = [
('sin6_family', c_ushort),
('sin6_port', c_uint16),
('sin6_flowinfo', c_uint32),
('sin6_addr', c_byte * 16),
('sin6_scope_id', c_uint32)]
class union_ifa_ifu(Union):
_fields_ = [('ifu_broadaddr', POINTER(struct_sockaddr)),('ifu_dstaddr', POINTER(struct_sockaddr)),]
class struct_ifaddrs(Structure):
pass
struct_ifaddrs._fields_ = [
('ifa_next', POINTER(struct_ifaddrs)),
('ifa_name', c_char_p),
('ifa_flags', c_uint),
('ifa_addr', POINTER(struct_sockaddr)),
('ifa_netmask', POINTER(struct_sockaddr)),
('ifa_ifu', union_ifa_ifu),
('ifa_data', c_void_p),]
class IFAGTR(object):
def __init__(self, name):
self.name = name
self.addresses = {}
def __str__(self):
return "%s<splitter>%s<splitter>%s" % (
self.name,
self.addresses.get(AF_INET)[0],
self.addresses.get(AF_INET6)[0])
def ARPscan(interface,target):
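    # Broadcasts an ARP who-has for every address in <target> (e.g. a /24 subnet)
    # on <interface>; returns a dict mapping responding IPv4 addresses to MACs.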
hosts = {}
try:
ans, unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=target), timeout=4, verbose=0, iface=interface)
for i in range(0,len(ans)):
hosts[str(ans[i][1].psrc)] = str(ans[i][1].hwsrc)
except Exception, ex:
try:
print "["+Fore.RED+"-"+Style.RESET_ALL+"] Exception('%s') occured\n\t%s-> Errno : %d\n\t-> Error : %s"%(type(ex).__name__,Style.DIM,ex.args[0],ex.args[1])
except:
print "["+Fore.RED+"-"+Style.RESET_ALL+"] %s"%(str(ex))
sys.exit()
return hosts
def RFA(sa):
SA_FAMILY, SIN_ADDR= sa.sa_family, None
if SA_FAMILY == AF_INET:
sa = cast(pointer(sa), POINTER(struct_sockaddr_in)).contents
SIN_ADDR = inet_ntop(SA_FAMILY, sa.sin_addr)
elif SA_FAMILY == AF_INET6:
sa = cast(pointer(sa), POINTER(struct_sockaddr_in6)).contents
SIN_ADDR = inet_ntop(SA_FAMILY, sa.sin6_addr)
return SA_FAMILY, SIN_ADDR
def DHCPDiscover():
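    # Sends a DHCP DISCOVER broadcast and returns the source IPs of any servers
    # that reply; used later to flag hosts acting as DHCP servers.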
DHCPlst=[]
conf.checkIPaddr = False
fam,hw = get_if_raw_hwaddr(conf.iface)
dhcp_discover = Ether(dst="ff:ff:ff:ff:ff:ff")/IP(src="0.0.0.0",dst="255.255.255.255")/UDP(sport=68,dport=67)/BOOTP(chaddr=hw)/DHCP(options=[("message-type","discover"),"end"])
ans, unans = srp(dhcp_discover, multi=False, verbose=False)
for i in ans: DHCPlst.append(i[1][IP].src)
return DHCPlst
def pIFGen(ifap):
ifa = ifap.contents
while True:
yield ifa
if not ifa.ifa_next:break
ifa = ifa.ifa_next.contents
def getInterfaces():
ifap = POINTER(struct_ifaddrs)()
if libc.getifaddrs(pointer(ifap)):raise OSError(get_errno())
try:
IFADCT = {}
for ifa in pIFGen(ifap):
name = ifa.ifa_name.decode("UTF-8")
i = IFADCT.get(name)
if not i:i = IFADCT[name] = IFAGTR(name)
SA_FAMILY, SIN_ADDR = RFA(ifa.ifa_addr.contents)
if SIN_ADDR:
if SA_FAMILY not in i.addresses:i.addresses[SA_FAMILY] = list()
i.addresses[SA_FAMILY].append(SIN_ADDR)
return IFADCT.values()
finally:
libc.freeifaddrs(ifap)
def pgrntIF():
count = 1
LIF = getInterfaces()
print ""
print "-"*90
print "|"+Fore.YELLOW+" Sl-no"+Style.RESET_ALL+" | "+Fore.YELLOW+"Interface name "+Style.RESET_ALL+"| "+\
Fore.YELLOW+"IPv4-address"+Style.RESET_ALL+" |%s"%(" "*14)+Fore.YELLOW+"IPv6-address%s"%(" "*14)+Style.RESET_ALL+"|"
print "-"*90
for i in LIF:
rdata = str(i).split("<splitter>")
rdata[0] = rdata[0].center(16,' ')
rdata[1] = rdata[1].center(22,' ')
rdata[2] = rdata[2].center(40,' ')
if '127.' in rdata[1]:rdata.append(Fore.RED+"<= DO NOT USE LOCALHOST"+Style.RESET_ALL)
else:rdata.append(" ")
rdata = '|'.join(rdata)
print '|'+str(count).center(7,' ')+'|'+rdata
count += 1
print "-"*90
choice = ""
while 1:
try:
choice = int(raw_input("choose an interface> "))
if(choice<=len(LIF)):
print "["+Fore.YELLOW+"*"+Style.RESET_ALL+"] Interface => %s"%(str(LIF[choice-1]).split('<splitter>')[0].replace(' ', ''))
break
else:
print "["+Fore.RED+"-"+Style.RESET_ALL+"] Invalid choice"
except KeyboardInterrupt:
print "\n["+Fore.YELLOW+"!"+Style.RESET_ALL+"] Exiting..."
sys.exit()
except:
print "["+Fore.RED+"-"+Style.RESET_ALL+"] Invalid choice"
return str(LIF[choice-1]).split("<splitter>")[0].replace(' ',''), str(LIF[choice-1]).split("<splitter>")[1].replace(' ','')
def prettyPrint(host,mac, nFP, isdhcp):
print "-"*(len(host)+4)
print ("| "+Fore.GREEN+str(host)+Style.RESET_ALL+" |")
print "-"*(len(host)+4)
print (" "*((len(host)+4)/2) + "|_ "+Fore.GREEN+Style.DIM+"MAC"+Style.RESET_ALL+" : %s"%(mac))
hostname=Fore.YELLOW+"-unknown-"
if(nFP[host].hostname()!=""):
hostname=nFP[host].hostname()
print (" "*((len(host)+4)/2) + "|_ "+Fore.GREEN+Style.DIM+"Hostname"+Style.RESET_ALL+" : %s"%(hostname))
if isdhcp:
print (" "*((len(host)+4)/2) + "|_ "+Fore.GREEN+Style.DIM+"DHCP server"+Style.RESET_ALL+" : True")
print (" "*((len(host)+4)/2) + "|_ "+Fore.GREEN+Style.DIM+"State"+Style.RESET_ALL+" : %s"%(nFP[host].state()))
if nFP[host].all_protocols():
print (" "*((len(host)+4)/2) + "|_ "+Fore.GREEN+Style.DIM+"Ports"+Style.RESET_ALL)
for proto in nFP[host].all_protocols():
ports = list(nFP[host][proto].keys())
ports.sort()
print(" "*((len(host)+4)/2) + "|"+'\t'+"["+Fore.GREEN+Style.DIM+"+"+Style.RESET_ALL+'] Protocol : %s' % proto)
print(" "*((len(host)+4)/2) + "|"+'\t\tPort\t\tState')
print(" "*((len(host)+4)/2) + "|"+'\t\t====\t\t=====')
for port in ports:
print(" "*((len(host)+4)/2) + "|"+'\t\t%s\t\t%s' % (port, nFP[host][proto][port]['state']))
else:
print (" "*((len(host)+4)/2) + "|_ "+Fore.GREEN+Style.DIM+"Ports"+Style.RESET_ALL+Style.RESET_ALL+" : %s"%((Fore.YELLOW+"-none-")))
print (" "*((len(host)+4)/2) + "|_ "+Fore.GREEN+Style.DIM+"OS fingerprinting"+Style.RESET_ALL)
if nFP[host].has_key('osclass'):
for osclass in nFP[host]['osclass']:
print('\t\t'+"["+Fore.GREEN+Style.DIM+"+"+Style.RESET_ALL+"] Type : {0}".format(osclass['type']))
print('\t\t Vendor : {0}'.format(osclass['vendor']))
print('\t\t OS-Family : {0}'.format(osclass['osfamily']))
print('\t\t OS-Gen : {0}'.format(osclass['osgen']))
print('\t\t Accuracy : {0}%'.format(osclass['accuracy']))
return True
elif nFP[host].has_key('osmatch'):
for osmatch in nFP[host]['osmatch']:
print('\t\t'+"["+Fore.GREEN+Style.DIM+"+"+Style.RESET_ALL+"] Name : {0} (accuracy {1}%)".format(osmatch['name'],osmatch['accuracy']))
return True
elif nFP[host].has_key('fingerprint'):
print('\t\t* Fingerprint : {0}'.format(nFP[host]['fingerprint']))
return True
def postAS(hostslist):
hosts = [host for host, x in hostslist.items()]
macs = [mac for x, mac in hostslist.items()]
try:
nm = nmap.PortScanner()
except Exception, ex:
try:
print "["+Fore.RED+"-"+Style.RESET_ALL+"] Exception('%s') occured\n\t%s-> Errno : %d\n\t-> Error : %s"%(type(ex).__name__,Style.DIM,ex.args[0],ex.args[1])
except:
print "["+Fore.RED+"-"+Style.RESET_ALL+"] %s"%(str(ex))
sys.exit(0)
try:
FiFlag, isDHCP = False, False
isDHCPlst = []
try:
isDHCPlst=DHCPDiscover()
except:
pass
for host, mac in hostslist.items():
if host in isDHCPlst:
isDHCP = True
else:
isDHCP = False
nm.scan(str(host), arguments="-O")
FiFlag = prettyPrint(host,mac, nm, isDHCP)
if not(FiFlag):
print "["+Fore.YELLOW+"*"+Style.RESET_ALL+"] Warning : couldn't detect to OS"
except Exception, ex:
print "["+Fore.RED+"-"+Style.RESET_ALL+"] Error in OS fingerprinting, continuing..."
if __name__ == '__main__':
init(autoreset=True)
print '''
888 888 888 888
888 o 888 888 888
888 d8b 888 888 888
88888b. 888 d888b 888 8888b. 888888 .d8888b 88888b.
888 "88b 888d88888b888 "88b 888 d88P" 888 "88b
888 888 88888P Y88888 .d888888 888 888 888 888
888 888 8888P Y8888 888 888 Y88b. Y88b. 888 888
888 888 888P Y888 "Y888888 "Y888 "Y8888P 888 888
'''+'['+Fore.YELLOW+'&'+Style.RESET_ALL+'] Created by suraj ('+Fore.RED+'#r00t'+Style.RESET_ALL+')'
print "["+Fore.GREEN+"+"+Style.RESET_ALL+"] Started at %s"%(time.strftime("%X"))
print "["+Fore.YELLOW+"*"+Style.RESET_ALL+"] Choose a network interface"
iface, ip_addr = pgrntIF()
if not(ip_addr.startswith("127.")):
subnet = ip_addr+"/24"
else:
print "["+Fore.RED+"-"+Style.RESET_ALL+"] Cannot scan localhost("+Fore.YELLOW+"%s"%(ip_addr)+Style.RESET_ALL+"), exiting..."
sys.exit()
t1 = time.time()
print "["+Fore.YELLOW+"*"+Style.RESET_ALL+"] Scanning subnet(%s) on %s interface"%(subnet,iface)
hosts = ARPscan(iface,subnet)
postAS(hosts)
t2 = time.time()
print "\n["+Fore.YELLOW+"*"+Style.RESET_ALL+"] Scanning took %d seconds, task completed at %s."%(t2-t1,time.strftime('%X'))
sys.exit()
|
StarcoderdataPython
|
4859078
|
<gh_stars>10-100
#!/usr/bin/env python3
# reproduce_character_models.py
# Reproduce predictive modeling of characters.
# This script assumes that you have subset.tar.gz
# in the parent directory of the /train_models directory
# directory. It also expects to have a /temp directory
# as a sibling (at the same level as /train_models).
# When it's asked to create a model it extracts
# characters from the tar.gz file and puts them
# in temp. Inefficient? Yes! But it means I can
# use versatiletrainer without having to edit it
# to take data from a tarfile.
# It also assumes that character metadata is in
# /metadata/balanced_character_subset.csv.
# Finally, it wants a folder '../models', again
# placed as a sibling, where it can put various
# intermediate lexicons and metadata.
import csv, os, sys, pickle, math, tarfile
import versatiletrainer as train
import pandas as pd
import numpy as np
from scipy.stats import pearsonr
def select_subset_to_model(modelname, metadatapath, numexamples, startdate, enddate):
'''
Creates metadata for a model of gender trained on a balanced
sample of the whole timeline.
In keeping with Python practice, the date range is inclusive at the bottom,
but not the top.
It returns a path to the metadata created.
'''
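    # For example, select_subset_to_model('wholetimeline', metapath, 800, 1780, 2010)
    # samples 800 'm' and 800 'f' characters with 1780 <= firstpub < 2010, writes
    # ../models/wholetimeline_meta.csv, and returns that path plus the sampled docids.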
allmeta = pd.read_csv(metadatapath)
timeslice = allmeta[(allmeta.firstpub >= startdate) & (allmeta.firstpub < enddate)]
m = timeslice[timeslice.gender == 'm']
f = timeslice[timeslice.gender == 'f']
msample = m.sample(n = numexamples)
fsample = f.sample(n = numexamples)
general_sample = pd.concat([msample, fsample])
outpath = '../models/' + modelname + '_meta.csv'
general_sample.to_csv(outpath)
return outpath, general_sample.docid
def authgender_subset_to_model(modelname, agender, metadatapath, numexamples, startdate, enddate):
'''
Creates metadata for a subset of characters drawn only from books
written by authors of a specified gender (agender).
It returns a path to the metadata created.
'''
allmeta = pd.read_csv(metadatapath)
timeslice = allmeta[(allmeta.authgender == agender) & (allmeta.firstpub >= startdate) & (allmeta.firstpub < enddate)]
m = timeslice[timeslice.gender == 'm']
f = timeslice[timeslice.gender == 'f']
msample = m.sample(n = numexamples)
fsample = f.sample(n = numexamples)
general_sample = pd.concat([msample, fsample])
outpath = '../models/' + modelname + '_meta.csv'
general_sample.to_csv(outpath)
return outpath, general_sample.docid
def subset_to_predict_authgender(modelname, metadatapath, num, startdate, enddate):
'''
Creates metadata that can be used to actually predict authgender.
It returns a path to the metadata created.
'''
allmeta = pd.read_csv(metadatapath)
timeslice = allmeta[(allmeta.firstpub >= startdate) & (allmeta.firstpub < enddate)]
mbym = timeslice[(timeslice.authgender == 'm') & (timeslice.gender == 'm')]
fbym = timeslice[(timeslice.authgender == 'm') & (timeslice.gender == 'f')]
mbyf = timeslice[(timeslice.authgender == 'f') & (timeslice.gender == 'm')]
fbyf = timeslice[(timeslice.authgender == 'f') & (timeslice.gender == 'f')]
general_sample = pd.concat([mbym.sample(n = num), fbym.sample(n = num),
mbyf.sample(n = num), fbyf.sample(n = num)])
outpath = '../models/' + modelname + '_meta.csv'
general_sample['tags'] = general_sample.authgender
# that's the line that actually ensures we are predicting
# author gender rather than character gender
general_sample.to_csv(outpath)
return outpath, np.mean(general_sample.firstpub)
def refresh_temp(list_of_docids):
'''
Empties the temporary folder and restocks it, using a list
of docids that are in subset.tar.gz.
'''
folder = '../temp'
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception as e:
print(e)
with tarfile.open('../subset.tar.gz', 'r:gz') as tar:
for d in list_of_docids:
tarmember = 'charactersubset/' + d + '.tsv'
destination = '../temp/' + d + '.tsv'
data = tar.extractfile(tarmember).read().decode('utf-8')
with open(destination, mode = 'w', encoding = 'utf-8') as f:
f.write(data)
def gridsearch_a_model(metadatapath, sourcefolder, c_range, ftstart, ftend, ftstep, positive_tags = ['f'], negative_tags = ['m']):
''' Function does a gridsearch to identify an optimal number of features and setting of
the regularization constant; then produces that model. Note that we do not use this for
models of specific decades. Just initially for model selection.'''
modelname = metadatapath.replace('.//models/', '').replace('_meta.csv', '')
extension = '.tsv'
vocabpath = metadatapath.replace('_meta', '_vocab')
if os.path.exists(vocabpath):
print('Vocabulary for ' + modelname + ' already exists. Using it.')
outputpath = metadatapath.replace('_meta', '')
## EXCLUSIONS. # not used in this project
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
sizecap = 2000
# CLASSIFY CONDITIONS # not used in this project
testconditions = set()
datetype = "firstpub"
numfeatures = ftend
regularization = .000075
# linting the code would get rid of regularization, which is at this
# point an unused dummy parameter
paths = (sourcefolder, extension, metadatapath, outputpath, vocabpath)
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
modelparams = 'logistic', 12, ftstart, ftend, ftstep, c_range
matrix, rawaccuracy, allvolumes, coefficientuples = train.tune_a_model(paths, exclusions, classifyconditions, modelparams)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
tiltaccuracy = train.diachronic_tilt(allvolumes, 'linear', [])
print("Divided with a line fit to the data trend, it's ", str(tiltaccuracy))
def crossvalidate_one_model(metadatapath, sourcefolder, c_range, ftstart, ftend, ftstep, positive_tags = ['f'], negative_tags = ['m']):
    '''Cross-validates a single model with fixed settings and returns its raw accuracy.'''
modelname = metadatapath.replace('.//models/', '').replace('_meta.csv', '')
extension = '.tsv'
vocabpath = metadatapath.replace('_meta', '_vocab')
if os.path.exists(vocabpath):
os.unlink(vocabpath)
# we rebuild vocab each time
outputpath = metadatapath.replace('_meta', '')
## EXCLUSIONS. # not used in this project
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
sizecap = 2000
# CLASSIFY CONDITIONS # not used in this project
testconditions = set()
datetype = "firstpub"
numfeatures = ftend
regularization = .000075
# linting the code would get rid of regularization, which is at this
# point an unused dummy parameter
paths = (sourcefolder, extension, metadatapath, outputpath, vocabpath)
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
modelparams = 'logistic', 12, ftstart, ftend, ftstep, c_range
rawaccuracy, allvolumes, coefficientuples = train.crossvalidate_single_model(paths, exclusions, classifyconditions, modelparams)
print(rawaccuracy)
return rawaccuracy
def crossvalidate_across_L2_range(metadatapath, sourcefolder, c_range, ftstart, ftend, ftstep, positive_tags = ['f'], negative_tags = ['m']):
'''
For a given set of characters, crossvalidates a model at multiple
L2 settings, and returns all the accuracies.
'''
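    # The returned dict maps each L2 setting to its cross-validated accuracy,
    # e.g. {0.0001: 0.74, 0.0003: 0.76} (values illustrative only).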
modelname = metadatapath.replace('.//models/', '').replace('_meta.csv', '')
extension = '.tsv'
vocabpath = metadatapath.replace('_meta', '_vocab')
if os.path.exists(vocabpath):
os.unlink(vocabpath)
# we rebuild vocab each time
outputpath = metadatapath.replace('_meta', '')
## EXCLUSIONS. # not used in this project
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
sizecap = 2000
# CLASSIFY CONDITIONS # not used in this project
testconditions = set()
datetype = "firstpub"
numfeatures = ftend
regularization = .000075
# linting the code would get rid of regularization, which is at this
# point an unused dummy parameter
paths = (sourcefolder, extension, metadatapath, outputpath, vocabpath)
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
accuracydict = dict()
for c_setting in c_range:
cparam = [c_setting]
modelparams = 'logistic', 10, ftstart, ftend, ftstep, cparam
rawaccuracy, allvolumes, coefficientuples = train.crossvalidate_single_model(paths, exclusions, classifyconditions, modelparams)
accuracydict[c_setting] = rawaccuracy
return accuracydict
def applymodel(modelpath, metapath, outpath):
''' This function applies a specified model (modelpath) to a specified
metadata set (metapath), and sends the results to outpath.
'''
sourcefolder = '/Users/tunder/data/character_subset/'
extension = '.tsv'
metadatapath = metapath = '../metadata/balanced_character_subset.csv'
newmetadict = train.apply_pickled_model(modelpath, sourcefolder, extension, metadatapath)
print('Got predictions for that model.')
newmetadict.to_csv(outpath)
def correlate_models(firstpath, secondpath):
one = pd.read_csv(firstpath, index_col = 'docid')
two = pd.read_csv(secondpath, index_col = 'docid')
justpredictions = pd.concat([one['logistic'], two['logistic']], axis=1, keys=['one', 'two'])
justpredictions.dropna(inplace = True)
r, p = pearsonr(justpredictions.one, justpredictions.two)
return r
def comparison(selfmodel, othermodel, modelname):
totalvolumes = 0
right = 0
for v in selfmodel.index:
realgenre = selfmodel.loc[v, 'realclass']
v = str(v)
otherprediction = othermodel.loc[v, modelname]
if realgenre > .5 and otherprediction > 0.5:
right += 1
elif realgenre < .5 and otherprediction < 0.5:
right += 1
totalvolumes +=1
return totalvolumes, right
def getacc(filelist):
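    # Pools true/false positives and negatives across all prediction files in
    # filelist and returns the overall accuracy, (tp + tn) / total.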
allofem = 0
allright = 0
for afile in filelist:
df = pd.read_csv(afile)
totalcount = len(df.realclass)
tp = sum((df.realclass > 0.5) & (df.logistic > 0.5))
tn = sum((df.realclass <= 0.5) & (df.logistic <= 0.5))
fp = sum((df.realclass <= 0.5) & (df.logistic > 0.5))
fn = sum((df.realclass > 0.5) & (df.logistic <= 0.5))
assert totalcount == (tp + fp + tn + fn)
allofem += totalcount
allright += (tp + tn)
return allright / allofem
if __name__ == '__main__':
args = sys.argv
command = args[1]
metapath = '../metadata/balanced_character_subset.csv'
sourcefolder = '/Users/tunder/data/character_subset/'
if command == 'optimize_general_model':
c_range = [.000003, .00001, .00003, .00009, .0003, .0009, .002, .004, .008]
featurestart = 1000
featureend = 3200
featurestep = 100
generalmetapath, general_docids = select_subset_to_model('wholetimeline', metapath,
numexamples = 800, startdate = 1780, enddate = 2010)
gridsearch_a_model(generalmetapath, sourcefolder, c_range,
featurestart, featureend, featurestep)
if command == 'optimize_fifty_years':
# this option creates a model that can be used for comparison to
# the model of fictional prestige, which spans only 1850-1950
c_range = [.0001]
featurestart = 2450
featureend = 2700
featurestep = 50
generalmetapath, general_docids = select_subset_to_model('fiftypost1950', metapath, numexamples = 1500, startdate = 1950, enddate = 2050)
# The number of examples is higher here, because we want this model to be maximally
# accurate, and we're not trying to use this as a guide for other 800-character
# models.
gridsearch_a_model(generalmetapath, sourcefolder, c_range,
featurestart, featureend, featurestep)
elif command == 'test_decades':
c_range = [.0004]
featurestart = 2300
featureend = 2300
featurestep = 100
with open('../dataforR/speechlessdecademodels.tsv', mode = 'w', encoding = 'utf-8') as f:
f.write('decade\taccuracy\n')
for dec in range (1790, 2010, 10):
if dec == 1790:
floor = 1780
ceiling = 1800
else:
floor = dec
ceiling = dec + 10
modelname = 'decade' + str(dec)
for i in range(15):
decademetapath, docids = select_subset_to_model(modelname, metapath, numexamples = 800,
startdate = floor, enddate = ceiling)
accuracy = crossvalidate_one_model(decademetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
f.write(str(dec) + '\t' + str(accuracy) + '\n')
elif command == 'optimize_20c':
c_range = [.000003, .00001, .00003, .00009, .0003, .0009, .002, .004, .008]
featurestart = 1100
featureend = 3000
featurestep = 100
generalmetapath, general_docids = select_subset_to_model('wholetwentieth', metapath,
numexamples = 800, startdate = 1900, enddate = 2000)
gridsearch_a_model(generalmetapath, sourcefolder, c_range,
featurestart, featureend, featurestep)
elif command == 'optimize_19c':
c_range = [.000003, .00001, .00003, .00009, .0003, .0009, .002, .004, .008]
featurestart = 1100
featureend = 3000
featurestep = 100
generalmetapath, general_docids = select_subset_to_model('wholenineteenth', metapath,
numexamples = 800, startdate = 1800, enddate = 1900)
gridsearch_a_model(generalmetapath, sourcefolder, c_range,
featurestart, featureend, featurestep)
elif command == 'optimize_thirty':
decade = int(args[2])
c_range = [.000003, .00001, .00003, .00009, .0003, .0009, .002, .004, .008]
featurestart = 1900
featureend = 3000
featurestep = 100
modelname = 'optimalthirty' + str(decade)
generalmetapath, general_docids = select_subset_to_model(modelname, metapath,
numexamples = 1500, startdate = decade - 10, enddate = decade + 20)
gridsearch_a_model(generalmetapath, sourcefolder, c_range,
featurestart, featureend, featurestep)
elif command == 'decade_grid':
# This is the function I finally used. Keeps the number of features
# fixed at 2200, but generates a new lexicon for each decade (and each
# sample of 800 characters within the decade). Tests each decade at
# multiple L2 settings, and records them all, so we can take the
# optimal setting but also figure out how much of a difference that's
# making.
c_range = [.00003, .0001, .0003, .001]
featurestart = 2200
featureend = 2200
featurestep = 100
with open('../dataforR/decadegrid.tsv', mode = 'w', encoding = 'utf-8') as f:
f.write('decade\tL2\taccuracy\titer\n')
for dec in range (1790, 2010, 10):
if dec == 1790:
floor = 1780
ceiling = 1800
else:
floor = dec
ceiling = dec + 10
modelname = 'decade' + str(dec)
for i in range(15):
decademetapath, docids = select_subset_to_model(modelname, metapath, numexamples = 800,
startdate = floor, enddate = ceiling)
accuracydict = crossvalidate_across_L2_range(decademetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
for L2setting, accuracy in accuracydict.items():
f.write(str(dec) + '\t' + str(L2setting) + '\t' + str(accuracy) + '\t' + str(i) + '\n')
elif command == 'decade_grid_for_differentiation_plot':
        # Builds a single thirty-year model centered on each decade (a 1500-character
        # sample, 2300 features, one L2 setting) and prints its accuracy. These
        # models feed the differentiation plot rather than the decade grid above.
c_range = [.0001]
featurestart = 2300
featureend = 2300
featurestep = 100
for dec in range (1790, 2010, 10):
floor = dec - 10
ceiling = dec + 20
modelname = 'thirty' + str(dec)
decademetapath, docids = select_subset_to_model(modelname, metapath, numexamples = 1500,
startdate = floor, enddate = ceiling)
accuracy = crossvalidate_one_model(decademetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
print(str(dec) + '\t' + str(accuracy) + '\n')
elif command == 'auth_specific_charpredict_grid':
        # Predicts character gender separately for male-authored and female-authored
        # books, in twenty-year windows, keeping the number of features fixed at 2200
        # while testing several L2 settings. Each window/author-gender cell is sampled
        # five times so we can see how much the sampling and the L2 choice matter.
c_range = [.00003, .0001, .0003, .001]
featurestart = 2200
featureend = 2200
featurestep = 100
metapath = '../metadata/balanced_authgender_subset.csv'
sourcefolder = '/Users/tunder/data/authgender_subset/'
with open('../dataforR/auth_specific_charpredict.tsv', mode = 'w', encoding = 'utf-8') as f:
f.write('decade\tauthgender\tL2\taccuracy\titer\n')
for dec in range (1800, 2000, 20):
if dec == 1790:
floor = 1780
ceiling = 1800
else:
floor = dec
ceiling = dec + 20
for agender in ['m', 'f']:
modelname = agender + 'author' + '_' + str(dec)
for i in range(5):
decademetapath, docids = authgender_subset_to_model(modelname, agender, metapath, numexamples = 800,
startdate = floor, enddate = ceiling)
accuracydict = crossvalidate_across_L2_range(decademetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
for L2setting, accuracy in accuracydict.items():
f.write(str(dec) + '\t' + agender + '\t' + str(L2setting) + '\t' + str(accuracy) + '\t' + str(i) + '\n')
elif command == 'predict_authgender':
        # Predicts the author's gender (rather than the character's), using 2500
        # features and several L2 settings, in seventeen-year windows. Each window
        # is sampled nine times, balancing author gender against character gender
        # in a 2x2 contingency (see the note on the num parameter below).
c_range = [.0001, .0003, .001, .003]
featurestart = 2500
featureend = 2500
featurestep = 100
metapath = '../metadata/balanced_authgender_subset.csv'
sourcefolder = '/Users/tunder/data/authgender_subset/'
with open('../dataforR/authgender_predictions.tsv', mode = 'w', encoding = 'utf-8') as f:
f.write('meandate\tL2\taccuracy\titer\n')
for dec in range (1795, 2010, 17):
if dec == 1790:
floor = 1780
ceiling = 1800
else:
floor = dec
ceiling = dec + 17
modelname = 'predict_authgender' + '_' + str(dec)
for i in range(9):
decademetapath, meandate = subset_to_predict_authgender(modelname, metapath, num = 400,
startdate = floor, enddate = ceiling)
# note that in this case num is not the total number of male or female examples,
# but the number for each cell of a 2x2 contingency matrix of author gender
# versus character gender so 400 produces 1600 total instances
accuracydict = crossvalidate_across_L2_range(decademetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
for L2setting, accuracy in accuracydict.items():
f.write(str(meandate) + '\t' + str(L2setting) + '\t' + str(accuracy) + '\t' + str(i) + '\n')
elif command == 'optimize_authgender':
c_range = [.000003, .00001, .00003, .00009, .0003, .0009, .002, .004, .008, .03, 1]
featurestart = 800
featureend = 3600
featurestep = 100
metapath = '../metadata/balanced_authgender_subset.csv'
sourcefolder = '/Users/tunder/data/authgender_subset/'
generalmetapath, general_docids = subset_to_predict_authgender('general_authgender', metapath,
num = 400, startdate = 1780, enddate = 2010)
gridsearch_a_model(generalmetapath, sourcefolder, c_range,
featurestart, featureend, featurestep)
elif command == 'onlywomenwriters':
c_range = [.0003]
featurestart = 2500
featureend = 2600
featurestep = 100
womensmetapath, docids = authgender_subset_to_model('onlywomenwritersC', 'f', metapath, numexamples = 1500, startdate = 1800, enddate = 2000)
gridsearch_a_model(womensmetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
elif command == 'onlymalewriters':
c_range = [.0003]
featurestart = 2500
featureend = 2600
featurestep = 100
womensmetapath, docids = authgender_subset_to_model('onlymalewritersC', 'm', metapath, numexamples = 1500, startdate = 1800, enddate = 2000)
gridsearch_a_model(womensmetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
elif command == 'compare_models':
men = ['onlymalewriters', 'onlymalewritersB', 'onlymalewritersC']
women = ['onlywomenwriters', 'onlywomenwritersB', 'onlywomenwritersC']
# test_subset_path, test_docids = select_subset_to_model('test_metadata', metapath, numexamples = 1000, startdate = 1800, enddate = 2000)
test_subset_path = '../models/test_metadata_meta.csv'
generaloutpath = '/Users/tunder/Dropbox/python/character/future_work/appliedmodels/'
masculineperspective = []
feminineperspective = []
for m in men:
modelpath = '../models/' + m + '.pkl'
outpath = generaloutpath + m + '.results'
if not os.path.exists(outpath):
applymodel(modelpath, test_subset_path, outpath)
masculineperspective.append(outpath)
for w in women:
modelpath = '../models/' + w + '.pkl'
outpath = generaloutpath + w + '.results'
if not os.path.exists(outpath):
applymodel(modelpath, test_subset_path, outpath)
feminineperspective.append(outpath)
print('among men:')
r = []
r.append(correlate_models(masculineperspective[0], masculineperspective[1]))
r.append(correlate_models(masculineperspective[1], masculineperspective[2]))
r.append(correlate_models(masculineperspective[0], masculineperspective[2]))
print(sum(r) / len(r))
print('among women:')
r = []
r.append(correlate_models(feminineperspective[0], feminineperspective[1]))
r.append(correlate_models(feminineperspective[1], feminineperspective[2]))
r.append(correlate_models(feminineperspective[0], feminineperspective[2]))
print(sum(r) / len(r))
print('between genders:')
r = []
r.append(correlate_models(masculineperspective[0], feminineperspective[0]))
r.append(correlate_models(masculineperspective[1], feminineperspective[0]))
r.append(correlate_models(masculineperspective[1], feminineperspective[1]))
r.append(correlate_models(masculineperspective[1], feminineperspective[2]))
r.append(correlate_models(masculineperspective[0], feminineperspective[2]))
r.append(correlate_models(masculineperspective[2], feminineperspective[2]))
print(sum(r) / len(r))
else:
print("I don't know that command.")
|
StarcoderdataPython
|
12827184
|
from tenx_missile import MissileLauncher
_VALID_CODES = ['DPRK', 'BOOM', 'ACME']
class Missile:
def __init__(self):
self._launcher = MissileLauncher()
def fire(self):
self._launcher.fire()
def launch_missile(missile, code):
if check_code(code):
missile.fire()
def check_code(code):
return code in _VALID_CODES
#############
# TEST CODE #
#############
# DUMMY WHOT?
def test_launch_missile_with_invalid_code():
pass # launch_missile(¿?¿?, 'INVALID')
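# Illustrative sketch (added for clarity; not part of the original file): the dummy
# test above could be exercised with a simple stub standing in for a real Missile.
# The stub class and the extra tests below are assumptions, not project code.
class _StubMissile:
    def __init__(self):
        self.fired = False
    def fire(self):
        self.fired = True
def test_launch_missile_with_valid_code_stub():
    missile = _StubMissile()
    launch_missile(missile, 'ACME')
    assert missile.fired
def test_launch_missile_with_invalid_code_stub():
    missile = _StubMissile()
    launch_missile(missile, 'INVALID')
    assert not missile.fired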
|
StarcoderdataPython
|
12816046
|
<filename>test/crs/test_crs_maker.py<gh_stars>1-10
import pytest
from pyproj.crs import (
BoundCRS,
CompoundCRS,
DerivedGeographicCRS,
GeographicCRS,
ProjectedCRS,
VerticalCRS,
)
from pyproj.crs.coordinate_operation import (
AlbersEqualAreaConversion,
LambertConformalConic2SPConversion,
RotatedLatitudeLongitudeConversion,
ToWGS84Transformation,
TransverseMercatorConversion,
UTMConversion,
)
from pyproj.crs.coordinate_system import Cartesian2DCS, Ellipsoidal3DCS, VerticalCS
from pyproj.crs.datum import CustomDatum
from pyproj.crs.enums import VerticalCSAxis
def test_make_projected_crs():
aeaop = AlbersEqualAreaConversion(0, 0)
pc = ProjectedCRS(conversion=aeaop, name="Albers")
assert pc.name == "Albers"
assert pc.type_name == "Projected CRS"
assert pc.coordinate_operation == aeaop
def test_make_geographic_crs():
gc = GeographicCRS(name="WGS 84")
assert gc.name == "WGS 84"
assert gc.type_name == "Geographic 2D CRS"
assert gc.to_authority() == ("OGC", "CRS84")
def test_make_geographic_3d_crs():
gcrs = GeographicCRS(ellipsoidal_cs=Ellipsoidal3DCS())
assert gcrs.type_name == "Geographic 3D CRS"
assert gcrs.to_authority() == ("IGNF", "WGS84GEODD")
def test_make_derived_geographic_crs():
conversion = RotatedLatitudeLongitudeConversion(o_lat_p=0, o_lon_p=0)
dgc = DerivedGeographicCRS(base_crs=GeographicCRS(), conversion=conversion)
assert dgc.name == "undefined"
assert dgc.type_name == "Geographic 2D CRS"
assert dgc.coordinate_operation == conversion
def test_vertical_crs():
vc = VerticalCRS(
name="NAVD88 height",
datum="North American Vertical Datum 1988",
geoid_model="GEOID12B",
)
assert vc.name == "NAVD88 height"
assert vc.type_name == "Vertical CRS"
assert vc.coordinate_system == VerticalCS()
assert vc.to_json_dict()["geoid_model"]["name"] == "GEOID12B"
@pytest.mark.parametrize(
"axis",
[
VerticalCSAxis.UP,
VerticalCSAxis.UP_FT,
VerticalCSAxis.DEPTH,
VerticalCSAxis.DEPTH_FT,
VerticalCSAxis.GRAVITY_HEIGHT_FT,
],
)
def test_vertical_crs__change_cs_axis(axis):
vc = VerticalCRS(
name="NAVD88 height",
datum="North American Vertical Datum 1988",
vertical_cs=VerticalCS(axis=axis),
)
assert vc.name == "NAVD88 height"
assert vc.type_name == "Vertical CRS"
assert vc.coordinate_system == VerticalCS(axis=axis)
def test_compound_crs():
vertcrs = VerticalCRS(
name="NAVD88 height",
datum="North American Vertical Datum 1988",
vertical_cs=VerticalCS(),
geoid_model="GEOID12B",
)
projcrs = ProjectedCRS(
name="NAD83 / Pennsylvania South",
conversion=LambertConformalConic2SPConversion(
latitude_false_origin=39.3333333333333,
longitude_false_origin=-77.75,
latitude_first_parallel=40.9666666666667,
latitude_second_parallel=39.9333333333333,
easting_false_origin=600000,
northing_false_origin=0,
),
geodetic_crs=GeographicCRS(datum="North American Datum 1983"),
cartesian_cs=Cartesian2DCS(),
)
compcrs = CompoundCRS(
name="NAD83 / Pennsylvania South + NAVD88 height", components=[projcrs, vertcrs]
)
assert compcrs.name == "NAD83 / Pennsylvania South + NAVD88 height"
assert compcrs.type_name == "Compound CRS"
assert compcrs.sub_crs_list[0].type_name == "Projected CRS"
assert compcrs.sub_crs_list[1].type_name == "Vertical CRS"
def test_bound_crs():
proj_crs = ProjectedCRS(conversion=UTMConversion(12))
bound_crs = BoundCRS(
source_crs=proj_crs,
target_crs="WGS 84",
transformation=ToWGS84Transformation(
proj_crs.geodetic_crs, 1, 2, 3, 4, 5, 6, 7
),
)
assert bound_crs.type_name == "Bound CRS"
assert bound_crs.source_crs.coordinate_operation.name == "UTM zone 12N"
assert bound_crs.coordinate_operation.towgs84 == [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
assert bound_crs.target_crs.name == "WGS 84"
def test_bound_crs__example():
proj_crs = ProjectedCRS(
conversion=TransverseMercatorConversion(
latitude_natural_origin=0,
longitude_natural_origin=15,
false_easting=2520000,
false_northing=0,
scale_factor_natural_origin=0.9996,
),
geodetic_crs=GeographicCRS(
datum=CustomDatum(ellipsoid="International 1909 (Hayford)")
),
)
bound_crs = BoundCRS(
source_crs=proj_crs,
target_crs="WGS 84",
transformation=ToWGS84Transformation(
proj_crs.geodetic_crs, -122.74, -34.27, -22.83, -1.884, -3.4, -3.03, -15.62
),
)
with pytest.warns(UserWarning):
assert bound_crs.to_dict() == {
"ellps": "intl",
"k": 0.9996,
"lat_0": 0,
"lon_0": 15,
"no_defs": None,
"proj": "tmerc",
"towgs84": [-122.74, -34.27, -22.83, -1.884, -3.4, -3.03, -15.62],
"type": "crs",
"units": "m",
"x_0": 2520000,
"y_0": 0,
}
|
StarcoderdataPython
|
1922838
|
<reponame>veritaem/Twitoff
"""
Initializes directory for Flask app
"""
from .app import create_app
APP = create_app()
|
StarcoderdataPython
|
6601417
|
<gh_stars>0
from flask import Blueprint, render_template
blueprint = Blueprint("viewer", __name__)
@blueprint.route("/viewer")
def viewer():
return render_template("viewer/viewer.html")
|
StarcoderdataPython
|
8002230
|
import time
import chipwhisperer as cw
def cwconnect(offset=1250, totalsamples=3000):
scope = cw.scope()
target = cw.target(scope)
# setup scope parameters
scope.gain.gain = 45
scope.adc.samples = int(totalsamples)
scope.adc.offset = int(offset)
scope.adc.basic_mode = "rising_edge"
scope.clock.clkgen_freq = 7370000
scope.clock.adc_src = "clkgen_x4"
scope.trigger.triggers = "tio4"
scope.io.tio1 = "serial_rx"
scope.io.tio2 = "serial_tx"
scope.io.hs2 = "clkgen"
target.baud = 38400
target.protver = '1.0'
target.key_cmd = 'k$KEY$\n'
target.go_cmd = 'p$TEXT$\n'
target.output_cmd = 'r$RESPONSE$\n'
return (cw, scope, target)
def measure_AES(scope, target, plaintext, key):
plaintext = [int(p) for p in plaintext]
key = [int(k) for k in key]
target.init()
# run aux stuff that should run before the scope arms here
target.reinit()
target.setModeEncrypt() # only does something for targets that support it
target.loadEncryptionKey(key)
target.loadInput(plaintext)
scope.arm()
# run aux stuff that should run after the scope arms here
timeout = 50
target.go()
# wait for target to finish
while target.isDone() is False and timeout:
timeout -= 1
time.sleep(0.01)
try:
ret = scope.capture()
if ret:
print('Timeout happened during acquisition')
except IOError as e:
print('IOError: %s' % str(e))
# run aux stuff that should happen after trace here
trace = list(scope.getLastTrace())
textout = list(target.readOutput())
return (textout, trace)
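if __name__ == "__main__":
    # Illustrative usage sketch (added for clarity; not part of the original script):
    # connect to the ChipWhisperer, capture a single AES trace, then disconnect.
    # The key and plaintext bytes below are arbitrary placeholder values.
    cw_mod, scope, target = cwconnect(offset=1250, totalsamples=3000)
    key = [0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6,
           0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C]
    plaintext = list(range(16))
    textout, trace = measure_AES(scope, target, plaintext, key)
    print('ciphertext bytes:', textout)
    print('trace samples captured:', len(trace))
    scope.dis()
    target.dis()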
|
StarcoderdataPython
|
11255846
|
# from web.web1.web3 import cal
#from web.web1.web3.cal import add
# from web.web1 import web3  # runs web3's __init__.py; the only calling style not supported here
# print(web3.cal.add(2,6))
from web.web1.web3 import cal
cal.add(3,8)
|
StarcoderdataPython
|
6444175
|
<filename>make_animation.py<gh_stars>0
from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import tifffile
import skimage.exposure
import scipy.ndimage.filters as filters
def ani_frame(images,firstIm,lastImage,subtract_first=True,filtering=None,
sigma=1.,out_name='demo.mp4',fps=10,dpi=100):
"""
images must be a numpy array
"""
n_images, dimX, dimY = images.shape
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
if filtering:
print("Filtering...")
#image = skimage.exposure.equalize_hist(image)
images = filters.gaussian_filter(images, sigma)
print("Done.")
if subtract_first:
image = images[firstIm+1] - images[firstIm]
        average = int(np.mean(image))  # np.int was removed from NumPy; use the builtin int
firstIm += 1
else:
image = images[firstIm]
im = ax.imshow(image,cmap='gray',interpolation='nearest')
#im.set_clim([0,1])
#fig.set_size_inches([5,5])
#plt.tight_layout()
def update_img(n):
try:
if subtract_first:
image = images[n] - images[firstIm] + average
else:
image = images[n]
#print(".",end="")
im.set_data(image)
#im.set_array(image)
except:
print(n)
return im
#legend(loc=0)
ani = animation.FuncAnimation(fig,update_img,range(firstIm,lastImage),
interval=10, blit=False)
writer = animation.writers['ffmpeg'](fps=fps)
ani.save(out_name,writer=writer,dpi=dpi)
plt.close(fig.number)
return ani
if __name__ == "__main__":
if True:
#mainDir = "/home/gf/Meas/Creep/CoFeB/Film/Irradiated"
#mainDir = "/home/gf/Meas/Creep/CoFeB/Film/Non-irradiated/Moon/run6"
mainDir = "/home/gf/Meas/Creep/CoFeB/Wires/Irradiated/run1_2"
for subDir in sorted(os.listdir(mainDir)):
s = os.path.join(mainDir, subDir)
for filename in os.listdir(s):
basename, extension = os.path.splitext(filename)
if extension == '.mp4':
break
if extension == '.tif':
print(filename)
f = os.path.join(s, filename)
try:
with tifffile.TiffFile(f) as tif:
im = tif.asarray()
                            # Need to transform into an int16
except:
print("There is a problem with the tif file, skipping")
break
im = np.asarray(im, dtype=np.int16)
fout = os.path.join(s, basename+".mp4")
lastImage, dimX, dimY = im.shape
ani_frame(im,1,lastImage,subtract_first=True,filtering=True,
sigma=1.,dpi=600,out_name=fout)
else:
mainDir = "/home/gf/Meas/Creep/CoFeB/Film/Irradiated/07_irradiatedFilm_0.20A_10fps"
f = "07_irradiatedFilm_0.20A_10fps_MMStack_Pos0.ome.tif"
fout = "07_irradiatedFilm_0.20A_10fps_MMStack_Pos0.ome.mp4"
filename = os.path.join(mainDir, f)
print(filename)
with tifffile.TiffFile(filename) as tif:
im = tif.asarray()
im = np.asarray(im, dtype=np.int16)
fout = os.path.join(mainDir, fout)
lastImage, dimX, dimY = im.shape
ani_frame(im,1,lastImage,subtract_first=True,dpi=600, filtering=True,
out_name=fout)
|
StarcoderdataPython
|
5040578
|
<filename>sdk/python/pulumi_alicloud/ess/attachment.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['AttachmentArgs', 'Attachment']
@pulumi.input_type
class AttachmentArgs:
def __init__(__self__, *,
instance_ids: pulumi.Input[Sequence[pulumi.Input[str]]],
scaling_group_id: pulumi.Input[str],
force: Optional[pulumi.Input[bool]] = None):
"""
        The set of arguments for constructing an Attachment resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] instance_ids: ID of the ECS instance to be attached to the scaling group. You can input up to 20 IDs.
:param pulumi.Input[str] scaling_group_id: ID of the scaling group of a scaling configuration.
:param pulumi.Input[bool] force: Whether to remove forcibly "AutoCreated" ECS instances in order to release scaling group capacity "MaxSize" for attaching ECS instances. Default to false.
"""
pulumi.set(__self__, "instance_ids", instance_ids)
pulumi.set(__self__, "scaling_group_id", scaling_group_id)
if force is not None:
pulumi.set(__self__, "force", force)
@property
@pulumi.getter(name="instanceIds")
def instance_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
ID of the ECS instance to be attached to the scaling group. You can input up to 20 IDs.
"""
return pulumi.get(self, "instance_ids")
@instance_ids.setter
def instance_ids(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "instance_ids", value)
@property
@pulumi.getter(name="scalingGroupId")
def scaling_group_id(self) -> pulumi.Input[str]:
"""
ID of the scaling group of a scaling configuration.
"""
return pulumi.get(self, "scaling_group_id")
@scaling_group_id.setter
def scaling_group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "scaling_group_id", value)
@property
@pulumi.getter
def force(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to remove forcibly "AutoCreated" ECS instances in order to release scaling group capacity "MaxSize" for attaching ECS instances. Default to false.
"""
return pulumi.get(self, "force")
@force.setter
def force(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "force", value)
@pulumi.input_type
class _AttachmentState:
def __init__(__self__, *,
force: Optional[pulumi.Input[bool]] = None,
instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
scaling_group_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Attachment resources.
:param pulumi.Input[bool] force: Whether to remove forcibly "AutoCreated" ECS instances in order to release scaling group capacity "MaxSize" for attaching ECS instances. Default to false.
:param pulumi.Input[Sequence[pulumi.Input[str]]] instance_ids: ID of the ECS instance to be attached to the scaling group. You can input up to 20 IDs.
:param pulumi.Input[str] scaling_group_id: ID of the scaling group of a scaling configuration.
"""
if force is not None:
pulumi.set(__self__, "force", force)
if instance_ids is not None:
pulumi.set(__self__, "instance_ids", instance_ids)
if scaling_group_id is not None:
pulumi.set(__self__, "scaling_group_id", scaling_group_id)
@property
@pulumi.getter
def force(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to remove forcibly "AutoCreated" ECS instances in order to release scaling group capacity "MaxSize" for attaching ECS instances. Default to false.
"""
return pulumi.get(self, "force")
@force.setter
def force(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "force", value)
@property
@pulumi.getter(name="instanceIds")
def instance_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
ID of the ECS instance to be attached to the scaling group. You can input up to 20 IDs.
"""
return pulumi.get(self, "instance_ids")
@instance_ids.setter
def instance_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "instance_ids", value)
@property
@pulumi.getter(name="scalingGroupId")
def scaling_group_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of the scaling group of a scaling configuration.
"""
return pulumi.get(self, "scaling_group_id")
@scaling_group_id.setter
def scaling_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scaling_group_id", value)
class Attachment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
force: Optional[pulumi.Input[bool]] = None,
instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
scaling_group_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
        Attaches several ECS instances to a specified scaling group or removes them from it.
        > **NOTE:** ECS instances can be attached or removed only when the scaling group is active and it has no scaling activity in progress.
        > **NOTE:** There are two types of ECS instances in a scaling group: "AutoCreated" and "Attached". The total number of them cannot be larger than the scaling group "MaxSize".
## Example Usage
```python
import pulumi
import pulumi_alicloud as alicloud
config = pulumi.Config()
name = config.get("name")
if name is None:
name = "essattachmentconfig"
default_zones = alicloud.get_zones(available_disk_category="cloud_efficiency",
available_resource_creation="VSwitch")
default_instance_types = alicloud.ecs.get_instance_types(availability_zone=default_zones.zones[0].id,
cpu_core_count=2,
memory_size=4)
default_images = alicloud.ecs.get_images(name_regex="^ubuntu_18.*64",
most_recent=True,
owners="system")
default_network = alicloud.vpc.Network("defaultNetwork",
vpc_name=name,
cidr_block="172.16.0.0/16")
default_switch = alicloud.vpc.Switch("defaultSwitch",
vpc_id=default_network.id,
cidr_block="172.16.0.0/24",
zone_id=default_zones.zones[0].id)
default_security_group = alicloud.ecs.SecurityGroup("defaultSecurityGroup", vpc_id=default_network.id)
default_security_group_rule = alicloud.ecs.SecurityGroupRule("defaultSecurityGroupRule",
type="ingress",
ip_protocol="tcp",
nic_type="intranet",
policy="accept",
port_range="22/22",
priority=1,
security_group_id=default_security_group.id,
cidr_ip="172.16.0.0/24")
default_scaling_group = alicloud.ess.ScalingGroup("defaultScalingGroup",
min_size=0,
max_size=2,
scaling_group_name=name,
removal_policies=[
"OldestInstance",
"NewestInstance",
],
vswitch_ids=[default_switch.id])
default_scaling_configuration = alicloud.ess.ScalingConfiguration("defaultScalingConfiguration",
scaling_group_id=default_scaling_group.id,
image_id=default_images.images[0].id,
instance_type=default_instance_types.instance_types[0].id,
security_group_id=default_security_group.id,
force_delete=True,
active=True,
enable=True)
default_instance = []
for range in [{"value": i} for i in range(0, 2)]:
default_instance.append(alicloud.ecs.Instance(f"defaultInstance-{range['value']}",
image_id=default_images.images[0].id,
instance_type=default_instance_types.instance_types[0].id,
security_groups=[default_security_group.id],
internet_charge_type="PayByTraffic",
internet_max_bandwidth_out=10,
instance_charge_type="PostPaid",
system_disk_category="cloud_efficiency",
vswitch_id=default_switch.id,
instance_name=name))
default_attachment = alicloud.ess.Attachment("defaultAttachment",
scaling_group_id=default_scaling_group.id,
instance_ids=[
default_instance[0].id,
default_instance[1].id,
],
force=True)
```
## Import
ESS attachment can be imported using the id or scaling group id, e.g.
```sh
$ pulumi import alicloud:ess/attachment:Attachment example asg-abc123456
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] force: Whether to remove forcibly "AutoCreated" ECS instances in order to release scaling group capacity "MaxSize" for attaching ECS instances. Default to false.
:param pulumi.Input[Sequence[pulumi.Input[str]]] instance_ids: ID of the ECS instance to be attached to the scaling group. You can input up to 20 IDs.
:param pulumi.Input[str] scaling_group_id: ID of the scaling group of a scaling configuration.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AttachmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        Attaches several ECS instances to a specified scaling group or removes them from it.
        > **NOTE:** ECS instances can be attached or removed only when the scaling group is active and it has no scaling activity in progress.
        > **NOTE:** There are two types of ECS instances in a scaling group: "AutoCreated" and "Attached". The total number of them cannot be larger than the scaling group "MaxSize".
## Example Usage
```python
import pulumi
import pulumi_alicloud as alicloud
config = pulumi.Config()
name = config.get("name")
if name is None:
name = "essattachmentconfig"
default_zones = alicloud.get_zones(available_disk_category="cloud_efficiency",
available_resource_creation="VSwitch")
default_instance_types = alicloud.ecs.get_instance_types(availability_zone=default_zones.zones[0].id,
cpu_core_count=2,
memory_size=4)
default_images = alicloud.ecs.get_images(name_regex="^ubuntu_18.*64",
most_recent=True,
owners="system")
default_network = alicloud.vpc.Network("defaultNetwork",
vpc_name=name,
cidr_block="172.16.0.0/16")
default_switch = alicloud.vpc.Switch("defaultSwitch",
vpc_id=default_network.id,
cidr_block="172.16.0.0/24",
zone_id=default_zones.zones[0].id)
default_security_group = alicloud.ecs.SecurityGroup("defaultSecurityGroup", vpc_id=default_network.id)
default_security_group_rule = alicloud.ecs.SecurityGroupRule("defaultSecurityGroupRule",
type="ingress",
ip_protocol="tcp",
nic_type="intranet",
policy="accept",
port_range="22/22",
priority=1,
security_group_id=default_security_group.id,
cidr_ip="172.16.0.0/24")
default_scaling_group = alicloud.ess.ScalingGroup("defaultScalingGroup",
min_size=0,
max_size=2,
scaling_group_name=name,
removal_policies=[
"OldestInstance",
"NewestInstance",
],
vswitch_ids=[default_switch.id])
default_scaling_configuration = alicloud.ess.ScalingConfiguration("defaultScalingConfiguration",
scaling_group_id=default_scaling_group.id,
image_id=default_images.images[0].id,
instance_type=default_instance_types.instance_types[0].id,
security_group_id=default_security_group.id,
force_delete=True,
active=True,
enable=True)
default_instance = []
for range in [{"value": i} for i in range(0, 2)]:
default_instance.append(alicloud.ecs.Instance(f"defaultInstance-{range['value']}",
image_id=default_images.images[0].id,
instance_type=default_instance_types.instance_types[0].id,
security_groups=[default_security_group.id],
internet_charge_type="PayByTraffic",
internet_max_bandwidth_out=10,
instance_charge_type="PostPaid",
system_disk_category="cloud_efficiency",
vswitch_id=default_switch.id,
instance_name=name))
default_attachment = alicloud.ess.Attachment("defaultAttachment",
scaling_group_id=default_scaling_group.id,
instance_ids=[
default_instance[0].id,
default_instance[1].id,
],
force=True)
```
## Import
ESS attachment can be imported using the id or scaling group id, e.g.
```sh
$ pulumi import alicloud:ess/attachment:Attachment example asg-abc123456
```
:param str resource_name: The name of the resource.
:param AttachmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AttachmentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
force: Optional[pulumi.Input[bool]] = None,
instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
scaling_group_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AttachmentArgs.__new__(AttachmentArgs)
__props__.__dict__["force"] = force
if instance_ids is None and not opts.urn:
raise TypeError("Missing required property 'instance_ids'")
__props__.__dict__["instance_ids"] = instance_ids
if scaling_group_id is None and not opts.urn:
raise TypeError("Missing required property 'scaling_group_id'")
__props__.__dict__["scaling_group_id"] = scaling_group_id
super(Attachment, __self__).__init__(
'alicloud:ess/attachment:Attachment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
force: Optional[pulumi.Input[bool]] = None,
instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
scaling_group_id: Optional[pulumi.Input[str]] = None) -> 'Attachment':
"""
Get an existing Attachment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] force: Whether to remove forcibly "AutoCreated" ECS instances in order to release scaling group capacity "MaxSize" for attaching ECS instances. Default to false.
:param pulumi.Input[Sequence[pulumi.Input[str]]] instance_ids: ID of the ECS instance to be attached to the scaling group. You can input up to 20 IDs.
:param pulumi.Input[str] scaling_group_id: ID of the scaling group of a scaling configuration.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _AttachmentState.__new__(_AttachmentState)
__props__.__dict__["force"] = force
__props__.__dict__["instance_ids"] = instance_ids
__props__.__dict__["scaling_group_id"] = scaling_group_id
return Attachment(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def force(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to remove forcibly "AutoCreated" ECS instances in order to release scaling group capacity "MaxSize" for attaching ECS instances. Default to false.
"""
return pulumi.get(self, "force")
@property
@pulumi.getter(name="instanceIds")
def instance_ids(self) -> pulumi.Output[Sequence[str]]:
"""
ID of the ECS instance to be attached to the scaling group. You can input up to 20 IDs.
"""
return pulumi.get(self, "instance_ids")
@property
@pulumi.getter(name="scalingGroupId")
def scaling_group_id(self) -> pulumi.Output[str]:
"""
ID of the scaling group of a scaling configuration.
"""
return pulumi.get(self, "scaling_group_id")
|
StarcoderdataPython
|
9690876
|
<filename>cpgan_model.py
# <NAME>, March 2020
# Common code for PyTorch implementation of Copy-Pasting GAN
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from cpgan_data import *
from cpgan_tools import *
def create_gaussian_filter(blur_sigma):
bs_round = int(blur_sigma)
kernel_size = bs_round * 2 + 1
x_cord = torch.arange(kernel_size)
x_grid = x_cord.repeat(kernel_size).view(kernel_size, kernel_size)
y_grid = x_grid.t()
xy_grid = torch.stack([x_grid, y_grid], dim=-1)
mean = (kernel_size - 1.0) / 2.0
variance = blur_sigma ** 2.0
gaussian_kernel = (1./(2.*math.pi*variance)) *\
torch.exp(
-torch.sum((xy_grid - mean)**2., dim=-1) /\
(2*variance)
)
gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size)
gaussian_kernel = gaussian_kernel.repeat(3, 1, 1, 1)
gaussian_filter = nn.Conv2d(3, 3, kernel_size=kernel_size, padding=bs_round, groups=3, bias=False)
gaussian_filter.weight.data = gaussian_kernel
gaussian_filter.weight.requires_grad = False
return gaussian_filter
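# Illustrative usage sketch (added for clarity; not part of the original file): apply
# the fixed Gaussian blur above to a random batch of RGB images. Shapes and the sigma
# value are arbitrary assumptions.
def _demo_gaussian_blur():
    blur = create_gaussian_filter(blur_sigma=2.0)
    x = torch.randn(4, 3, 64, 64)   # batch of four 64x64 RGB images
    blurred = blur(x)               # padding keeps the spatial size unchanged
    assert blurred.shape == x.shape
    return blurred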
# ==== Code below is adapted from ====
# https://github.com/milesial/Pytorch-UNet
# Adapted for CP-GAN: 3, 64, 128, 256, 512, 256, 128, 64, C
class MyUNet(nn.Module):
def __init__(self, n_channels, n_classes, bilinear=False, blur_sigma=0.0, border_zero=False):
super(MyUNet, self).__init__()
self.n_channels = n_channels
self.n_classes = n_classes
self.bilinear = bilinear
self.blur_sigma = blur_sigma
self.border_zero = border_zero
self.inc = nn.Conv2d(n_channels, 64, kernel_size=3, padding=1)
self.down1 = MyDown(64, 128)
self.down2 = MyDown(128, 256)
self.down3 = MyDown(256, 512)
# Fully connected layers for discriminator output score
self.avg = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Flatten(),
nn.Linear(512, 256),
nn.LeakyReLU(0.01),
nn.Linear(256, 1)
)
self.up1 = MyUp(768, 256, bilinear)
self.up2 = MyUp(384, 128, bilinear)
self.up3 = MyUp(192, 64, bilinear)
self.outc = nn.Conv2d(64, n_classes, kernel_size=1)
if blur_sigma:
self.gaussian_filter = create_gaussian_filter(blur_sigma)
# print(gaussian_filter.weight.data) # sums to 1 per channel
def forward(self, x):
# First blur if specified
if self.blur_sigma:
x = self.gaussian_filter(x)
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
        score = torch.sigmoid(self.avg(x4))  # value in range [0, 1]
x5 = self.up1(x4, x3)
x6 = self.up2(x5, x2)
x7 = self.up3(x6, x1)
        output = torch.sigmoid(self.outc(x7))  # mask in range [0, 1]
return output, score
class MyDown(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.my_conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, padding=1),
nn.InstanceNorm2d(out_channels),
nn.LeakyReLU(0.01)
)
def forward(self, x):
return self.my_conv(x)
class MyUp(nn.Module):
def __init__(self, in_channels, out_channels, bilinear=False):
super().__init__()
if bilinear:
self.up = nn.UpsamplingBilinear2d(scale_factor=2)
else:
self.up = nn.Upsample(scale_factor=2, mode='nearest')
self.my_conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
nn.InstanceNorm2d(out_channels),
nn.LeakyReLU(0.01)
)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = torch.tensor([x2.size()[2] - x1.size()[2]])
diffX = torch.tensor([x2.size()[3] - x1.size()[3]])
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
# if you have padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return self.my_conv(x)
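if __name__ == "__main__":
    # Illustrative smoke test (added for clarity; not part of the original file):
    # run a forward pass on random data. The 64x64 RGB input and single-channel
    # mask output are assumptions about how the network is used.
    net = MyUNet(n_channels=3, n_classes=1, blur_sigma=1.0)
    x = torch.randn(2, 3, 64, 64)
    mask, score = net(x)
    print(mask.shape, score.shape)  # expected: torch.Size([2, 1, 64, 64]) torch.Size([2, 1])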
|
StarcoderdataPython
|
1621086
|
<reponame>kancurochat/mcx
import jax.numpy as np
from jax import random, scipy
from mcx.distributions import constraints
from mcx.distributions.distribution import Distribution
from mcx.distributions.shapes import broadcast_batch_shape
class StudentT(Distribution):
parameters = {"df": constraints.strictly_positive}
support = constraints.real
def __init__(self, df):
self.event_shape = ()
self.batch_shape = broadcast_batch_shape(np.shape(df))
self.df = df
def sample(self, rng_key, sample_shape=()):
shape = sample_shape + self.batch_shape + self.event_shape
return random.t(rng_key, self.df, shape)
def logpdf(self, x):
return scipy.stats.t.logpdf(x, self.df)
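if __name__ == "__main__":
    # Illustrative usage sketch (added for clarity; not part of the original module):
    # draw a few samples and evaluate their log-density. Assumes jax and mcx are
    # importable; df=3.0 is an arbitrary choice.
    rng_key = random.PRNGKey(0)
    dist = StudentT(df=3.0)
    samples = dist.sample(rng_key, sample_shape=(5,))
    print(samples)
    print(dist.logpdf(samples))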
|
StarcoderdataPython
|
208506
|
from __future__ import print_function
try:
import argparse
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from torch.autograd import Variable
from torch.autograd import grad as torch_grad
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets
import torchvision.transforms as transforms
    from torchvision.utils import save_image, make_grid
    from torchvision.transforms.functional import to_pil_image
    import imageio
from itertools import chain as ichain
from clusgan.definitions import DATASETS_DIR, RUNS_DIR
from clusgan.models import Generator_CNN, Encoder_CNN, Discriminator_CNN
from clusgan.utils import save_model, calc_gradient_penalty, sample_z, cross_entropy
from clusgan.datasets import get_dataloader, dataset_list
from clusgan.plots import plot_train_loss
except ImportError as e:
print(e)
raise ImportError
def main():
global args
parser = argparse.ArgumentParser(description="Convolutional NN Training Script")
parser.add_argument("-r", "--run_name", dest="run_name", default='clusgan', help="Name of training run")
parser.add_argument("-n", "--n_epochs", dest="n_epochs", default=200, type=int, help="Number of epochs")
parser.add_argument("-b", "--batch_size", dest="batch_size", default=64, type=int, help="Batch size")
parser.add_argument("-s", "--dataset_name", dest="dataset_name", default='mnist', choices=dataset_list, help="Dataset name")
parser.add_argument("-w", "--wass_metric", dest="wass_metric", action='store_true', help="Flag for Wasserstein metric")
parser.add_argument("-g", "-–gpu", dest="gpu", default=0, type=int, help="GPU id to use")
parser.add_argument("-k", "-–num_workers", dest="num_workers", default=1, type=int, help="Number of dataset workers")
args = parser.parse_args()
run_name = args.run_name
dataset_name = args.dataset_name
device_id = args.gpu
num_workers = args.num_workers
# Training details
n_epochs = args.n_epochs
batch_size = args.batch_size
test_batch_size = 5000
lr = 1e-4
b1 = 0.5
b2 = 0.9 #99
decay = 2.5*1e-5
n_skip_iter = 1 #5
img_size = 28
channels = 1
# Latent space info
latent_dim = 30
n_c = 10
betan = 10
betac = 10
    # Wasserstein metric flag
wass_metric = args.wass_metric
mtype = 'van'
if (wass_metric):
mtype = 'wass'
# Make directory structure for this run
sep_und = '_'
run_name_comps = ['%iepoch'%n_epochs, 'z%s'%str(latent_dim), mtype, 'bs%i'%batch_size, run_name]
run_name = sep_und.join(run_name_comps)
run_dir = os.path.join(RUNS_DIR, dataset_name, run_name)
data_dir = os.path.join(DATASETS_DIR, dataset_name)
imgs_dir = os.path.join(run_dir, 'images')
models_dir = os.path.join(run_dir, 'models')
os.makedirs(data_dir, exist_ok=True)
os.makedirs(run_dir, exist_ok=True)
os.makedirs(imgs_dir, exist_ok=True)
os.makedirs(models_dir, exist_ok=True)
print('\nResults to be saved in directory %s\n'%(run_dir))
x_shape = (channels, img_size, img_size)
cuda = True if torch.cuda.is_available() else False
    cuda = False  # forces CPU execution, overriding the availability check above
device = torch.device('cuda:0' if cuda else 'cpu')
# Loss function
bce_loss = torch.nn.BCELoss()
xe_loss = torch.nn.CrossEntropyLoss()
mse_loss = torch.nn.MSELoss()
# Initialize generator and discriminator
generator = Generator_CNN(latent_dim, n_c, x_shape)
encoder = Encoder_CNN(latent_dim, n_c)
discriminator = Discriminator_CNN(wass_metric=wass_metric)
if cuda:
torch.cuda.set_device(device_id)
generator.cuda()
encoder.cuda()
discriminator.cuda()
bce_loss.cuda()
xe_loss.cuda()
mse_loss.cuda()
    Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# Configure training data loader
dataloader = get_dataloader(dataset_name=dataset_name,
data_dir=data_dir,
batch_size=batch_size,
num_workers=num_workers)
# Test data loader
testdata = get_dataloader(dataset_name=dataset_name, data_dir=data_dir, batch_size=test_batch_size, train_set=False)
test_imgs, test_labels = next(iter(testdata))
test_imgs = Variable(test_imgs.type(Tensor))
ge_chain = ichain(generator.parameters(),
encoder.parameters())
optimizer_GE = torch.optim.Adam(ge_chain, lr=lr, betas=(b1, b2), weight_decay=decay)
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=lr, betas=(b1, b2))
#optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=lr, betas=(b1, b2), weight_decay=decay)
# ----------
# Training
# ----------
ge_l = []
d_l = []
c_zn = []
c_zc = []
c_i = []
images = []
# Training loop
print('\nBegin training session with %i epochs...\n'%(n_epochs))
for epoch in range(n_epochs):
for i, (imgs, itruth_label) in enumerate(dataloader):
# Ensure generator/encoder are trainable
generator.train()
encoder.train()
# Zero gradients for models
generator.zero_grad()
encoder.zero_grad()
discriminator.zero_grad()
# Configure input
real_imgs = Variable(imgs.type(Tensor))
# ---------------------------
# Train Generator + Encoder
# ---------------------------
optimizer_GE.zero_grad()
# Sample random latent variables
zn, zc, zc_idx = sample_z(shape=imgs.shape[0],
latent_dim=latent_dim,
n_c=n_c)
# Generate a batch of images
gen_imgs = generator(zn, zc)
# Discriminator output from real and generated samples
D_gen = discriminator(gen_imgs)
D_real = discriminator(real_imgs)
# Step for Generator & Encoder, n_skip_iter times less than for discriminator
if (i % n_skip_iter == 0):
# Encode the generated images
enc_gen_zn, enc_gen_zc, enc_gen_zc_logits = encoder(gen_imgs)
# Calculate losses for z_n, z_c
zn_loss = mse_loss(enc_gen_zn, zn)
zc_loss = xe_loss(enc_gen_zc_logits, zc_idx)
#zc_loss = cross_entropy(enc_gen_zc_logits, zc)
# Check requested metric
if wass_metric:
# Wasserstein GAN loss
ge_loss = torch.mean(D_gen) + betan * zn_loss + betac * zc_loss
else:
# Vanilla GAN loss
valid = Variable(Tensor(gen_imgs.size(0), 1).fill_(1.0), requires_grad=False)
v_loss = bce_loss(D_gen, valid)
ge_loss = v_loss + betan * zn_loss + betac * zc_loss
ge_loss.backward(retain_graph=True)
optimizer_GE.step()
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
# Measure discriminator's ability to classify real from generated samples
if wass_metric:
# Gradient penalty term
grad_penalty = calc_gradient_penalty(discriminator, real_imgs, gen_imgs)
# Wasserstein GAN loss w/gradient penalty
d_loss = torch.mean(D_real) - torch.mean(D_gen) + grad_penalty
else:
# Vanilla GAN loss
fake = Variable(Tensor(gen_imgs.size(0), 1).fill_(0.0), requires_grad=False)
real_loss = bce_loss(D_real, valid)
fake_loss = bce_loss(D_gen, fake)
d_loss = (real_loss + fake_loss) / 2
d_loss.backward()
optimizer_D.step()
# Save training losses
d_l.append(d_loss.item())
ge_l.append(ge_loss.item())
# Generator in eval mode
generator.eval()
encoder.eval()
# Set number of examples for cycle calcs
n_sqrt_samp = 5
n_samp = n_sqrt_samp * n_sqrt_samp
## Cycle through test real -> enc -> gen
t_imgs, t_label = test_imgs.data, test_labels
#r_imgs, i_label = real_imgs.data[:n_samp], itruth_label[:n_samp]
# Encode sample real instances
e_tzn, e_tzc, e_tzc_logits = encoder(t_imgs)
# Generate sample instances from encoding
teg_imgs = generator(e_tzn, e_tzc)
# Calculate cycle reconstruction loss
img_mse_loss = mse_loss(t_imgs, teg_imgs)
# Save img reco cycle loss
c_i.append(img_mse_loss.item())
## Cycle through randomly sampled encoding -> generator -> encoder
zn_samp, zc_samp, zc_samp_idx = sample_z(shape=n_samp,
latent_dim=latent_dim,
n_c=n_c)
# Generate sample instances
gen_imgs_samp = generator(zn_samp, zc_samp)
# Encode sample instances
zn_e, zc_e, zc_e_logits = encoder(gen_imgs_samp)
# Calculate cycle latent losses
lat_mse_loss = mse_loss(zn_e, zn_samp)
lat_xe_loss = xe_loss(zc_e_logits, zc_samp_idx)
#lat_xe_loss = cross_entropy(zc_e_logits, zc_samp)
# Save latent space cycle losses
c_zn.append(lat_mse_loss.item())
c_zc.append(lat_xe_loss.item())
# Save cycled and generated examples!
r_imgs, i_label = real_imgs.data[:n_samp], itruth_label[:n_samp]
e_zn, e_zc, e_zc_logits = encoder(r_imgs)
reg_imgs = generator(e_zn, e_zc)
# we just want the generated examples
# save_image(r_imgs.data[:n_samp],
# '%s/real_%06i.png' %(imgs_dir, epoch),
# nrow=n_sqrt_samp, normalize=True)
# save_image(reg_imgs.data[:n_samp],
# '%s/reg_%06i.png' %(imgs_dir, epoch),
# nrow=n_sqrt_samp, normalize=True)
save_image(gen_imgs_samp.data[:n_samp],
'%s/gen_%06i.png' %(imgs_dir, epoch),
nrow=n_sqrt_samp, normalize=True)
        if epoch % 10 == 0: images.append(gen_imgs_samp.data[:n_samp])
        if epoch == 0:
            # Each entry of `images` is a batch of samples, so tile it into a grid
            # before converting to a PIL image, and write the converted frames
            # (imgs), not the raw tensors, to the gif.
            imgs = [np.array(to_pil_image(make_grid(img, nrow=n_sqrt_samp, normalize=True))) for img in images]
            imageio.mimsave('/content/drive/MyDrive/outputs/clusterGAN_fashion.gif', imgs)
## Generate samples for specified classes
stack_imgs = []
for idx in range(n_c):
# Sample specific class
zn_samp, zc_samp, zc_samp_idx = sample_z(shape=n_c,
latent_dim=latent_dim,
n_c=n_c,
fix_class=idx)
# Generate sample instances
gen_imgs_samp = generator(zn_samp, zc_samp)
if (len(stack_imgs) == 0):
stack_imgs = gen_imgs_samp
else:
stack_imgs = torch.cat((stack_imgs, gen_imgs_samp), 0)
# Save class-specified generated examples!
save_image(stack_imgs,
'%s/gen_classes_%06i.png' %(imgs_dir, epoch),
nrow=n_c, normalize=True)
# imgs = [np.array(to_pil_image(img)) for img in stack_imgs]
# imageio.mimsave('/content/drive/MyDrive/outputs/generator_images.gif', imgs)
print ("[Epoch %d/%d] \n"\
"\tModel Losses: [D: %f] [GE: %f]" % (epoch,
n_epochs,
d_loss.item(),
ge_loss.item())
)
print("\tCycle Losses: [x: %f] [z_n: %f] [z_c: %f]"%(img_mse_loss.item(),
lat_mse_loss.item(),
lat_xe_loss.item())
)
    # Now that training is done, tile each saved sample batch into a grid frame
    # and write the converted frames (imgs) to the gif.
    imgs = [np.array(to_pil_image(make_grid(img, nrow=n_sqrt_samp, normalize=True))) for img in images]
    imageio.mimsave('/content/drive/MyDrive/outputs/generator_images.gif', imgs)
# Save training results
train_df = pd.DataFrame({
'n_epochs' : n_epochs,
'learning_rate' : lr,
'beta_1' : b1,
'beta_2' : b2,
'weight_decay' : decay,
'n_skip_iter' : n_skip_iter,
'latent_dim' : latent_dim,
'n_classes' : n_c,
'beta_n' : betan,
'beta_c' : betac,
'wass_metric' : wass_metric,
'gen_enc_loss' : ['G+E', ge_l],
'disc_loss' : ['D', d_l],
'zn_cycle_loss' : ['$||Z_n-E(G(x))_n||$', c_zn],
'zc_cycle_loss' : ['$||Z_c-E(G(x))_c||$', c_zc],
'img_cycle_loss' : ['$||X-G(E(x))||$', c_i]
})
train_df.to_csv('%s/training_details.csv'%(run_dir))
# Plot some training results
plot_train_loss(df=train_df,
arr_list=['gen_enc_loss', 'disc_loss'],
figname='%s/training_model_losses.png'%(run_dir)
)
plot_train_loss(df=train_df,
arr_list=['zn_cycle_loss', 'zc_cycle_loss', 'img_cycle_loss'],
figname='%s/training_cycle_loss.png'%(run_dir)
)
# Save current state of trained models
model_list = [discriminator, encoder, generator]
save_model(models=model_list, out_dir=models_dir)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
11388443
|
from flask import current_app
from testAuxiliaryFuncs import decryptAES, getDecryptor, encryptAES
from bson import json_util, ObjectId
def test_allsites(client, auth, app):
token = auth.login('test1', '<PASSWORD>')
response = client.get('/api/AllSites', headers={"token": token})
with app.app_context():
response = decryptAES(json_util.loads(response.data), getDecryptor(current_app.rsa_private))
response = json_util.dumps(response)
# check if both sites are returned
assert "test_login_site1.com" in response
assert "test_login_site2.com" in response
# check if endpoint returns id of LoginData
assert "_id" in response
def test_get_logindata(client, auth, app):
token = auth.login('test1', '<PASSWORD>')
response = client.get('/api/AllSites', headers={"token": token})
with app.app_context():
response = decryptAES(json_util.loads(response.data), getDecryptor(current_app.rsa_private))
response = client.get('/api/LoginData/' + str(response[1]['_id']), headers={"token": token})
with app.app_context():
response = decryptAES(json_util.loads(response.data), getDecryptor(current_app.rsa_private))
# check if proper user credentials are returned
assert response['login'] == 'test_login2'
assert response['password'] == '<PASSWORD>'
def test_post_logindata(client, auth, app):
token = auth.login('test1', '<PASSWORD>')
js = {
"site": "post_test.com",
"login": "post_test_login",
"password": "<PASSWORD>",
"note": "post_test_note"
}
with app.app_context():
response = client.post('/api/LoginData', json=encryptAES(js, current_app.server_key_pem),
headers={"token": token})
# check response status code
assert response.status_code == 201
check = current_app.db.passwordManager.accounts.find_one(
{
'login': current_app.client_encryption.encrypt("test1", "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic",
current_app.data_key_id),
'logindata.site': js['site']
},
{
'_id': 0,
'logindata.$': 1
}
)
check = check['logindata'][0]
# check if login data has been updated
assert js.items() <= check.items()
def test_put_logindata(client, auth, app):
token = auth.login('test1', 'test1')
response = client.get('/api/AllSites', headers={"token": token})
with app.app_context():
response = decryptAES(json_util.loads(response.data), getDecryptor(current_app.rsa_private))
logindata_id = str(response[1]['_id'])
js = {
"site": "put_test.com",
"login": "put_test_login",
"password": "<PASSWORD>",
"note": "put_test_note"
}
response = client.put('/api/LoginData/' + logindata_id,
json=encryptAES(js, current_app.server_key_pem),
headers={"token": token})
# check response status code
assert response.status_code == 201
check = current_app.db.passwordManager.accounts.find_one(
{
'login': current_app.client_encryption.encrypt("test1", "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic",
current_app.data_key_id),
'logindata._id': ObjectId(logindata_id)
},
{
'_id': 0,
'logindata.$': 1
}
)
check = check['logindata'][0]
# check if login date has been updated
assert js.items() <= check.items()
def test_delete_logindata(client, auth, app):
token = auth.login('test1', '<PASSWORD>')
response = client.get('/api/AllSites', headers={"token": token})
with app.app_context():
response = decryptAES(json_util.loads(response.data), getDecryptor(current_app.rsa_private))
logindata_id = str(response[1]['_id'])
response = client.delete('/api/LoginData/' + logindata_id, headers={"token": token})
# check response status code
assert response.status_code == 200
# check if logindata record has been deleted from db
with app.app_context():
assert (
current_app.db.passwordManager.accounts.find_one(
{
'login': current_app.client_encryption.encrypt("test1",
"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic",
current_app.data_key_id),
'logindata._id': ObjectId(logindata_id)
}
)
is None
)
def test_backup(client, auth, app):
token = auth.login('test1', '<PASSWORD>')
response = client.get('/api/Backup', headers={"token": token})
# check response status code
assert response.status_code == 200
with app.app_context():
response = decryptAES(json_util.loads(response.data), getDecryptor(current_app.rsa_private))
check = current_app.db.passwordManager.accounts.find_one(
{
'login': current_app.client_encryption.encrypt("test1", "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic",
current_app.data_key_id)
},
{
'_id': 0,
'logindata': 1
}
)
# check if proper data has been returned
assert check['logindata'] == response
|
StarcoderdataPython
|
11201120
|
# !/usr/bin/env python3
# -*- coding:utf-8 -*-
#
# Author: <NAME> - <EMAIL>
# Blog: zhouyichu.com
#
# Python release: 3.6.0
#
# Date: 2020-03-20 10:56:58
# Last modified: 2020-12-28 15:30:23
"""
Data structure for probing.
"""
# import logging
from collections import Counter
from functools import total_ordering
import heapq
from typing import List
# from multiprocessing import Pool
# import numpy as np
# import torch
# from tqdm import tqdm
# from tqdm import trange
# from joblib import Parallel, delayed
class Cluster:
# __slots__ is used here because there will be
# so many Cluster object during the probing, I
# want to save as much memory as possible.
__slots__ = ('indices', 'major_label',
'_hash_value',
'children', 'labels')
def __init__(self, indices: List[int], labels: List[int]):
"""Initialize a new cluster with indices
Args:
- indices: The index of each point.
- labels: The label of each point.
"""
assert len(indices) == len(labels)
self.indices = sorted(indices)
self.labels = labels
self.major_label = Counter(labels).most_common(1)[0][0]
self._hash_value = ' '.join([str(i) for i in self.indices])
self._hash_value = hash(self._hash_value)
# The children is used to track the path of merging
# This can be used to speed up the probing during later steps.
self.children = set()
@property
def purity(self) -> float:
n = sum([1 for i in self.labels if i == self.major_label])
return n / len(self.labels)
@staticmethod
def merge(A: 'Cluster', B: 'Cluster') -> 'Cluster':
"""Merge two clusters and produce a new cluster.
"""
assert type(A) == Cluster
assert type(B) == Cluster
indices = A.indices + B.indices
labels = A.labels + B.labels
reval = Cluster(indices, labels)
reval.children = A.children | B.children
# Do not forget A and B themselves.
reval.children.add(A)
reval.children.add(B)
return reval
def __hash__(self):
return self._hash_value
def __eq__(self, other):
return self._hash_value == other._hash_value
def __repr__(self):
n = len(self.indices)
idx = ' '.join([str(self.indices[i]) for i in range(n)])
labels = ' '.join([str(self.labels[i]) for i in range(n)])
s = 'Cluster(Indices:{a}, labels:{b}, major_label={c}, purity={d})'
s = s.format(a=str(idx), b=labels, c=self.major_label, d=self.purity)
return s
@total_ordering
class ClusterDisPair:
"""This is a intermediate class which is used to compute
the distance between two clusters.
This class should not be exposed to the end user.
"""
__slots__ = ('i', 'j', 'dis', '_hash_value')
def __init__(self, i: int, j: int, dis: float):
"""
Be note, here the index i and index j is not the index of
points, instead they are the indices of clusters.
Args:
i: The index of the cluster.
j: The index of the cluster.
dis: The distance between these two clusters.
"""
assert i != j
self.i = min(i, j)
self.j = max(i, j)
self.dis = dis
self._hash_value = hash((self.i, self.j, dis))
def __hash__(self):
return self._hash_value
def __eq__(self, other):
return self._hash_value == other._hash_value
def __lt__(self, other):
return self.dis < other.dis
def __le__(self, other):
return self.dis <= other.dis
def __repr__(self):
s = 'ClusterDisPair(i:{a}, j:{b}, dis:{c})'
s = s.format(a=str(self.i), b=str(self.j), c=str(self.dis))
return s
@total_ordering
class ClusterDisList:
""" A heap list of pair of clusters.
Each list represents all the pair distance of (idx, i), i < idx.
Here, idx and i are the indices of clusters.
"""
__slots__ = ('dis_list', 'idx', '_hash_value')
def __init__(self, dis_list: List[ClusterDisPair], idx: int):
self.dis_list = dis_list
heapq.heapify(self.dis_list)
self.idx = idx
self._hash_value = hash(idx)
def min(self) -> ClusterDisPair:
"""Return the pair of minimum distance of this list.
"""
return heapq.heappop(self.dis_list)
def deactive(self):
self.dis_list = []
def __hash__(self):
return self._hash_value
def __eq__(self, other):
if not self.dis_list and not other.dis_list:
return True
elif not self.dis_list or not other.dis_list:
return False
else:
return self.dis_list[0] == other.dis_list[0]
def __lt__(self, other):
if not self.dis_list:
return False
if not other.dis_list:
return True
return self.dis_list[0] < other.dis_list[0]
def __le__(self, other):
if not self.dis_list:
return False
if not other.dis_list:
return True
return self.dis_list[0] <= other.dis_list[0]
def __repr__(self):
if not self.dis_list:
            s = 'Index:{a} is deactivated'
s = s.format(a=str(self.idx))
else:
s = 'Index:{a} has minimum value {b}'
s = s.format(a=str(self.idx),
b=str(self.dis_list[0]))
return s
if __name__ == '__main__':
import random
n = 3
array = []
for i in range(n):
        for j in range(i + 1, n):
array.append(ClusterDisPair(i, j, i+j))
random.shuffle(array)
print(array)
array.sort()
print(array)
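    # Hedged extra demo (not part of the original file): merge two small clusters
    # and inspect the result; the indices and labels below are made up.
    left = Cluster(indices=[0, 1], labels=[1, 1])
    right = Cluster(indices=[2, 3], labels=[1, 0])
    merged = Cluster.merge(left, right)
    print(merged)  # indices 0..3, major_label 1, purity 0.75
    print(left in merged.children, right in merged.children)  # True True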
|
StarcoderdataPython
|
1702271
|
<reponame>carmenchilson/BirdRoostDetection
"""Read in csv and create train, test, and validation splits for ML."""
import BirdRoostDetection.LoadSettings as settings
import os
import pandas
def ml_splits_by_date(csv_input_path,
csv_output_path,
k=5):
"""Split labeled data for k-fold cross validation.
For machine learning, you need a training, validation, and test set. This
method will read in a csv from csv_input_path. This data should be formatted
like the ml_labels_example file. It will then create k splits of the data.
Each time the data is used for training, k - 2 splits will be used for
training, 1 split will be used for testing, and 1 split will be used for
validating. This method will split the data by date (to avoid cross
contamination of the datasets) and then write out a csv used to look up
which file belongs to which split.
Args:
        csv_input_path: The input file location. Formatted like
example_labels.csv, a string.
csv_output_path: The output csv location path, a string. The output csv
will be saved to this location.
k: The size of k for k fold cross validation.
"""
pd = pandas.read_csv(csv_input_path)
basenames = {}
file_list = list(pd['AWS_file'])
is_roost_list = list(pd['Roost'])
fold_images = [[] for split_index in range(k)]
index = 0
for i, file_name in enumerate(file_list):
basename = file_name[4:12]
if basename not in basenames:
basenames[basename] = index
            index = (index + 1) % k
hash = basenames[basename]
for split_index in range(k):
if hash == split_index:
fold_images[split_index].append([file_name, is_roost_list[i]])
output = []
for split_index in range(k):
for file_name in fold_images[split_index]:
output.append({
'split_index': split_index,
'AWS_file': file_name[0], 'Roost': file_name[1]})
output_pd = pandas.DataFrame.from_dict(output)
output_pd.to_csv(csv_output_path, index=False)
def main():
ml_splits_by_date(csv_input_path=settings.LABEL_CSV,
csv_output_path=settings.ML_SPLITS_DATA,
k=5)
if __name__ == "__main__":
os.chdir(settings.WORKING_DIRECTORY)
main()
|
StarcoderdataPython
|
1679004
|
from binaryninja.architecture import Architecture
from binaryninja.function import RegisterInfo, InstructionInfo, InstructionTextToken
from binaryninja.enums import Endianness, InstructionTextTokenType, BranchType, SegmentFlag, SectionSemantics
from binaryninja.log import log_info
from .view import Chip8View
from .disasm import Disassembler
class Chip8(Architecture):
name = 'CHIP-8'
endianness = Endianness.BigEndian
address_size = 2
default_int_size = 2
instr_alignment = 2
max_instr_length = 2
opcode_display_length = 2
regs = {
'PC': RegisterInfo('PC', 2),
'SP': RegisterInfo('SP', 1),
'I': RegisterInfo('I', 2),
'DT': RegisterInfo('DT', 1),
'ST': RegisterInfo('ST', 1),
'V0': RegisterInfo('V0', 1),
'V1': RegisterInfo('V1', 1),
'V2': RegisterInfo('V2', 1),
'V3': RegisterInfo('V3', 1),
'V4': RegisterInfo('V4', 1),
'V5': RegisterInfo('V5', 1),
'V6': RegisterInfo('V6', 1),
'V7': RegisterInfo('V7', 1),
'V8': RegisterInfo('V8', 1),
'V9': RegisterInfo('V9', 1),
'Va': RegisterInfo('Va', 1),
'Vb': RegisterInfo('Vb', 1),
'Vc': RegisterInfo('Vc', 1),
'Vd': RegisterInfo('Vd', 1),
'Ve': RegisterInfo('Ve', 1),
'Vf': RegisterInfo('Vf', 1)
}
stack_pointer = 'SP'
def __init__(self):
super().__init__()
self.dis = Disassembler()
def get_instruction_info(self, data, addr):
""" Establishes instruction length and branch info """
if len(data) > 2:
data = data[:2]
result = InstructionInfo()
result.length = 2
vars = self.dis._vars(data)
baddr = vars['addr']
binfo = self.dis.get_branch_info(data)
if binfo == BranchType.UnconditionalBranch or binfo == BranchType.CallDestination:
result.add_branch(binfo, baddr)
elif binfo == BranchType.FunctionReturn or binfo == BranchType.IndirectBranch:
result.add_branch(binfo)
elif binfo == BranchType.TrueBranch:
result.add_branch(BranchType.TrueBranch, addr + 4)
result.add_branch(BranchType.FalseBranch, addr + 2)
elif binfo == BranchType.FalseBranch:
result.add_branch(BranchType.TrueBranch, addr + 4)
result.add_branch(BranchType.FalseBranch, addr + 2)
return result
def get_instruction_text(self, data, addr):
""" Display text for tokanized instruction """
if len(data) > 2:
data = data[:2]
tokens = self.dis.disasm(data, addr)
if not tokens:
tokens = [InstructionTextToken(InstructionTextTokenType.InstructionToken, '_emit'),
InstructionTextToken(InstructionTextTokenType.TextToken, ' '),
InstructionTextToken(InstructionTextTokenType.IntegerToken, hex(data[0]), data[0]),
InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ', '),
InstructionTextToken(InstructionTextTokenType.IntegerToken, hex(data[1]), data[1])]
return tokens, 2
def get_instruction_low_level_il(self, data, addr, il):
""" TODO: Implement a lifter here """
return None
|
StarcoderdataPython
|
1975308
|
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_schema[updateMap-all-options] 1'] = {
'data': {
'updateMap': {
'map': {
'mapId': '1001',
'name': 'new-name'
}
}
}
}
snapshots['test_schema[updateMap-all-options] 2'] = {
'data': {
'map': {
'beams': {
'edges': [
]
},
'datePosted': '2020-02-18',
'mapFilePaths': {
'edges': [
{
'node': {
'path': 'nersc:/go/to/my/maps'
}
}
]
},
'mapper': 'pwg-xyz',
'name': 'new-name',
'note': '- Note 123'
}
}
}
snapshots['test_schema[updateMap-selective-options] 1'] = {
'data': {
'updateMap': {
'map': {
'mapId': '1001',
'name': 'new-name'
}
}
}
}
snapshots['test_schema[updateMap-selective-options] 2'] = {
'data': {
'map': {
'beams': {
'edges': [
]
},
'datePosted': '2019-02-13',
'mapFilePaths': {
'edges': [
{
'node': {
'path': 'nersc:/go/to/my/maps'
}
}
]
},
'mapper': 'pwg-xyz',
'name': 'new-name',
'note': '''- This is a dummy test with a lat map
- This should not depend on any beam'''
}
}
}
|
StarcoderdataPython
|
13917
|
#!/usr/bin/env python
# Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import shutil
import subprocess
# The location of the generate grammar kit script
DIR = os.path.dirname(__file__)
# The location of the plugin directory
PLUGIN_PATH = os.path.abspath(os.path.join(DIR, ".."))
# The location of the grammar-kit directory
GRAMMAR_KIT = os.path.abspath(
os.path.join(DIR, "../../../third-party/java/grammar-kit/")
)
OUT_DIR = os.path.join(PLUGIN_PATH, "gen")
FLEX_OUT_DIR = os.path.join(OUT_DIR, "com/facebook/buck/intellij/ideabuck/lang")
GRAMMAR_KIT_JAR = os.path.join(GRAMMAR_KIT, "grammar-kit.jar")
GRAMMAR_KIT_JFLEX_JAR = os.path.join(GRAMMAR_KIT, "JFlex.jar")
JFLEX_SKELETON = os.path.join(PLUGIN_PATH, "resources/idea-flex.skeleton")
FLEX_FILE = os.path.join(
PLUGIN_PATH, "src/com/facebook/buck/intellij/ideabuck/lang/Buck.flex"
)
BNF_FILE = os.path.join(
PLUGIN_PATH, "src/com/facebook/buck/intellij/ideabuck/lang/Buck.bnf"
)
def subprocess_call(cmd):
print("Running: %s" % (" ".join(cmd)))
subprocess.call(cmd)
shutil.rmtree(OUT_DIR, ignore_errors=True)
subprocess_call(["java", "-jar", GRAMMAR_KIT_JAR, OUT_DIR, BNF_FILE])
subprocess_call(
[
"java",
"-jar",
GRAMMAR_KIT_JFLEX_JAR,
"-sliceandcharat",
"-skel",
JFLEX_SKELETON,
"-d",
FLEX_OUT_DIR,
FLEX_FILE,
]
)
|
StarcoderdataPython
|
9722580
|
<gh_stars>0
from product_details import product_details
def current_firefox_regexp():
current_firefox = int(product_details.firefox_versions['LATEST_FIREFOX_VERSION'].split('.')[0])
versions = ['%s' % i for i in range(current_firefox, current_firefox + 4)]
return '|'.join(versions)
|
StarcoderdataPython
|
388815
|
"""
ddtrace.vendor
==============
Install vendored dependencies under a different top level package to avoid importing `ddtrace/__init__.py`
whenever a dependency is imported. Doing this allows us to have a little more control over import order.
Dependencies
============
six
---
Website: https://six.readthedocs.io/
Source: https://github.com/benjaminp/six
Version: 1.11.0
License: MIT
Notes:
`six/__init__.py` is just the source code's `six.py`
`curl https://raw.githubusercontent.com/benjaminp/six/1.11.0/six.py > ddtrace/vendor/six/__init__.py`
wrapt
-----
Website: https://wrapt.readthedocs.io/en/latest/
Source: https://github.com/GrahamDumpleton/wrapt/
Version: 1.11.1
License: BSD 2-Clause "Simplified" License
Notes:
`wrapt/__init__.py` was updated to include a copy of `wrapt`'s license: https://github.com/GrahamDumpleton/wrapt/blob/1.11.1/LICENSE
`setup.py` will attempt to build the `wrapt/_wrappers.c` C module
dogstatsd
---------
Website: https://datadogpy.readthedocs.io/en/latest/
Source: https://github.com/DataDog/datadogpy
Version: 0.28.0
License: Copyright (c) 2015, Datadog <<EMAIL>>
Notes:
`dogstatsd/__init__.py` was updated to include a copy of the `datadogpy` license: https://github.com/DataDog/datadogpy/blob/master/LICENSE
Only `datadog.dogstatsd` module was vendored to avoid unnecessary dependencies
`datadog/util/compat.py` was copied to `dogstatsd/compat.py`
monotonic
---------
Website: https://pypi.org/project/monotonic/
Source: https://github.com/atdt/monotonic
Version: 1.5
License: Apache License 2.0
Notes:
The source `monotonic.py` was added as `monotonic/__init__.py`
No other changes were made
debtcollector
-------------
Website: https://docs.openstack.org/debtcollector/latest/index.html
Source: https://github.com/openstack/debtcollector
Version: 1.22.0
License: Apache License 2.0
Notes:
Removed dependency on `pbr` and manually set `__version__`
psutil
------
Website: https://github.com/giampaolo/psutil
Source: https://github.com/giampaolo/psutil
Version: 5.6.7
License: BSD 3
attrs
-----
Website: http://www.attrs.org/
Source: https://github.com/python-attrs/attrs
Version: 19.3.0
License: MIT
"""
# Initialize `ddtrace.vendor.datadog.base.log` logger with our custom rate limited logger
# DEV: This helps ensure if there are connection issues we do not spam their logs
# DEV: Overwrite `base.log` instead of `get_logger('datadog.dogstatsd')` so we do
# not conflict with any non-vendored datadog.dogstatsd logger
from ..internal.logger import get_logger
from .dogstatsd import base
base.log = get_logger('ddtrace.vendor.dogstatsd')
|
StarcoderdataPython
|
124760
|
<filename>Trolls/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-13 07:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='UserInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Name', models.CharField(max_length=250)),
('UserId', models.CharField(max_length=250)),
('Link', models.CharField(max_length=1000)),
('UserPhoto', models.CharField(max_length=1000)),
],
),
migrations.CreateModel(
name='UserLocation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Location', models.CharField(max_length=500)),
],
),
migrations.AddField(
model_name='userinfo',
name='location',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Trolls.UserLocation'),
),
]
|
StarcoderdataPython
|
8095979
|
from django.shortcuts import render
from rest_framework import viewsets, status, generics , permissions, authentication
# Create your views here.
from rest_framework.authentication import TokenAuthentication
from rest_framework.decorators import api_view
from rest_framework.generics import ListAPIView
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_simplejwt.tokens import RefreshToken
from .models import NewUser
from .serializers import CustomUserSerializer
class UserGCList(generics.ListCreateAPIView):
queryset = NewUser.objects.all()
serializer_class = CustomUserSerializer
authentication_classes = [
authentication.TokenAuthentication,
authentication.SessionAuthentication]
permission_classes = [permissions.IsAuthenticated]
pagination_class = PageNumberPagination
class UserGCDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = NewUser.objects.all()
serializer_class = CustomUserSerializer
class UserCreate(APIView):
permission_classes = [AllowAny]
def post(self, request, format='json'):
serializer = CustomUserSerializer(data=request.data)
if serializer.is_valid():
user = serializer.save()
if user:
json = serializer.data
return Response(json, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class BlacklistTokenUpdateView(APIView):
permission_classes = [AllowAny]
authentication_classes = ()
def post(self, request):
try:
refresh_token = request.data["refresh_token"]
token = RefreshToken(refresh_token)
token.blacklist()
return Response(status=status.HTTP_205_RESET_CONTENT)
except Exception as e:
return Response(status=status.HTTP_400_BAD_REQUEST)
#////////////////////////////
class UserViewset(viewsets.ViewSet):
def list(self,request):
users = NewUser.objects.all()
serializer = CustomUserSerializer(users, many=True)
return Response(serializer.data)
@api_view(['GET','POST'])
def apifunction(request):
"""
    List all users, or create a new user.
"""
if request.method == 'GET':
users = NewUser.objects.all()
serializer = CustomUserSerializer(users, many=True)
items = {
"items": serializer.data
}
return Response(items)
elif request.method == 'POST':
serializer = CustomUserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
def user_detail(request, pk):
"""
    Retrieve, update or delete a user.
"""
try:
user = NewUser.objects.get(pk=pk)
except NewUser.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = CustomUserSerializer(user)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = CustomUserSerializer(user, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
user.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
|
StarcoderdataPython
|
8125232
|
from dataclasses import dataclass, field
from functools import partial
from operator import le
from typing import Optional
from more_properties import cached_property
from toposort import toposort
__all__ = ["RefinementDict", "AmbiguousKeyError"]
class AmbiguousKeyError(KeyError):
pass
@dataclass
class RefinementDict:
"""
A dictionary where the keys are themselves collections
Indexing an element of these collections returns the value associated with
the most precise collection containing that element.
A KeyError is raised if no such collection is found.
"""
lookup: dict = field(default_factory=dict)
fallback: "Optional[RefinementDict]" = None
is_subset: callable = le
is_element: callable = lambda elem, st: elem in st
@cached_property
def dependencies(self):
return {
st: {
subst
for subst in self.lookup
if subst != st and self.is_subset(subst, st)
}
for st in self.lookup
}
@dependencies.deleter
def dependencies(self):
del self.dependency_orders
@partial(cached_property, fdel=lambda self: None)
def dependency_orders(self):
return list(toposort(self.dependencies))
def __getitem__(self, key):
for order in self.dependency_orders:
ancestors = {st for st in order if self.is_element(key, st)}
if len(ancestors) > 1:
raise AmbiguousKeyError(f"{key!r} in all of {ancestors!r}")
if ancestors:
return self.lookup[ancestors.pop()]
if self.fallback is not None:
return self.fallback[key]
raise KeyError(f"{key!r}")
def __setitem__(self, key, value):
del self.dependencies
self.lookup[key] = value
def setdefault(self, key, value):
if self.fallback is None:
self.fallback = RefinementDict(
is_subset=self.is_subset, is_element=self.is_element
)
self.fallback[key] = value
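
# Hedged usage sketch (illustrative, not part of the original module): map nested
# integer ranges to labels and look up the most specific range containing a key.
if __name__ == "__main__":
    d = RefinementDict(
        is_subset=lambda a, b: set(a) <= set(b),
        is_element=lambda elem, st: elem in st,
    )
    d[range(0, 100)] = "broad"
    d[range(0, 10)] = "narrow"
    print(d[5])   # 'narrow' -- range(0, 10) is the most precise containing set
    print(d[50])  # 'broad'  -- only range(0, 100) contains 50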
|
StarcoderdataPython
|
12845383
|
texts = {
"browse":"🗂️ Browse categories",
"orders":"📥 My orders",
"cart":"🛒 My cart",
"settings":"⚙ Settings",
"contact":"📞 Contact us",
"home":"🏠 Home",
"contact1":"{Store_name} - {store_phone}",
}
|
StarcoderdataPython
|
364924
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['DomainConfigurationArgs', 'DomainConfiguration']
@pulumi.input_type
class DomainConfigurationArgs:
def __init__(__self__, *,
authorizer_config: Optional[pulumi.Input['DomainConfigurationAuthorizerConfigArgs']] = None,
domain_configuration_name: Optional[pulumi.Input[str]] = None,
domain_configuration_status: Optional[pulumi.Input['DomainConfigurationStatus']] = None,
domain_name: Optional[pulumi.Input[str]] = None,
server_certificate_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service_type: Optional[pulumi.Input['DomainConfigurationServiceType']] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['DomainConfigurationTagArgs']]]] = None,
validation_certificate_arn: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a DomainConfiguration resource.
"""
if authorizer_config is not None:
pulumi.set(__self__, "authorizer_config", authorizer_config)
if domain_configuration_name is not None:
pulumi.set(__self__, "domain_configuration_name", domain_configuration_name)
if domain_configuration_status is not None:
pulumi.set(__self__, "domain_configuration_status", domain_configuration_status)
if domain_name is not None:
pulumi.set(__self__, "domain_name", domain_name)
if server_certificate_arns is not None:
pulumi.set(__self__, "server_certificate_arns", server_certificate_arns)
if service_type is not None:
pulumi.set(__self__, "service_type", service_type)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if validation_certificate_arn is not None:
pulumi.set(__self__, "validation_certificate_arn", validation_certificate_arn)
@property
@pulumi.getter(name="authorizerConfig")
def authorizer_config(self) -> Optional[pulumi.Input['DomainConfigurationAuthorizerConfigArgs']]:
return pulumi.get(self, "authorizer_config")
@authorizer_config.setter
def authorizer_config(self, value: Optional[pulumi.Input['DomainConfigurationAuthorizerConfigArgs']]):
pulumi.set(self, "authorizer_config", value)
@property
@pulumi.getter(name="domainConfigurationName")
def domain_configuration_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "domain_configuration_name")
@domain_configuration_name.setter
def domain_configuration_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain_configuration_name", value)
@property
@pulumi.getter(name="domainConfigurationStatus")
def domain_configuration_status(self) -> Optional[pulumi.Input['DomainConfigurationStatus']]:
return pulumi.get(self, "domain_configuration_status")
@domain_configuration_status.setter
def domain_configuration_status(self, value: Optional[pulumi.Input['DomainConfigurationStatus']]):
pulumi.set(self, "domain_configuration_status", value)
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "domain_name")
@domain_name.setter
def domain_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain_name", value)
@property
@pulumi.getter(name="serverCertificateArns")
def server_certificate_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "server_certificate_arns")
@server_certificate_arns.setter
def server_certificate_arns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "server_certificate_arns", value)
@property
@pulumi.getter(name="serviceType")
def service_type(self) -> Optional[pulumi.Input['DomainConfigurationServiceType']]:
return pulumi.get(self, "service_type")
@service_type.setter
def service_type(self, value: Optional[pulumi.Input['DomainConfigurationServiceType']]):
pulumi.set(self, "service_type", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DomainConfigurationTagArgs']]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DomainConfigurationTagArgs']]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="validationCertificateArn")
def validation_certificate_arn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "validation_certificate_arn")
@validation_certificate_arn.setter
def validation_certificate_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "validation_certificate_arn", value)
class DomainConfiguration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorizer_config: Optional[pulumi.Input[pulumi.InputType['DomainConfigurationAuthorizerConfigArgs']]] = None,
domain_configuration_name: Optional[pulumi.Input[str]] = None,
domain_configuration_status: Optional[pulumi.Input['DomainConfigurationStatus']] = None,
domain_name: Optional[pulumi.Input[str]] = None,
server_certificate_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service_type: Optional[pulumi.Input['DomainConfigurationServiceType']] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DomainConfigurationTagArgs']]]]] = None,
validation_certificate_arn: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create and manage a Domain Configuration
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[DomainConfigurationArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create and manage a Domain Configuration
:param str resource_name: The name of the resource.
:param DomainConfigurationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DomainConfigurationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorizer_config: Optional[pulumi.Input[pulumi.InputType['DomainConfigurationAuthorizerConfigArgs']]] = None,
domain_configuration_name: Optional[pulumi.Input[str]] = None,
domain_configuration_status: Optional[pulumi.Input['DomainConfigurationStatus']] = None,
domain_name: Optional[pulumi.Input[str]] = None,
server_certificate_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service_type: Optional[pulumi.Input['DomainConfigurationServiceType']] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DomainConfigurationTagArgs']]]]] = None,
validation_certificate_arn: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DomainConfigurationArgs.__new__(DomainConfigurationArgs)
__props__.__dict__["authorizer_config"] = authorizer_config
__props__.__dict__["domain_configuration_name"] = domain_configuration_name
__props__.__dict__["domain_configuration_status"] = domain_configuration_status
__props__.__dict__["domain_name"] = domain_name
__props__.__dict__["server_certificate_arns"] = server_certificate_arns
__props__.__dict__["service_type"] = service_type
__props__.__dict__["tags"] = tags
__props__.__dict__["validation_certificate_arn"] = validation_certificate_arn
__props__.__dict__["arn"] = None
__props__.__dict__["domain_type"] = None
__props__.__dict__["server_certificates"] = None
super(DomainConfiguration, __self__).__init__(
'aws-native:iot:DomainConfiguration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DomainConfiguration':
"""
Get an existing DomainConfiguration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = DomainConfigurationArgs.__new__(DomainConfigurationArgs)
__props__.__dict__["arn"] = None
__props__.__dict__["authorizer_config"] = None
__props__.__dict__["domain_configuration_name"] = None
__props__.__dict__["domain_configuration_status"] = None
__props__.__dict__["domain_name"] = None
__props__.__dict__["domain_type"] = None
__props__.__dict__["server_certificate_arns"] = None
__props__.__dict__["server_certificates"] = None
__props__.__dict__["service_type"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["validation_certificate_arn"] = None
return DomainConfiguration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="authorizerConfig")
def authorizer_config(self) -> pulumi.Output[Optional['outputs.DomainConfigurationAuthorizerConfig']]:
return pulumi.get(self, "authorizer_config")
@property
@pulumi.getter(name="domainConfigurationName")
def domain_configuration_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "domain_configuration_name")
@property
@pulumi.getter(name="domainConfigurationStatus")
def domain_configuration_status(self) -> pulumi.Output[Optional['DomainConfigurationStatus']]:
return pulumi.get(self, "domain_configuration_status")
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "domain_name")
@property
@pulumi.getter(name="domainType")
def domain_type(self) -> pulumi.Output['DomainConfigurationDomainType']:
return pulumi.get(self, "domain_type")
@property
@pulumi.getter(name="serverCertificateArns")
def server_certificate_arns(self) -> pulumi.Output[Optional[Sequence[str]]]:
return pulumi.get(self, "server_certificate_arns")
@property
@pulumi.getter(name="serverCertificates")
def server_certificates(self) -> pulumi.Output[Sequence['outputs.DomainConfigurationServerCertificateSummary']]:
return pulumi.get(self, "server_certificates")
@property
@pulumi.getter(name="serviceType")
def service_type(self) -> pulumi.Output[Optional['DomainConfigurationServiceType']]:
return pulumi.get(self, "service_type")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence['outputs.DomainConfigurationTag']]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="validationCertificateArn")
def validation_certificate_arn(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "validation_certificate_arn")
|
StarcoderdataPython
|
5027322
|
<gh_stars>1-10
"""Functions to Plot RoboJam Performances
"""
import matplotlib.pyplot as plt
from robojam import divide_performance_into_swipes
input_colour = 'darkblue'
gen_colour = 'firebrick'
def plot_2D(perf_df, name="foo", saving=False, figsize=(8, 8)):
"""Plot a 2D representation of a performance 2D"""
swipes = divide_performance_into_swipes(perf_df)
plt.figure(figsize=figsize)
for swipe in swipes:
p = plt.plot(swipe.x, swipe.y, 'o-')
plt.setp(p, color=gen_colour, linewidth=5.0)
plt.ylim(1.0,0)
plt.xlim(0,1.0)
plt.xticks([])
plt.yticks([])
if saving:
plt.savefig(name+".png", bbox_inches='tight')
plt.close()
else:
plt.show()
def plot_double_2d(perf1, perf2, name="foo", saving=False, figsize=(8, 8)):
"""Plot two performances in 2D"""
plt.figure(figsize=figsize)
swipes = divide_performance_into_swipes(perf1)
for swipe in swipes:
p = plt.plot(swipe.x, swipe.y, 'o-')
plt.setp(p, color=input_colour, linewidth=5.0)
swipes = divide_performance_into_swipes(perf2)
for swipe in swipes:
p = plt.plot(swipe.x, swipe.y, 'o-')
plt.setp(p, color=gen_colour, linewidth=5.0)
plt.ylim(1.0,0)
plt.xlim(0,1.0)
plt.xticks([])
plt.yticks([])
if saving:
plt.savefig(name+".png", bbox_inches='tight')
plt.close()
else:
plt.show()
|
StarcoderdataPython
|
98642
|
from typing import Final
from django.db import models
from django.utils.translation import gettext
from django.utils.translation import gettext_lazy as _
class Choice(models.Model):
title = models.CharField(max_length=4096)
def __str__(self):
return self.title
class Meta:
verbose_name = _("Choice")
verbose_name_plural = _("Choices")
class Question(models.Model):
TYPE_CHOICES: Final[tuple] = (
('text', gettext('Text')),
('one', gettext('One option')),
('many', gettext('Many options')),
)
title = models.CharField(verbose_name=_("Title"), max_length=4096)
question_type = models.CharField(
verbose_name=_("Type"),
max_length=4,
choices=TYPE_CHOICES
)
choices = models.ManyToManyField(
Choice, verbose_name=_("Choices"),
related_name='questions'
)
is_active = models.BooleanField(
verbose_name=_("Question enabled"),
default=True
)
def __str__(self):
return self.title
def delete(self, *args, **kwargs):
self.is_active = False
self.save()
class Meta:
verbose_name = _("Question")
verbose_name_plural = _("Questions")
unique_together = ['title', 'question_type']
class Poll(models.Model):
title = models.CharField(verbose_name=_("Title"), max_length=120)
started_at = models.DateTimeField(verbose_name=_("Start time"))
finish_at = models.DateTimeField(verbose_name=_("Finish time"))
description = models.TextField(
verbose_name=_("Description"),
blank=True
)
is_active = models.BooleanField(
verbose_name=_("Poll enabled"),
default=True,
db_index=True
)
questions = models.ManyToManyField(
Question,
verbose_name=_("Questions"),
related_name='polls'
)
def __str__(self):
return self.title
def delete(self, *args, **kwargs):
self.is_active = False
self.save()
class Meta:
verbose_name = _("Poll")
verbose_name_plural = _("Polls")
class Answer(models.Model):
user = models.IntegerField()
question = models.ForeignKey(
Question,
verbose_name=_("Question"),
related_name='answers',
on_delete=models.CASCADE,
)
choice = models.ForeignKey(
Choice,
verbose_name=_("Choice"),
related_name='answers_one',
on_delete=models.CASCADE,
blank=True,
null=True,
)
choices = models.ManyToManyField(
Choice,
verbose_name=_("Choices"),
related_name='answers_many',
blank=True,
)
choice_text = models.TextField(
verbose_name=_("Choice text"),
blank=True,
null=True,
)
poll = models.ForeignKey(
Poll,
verbose_name=_("Poll"),
related_name='answers',
on_delete=models.CASCADE
)
def __str__(self):
return self.choice.title
class Meta:
verbose_name = _("Answer")
verbose_name_plural = _("Answers")
unique_together = ('user', 'question', 'poll',)
|
StarcoderdataPython
|
9679407
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import pymysql
pymysql.install_as_MySQLdb()
app = Flask(__name__)
#import os
#print os.environ.keys()
#print os.environ.get('FLASKR_SETTINGS')
# load the configuration settings
app.config.from_object('myWeb.setting') # the setting module inside the package; no .py suffix needed
app.config.from_envvar('FLASKR_SETTINGS') # environment variable pointing to the path of the setting config file
# create the database object
db = SQLAlchemy(app)
# must be declared after the app object; imports the view module
from myWeb.controller import blog_message
|
StarcoderdataPython
|
3576637
|
<reponame>AR0EN/film-exif<gh_stars>0
# References
# [Exchangeable Image File Format] | (http://www.cipa.jp/std/documents/e/DC-008-2012_E.pdf)
# [Piexif] | (https://github.com/hMatoba/Piexif)
# Import
import os
import sys
import copy
import piexif as pxf
from PIL import Image
# Constants
TAG = 'Film Exif Utility'
IPHOTOS_DIR = 'input' # Path to input photos' directory
OPHOTOS_DIR = 'output' # Path to output directory
IPHOTO_EXTS = ['.jpg', '.JPG', '.png', '.PNG']
OPHOTO_EXT = '.jpg'
AUTHOR = u'<NAME>'
CAMERA_MANUFACTURER = u'Kyocera Japan'
CAMERA_MODEL = u'Contax S2 Titanium 60 Years Limited'
CAMERA_SERIAL = u'003643'
LENS_MODEL = u'Carl Zeiss Planar 1,4/50 T* (C/Y)'
LENS_SERIAL = u'9552246'
LENS_FOCAL_LENGTH = 50
LENS_FOCAL_LENGTH_35MM = 50
FILM = u'Kodak Ultramax 400'
ImageIFDCustomized = {
pxf.ImageIFD.Make:u'Kyocera Japan',
pxf.ImageIFD.Model:u'Contax S2 Titanium 60 Years Limited',
pxf.ImageIFD.ImageDescription:u'Kodak Ultramax 400',
}
ExifIFDCustomized = {
pxf.ExifIFD.BodySerialNumber:u'003643',
pxf.ExifIFD.CameraOwnerName:u'<NAME>',
pxf.ExifIFD.LensMake:u'Kyocera Japan',
pxf.ExifIFD.LensModel:u'Carl Zeiss Planar 1,4/50 T* (C/Y)',
pxf.ExifIFD.LensSerialNumber:u'9552246',
pxf.ExifIFD.FocalLength:[5000, 100],
pxf.ExifIFD.FocalLengthIn35mmFilm:50,
}
ExifCustomized = []
ExifCustomized.append(['0th', ImageIFDCustomized])
ExifCustomized.append(['Exif', ExifIFDCustomized])
# Local functions
def log(msg, tag = None, f = sys.stdout):
if tag is not None:
text = '[{0:s}] {1:s}\n'.format(tag, msg)
else:
text = '{0:s}\n'.format(msg)
f.write(text)
def logErr(msg, tag = None):
log(msg, f = sys.stderr)
def getFileExtension(filePath):
ext = None
if filePath is not None:
SEPARATOR = '.'
fileName = os.path.basename(filePath)
_ext = '{0:s}{1:s}'.format('.', fileName.split(SEPARATOR)[-1])
if(_ext in filePath):
ext = _ext
else:
ext = None
else:
ext = None
return ext
def dumpExif(photo):
exif = pxf.load(photo)
for ifd in ("0th", "Exif", "GPS", "1st"):
for tag in exif[ifd]:
print(ifd, pxf.TAGS[ifd][tag]["name"], exif[ifd][tag])
def updateExif(iPhotoPath):
# Load photo and original Exif
imgData = Image.open(iPhotoPath)
exifOrg = pxf.load(imgData.info["exif"])
# Clone Exif
exifNew = copy.deepcopy(exifOrg)
# Update Exif
for ifd in ExifCustomized:
for etag in ifd[1]:
exifNew[ifd[0]][etag] = ifd[1][etag]
photoName = os.path.basename(iPhotoPath)
oPhotoPath = '{0:s}/{1:s}'.format(OPHOTOS_DIR, photoName)
imgData.save(oPhotoPath, exif=pxf.dump(exifNew))
def main():
# Scan through all images
for f in os.listdir(IPHOTOS_DIR):
fullPath = '{0:s}/{1:s}'.format(IPHOTOS_DIR, f)
log('{0:s}'.format(fullPath), TAG)
ext = getFileExtension(f)
if ((ext is not None) and (ext in IPHOTO_EXTS)):
updateExif(fullPath)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
4921523
|
# Generated by Django 3.2.1 on 2021-05-05 08:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('hood', '0005_auto_20210505_0757'),
]
operations = [
migrations.AddField(
model_name='profile',
name='neighborhood',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='hood.neighbourhood'),
),
]
|
StarcoderdataPython
|
9655334
|
<gh_stars>1-10
#!/usr/bin/env python
"""
Usage: python num_parameters.py <model_file>.pkl
Prints the number of parameters in a saved model (total number of scalar
elements in all the arrays parameterizing the model).
"""
__author__ = "<NAME>"
import sys
from pylearn2.utils import serial
def num_parameters(model):
"""
.. todo::
WRITEME
"""
params = model.get_params()
return sum(map(lambda x: x.get_value().size, params))
if __name__ == '__main__':
_, model_path = sys.argv
model = serial.load(model_path)
print num_parameters(model)
|
StarcoderdataPython
|
4909934
|
"""Fix the name of modules
This module is useful when you want to rename many of the modules in
your project. That can happen specially when you want to change their
naming style.
For instance::
fixer = FixModuleNames(project)
changes = fixer.get_changes(fixer=str.lower)
project.do(changes)
Here it renames all modules and packages to use lower-cased chars.
You can tell it to use any other style by using the ``fixer``
argument.
"""
from rope.base import taskhandle
from rope.contrib import changestack
from rope.refactor import rename
class FixModuleNames(object):
def __init__(self, project):
self.project = project
def get_changes(self, fixer=str.lower, task_handle=taskhandle.NullTaskHandle()):
"""Fix module names
`fixer` is a function that takes and returns a `str`. Given
the name of a module, it should return the fixed name.
"""
stack = changestack.ChangeStack(self.project, "Fixing module names")
jobset = task_handle.create_jobset(
"Fixing module names", self._count_fixes(fixer) + 1
)
try:
while True:
for resource in self._tobe_fixed(fixer):
jobset.started_job(resource.path)
renamer = rename.Rename(self.project, resource)
changes = renamer.get_changes(fixer(self._name(resource)))
stack.push(changes)
jobset.finished_job()
break
else:
break
finally:
jobset.started_job("Reverting to original state")
stack.pop_all()
jobset.finished_job()
return stack.merged()
def _count_fixes(self, fixer):
return len(list(self._tobe_fixed(fixer)))
def _tobe_fixed(self, fixer):
for resource in self.project.get_python_files():
modname = self._name(resource)
if modname != fixer(modname):
yield resource
def _name(self, resource):
modname = resource.name.rsplit(".", 1)[0]
if modname == "__init__":
modname = resource.parent.name
return modname
|
StarcoderdataPython
|
321589
|
<reponame>ask/metasyntactic<filename>metasyntactic/themes/muses.py
# -*- coding: utf-8 -*-
'''
##########################
Acme::MetaSyntactic::muses
##########################
****
NAME
****
Acme::MetaSyntactic::muses - Greek Muses
***********
DESCRIPTION
***********
The nine muses from Greek mythology.
************
CONTRIBUTORS
************
Abigail, <NAME> (BooK)
*******
CHANGES
*******
- \*
2012-05-21 - v1.001
Made multilingual. Added translations for \ *de*\ , \ *en*\ , \ *eo*\ , \ *es*\ ,
\ *fr*\ , \ *it*\ , \ *la*\ (the default), \ *nl*\ , \ *pl*\ , \ *pt*\ .
Published in Acme-MetaSyntactic-Themes version 1.002.
- \*
2012-05-14 - v1.000
Introduced in Acme-MetaSyntactic-Themes version 1.001.
- \*
2005-10-24
Submitted by Abigail.
********
SEE ALSO
********
`Acme::MetaSyntactic <http://search.cpan.org/search?query=Acme%3a%3aMetaSyntactic&mode=module>`_, `Acme::MetaSyntactic::List <http://search.cpan.org/search?query=Acme%3a%3aMetaSyntactic%3a%3aList&mode=module>`_.
'''
name = 'muses'
DATA = '''\
# default
la
# names de
Kalliope Klio Erato Euterpe Melpomene Polyhymnia Terpsichore Thalia Urania
# names en
Calliope Clio Erato Euterpe Melpomene Polyhymnia Terpsichore Thalia Urania
# names eo
Kaliopo Klio Erato Euterpo Melpomeno Polimnio Terpsihoro Talio Uranio
# names es
Caliope <NAME> Euterpe Melpomene Polimnia Terpsicore Talia Urania
# names fr
<NAME> Euterpe Melpomene Polymnie Terpsichore Thalie Uranie
# names it
<NAME> Euterpe Melpomene Polimnia Tersicore Talia Urania
# names la
<NAME> Euterpe Melpomene Polyhymnia Terpsichore Thalia Urania
# names nl
Kal<NAME> Euterpe Melpomene Polyhymnia Terpsichore Thaleia Urania
# names pl
<NAME> Euterpe Melpomene Polihymnia Terpsychora Talia Urania
# names pt
Caliope <NAME>uterpe Melpomene Polimnia Terpsicore Talia Urania\
'''
from metasyntactic.base import parse_data
from random import choice, shuffle
from six import iteritems
data = parse_data(DATA)
def default():
try:
if 'default' in data:
return data['default'][0]
except (KeyError, IndexError):
pass
return 'en'
def all():
acc = set()
for category, names in iteritems(data['names']):
if names:
acc |= names
return acc
def names(category=None):
if not category:
category = default()
if category == ':all':
return list(all())
category = category.replace('/', ' ')
return list(data['names'][category])
def random(n=1, category=None):
got = names(category)
if got:
shuffle(got)
if n == 1:
return choice(got)
return got[:n]
def categories():
return set(data['names'])
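
# Hedged usage sketch (illustrative, not part of the original theme module):
# query the default category and draw a few muse names from it.
if __name__ == '__main__':
    print(default())        # expected: 'la'
    print(sorted(names()))  # the nine names of the default (Latin) category
    print(random(n=3))      # three random names from the default category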
|
StarcoderdataPython
|
11372456
|
from django.apps import AppConfig
class StockInquiryConfig(AppConfig):
name = 'stock_inquiry'
|
StarcoderdataPython
|
6497291
|
'''
Develop a program that asks for the distance of a trip in km.
Calculate the ticket price, charging R$ 0.50 per km
for trips of up to 200 km and R$ 0.45 per km for longer trips.
'''
distancia = float(input("Enter the distance you have to travel: "))
precoCurto = 0.50
precoLongo = 0.45
if distancia <= 200:
    print("You will travel {} km and will have to pay R$ {}".format(distancia, precoCurto * distancia))
elif distancia > 200:
    print("You will travel {} km, a long way, and will pay R$ {} for the fare.".format(distancia, precoLongo * distancia))
|
StarcoderdataPython
|
12848923
|
<reponame>veqtor/veqtor_keras
import tensorflow as tf
from tensorflow.keras.utils import custom_object_scope
from tensorflow.python.keras.testing_utils import layer_test
from veqtor_keras.layers.time_delay_layers import TimeDelayLayer1D, DepthGroupwiseTimeDelayLayer1D, \
DepthGroupwiseTimeDelayLayerFake2D, TimeDelayLayerFake2D
class TimeDelayLayer1DTest(tf.test.TestCase):
def test_simple(self):
with custom_object_scope({'TimeDelayLayer1D': TimeDelayLayer1D}):
layer_test(
TimeDelayLayer1D, kwargs={'output_dim': 4}, input_shape=(5, 32, 3))
class SeparableTimeDelayLayer1DTest(tf.test.TestCase):
def test_simple(self):
with custom_object_scope(
{'DepthGroupwiseTimeDelayLayer1D': DepthGroupwiseTimeDelayLayer1D,
'TimeDelayLayer1D': TimeDelayLayer1D}):
layer_test(
DepthGroupwiseTimeDelayLayer1D, kwargs={'output_mul': 2}, input_shape=(5, 32, 3))
class SeparableTimeDelayLayerFake2DTest(tf.test.TestCase):
def test_simple(self):
with custom_object_scope({'DepthGroupwiseTimeDelayLayerFake2D': DepthGroupwiseTimeDelayLayerFake2D}):
layer_test(
DepthGroupwiseTimeDelayLayerFake2D, input_shape=(5, 16, 16, 3))
class TimeDelayLayerFake2DTest(tf.test.TestCase):
def test_simple(self):
with custom_object_scope({'TimeDelayLayerFake2D': TimeDelayLayerFake2D}):
layer_test(
TimeDelayLayerFake2D, kwargs={'output_dim': 4}, input_shape=(5, 16, 16, 3))
if __name__ == '__main__':
tf.test.main()
|
StarcoderdataPython
|
152923
|
<gh_stars>0
import contextlib
import os
from functools import partial
import click
out = partial(click.secho, bold=True, err=True)
err = partial(click.secho, fg="red", err=True)
@contextlib.contextmanager
def suppress_stdout():
null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
# Save the actual stdout (1) and stderr (2) file descriptors.
save_fds = [os.dup(1), os.dup(2)]
os.dup2(null_fds[0], 1)
os.dup2(null_fds[1], 2)
yield
# Re-assign the real stdout/stderr back to (1) and (2)
os.dup2(save_fds[0], 1)
os.dup2(save_fds[1], 2)
# Close the null files
for fd in null_fds + save_fds:
os.close(fd)
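
# Hedged usage sketch (illustrative, not part of the original module): run a noisy
# call with its stdout/stderr silenced, then report normally afterwards.
if __name__ == "__main__":
    with suppress_stdout():
        print("this line is swallowed by /dev/null")
    out("file descriptors restored; this line is visible again")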
|
StarcoderdataPython
|
1999887
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .tracked_resource_py3 import TrackedResource
class Profile(TrackedResource):
"""Class representing a Traffic Manager profile.
:param id: Fully qualified resource Id for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{resourceName}
:type id: str
:param name: The name of the resource
:type name: str
:param type: The type of the resource. Ex-
Microsoft.Network/trafficmanagerProfiles.
:type type: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param location: The Azure Region where the resource lives
:type location: str
:param profile_status: The status of the Traffic Manager profile. Possible
values include: 'Enabled', 'Disabled'
:type profile_status: str or
~azure.mgmt.trafficmanager.models.ProfileStatus
:param traffic_routing_method: The traffic routing method of the Traffic
Manager profile. Possible values include: 'Performance', 'Priority',
'Weighted', 'Geographic'
:type traffic_routing_method: str or
~azure.mgmt.trafficmanager.models.TrafficRoutingMethod
:param dns_config: The DNS settings of the Traffic Manager profile.
:type dns_config: ~azure.mgmt.trafficmanager.models.DnsConfig
:param monitor_config: The endpoint monitoring settings of the Traffic
Manager profile.
:type monitor_config: ~azure.mgmt.trafficmanager.models.MonitorConfig
:param endpoints: The list of endpoints in the Traffic Manager profile.
:type endpoints: list[~azure.mgmt.trafficmanager.models.Endpoint]
:param traffic_view_enrollment_status: Indicates whether Traffic View is
'Enabled' or 'Disabled' for the Traffic Manager profile. Null, indicates
'Disabled'. Enabling this feature will increase the cost of the Traffic
Manage profile. Possible values include: 'Enabled', 'Disabled'
:type traffic_view_enrollment_status: str or
~azure.mgmt.trafficmanager.models.TrafficViewEnrollmentStatus
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'profile_status': {'key': 'properties.profileStatus', 'type': 'str'},
'traffic_routing_method': {'key': 'properties.trafficRoutingMethod', 'type': 'str'},
'dns_config': {'key': 'properties.dnsConfig', 'type': 'DnsConfig'},
'monitor_config': {'key': 'properties.monitorConfig', 'type': 'MonitorConfig'},
'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'},
'traffic_view_enrollment_status': {'key': 'properties.trafficViewEnrollmentStatus', 'type': 'str'},
}
def __init__(self, *, id: str=None, name: str=None, type: str=None, tags=None, location: str=None, profile_status=None, traffic_routing_method=None, dns_config=None, monitor_config=None, endpoints=None, traffic_view_enrollment_status=None, **kwargs) -> None:
super(Profile, self).__init__(id=id, name=name, type=type, tags=tags, location=location, **kwargs)
self.profile_status = profile_status
self.traffic_routing_method = traffic_routing_method
self.dns_config = dns_config
self.monitor_config = monitor_config
self.endpoints = endpoints
self.traffic_view_enrollment_status = traffic_view_enrollment_status
|
StarcoderdataPython
|
9649229
|
<reponame>stefanlack/ods-project-quickstarters
#!/usr/bin/env python
import setuptools
setuptools.setup(name='airflow-dag-dependencies',
version='0.1',
description='DAG dependencies',
url='https://www.python.org/sigs/distutils-sig/',
packages=setuptools.find_packages(),
install_requires=[]
)
|
StarcoderdataPython
|
9693358
|
<gh_stars>1-10
Board=[i for i in range(0,9)]
User,Computer='X','O'
Picks=[i for i in range(1,10)]
winners=((0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6))
def print_board():
x=1
for i in Board:
if x%3==0:
if i in('X','O'):print( i ,end='\n' + '---------'+'\n')
else:print( i+1 ,end='\n' + '---------'+'\n')
if x%3!=0:
if i in('X','O'):print( i ,end=' | ')
else:print( i+1 ,end=' | ')
x+=1
def space_exist():
return Board.count('X')+Board.count('O')!=9
def can_move(brd,plyr,mve):
if mve in Picks and brd[mve-1]==mve-1:
return True
def can_win(brd,plyr,mve):
for i in winners:
win=True
for j in i:
if brd[j]!=plyr:
win=False
break
if win == True:break
return win
def make_move(brd,plyr,mve,undo=False):
if can_move(brd,plyr,mve):
brd[mve-1]=plyr
win=can_win(brd,plyr,mve)
if undo:
brd[mve-1]=mve-1
return(True,win)
else:return(False,False)
def computer_move():
move=-1
for i in range(1,10):
if make_move(Board,Computer,i,True)[1]:
move=i
break
if move==-1:
for i in range(1,10):
if make_move(Board,User,i,True)[1]:
move=i
break
if move==-1:
for i in range(1,10):
if can_move(Board,Computer,i):
move=i
return make_move(Board,Computer,move)
while space_exist():
print_board()
move=int(input("Pick a number [1-9]" ))
moved,won=make_move(Board,User,move)
if not moved:
print("pick another number you dumbfuck")
continue
    if won:
        print("You won!")
        break
elif computer_move()[1]:
print("You Lost")
break
if not space_exist(): print("Tie")
print_board()
|
StarcoderdataPython
|
5009640
|
"""Sample API Client."""
from __future__ import annotations
import asyncio
import socket
from typing import Any
import ssl
import aiohttp
import async_timeout
import requests
from requests import adapters
from urllib3 import poolmanager
from .const import LOGGER, REVERSE_GEOCODE_URL, POWER_URL, HEAT_URL
API_HEADERS = {aiohttp.hdrs.CONTENT_TYPE: "application/json; charset=UTF-8"}
class ApiClientException(Exception):
"""Api Client Exception."""
class TauronOutagesApiClient:
def __init__(
self,
latitude: float,
longitude: float,
session: aiohttp.ClientSession,
) -> None:
"""Sample API Client."""
self._latitude = latitude
self._longitude = longitude
self._session = session
async def async_get_reverse_geocode(self) -> dict[str, Any]:
"""Get reverse geocode (address, postcode etc)."""
geocode_url = (
f"{REVERSE_GEOCODE_URL}&lat={self._latitude}&lon={self._longitude}"
)
return await self.api_wrapper("get", geocode_url)
async def async_get_city_gaid(self, reverse_geocode) -> dict[str, Any]:
"""Get City Gaid"""
postcode = reverse_geocode.get("address").get("postcode")
get_cities_url = f"{POWER_URL}city/GetCities?partName={postcode}"
return await self.api_wrapper("get", get_cities_url)
async def async_get_street_gaid(self, reverse_geocode, city_gaid) -> dict[str, Any]:
"""Get Street Gaid"""
road = reverse_geocode.get("address").get("road")
get_streets_url = (
f"{POWER_URL}street/GetStreets?ownerGaid={city_gaid}&partName={road}"
)
return await self.api_wrapper("get", get_streets_url)
async def async_get_power_outage_data(self, street_gaid) -> dict[str, Any]:
"""Get data from the Power Outages API."""
get_outages_url = f"{POWER_URL}outage/GetOutages?gaid={street_gaid}&type=street"
return await self.api_wrapper("get", get_outages_url)
async def async_get_heat_outage_data(self, street_gaid) -> dict[str, Any]:
"""Get data from the Heat Outages API."""
get_outages_url = f"{HEAT_URL}outage/GetOutages?gaid={street_gaid}&type=street"
return await self.api_wrapper("get", get_outages_url)
async def api_wrapper(
self,
method: str,
url: str,
data: dict[str, Any] = {},
headers: dict = {"cache-control": "no-cache"},
) -> dict[str, Any] | None:
"""Get information from the API."""
try:
async with async_timeout.timeout(10, loop=asyncio.get_event_loop()):
sslcontext = ssl.create_default_context()
sslcontext.set_ciphers("DEFAULT@SECLEVEL=1")
sslcontext.check_hostname = False
LOGGER.info(url)
response = await self._session.request(
method=method, url=url, headers=headers, json=data, ssl=sslcontext
)
if method == "get":
return await response.json()
except asyncio.TimeoutError as exception:
raise ApiClientException(
f"Timeout error fetching information from {url}"
) from exception
except (KeyError, TypeError) as exception:
raise ApiClientException(
f"Error parsing information from {url} - {exception}"
) from exception
except (aiohttp.ClientError, socket.gaierror) as exception:
raise ApiClientException(
f"Error fetching information from {url} - {exception}"
) from exception
except Exception as exception: # pylint: disable=broad-except
raise ApiClientException(exception) from exception
# to fix the SSLError
class TLSAdapter(adapters.HTTPAdapter):
def init_poolmanager(self, connections, maxsize, block=False):
"""Create and initialize the urllib3 PoolManager."""
ctx = ssl.create_default_context()
ctx.set_ciphers("DEFAULT@SECLEVEL=1")
ctx.check_hostname = False
self.poolmanager = poolmanager.PoolManager(
num_pools=connections,
maxsize=maxsize,
block=block,
ssl_version=ssl.PROTOCOL_TLS,
ssl_context=ctx,
)
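
# Hedged usage sketch (illustrative, not part of the original module): mount the
# adapter on a requests session so HTTPS calls use the relaxed cipher settings
# that the "to fix the SSLError" comment above refers to.
def _example_tls_session() -> requests.Session:
    session = requests.Session()
    session.mount("https://", TLSAdapter())
    return session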
|
StarcoderdataPython
|
5041156
|
from time import strftime
import apollocaffe
import numpy as np
import os
class TrainLogger(object):
def __init__(self, display_interval, log_file="/tmp/apollocaffe_log.txt"):
self.display_interval = display_interval
self.log_file = log_file
os.system("touch %s" % self.log_file)
def log(self, idx, meta_data):
meta_data['start_iter'] = meta_data.get('start_iter', 0)
if idx % self.display_interval == 0:
log_line = ""
try:
loss = np.mean(meta_data['train_loss'][-self.display_interval:])
log_line = "%s - Iteration %4d - Train Loss: %g" % \
(strftime("%Y-%m-%d %H:%M:%S"), idx, loss)
except Exception as ex:
                log_line = "Skipping training log: Unknown Error: %s" % ex
try:
with open(self.log_file, 'ab+') as lfile:
lfile.write("%s\n" % log_line)
except IOError:
print "Trainer Logger Error: %s does not exist." % self.log_file
except Exception as e:
print e
print log_line
class TestLogger(object):
def __init__(self, display_interval, log_file="/tmp/apollocaffe_log.txt"):
self.display_interval = display_interval
self.log_file = log_file
os.system("touch %s" % self.log_file)
def log(self, idx, meta_data):
if idx % self.display_interval == 0:
try:
loss = np.mean(meta_data['test_loss'][-self.display_interval:])
log_line = "%s - Iteration %4d - Test Loss: %g" % \
(strftime("%Y-%m-%d %H:%M:%S"), idx, loss)
except IndexError:
log_line = "Skipping Test log: \
No test_loss provided"
except Exception as e:
log_line = "Skipping test log: Unknown Error"
print e
try:
with open(self.log_file, 'ab+') as lfile:
lfile.write("%s\n" % log_line)
except IOError:
print "TestLogger Error: %s does not exist." % self.log_file
except Exception as e:
print e
print log_line
class SnapshotLogger(object):
def __init__(self, snapshot_interval, snapshot_prefix='/tmp/model',
log_file="/tmp/apollocaffe_log.txt"):
self.snapshot_interval = snapshot_interval
self.snapshot_prefix = snapshot_prefix
self.log_file = log_file
os.system("touch %s" % self.log_file)
def log(self, idx, meta_data):
meta_data['start_iter'] = meta_data.get('start_iter', 0)
if idx % self.snapshot_interval == 0 and idx > meta_data['start_iter']:
try:
filename = '%s_%d.h5' % (self.snapshot_prefix, idx)
log_line = "%s - Iteration %4d - Saving net to %s" % \
(strftime("%Y-%m-%d %H:%M:%S"), idx, filename)
print(log_line)
meta_data['apollo_net'].save(filename)
filename = '%s_%d.caffemodel' % (self.snapshot_prefix, idx) # added by <NAME>
meta_data['apollo_net'].save(filename)
except Exception as e:
print e
print('Saving failed')
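
# Usage sketch (added for illustration, not part of the original module):
# drive TrainLogger with a fake loss history; it appends to the default
# /tmp/apollocaffe_log.txt defined above.
if __name__ == '__main__':
    logger = TrainLogger(display_interval=10)
    meta_data = {'train_loss': [0.9, 0.7, 0.5, 0.4], 'start_iter': 0}
    for idx in range(0, 30, 10):
        logger.log(idx, meta_data)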
|
StarcoderdataPython
|
6475356
|
import argparse
from typing import List, Optional
import torch
from omegaconf import OmegaConf
from classy.utils.optional_deps import requires
try:
import uvicorn
from fastapi import FastAPI
except ImportError:
uvicorn = None
FastAPI = None
from classy.utils.commons import get_local_ip_address
from classy.utils.lightning import (
load_classy_module_from_checkpoint,
load_prediction_dataset_conf_from_checkpoint,
)
from classy.utils.log import get_project_logger
logger = get_project_logger(__name__)
@requires("uvicorn", "serve")
@requires("fastapi", "serve")
def serve(
model_checkpoint_path: str,
port: int,
cuda_device: int,
token_batch_size: int,
prediction_params: Optional[str] = None,
):
# load model
model = load_classy_module_from_checkpoint(model_checkpoint_path)
model.to(torch.device(cuda_device if cuda_device != -1 else "cpu"))
model.freeze()
if prediction_params is not None:
model.load_prediction_params(dict(OmegaConf.load(prediction_params)))
# load dataset conf
dataset_conf = load_prediction_dataset_conf_from_checkpoint(model_checkpoint_path)
# mock call to load resources
next(model.predict(samples=[], dataset_conf=dataset_conf), None)
# for better readability on the OpenAPI docs
# why leak the inner confusing class names
class InputSample(model.serve_input_class):
pass
class OutputSample(model.serve_output_class):
pass
app = FastAPI(title="Classy Serve")
@app.post("/", response_model=List[OutputSample], description="Prediction endpoint")
def predict(input_samples: List[InputSample]) -> List[OutputSample]:
output_samples = []
for predicted_sample in model.predict(
model=model,
samples=[input_sample.unmarshal() for input_sample in input_samples],
dataset_conf=dataset_conf,
token_batch_size=token_batch_size,
):
output_samples.append(OutputSample.marshal(predicted_sample))
return output_samples
@app.get("/healthz")
def healthz():
return "ok"
local_ip_address = get_local_ip_address()
print(f"Model exposed at http://{local_ip_address}:{port}")
print(f"Remember you can checkout the API at http://{local_ip_address}:{port}/docs")
uvicorn.run(app, host="0.0.0.0", port=port)
def main():
args = parse_args()
serve(
model_checkpoint_path=args.model_checkpoint,
prediction_params=args.prediction_params,
port=args.p,
cuda_device=args.cuda_device,
token_batch_size=args.token_batch_size,
)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"model_checkpoint", type=str, help="Path to pl_modules checkpoint"
)
parser.add_argument(
"--prediction-params", type=str, default=None, help="Path to prediction params"
)
parser.add_argument(
"-p", type=int, default=8000, help="Port on which to expose the model"
)
parser.add_argument("--cuda-device", type=int, default=-1, help="Cuda device")
parser.add_argument(
"--token-batch-size", type=int, default=128, help="Token batch size"
)
return parser.parse_args()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
6704135
|
"""Implement mixture of probability distribution layers"""
import torch
from torch import Tensor, nn
from torch.nn import Module
import torch.nn.functional as F
from typing import List, Union, Tuple
__all__ = ['MixtureOfGaussian', 'MixtureOfExpert']
class MixtureOfGaussian(nn.Linear):
"""
A layer that generates means, stds and mixing coefficients of a mixture of gaussian distributions.
Used as the final layer of a mixture of (Gaussian) density network.
Only support isotropic covariances for the components.
References:
<NAME>. "Pattern Recognition and Machine Learning"
"""
def __init__(self, in_features: int, out_features: int, n_dist: int, bias: bool=True):
assert n_dist > 0 and in_features > 0 and out_features > 0
self.n_dist = n_dist
super(MixtureOfGaussian, self).__init__(in_features, n_dist * (2 + out_features), bias)
def forward(self, input: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
"""
:param input:
:return: means, stds and mixing coefficients
"""
features = super().forward(input)
mixing_coeffs = F.softmax(features[:, :self.n_dist], dim=-1)
stds = torch.exp(features[:, self.n_dist:self.n_dist * 2])
means = features[:, self.n_dist * 2:]
return means, stds, mixing_coeffs
class MixtureOfExpert(Module):
def __init__(self, experts: List[Module], gate: Module, return_mixture: bool=True):
"""
:param experts: list of separate expert networks. Each must take the same input and return
output of same dimensionality
:param gate: take the input and output (un-normalized) score for each expert
"""
super(MixtureOfExpert, self).__init__()
self.experts = nn.ModuleList(experts)
self.gate = gate
self.softmax = nn.Softmax(dim=-1)
self.return_mixture = return_mixture
def forward(self, input: Tensor) -> Union[Tuple[Tensor, Tensor], Tensor]:
"""
:param input:
:return: if return_mixture, return the mixture of expert output; else return both expert score and expert output
(with the n_expert channel coming last)
"""
expert_scores = self.softmax(self.gate(input))
expert_outputs = torch.stack([expert(input) for expert in self.experts], dim=-1)
expert_scores = expert_scores.view(
list(expert_scores.shape)[:-1] + [1 for _ in range(len(expert_outputs.shape) - len(expert_scores.shape))]
+ list(expert_scores.shape)[-1:]
)
if self.return_mixture:
return torch.sum(expert_outputs * expert_scores, dim=-1)
else:
return expert_outputs, expert_scores
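
# Usage sketch (added for illustration, not part of the original module):
# a 3-component Gaussian mixture head and a 2-expert mixture on a toy batch.
if __name__ == '__main__':
    x = torch.randn(4, 16)
    mog = MixtureOfGaussian(in_features=16, out_features=8, n_dist=3)
    means, stds, coeffs = mog(x)
    print(means.shape, stds.shape, coeffs.shape)  # (4, 24), (4, 3), (4, 3)
    experts = [nn.Linear(16, 8) for _ in range(2)]
    gate = nn.Linear(16, 2)
    moe = MixtureOfExpert(experts, gate, return_mixture=True)
    print(moe(x).shape)  # (4, 8)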
|
StarcoderdataPython
|
158331
|
<gh_stars>10-100
from tmu.tsetlin_machine import TMClassifier
import numpy as np
from time import time
number_of_features = 20
noise = 0.1
X_train = np.random.randint(0, 2, size=(5000, number_of_features), dtype=np.uint32)
Y_train = np.logical_xor(X_train[:,0], X_train[:,1]).astype(dtype=np.uint32)
Y_train = np.where(np.random.rand(5000) <= noise, 1-Y_train, Y_train) # Adds noise
X_test = np.random.randint(0, 2, size=(5000, number_of_features), dtype=np.uint32)
Y_test = np.logical_xor(X_test[:,0], X_test[:,1]).astype(dtype=np.uint32)
tm = TMClassifier(10, 15, 3.0, platform='CUDA', boost_true_positive_feedback=0)
for i in range(20):
tm.fit(X_train, Y_train)
print("Accuracy:", 100*(tm.predict(X_test) == Y_test).mean())
np.set_printoptions(threshold=np.inf, linewidth=200, precision=2, suppress=True)
print("\nClass 0 Positive Clauses:\n")
precision = tm.clause_precision(0, 0, X_test, Y_test)
recall = tm.clause_recall(0, 0, X_test, Y_test)
for j in range(5):
print("Clause #%d W:%d P:%.2f R:%.2f " % (j, tm.get_weight(0, 0, j), precision[j], recall[j]), end=' ')
l = []
for k in range(number_of_features*2):
if tm.get_ta_action(0, 0, j, k) == 1:
if k < number_of_features:
l.append(" x%d" % (k))
else:
l.append("¬x%d" % (k-number_of_features))
print(" ∧ ".join(l))
print("\nClass 0 Negative Clauses:\n")
precision = tm.clause_precision(0, 1, X_test, Y_test)
recall = tm.clause_recall(0, 1, X_test, Y_test)
for j in range(5):
print("Clause #%d W:%d P:%.2f R:%.2f " % (j, tm.get_weight(0, 1, j), precision[j], recall[j]), end=' ')
l = []
for k in range(number_of_features*2):
if tm.get_ta_action(0, 1, j, k) == 1:
if k < number_of_features:
l.append(" x%d" % (k))
else:
l.append("¬x%d" % (k-number_of_features))
print(" ∧ ".join(l))
print("\nClass 1 Positive Clauses:\n")
precision = tm.clause_precision(1, 0, X_test, Y_test)
recall = tm.clause_recall(1, 0, X_test, Y_test)
for j in range(5):
print("Clause #%d W:%d P:%.2f R:%.2f " % (j, tm.get_weight(1, 0, j), precision[j], recall[j]), end=' ')
l = []
for k in range(number_of_features*2):
if tm.get_ta_action(1, 0, j, k) == 1:
if k < number_of_features:
l.append(" x%d" % (k))
else:
l.append("¬x%d" % (k-number_of_features))
print(" ∧ ".join(l))
print("\nClass 1 Negative Clauses:\n")
precision = tm.clause_precision(1, 1, X_test, Y_test)
recall = tm.clause_recall(1, 1, X_test, Y_test)
for j in range(5):
print("Clause #%d W:%d P:%.2f R:%.2f " % (j, tm.get_weight(1, 1, j), precision[j], recall[j]), end=' ')
l = []
for k in range(number_of_features*2):
if tm.get_ta_action(1, 1, j, k) == 1:
if k < number_of_features:
l.append(" x%d" % (k))
else:
l.append("¬x%d" % (k-number_of_features))
print(" ∧ ".join(l))
print("\nClause Co-Occurence Matrix:\n")
print(tm.clause_co_occurrence(X_test, percentage=True).toarray())
print("\nLiteral Frequency:\n")
print(tm.literal_clause_frequency())
|
StarcoderdataPython
|
131264
|
<filename>python/graphscope/analytical/app/pagerank_nx.py<gh_stars>1000+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from graphscope.framework.app import AppAssets
from graphscope.framework.app import not_compatible_for
from graphscope.framework.app import project_to_simple
__all__ = ["pagerank_nx"]
@project_to_simple
@not_compatible_for("arrow_property", "dynamic_property")
def pagerank_nx(graph, alpha=0.85, max_iter=100, tol=1e-06):
"""Evalute PageRank on a graph in NetworkX version.
Args:
graph (Graph): A projected simple graph.
        alpha (float, optional): Damping factor. Defaults to 0.85.
        max_iter (int, optional): Maximum number of iterations. Defaults to 100.
tol (float, optional): Error tolerance used to check convergence in power method solver.
Returns:
:class:`graphscope.framework.context.VertexDataContextDAGNode`:
A context with each vertex assigned with the pagerank value, evaluated in eager mode.
Examples:
.. code:: python
import graphscope as gs
sess = gs.session()
g = sess.g()
pg = g.project(vertices={"vlabel": []}, edges={"elabel": []})
            r = gs.pagerank_nx(pg, alpha=0.85, max_iter=10)
            sess.close()
"""
alpha = float(alpha)
max_iter = int(max_iter)
return AppAssets(algo="pagerank_nx", context="vertex_data")(
graph, alpha, max_iter, tol
)
|
StarcoderdataPython
|
5157030
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
_____________________________________________________________________________
Created By : <NAME> - Bacnv6
Created Date: Mon November 03 10:00:00 VNT 2020
Project : AkaOCR core
_____________________________________________________________________________
This file contain unit test for models
_____________________________________________________________________________
"""
import argparse
import sys
sys.path.append("../")
import torch
from models.detec.heatmap import HEAT
from models.recog.atten import Atten
from models.modules.converters import AttnLabelConverter
from engine.config import setup, load_yaml_config, dict2namespace
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def test_model_detec():
model = HEAT()
model = model.to(device)
x = torch.randn(1, 3, 768, 768).to(device)
print(x.shape)
y = model(x)
print(y[0].shape)
print(y[1].shape)
def test_model_recog(config_recog_yaml):
config = load_yaml_config(config_recog_yaml)
config = dict2namespace(config)
config.MODEL.NUM_CLASS = 3210
config.SOLVER.DEVICE = device
model = Atten(config)
model.to(device=device)
x = torch.randn(1, 1, 32, 128)
x = x.cuda()
x = x.to(device=device)
text = ["xxx"]
converter = AttnLabelConverter(["x", "X", "o"], device=device)
text, length = converter.encode(text, max_label_length=config.MODEL.MAX_LABEL_LENGTH)
y = model(x, text)
print(y.shape)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config_recog', type=str, help='path to recog data',
default='../data/attention_resnet_base_v1.yaml')
opt = parser.parse_args()
test_model_detec()
test_model_recog(opt.config_recog)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3520224
|
<gh_stars>1-10
"""
Module for DishLeafNode utils
"""
# Imports
import enum
import math
import re
import logging
module_logger = logging.getLogger(__name__)
# In future, PointingState class will be moved to a file for all the enum attributes for DishLeafNode.
class PointingState(enum.IntEnum):
"""
Pointing state of the dish.
"""
NONE = 0
READY = 1
SLEW = 2
TRACK = 3
SCAN = 4
UNKNOWN = 5
class UnitConverter:
def __init__(self, logger=module_logger):
self.logger = logger
# TODO: FOR FUTURE USE
def dms_to_rad(self, argin):
"""
Converts a number in Deg:Min:Sec to radians.
:param argin: list of numbers in degrees, minutes, seconds respectively in string.
Example: ['20', '30', '40']
:return: A number in radians.
        Example: 0.3579864 (approximately) is the returned value for ['20', '30', '40'] input.
"""
try:
degrees = float(argin[0])
minutes = float(argin[1])
seconds = float(argin[2])
rad_value = (math.pi / 180) * (degrees + (minutes / 60) + (seconds / 3600))
return rad_value
except IndexError as error:
log_msg = f"Error while converting Deg:Min:Sec to radians.{error}"
self.logger.error(log_msg)
        except ValueError as error:
log_msg = f"Error while converting Deg:Min:Sec to radians.{error}"
self.logger.error(log_msg)
# TODO: FOR FUTURE USE
def rad_to_dms(self, argin):
"""
Converts a number in radians to Deg:Min:Sec.
:param argin: A number in radians.
Example: 0.123472
:return: List of numbers in degrees, minutes, seconds respectively in string.
        Example: [7, 4, 27.928156941480466] is the returned value for input 0.123472.
"""
try:
# Sign variable represents the sign of the number (in radians) received in input.
# Sign should not be used in the radian to dms conversion. It should just be appended
# to the resulting dms value as it is.
sign = 1
if argin < 0:
sign = -1
dms = []
frac_min, degrees = math.modf(abs(argin) * (180 / math.pi))
frac_sec, minutes = math.modf(frac_min * 60)
seconds = frac_sec * 60
dms.append(int(degrees * sign))
dms.append(int(minutes))
dms.append(seconds)
return dms
        except (TypeError, ValueError) as error:
log_msg = f"Error while converting radians to dig:min:sec.{error}"
self.logger.error(log_msg)
# TODO: FOR FUTURE USE
def dms_to_dd(self, argin):
"""
        Converts a number in Deg:Min:Sec to decimal degrees.
:param argin: A number in Deg:Min:Sec.
Example: 18:31:48.0
        :return: A number in decimal degrees.
        Example: 18.529999999999998 is the returned value for 18:31:48.0 input.
"""
try:
dd = re.split("[:]+", argin)
deg_dec = (
abs(float(dd[0])) + ((float(dd[1])) / 60) + ((float(dd[2])) / 3600)
)
if "-" in dd[0]:
return deg_dec * (-1)
else:
return deg_dec
except IndexError as error:
log_msg = f"Error while converting Deg:Min:Sec to decimal degrees.{error}"
self.logger.error(log_msg)
        except ValueError as error:
log_msg = f"Error while converting Deg:Min:Sec to decimal degrees.{error}"
self.logger.error(log_msg)
class DishMode(enum.IntEnum):
UNKNOWN = 0
OFF = 1
STARTUP = 2
SHUTDOWN = 3
STANDBY_LP = 4
STANDBY_FP = 5
STOW = 6
CONFIG = 7
OPERATE = 8
MAINTENANCE = 9
FORBIDDEN = 10
ERROR = 11
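
# Usage sketch (added for illustration, not part of the original module):
# exercise the converters above with the values quoted in their docstrings.
if __name__ == "__main__":
    converter = UnitConverter()
    print(converter.dms_to_rad(["20", "30", "40"]))  # ~0.3579864 rad
    print(converter.rad_to_dms(0.123472))            # [7, 4, ~27.93]
    print(converter.dms_to_dd("18:31:48.0"))         # ~18.53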
|
StarcoderdataPython
|
8049394
|
from setuptools import setup, find_packages
ANALYSIS_PLUGINS = [
"complexity = pordego_complexity.complexity_analysis:analyze_complexity"
]
with open('LICENSE') as f:
LICENSE = f.read()
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7"
]
VERSION = "1.0.3"
setup(name="pordego-complexity",
version=VERSION,
license=LICENSE,
author="<NAME>",
author_email="<EMAIL>",
description="Pordego plugin for code complexity analysis using the Radon library",
packages=find_packages(exclude=('tests', 'docs', "tests.*")),
url="https://github.com/ttreptow/pordego-complexity",
download_url="https://github.com/ttreptow/pordego-complexity/tarball/{}".format(VERSION),
entry_points={"pordego.analysis": ANALYSIS_PLUGINS},
classifiers=CLASSIFIERS,
install_requires=["radon==2.0.0"]
)
|
StarcoderdataPython
|
181172
|
<reponame>lyraxvincent/phones-priceinkenya
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
import re
import numpy as np
import pandas as pd
# a function to clean phone titles so their search can yield better results
def cleantitle(phonetitle):
if str(phonetitle).endswith("GB") or "/" in str(phonetitle):
phonetitle = ' '.join(phonetitle.split()[:-1])
if "(" in str(phonetitle):
phonetitle = phonetitle.split("(")[0].strip()
return phonetitle
else:
return phonetitle
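# For illustration (not in the original script), cleantitle maps e.g.
#   "Samsung Galaxy A12 (Dual SIM) 64GB"  ->  "Samsung Galaxy A12"
# since the trailing "64GB" token is dropped and the "(...)" suffix is stripped.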
# read in csv file
df = pd.read_csv("csv files/phonesdata.csv")
df_ = df.copy() # make copy to avoid editing the original phone titles
df_['Phone Title'] = df_['Phone Title'].apply(cleantitle)
df_['Phone Title'] = df_['Phone Title'].apply(lambda s: s.replace(" ", "+")) # link syntax
phonenames = list(df_['Phone Title'])
prices = []
for i, phonename in enumerate(phonenames):
site = "https://www.jumia.co.ke/catalog/?q={}".format(phonename)
hdr = {'User-Agent': 'Mozilla/5.0'}
req = Request(site, headers=hdr)
page = urlopen(req)
soup = BeautifulSoup(page, "html.parser")
try:
price = soup.find("div", {"class": "prc"}).get_text()
prices.append(price)
print(f"{i}:Added price for [{phonename}]")
    except Exception:
        prices.append(np.nan)
        print(f"{i}:Price for [{phonename}] unavailable.")
# add prices column to dataframe
df['PriceJumia(Kshs)'] = pd.Series(prices)
df.to_csv("csv files/phonesdata_with_pfj.csv", index=False)
|
StarcoderdataPython
|
1843103
|
from selenium import webdriver
from .base import FunctionalTest
from .list_page import ListPage
from .my_lists_page import MyListsPage
def quit_if_possible(browser):
try:
browser.quit()
except:
pass
class SharingTest(FunctionalTest):
def test_can_share_a_list_with_another_user(self):
# Edith is a logged-in user
self.create_pre_authenticated_session('<EMAIL>')
edith_browser = self.browser
self.addCleanup(lambda: quit_if_possible(edith_browser))
# Her friend Jerry is also hanging out on the lists site
jerry_browser = webdriver.Chrome()
self.addCleanup(lambda: quit_if_possible(jerry_browser))
self.browser = jerry_browser
self.create_pre_authenticated_session('<EMAIL>')
# Edith goes to the home page and starts a list
self.browser = edith_browser
self.browser.get(self.live_server_url)
list_page = ListPage(self).add_list_item('Get help')
# She notices a "Share the list" option
share_box = list_page.get_share_box()
self.assertEqual(
share_box.get_attribute('placeholder'),
'<EMAIL>'
)
# She shares her list.
# The page updates to that it's shared with Jerry:
list_page.share_list_with('<EMAIL>')
# Jerry now goes to the lists page with his browser
self.browser = jerry_browser
MyListsPage(self).go_to_my_lists_page()
# He sees Edith's list in there!
self.browser.find_element_by_link_text('Get help').click()
# On the list page, Jerry can see that it's Edith's list
self.wait_for(lambda: self.assertEqual(
list_page.get_list_owner(),
'<EMAIL>'
))
# He adds an item to the list
list_page.add_list_item('Hi Edith!')
# When Edith refreshes the page, she sees Jerry's addition
self.browser = edith_browser
self.browser.refresh()
list_page.wait_for_row_in_list_table('Hi Edith!', 2)
|
StarcoderdataPython
|
8012600
|
<filename>code/CM5_racine_main.py
import math
def racine_dicho(x):
min, max, eps = 0, x, 1e-10
while True:
r = (min + max) / 2
if abs(r * r - x) < eps:
break
elif r * r < x:
min = r
else:
max = r
return r
if __name__ == "__main__":
x = float(input("x ? "))
sq_dicho = racine_dicho(x)
sq_math = math.sqrt(x)
print(f"La racine carrée de {x} est {sq_dicho} (= {sq_math})")
|
StarcoderdataPython
|
36771
|
"""Librerias Importadas"""
from flask import Flask
from flask import render_template
from flask import request
App=Flask(__name__)
@App.route('/')
def index():
"""Pagina Principal en donde se introduce el nombre, apellido, comision"""
return render_template('index.html')
@App.route('/porcentaje',methods=['POST'])
def porcentaje():
if request.method=='POST':
""":var file: es la variable que estoy utilizando para acceder al
archivo y copiar en el."""
file=open("archivo.csv","w")
""":var nombre: Donde se guarda el nombre obtenido en el html"""
nombre=request.form['nombre']
""":var apellido: Donde se guarda el apellido obtenido en el html"""
apellido=request.form['apellido']
""":var venta: la variable tipo venta se trae en tipo cadena
y se combierte con el float para poder manipularla"""
venta = float(request.form.get('venta'))
if (venta > 100000):
r = venta * 0.15
elif (venta > 75000):
r = venta * 0.10
elif (venta > 50000):
r = venta * 0.07
elif (venta > 25000):
r = venta * 0.05
else:
r = '¡Usted no ha realizado ventas en el Mes!'
"""Se esta escribiendo en el archivo csv"""
file.write(nombre)
file.write(",")
file.write(apellido)
file.write(",")
file.write(str(venta))
file.write(",")
file.write(str(r))
file.close()
""":return render_templates: es el return que se hace para mandar los valores
al html"""
return render_template('porcentaje.html',nom=nombre,ape=apellido,ven=venta,rr=r)
if __name__=="__main__":
App.run()
|
StarcoderdataPython
|
12858346
|
__all__ = ["ComponentTestCase"]
import os
import sys
import yaml
import unittest
from gada import component
from test.utils import TestCaseBase
class ComponentTestCase(TestCaseBase):
def test_load(self):
"""Test loading the testnodes package that is in PYTHONPATH."""
# Load component configuration
config = self.write_config_and_load(TestCaseBase.CONFIG_NODES)
self.assertEqual(config["runner"], "generic", "incorrect configuration")
# Get node configuration
node_config = component.get_node_config(config, "hello")
self.assertEqual(
node_config["runner"], "generic", "incorrect node configuration"
)
self.assertEqual(node_config["bin"], "python", "incorrect node configuration")
self.assertEqual(
node_config["argv"],
r"${comp_dir}/__init__.py ${argv}",
"incorrect node configuration",
)
def test_load_not_found(self):
"""Test loading a package that is not in the PYTHONPATH."""
with self.assertRaises(Exception):
comp = component.load("invalid")
def test_load_config(self):
"""Test loading config.yml file from testnodes package."""
config = self.write_config_and_load(TestCaseBase.CONFIG_NO_NODES)
self.assertEqual(
config, TestCaseBase.CONFIG_NO_NODES, "incorrect loaded configuration"
)
def test_load_config_empty(self):
"""Test loading an existing but empty config.yml file."""
with open(TestCaseBase.CONFIG_YML, "w+") as f:
f.write("")
config = self.load_config()
self.assertIsNotNone(config, "invalid configuration")
def test_load_config_not_found(self):
"""Test loading a non existing config.yml file."""
self.remove_config()
with self.assertRaises(Exception):
component.load_config(sys)
def test_get_node_config_not_found(self):
"""Test loading a config.yml file with unknown node."""
config = self.write_config_and_load(TestCaseBase.CONFIG_NODES)
with self.assertRaises(Exception):
component.get_node_config(config, "invalid")
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
8139518
|
<reponame>Jingil-Integrated-Management/JIM_backend<filename>apps/drawing/filters.py
import django_filters
from .models import Drawing
class DrawingFilter(django_filters.FilterSet):
created_at = django_filters.DateFilter(
field_name='created_at', lookup_expr='exact')
created_at__lte = django_filters.DateFilter(
field_name='created_at', lookup_expr='lte')
created_at__gte = django_filters.DateFilter(
field_name='created_at', lookup_expr='gte')
class Meta:
model = Drawing
fields = ['client', 'name', 'is_closed', 'is_outsource']
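# Illustrative request (assuming a DRF list view wired to this FilterSet):
#   GET /drawings/?client=3&created_at__gte=2021-01-01&is_closed=false
# combines the declared date-range filters with the Meta.fields lookups above.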
|
StarcoderdataPython
|
3492497
|
import coremltools
coreml_model = coremltools.converters.keras.convert('recognizer.h5',
input_names="image",
image_input_names="image",
image_scale=1/255.0,
is_bgr=False,
class_labels = ['Unknown', 'Seat', 'Piece 1', "Piece 2"])
coreml_model.save('FurnitureNet.mlmodel')
|
StarcoderdataPython
|
70934
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------
# Filename: core.py
# Purpose: plugin for reading and writing Site object into various format
# Author: microquake development team
# Email: <EMAIL>
#
# Copyright (C) 2016 microquake development team
# --------------------------------------------------------------------
"""
plugin for reading and writing Site object into various format
:copyright:
microquake development team (<EMAIL>)
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
from microquake.core import logger
def read_csv(filename, site_code='', has_header=True, **kwargs):
"""
read a csv file containing sensor information
The first line of the csv file should contain the site name
The expected file structure is as follows and should contain one header line
<network>, <sensor name>, <sensor type>, <no component>, x, y, z
where x, y and z represents the location of the sensors expressed in a local
    coordinate system. Note that the <sensor name> is limited to four characters
    because of NonLinLoc limitations.
    Example of file structure:
1. <Network>, <sensor long name>, <sensor code>, <sensor type>, <gain>,
<sensitivity>, <sx>, <sy>, <sz>, <channel 1 code>, <azimuth>, <dip>,
<channel 2 code>, <azimuth>, <dip>, <channel 3 code>, <azimuth>, <dip>
:param filename: path to a csv file
:type filename: string
:param site_code: site code
:type site_code: string
:param has_header: whether or not the input file has an header
:type has_header: bool
    :return: site object
:rtype: ~microquake.core.station.Site
"""
from microquake.core.data.station import Site, Network, Station, Channel
with open(filename) as ifile:
networks = []
stations = []
for i, line in enumerate(ifile.readlines()):
if has_header and (i == 0):
continue
tmp = line.split(',')
nc = tmp[0]
long_name = tmp[1]
sc = tmp[2]
st = tmp[3]
smt = tmp[4]
gain = tmp[5]
sensitivity = tmp[6]
sx = float(tmp[7])
sy = float(tmp[8])
sz = float(tmp[9])
channels = []
for c in range(0, 3):
cc = tmp[4 * c + 10]
if not cc:
continue
x = float(tmp[4 * c + 10 + 1])
y = float(tmp[4 * c + 10 + 2])
z = float(tmp[4 * c + 10 + 3])
# az = float(tmp[3 * c + 10 + 1])
# dip = float(tmp[3 * c + 10 + 2])
channel = Channel(code=cc)
channel.orientation = [x, y, z]
# channel.dip_azimuth = (dip, az)
channels.append(channel)
station = Station(long_name=long_name, code=sc, sensor_type=st,
motion_type=smt, gain=gain,
sensitivity=sensitivity, loc=[sx, sy, sz],
channels=channels)
index = None
for j, net in enumerate(networks):
if net.code == nc:
index = j
            if index is None:
network = Network(code=nc, stations=[])
networks.append(network)
index = -1
networks[index].stations.append(station)
site = Site(code=site_code, networks=networks)
return site
def read_pickle(filename, **kwargs):
"""
read site saved pickle format
:param filename:
:return:
"""
from microquake.core.data.station import Site
import pickle as pickle
try:
        with open(filename, 'rb') as f:
            site = pickle.load(f)
except:
logger.error('Not able to read %s' % filename)
return None
if not isinstance(site, Site):
logger.error(
"The pickle file does not contain and microquake.core.station.Site object")
return None
return site
def write_csv(site, filename, **kwargs):
"""
write a Site object to disk in csv format
:param filename: full path to file with extension
:type filename: str
:param site: site object to be saved
:type site: ~microquake.core.data.station.Site
:param protocol: pickling protocol level see pickle.dump documentation
for more information
:type protocol: int
:rtype: None
"""
# TODO write a function to save the site object in csv format
pass
def write_pickle(site, filename, protocol=-1, **kwargs):
"""
write a Site object to disk in pickle (.pickle or .npy extension) format
using the pickle module
:param filename: full path to file with extension
:type filename: str
:param site: site object to be saved
:type site: ~microquake.core.data.station.Site
:param protocol: pickling protocol level see pickle.dump documentation
for more information
:type protocol: int
"""
import pickle as pickle
    with open(filename, 'wb') as of:
pickle.dump(site, of, protocol=protocol)
def write_vtk(site, filename, **kwargs):
"""
write a Site object to disk in vtk format for viewing in Paraview for
example
:param filename: full path to file with extension
:type filename: str
:param site: site object to be saved
:type site: ~microquake.core.data.station.Site
:param protocol: pickling protocol level see pickle.dump documentation
for more information
:type protocol: int
"""
# TODO write a function to save the site object in vtk format for viewing
# in paraview
pass
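
# Usage sketch (added for illustration, not part of the original module); the
# CSV path below is a placeholder for a sensor file laid out as documented in
# read_csv.
if __name__ == "__main__":
    site = read_csv("sensors.csv", site_code="MINE01")
    write_pickle(site, "site.pickle")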
|
StarcoderdataPython
|
11287882
|
<reponame>ebursztein/SiteFab<filename>sitefab/nlp.py<gh_stars>1-10
import numpy as np
from perfcounters import PerfCounters
from tabulate import tabulate
from textacy import TextStats, make_spacy_doc, preprocessing
from textacy.text_stats import readability
from textacy.ke.yake import yake
from textacy.ke.textrank import textrank
from textacy.ke.sgrank import sgrank
from textacy.ke.scake import scake
from sitefab.utils import create_objdict, dict_to_objdict
# FIXME: use the config
NUM_TERMS = 50
SPACY_MODEL = 'en_core_web_sm' # 'en_core_web_lg'
# python -m spacy download en_core_web_sm
TERM_EXTRACTOR_ALGO = 'yake' # yake, sgrank, textrank
NGRAMS = (1, 2, 3) # default
def softmax(results, reverse=False):
"""Normalize results values via softmax.
Args:
results (array): term extraction results.
reverse (bool, optional): [description]. Defaults to False.
Returns:
[type]: [description]
"""
if len(np.asarray(results).shape) == 1:
# !case when there are less than 3 words as the rank algo won't work
fill_value = 1 / len(results)
x = np.full(len(results), fill_value)
results = [[t, 0] for t in results]
else:
x = np.array([i[1] for i in results])
if reverse:
x = 1 - x
e_x = np.exp(x - np.max(x))
scores = e_x / e_x.sum()
normalized_terms = []
for idx, s in enumerate(scores):
normalized_terms.append([results[idx][0], float(s)])
return normalized_terms
def extract_key_terms(doc, num_terms=50, ngrams=(1, 2, 3), algo='yake'):
"""Compute post most important terms
This is particularly useful for the search and related posts
Args:
doc (Spacy.doc): Doc to extract terms from.
        num_terms (int, optional): How many terms to return. Defaults to 50.
ngrams (int, optional): which size of ngrams to consider
algo (str, optional): which algorithm to use to find key terms
"""
if not len(doc):
return []
# special case
if len(doc) < 3:
return softmax(str(doc).split(' '))
    if algo == 'textrank':
        return softmax(textrank(doc, n_keyterms=num_terms))
    elif algo == 'yake':
        return softmax(yake(doc, ngrams=ngrams, topn=num_terms),
                       reverse=True)
    elif algo == 'scake':
        return softmax(scake(doc, topn=num_terms))
    elif algo == 'sgrank':
        return softmax(sgrank(doc, ngrams=ngrams,
                              n_keyterms=num_terms))
else:
err = 'Unknown key term extraction method:%s' % algo
raise Exception(err)
def text_cleanup(text):
"cleanup our text"
text = preprocessing.replace_emails(text, replace_with='')
text = preprocessing.replace_urls(text, replace_with='')
text = preprocessing.replace_hashtags(text, replace_with='')
text = preprocessing.replace_phone_numbers(text, replace_with='')
text = preprocessing.replace_numbers(text, replace_with='')
text = preprocessing.remove_accents(text)
text = preprocessing.remove_punctuation(text)
text = preprocessing.normalize_quotation_marks(text)
text = preprocessing.normalize_hyphenated_words(text)
text = text.replace('\n', ' ').replace('\t', ' ')
text = text.lower()
text = preprocessing.normalize_whitespace(text)
return text
def generate_clean_fields(post):
"Generate a cleaned up version of the post and its metadata"
clean_fields = create_objdict()
# cleaned up fields
clean_fields.title = ''
if post.meta.title:
clean_fields.title = text_cleanup(post.meta.title)
clean_fields.abstract = ""
if post.meta.abstract:
clean_fields.abstract = text_cleanup(post.meta.abstract)
clean_fields.authors = []
if post.meta.authors:
for author in post.meta.authors:
clean_fields.authors.append(text_cleanup(author))
# conference
clean_fields.conference_name = []
if post.meta.conference_name:
clean_fields.conference_name = text_cleanup(
post.meta.conference_name)
clean_fields.conference_short_name = ""
if post.meta.conference_short_name:
clean_fields.conference_short_name = text_cleanup(
post.meta.conference_short_name)
# category, tags, etc
clean_fields.category = ""
if post.meta.category:
clean_fields.category = text_cleanup(post.meta.category)
clean_fields.tags = []
if post.meta.tags:
for tag in post.meta.tags:
clean_fields.tags.append(text_cleanup(tag))
# text
clean_fields.text = ''
if post.text:
# !make sure to use post html and clean it to avoid markup keywords.
clean_fields.text = text_cleanup(post.text)
return clean_fields
def benchmark_term_extractor(doc, counters):
"benchmark various term extractor algorithms"
# TL;DR: yake is probably the best. Feel free to experiment
# see https://github.com/LIAAD/yake
# sgrank is really really slow
results = []
    methods = ['textrank', 'yake', 'sgrank']  # 'scake' is left out here
for method in methods:
counters.start(method)
results.append(extract_key_terms(doc, algo=method))
counters.stop(method)
counters.report()
table = []
for idx in range(20):
row = []
for aidx in range(len(methods)):
row.append(results[aidx][idx])
table.append(row)
print(tabulate(table, headers=methods))
return counters
def compute_stats(doc):
ts = TextStats(doc)
stats = create_objdict()
counts = {'sentences': ts.n_sents,
'words': ts.n_words,
'unique_words': ts.n_unique_words,
'chars': ts.n_chars,
'chars_per_word': ts.n_chars_per_word,
'long_words': ts.n_long_words,
'syllables': ts.n_syllables,
'syllables_per_word': ts.n_syllables_per_word,
'monosyllable_words': ts.n_monosyllable_words,
'polysyllable_words': ts.n_polysyllable_words
}
stats.counts = dict_to_objdict(counts)
readability = {}
if stats.counts.words > 0:
readability = {'flesch_kincaid_grade_level': ts.flesch_kincaid_grade_level,
'flesch_reading_ease': ts.flesch_reading_ease,
'smog_index': 0,
'gunning_fog_index': ts.gunning_fog_index,
'coleman_liau_index': ts.coleman_liau_index,
'automated_readability_index': ts.automated_readability_index,
'lix': ts.lix,
}
if stats.counts.sentences >= 30:
readability['smog_index'] = ts.smog_index
stats.readability = dict_to_objdict(readability)
return stats
def analyze_post(post, debug=False):
"Perform NLP analysis"
counters = PerfCounters()
nlp = create_objdict()
# clean fields
counters.start('cleanup')
clean_fields = generate_clean_fields(post)
nlp.clean_fields = clean_fields
counters.stop('cleanup')
# creating spacy docs
counters.start('make_spacy_docs')
all_cleaned_content = ' '.join([clean_fields.title, clean_fields.category,
" ".join(clean_fields.tags),
clean_fields.abstract, clean_fields.text])
# overall terms
cleaned_doc = make_spacy_doc(all_cleaned_content, lang=SPACY_MODEL)
# title terms
title_doc = make_spacy_doc(clean_fields.title, lang=SPACY_MODEL)
# for statistics
text_doc = make_spacy_doc(post.text, lang=SPACY_MODEL)
counters.stop('make_spacy_docs')
# terms extraction
counters.start('extract_key_terms')
nlp.terms = extract_key_terms(cleaned_doc, num_terms=NUM_TERMS,
algo=TERM_EXTRACTOR_ALGO, ngrams=NGRAMS)
# !note we restrict ngram to one as we only want the lemmized top terms.
nlp.title_terms = extract_key_terms(title_doc, num_terms=NUM_TERMS,
algo=TERM_EXTRACTOR_ALGO, ngrams=1)
counters.stop('extract_key_terms')
# text stats
counters.start('text_stats')
nlp.stats = compute_stats(text_doc)
counters.stop('text_stats')
if debug:
counters.report()
return nlp
|
StarcoderdataPython
|
4977574
|
<reponame>vaaliferov/119_dls2_nmt
import re
import torch
import telegram
import telegram.ext
import youtokentome as yttm
from plot import *
from config import *
from secret import *
from model import Model
src_tok = yttm.BPE(SRC_TOKENIZER_PATH)
trg_tok = yttm.BPE(TRG_TOKENIZER_PATH)
src_vocab_size = len(src_tok.vocab())
trg_vocab_size = len(trg_tok.vocab())
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = Model(src_vocab_size, trg_vocab_size, device)
state = torch.load(MODEL_PATH, map_location=device)
model.load_state_dict(state, strict=True)
def send_document(context, chat_id, file_path):
with open(file_path, 'rb') as fd:
context.bot.send_document(chat_id, fd)
def handle_text(update, context):
text = update.message.text
user = update.message.from_user
chat_id = update.message.chat_id
attn_maps, beam_search = False, False
if user['id'] != TG_BOT_OWNER_ID:
msg = f"@{user['username']} {user['id']}"
context.bot.send_message(TG_BOT_OWNER_ID, msg)
context.bot.send_message(TG_BOT_OWNER_ID, text)
if text == '/start':
usage = 'Please, send me a text in Russian'
context.bot.send_message(chat_id, usage)
return None
if text[-1] == '*':
text = text[:-1]
beam_search = True
if text[-1] == '#':
text = text[:-1]
attn_maps = True
p = '[^ЁёА-Яа-я0-9 ,.!?-]'
text = re.sub(p, '', text)
text = text.strip(' ')
if len(text) < MIN_CHAR_NUM:
usage = 'Please, send me a text in Russian'
context.bot.send_message(chat_id, usage)
return None
text = text[0].upper() + text[1:]
if text[-1] not in '.!?': text += '.'
src = src_tok.encode(text, bos=True, eos=True)
trg, enc_self_attn, dec_self_attn, dec_enc_attn = model.greedy_generate(src)
result = trg_tok.decode(trg, ignore_ids=SPECIAL_IDS)
context.bot.send_message(chat_id, result[0])
if beam_search and len(src) < 20:
beam_trg, _, _, _ = model.beam_generate(src)
beam_result = trg_tok.decode(beam_trg, ignore_ids=SPECIAL_IDS)
beam_result = [f'{i+1}. {r}' for i, r in enumerate(beam_result)]
context.bot.send_message(chat_id, '\n'.join(beam_result))
if attn_maps and len(src) < 20 and len(trg) < 20:
context.bot.send_message(chat_id, 'please wait.. (~15s)')
src_labels = [src_tok.id_to_subword(t) for t in src]
trg_labels = [trg_tok.id_to_subword(t) for t in trg]
plot_attn(enc_self_attn, src_labels, src_labels, ENC_SELF_ATTN_PLOT_PATH)
plot_attn(dec_enc_attn, src_labels, trg_labels[1:], DEC_ENC_ATTN_PLOT_PATH)
plot_attn(dec_self_attn, trg_labels[1:], trg_labels[1:], DEC_SELF_ATTN_PLOT_PATH)
send_document(context, chat_id, ENC_SELF_ATTN_PLOT_PATH)
send_document(context, chat_id, DEC_SELF_ATTN_PLOT_PATH)
send_document(context, chat_id, DEC_ENC_ATTN_PLOT_PATH)
f = telegram.ext.Filters.text
h = telegram.ext.MessageHandler
u = telegram.ext.Updater(TG_NMT_BOT_TOKEN)
u.dispatcher.add_handler(h(f,handle_text))
u.start_polling(); u.idle()
|
StarcoderdataPython
|
3316583
|
<gh_stars>10-100
from pathlib import Path
from typing import List
class BaseFileExtractor:
""" Base class for file extraction. """
def __init__(self, extenstion: str) -> None:
self._extenstion = extenstion
@staticmethod
def _check_dir_compliance(path: Path) -> bool:
return all((path.is_dir(), not path.name.startswith("."), not path.name.startswith("_")))
def _check_file_compliance(self, path: Path) -> bool:
return all(
(
not path.is_dir(),
path.suffix == self._extenstion,
not path.name.startswith("."),
not path.name.startswith("_"),
)
)
def _extract_recursively(self, folder: Path) -> List[Path]:
files = []
for path in folder.iterdir():
if self._check_dir_compliance(path):
subdirs = self._extract_recursively(path)
files.extend(subdirs)
continue
if not self._check_file_compliance(path):
continue
files.append(path)
return files
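
# Usage sketch (added for illustration, not part of the original module):
# collect every Python file under the current directory, skipping hidden and
# underscore-prefixed entries; the protected helper is called directly here
# for brevity.
if __name__ == "__main__":
    extractor = BaseFileExtractor(".py")
    for found in extractor._extract_recursively(Path(".")):
        print(found)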
|
StarcoderdataPython
|
107531
|
PAD = 0
EOS = 1
BOS = 2
UNK = 3
UNK_WORD = '<unk>'
PAD_WORD = '<pad>'
BOS_WORD = '<s>'
EOS_WORD = '</s>'
NEG_INF = -10000 # -float('inf')
|
StarcoderdataPython
|
12838270
|
def swap(vet, i, j):
aux = vet[i]
vet[i] = vet[j]
vet[j] = aux
def partition(vet, left, right):
i = left + 1
j = right
pivot = vet[left]
while i <= j:
if vet[i] <= pivot:
i += 1
else:
if vet[j] >= pivot:
j -= 1
else:
if i <= j:
swap(vet, i, j)
i += 1
j -= 1
swap(vet, left, j)
return j
def quicksort(vet, left, right):
if left < right:
index = partition(vet, left, right)
quicksort(vet, left, index - 1)
quicksort(vet, index + 1, right)
return vet
def main():
vet = [6, 3, 4, 5, 2, 7, 1, 9, 8, 0, 10]
print(quicksort(vet, 0, len(vet) - 1))
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6491695
|
import tensorflow as tf
def get_loss_func(phs, prs, pts, nhs, nrs, nts, args):
triple_loss = None
if args.loss == 'margin-based':
triple_loss = margin_loss(phs, prs, pts, nhs, nrs, nts, args.margin, args.loss_norm)
elif args.loss == 'logistic':
triple_loss = logistic_loss(phs, prs, pts, nhs, nrs, nts, args.loss_norm)
elif args.loss == 'limited':
triple_loss = limited_loss(phs, prs, pts, nhs, nrs, nts, args.pos_margin, args.neg_margin, args.loss_norm)
return triple_loss
def margin_loss(phs, prs, pts, nhs, nrs, nts, margin, loss_norm):
with tf.name_scope('margin_loss_distance'):
pos_distance = phs + prs - pts
neg_distance = nhs + nrs - nts
with tf.name_scope('margin_loss'):
if loss_norm == 'L1': # L1 normal
pos_score = tf.reduce_sum(tf.abs(pos_distance), axis=1)
neg_score = tf.reduce_sum(tf.abs(neg_distance), axis=1)
else: # L2 normal
pos_score = tf.reduce_sum(tf.square(pos_distance), axis=1)
neg_score = tf.reduce_sum(tf.square(neg_distance), axis=1)
loss = tf.reduce_sum(tf.nn.relu(tf.constant(margin) + pos_score - neg_score), name='margin_loss')
return loss
def positive_loss(phs, prs, pts, loss_norm):
with tf.name_scope('positive_loss_distance'):
pos_distance = phs + prs - pts
with tf.name_scope('positive_loss_score'):
if loss_norm == 'L1': # L1 score
pos_score = tf.reduce_sum(tf.abs(pos_distance), axis=1)
else: # L2 score
pos_score = tf.reduce_sum(tf.square(pos_distance), axis=1)
loss = tf.reduce_sum(pos_score, name='positive_loss')
return loss
def limited_loss(phs, prs, pts, nhs, nrs, nts, pos_margin, neg_margin, loss_norm, balance=1.0):
with tf.name_scope('limited_loss_distance'):
pos_distance = phs + prs - pts
neg_distance = nhs + nrs - nts
with tf.name_scope('limited_loss_score'):
if loss_norm == 'L1': # L1 score
pos_score = tf.reduce_sum(tf.abs(pos_distance), axis=1)
neg_score = tf.reduce_sum(tf.abs(neg_distance), axis=1)
else: # L2 score
pos_score = tf.reduce_sum(tf.square(pos_distance), axis=1)
neg_score = tf.reduce_sum(tf.square(neg_distance), axis=1)
pos_loss = tf.reduce_sum(tf.nn.relu(pos_score - tf.constant(pos_margin)))
neg_loss = tf.reduce_sum(tf.nn.relu(tf.constant(neg_margin) - neg_score))
loss = tf.add(pos_loss, balance * neg_loss, name='limited_loss')
return loss
def logistic_loss(phs, prs, pts, nhs, nrs, nts, loss_norm):
with tf.name_scope('logistic_loss_distance'):
pos_distance = phs + prs - pts
neg_distance = nhs + nrs - nts
with tf.name_scope('logistic_loss_score'):
if loss_norm == 'L1': # L1 score
pos_score = tf.reduce_sum(tf.abs(pos_distance), axis=1)
neg_score = tf.reduce_sum(tf.abs(neg_distance), axis=1)
else: # L2 score
pos_score = tf.reduce_sum(tf.square(pos_distance), axis=1)
neg_score = tf.reduce_sum(tf.square(neg_distance), axis=1)
pos_loss = tf.reduce_sum(tf.log(1 + tf.exp(pos_score)))
neg_loss = tf.reduce_sum(tf.log(1 + tf.exp(-neg_score)))
loss = tf.add(pos_loss, neg_loss, name='logistic_loss')
return loss
def mapping_loss(tes1, tes2, mapping, eye):
mapped_tes2 = tf.matmul(tes1, mapping)
map_loss = tf.reduce_sum(tf.reduce_sum(tf.pow(tes2 - mapped_tes2, 2), 1))
orthogonal_loss = tf.reduce_sum(tf.reduce_sum(tf.pow(tf.matmul(mapping, mapping, transpose_b=True) - eye, 2), 1))
return map_loss + orthogonal_loss
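
# Usage sketch (added for illustration, not part of the original module). It
# assumes a TF 1.x runtime, consistent with the tf.log calls above, and uses
# random embeddings in place of real head/relation/tail vectors.
if __name__ == '__main__':
    import numpy as np
    rng = np.random.RandomState(0)
    phs, prs, pts, nhs, nrs, nts = (
        tf.constant(rng.rand(8, 32), dtype=tf.float32) for _ in range(6))
    loss = margin_loss(phs, prs, pts, nhs, nrs, nts, margin=1.0, loss_norm='L1')
    with tf.Session() as sess:
        print(sess.run(loss))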
|
StarcoderdataPython
|
11358641
|
#!/usr/bin/env python
from PyZ3950 import zoom
def run ():
conn = zoom.Connection ('amicus.nlc-bnc.ca', 210)
conn.databaseName = 'NL'
q = zoom.Query ('CCL', 'ti="1066"')
ss = conn.scan (q)
for s in ss[0:10]:
print s
if __name__ == '__main__':
run ()
|
StarcoderdataPython
|
6580893
|
import json
import os
from .base_config import BaseConfig
class DiceboxConfig(BaseConfig):
def __init__(self, config_file: str = "dicebox.config"):
super().__init__(config_file=config_file)
###############################################################################
# Data Set Options
###############################################################################
# Load user defined config
if "DATASET" in os.environ:
self.DATASET = os.environ["DATASET"]
if "DICEBOX_COMPLIANT_DATASET" in os.environ:
if os.environ["DICEBOX_COMPLIANT_DATASET"] == "True":
self.DICEBOX_COMPLIANT_DATASET = True
elif os.environ["DICEBOX_COMPLIANT_DATASET"] == "False":
self.DICEBOX_COMPLIANT_DATASET = False
else:
                raise ValueError("DICEBOX_COMPLIANT_DATASET must be 'True' or 'False'")
if "NB_CLASSES" in os.environ:
self.NB_CLASSES = int(os.environ["NB_CLASSES"])
if "IMAGE_WIDTH" in os.environ:
self.IMAGE_WIDTH = int(os.environ["IMAGE_WIDTH"])
if "IMAGE_HEIGHT" in os.environ:
self.IMAGE_HEIGHT = int(os.environ["IMAGE_HEIGHT"])
if "DATA_BASE_DIRECTORY" in os.environ:
self.DATA_BASE_DIRECTORY = os.environ["DATA_BASE_DIRECTORY"]
###############################################################################
# Build Calculated Configs
###############################################################################
self.NETWORK_NAME = "%s_%ix%i" % (self.DATASET, self.IMAGE_WIDTH, self.IMAGE_HEIGHT)
self.INPUT_SHAPE = (self.IMAGE_WIDTH, self.IMAGE_HEIGHT, 3) # 3 indicates the number of channels (RGB) so 3.
self.DATA_DIRECTORY = "%s/%s/data/" % (self.DATA_BASE_DIRECTORY, self.NETWORK_NAME)
###############################################################################
# Neural Network Taxonomy Options
###############################################################################
if "MIN_NEURONS" in os.environ:
self.MIN_NEURONS = int(os.environ["MIN_NEURONS"])
if "MAX_NEURONS" in os.environ:
self.MAX_NEURONS = int(os.environ["MAX_NEURONS"])
if "MIN_LAYERS" in os.environ:
self.MIN_LAYERS = int(os.environ["MIN_LAYERS"])
if "MAX_LAYERS" in os.environ:
self.MAX_LAYERS = int(os.environ["MAX_LAYERS"])
if "LAYER_TYPES" in os.environ:
self.LAYER_TYPES = os.environ["LAYER_TYPES"]
if "ACTIVATION" in os.environ:
self.ACTIVATION = os.environ["ACTIVATION"]
if "OPTIMIZER" in os.environ:
self.OPTIMIZER = os.environ["OPTIMIZER"]
self.TAXONOMY = {
"min_neurons": self.MIN_NEURONS,
"max_neurons": self.MAX_NEURONS,
"min_layers": self.MIN_LAYERS,
"max_layers": self.MAX_LAYERS,
"layer_types": json.loads(self.LAYER_TYPES),
"activation": json.loads(self.ACTIVATION),
"optimizer": json.loads(self.OPTIMIZER),
}
###############################################################################
# Evolution Options
###############################################################################
if "EPOCHS" in os.environ:
self.EPOCHS = int(os.environ["EPOCHS"])
# Number of times to evolve the population.
if "GENERATIONS" in os.environ:
self.GENERATIONS = int(os.environ["GENERATIONS"])
# Number of networks in each generation.
if "POPULATION" in os.environ:
self.POPULATION = int(os.environ["POPULATION"])
if "NOISE" in os.environ:
self.NOISE = float(os.environ["NOISE"])
###############################################################################
# Training Options / Settings for the 1920x1080 dataset
###############################################################################
if "BATCH_SIZE" in os.environ:
self.BATCH_SIZE = int(os.environ["BATCH_SIZE"])
if "TRAIN_BATCH_SIZE" in os.environ:
self.TRAIN_BATCH_SIZE = int(os.environ["TRAIN_BATCH_SIZE"])
if "TEST_BATCH_SIZE" in os.environ:
self.TEST_BATCH_SIZE = int(os.environ["TEST_BATCH_SIZE"])
if "LOAD_BEST_WEIGHTS_ON_START" in os.environ:
if os.environ["LOAD_BEST_WEIGHTS_ON_START"] == "False":
self.LOAD_BEST_WEIGHTS_ON_START = False
elif os.environ["LOAD_BEST_WEIGHTS_ON_START"] == "True":
self.LOAD_BEST_WEIGHTS_ON_START = True
else:
                raise ValueError("LOAD_BEST_WEIGHTS_ON_START must be 'True' or 'False'")
###############################################################################
        # Directory Options
###############################################################################
if "LOGS_DIR" in os.environ:
self.LOGS_DIR = os.environ["LOGS_DIR"]
if "WEIGHTS_DIR" in os.environ:
self.WEIGHTS_DIR = os.environ["WEIGHTS_DIR"]
if "TMP_DIR" in os.environ:
self.TMP_DIR = os.environ["TMP_DIR"]
if "POPULATION_DIR" in os.environ:
self.POPULATION_DIR = os.environ["POPULATION_DIR"]
###############################################################################
# Server Options
###############################################################################
if "API_ACCESS_KEY" in os.environ:
self.API_ACCESS_KEY = os.environ["API_ACCESS_KEY"]
if "API_VERSION" in os.environ:
self.API_VERSION = os.environ["API_VERSION"]
if "LISTENING_HOST" in os.environ:
self.LISTENING_HOST = os.environ["LISTENING_HOST"]
if "FLASK_DEBUG" in os.environ:
if os.environ["FLASK_DEBUG"] == "True":
self.FLASK_DEBUG = True
elif os.environ["FLASK_DEBUG"] == "False":
self.FLASK_DEBUG = False
else:
                raise ValueError("FLASK_DEBUG must be 'True' or 'False'")
if "MODEL_WEIGHTS_FILENAME" in os.environ:
self.MODEL_WEIGHTS_FILENAME = os.environ["MODEL_WEIGHTS_FILENAME"]
###############################################################################
# Sensory Service Options
###############################################################################
if "SENSORY_SERVER" in os.environ:
self.SENSORY_SERVER = os.environ["SENSORY_SERVER"]
if "SENSORY_PORT" in os.environ:
self.SENSORY_PORT = int(os.environ["SENSORY_PORT"])
if "SENSORY_URI" in os.environ:
self.SENSORY_URI = os.environ["SENSORY_URI"]
if "SENSORY_SERVICE_RABBITMQ_EXCHANGE" in os.environ:
self.SENSORY_SERVICE_RABBITMQ_EXCHANGE = os.environ["SENSORY_SERVICE_RABBITMQ_EXCHANGE"]
if "SENSORY_SERVICE_RABBITMQ_BATCH_REQUEST_ROUTING_KEY" in os.environ:
self.SENSORY_SERVICE_RABBITMQ_BATCH_REQUEST_ROUTING_KEY = os.environ[
"SENSORY_SERVICE_RABBITMQ_BATCH_REQUEST_ROUTING_KEY"
]
if "SENSORY_SERVICE_RABBITMQ_BATCH_REQUEST_TASK_QUEUE" in os.environ:
self.SENSORY_SERVICE_RABBITMQ_BATCH_REQUEST_TASK_QUEUE = os.environ[
"SENSORY_SERVICE_RABBITMQ_BATCH_REQUEST_TASK_QUEUE"
]
if "SENSORY_SERVICE_RABBITMQ_URI" in os.environ:
self.SENSORY_SERVICE_RABBITMQ_URI = os.environ["SENSORY_SERVICE_RABBITMQ_URI"]
if "SENSORY_SERVICE_RABBITMQ_USERNAME" in os.environ:
self.SENSORY_SERVICE_RABBITMQ_USERNAME = os.environ["SENSORY_SERVICE_RABBITMQ_USERNAME"]
if "SENSORY_SERVICE_RABBITMQ_PASSWORD" in os.environ:
self.SENSORY_SERVICE_RABBITMQ_PASSWORD = os.environ["SENSORY_SERVICE_RABBITMQ_PASSWORD"]
if "SENSORY_SERVICE_RABBITMQ_SERVER" in os.environ:
self.SENSORY_SERVICE_RABBITMQ_SERVER = os.environ["SENSORY_SERVICE_RABBITMQ_SERVER"]
if "SENSORY_SERVICE_RABBITMQ_PORT" in os.environ:
self.SENSORY_SERVICE_RABBITMQ_PORT = int(os.environ["SENSORY_SERVICE_RABBITMQ_PORT"])
if "SENSORY_SERVICE_RABBITMQ_VHOST" in os.environ:
self.SENSORY_SERVICE_RABBITMQ_VHOST = os.environ["SENSORY_SERVICE_RABBITMQ_VHOST"]
self.SENSORY_SERVICE_RABBITMQ_URL = "%s%s:%s@%s:%s/%s" % (
self.SENSORY_SERVICE_RABBITMQ_URI,
self.SENSORY_SERVICE_RABBITMQ_USERNAME,
self.SENSORY_SERVICE_RABBITMQ_PASSWORD,
self.SENSORY_SERVICE_RABBITMQ_SERVER,
self.SENSORY_SERVICE_RABBITMQ_PORT,
self.SENSORY_SERVICE_RABBITMQ_VHOST,
)
if "SENSORY_SERVICE_SHARD_SIZE" in os.environ:
self.SENSORY_SERVICE_SHARD_SIZE = int(os.environ["SENSORY_SERVICE_SHARD_SIZE"])
###############################################################################
# Training Service Options
###############################################################################
if "TRAINING_SERVICE_RABBITMQ_EXCHANGE" in os.environ:
self.TRAINING_SERVICE_RABBITMQ_EXCHANGE = os.environ["TRAINING_SERVICE_RABBITMQ_EXCHANGE"]
if "TRAINING_SERVICE_RABBITMQ_TRAINING_REQUEST_ROUTING_KEY" in os.environ:
self.TRAINING_SERVICE_RABBITMQ_TRAINING_REQUEST_ROUTING_KEY = os.environ[
"TRAINING_SERVICE_RABBITMQ_TRAINING_REQUEST_ROUTING_KEY"
]
if "TRAINING_SERVICE_RABBITMQ_TRAIN_REQUEST_TASK_QUEUE" in os.environ:
self.TRAINING_SERVICE_RABBITMQ_TRAIN_REQUEST_TASK_QUEUE = os.environ[
"TRAINING_SERVICE_RABBITMQ_TRAIN_REQUEST_TASK_QUEUE"
]
if "TRAINING_SERVICE_RABBITMQ_RABBITMQ_URI" in os.environ:
self.TRAINING_SERVICE_RABBITMQ_RABBITMQ_URI = os.environ["TRAINING_SERVICE_RABBITMQ_RABBITMQ_URI"]
if "TRAINING_SERVICE_RABBITMQ_USERNAME" in os.environ:
self.TRAINING_SERVICE_RABBITMQ_USERNAME = os.environ["TRAINING_SERVICE_RABBITMQ_USERNAME"]
if "TRAINING_SERVICE_RABBITMQ_PASSWORD" in os.environ:
self.TRAINING_SERVICE_RABBITMQ_PASSWORD = os.environ["TRAINING_SERVICE_RABBITMQ_PASSWORD"]
if "TRAINING_SERVICE_RABBITMQ_SERVER" in os.environ:
self.TRAINING_SERVICE_RABBITMQ_SERVER = os.environ["TRAINING_SERVICE_RABBITMQ_SERVER"]
if "TRAINING_SERVICE_RABBITMQ_PORT" in os.environ:
self.TRAINING_SERVICE_RABBITMQ_PORT = int(os.environ["TRAINING_SERVICE_RABBITMQ_PORT"])
if "TRAINING_SERVICE_RABBITMQ_VHOST" in os.environ:
self.TRAINING_SERVICE_RABBITMQ_VHOST = os.environ["TRAINING_SERVICE_RABBITMQ_VHOST"]
self.TRAINING_SERVICE_RABBITMQ_URL = "%s%s:%s@%s:%s/%s" % (
self.TRAINING_SERVICE_RABBITMQ_RABBITMQ_URI,
self.TRAINING_SERVICE_RABBITMQ_USERNAME,
self.TRAINING_SERVICE_RABBITMQ_PASSWORD,
self.TRAINING_SERVICE_RABBITMQ_SERVER,
self.TRAINING_SERVICE_RABBITMQ_PORT,
self.TRAINING_SERVICE_RABBITMQ_VHOST,
)
###############################################################################
# Training Processor Options
###############################################################################
if "TRAINING_PROCESSOR_SERVICE_RABBITMQ_EXCHANGE" in os.environ:
self.TRAINING_PROCESSOR_SERVICE_RABBITMQ_EXCHANGE = os.environ[
"TRAINING_PROCESSOR_SERVICE_RABBITMQ_EXCHANGE"
]
if "TRAINING_PROCESSOR_SERVICE_RABBITMQ_TRAINING_REQUEST_ROUTING_KEY" in os.environ:
self.TRAINING_PROCESSOR_SERVICE_RABBITMQ_TRAINING_REQUEST_ROUTING_KEY = os.environ[
"TRAINING_PROCESSOR_SERVICE_RABBITMQ_TRAINING_REQUEST_ROUTING_KEY"
]
if "TRAINING_PROCESSOR_SERVICE_RABBITMQ_TRAIN_REQUEST_TASK_QUEUE" in os.environ:
self.TRAINING_PROCESSOR_SERVICE_RABBITMQ_TRAIN_REQUEST_TASK_QUEUE = os.environ[
"TRAINING_PROCESSOR_SERVICE_RABBITMQ_TRAIN_REQUEST_TASK_QUEUE"
]
if "TRAINING_PROCESSOR_SERVICE_RABBITMQ_URI" in os.environ:
self.TRAINING_PROCESSOR_SERVICE_RABBITMQ_URI = os.environ["TRAINING_PROCESSOR_SERVICE_RABBITMQ_URI"]
if "TRAINING_PROCESSOR_SERVICE_RABBITMQ_USERNAME" in os.environ:
self.TRAINING_PROCESSOR_SERVICE_RABBITMQ_USERNAME = os.environ[
"TRAINING_PROCESSOR_SERVICE_RABBITMQ_USERNAME"
]
if "TRAINING_PROCESSOR_SERVICE_RABBITMQ_PASSWORD" in os.environ:
self.TRAINING_PROCESSOR_SERVICE_RABBITMQ_PASSWORD = os.environ[
"TRAINING_PROCESSOR_SERVICE_RABBITMQ_PASSWORD"
]
if "TRAINING_PROCESSOR_SERVICE_RABBITMQ_SERVER" in os.environ:
self.TRAINING_PROCESSOR_SERVICE_RABBITMQ_SERVER = os.environ["TRAINING_PROCESSOR_SERVICE_RABBITMQ_SERVER"]
if "TRAINING_PROCESSOR_SERVICE_RABBITMQ_PORT" in os.environ:
self.TRAINING_PROCESSOR_SERVICE_RABBITMQ_PORT = int(os.environ["TRAINING_PROCESSOR_SERVICE_RABBITMQ_PORT"])
if "TRAINING_PROCESSOR_SERVICE_RABBITMQ_VHOST" in os.environ:
self.TRAINING_PROCESSOR_SERVICE_RABBITMQ_VHOST = os.environ["TRAINING_PROCESSOR_SERVICE_RABBITMQ_VHOST"]
self.TRAINING_PROCESSOR_SERVICE_RABBITMQ_URL = "%s%s:%s@%s:%s/%s" % (
self.TRAINING_PROCESSOR_SERVICE_RABBITMQ_URI,
self.TRAINING_PROCESSOR_SERVICE_RABBITMQ_USERNAME,
self.TRAINING_PROCESSOR_SERVICE_RABBITMQ_PASSWORD,
self.TRAINING_PROCESSOR_SERVICE_RABBITMQ_SERVER,
self.TRAINING_PROCESSOR_SERVICE_RABBITMQ_PORT,
self.TRAINING_PROCESSOR_SERVICE_RABBITMQ_VHOST,
)
###############################################################################
# Client Options
###############################################################################
if "CLASSIFICATION_SERVER" in os.environ:
self.CLASSIFICATION_SERVER = os.environ["CLASSIFICATION_SERVER"]
if "CLASSIFICATION_SERVER_PORT" in os.environ:
self.CLASSIFICATION_SERVER_PORT = int(os.environ["CLASSIFICATION_SERVER_PORT"])
if "CLASSIFICATION_SERVER_URI" in os.environ:
self.CLASSIFICATION_SERVER_URI = os.environ["CLASSIFICATION_SERVER_URI"]
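
# Usage sketch (added for illustration, not part of the original module):
# override a few settings through the environment before construction. The
# config file name is an assumed default expected by BaseConfig.
if __name__ == "__main__":
    os.environ["DATASET"] = "mnist"
    os.environ["IMAGE_WIDTH"] = "28"
    os.environ["IMAGE_HEIGHT"] = "28"
    cfg = DiceboxConfig(config_file="dicebox.config")
    print(cfg.NETWORK_NAME, cfg.INPUT_SHAPE, cfg.DATA_DIRECTORY)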
|
StarcoderdataPython
|
4969882
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common rates related utilities."""
import collections
import enum
import tensorflow.compat.v2 as tf
from tf_quant_finance.experimental import dates
InterestRateMarket = collections.namedtuple(
'InterestRateMarket',
[
# Instance of class RateCurve. The curve used for computing the forward
# expectation of Libor rate.
'reference_curve',
# Instance of class RateCurve. The curve used for discounting cashflows.
'discount_curve'
])
# TODO(b/149644030): Use daycounts.py for this.
class AverageType(enum.Enum):
"""Averaging types."""
  # Compounded rate
COMPOUNDING = 1
  # Arithmetic average
ARITHMATIC_AVERAGE = 2
class DayCountBasis(enum.Enum):
"""Day count basis for accrual."""
# Actual/360 day count basis
ACTUAL_360 = 1
  # Actual/365 day count basis
ACTUAL_365 = 2
def elapsed_time(date_1, date_2, dtype):
"""Computes elapsed time between two date tensors."""
days_in_year = 365.
return tf.cast(date_1.days_until(date_2), dtype=dtype) / (
days_in_year)
def get_daycount_fraction(date_start, date_end, basis, dtype):
"""Return the day count fraction between two dates using the input basis."""
default_values = tf.zeros(date_start.shape, dtype=dtype)
basis_as_int = tf.constant([x.value for x in basis], dtype=tf.int16)
year_fractions = tf.where(
tf.math.equal(basis_as_int,
tf.constant(DayCountBasis.ACTUAL_365.value,
dtype=tf.int16)),
dates.daycounts.actual_365_fixed(
start_date=date_start, end_date=date_end, dtype=dtype),
tf.where(
tf.math.equal(basis_as_int, tf.constant(
DayCountBasis.ACTUAL_360.value, dtype=tf.int16)),
dates.daycounts.actual_360(
start_date=date_start, end_date=date_end, dtype=dtype),
default_values))
return year_fractions
|
StarcoderdataPython
|
6655927
|
<filename>cvpy61.py
print("Gerador de PA")
print("-" * 15)
primeiro_termo = int(input("Primeiro termo: "))
razao_pa = int(input("Razão da PA: "))
soma = primeiro_termo
contador = 10
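# Example: with primeiro_termo = 1 and razao_pa = 2 this prints
# "1 -> 3 -> 5 -> 7 -> 9 -> 11 -> 13 -> 15 -> 17 -> 19 -> FIM"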
while contador > 0:
print(f"{soma} -> ", end="")
soma += razao_pa
contador -= 1
if contador == 0:
print("FIM")
|
StarcoderdataPython
|
6591106
|
<reponame>toxinu/django-bagou
# -*- coding: utf-8 -*-
from message import broadcast
|
StarcoderdataPython
|
3315576
|
#!/usr/bin/env python
from segments.segment import Segment
import vector as vec
import config
import utils
from matplotlib.patches import Arc
from math import sin, cos, radians, degrees, sqrt
def isAngleWithinRange(startAngle, endAngle, pointAngle):
if not (0 <= startAngle <= 360 and 0 <= endAngle <= 360 and 0 <= pointAngle <= 360):
raise ValueError("Angle outside range (0,360)")
if startAngle <= endAngle:
return pointAngle >= startAngle and pointAngle <= endAngle
return pointAngle >= startAngle or pointAngle <= endAngle
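# e.g. isAngleWithinRange(350, 20, 5) is True, because the arc wraps through 0 degrees.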
def isPointInsideCircleArc(point, circle):
pAngle = degrees(vec.angle(point - circle.center))
return isAngleWithinRange(circle.theta1, circle.theta2, pAngle)
class CircleArc(Segment):
def __init__(self, data):
self.center = vec.Point(data["center"][0], data["center"][1])
self.radius = data["radius"]
self.theta1 = data["theta1"]
self.theta2 = data["theta2"]
self.startsFromA = data["startsFromA"]
def draw(self, ax):
arc = Arc(
(self.center.x, self.center.y),
self.radius * 2,
self.radius * 2,
0,
self.theta1,
self.theta2,
color=config.PATH_COLOR,
)
ax.add_patch(arc)
def getFrameRect(self):
xCoords = [
self.center[0] + self.radius * cos(radians(self.theta1)),
self.center[0] + self.radius * cos(radians(self.theta2)),
]
yCoords = [
self.center[1] + self.radius * sin(radians(self.theta1)),
self.center[1] + self.radius * sin(radians(self.theta2)),
]
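        # Also include the axis-aligned extrema (0, 90, 180, 270 degrees) that lie on the arc,
        # since the bounding frame may extend beyond the two endpoints.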
for angle in range(0, 360, 90):
if isAngleWithinRange(self.theta1, self.theta2, angle):
xCoords.append(
self.center[0] + self.radius * round(cos(radians(angle)))
)
yCoords.append(
self.center[1] + self.radius * round(sin(radians(angle)))
)
pMin = vec.Point(min(xCoords), min(yCoords))
pMax = vec.Point(max(xCoords), max(yCoords))
return (pMin, pMax)
def intersectionWithLine(self, line):
known, unknown = utils.checkLineParallelism(line)
knownValue = -line[2] / line[known]
if (
knownValue < self.center[known] - self.radius
or knownValue > self.center[known] + self.radius
):
return []
tmp = self.radius**2 - (knownValue - self.center[known]) ** 2
if tmp < 0:
return []
unknownValue1 = self.center[unknown] + sqrt(tmp)
unknownValue2 = self.center[unknown] - sqrt(tmp)
if known:
result = [
vec.Point(unknownValue1, knownValue),
vec.Point(unknownValue2, knownValue),
]
else:
result = [
vec.Point(knownValue, unknownValue1),
vec.Point(knownValue, unknownValue2),
]
result = utils.removeDuplicatesPreservingOrder(result)
# remove points outside arc
return [p for p in result if isPointInsideCircleArc(p, self)]
def orderPoints(self, points):
points.sort(
key=lambda p: vec.angle(p - self.center), reverse=not self.startsFromA
)
if self.theta1 > self.theta2 and points:
if self.startsFromA:
if degrees(vec.angle(points[0] - self.center)) < self.theta1:
while degrees(vec.angle(points[-1] - self.center)) >= self.theta1:
points.insert(0, points.pop())
else:
if degrees(vec.angle(points[0] - self.center)) > self.theta2:
while degrees(vec.angle(points[-1] - self.center)) <= self.theta2:
points.insert(0, points.pop())
return points
|
StarcoderdataPython
|
1934802
|
"""The django management command sync_from_asana"""
import logging
from asana.error import NotFoundError, InvalidTokenError, ForbiddenError
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from djasana.connect import client_connect
from djasana.models import (
Attachment, Project, Story, SyncToken,
Tag, Task, Team, User, Webhook, Workspace)
from djasana.settings import settings
from djasana.utils import (
pop_unsupported_fields, set_webhook, sync_attachment, sync_project, sync_story, sync_task)
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""Sync data from Asana to the database"""
help = 'Import data from Asana and insert/update model instances'
commit = True
client = None
process_archived = False
synced_ids = [] # A running list of remote ids of tasks that have been synced.
@staticmethod
def get_client():
return client_connect()
def add_arguments(self, parser):
parser.add_argument(
'--noinput', action='store_false', dest='interactive', default=True,
help='If provided, no prompts will be issued to the user and the data will be synced.'
)
parser.add_argument(
"-w", "--workspace", action="append", default=[],
help='Sync only the named workspace (can be used multiple times). '
'By default all workspaces will be updated from Asana.'
)
parser.add_argument(
"-p", "--project", action="append", default=[],
help='Sync only the named project (can be used multiple times). '
'By default all projects will be updated from Asana.'
)
parser.add_argument(
"-m", "--model", action="append", default=[],
help='Sync only the named model (can be used multiple times). '
'By default all models will be updated from Asana.'
)
parser.add_argument(
"-mx", "--model-exclude", action="append", default=[],
help='Exclude the named model (can be used multiple times).'
)
parser.add_argument(
"-a", "--archive", action="store_false", dest='archive',
help='Sync project tasks etc. even if the project is archived. '
'By default, only tasks of unarchived projects are updated from Asana. '
'Regardless of this setting, the project itself will be updated, '
'perhaps becoming marked as archived. '
)
parser.add_argument(
'--nocommit', action='store_false', dest='commit',
default=True, help='Will not commit changes to the database.'
)
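    # Illustrative invocation (workspace name assumed):
    #   python manage.py sync_from_asana --workspace "My Workspace" --noinput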
def handle(self, *args, **options):
        self.commit = options.get('commit', True)
if self.commit and options.get('interactive', True):
self.stdout.write(
'WARNING: This will irreparably synchronize your local database from Asana.')
if not self._confirm():
self.stdout.write("No action taken.")
return
self.process_archived = options.get('archive')
models = self._get_models(options)
if options.get('verbosity', 0) >= 1:
message = "Synchronizing data from Asana."
self.stdout.write(message)
logger.info(message)
workspaces = options.get('workspace') or []
if settings.ASANA_WORKSPACE:
workspaces.append(settings.ASANA_WORKSPACE)
# Allow client to be mocked:
self.client = self.client or self.get_client()
workspace_ids = self._get_workspace_ids(workspaces)
projects = options.get('project')
for workspace_id in workspace_ids:
self._sync_workspace_id(workspace_id, projects, models)
@staticmethod
def _confirm():
yes_or_no = input("Are you sure you wish to continue? [y/N] ")
return yes_or_no.lower().startswith('y')
@staticmethod
def _get_models(options):
"""Returns a list of models to sync"""
models = options.get('model')
models_exclude = options.get('model_exclude')
app_models = list(apps.get_app_config('djasana').get_models())
if models:
good_models = []
model_names = [model_.__name__.lower() for model_ in app_models]
for model in models:
try:
index = model_names.index(model.lower())
except ValueError:
raise CommandError('{} is not an Asana model'.format(model))
else:
good_models.append(app_models[index])
models = good_models
else:
models = app_models
if models_exclude:
models = [model
for model in models
if model.__name__.lower() not in [m.lower() for m in models_exclude]]
return models
def _check_sync_project_id(self, project_id, workspace, models):
"""If we have a valid sync token for this project sync new events else sync the project"""
new_sync = False
try:
sync_token = SyncToken.objects.get(project_id=project_id)
try:
events = self.client.events.get({'resource': project_id, 'sync': sync_token.sync})
self._process_events(project_id, events, models)
self._set_webhook(workspace, project_id)
return
except InvalidTokenError as error:
sync_token.sync = error.sync
sync_token.save()
except SyncToken.DoesNotExist:
try:
self.client.events.get({'resource': project_id})
except InvalidTokenError as error:
new_sync = error.sync
is_archived = self._sync_project_id(project_id, models)
if not is_archived:
self._set_webhook(workspace, project_id)
if new_sync:
SyncToken.objects.create(project_id=project_id, sync=new_sync)
def _get_workspace_ids(self, workspaces):
workspace_ids = []
bad_list = []
workspaces_ = self.client.workspaces.find_all()
if workspaces:
for workspace in workspaces:
for wks in workspaces_:
if workspace in (wks['gid'], wks['name']):
workspace_ids.append(wks['gid'])
break
else:
bad_list.append(workspace)
else:
workspace_ids = [wks['gid'] for wks in workspaces_]
if bad_list:
if len(bad_list) == 1:
raise CommandError('{} is not an Asana workspace'.format(workspaces[0]))
raise CommandError('Specified workspaces are not valid: {}'.format(
', '.join(bad_list)))
# Return newer workspaces first so they get synced earlier
return sorted(workspace_ids, reverse=True)
def _get_project_ids(self, projects, workspace_id):
project_ids = []
bad_list = []
projects_ = self.client.projects.find_all({'workspace': workspace_id})
if projects:
for project in projects:
for prj in projects_:
if project in (prj['gid'], prj['name']):
project_ids.append(prj['gid'])
break
else:
bad_list.append(project)
else:
project_ids = [prj['gid'] for prj in projects_]
if bad_list:
if len(bad_list) == 1:
raise CommandError('{} is not an Asana project'.format(bad_list[0]))
raise CommandError('Specified projects are not valid: {}'.format(', '.join(bad_list)))
# Return newer projects first so they get synced earlier
return sorted(project_ids, reverse=True)
def _set_webhook(self, workspace, project_id):
"""Sets a webhook if the setting is configured and a webhook does not currently exist"""
if not (self.commit and settings.DJASANA_WEBHOOK_URL):
return
webhooks = [webhook for webhook in self.client.webhooks.get_all({
'workspace': workspace.remote_id, 'resource': project_id})]
if webhooks:
# If there is exactly one, and it is active, we are good to go,
# else delete them and start a new one.
webhooks_ = Webhook.objects.filter(project_id=project_id)
if len(webhooks) == webhooks_.count() == 1 and webhooks[0]['active']:
return
for webhook in webhooks:
self.client.webhooks.delete_by_id(webhook['id'])
Webhook.objects.filter(id__in=webhooks_.values_list('id', flat=True)[1:]).delete()
set_webhook(self.client, project_id)
def _process_events(self, project_id, events, models):
project = Project.objects.get(remote_id=project_id)
ignored_tasks = 0
for event in events['data']:
if event['type'] == 'project':
if Project in models:
if event['action'] == 'removed':
Project.objects.get(remote_id=event['resource']['gid']).delete()
else:
self._sync_project_id(project_id, models)
else:
ignored_tasks += 1
elif event['type'] == 'task':
if Task in models:
if event['action'] == 'removed':
Task.objects.get(remote_id=event['resource']['gid']).delete()
else:
self._sync_task(event['resource'], project, models)
else:
ignored_tasks += 1
elif event['type'] == 'story':
if Story in models:
self._sync_story(event['resource'])
else:
ignored_tasks += 1
tasks_done = len(events['data']) - ignored_tasks
if self.commit:
message = 'Successfully synced {0} events for project {1}.'.format(
tasks_done, project.name)
if ignored_tasks:
message += ' {0} events ignored for excluded models.'.format(ignored_tasks)
self.stdout.write(self.style.SUCCESS(message))
logger.info(message)
def _sync_project_id(self, project_id, models):
"""Sync this project by polling it. Returns boolean 'is archived?'"""
project_dict = self.client.projects.find_by_id(project_id)
logger.debug('Sync project %s', project_dict['name'])
logger.debug(project_dict)
if self.commit:
project = sync_project(self.client, project_dict)
            if Task in models and (not project_dict['archived'] or self.process_archived):
for task in self.client.tasks.find_all({'project': project_id}):
self._sync_task(task, project, models)
# Delete local tasks for this project that are no longer in Asana.
tasks_to_delete = Task.objects.filter(projects=project).exclude(
remote_id__in=self.synced_ids).exclude(remote_id__isnull=True)
if tasks_to_delete.count() > 0:
id_list = list(tasks_to_delete.values_list('remote_id', flat=True))
tasks_to_delete.delete()
message = 'Deleted {} tasks no longer present: {}'.format(len(id_list), id_list)
self.stdout.write(self.style.SUCCESS(message))
logger.info(message)
if self.commit:
message = 'Successfully synced project {}.'.format(project.name)
self.stdout.write(self.style.SUCCESS(message))
logger.info(message)
return project_dict['archived']
def _sync_story(self, story):
story_id = story.get('gid')
try:
story_dict = self.client.stories.find_by_id(story_id)
except NotFoundError as error:
logger.info(error.response)
return
logger.debug(story_dict)
remote_id = story_dict['gid']
sync_story(remote_id, story_dict)
def _sync_tag(self, tag, workspace):
tag_dict = self.client.tags.find_by_id(tag['gid'])
logger.debug(tag_dict)
if self.commit:
remote_id = tag_dict['gid']
tag_dict['workspace'] = workspace
followers_dict = tag_dict.pop('followers')
pop_unsupported_fields(tag_dict, Tag)
tag = Tag.objects.get_or_create(
remote_id=remote_id,
defaults=tag_dict)[0]
follower_ids = [follower['gid'] for follower in followers_dict]
followers = User.objects.filter(id__in=follower_ids)
tag.followers.set(followers)
def _sync_task(self, task, project, models, skip_subtasks=False):
"""Sync this task and its parent, dependencies, and subtasks
For parents and subtasks, this method is called recursively, so skip_subtasks True is
passed when syncing a parent task from a subtask.
"""
task_id = task['gid']
try:
task_dict = self.client.tasks.find_by_id(task_id)
except (ForbiddenError, NotFoundError):
try:
Task.objects.get(remote_id=task_id).delete()
except Task.DoesNotExist:
pass
return
logger.debug('Sync task %s', task_dict['name'])
logger.debug(task_dict)
if Task in models and self.commit:
remote_id = task_dict['gid']
parent = task_dict.pop('parent', None)
dependencies = task_dict.pop('dependencies', None) or []
if parent:
# If this is a task we already know about, assume it was just synced.
parent_id = parent['gid']
if parent_id not in self.synced_ids and \
not Task.objects.filter(remote_id=parent_id).exists():
self._sync_task(parent, project, models, skip_subtasks=True)
task_dict['parent_id'] = parent_id
task_ = sync_task(remote_id, task_dict, project, sync_tags=Tag in models)
self.synced_ids.append(remote_id)
if not skip_subtasks:
for subtask in self.client.tasks.subtasks(task_id):
if subtask['gid'] not in self.synced_ids:
self._sync_task(subtask, project, models)
if dependencies:
for subtask in dependencies:
if subtask['gid'] not in self.synced_ids:
self._sync_task(subtask, project, models)
task_.dependencies.set(
Task.objects.filter(remote_id__in=[dep['gid'] for dep in dependencies]))
if Attachment in models and self.commit:
for attachment in self.client.attachments.find_by_task(task_id):
sync_attachment(self.client, task_, attachment['gid'])
if Story in models and self.commit:
for story in self.client.stories.find_by_task(task_id):
self._sync_story(story)
return
def _sync_team(self, team):
team_dict = self.client.teams.find_by_id(team['gid'])
logger.debug(team_dict)
if self.commit:
remote_id = team_dict['gid']
organization = team_dict.pop('organization')
team_dict['organization_id'] = organization['gid']
team_dict['organization_name'] = organization['name']
pop_unsupported_fields(team_dict, Team)
Team.objects.get_or_create(
remote_id=remote_id,
defaults=team_dict)
def _sync_user(self, user, workspace):
user_dict = self.client.users.find_by_id(user['gid'])
logger.debug(user_dict)
if self.commit:
remote_id = user_dict['gid']
user_dict.pop('workspaces')
if user_dict['photo']:
user_dict['photo'] = user_dict['photo']['image_128x128']
user = User.objects.update_or_create(
remote_id=remote_id,
defaults=user_dict)[0]
if workspace:
user.workspaces.add(workspace)
def _sync_workspace_id(self, workspace_id, projects, models):
workspace_dict = self.client.workspaces.find_by_id(workspace_id)
logger.debug('Sync workspace %s', workspace_dict['name'])
logger.debug(workspace_dict)
if Workspace in models and self.commit:
remote_id = workspace_dict['gid']
workspace_dict.pop('email_domains')
workspace = Workspace.objects.update_or_create(
remote_id=remote_id, defaults=workspace_dict)[0]
else:
workspace = None
project_ids = self._get_project_ids(projects, workspace_id)
if 'workspace_id' in self.client.options \
and workspace_id != self.client.options['workspace_id']:
self.client.options['workspace_id'] = str(workspace_id)
if User in models:
for user in self.client.users.find_all({'workspace': workspace_id}):
self._sync_user(user, workspace)
if Tag in models:
for tag in self.client.tags.find_by_workspace(workspace_id):
self._sync_tag(tag, workspace)
if Team in models:
for team in self.client.teams.find_by_organization(workspace_id):
self._sync_team(team)
if Project in models:
for project_id in project_ids:
self._check_sync_project_id(project_id, workspace, models)
if workspace:
message = 'Successfully synced workspace {}.'.format(workspace.name)
self.stdout.write(self.style.SUCCESS(message))
logger.info(message)
|
StarcoderdataPython
|
6575731
|
#!/usr/bin/env python
import os
import optparse
from subprocess import Popen
from subprocess import PIPE
class WKOption(object):
"""
Build an option to be used throughout
"""
def __init__(self, name, shortcut, otype=str, action=None, dest=None, default=None, help=None, validate=None, \
validate_error=None, value=None):
self.name = name
self.shortcut = shortcut
self.otype = bool if (default is True or default is False) else otype
self.action = "store_true" if self.otype is bool else "store"
self.dest = dest if dest else name.replace('-', '_')
self.default = default
self.help = help
self._validate = validate
self.validate_error = validate_error
# if there's a value passed to us use it, else use the default
if value is not None:
self.value = value
else:
self.value = default
def validate(self):
        # only try to validate if we have a function to do so
        if self._validate is None:
            return True
        if self._validate(self.value):
return True
else:
return False, self.validate_error
def long(self):
return '--' + self.name.replace('_', '-')
def to_cmd(self):
"""
Return the str of this command, bool is just --long, etc
"""
if self.otype is bool:
if self.value:
return self.long()
else:
return ""
else:
return " ".join([self.long(), str(self.value) if self.value is not None else ""])
OPTIONS = [
WKOption('enable-plugins', '-F', default=True, help="Use flash and other plugins."),
WKOption('disable-javascript', '-J', default=False, help="Disable javascript."),
WKOption('no-background', '-b', default=False, help="Do not print background."),
WKOption('grayscale', '-g', default=False, help="Make greyscale."),
WKOption(
'orientation', '-O', default="Portrait", help="Set page orientation.",
validate=lambda x: x in ['Portrait', 'Landscape'],
validate_error="Orientation argument must be either Portrait or Landscape"
),
WKOption(
'page-size', '-s', default="A4", help="Set page size.",
validate=lambda x: x in ['A4', 'Letter'],
validate_error="Page size argument must be A4 or Letter"
),
WKOption('print-media-type', '', default = False, help="Set print media type."),
WKOption('dpi', '-D', default=100, help="Set DPI"),
WKOption('username', '-U', default="", help="Set the HTTP username"),
WKOption('password', '-P', default="", help="Set the HTTP password"),
WKOption('margin-bottom', '-B', default=10, help="Bottom page margin."),
WKOption('margin-top', '-T', default=10, help="Top page margin."),
WKOption('margin-left', '-L', default=10, help="Left page margin."),
WKOption('margin-right', '-R', default=10, help="Right page margin."),
WKOption(
'disable-smart-shrinking', None, default=False,
help="Disable the intelligent shrinking strategy used by WebKit that makes the pixel/dpi ratio none constant",
)
]
class WKHtmlToPdf(object):
"""
Convert an html page via its URL into a pdf.
"""
def __init__(self, *args, **kwargs):
self.url = None
self.output_file = None
# get the url and output_file options
try:
self.url, self.output_file = kwargs['url'], kwargs['output_file']
except KeyError:
self.url, self.output_file = args[0], args[1]
except IndexError:
pass
if not self.url or not self.output_file:
raise Exception("Missing url and output file arguments")
# save the file to /tmp if a full path is not specified
output_path = os.path.split(self.output_file)[0]
if not output_path:
self.output_file = os.path.join('/tmp', self.output_file)
# set the options per the kwargs coming in
for option in OPTIONS:
try:
option.value = kwargs[option.dest] # try to get the value for that kwarg passed to us.
except KeyError:
pass # can't find? just ignore and move on
self.params = [option.to_cmd() for option in OPTIONS]
self.screen_resolution = [1024, 768]
self.color_depth = 24
def render(self):
"""
        Render the URL into a pdf and set up the environment if required.
"""
# setup the environment if it isn't set up yet
if not os.getenv('DISPLAY'):
os.system("Xvfb :0 -screen 0 %sx%sx%s & " % (
self.screen_resolution[0],
self.screen_resolution[1],
self.color_depth
))
os.putenv("DISPLAY", '127.0.0.1:0')
# execute the command
command = 'wkhtmltopdf %s "%s" "%s" >> /tmp/wkhtp.log' % (
" ".join([cmd for cmd in self.params]),
self.url,
self.output_file
)
try:
p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE, close_fds=True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode == 0:
# call was successful
return
elif retcode < 0:
raise Exception("Terminated by signal: ", -retcode)
else:
raise Exception(stderr)
        except OSError as exc:
raise exc
def wkhtmltopdf(*args, **kwargs):
wkhp = WKHtmlToPdf(*args, **kwargs)
wkhp.render()
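# Illustrative usage (URL and output file name are placeholders):
#   wkhtmltopdf('http://example.com', 'example.pdf', grayscale=True)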
if __name__ == '__main__':
    # parse the command-line arguments
usage = "Usage: %prog [options] url output_file"
    parser = optparse.OptionParser(usage)
for option in OPTIONS:
if option.shortcut:
parser.add_option(
option.shortcut,
option.long(),
action=option.action,
dest=option.dest,
default=option.default,
help=option.help
)
else:
parser.add_option(
option.long(),
action=option.action,
dest=option.dest,
default=option.default,
help=option.help
)
options, args = parser.parse_args()
    # call the main method with the parsed arguments
wkhtmltopdf(*args, **options.__dict__)
|
StarcoderdataPython
|
4968007
|
Task
Given an integer, n, perform the following conditional actions:
If n is odd, print Weird
If n is even and in the inclusive range of 2 to 5, print Not Weird
If n is even and in the inclusive range of 6 to 20, print Weird
If n is even and greater than 20, print Not Weird
Input Format
A single line containing a positive integer, n.
Constraints
Output Format
Print Weird if the number is weird; otherwise, print Not Weird.
Sample Input 0
3
Sample Output 0
Weird
Explanation 0
n=3
n is odd and odd numbers are weird, so we print Weird.
Sample Input 1
24
Sample Output 1
Not Weird
Explanation 1
n=24
n > 20 and n is even, so it isn't weird. Thus, we print Not Weird.
#!/bin/python

import math
import os
import random
import re
import sys


if __name__ == '__main__':
    n = int(raw_input())

    if n % 2 != 0:
        print 'Weird'
    elif n >= 2 and n <= 5:
        print 'Not Weird'
    elif n >= 6 and n <= 20:
        print 'Weird'
    elif n > 20:
        print 'Not Weird'
|
StarcoderdataPython
|
11295087
|
<gh_stars>1-10
##########################################################################
#
# Demonstrate how to create a transaction.
#
# We will need a running bitcoind installation to manage our wallet
# and to retrieve UTXOs
#
# MIT license
#
# Copyright (c) 2018 christianb93
# Permission is hereby granted, free of charge, to
# any person obtaining a copy of this software and
# associated documentation files (the "Software"),
# to deal in the Software without restriction,
# including without limitation the rights to use,
# copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY
# OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
##########################################################################
import argparse
import binascii
import random
import btc.txn
import btc.script
import btc.keys
import btc.utils
####################################################
# Parse arguments
####################################################
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--amount",
type=float,
default="1.0",
help="Amount in BTC to be transferred"
)
parser.add_argument("--target",
default="mpV4bFDWN8NrWX9u3V47UgzxD9wSLQivwj",
help="Target address"
)
args=parser.parse_args()
return args
####################################################
# Main
####################################################
args = get_args()
#
# First we make an RPC call to retrieve unspent transaction output and
# select the outputs that we are going to spend
#
listunspent = btc.utils.rpcCall("listunspent")
#
# Now extract transaction IDs and store that in a list of
# dictionaries
#
# We split this list into one list of entries that are greater
# than the amount we want to transfer and one list of entries
# that are smaller
#
#
smaller = []
greater = []
amount_to_spend = float(args.amount)
for _ in listunspent:
if _['spendable']:
txid = _['txid']
vout = _['vout']
amount = float(_['amount'])
address = _['address']
coin = {'txid': txid,
'vout': vout,
'amount': amount,
'address' : address}
if amount > amount_to_spend:
greater.append(coin)
else:
smaller.append(coin)
#
# Next we sort the lists.
#
greater.sort(key=lambda entry: entry['amount'])
smaller.sort(key=lambda entry: entry['amount'], reverse=True)
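# greater is sorted ascending so that greater[0] is the smallest single output that still
# covers the amount; smaller is sorted descending so that the largest outputs are combined first.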
#
# If greater is not empty, take the smallest (i.e. now first)
# element
#
if len(greater) > 0:
amount_funded = greater[0]['amount']
to_be_spent = [greater[0]]
else:
#
# We need to combine more than one transaction output
#
to_be_spent = []
amount_funded = 0
for _ in smaller:
if amount_funded < amount_to_spend:
to_be_spent.append(_)
amount_funded += _['amount']
if (amount_funded < amount_to_spend):
# Failed, clean up list
to_be_spent = []
if 0 == len(to_be_spent):
print("Could not fund transaction")
exit(1)
else:
print("Here is the list of transaction outputs that I will use: ")
print(to_be_spent)
#
# Now go through the resulting list and build a list of private
# keys. At the same time, we build a list of transaction outputs
#
txos = []
privateKeys = []
for _ in to_be_spent:
tx = btc.txn.txn()
raw = btc.utils.rpcCall("getrawtransaction", [_['txid']])
tx.deserialize(raw)
#
# Get private key using again an RPC call
#
privKey = btc.utils.rpcCall("dumpprivkey", [_['address']])
privKey = btc.keys.wifToPayloadBytes(privKey)
privKey = int.from_bytes(privKey, "big")
privateKeys.append(privKey)
txos.append(tx.getOutputs()[_['vout']])
#
# Next we create our transaction. First we create the transaction
# inputs. We leave the signature scripts empty for the time
# being
#
txn = btc.txn.txn()
for _ in to_be_spent:
txin = btc.txn.txin(prevTxid = _['txid'], vout = _['vout'])
txn.addInput(txin)
#
# Next we do the outputs. For the time being, we use only one output
# So we need to convert the address to a public key hash
#
publicKeyHash = btc.keys.ecAddressToPKH(args.target)
publicKeyHash = binascii.hexlify(publicKeyHash).decode('ascii')
#
# Create locking script
#
lockingScript = btc.script.scriptPubKey(scriptType = btc.script.SCRIPTTYPE_P2PKH,
pubKeyHash = publicKeyHash)
#
# and output
#
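# Amounts are denominated in satoshi (1 BTC = 100,000,000 satoshi), hence the conversion below.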
txout = btc.txn.txout(value = int(amount_to_spend * 100000000),
scriptPubKey = lockingScript)
txn.addOutput(txout)
#
# Sign it
#
txn = btc.script.signTransaction(txn, txos, privateKeys)
#
# and send it
#
print("Sending transaction")
raw = txn.serialize()
s = btc.utils.rpcCall("sendrawtransaction", [raw, True])
print("Done, resulting transaction ID: ")
print(s)
|
StarcoderdataPython
|
5086880
|
<gh_stars>0
from django.core.exceptions import ValidationError
class KolibriError(Exception):
pass
class KolibriValidationError(ValidationError, KolibriError):
pass
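# KolibriValidationError can be caught either as Django's ValidationError or as the
# package-level KolibriError.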
class KolibriUpgradeError(KolibriError):
"""
Should be used whenever an error arises that is due to an anticipated future incompatible change,
for example: change in content database schemas, change in content that is not supported by old versions
of Kolibri.
"""
pass
|
StarcoderdataPython
|